Dataset columns (dtype and observed range / classes):

  column            dtype    range / classes
  repo_name         string   lengths 5 to 92
  path              string   lengths 4 to 221
  copies            string   19 values
  size              string   lengths 4 to 6
  content           string   lengths 766 to 896k
  license           string   15 values
  hash              int64    -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean         float64  6.51 to 99.9
  line_max          int64    32 to 997
  alpha_frac        float64  0.25 to 0.96
  autogenerated     bool     1 class
  ratio             float64  1.5 to 13.6
  config_test       bool     2 classes
  has_no_keywords   bool     2 classes
  few_assignments   bool     1 class

Each record below lists these fields in order: repo_name, path, copies, size, content (the full source file), license, hash, line_mean, line_max, alpha_frac, autogenerated, ratio, config_test, has_no_keywords, few_assignments.
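A minimal sketch of filtering records with this schema, assuming they have been exported to a local Parquet file; the file name and the filter thresholds are illustrative placeholders, not part of the dump:

import pandas as pd

# Hypothetical export of the records below; adjust the path to your copy.
df = pd.read_parquet("code_files.parquet")

# Keep human-written files with a plausible alphabetic fraction and line length.
subset = df[
    (~df["autogenerated"])
    & (df["alpha_frac"].between(0.25, 0.96))
    & (df["line_max"] <= 120)
]

for _, row in subset.iterrows():
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))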
harikishen/addons-server
src/olympia/api/fields.py
1
10385
from django.conf import settings from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.utils.encoding import smart_text from django.utils.translation import get_language, ugettext_lazy as _ from rest_framework import fields, serializers from olympia.amo.utils import to_language from olympia.translations.models import Translation class ReverseChoiceField(fields.ChoiceField): """ A ChoiceField that exposes the "human-readable" values of its choices, while storing the "actual" corresponding value as normal. This is useful when you want to expose string constants to clients while storing integers in the database. Note that the values in the `choices_dict` must be unique, since they are used for both serialization and de-serialization. """ def __init__(self, *args, **kwargs): self.reversed_choices = {v: k for k, v in kwargs['choices']} super(ReverseChoiceField, self).__init__(*args, **kwargs) def to_representation(self, value): """ Convert to representation by getting the "human-readable" value from the "actual" one. """ value = self.choices.get(value, None) return super(ReverseChoiceField, self).to_representation(value) def to_internal_value(self, value): """ Convert to internal value by getting the "actual" value from the "human-readable" one that is passed. """ try: value = self.reversed_choices[value] except KeyError: self.fail('invalid_choice', input=value) return super(ReverseChoiceField, self).to_internal_value(value) class TranslationSerializerField(fields.Field): """ Django-rest-framework custom serializer field for our TranslatedFields. - When deserializing, in `to_internal_value`, it accepts both a string or a dictionary. If a string is given, it'll be considered to be in the default language. - When serializing, its behavior depends on the parent's serializer context: If a request was included, and its method is 'GET', and a 'lang' parameter was passed, then only returns one translation (letting the TranslatedField figure out automatically which language to use). Else, just returns a dict with all translations for the given `field_name` on `obj`, with languages as the keys. 
""" default_error_messages = { 'min_length': _(u'The field must have a length of at least {num} ' u'characters.'), 'unknown_locale': _(u'The language code {lang_code} is invalid.') } def __init__(self, *args, **kwargs): self.min_length = kwargs.pop('min_length', None) super(TranslationSerializerField, self).__init__(*args, **kwargs) def fetch_all_translations(self, obj, source, field): translations = field.__class__.objects.filter( id=field.id, localized_string__isnull=False) return {to_language(trans.locale): unicode(trans) for trans in translations} if translations else None def fetch_single_translation(self, obj, source, field, requested_language): return unicode(field) if field else None def get_attribute(self, obj): source = self.source or self.field_name field = fields.get_attribute(obj, source.split('.')) if not field: return None requested_language = None request = self.context.get('request', None) if request and request.method == 'GET' and 'lang' in request.GET: requested_language = request.GET['lang'] if requested_language: return self.fetch_single_translation(obj, source, field, requested_language) else: return self.fetch_all_translations(obj, source, field) def to_representation(self, val): return val def to_internal_value(self, data): if isinstance(data, basestring): self.validate(data) return data.strip() elif isinstance(data, dict): self.validate(data) for key, value in data.items(): data[key] = value and value.strip() return data return unicode(data) def validate(self, value): value_too_short = True if isinstance(value, basestring): if len(value.strip()) >= self.min_length: value_too_short = False else: for locale, string in value.items(): if locale.lower() not in settings.LANGUAGES: raise ValidationError( self.error_messages['unknown_locale'].format( lang_code=repr(locale))) if string and (len(string.strip()) >= self.min_length): value_too_short = False break if self.min_length and value_too_short: raise ValidationError( self.error_messages['min_length'].format(num=self.min_length)) class ESTranslationSerializerField(TranslationSerializerField): """ Like TranslationSerializerField, but fetching the data from a dictionary built from ES data that we previously attached on the object. """ suffix = '_translations' _source = None def get_source(self): if self._source is None: return None return self._source + self.suffix def set_source(self, val): self._source = val source = property(get_source, set_source) def attach_translations(self, obj, data, source_name, target_name=None): """ Look for the translation of `source_name` in `data` and create a dict with all translations for this field (which will look like {'en-US': 'mytranslation'}) and attach it to a property on `obj`. The property name is built with `target_name` and `cls.suffix`. If `target_name` is None, `source_name` is used instead. The suffix is necessary for two reasons: 1) The translations app won't let us set the dict on the real field without making db queries 2) This also exactly matches how we store translations in ES, so we can directly fetch the translations in the data passed to this method. 
""" if target_name is None: target_name = source_name target_key = '%s%s' % (target_name, self.suffix) source_key = '%s%s' % (source_name, self.suffix) target_translations = {v.get('lang', ''): v.get('string', '') for v in data.get(source_key, {}) or {}} setattr(obj, target_key, target_translations) # Serializer might need the single translation in the current language, # so fetch it and attach it directly under `target_name`. We need a # fake Translation() instance to prevent SQL queries from being # automatically made by the translations app. translation = self.fetch_single_translation( obj, target_name, target_translations, get_language()) setattr(obj, target_name, Translation(localized_string=translation)) def fetch_all_translations(self, obj, source, field): return field or None def fetch_single_translation(self, obj, source, field, requested_language): translations = self.fetch_all_translations(obj, source, field) or {} return (translations.get(requested_language) or translations.get(getattr(obj, 'default_locale', None)) or translations.get(getattr(obj, 'default_language', None)) or translations.get(settings.LANGUAGE_CODE) or None) class SplitField(fields.Field): """ A field composed of two separate fields: one used for input, and another used for output. Most commonly used to accept a primary key for input and use a full serializer for output. Example usage: addon = SplitField(serializers.PrimaryKeyRelatedField(), AddonSerializer()) """ label = None def __init__(self, _input, output, **kwargs): self.input = _input self.output = output kwargs['required'] = _input.required fields.Field.__init__(self, source=_input.source, **kwargs) def bind(self, field_name, parent): fields.Field.bind(self, field_name, parent) self.input.bind(field_name, parent) self.output.bind(field_name, parent) def get_read_only(self): return self._read_only def set_read_only(self, val): self._read_only = val self.input.read_only = val self.output.read_only = val read_only = property(get_read_only, set_read_only) def get_value(self, data): return self.input.get_value(data) def to_internal_value(self, value): return self.input.to_internal_value(value) def get_attribute(self, obj): return self.output.get_attribute(obj) def to_representation(self, value): return self.output.to_representation(value) class SlugOrPrimaryKeyRelatedField(serializers.RelatedField): """ Combines SlugRelatedField and PrimaryKeyRelatedField. Takes a `render_as` argument (either "pk" or "slug") to indicate how to serialize. """ read_only = False def __init__(self, *args, **kwargs): self.render_as = kwargs.pop('render_as', 'pk') if self.render_as not in ['pk', 'slug']: raise ValueError("'render_as' must be one of 'pk' or 'slug', " "not %r" % (self.render_as,)) self.slug_field = kwargs.pop('slug_field', 'slug') super(SlugOrPrimaryKeyRelatedField, self).__init__( *args, **kwargs) def to_representation(self, obj): if self.render_as == 'slug': return getattr(obj, self.slug_field) else: return obj.pk def to_internal_value(self, data): try: return self.queryset.get(pk=data) except: try: return self.queryset.get(**{self.slug_field: data}) except ObjectDoesNotExist: msg = (_('Invalid pk or slug "%s" - object does not exist.') % smart_text(data)) raise ValidationError(msg)
bsd-3-clause
9,180,062,847,117,697,000
37.040293
79
0.62494
false
4.318087
false
false
false
mppmu/secdec
examples/box1L/integrate_box1L.py
1
1796
from __future__ import print_function
from pySecDec.integral_interface import IntegralLibrary
import sympy as sp

# load c++ library
box1L = IntegralLibrary('box1L/box1L_pylink.so')

# choose integrator
box1L.use_Vegas(flags=2) # ``flags=2``: verbose --> see Cuba manual

# integrate
str_integral_without_prefactor, str_prefactor, str_integral_with_prefactor = box1L(real_parameters=[4.0, -0.75, 1.25, 1.0])

# convert complex numbers from c++ to sympy notation
str_integral_with_prefactor = str_integral_with_prefactor.replace(',','+I*')
str_prefactor = str_prefactor.replace(',','+I*')
str_integral_without_prefactor = str_integral_without_prefactor.replace(',','+I*')

# convert result to sympy expressions
integral_with_prefactor = sp.sympify(str_integral_with_prefactor.replace('+/-','*value+error*'))
integral_with_prefactor_err = sp.sympify(str_integral_with_prefactor.replace('+/-','*value+error*'))
prefactor = sp.sympify(str_prefactor)
integral_without_prefactor = sp.sympify(str_integral_without_prefactor.replace('+/-','*value+error*'))
integral_without_prefactor_err = sp.sympify(str_integral_without_prefactor.replace('+/-','*value+error*'))

# examples how to access individual orders
print('Numerical Result')
print('eps^-2:', integral_with_prefactor.coeff('eps',-2).coeff('value'), '+/- (', integral_with_prefactor_err.coeff('eps',-2).coeff('error'), ')')
print('eps^-1:', integral_with_prefactor.coeff('eps',-1).coeff('value'), '+/- (', integral_with_prefactor_err.coeff('eps',-1).coeff('error'), ')')
print('eps^0 :', integral_with_prefactor.coeff('eps',0).coeff('value'), '+/- (', integral_with_prefactor_err.coeff('eps',0).coeff('error'), ')')
print('Analytic Result')
print('eps^-2: -0.1428571429')
print('eps^-1: 0.6384337090')
print('eps^0 : -0.426354612+I*1.866502363')
gpl-3.0
-5,924,396,011,331,012,000
50.314286
146
0.712695
false
2.963696
false
true
false
pombredanne/unuk
src/unuk/contrib/eventweb/resources.py
1
1601
import logging
from unuk.core import exceptions

logger = logging.getLogger('eventweb')


class WSGIResource(object):

    def __init__(self, server):
        self.server = server
        self.handler = server.handler

    def __call__(self, environ, start_response):
        handler = self.handler.get_handler(environ['PATH_INFO'])
        if not handler:
            start_response('404 Not Found', [])
            return []
        response = getattr(self, '{0}_response'.format(handler.serve_as))
        return response(handler, environ, start_response)

    def jsonrpc_response(self, rpc, environ, start_response):
        if environ['REQUEST_METHOD'] != 'POST':
            start_response('403 Forbidden', [])
            return []
        content = environ['wsgi.input'].read()
        method, args, kwargs, id, version = rpc.get_method_and_args(content)
        if not method:
            result = exceptions.InvalidRequest('Method not available')
        try:
            function = rpc._getFunction(method)
        except exceptions.Fault, f:
            result = f
        else:
            rpc.logger.debug('invoking function %s' % method)
            result = function(rpc, environ, *args, **kwargs)
        data = rpc.dumps(result, id=id, version=version)
        start_response('200 OK', [('content-type', 'text/html'),
                                  ('content-length', str(len(data)))])
        return [data]

    def wsgi_response(self, handler, environ, start_response):
        return handler(environ, start_response)
bsd-3-clause
-2,716,399,108,737,009,700
32.375
76
0.571518
false
4.374317
false
false
false
hyperopt/hyperopt-nnet
hpnnet/nips2011_dbn.py
1
5468
""" Deep Belief Network (DBN) search spaces used in [1] and [2]. The functions in this file return pyll graphs that can be used as the `space` argument to e.g. `hyperopt.fmin`. The pyll graphs include hyperparameter constructs (e.g. `hyperopt.hp.uniform`) so `hyperopt.fmin` can perform hyperparameter optimization. See ./skdata_learning_algo.py for example usage of these functions. [1] Bergstra, J., Bardenet, R., Bengio, Y., Kegl, B. (2011). Algorithms for Hyper-parameter optimization, NIPS 2011. [2] Bergstra, J., Bengio, Y. (2012). Random Search for Hyper-Parameter Optimization, JMLR 13:281--305. """ __author__ = "James Bergstra" __license__ = "BSD-3" import numpy as np from hyperopt.pyll import scope from hyperopt import hp import pyll_stubs import nnet # -- load scope with nnet symbols def preproc_space( sup_min_epochs=300, sup_max_epochs=2000, max_seconds=60 * 60, ): """ Return a hyperopt-compatible pyll expression for a trained neural network. The trained neural network will have 0, 1, 2, or 3 hidden layers, and may have an affine first layer that does column normalization or PCA pre-processing. Each layer of the network will be pre-trained by some amount of contrastive divergence before being fine-tuning by SGD. The training program is built using stub literals `pyll_stubs.train_task` and `pyll_stubs.valid_task`. When evaluating the pyll program, these literals must be replaced with skdata Task objects with `vector_classification` semantics. See `skdata_learning_algo.py` for how to use the `use_obj_for_literal_in_memo` function to swap live Task objects in for these stubs. The search space described by this function corresponds to the DBN model used in [1] and [2]. """ train_task_x = scope.getattr(pyll_stubs.train_task, 'x') nnet0 = scope.NNet([], n_out=scope.getattr(train_task_x, 'shape')[1]) nnet1 = hp.choice('preproc', [ nnet0, # -- raw data scope.nnet_add_layers( # -- ZCA of data nnet0, scope.zca_layer( train_task_x, energy=hp.uniform('pca_energy', .5, 1), eps=1e-14, )), ]) param_seed = hp.choice('iseed', [5, 6, 7, 8]) time_limit = scope.time() + max_seconds nnets = [nnet1] nnet_i_pt = nnet1 for ii, cd_epochs_max in enumerate([3000, 2000, 1500]): layer = scope.random_sigmoid_layer( # -- hack to get different seeds for dif't layers seed=param_seed + cd_epochs_max, n_in=scope.getattr(nnet_i_pt, 'n_out'), n_out=hp.qloguniform('n_hid_%i' % ii, np.log(2**7), np.log(2**12), q=16), dist=hp.choice('W_idist_%i' % ii, ['uniform', 'normal']), scale_heuristic=hp.choice( 'W_ialgo_%i' % ii, [ ('old', hp.lognormal('W_imult_%i' % ii, 0, 1)), ('Glorot',)]), squash='logistic', ) nnet_i_raw = scope.nnet_add_layer(nnet_i_pt, layer) # -- repeatedly calculating lower-layers wastes some CPU, but keeps # memory usage much more stable across jobs (good for cluster) # and the wasted CPU is not so much overall. nnet_i_pt = scope.nnet_pretrain_top_layer_cd( nnet_i_raw, train_task_x, lr=hp.lognormal('cd_lr_%i' % ii, np.log(.01), 2), seed=1 + hp.randint('cd_seed_%i' % ii, 10), n_epochs=hp.qloguniform('cd_epochs_%i' % ii, np.log(1), np.log(cd_epochs_max), q=1), # -- for whatever reason (?), this was fixed at 100 batchsize=100, sample_v0s=hp.choice('sample_v0s_%i' % ii, [False, True]), lr_anneal_start=hp.qloguniform('lr_anneal_%i' % ii, np.log(10), np.log(10000), q=1), time_limit=time_limit, ) nnets.append(nnet_i_pt) # this prior is not what I would do now, but it is what I did then... 
nnet_features = hp.pchoice( 'depth', [(.5, nnets[0]), (.25, nnets[1]), (.125, nnets[2]), (.125, nnets[3])]) sup_nnet = scope.nnet_add_layer( nnet_features, scope.zero_softmax_layer( n_in=scope.getattr(nnet_features, 'n_out'), n_out=scope.getattr(pyll_stubs.train_task, 'n_classes'))) nnet4, report = scope.nnet_sgd_finetune_classifier( sup_nnet, pyll_stubs.train_task, pyll_stubs.valid_task, fixed_nnet=nnet1, max_epochs=sup_max_epochs, min_epochs=sup_min_epochs, batch_size=hp.choice('batch_size', [20, 100]), lr=hp.lognormal('lr', np.log(.01), 3.), lr_anneal_start=hp.qloguniform( 'lr_anneal_start', np.log(100), np.log(10000), q=1), l2_penalty=hp.choice('l2_penalty', [ 0, hp.lognormal('l2_penalty_nz', np.log(1.0e-6), 2.)]), time_limit=time_limit, ) return nnet4, report
bsd-3-clause
-6,206,191,359,409,669,000
34.051282
78
0.544989
false
3.428213
false
false
false
petertseng/x-common
bin/check-immutability.py
1
1580
#!/usr/bin/env python
import json
import subprocess
import sys

oldf = sys.argv[1]
newf = sys.argv[2]

immutable_keys = ('property', 'input', 'expected')

# Use jq to flatten the test data, and parse it
old = json.loads(subprocess.run([f"jq -r '[.. | objects | select(.uuid != null)]' {oldf}"], stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))
new = json.loads(subprocess.run([f"jq -r '[.. | objects | select(.uuid != null)]' {newf}"], stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))

# Convert new to dict uuid => case
new = {case['uuid']: case for case in new}

fails = set()
deleted = set()

# Iterate through old cases as only those could potentially be mutated
for case in old:
    uuid = case['uuid']

    # Check if the case has been deleted
    if uuid not in new:
        deleted.add(uuid)
        continue

    # Check that scenarios are only updated additively
    if 'scenarios' in case and not set(case['scenarios']).issubset(set(new[uuid]['scenarios'])):
        fails.add(uuid)
        continue

    # Check for changes to immutable keys
    for key in immutable_keys:
        if case[key] != new[uuid][key]:
            fails.add(uuid)
            break

if len(fails) == 0 and len(deleted) == 0:
    sys.exit(0)

if len(fails) > 0:
    print('The following tests contain illegal mutations:')
    for failure in fails:
        print(f" - {failure} ({new[failure]['description']})")

if len(deleted) > 0:
    print('The following tests have been deleted illegally:')
    for deletion in deleted:
        print(f" - {deletion}")

sys.exit(1)
mit
-3,237,632,367,559,430,000
28.259259
151
0.640506
false
3.442266
false
false
false
ssfrr/advenshare
mouserver/mouserver/server.py
1
6082
import websocket import json import logging import coloredlogs import sys import ssl from getopt import gnu_getopt, GetoptError from mouserver_ext import grab_window, Window import random import string import time class Mouserver: def __init__(self, ws_url, session, window): self.ws_url = ws_url self.session = session self.window = window self.log = logging.getLogger('mouserver') self.ws_log = logging.getLogger('websocket') self.uid = ''.join(random.choice(string.letters) for i in xrange(20)) self.name = 'MouServer' self.log.info("Websocket URL: %s", self.ws_url) self.log.info("Session ID: %s", self.session) window_name = self.window.get_name() w, h = self.window.get_size() self.log.info("Window: %s (%dx%d)", window_name, w, h) self.method_table = {} self.register('mouseMove', self.mouse_move) self.register('mouseDown', self.mouse_down) self.register('mouseUp', self.mouse_up) self.wsapp = websocket.WebSocketApp( ws_url, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close, on_open=self.on_open) def run_forever(self): self.wsapp.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE}) def on_message(self, ws, message): try: msg = json.loads(message) except ValueError: self.log.warning("Received non-JSON data") return if 'type' not in msg: self.log.warning("Received data with no command") return msg_type = msg['type'] method = self.method_table.get(msg_type, None) if method is not None: method(msg) else: self.log.warning("Received unknown msg type: %s", msg_type) def on_error(self, ws, error): self.ws_log.error(error) def on_close(self, ws): self.ws_log.error("Connection closed") raise MouserverConnectionClosedError("Connection closed") def on_open(self, ws): self.ws_log.info("Connection established") self.ws_log.info("Joining session: %s", self.session) ws.send(json.dumps({ 'type': 'announce', 'srcID': self.uid, 'userName': self.name, 'activeMouseOnly': True })) ws.send(json.dumps({ 'type': 'joinSession', 'srcID': self.uid, 'sessionID': self.session })) def register(self, msg_type, method): self.method_table[msg_type] = method def mouse_move(self, msg): x = float(msg['x']) y = float(msg['y']) self.log.debug("mouse_move (%f, %f)", x, y) self.window.mouse_move_ratio(x, y) def mouse_down(self, msg): x = float(msg['x']) y = float(msg['y']) # javascript (and the websockets) use 0, 1, 2 for the mouse buttons, # but libxdo uses 1, 2, 3 button = int(msg['button']) + 1 self.log.debug("mouse_down (%f, %f, %d)", (x, y, button)) self.window.mouse_move_ratio(x, y) self.window.mouse_down(button) def mouse_up(self, msg): x = float(msg['x']) y = float(msg['y']) # javascript (and the websockets) use 0, 1, 2 for the mouse buttons, # but libxdo uses 1, 2, 3 button = int(msg['button']) + 1 self.log.debug("mouse_up (%f, %f, %d)", (x, y, button)) self.window.mouse_move_ratio(x, y) self.window.mouse_up(button) class MouserverConnectionClosedError(Exception): pass def print_usage(): print "usage: %s -u <websocket_url> -s <session_id> [-w <window_id>]" % sys.argv[0] print "" print " --url, -u <websocket_url>" print " specifies the websocket URL to which the program should" print " connect to receive user interaction events (required)" print " --session, -s <session_id>" print " specifies the string that uniquely identifies this session" print " (required)" print " --window, -w <window_id>" print " specifies the X11 window ID of the window with which to interact." print " If this is not specified, you will be prompted to select a window" print " by clicking on it at startup." 
print "" print " --verbose, -v" print " outputs lots of protocol information" print " --help, -h" print " displays this usage information." def main(): loglevel = logging.INFO url = None session = None window = None short_opts = "hvu:s:w:" long_opts = [ 'help', 'verbose', 'url=', 'session=', 'window=', ] try: opts, args = gnu_getopt(sys.argv[1:], short_opts, long_opts) except GetoptError as err: print str(err) print_usage() sys.exit(2) for o, a in opts: if o in ('-h', '--help'): print_usage() sys.exit(0) elif o in ('-v', '--verbose'): loglevel = logging.DEBUG elif o in ('-u', '--url'): url = a elif o in ('-s', '--session'): session = a elif o in ('-w', '--window'): window = long(a) else: print "Unknown option: %s" % o print_usage() sys.exit(2) if url is None: print "Must specify server URL (-u)" sys.exit(1) if session is None: print "Must specify session ID (-s)" sys.exit(1) if window is None: print "Please select a window by clicking on it." window = grab_window() else: window = Window(window) log = logging.getLogger("main") coloredlogs.install(level=loglevel) while True: server = Mouserver(url, session, window) server.run_forever() time.sleep(5.0) log.warning("Restarting after 5 seconds due to dropped connection") if __name__ == '__main__': main()
mit
6,848,767,843,313,734,000
30.189744
87
0.554916
false
3.663855
false
false
false
dmlc/tvm
python/tvm/topi/vision/rcnn/roi_align.py
1
3987
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Roi align operator"""
import tvm
from tvm import te
from ...utils import get_const_tuple
from ...cpp.utils import bilinear_sample_nchw


def roi_align_nchw(data, rois, pooled_size, spatial_scale, sample_ratio=-1):
    """ROI align operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]

    rois : tvm.te.Tensor
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]

    pooled_size : int or list/tuple of two ints
        output size, or [out_height, out_width]

    spatial_scale : float
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]

    sample_ratio : int
        Optional sampling ratio of ROI align, using adaptive size by default.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_roi, channel, pooled_size, pooled_size]
    """
    dtype = rois.dtype
    _, channel, height, width = get_const_tuple(data.shape)
    num_roi, _ = get_const_tuple(rois.shape)

    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size_w = pooled_size
    else:
        pooled_size_h, pooled_size_w = pooled_size

    def _bilinear(i, c, y, x):
        outside = tvm.tir.any(y < -1.0, x < -1.0, y > height, x > width)
        y = tvm.te.max(y, 0.0)
        x = tvm.te.max(x, 0.0)
        val = bilinear_sample_nchw(data, (i, c, y, x), height - 1, width - 1)
        return tvm.tir.if_then_else(outside, 0.0, val)

    def _sample(i, c, ph, pw):
        roi = rois[i]
        batch_index = roi[0].astype("int32")
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]
        roi_start_h *= spatial_scale
        roi_end_h *= spatial_scale
        roi_start_w *= spatial_scale
        roi_end_w *= spatial_scale

        # force malformed ROIs to be 1x1
        roi_h = tvm.te.max(roi_end_h - roi_start_h, tvm.tir.const(1.0, dtype))
        roi_w = tvm.te.max(roi_end_w - roi_start_w, tvm.tir.const(1.0, dtype))

        bin_h = roi_h / pooled_size_h
        bin_w = roi_w / pooled_size_w

        if sample_ratio > 0:
            roi_bin_grid_h = roi_bin_grid_w = tvm.tir.const(sample_ratio, "int32")
        else:
            roi_bin_grid_h = te.ceil(roi_h / pooled_size_h).astype("int32")
            roi_bin_grid_w = te.ceil(roi_w / pooled_size_w).astype("int32")

        count = roi_bin_grid_h * roi_bin_grid_w
        rh = te.reduce_axis((0, roi_bin_grid_h))
        rw = te.reduce_axis((0, roi_bin_grid_w))
        roi_start_h += ph * bin_h
        roi_start_w += pw * bin_w
        return te.sum(
            _bilinear(
                batch_index,
                c,
                roi_start_h + (rh + 0.5) * bin_h / roi_bin_grid_h,
                roi_start_w + (rw + 0.5) * bin_w / roi_bin_grid_w,
            )
            / count,
            axis=[rh, rw],
        )

    return te.compute(
        (num_roi, channel, pooled_size_h, pooled_size_w), _sample, tag="pool,roi_align_nchw"
    )
apache-2.0
9,211,143,365,963,043,000
35.916667
98
0.605217
false
3.146803
false
false
false
tasleson/lvm-dubstep
lvmdbus/cmdhandler.py
1
14945
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v.2. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from subprocess import Popen, PIPE import time import threading from itertools import chain try: from . import cfg from .utils import pv_dest_ranges, log_debug, log_error from .lvm_shell_proxy import LVMShellProxy except SystemError: import cfg from utils import pv_dest_ranges, log_debug, log_error from lvm_shell_proxy import LVMShellProxy SEP = '{|}' total_time = 0.0 total_count = 0 # We need to prevent different threads from using the same lvm shell # at the same time. cmd_lock = threading.Lock() # The actual method which gets called to invoke the lvm command, can vary # from forking a new process to using lvm shell _t_call = None def _debug_c(cmd, exit_code, out): log_error('CMD= %s' % ' '.join(cmd)) log_error(("EC= %d" % exit_code)) log_error(("STDOUT=\n %s\n" % out[0])) log_error(("STDERR=\n %s\n" % out[1])) def call_lvm(command, debug=False): """ Call an executable and return a tuple of exitcode, stdout, stderr :param command: Command to execute :param debug: Dump debug to stdout """ # print 'STACK:' # for line in traceback.format_stack(): # print line.strip() # Prepend the full lvm executable so that we can run different versions # in different locations on the same box command.insert(0, cfg.LVM_CMD) process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True) out = process.communicate() stdout_text = bytes(out[0]).decode("utf-8") stderr_text = bytes(out[1]).decode("utf-8") if debug or process.returncode != 0: _debug_c(command, process.returncode, (stdout_text, stderr_text)) if process.returncode == 0: if cfg.DEBUG and out[1] and len(out[1]): log_error('WARNING: lvm is out-putting text to STDERR on success!') _debug_c(command, process.returncode, (stdout_text, stderr_text)) return process.returncode, stdout_text, stderr_text def _shell_cfg(): global _t_call log_debug('Using lvm shell!') lvm_shell = LVMShellProxy() _t_call = lvm_shell.call_lvm if cfg.USE_SHELL: _shell_cfg() else: _t_call = call_lvm def set_execution(shell): global _t_call with cmd_lock: _t_call = None if shell: log_debug('Using lvm shell!') lvm_shell = LVMShellProxy() _t_call = lvm_shell.call_lvm else: _t_call = call_lvm def time_wrapper(command, debug=False): global total_time global total_count with cmd_lock: start = time.time() results = _t_call(command, debug) total_time += (time.time() - start) total_count += 1 return results call = time_wrapper # Default cmd # Place default arguments for every command here. 
def _dc(cmd, args): c = [cmd, '--noheading', '--separator', '%s' % SEP, '--nosuffix', '--unbuffered', '--units', 'b'] c.extend(args) return c def parse(out): rc = [] for line in out.split('\n'): # This line includes separators, so process them if SEP in line: elem = line.split(SEP) cleaned_elem = [] for e in elem: e = e.strip() cleaned_elem.append(e) if len(cleaned_elem) > 1: rc.append(cleaned_elem) else: t = line.strip() if len(t) > 0: rc.append(t) return rc def parse_column_names(out, column_names): lines = parse(out) rc = [] for i in range(0, len(lines)): d = dict(list(zip(column_names, lines[i]))) rc.append(d) return rc def options_to_cli_args(options): rc = [] for k, v in list(dict(options).items()): if k.startswith("-"): rc.append(k) else: rc.append("--%s" % k) if v != "": rc.append(str(v)) return rc def pv_remove(device, remove_options): cmd = ['pvremove'] cmd.extend(options_to_cli_args(remove_options)) cmd.append(device) return call(cmd) def _tag(operation, what, add, rm, tag_options): cmd = [operation] cmd.extend(options_to_cli_args(tag_options)) if isinstance(what, list): cmd.extend(what) else: cmd.append(what) if add: cmd.extend(list(chain.from_iterable(('--addtag', x) for x in add))) if rm: cmd.extend(list(chain.from_iterable(('--deltag', x) for x in rm))) return call(cmd, False) def pv_tag(pv_devices, add, rm, tag_options): return _tag('pvchange', pv_devices, add, rm, tag_options) def vg_tag(vg_name, add, rm, tag_options): return _tag('vgchange', vg_name, add, rm, tag_options) def lv_tag(lv_name, add, rm, tag_options): return _tag('lvchange', lv_name, add, rm, tag_options) def vg_rename(vg, new_name, rename_options): cmd = ['vgrename'] cmd.extend(options_to_cli_args(rename_options)) cmd.extend([vg, new_name]) return call(cmd) def vg_remove(vg_name, remove_options): cmd = ['vgremove'] cmd.extend(options_to_cli_args(remove_options)) cmd.extend(['-f', vg_name]) return call(cmd) def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name]) pv_dest_ranges(cmd, pv_dests) return call(cmd) def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(snapshot_options)) cmd.extend(["-s"]) if size_bytes != 0: cmd.extend(['--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name]) return call(cmd) def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) if not thin_pool: cmd.extend(['--size', str(size_bytes) + 'B']) else: cmd.extend(['--thin', '--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name]) return call(cmd) def vg_lv_create_striped(vg_name, create_options, name, size_bytes, num_stripes, stripe_size_kb, thin_pool): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) if not thin_pool: cmd.extend(['--size', str(size_bytes) + 'B']) else: cmd.extend(['--thin', '--size', str(size_bytes) + 'B']) cmd.extend(['--stripes', str(num_stripes)]) if stripe_size_kb != 0: cmd.extend(['--stripesize', str(stripe_size_kb)]) cmd.extend(['--name', name, vg_name]) return call(cmd) def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', raid_type]) cmd.extend(['--size', str(size_bytes) + 'B']) if num_stripes != 0: 
cmd.extend(['--stripes', str(num_stripes)]) if stripe_size_kb != 0: cmd.extend(['--stripesize', str(stripe_size_kb)]) cmd.extend(['--name', name, vg_name]) return call(cmd) def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) return _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes, num_stripes, stripe_size_kb) def vg_lv_create_mirror(vg_name, create_options, name, size_bytes, num_copies): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', 'mirror']) cmd.extend(['--mirrors', str(num_copies)]) cmd.extend(['--size', str(size_bytes) + 'B']) cmd.extend(['--name', name, vg_name]) return call(cmd) def vg_create_cache_pool(md_full_name, data_full_name, create_options): cmd = ['lvconvert'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', 'cache-pool', '--force', '-y', '--poolmetadata', md_full_name, data_full_name]) return call(cmd) def vg_create_thin_pool(md_full_name, data_full_name, create_options): cmd = ['lvconvert'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--type', 'thin-pool', '--force', '-y', '--poolmetadata', md_full_name, data_full_name]) return call(cmd) def lv_remove(lv_path, remove_options): cmd = ['lvremove'] cmd.extend(options_to_cli_args(remove_options)) cmd.extend(['-f', lv_path]) return call(cmd) def lv_rename(lv_path, new_name, rename_options): cmd = ['lvrename'] cmd.extend(options_to_cli_args(rename_options)) cmd.extend([lv_path, new_name]) return call(cmd) def lv_resize(lv_full_name, size_change, pv_dests, resize_options): cmd = ['lvresize', '--force'] cmd.extend(options_to_cli_args(resize_options)) if size_change < 0: cmd.append("-L-%dB" % (-size_change)) else: cmd.append("-L+%dB" % (size_change)) cmd.append(lv_full_name) pv_dest_ranges(cmd, pv_dests) return call(cmd) def lv_lv_create(lv_full_name, create_options, name, size_bytes): cmd = ['lvcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T']) cmd.extend(['--name', name, lv_full_name]) return call(cmd) def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options): # lvconvert --type cache --cachepool VG/CachePoolLV VG/OriginLV cmd = ['lvconvert'] cmd.extend(options_to_cli_args(cache_options)) cmd.extend(['--type', 'cache', '--cachepool', cache_pool_full_name, lv_full_name]) return call(cmd) def lv_detach_cache(lv_full_name, detach_options, destroy_cache): cmd = ['lvconvert'] if destroy_cache: option = '--uncache' else: # Currently fairly dangerous # see: https://bugzilla.redhat.com/show_bug.cgi?id=1248972 option = '--splitcache' cmd.extend(options_to_cli_args(detach_options)) # needed to prevent interactive questions cmd.extend(["--yes", "--force"]) cmd.extend([option, lv_full_name]) return call(cmd) def pv_retrieve_with_segs(device=None): d = [] columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free', 'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free', 'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count', 'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name', 'vg_uuid', 'pv_seg_start', 'pvseg_size', 'segtype'] # Lvm has some issues where it returns failure when querying pvs when other # operations are in process, see: # https://bugzilla.redhat.com/show_bug.cgi?id=1274085 while True: cmd = _dc('pvs', ['-o', ','.join(columns)]) if device: cmd.extend(device) rc, out, err = call(cmd) if rc == 0: d = parse_column_names(out, columns) 
break else: time.sleep(0.2) log_debug("LVM Bug workaround, retrying pvs command...") return d def pv_resize(device, size_bytes, create_options): cmd = ['pvresize'] cmd.extend(options_to_cli_args(create_options)) if size_bytes != 0: cmd.extend(['--setphysicalvolumesize', str(size_bytes) + 'B']) cmd.extend([device]) return call(cmd) def pv_create(create_options, devices): cmd = ['pvcreate', '-ff'] cmd.extend(options_to_cli_args(create_options)) cmd.extend(devices) return call(cmd) def pv_allocatable(device, yes, allocation_options): yn = 'n' if yes: yn = 'y' cmd = ['pvchange'] cmd.extend(options_to_cli_args(allocation_options)) cmd.extend(['-x', yn, device]) return call(cmd) def pv_scan(activate, cache, device_paths, major_minors, scan_options): cmd = ['pvscan'] cmd.extend(options_to_cli_args(scan_options)) if activate: cmd.extend(['--activate', "ay"]) if cache: cmd.append('--cache') if len(device_paths) > 0: for d in device_paths: cmd.append(d) if len(major_minors) > 0: for mm in major_minors: cmd.append("%s:%s" % (mm)) return call(cmd) def vg_create(create_options, pv_devices, name): cmd = ['vgcreate'] cmd.extend(options_to_cli_args(create_options)) cmd.append(name) cmd.extend(pv_devices) return call(cmd) def vg_change(change_options, name): cmd = ['vgchange'] cmd.extend(options_to_cli_args(change_options)) cmd.append(name) return call(cmd) def vg_reduce(vg_name, missing, pv_devices, reduce_options): cmd = ['vgreduce'] cmd.extend(options_to_cli_args(reduce_options)) if len(pv_devices) == 0: cmd.append('--all') if missing: cmd.append('--removemissing') cmd.append(vg_name) cmd.extend(pv_devices) return call(cmd) def vg_extend(vg_name, extend_devices, extend_options): cmd = ['vgextend'] cmd.extend(options_to_cli_args(extend_options)) cmd.append(vg_name) cmd.extend(extend_devices) return call(cmd) def _vg_value_set(name, arguments, options): cmd = ['vgchange'] cmd.extend(options_to_cli_args(options)) cmd.append(name) cmd.extend(arguments) return call(cmd) def vg_allocation_policy(vg_name, policy, policy_options): return _vg_value_set(vg_name, ['--alloc', policy], policy_options) def vg_max_pv(vg_name, number, max_options): return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)], max_options) def vg_max_lv(vg_name, number, max_options): return _vg_value_set(vg_name, ['-l', str(number)], max_options) def vg_uuid_gen(vg_name, ignore, options): assert ignore is None return _vg_value_set(vg_name, ['--uuid'], options) def activate_deactivate(op, name, activate, control_flags, options): cmd = [op] cmd.extend(options_to_cli_args(options)) op = '-a' if control_flags: # Autoactivation if (1 << 0) & control_flags: op += 'a' # Exclusive locking (Cluster) if (1 << 1) & control_flags: op += 'e' # Local node activation if (1 << 2) & control_flags: op += 'l' # Activation modes if (1 << 3) & control_flags: cmd.extend(['--activationmode', 'complete']) elif (1 << 4) & control_flags: cmd.extend(['--activationmode', 'partial']) # Ignore activation skip if (1 << 5) & control_flags: cmd.append('--ignoreactivationskip') if activate: op += 'y' else: op += 'n' cmd.append(op) cmd.append(name) return call(cmd) def vg_retrieve(vg_specific): if vg_specific: assert isinstance(vg_specific, list) columns = ['vg_name', 'vg_uuid', 'vg_fmt', 'vg_size', 'vg_free', 'vg_sysid', 'vg_extent_size', 'vg_extent_count', 'vg_free_count', 'vg_profile', 'max_lv', 'max_pv', 'pv_count', 'lv_count', 'snap_count', 'vg_seqno', 'vg_mda_count', 'vg_mda_free', 'vg_mda_size', 'vg_mda_used_count', 'vg_attr', 'vg_tags'] cmd = _dc('vgs', 
['-o', ','.join(columns)]) if vg_specific: cmd.extend(vg_specific) d = [] rc, out, err = call(cmd) if rc == 0: d = parse_column_names(out, columns) return d def lv_retrieve_with_segments(): columns = ['lv_uuid', 'lv_name', 'lv_path', 'lv_size', 'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid', 'origin', 'data_percent', 'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv', 'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent', 'lv_role', 'lv_layout'] cmd = _dc('lvs', ['-a', '-o', ','.join(columns)]) rc, out, err = call(cmd) d = [] if rc == 0: d = parse_column_names(out, columns) return d if __name__ == '__main__': pv_data = pv_retrieve_with_segs() for p in pv_data: log_debug(str(p))
gpl-2.0
-7,792,544,381,953,663,000
23.14378
79
0.658147
false
2.718756
false
false
false
gmimano/commcaretest
corehq/apps/reports/filters/fixtures.py
1
5198
import json from django.core.urlresolvers import reverse from django.utils.translation import ugettext_noop from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem from corehq.apps.locations.util import load_locs_json, location_hierarchy_config from corehq.apps.reports.filters.base import BaseReportFilter class AsyncDrillableFilter(BaseReportFilter): # todo: add documentation # todo: cleanup template """ example_hierarchy = [{"type": "state", "display": "name"}, {"type": "district", "parent_ref": "state_id", "references": "id", "display": "name"}, {"type": "block", "parent_ref": "district_id", "references": "id", "display": "name"}, {"type": "village", "parent_ref": "block_id", "references": "id", "display": "name"}] """ template = "reports/filters/drillable_async.html" hierarchy = [] # a list of fixture data type names that representing different levels of the hierarchy. Starting with the root def fdi_to_json(self, fdi): return { 'fixture_type': fdi.data_type_id, 'fields': fdi.fields, 'id': fdi.get_id, 'children': getattr(fdi, '_children', None), } fdts = {} def data_types(self, index=None): if not self.fdts: self.fdts = [FixtureDataType.by_domain_tag(self.domain, h["type"]).one() for h in self.hierarchy] return self.fdts if index is None else self.fdts[index] @property def api_root(self): return reverse('api_dispatch_list', kwargs={'domain': self.domain, 'resource_name': 'fixture', 'api_name': 'v0.1'}) @property def full_hierarchy(self): ret = [] for i, h in enumerate(self.hierarchy): new_h = dict(h) new_h['id'] = self.data_types(i).get_id ret.append(new_h) return ret def generate_lineage(self, leaf_type, leaf_item_id): leaf_fdi = FixtureDataItem.get(leaf_item_id) index = None for i, h in enumerate(self.hierarchy[::-1]): if h["type"] == leaf_type: index = i if index is None: raise Exception( "Could not generate lineage for AsyncDrillableField due to a nonexistent leaf_type (%s)" % leaf_type) lineage = [leaf_fdi] for i, h in enumerate(self.full_hierarchy[::-1]): if i < index or i >= len(self.hierarchy)-1: continue real_index = len(self.hierarchy) - (i+1) lineage.insert(0, FixtureDataItem.by_field_value(self.domain, self.data_types(real_index - 1), h["references"], lineage[0].fields[h["parent_ref"]]).one()) return lineage @property def filter_context(self): root_fdis = [self.fdi_to_json(f) for f in FixtureDataItem.by_data_type(self.domain, self.data_types(0).get_id)] f_id = self.request.GET.get('fixture_id', None) selected_fdi_type = f_id.split(':')[0] if f_id else None selected_fdi_id = f_id.split(':')[1] if f_id else None if selected_fdi_id: index = 0 lineage = self.generate_lineage(selected_fdi_type, selected_fdi_id) parent = {'children': root_fdis} for i, fdi in enumerate(lineage[:-1]): this_fdi = [f for f in parent['children'] if f['id'] == fdi.get_id][0] next_h = self.hierarchy[i+1] this_fdi['children'] = [self.fdi_to_json(f) for f in FixtureDataItem.by_field_value(self.domain, self.data_types(i+1), next_h["parent_ref"], fdi.fields[next_h["references"]])] parent = this_fdi return { 'api_root': self.api_root, 'control_name': self.label, 'control_slug': self.slug, 'selected_fdi_id': selected_fdi_id, 'fdis': json.dumps(root_fdis), 'hierarchy': self.full_hierarchy } class AsyncLocationFilter(BaseReportFilter): # todo: cleanup template label = ugettext_noop("Location") slug = "location_async" template = "reports/filters/location_async.html" @property def filter_context(self): api_root = reverse('api_dispatch_list', kwargs={'domain': self.domain, 'resource_name': 
'location', 'api_name': 'v0.3'}) selected_loc_id = self.request.GET.get('location_id') return { 'api_root': api_root, 'control_name': self.label, # todo: cleanup, don't follow this structure 'control_slug': self.slug, # todo: cleanup, don't follow this structure 'loc_id': selected_loc_id, 'locations': json.dumps(load_locs_json(self.domain, selected_loc_id)), 'hierarchy': location_hierarchy_config(self.domain), } class MultiLocationFilter(AsyncDrillableFilter): template = "reports/filters/multi_location.html"
bsd-3-clause
-391,699,841,776,118,460
40.919355
130
0.563294
false
3.758496
false
false
false
mathieudesro/pathos
pathos/pp_map.py
1
7526
#!/usr/bin/env python # Based on code by Kirk Strauser <kirk@strauser.com> # Rev: 1139; Date: 2008-04-16 # (also see code in pathos.pp) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # * Neither the name of Kirk Strauser nor the names of other # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Forked by: Mike McKerns (April 2008) # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2008-2015 California Institute of Technology. # License: 3-clause BSD. The full license text is available at: # - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/pathos/LICENSE """ Very basic parallel processing support Implements a work-alike of the builtin map() function that distributes work across many processes. As it uses Parallel Python to do the actual multi-processing, code using this must conform to the usual PP restrictions (arguments must be serializable, etc.) """ from pathos.pp import __STATE, stats, __print_stats as print_stats #from pathos.pp import ParallelPythonPool as Pool from pathos.helpers.pp_helper import Server as ppServer def ppmap(processes, function, sequence, *sequences): """Split the work of 'function' across the given number of processes. Set 'processes' to None to let Parallel Python autodetect the number of children to use. Although the calling semantics should be identical to __builtin__.map (even using __builtin__.map to process arguments), it differs in that it returns a generator instead of a list. This enables lazy evaluation of the results so that other work can be done while the subprocesses are still running. 
>>> def rangetotal(n): return n, sum(range(n)) >>> list(map(rangetotal, range(1, 6))) [(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)] >>> list(ppmap(1, rangetotal, range(1, 6))) [(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)] """ ppservers = ("*",) # autodetect #from _ppserver_config import ppservers # read from a config file # Create a new server if one isn't already initialized if not __STATE['server']: __STATE['server'] = ppServer(ppservers=ppservers) #class dill_wrapper(object): # """handle non-picklable functions by wrapping with dill""" # def __init__(self, function): # from dill import dumps # self.pickled_function = dumps(function) # def __call__(self, *args): # from dill import loads #XXX: server now requires dill # f = loads(self.pickled_function) # return f(*args) # def dill_wrapper(function): # """handle non-picklable functions by wrapping with dill""" # from dill import dumps # pickled_function = dumps(function) # def unwrap(*args): # from dill import loads #XXX: server now requires dill # f = loads(pickled_function) # return f(*args) # return unwrap def submit(*args): #XXX: needs **kwds to allow "depfuncs, modules, ...? """Send a job to the server""" #print globals()['ncalls'] #FIXME: ncalls not in globals() #XXX: options for submit... #XXX: func -- function to be executed #XXX: depfuncs -- functions called from 'func' #XXX: modules -- modules to import #XXX: callback -- callback function to be called after 'func' completes #XXX: callbackargs -- additional args for callback(result, *args) #XXX: group -- allows naming of 'job group' to use in wait(group) #XXX: globals -- dictionary from which everything imports # from mystic.tools import wrap_function, wrap_bounds # return __STATE['server'].submit(function, args, \ # depfuncs=(wrap_function,wrap_bounds), \ ## modules=("mystic","numpy"), \ # globals=globals()) # p_function = dill_wrapper(function) # return __STATE['server'].submit(p_function, args, globals=globals()) #print __STATE['server'].get_ncpus(), "local workers" #XXX: debug return __STATE['server'].submit(function, args, globals=globals()) # Merge all the passed-in argument lists together. This is done # that way because as with the map() function, at least one list # is required but the rest are optional. a = [sequence] a.extend(sequences) # Set the requested level of multi-processing #__STATE['server'].set_ncpus(processes or 'autodetect') # never processes=0 if processes == None: __STATE['server'].set_ncpus('autodetect') else: __STATE['server'].set_ncpus(processes) # allow processes=0 #print "running with", __STATE['server'].get_ncpus(), "local workers" #XXX: debug # First, submit all the jobs. Then harvest the results as they # come available. return (subproc() for subproc in map(submit, *a)) def pp_map(function, sequence, *args, **kwds): '''extend python's parallel map function to parallel python Inputs: function -- target function sequence -- sequence to process in parallel Additional Inputs: ncpus -- number of 'local' processors to use [defaut = 'autodetect'] servers -- available distributed parallel python servers [default = ()] ''' procs = None servers = () if kwds.has_key('ncpus'): procs = kwds['ncpus'] kwds.pop('ncpus') if kwds.has_key('servers'): servers = kwds['servers'] kwds.pop('servers') # remove all the junk kwds that are added due to poor design! 
if kwds.has_key('nnodes'): kwds.pop('nnodes') if kwds.has_key('nodes'): kwds.pop('nodes') if kwds.has_key('launcher'): kwds.pop('launcher') if kwds.has_key('mapper'): kwds.pop('mapper') if kwds.has_key('queue'): kwds.pop('queue') if kwds.has_key('timelimit'): kwds.pop('timelimit') if kwds.has_key('scheduler'): kwds.pop('scheduler') # return Pool(procs, servers=servers).map(function, sequence, *args, **kwds) if not __STATE['server']: __STATE['server'] = job_server = ppServer(ppservers=servers) return list(ppmap(procs,function,sequence,*args)) if __name__ == '__main__': # code moved to "pathos/examples/pp_map.py pass # EOF
bsd-3-clause
8,449,697,180,205,107,000
40.811111
84
0.671273
false
3.80101
false
false
false
HSASec/ProFuzz
Connector_tcp.py
1
1675
import socket
import sys
import getopt
import DataGenerator
import time
import thread

tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
usage = sys.argv[0] + ' -h <host> -p <port> [-l <length>] [-c <command>]'


def main(argv):
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:p:l:")
    except getopt.GetoptError, err:
        print usage
        sys.exit(1)

    # check and set arguments
    for opt, arg in opts:
        if opt == '-h':
            host = arg
        elif opt == "-p":
            port = arg
        elif opt == "-b":
            length = arg
        elif opt == "-c":
            command = arg

    # check if values exist
    try:
        host
    except NameError:
        print 'a host is necessary!'
        print usage
        sys.exit(0)
    try:
        port
    except NameError:
        print 'a port is necessary'
        print usage
        sys.exit(0)

    # if there are no length given use random (length=0)
    try:
        length
    except NameError:
        length = 0
        print 'using random length'

    try:
        tcp.connect((host, int(port)))
        print "Connected"
    except socket.error:
        print "Couldn't connect to Server:" + host + ":" + port
        sys.exit(2)

    while(True):
        try:
            random = DataGenerator.randString(int(length))
            dataSent = tcp.send(random)
            print "sent"
            time.sleep(5)
        except socket.error:
            print "Connection lost..."
            break


if __name__ == "__main__":
    main(sys.argv[1:])
gpl-3.0
-1,375,576,282,421,108,200
22.661765
72
0.492537
false
4.115479
false
false
false
ergoregion/Rota-Program
Rota_System/Saving/Excell/Population.py
1
5654
__author__ = 'Neil Butcher'

from Error import ExcellImportExportError
import xlwt, xlrd
from Rota_System.Roles import GlobalRoleList, role
from Rota_System.Worker import Worker
from Rota_System.StandardTimes import date_string, get_date


def name(an_object):
    return an_object.name


class PopulationSavingObject(object):
    def __init__(self, population, filename):
        if filename:
            self._filename = filename
        else:
            raise ExcellImportExportError('No filename set')
        self._population = population
        self._book = None

    def create(self):
        self._book = xlwt.Workbook(encoding="utf-8")
        self._population_sheet = self._book.add_sheet("Population")
        self._population_sheet.write(0, 0, 'Name')
        self._population_sheet.write(0, 1, 'phone')
        self._population_sheet.write(0, 2, 'email')
        self._population_sheet.write(0, 3, 'address')
        self._qualifications_sheet = self._book.add_sheet("Qualifications")
        self._qualifications_sheet.write(0, 0, 'Name')
        j = 1
        for r in GlobalRoleList.roles:
            self._qualifications_sheet.write(0, j, r.description)
            j += 1
        self._dates_sheet = self._book.add_sheet("Unavailable Dates")
        self._dates_sheet.write(0, 0, 'Name')
        self._save()

    def _add_individual(self, person, row):
        self._population_sheet.write(row, 0, person.name)
        self._population_sheet.write(row, 1, person.phone_number)
        self._population_sheet.write(row, 2, person.email)
        self._population_sheet.write(row, 3, person.address)
        self._qualifications_sheet.write(row, 0, person.name)
        self._dates_sheet.write(row, 0, person.name)
        j = 1
        for r in GlobalRoleList.roles:
            if person.suitable_for_role(r):
                self._qualifications_sheet.write(row, j, "Y")
            j += 1
        j = 1
        for d in person.blacklisted_dates():
            self._dates_sheet.write(row, j, date_string(d))
            j += 1

    def populate(self):
        for j, person in enumerate(self._population):
            self._add_individual(person, j + 1)
        self._save()

    def _save(self):
        self._book.save(self._filename)

    def load(self):
        self._book = xlrd.open_workbook(self._filename)
        self._get_sheets()
        self._get_roles()
        self._get_people()
        return self._population

    def _get_sheets(self):
        names = self._book.sheet_names()
        if "Population" not in names:
            raise ExcellImportExportError('There is no population sheet in the file')
        else:
            self._population_sheet = self._book.sheet_by_name("Population")
        if "Qualifications" not in names:
            raise ExcellImportExportError('There is no qualification sheet in the file')
        else:
            self._qualifications_sheet = self._book.sheet_by_name("Qualifications")
        if "Unavailable Dates" in names:
            self._dates_sheet = self._book.sheet_by_name("Unavailable Dates")
        else:
            self._dates_sheet = None

    def _get_roles(self):
        self._sheet_role_list = []
        for i, cell in enumerate(self._qualifications_sheet.row(0)):
            if cell.ctype == 0:
                break
            try:
                r = role(cell.value)
            except:
                raise ExcellImportExportError('There was an unidentified role: ' + cell.value)
            if r is None:
                if i > 0:
                    raise ExcellImportExportError('There was an unidentified role: ' + cell.value)
            else:
                self._sheet_role_list.append(r)
        for r in GlobalRoleList.roles:
            if r not in self._sheet_role_list:
                raise ExcellImportExportError('There was a role unlisted on the sheet: ' + r.description)

    def _get_people(self):
        self._population = []
        for i in range(1, self._population_sheet.nrows):
            if self._population_sheet.cell_type(i, 0) == 0:
                break
            else:
                p = Worker()
                p.name = self._population_sheet.cell_value(i, 0)
                p.phone_number = self._population_sheet.cell_value(i, 1)
                p.email = self._population_sheet.cell_value(i, 2)
                p.address = self._population_sheet.cell_value(i, 3)
                self._get_qualifications(i, p)
                self._get_dates(i, p)
                if p.name in map(name, self._population):
                    raise ExcellImportExportError('There were people with the same name : ' + p.name)
                else:
                    self._population.append(p)

    def _get_qualifications(self, row, person):
        cells = self._qualifications_sheet.row(row)
        if cells[0].value != person.name:
            raise ExcellImportExportError('There was a mismatch between people and qualifications on row: ' + str(row))
        person.does_nothing()
        for i, r in enumerate(self._sheet_role_list):
            if cells[i + 1].ctype != 0:
                person.add_role(r.code)

    def _get_dates(self, row, person):
        if self._dates_sheet is None:
            return
        cells = self._dates_sheet.row(row)
        if cells[0].value != person.name:
            raise ExcellImportExportError('There was a mismatch between people and qualifications on row: ' + str(row))
        person.clear_blacklist()
        for i in range(1, len(cells)):
            if cells[i].ctype != 0:
                person.blacklist_date(get_date(cells[i].value))
mit
-4,878,196,566,270,454,000
33.901235
119
0.581889
false
3.843644
false
false
false
kirchenreich/osm-api-cache
osmcache/models.py
1
2003
from sqlalchemy import (
    BigInteger,
    Binary,
    Column,
    DateTime,
    Integer,
    Numeric,
    String,
)
from sqlalchemy.dialects.postgresql import JSON, TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import func

Base = declarative_base()


class OsmBase(Base):
    __abstract__ = True

    created_on = Column(TIMESTAMP, default=func.now())
    updated_on = Column(TIMESTAMP, default=func.now(), onupdate=func.now())


class Node(OsmBase):
    __tablename__ = 'node'

    id = Column(BigInteger, primary_key=True, autoincrement=False)
    tags = Column(JSON, nullable=True)
    meta = Column(JSON, nullable=True)
    changeset = Column(BigInteger, nullable=True)
    lat = Column(Numeric(11, 8), nullable=True)
    lon = Column(Numeric(11, 8), nullable=True)


class Way(OsmBase):
    __tablename__ = 'way'

    id = Column(BigInteger, primary_key=True, autoincrement=False)
    tags = Column(JSON, nullable=True)
    meta = Column(JSON, nullable=True)
    changeset = Column(BigInteger, nullable=True)


class WayMember(OsmBase):
    __tablename__ = 'waymember'

    id = Column(BigInteger, primary_key=True, autoincrement=False)
    way_id = Column(BigInteger, index=True)
    node_id = Column(BigInteger, index=True)
    order = Column(Integer, nullable=True, index=True)


class Relation(OsmBase):
    __tablename__ = 'relation'

    id = Column(BigInteger, primary_key=True, autoincrement=False)
    tags = Column(JSON, nullable=True)
    meta = Column(JSON, nullable=True)
    changeset = Column(BigInteger, nullable=True)


class RelationMember(OsmBase):
    __tablename__ = 'relationmember'

    id = Column(BigInteger, primary_key=True, autoincrement=False)
    relation_id = Column(BigInteger, index=True)
    # allowed values: n, w, r
    element_type = Column(String(1), index=True)
    element_id = Column(BigInteger, index=True)
    order = Column(Integer, nullable=True, index=True)
    role = Column(String, nullable=True)
mit
-8,110,983,343,575,887,000
24.0375
75
0.688967
false
3.609009
false
false
false
coassets/initial-d
sample_project/urls.py
1
1537
from django.conf.urls.defaults import *
from django.conf import settings

from crm.xmlrpc import rpc_handler

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    # crm and contactinfo URLs (required)
    (r'^crm/', include('crm.urls')),
    (r'^contactinfo/', include('contactinfo.urls')),
    (r'^ajax/', include('ajax_select.urls')),
    url(r'^xml-rpc/', rpc_handler, name='xml_rpc'),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    #url(r'^admin/(.*)', admin.site.root),
    url(r'^admin/', include(admin.site.urls)),

    # use the contrib.auth login/logout views for authentication (optional)
    url(
        r'^accounts/login/$',
        'django.contrib.auth.views.login',
        name='auth_login',
    ),
    url(
        r'^accounts/logout/$',
        'django.contrib.auth.views.logout',
        name='auth_logout',
    ),

    # redirect '/' to the CRM dashboard (optional)
    url(
        '^$',
        'django.views.generic.simple.redirect_to',
        {'url': '/crm/dashboard/'},
    ),
)

if settings.DEBUG:
    urlpatterns += patterns('',
        (
            r'^%s(?P<path>.*)' % settings.MEDIA_URL.lstrip('/'),
            'django.views.static.serve',
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}
        ),
    )
gpl-2.0
-1,501,874,727,447,595,000
30.367347
76
0.615485
false
3.677033
false
false
false
ajrbyers/mondroid
src/monitor/management/commands/install_droids.py
1
1680
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings

from monitor import models
from crontab import CronTab

import os
import sys

try:
    action = sys.argv[1:][1]
except:
    action = ''


def find_job(tab, comment):
    for job in tab:
        if job.comment == comment:
            return job
    return None


class Command(BaseCommand):
    help = 'Installs cron tasks for the monitor.'

    def handle(self, *args, **options):
        monitor_list = models.Monitor.objects.all()
        virtualenv = os.environ.get('VIRTUAL_ENV', None)
        tab = CronTab()

        for monitor in monitor_list:
            current_job = find_job(tab, "fetcher_droid_%s" % monitor.slug)

            if current_job == None:
                django_command = "&& python %s/manage.py fetcher_droid %s >> /var/log/mondroid/%s.fetcher.log" % (settings.BASE_DIR, monitor.slug, monitor.slug)
                if virtualenv:
                    command = 'export PATH=%s/bin:/usr/local/bin:/usr/bin:/bin %s' % (virtualenv, django_command)
                else:
                    command = '%s' % (django_command)

                cron_job = tab.new(command, comment="fetcher_droid_%s" % monitor.slug)
                cron_job.minute.every(5)

        # Install the parser droid command if it doesn't exist already
        current_job = find_job(tab, "parser_droid")

        if current_job == None:
            if virtualenv:
                command = 'export PATH=%s/bin:/usr/local/bin:/usr/bin:/bin && python %s/manage.py parser_droid' % (virtualenv, settings.BASE_DIR)
            cron_job = tab.new(command, comment="parser_droid")
            cron_job.minute.every(5)

        if action == 'test':
            print tab.render()
        elif action == 'quiet':
            pass
        else:
            tab.write()
gpl-2.0
6,182,176,925,109,408,000
26.557377
148
0.685119
false
3.071298
false
false
false
probablytom/msci-model
resp_base/ResponsibleWorkflows.py
1
2256
from theatre_ag.theatre_ag.workflow import treat_as_workflow
from .Constraints import Deadline
from random import random, choice


class CourseworkWorkflow:

    def __init__(self):
        self.agent = None
        self.competence = {'essays_written': 0.95,
                           'working_programs': 0.95}

    def assign_agent(self, agent):
        self.agent = agent

    # Lecturers can write essays
    # (We're calling papers essays, so as to simplify the model's ontology.)
    # RETURNS: tuple (a,b):
    #     a: success bool
    #     b: set of constraints with pass/failure
    def write_essay(self, agent):
        written_successfully = (random() < self.competence['essays_written'])
        if 'essays_written' not in agent.socio_states.keys():
            agent.socio_states['essays_written'] = 0

        for i in range(len(agent.current_responsibility.constraints)):
            agent.current_responsibility.constraints[i].record_outcome(True)

        if written_successfully:
            agent.socio_states['essays_written'] += 1
        else:
            choice([c for c in agent.current_responsibility.constraints
                    if type(c) is not Deadline]).record_outcome(False)

        return (written_successfully, agent.current_responsibility.constraints)

    def write_program(self, agent):
        written_successfully = (random() < self.competence['working_programs'])
        if 'working_programs' not in agent.socio_states.keys():
            agent.socio_states['working_programs'] = 0

        for i in range(len(agent.current_responsibility.constraints)):
            agent.current_responsibility.constraints[i].record_outcome(True)

        if written_successfully:
            agent.socio_states['working_programs'] += 1
        else:
            choice([c for c in agent.current_responsibility.constraints
                    if type(c) is not Deadline]).record_outcome(False)

        return (written_successfully, agent.current_responsibility.constraints)


class IncompetentCourseworkWorkflow(CourseworkWorkflow):
    def __init__(self):
        super().__init__()
        self.competence = {'essays_written': 0.2,
                           'working_programs': 0.2}


class DummyWorkflow:
    is_workflow = True


treat_as_workflow(CourseworkWorkflow)
mit
5,694,300,392,099,812,000
39.285714
122
0.655142
false
3.876289
false
false
false
frisk028/flask-app-umn-courses
courses/public/forms.py
1
5377
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextField, SelectField, SelectMultipleField, widgets
from wtforms.validators import DataRequired
import json
import os


class MultiCheckboxField(SelectMultipleField):
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()


class SearchForm(Form):
    CAMPUS_CHOICES = [('umntc', 'Twin Cities'), ('umndl', 'Duluth'),
                      ('umnro', 'Rochester'), ('umncr', 'Crookston'),
                      ('umnmo', 'Morris')]
    TERM_CHOICES = [('1165', 'Summer 2016'), ('1169', 'Fall 2016'),
                    ('1173', 'Spring 2017')]
    COMPARE_CHOICES = [('', '--choose comparison--'), ('<', 'less than'),
                       ('<=', 'less than or equal to'), ('=', 'equal to'),
                       ('>=', 'greater than or equal to'), ('>', 'greater than')]
    LEVEL_CHOICES = [('catalog_number<5000', 'Undergraduate Courses'),
                     ('catalog_number>4999', 'Graduate and Professional Courses')]
    CLE_CHOICES = [('AH', 'Arts and Humanities'), ('BIOL', 'Biological Sciences'),
                   ('CIV', 'Civic Life and Ethics'),
                   ('DSJ', 'Diversity and Social Justice'), ('ENV', 'Environment'),
                   ('GP', 'Global Perspectives'), ('HIS', 'Historical Perspectives'),
                   ('LITR', 'Literature'), ('MATH', 'Mathmatical Thinking'),
                   ('PHYS', 'Physical Sciences'), ('SOCS', 'Social Sciences'),
                   ('TS', 'Technology and Society'), ('WI', 'Writing Intensive')]
    GE_CHOICES = [('BIOL SCI', 'Biological Sciences'),
                  ('COMMUNICAT', 'Written/Oral Communications'),
                  ('ETH/CIV RE', 'Ethic/Civil Responsibility'),
                  ('GLOB PERSP', 'Global Perspective'),
                  ('HI/BEH/SCC', 'History & Behavioral/Social Sciences'),
                  ('HUMAN DIV', 'Human Diversity'), ('HUMANITIES', 'Humanities'),
                  ('LIB ED ELC', 'Liberal Education Elective'),
                  ('PEOPLE/ENV', 'People/Environment'),
                  ('PHYS SCI', 'Physical Sciences'),
                  ('MATH THINK', 'Mathematical Thinking')]
    GER_CHOICES = [('ARTP', 'Artistic Performance'),
                   ('HUM', 'Communication, Language, Literature, and Philosophy'),
                   ('ECR', 'Ethical & Civic Responsibility'),
                   ('ENVT', 'People and Environment'), ('FA', 'Fine Arts'),
                   ('FL', 'Foreign Language'), ('HIST', 'Historical Perspectives'),
                   ('SS', 'Human Behavior, Social Processes, and Institutions'),
                   ('HDIV', 'Human Diversity'), ('IC', 'Intellectual Community'),
                   ('IP', 'International Perspective'),
                   ('M/SR', 'Mathematical/Symbolic Reasoning'),
                   ('SCI', 'Physical & Biological Sciences'),
                   ('SCIL', 'Physical & Biological Sciences with Lab'),
                   ('WLA', 'Writing for the Liberal Arts')]
    DLE_CHOICES = [('CDIVERSITY', 'Cultural Diversity in the US'),
                   ('FINE ARTS', 'Fine Arts'), ('GLOBAL PER', 'Global Perspectives'),
                   ('HUMANITIES', 'Humanities'),
                   ('LOGIC & QR', 'Logic & Quantitative Reasoning'),
                   ('NAT SCI', 'Natural Sciences'),
                   ('COMM & LAN', 'Oral Communication & Languages'),
                   ('SOC SCI', 'Social Sciences'), ('SUSTAIN', 'Sustainability'),
                   ('WRITING', 'Writing & Information Literacy')]

    campus = SelectField(label='Campus', choices=CAMPUS_CHOICES,
                         validators=[DataRequired()])
    cle = MultiCheckboxField(label='Twin Cities/Rochester Liberal Education',
                             choices=CLE_CHOICES)
    dle = MultiCheckboxField(label='Duluth Liberal Education', choices=DLE_CHOICES)
    ge = MultiCheckboxField(label='Crookston Liberal Education', choices=GE_CHOICES)
    ger = MultiCheckboxField(label='Morris Liberal Education', choices=GER_CHOICES)
    term = SelectField(label='Term', choices=TERM_CHOICES,
                       validators=[DataRequired()], default='1159')
    level = SelectField(label='Level', choices=LEVEL_CHOICES,
                        validators=[DataRequired()])
    subject = TextField(label='Subject')
    course_number = TextField(label='Course Number')
    compare = SelectField(label='Course Number', choices=COMPARE_CHOICES)

    def __init__(self, *args, **kwargs):
        super(SearchForm, self).__init__(*args, **kwargs)
        self.user = None

    def validate(self):
        found = False
        json_file = '/majors.json'
        initial_validation = super(SearchForm, self).validate()

        if self.course_number.data:
            if self.compare.data == '':
                self.compare.errors.append('Please enter a comparison')
                return False

        if str(self.campus.data) == 'umnmo':
            json_file = '/morris.json'
        elif str(self.campus.data) == 'umncr':
            json_file = '/crookston.json'
        elif str(self.campus.data) == 'umndl':
            json_file = '/duluth.json'

        json_url = os.path.realpath(os.path.dirname(__file__)) + json_file
        f = open(json_url, 'r')
        json_data = json.loads(f.read())
        subject = self.subject.data.upper()

        if subject:
            # make sure to only validate subject if something was entered.
            for key, value in json_data.iteritems():
                if subject == key:
                    found = True
            if not found:
                self.subject.errors.append('Please enter a valid course subject')
                return False

        return True
mit
-3,242,450,863,080,279,000
54.43299
134
0.590664
false
3.675325
false
false
false
rjschwei/azure-sdk-for-python
unreleased/azure-mgmt-intune/setup.py
1
2533
#!/usr/bin/env python

#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

from setuptools import find_packages, setup
from io import open
import re
import os.path

# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-intune"
PACKAGE_PPRINT_NAME = "Intune Management"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')

# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
    import azure
    try:
        ver = azure.__version__
        raise Exception(
            'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        pass
except ImportError:
    pass

# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

with open('README.rst', encoding='utf-8') as f:
    readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
    history = f.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + history,
    license='MIT License',
    author='Microsoft Corporation',
    author_email='ptvshelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(),
    install_requires=[
        'azure-mgmt-nspkg',
        'azure-common[autorest]==1.1.4',
    ],
)
mit
7,684,601,118,646,094,000
31.896104
91
0.597315
false
3.921053
false
false
false
WilJoey/tn_ckan
ckan/controllers/group.py
1
36100
import re import os import logging import genshi import cgi import datetime from urllib import urlencode from pylons.i18n import get_lang import ckan.lib.base as base import ckan.lib.helpers as h import ckan.lib.maintain as maintain import ckan.lib.navl.dictization_functions as dict_fns import ckan.logic as logic import ckan.lib.search as search import ckan.model as model import ckan.new_authz as new_authz import ckan.lib.plugins import ckan.plugins as plugins from ckan.common import OrderedDict, c, g, request, _ log = logging.getLogger(__name__) render = base.render abort = base.abort NotFound = logic.NotFound NotAuthorized = logic.NotAuthorized ValidationError = logic.ValidationError check_access = logic.check_access get_action = logic.get_action tuplize_dict = logic.tuplize_dict clean_dict = logic.clean_dict parse_params = logic.parse_params lookup_group_plugin = ckan.lib.plugins.lookup_group_plugin class GroupController(base.BaseController): group_type = 'group' ## hooks for subclasses def _group_form(self, group_type=None): return lookup_group_plugin(group_type).group_form() def _form_to_db_schema(self, group_type=None): return lookup_group_plugin(group_type).form_to_db_schema() def _db_to_form_schema(self, group_type=None): '''This is an interface to manipulate data from the database into a format suitable for the form (optional)''' return lookup_group_plugin(group_type).db_to_form_schema() def _setup_template_variables(self, context, data_dict, group_type=None): return lookup_group_plugin(group_type).\ setup_template_variables(context, data_dict) def _new_template(self, group_type): return lookup_group_plugin(group_type).new_template() def _index_template(self, group_type): return lookup_group_plugin(group_type).index_template() def _about_template(self, group_type): return lookup_group_plugin(group_type).about_template() def _read_template(self, group_type): return lookup_group_plugin(group_type).read_template() def _history_template(self, group_type): return lookup_group_plugin(group_type).history_template() def _edit_template(self, group_type): return lookup_group_plugin(group_type).edit_template() def _activity_template(self, group_type): return lookup_group_plugin(group_type).activity_template() def _admins_template(self, group_type): return lookup_group_plugin(group_type).admins_template() def _bulk_process_template(self, group_type): return lookup_group_plugin(group_type).bulk_process_template() ## end hooks def _replace_group_org(self, string): ''' substitute organization for group if this is an org''' if self.group_type == 'organization': string = re.sub('^group', 'organization', string) return string def _action(self, action_name): ''' select the correct group/org action ''' return get_action(self._replace_group_org(action_name)) def _check_access(self, action_name, *args, **kw): ''' select the correct group/org check_access ''' return check_access(self._replace_group_org(action_name), *args, **kw) def _render_template(self, template_name): ''' render the correct group/org template ''' return render(self._replace_group_org(template_name)) def _redirect_to(self, *args, **kw): ''' wrapper to ensue the correct controller is used ''' if self.group_type == 'organization' and 'controller' in kw: kw['controller'] = 'organization' return h.redirect_to(*args, **kw) def _url_for(self, *args, **kw): ''' wrapper to ensue the correct controller is used ''' if self.group_type == 'organization' and 'controller' in kw: kw['controller'] = 'organization' return h.url_for(*args, **kw) def 
_guess_group_type(self, expecting_name=False): """ Guess the type of group from the URL handling the case where there is a prefix on the URL (such as /data/organization) """ parts = [x for x in request.path.split('/') if x] idx = -1 if expecting_name: idx = -2 gt = parts[idx] if gt == 'group': gt = None return gt def index(self): group_type = self._guess_group_type() context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'for_view': True, 'with_private': False} q = c.q = request.params.get('q', '') data_dict = {'all_fields': True, 'q': q} sort_by = c.sort_by_selected = request.params.get('sort') if sort_by: data_dict['sort'] = sort_by try: self._check_access('site_read', context) except NotAuthorized: abort(401, _('Not authorized to see this page')) # pass user info to context as needed to view private datasets of # orgs correctly if c.userobj: context['user_id'] = c.userobj.id context['user_is_admin'] = c.userobj.sysadmin results = self._action('group_list')(context, data_dict) c.page = h.Page( collection=results, page=request.params.get('page', 1), url=h.pager_url, items_per_page=21 ) return render(self._index_template(group_type)) def read(self, id, limit=20): group_type = self._get_group_type(id.split('@')[0]) if group_type != self.group_type: abort(404, _('Incorrect group type')) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'schema': self._db_to_form_schema(group_type=group_type), 'for_view': True} data_dict = {'id': id} # unicode format (decoded from utf8) q = c.q = request.params.get('q', '') try: # Do not query for the group datasets when dictizing, as they will # be ignored and get requested on the controller anyway context['include_datasets'] = False c.group_dict = self._action('group_show')(context, data_dict) c.group = context['group'] except NotFound: abort(404, _('Group not found')) except NotAuthorized: abort(401, _('Unauthorized to read group %s') % id) self._read(id, limit) return render(self._read_template(c.group_dict['type'])) def _read(self, id, limit): ''' This is common code used by both read and bulk_process''' group_type = self._get_group_type(id.split('@')[0]) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'schema': self._db_to_form_schema(group_type=group_type), 'for_view': True, 'extras_as_string': True} q = c.q = request.params.get('q', '') # Search within group if c.group_dict.get('is_organization'): q += ' owner_org:"%s"' % c.group_dict.get('id') else: q += ' groups:"%s"' % c.group_dict.get('name') c.description_formatted = h.render_markdown(c.group_dict.get('description')) context['return_query'] = True # c.group_admins is used by CKAN's legacy (Genshi) templates only, # if we drop support for those then we can delete this line. 
c.group_admins = new_authz.get_group_or_org_admin_ids(c.group.id) try: page = int(request.params.get('page', 1)) except ValueError, e: abort(400, ('"page" parameter must be an integer')) # most search operations should reset the page counter: params_nopage = [(k, v) for k, v in request.params.items() if k != 'page'] #sort_by = request.params.get('sort', 'name asc') sort_by = request.params.get('sort', None) def search_url(params): if group_type == 'organization': if c.action == 'bulk_process': url = self._url_for(controller='organization', action='bulk_process', id=id) else: url = self._url_for(controller='organization', action='read', id=id) else: url = self._url_for(controller='group', action='read', id=id) params = [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v)) for k, v in params] return url + u'?' + urlencode(params) def drill_down_url(**by): return h.add_url_param(alternative_url=None, controller='group', action='read', extras=dict(id=c.group_dict.get('name')), new_params=by) c.drill_down_url = drill_down_url def remove_field(key, value=None, replace=None): return h.remove_url_param(key, value=value, replace=replace, controller='group', action='read', extras=dict(id=c.group_dict.get('name'))) c.remove_field = remove_field def pager_url(q=None, page=None): params = list(params_nopage) params.append(('page', page)) return search_url(params) try: c.fields = [] search_extras = {} for (param, value) in request.params.items(): if not param in ['q', 'page', 'sort'] \ and len(value) and not param.startswith('_'): if not param.startswith('ext_'): c.fields.append((param, value)) q += ' %s: "%s"' % (param, value) else: search_extras[param] = value fq = 'capacity:"public"' user_member_of_orgs = [org['id'] for org in h.organizations_available('read')] if (c.group and c.group.id in user_member_of_orgs): fq = '' context['ignore_capacity_check'] = True facets = OrderedDict() default_facet_titles = {'organization': _('Organizations'), 'groups': _('Groups'), 'tags': _('Tags'), 'res_format': _('Formats'), 'license_id': _('Licenses')} for facet in g.facets: if facet in default_facet_titles: facets[facet] = default_facet_titles[facet] else: facets[facet] = facet # Facet titles for plugin in plugins.PluginImplementations(plugins.IFacets): if self.group_type == 'organization': facets = plugin.organization_facets( facets, self.group_type, None) else: facets = plugin.group_facets( facets, self.group_type, None) if 'capacity' in facets and (self.group_type != 'organization' or not user_member_of_orgs): del facets['capacity'] c.facet_titles = facets data_dict = { 'q': q, 'fq': fq, 'facet.field': facets.keys(), 'rows': limit, 'sort': sort_by, 'start': (page - 1) * limit, 'extras': search_extras } query = get_action('package_search')(context, data_dict) c.page = h.Page( collection=query['results'], page=page, url=pager_url, item_count=query['count'], items_per_page=limit ) c.group_dict['package_count'] = query['count'] c.facets = query['facets'] maintain.deprecate_context_item('facets', 'Use `c.search_facets` instead.') c.search_facets = query['search_facets'] c.search_facets_limits = {} for facet in c.facets.keys(): limit = int(request.params.get('_%s_limit' % facet, g.facets_default_number)) c.search_facets_limits[facet] = limit c.page.items = query['results'] c.sort_by_selected = sort_by except search.SearchError, se: log.error('Group search error: %r', se.args) c.query_error = True c.facets = {} c.page = h.Page(collection=[]) self._setup_template_variables(context, {'id':id}, 
group_type=group_type) def bulk_process(self, id): ''' Allow bulk processing of datasets for an organization. Make private/public or delete. For organization admins.''' group_type = self._get_group_type(id.split('@')[0]) if group_type != 'organization': # FIXME: better error raise Exception('Must be an organization') # check we are org admin context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'schema': self._db_to_form_schema(group_type=group_type), 'for_view': True, 'extras_as_string': True} data_dict = {'id': id} try: # Do not query for the group datasets when dictizing, as they will # be ignored and get requested on the controller anyway context['include_datasets'] = False c.group_dict = self._action('group_show')(context, data_dict) c.group = context['group'] except NotFound: abort(404, _('Group not found')) except NotAuthorized: abort(401, _('Unauthorized to read group %s') % id) #use different form names so that ie7 can be detected form_names = set(["bulk_action.public", "bulk_action.delete", "bulk_action.private"]) actions_in_form = set(request.params.keys()) actions = form_names.intersection(actions_in_form) # If no action then just show the datasets if not actions: # unicode format (decoded from utf8) limit = 500 self._read(id, limit) c.packages = c.page.items return render(self._bulk_process_template(group_type)) #ie7 puts all buttons in form params but puts submitted one twice for key, value in dict(request.params.dict_of_lists()).items(): if len(value) == 2: action = key.split('.')[-1] break else: #normal good browser form submission action = actions.pop().split('.')[-1] # process the action first find the datasets to perform the action on. # they are prefixed by dataset_ in the form data datasets = [] for param in request.params: if param.startswith('dataset_'): datasets.append(param[8:]) action_functions = { 'private': 'bulk_update_private', 'public': 'bulk_update_public', 'delete': 'bulk_update_delete', } data_dict = {'datasets': datasets, 'org_id': c.group_dict['id']} try: get_action(action_functions[action])(context, data_dict) except NotAuthorized: abort(401, _('Not authorized to perform bulk update')) base.redirect(h.url_for(controller='organization', action='bulk_process', id=id)) def new(self, data=None, errors=None, error_summary=None): group_type = self._guess_group_type(True) if data: data['type'] = group_type context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'save': 'save' in request.params, 'parent': request.params.get('parent', None)} try: self._check_access('group_create', context) except NotAuthorized: abort(401, _('Unauthorized to create a group')) if context['save'] and not data: return self._save_new(context, group_type) data = data or {} if not data.get('image_url', '').startswith('http'): data.pop('image_url', None) errors = errors or {} error_summary = error_summary or {} vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'action': 'new'} self._setup_template_variables(context, data, group_type=group_type) c.form = render(self._group_form(group_type=group_type), extra_vars=vars) return render(self._new_template(group_type)) def edit(self, id, data=None, errors=None, error_summary=None): group_type = self._get_group_type(id.split('@')[0]) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'save': 'save' in request.params, 'for_edit': True, 'parent': request.params.get('parent', None) } data_dict = {'id': id} if context['save'] and not data: return 
self._save_edit(id, context) try: old_data = self._action('group_show')(context, data_dict) c.grouptitle = old_data.get('title') c.groupname = old_data.get('name') data = data or old_data except NotFound: abort(404, _('Group not found')) except NotAuthorized: abort(401, _('Unauthorized to read group %s') % '') group = context.get("group") c.group = group c.group_dict = self._action('group_show')(context, data_dict) try: self._check_access('group_update', context) except NotAuthorized, e: abort(401, _('User %r not authorized to edit %s') % (c.user, id)) errors = errors or {} vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'action': 'edit'} self._setup_template_variables(context, data, group_type=group_type) c.form = render(self._group_form(group_type), extra_vars=vars) return render(self._edit_template(c.group.type)) def _get_group_type(self, id): """ Given the id of a group it determines the type of a group given a valid id/name for the group. """ group = model.Group.get(id) if not group: return None return group.type def _save_new(self, context, group_type=None): try: data_dict = clean_dict(dict_fns.unflatten( tuplize_dict(parse_params(request.params)))) data_dict['type'] = group_type or 'group' context['message'] = data_dict.get('log_message', '') data_dict['users'] = [{'name': c.user, 'capacity': 'admin'}] group = self._action('group_create')(context, data_dict) # Redirect to the appropriate _read route for the type of group h.redirect_to(group['type'] + '_read', id=group['name']) except NotAuthorized: abort(401, _('Unauthorized to read group %s') % '') except NotFound, e: abort(404, _('Group not found')) except dict_fns.DataError: abort(400, _(u'Integrity Error')) except ValidationError, e: errors = e.error_dict error_summary = e.error_summary return self.new(data_dict, errors, error_summary) def _force_reindex(self, grp): ''' When the group name has changed, we need to force a reindex of the datasets within the group, otherwise they will stop appearing on the read page for the group (as they're connected via the group name)''' group = model.Group.get(grp['name']) for dataset in group.packages(): search.rebuild(dataset.name) def _save_edit(self, id, context): try: data_dict = clean_dict(dict_fns.unflatten( tuplize_dict(parse_params(request.params)))) context['message'] = data_dict.get('log_message', '') data_dict['id'] = id context['allow_partial_update'] = True group = self._action('group_update')(context, data_dict) if id != group['name']: self._force_reindex(group) h.redirect_to('%s_read' % group['type'], id=group['name']) except NotAuthorized: abort(401, _('Unauthorized to read group %s') % id) except NotFound, e: abort(404, _('Group not found')) except dict_fns.DataError: abort(400, _(u'Integrity Error')) except ValidationError, e: errors = e.error_dict error_summary = e.error_summary return self.edit(id, data_dict, errors, error_summary) def authz(self, id): group = model.Group.get(id) if group is None: abort(404, _('Group not found')) c.groupname = group.name c.grouptitle = group.display_name try: context = \ {'model': model, 'user': c.user or c.author, 'group': group} self._check_access('group_edit_permissions', context) c.authz_editable = True c.group = context['group'] except NotAuthorized: c.authz_editable = False if not c.authz_editable: abort(401, _('User %r not authorized to edit %s authorizations') % (c.user, id)) roles = self._handle_update_of_authz(group) self._prepare_authz_info_for_render(roles) return render('group/authz.html') def 
delete(self, id): if 'cancel' in request.params: self._redirect_to(controller='group', action='edit', id=id) context = {'model': model, 'session': model.Session, 'user': c.user or c.author} try: self._check_access('group_delete', context, {'id': id}) except NotAuthorized: abort(401, _('Unauthorized to delete group %s') % '') try: if request.method == 'POST': self._action('group_delete')(context, {'id': id}) if self.group_type == 'organization': h.flash_notice(_('Organization has been deleted.')) else: h.flash_notice(_('Group has been deleted.')) self._redirect_to(controller='group', action='index') c.group_dict = self._action('group_show')(context, {'id': id}) except NotAuthorized: abort(401, _('Unauthorized to delete group %s') % '') except NotFound: abort(404, _('Group not found')) return self._render_template('group/confirm_delete.html') def members(self, id): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} try: c.members = self._action('member_list')( context, {'id': id, 'object_type': 'user'} ) c.group_dict = self._action('group_show')(context, {'id': id}) except NotAuthorized: abort(401, _('Unauthorized to delete group %s') % '') except NotFound: abort(404, _('Group not found')) return self._render_template('group/members.html') def member_new(self, id): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} #self._check_access('group_delete', context, {'id': id}) try: if request.method == 'POST': data_dict = clean_dict(dict_fns.unflatten( tuplize_dict(parse_params(request.params)))) data_dict['id'] = id email = data_dict.get('email') if email: user_data_dict = { 'email': email, 'group_id': data_dict['id'], 'role': data_dict['role'] } del data_dict['email'] user_dict = self._action('user_invite')(context, user_data_dict) data_dict['username'] = user_dict['name'] c.group_dict = self._action('group_member_create')(context, data_dict) self._redirect_to(controller='group', action='members', id=id) else: user = request.params.get('user') if user: c.user_dict = get_action('user_show')(context, {'id': user}) c.user_role = new_authz.users_role_for_group_or_org(id, user) or 'member' else: c.user_role = 'member' c.group_dict = self._action('group_show')(context, {'id': id}) group_type = 'organization' if c.group_dict['is_organization'] else 'group' c.roles = self._action('member_roles_list')( context, {'group_type': group_type} ) except NotAuthorized: abort(401, _('Unauthorized to add member to group %s') % '') except NotFound: abort(404, _('Group not found')) except ValidationError, e: h.flash_error(e.error_summary) return self._render_template('group/member_new.html') def member_delete(self, id): if 'cancel' in request.params: self._redirect_to(controller='group', action='members', id=id) context = {'model': model, 'session': model.Session, 'user': c.user or c.author} try: self._check_access('group_member_delete', context, {'id': id}) except NotAuthorized: abort(401, _('Unauthorized to delete group %s members') % '') try: user_id = request.params.get('user') if request.method == 'POST': self._action('group_member_delete')(context, {'id': id, 'user_id': user_id}) h.flash_notice(_('Group member has been deleted.')) self._redirect_to(controller='group', action='members', id=id) c.user_dict = self._action('user_show')(context, {'id': user_id}) c.user_id = user_id c.group_id = id except NotAuthorized: abort(401, _('Unauthorized to delete group %s') % '') except NotFound: abort(404, _('Group not found')) return 
self._render_template('group/confirm_delete_member.html') def history(self, id): if 'diff' in request.params or 'selected1' in request.params: try: params = {'id': request.params.getone('group_name'), 'diff': request.params.getone('selected1'), 'oldid': request.params.getone('selected2'), } except KeyError, e: if 'group_name' in dict(request.params): id = request.params.getone('group_name') c.error = \ _('Select two revisions before doing the comparison.') else: params['diff_entity'] = 'group' h.redirect_to(controller='revision', action='diff', **params) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'schema': self._db_to_form_schema()} data_dict = {'id': id} try: c.group_dict = self._action('group_show')(context, data_dict) c.group_revisions = self._action('group_revision_list')(context, data_dict) #TODO: remove # Still necessary for the authz check in group/layout.html c.group = context['group'] except NotFound: abort(404, _('Group not found')) except NotAuthorized: abort(401, _('User %r not authorized to edit %r') % (c.user, id)) format = request.params.get('format', '') if format == 'atom': # Generate and return Atom 1.0 document. from webhelpers.feedgenerator import Atom1Feed feed = Atom1Feed( title=_(u'CKAN Group Revision History'), link=self._url_for(controller='group', action='read', id=c.group_dict['name']), description=_(u'Recent changes to CKAN Group: ') + c.group_dict['display_name'], language=unicode(get_lang()), ) for revision_dict in c.group_revisions: revision_date = h.date_str_to_datetime( revision_dict['timestamp']) try: dayHorizon = int(request.params.get('days')) except: dayHorizon = 30 dayAge = (datetime.datetime.now() - revision_date).days if dayAge >= dayHorizon: break if revision_dict['message']: item_title = u'%s' % revision_dict['message'].\ split('\n')[0] else: item_title = u'%s' % revision_dict['id'] item_link = h.url_for(controller='revision', action='read', id=revision_dict['id']) item_description = _('Log message: ') item_description += '%s' % (revision_dict['message'] or '') item_author_name = revision_dict['author'] item_pubdate = revision_date feed.add_item( title=item_title, link=item_link, description=item_description, author_name=item_author_name, pubdate=item_pubdate, ) feed.content_type = 'application/atom+xml' return feed.writeString('utf-8') return render(self._history_template(c.group_dict['type'])) def activity(self, id, offset=0): '''Render this group's public activity stream page.''' context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'for_view': True} try: c.group_dict = self._get_group_dict(id) except NotFound: abort(404, _('Group not found')) except NotAuthorized: abort(401, _('Unauthorized to read group {group_id}').format( group_id=id)) # Add the group's activity stream (already rendered to HTML) to the # template context for the group/read.html template to retrieve later. 
c.group_activity_stream = self._action('group_activity_list_html')( context, {'id': c.group_dict['id'], 'offset': offset}) return render(self._activity_template(c.group_dict['type'])) def follow(self, id): '''Start following this group.''' context = {'model': model, 'session': model.Session, 'user': c.user or c.author} data_dict = {'id': id} try: get_action('follow_group')(context, data_dict) group_dict = get_action('group_show')(context, data_dict) h.flash_success(_("You are now following {0}").format( group_dict['title'])) except ValidationError as e: error_message = (e.extra_msg or e.message or e.error_summary or e.error_dict) h.flash_error(error_message) except NotAuthorized as e: h.flash_error(e.extra_msg) h.redirect_to(controller='group', action='read', id=id) def unfollow(self, id): '''Stop following this group.''' context = {'model': model, 'session': model.Session, 'user': c.user or c.author} data_dict = {'id': id} try: get_action('unfollow_group')(context, data_dict) group_dict = get_action('group_show')(context, data_dict) h.flash_success(_("You are no longer following {0}").format( group_dict['title'])) except ValidationError as e: error_message = (e.extra_msg or e.message or e.error_summary or e.error_dict) h.flash_error(error_message) except (NotFound, NotAuthorized) as e: error_message = e.extra_msg or e.message h.flash_error(error_message) h.redirect_to(controller='group', action='read', id=id) def followers(self, id): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} c.group_dict = self._get_group_dict(id) try: c.followers = get_action('group_follower_list')(context, {'id': id}) except NotAuthorized: abort(401, _('Unauthorized to view followers %s') % '') return render('group/followers.html') def admins(self, id): c.group_dict = self._get_group_dict(id) c.admins = new_authz.get_group_or_org_admin_ids(id) return render(self._admins_template(c.group_dict['type'])) def about(self, id): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} c.group_dict = self._get_group_dict(id) group_type = c.group_dict['type'] self._setup_template_variables(context, {'id': id}, group_type=group_type) return render(self._about_template(group_type)) def _get_group_dict(self, id): ''' returns the result of group_show action or aborts if there is a problem ''' context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'for_view': True} try: return self._action('group_show')(context, {'id': id}) except NotFound: abort(404, _('Group not found')) except NotAuthorized: abort(401, _('Unauthorized to read group %s') % id) def _render_edit_form(self, fs): # errors arrive in c.error and fs.errors c.fieldset = fs return render('group/edit_form.html') def _update(self, fs, group_name, group_id): ''' Writes the POST data (associated with a group edit) to the database @input c.error ''' validation = fs.validate() if not validation: c.form = self._render_edit_form(fs) raise base.ValidationException(fs) try: fs.sync() except Exception, inst: model.Session.rollback() raise else: model.Session.commit() def _update_authz(self, fs): validation = fs.validate() if not validation: c.form = self._render_edit_form(fs) raise base.ValidationException(fs) try: fs.sync() except Exception, inst: model.Session.rollback() raise else: model.Session.commit()
mit
8,639,835,599,501,780,000
38.496718
93
0.531163
false
4.158507
false
false
false
anthonyfok/frescobaldi
frescobaldi_app/snippet/insert.py
1
9303
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/ # # Copyright (c) 2008 - 2014 by Wilbert Berendsen # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # See http://www.gnu.org/licenses/ for more information. """ Insert snippets into a Document. """ import sys from PyQt5.QtCore import QSettings from PyQt5.QtGui import QTextCursor from PyQt5.QtWidgets import QMessageBox import cursortools import tokeniter import indent from . import snippets from . import expand def insert(name, view): """Insert named snippet into the view.""" text, variables = snippets.get(name) cursor = view.textCursor() selection = variables.get('selection', '') if 'yes' in selection and not cursor.hasSelection(): return if 'strip' in selection: cursortools.strip_selection(cursor) pos = cursor.selectionStart() with cursortools.compress_undo(cursor): # insert the snippet, might return a new cursor if 'python' in variables: new = insert_python(text, cursor, name, view) elif 'macro' in variables: new = insert_macro(text, view) else: new = insert_snippet(text, cursor, variables) # QTextBlocks the snippet starts and ends block = cursor.document().findBlock(pos) last = cursor.block() # re-indent if not explicitly suppressed by a 'indent: no' variable if last != block and 'no' not in variables.get('indent', ''): c = QTextCursor(last) c.setPosition(block.position(), QTextCursor.KeepAnchor) with cursortools.compress_undo(c, True): indent.re_indent(c, True) if not new and 'keep' in selection: end = cursor.position() cursor.setPosition(pos) cursor.setPosition(end, QTextCursor.KeepAnchor) view.setTextCursor(new or cursor) def insert_snippet(text, cursor, variables): """Inserts a normal text snippet. After the insert, the cursor points to the end of the inserted snippet. If this function returns a cursor it must be set as the cursor for the view after the snippet has been inserted. 
""" exp_base = expand.Expander(cursor) evs = [] # make a list of events, either text or a constant for text, key in snippets.expand(text): if text: evs.append(text) if key == '$': evs.append('$') elif key: # basic variables func = getattr(exp_base, key, None) if func: evs.append(func()) selectionUsed = expand.SELECTION in evs # do the padding if 'selection: strip;' is used if selectionUsed and 'strip' in variables.get('selection', ''): space = '\n' if '\n' in cursor.selection().toPlainText() else ' ' # change whitespace in previous and next piece of text i = evs.index(expand.SELECTION) for j in range(i-1, -i, -1): if evs[j] not in expand.constants: evs[j] = evs[j].rstrip() + space break for j in range(i+1, len(evs)): if evs[j] not in expand.constants: evs[j] = space + evs[j].lstrip() break # now insert the text ins = QTextCursor(cursor) selectionUsed and ins.setPosition(cursor.selectionStart()) a, c = -1, -1 for e in evs: if e == expand.ANCHOR: a = ins.position() elif e == expand.CURSOR: c = ins.position() elif e == expand.SELECTION: ins.setPosition(cursor.selectionEnd()) else: ins.insertText(e) cursor.setPosition(ins.position()) # return a new cursor if requested if (a, c) != (-1, -1): new = QTextCursor(cursor) if a != -1: new.setPosition(a) if c != -1: new.setPosition(c, QTextCursor.KeepAnchor if a != -1 else QTextCursor.MoveAnchor) return new def insert_python(text, cursor, name, view): """Regards the text as Python code, and exec it. name and view are given in case an exception occurs. The following variables are available: - text: contains selection or '', set it to insert new text - state: contains simplestate for the cursor position - cursor: the QTextCursor After the insert, the cursor points to the end of the inserted snippet. """ namespace = { 'cursor': QTextCursor(cursor), 'state': state(cursor), 'text': cursor.selection().toPlainText(), 'view': view, 'ANCHOR': 1, 'CURSOR': 2, } try: code = compile(text, "<snippet>", "exec") if sys.version_info < (3, 0): exec("exec code in namespace") else: exec(code, namespace) if 'main' in namespace: return namespace['main']() except Exception: handle_exception(name, view) else: text = namespace.get('text', '') if isinstance(text, (tuple, list)): ANCHOR = namespace.get('ANCHOR', 1) CURSOR = namespace.get('CURSOR', 2) a, c = -1, -1 for t in text: if t == ANCHOR: a = cursor.selectionStart() elif t == CURSOR: c = cursor.selectionStart() else: cursor.insertText(t) if (a, c) != (-1, -1): new = QTextCursor(cursor) if a != -1: new.setPosition(a) if c != -1: new.setPosition(c, QTextCursor.KeepAnchor if a != -1 else QTextCursor.MoveAnchor) return new else: cursor.insertText(namespace['text']) def insert_macro(text, view): """The macro snippet is a sequence of commands which are either Frescobaldi actions or other snippets. """ import re import actioncollectionmanager from . 
import model avail_snippets = {} for n in model.model().names(): varname = snippets.get(n).variables.get('name') if varname: avail_snippets[varname] = n avail_actions = {} win = view.window() for collection in actioncollectionmanager.manager(win).actionCollections(): for name, action in collection.actions().items(): avail_actions[name] = action commands = [x.strip() for x in text.split('\n') if x] for c in commands: if c in avail_snippets: insert(avail_snippets[c], view) elif c in avail_actions: avail_actions[c].trigger() def state(cursor): """Returns the simplestate string for the position of the cursor.""" import simplestate pos = cursor.selectionStart() block = cursor.document().findBlock(pos) tokens = tokeniter.tokens(block) state = tokeniter.state(block) column = pos - block.position() for t in tokens: if t.end > column: break state.follow(t) return simplestate.state(state) def handle_exception(name, view): """Called when a snippet raises a Python exception. Shows the error message and offers the option to edit the offending snippet. """ import sys, traceback exc_type, exc_value, exc_traceback = sys.exc_info() tb = traceback.extract_tb(exc_traceback) while tb and tb[0][0] != "<snippet>": del tb[0] msg = ''.join(traceback.format_list(tb) + traceback.format_exception_only(exc_type, exc_value)) dlg = QMessageBox(QMessageBox.Critical, _("Snippet error"), msg, QMessageBox.Ok | QMessageBox.Cancel) dlg.button(QMessageBox.Ok).setText(_("Edit Snippet")) dlg.setDefaultButton(QMessageBox.Cancel) dlg.setEscapeButton(QMessageBox.Cancel) if dlg.exec_() != QMessageBox.Ok: return # determine line number if exc_type is SyntaxError: lineno = exc_value.lineno elif tb: lineno = tb[0][1] else: lineno = None import panelmanager from . import edit widget = panelmanager.manager(view.window()).snippettool.widget() textedit = edit.Edit(widget, name).text if lineno is not None: # convert to line number in full snippet text for block in cursortools.all_blocks(textedit.document()): if block.text().startswith('-*- '): lineno += 1 else: break block = textedit.document().findBlockByNumber(lineno-1) if block.isValid(): textedit.setTextCursor(QTextCursor(block))
gpl-2.0
-6,613,117,712,532,106,000
31.872792
101
0.603031
false
4.05713
false
false
false
skylifewww/pangolinland
article/models.py
1
11009
# -*- coding: utf-8 -*- from django.contrib.auth.models import User # from iosDevCourse.users.models import User from django.db import models from embed_video.fields import EmbedVideoField from django.core.urlresolvers import reverse from mptt.models import MPTTModel, TreeForeignKey from ckeditor.fields import RichTextField from ckeditor_uploader.fields import RichTextUploadingField import mptt from mptt.fields import TreeForeignKey import random from django.conf import settings from easy_thumbnails.fields import ThumbnailerImageField # from content.models import Slide def make_upload_path(instance, filename, prefix=False): n1 = random.randint(0, 10000) n2 = random.randint(0, 10000) n3 = random.randint(0, 10000) c = filename.split(".") filename = str(n1) + "_" + str(n2) + "_" + str(n3) + "." + c[-1] return u"%s/%s" % (settings.IMAGE_UPLOAD_DIR, filename) # Create your models here. class Category(MPTTModel): name = models.CharField(max_length=250, verbose_name="Name Category", blank=True, default="", unique=True) parent = TreeForeignKey('self', related_name="children", blank=True, null=True, db_index=True, verbose_name="Parent class") published = models.BooleanField(verbose_name="Published", blank=True, default="") ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True) def get_slides(self): return Slide.objects.filter(category=self) class Meta: db_table = "category" verbose_name = "Category" verbose_name_plural = "Categories" ordering = ('tree_id','level') def __str__(self): return self.name class MPTTMeta: # level_attr = 'mptt_level' order_insertion_by = ['name'] mptt.register(Category, order_insertion_by=['name']) class Creator(MPTTModel): slug = models.CharField(max_length=250, blank=True, verbose_name="Url") name = models.CharField(max_length=200, verbose_name="Creator device", blank=True, default="", unique=True) parent = TreeForeignKey('self', related_name="children", blank=True, null=True, db_index=True, verbose_name="Parent class") class Meta: db_table = "creators" verbose_name = "Creator" verbose_name_plural = "Creators" ordering = ('tree_id', 'level') def __str__(self): return self.name def pic_slug(self): if self.slug: return u'<img src="%s" width="70"/>' % self.slug else: return '(none)' pic_slug.short_description = 'Logo Creator' pic_slug.allow_tags = True class MPTTMeta: # level_attr = 'mptt_level' order_insertion_by = ['name'] mptt.register(Creator, order_insertion_by=['name']) class Tag(models.Model): tag_name = models.CharField(max_length=50, verbose_name="Tag Name") class Meta: db_table = "tags" verbose_name = "tags" verbose_name_plural = "tag" def __unicode__(self): return self.tag_name # class Works(models.Model): # work_creator = models.CharField(max_length=50, verbose_name="creator", blank=True, null=True, default="") # work_category = TreeForeignKey(Category, related_name="works", verbose_name="Category", default="", blank=True) # # image = ThumbnailerImageField(upload_to=make_upload_path, blank=True, verbose_name="картинка") # slug = models.CharField(max_length=250, blank=True, verbose_name="Url") # short_text = RichTextUploadingField(blank=True, verbose_name="Short text") # full_text = RichTextUploadingField(blank=True, verbose_name="Full text") # work_title = models.CharField(max_length=50, verbose_name="Work Title") # class Meta: # db_table = "works" # verbose_name = "works" # verbose_name_plural = "works" # def __unicode__(self): # return self.work_title # def pic(self): # if self.image: # return u'<img src="%s" width="70"/>' % 
self.image.url # else: # return '(none)' # pic.short_description = u'Большая картинка' # pic.allow_tags = True # def pic_slug(self): # if self.slug: # return u'<img src="%s" width="70"/>' % self.slug # else: # return '(none)' # pic_slug.short_description = 'work' # pic_slug.allow_tags = True class Article(models.Model): article_title = models.CharField(max_length=250, verbose_name="Article Title") article_date = models.DateTimeField(verbose_name="Release date") article_tag = models.ManyToManyField(Tag, related_name="tags", related_query_name="tags", verbose_name="Tags") # product_works = models.ManyToManyField(Works, related_name="works", related_query_name="works", verbose_name="Works", blank=True, default="") article_category = TreeForeignKey(Category, related_name="articles", verbose_name="Categories", default="", blank=True) article_creator = TreeForeignKey(Creator, related_name="creator", max_length=200, verbose_name="Creator", blank=True, default="") article_video = EmbedVideoField(verbose_name='Video', blank=True, help_text='URL video', null=True) video_published = models.BooleanField( blank=True, default="") slug = models.CharField(max_length=250, blank=True, verbose_name="Url") slogan = models.CharField(max_length=250, verbose_name="Article Slogan") short_text = RichTextUploadingField(blank=True, verbose_name="Short text") full_text = RichTextUploadingField(blank=True, verbose_name="Full text") published = models.BooleanField(verbose_name="Published", blank=True) published_main = models.BooleanField( blank=True, default="", verbose_name="Published on main page",) ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True) def __unicode__(self): return self.product_title class Meta: db_table = 'articles' verbose_name = "Article" verbose_name_plural = "Articles" ordering = ['ordering'] # def pic(self): # if self.image: # return u'<img src="%s" width="70"/>' % self.image.url # else: # return '(none)' # pic.short_description = u'Большая картинка' # pic.allow_tags = True def pic_slug(self): if self.slug: return u'<img src="%s" width="70"/>' % self.slug else: return '(none)' pic_slug.short_description = 'Article image' pic_slug.allow_tags = True class MenuItemArticle(models.Model): category = models.ForeignKey(Category, null=True, blank=True, verbose_name="Category") name = models.CharField(max_length=200, verbose_name="Name") slug = models.CharField(max_length=250, blank=True, verbose_name="Url") published = models.BooleanField(verbose_name="Published") ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True) def __unicode__(self): return self.name class Meta: db_table = 'menuItemsArticles' verbose_name_plural = "Menu Items for Articles" verbose_name = "Menu Item" ordering = ['ordering'] class Support(models.Model): title = models.CharField(max_length=250, verbose_name="Support Title") # date = models.DateTimeField(verbose_name="Release date") tag = models.ManyToManyField(Tag, related_name="support_tags", related_query_name="support_tags", verbose_name="Tags") # product_works = models.ManyToManyField(Works, related_name="works", related_query_name="works", verbose_name="Works", blank=True, default="") category = TreeForeignKey(Category, related_name="supports", verbose_name="Categories", default="", blank=True) # product_creator = TreeForeignKey(Creator, related_name="creator", max_length=200, verbose_name="Creator", blank=True, default="") video = EmbedVideoField(verbose_name='Video', blank=True, help_text='URL video', 
null=True) video_published = models.BooleanField( blank=True, default="") slug = models.CharField(max_length=250, blank=True, verbose_name="Url") slogan = models.CharField(max_length=250, verbose_name="Support Slogan") short_text = RichTextUploadingField(blank=True, verbose_name="Short text") full_text = RichTextUploadingField(blank=True, verbose_name="Full text") published = models.BooleanField(verbose_name="Published", blank=True) # published_main = models.BooleanField( blank=True, default="", verbose_name="Published on main page",) ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True) def __unicode__(self): return self.title class Meta: db_table = 'support' verbose_name = "Support" verbose_name_plural = "Supports" ordering = ['ordering'] # def pic(self): # if self.image: # return u'<img src="%s" width="70"/>' % self.image.url # else: # return '(none)' # pic.short_description = u'Большая картинка' # pic.allow_tags = True def pic_slug(self): if self.slug: return u'<img src="%s" width="70"/>' % self.slug else: return '(none)' pic_slug.short_description = 'Support image' pic_slug.allow_tags = True # class Slide(models.Model): # category = TreeForeignKey(Category, related_name="slides_article", verbose_name="Category", default="", blank=True, null=True) # name = models.CharField(max_length=250, verbose_name="Name") # product = models.ForeignKey(Product, null=True, blank=True, verbose_name="Product") # # image = models.ImageField(upload_to=make_upload_path, blank=True, verbose_name="Изображение") # slug = models.CharField(max_length=250, blank=True, verbose_name="Url pic") # text1 = RichTextUploadingField(blank=True, verbose_name="Text1") # text2 = RichTextUploadingField(blank=True, verbose_name="Text2") # published = models.BooleanField(verbose_name="Published", blank=True) # published_main = models.BooleanField(verbose_name="Published on main", default="", blank=True) # ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True) # def __unicode__(self): # return self.name # def pic(self): # if self.image: # return u'<img src="%s" width="70"/>' % self.image.url # else: # return '(none)' # pic.short_description = u'Большая картинка' # pic.allow_tags = True # def pic_slug(self): # if self.slug: # return u'<img src="%s" width="70"/>' % self.slug # else: # return '(none)' # pic_slug.short_description = 'Slide' # pic_slug.allow_tags = True # class Meta: # verbose_name_plural = "Slides" # verbose_name = "Slide"
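As a usage sketch for the models above, the snippet below shows how the MPTT category tree can drive an Article query. It assumes the file lives in an installed Django app named "content" (the real app name is not given in the file) and that a category called "News" exists; both are illustrative assumptions, not part of the original code.

```python
# Hypothetical usage, not part of the original file: the app name "content"
# and the category name "News" are assumptions for illustration.
from content.models import Article, Category

def published_articles_for(category_name):
    # get_descendants() comes from django-mptt; include_self=True keeps the
    # selected node itself in the subtree.
    root = Category.objects.get(name=category_name)
    subtree = root.get_descendants(include_self=True)
    return (Article.objects
            .filter(article_category__in=subtree, published=True)
            .order_by('ordering'))

articles = published_articles_for('News')
```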
mit
3,099,177,084,353,075,700
37.350877
147
0.647575
false
3.555628
false
false
false
cuckoobox/cuckoo
cuckoo/data/web/local_settings.py
1
1355
# Copyright (C) 2013 Claudio Guarnieri. # Copyright (C) 2014-2017 Cuckoo Foundation. # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org # See the file 'docs/LICENSE' for copying permission. import web.errors # Maximum upload size (10GB, so there's basically no limit). MAX_UPLOAD_SIZE = 10*1024*1024*1024 # Override default secret key stored in $CWD/web/.secret_key # Make this unique, and don't share it with anybody. # SECRET_KEY = "YOUR_RANDOM_KEY" # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = "en-us" ADMINS = ( # ("Your Name", "your_email@example.com"), ) MANAGERS = ADMINS # Allow verbose debug error message in case of application fault. # It's strongly suggested to set it to False if you are serving the # web application from a web server front-end (i.e. Apache). DEBUG = False DEBUG404 = False # A list of strings representing the host/domain names that this Django site # can serve. # Values in this list can be fully qualified names (e.g. 'www.example.com'). # When DEBUG is True or when running tests, host validation is disabled; any # host will be accepted. Thus it's usually only necessary to set it in production. ALLOWED_HOSTS = ["*"] handler404 = web.errors.handler404 handler500 = web.errors.handler500
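The commented-out SECRET_KEY above expects a unique random string. One way to produce such a value, shown here only as a sketch (Cuckoo itself does not ship this helper), is with the standard library:

```python
# Print a random hex string suitable for use as SECRET_KEY; any sufficiently
# long unpredictable string works. Runs on both Python 2 and 3.
import binascii
import os

print(binascii.hexlify(os.urandom(32)).decode())
```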
mit
-6,930,075,890,209,785,000
33.74359
82
0.743911
false
3.43038
false
false
false
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.1/Lib/textwrap.py
1
15708
"""Text wrapping and filling. """ # Copyright (C) 1999-2001 Gregory P. Ward. # Copyright (C) 2002, 2003 Python Software Foundation. # Written by Greg Ward <gward@python.net> __revision__ = "$Id: textwrap.py 67747 2008-12-13 23:20:54Z antoine.pitrou $" import string, re __all__ = ['TextWrapper', 'wrap', 'fill', 'dedent'] # Hardcode the recognized whitespace characters to the US-ASCII # whitespace characters. The main reason for doing this is that in # ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales # that character winds up in string.whitespace. Respecting # string.whitespace in those cases would 1) make textwrap treat 0xa0 the # same as any other whitespace char, which is clearly wrong (it's a # *non-breaking* space), 2) possibly cause problems with Unicode, # since 0xa0 is not in range(128). _whitespace = '\t\n\x0b\x0c\r ' class TextWrapper: """ Object for wrapping/filling text. The public interface consists of the wrap() and fill() methods; the other methods are just there for subclasses to override in order to tweak the default behaviour. If you want to completely replace the main wrapping algorithm, you'll probably have to override _wrap_chunks(). Several instance attributes control various aspects of wrapping: width (default: 70) the maximum width of wrapped lines (unless break_long_words is false) initial_indent (default: "") string that will be prepended to the first line of wrapped output. Counts towards the line's width. subsequent_indent (default: "") string that will be prepended to all lines save the first of wrapped output; also counts towards each line's width. expand_tabs (default: true) Expand tabs in input text to spaces before further processing. Each tab will become 1 .. 8 spaces, depending on its position in its line. If false, each tab is treated as a single character. replace_whitespace (default: true) Replace all whitespace characters in the input text by spaces after tab expansion. Note that if expand_tabs is false and replace_whitespace is true, every tab will be converted to a single space! fix_sentence_endings (default: false) Ensure that sentence-ending punctuation is always followed by two spaces. Off by default because the algorithm is (unavoidably) imperfect. break_long_words (default: true) Break words longer than 'width'. If false, those words will not be broken, and some lines might be longer than 'width'. break_on_hyphens (default: true) Allow breaking hyphenated words. If true, wrapping will occur preferably on whitespaces and right after hyphens part of compound words. drop_whitespace (default: true) Drop leading and trailing whitespace from lines. """ unicode_whitespace_trans = {} uspace = ord(' ') for x in _whitespace: unicode_whitespace_trans[ord(x)] = uspace # This funky little regex is just the trick for splitting # text up into word-wrappable chunks. E.g. # "Hello there -- you goof-ball, use the -b option!" # splits into # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! # (after stripping out empty strings). wordsep_re = re.compile( r'(\s+|' # any whitespace r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash # This less funky little regex just split on recognized spaces. E.g. # "Hello there -- you goof-ball, use the -b option!" 
# splits into # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ wordsep_simple_re = re.compile(r'(\s+)') # XXX this is not locale- or charset-aware -- string.lowercase # is US-ASCII only (and therefore English-only) sentence_end_re = re.compile(r'[a-z]' # lowercase letter r'[\.\!\?]' # sentence-ending punct. r'[\"\']?' # optional end-of-quote r'\Z') # end of chunk def __init__(self, width=70, initial_indent="", subsequent_indent="", expand_tabs=True, replace_whitespace=True, fix_sentence_endings=False, break_long_words=True, drop_whitespace=True, break_on_hyphens=True): self.width = width self.initial_indent = initial_indent self.subsequent_indent = subsequent_indent self.expand_tabs = expand_tabs self.replace_whitespace = replace_whitespace self.fix_sentence_endings = fix_sentence_endings self.break_long_words = break_long_words self.drop_whitespace = drop_whitespace self.break_on_hyphens = break_on_hyphens # -- Private methods ----------------------------------------------- # (possibly useful for subclasses to override) def _munge_whitespace(self, text): """_munge_whitespace(text : string) -> string Munge whitespace in text: expand tabs and convert all other whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" becomes " foo bar baz". """ if self.expand_tabs: text = text.expandtabs() if self.replace_whitespace: text = text.translate(self.unicode_whitespace_trans) return text def _split(self, text): """_split(text : string) -> [string] Split the text to wrap into indivisible chunks. Chunks are not quite the same as words; see wrap_chunks() for full details. As an example, the text Look, goof-ball -- use the -b option! breaks into the following chunks: 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', 'option!' if break_on_hyphens is True, or in: 'Look,', ' ', 'goof-ball', ' ', '--', ' ', 'use', ' ', 'the', ' ', '-b', ' ', option!' otherwise. """ if self.break_on_hyphens is True: chunks = self.wordsep_re.split(text) else: chunks = self.wordsep_simple_re.split(text) chunks = [c for c in chunks if c] return chunks def _fix_sentence_endings(self, chunks): """_fix_sentence_endings(chunks : [string]) Correct for sentence endings buried in 'chunks'. Eg. when the original text contains "... foo.\nBar ...", munge_whitespace() and split() will convert that to [..., "foo.", " ", "Bar", ...] which has one too few spaces; this method simply changes the one space to two. """ i = 0 pat = self.sentence_end_re while i < len(chunks)-1: if chunks[i+1] == " " and pat.search(chunks[i]): chunks[i+1] = " " i += 2 else: i += 1 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): """_handle_long_word(chunks : [string], cur_line : [string], cur_len : int, width : int) Handle a chunk of text (most likely a word, not whitespace) that is too long to fit in any line. """ # Figure out when indent is larger than the specified width, and make # sure at least one character is stripped off on every pass if width < 1: space_left = 1 else: space_left = width - cur_len # If we're allowed to break long words, then do so: put as much # of the next chunk onto the current line as will fit. if self.break_long_words: cur_line.append(reversed_chunks[-1][:space_left]) reversed_chunks[-1] = reversed_chunks[-1][space_left:] # Otherwise, we have to preserve the long word intact. Only add # it to the current line if there's nothing already there -- # that minimizes how much we violate the width constraint. 
elif not cur_line: cur_line.append(reversed_chunks.pop()) # If we're not allowed to break long words, and there's already # text on the current line, do nothing. Next time through the # main loop of _wrap_chunks(), we'll wind up here again, but # cur_len will be zero, so the next line will be entirely # devoted to the long word that we can't handle right now. def _wrap_chunks(self, chunks): """_wrap_chunks(chunks : [string]) -> [string] Wrap a sequence of text chunks and return a list of lines of length 'self.width' or less. (If 'break_long_words' is false, some lines may be longer than this.) Chunks correspond roughly to words and the whitespace between them: each chunk is indivisible (modulo 'break_long_words'), but a line break can come between any two chunks. Chunks should not have internal whitespace; ie. a chunk is either all whitespace or a "word". Whitespace chunks will be removed from the beginning and end of lines, but apart from that whitespace is preserved. """ lines = [] if self.width <= 0: raise ValueError("invalid width %r (must be > 0)" % self.width) # Arrange in reverse order so items can be efficiently popped # from a stack of chucks. chunks.reverse() while chunks: # Start the list of chunks that will make up the current line. # cur_len is just the length of all the chunks in cur_line. cur_line = [] cur_len = 0 # Figure out which static string will prefix this line. if lines: indent = self.subsequent_indent else: indent = self.initial_indent # Maximum width for this line. width = self.width - len(indent) # First chunk on line is whitespace -- drop it, unless this # is the very beginning of the text (ie. no lines started yet). if self.drop_whitespace and chunks[-1].strip() == '' and lines: del chunks[-1] while chunks: l = len(chunks[-1]) # Can at least squeeze this chunk onto the current line. if cur_len + l <= width: cur_line.append(chunks.pop()) cur_len += l # Nope, this line is full. else: break # The current line is full, and the next chunk is too big to # fit on *any* line (not just this one). if chunks and len(chunks[-1]) > width: self._handle_long_word(chunks, cur_line, cur_len, width) # If the last chunk on this line is all whitespace, drop it. if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': del cur_line[-1] # Convert current line back to a string and store it in list # of all lines (return value). if cur_line: lines.append(indent + ''.join(cur_line)) return lines # -- Public interface ---------------------------------------------- def wrap(self, text): """wrap(text : string) -> [string] Reformat the single paragraph in 'text' so it fits in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. """ text = self._munge_whitespace(text) chunks = self._split(text) if self.fix_sentence_endings: self._fix_sentence_endings(chunks) return self._wrap_chunks(chunks) def fill(self, text): """fill(text : string) -> string Reformat the single paragraph in 'text' to fit in lines of no more than 'self.width' columns, and return a new string containing the entire wrapped paragraph. """ return "\n".join(self.wrap(text)) # -- Convenience interface --------------------------------------------- def wrap(text, width=70, **kwargs): """Wrap a single paragraph of text, returning a list of wrapped lines. 
Reformat the single paragraph in 'text' so it fits in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.wrap(text) def fill(text, width=70, **kwargs): """Fill a single paragraph of text, returning a new string. Reformat the single paragraph in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped paragraph. As with wrap(), tabs are expanded and other whitespace characters converted to space. See TextWrapper class for available keyword args to customize wrapping behaviour. """ w = TextWrapper(width=width, **kwargs) return w.fill(text) # -- Loosely related functionality ------------------------------------- _whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE) _leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE) def dedent(text): """Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\thello" are considered to have no common leading whitespace. (This behaviour is new in Python 2.5; older versions of this module incorrectly expanded tabs before searching for common leading whitespace.) """ # Look for the longest leading string of spaces and tabs common to # all lines. margin = None text = _whitespace_only_re.sub('', text) indents = _leading_whitespace_re.findall(text) for indent in indents: if margin is None: margin = indent # Current line more deeply indented than previous winner: # no change (previous winner is still on top). elif indent.startswith(margin): pass # Current line consistent with and no deeper than previous winner: # it's the new winner. elif margin.startswith(indent): margin = indent # Current line and previous winner have no common whitespace: # there is no margin. else: margin = "" break # sanity check (testing/debugging only) if 0 and margin: for line in text.split("\n"): assert not line or line.startswith(margin), \ "line = %r, margin = %r" % (line, margin) if margin: text = re.sub(r'(?m)^' + margin, '', text) return text if __name__ == "__main__": #print dedent("\tfoo\n\tbar") #print dedent(" \thello there\n \t how are you?") print(dedent("Hello there.\n This is indented."))
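A short, runnable illustration of the public API defined above: fill() reflows a paragraph to the requested width, and dedent() strips the common leading whitespace, e.g. from an indented triple-quoted string.

```python
import textwrap

paragraph = ("The wrap() and fill() helpers reflow a single paragraph so "
             "that no output line exceeds the requested width.")
print(textwrap.fill(paragraph, width=40,
                    initial_indent="* ", subsequent_indent="  "))

source = """\
    def greet():
        return "hello"
"""
print(textwrap.dedent(source))
```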
mit
-7,864,780,077,585,546,000
39.380463
80
0.592564
false
4.16879
false
false
false
Karosuo/Linux_tools
xls_handlers/xls_sum_venv/lib/python3.6/site-packages/xlsxwriter/chart_column.py
1
3545
############################################################################### # # ChartColumn - A class for writing the Excel XLSX Column charts. # # Copyright 2013-2019, John McNamara, jmcnamara@cpan.org # from . import chart class ChartColumn(chart.Chart): """ A class for writing the Excel XLSX Column charts. """ ########################################################################### # # Public API. # ########################################################################### def __init__(self, options=None): """ Constructor. """ super(ChartColumn, self).__init__() if options is None: options = {} self.subtype = options.get('subtype') if not self.subtype: self.subtype = 'clustered' self.horiz_val_axis = 0 if self.subtype == 'percent_stacked': self.y_axis['defaults']['num_format'] = '0%' # Set the available data label positions for this chart type. self.label_position_default = 'outside_end' self.label_positions = { 'center': 'ctr', 'inside_base': 'inBase', 'inside_end': 'inEnd', 'outside_end': 'outEnd'} self.set_y_axis({}) ########################################################################### # # Private API. # ########################################################################### def _write_chart_type(self, args): # Override the virtual superclass method with a chart specific method. # Write the c:barChart element. self._write_bar_chart(args) def _write_bar_chart(self, args): # Write the <c:barChart> element. if args['primary_axes']: series = self._get_primary_axes_series() else: series = self._get_secondary_axes_series() if not len(series): return subtype = self.subtype if subtype == 'percent_stacked': subtype = 'percentStacked' # Set a default overlap for stacked charts. if 'stacked' in self.subtype: if self.series_overlap_1 is None: self.series_overlap_1 = 100 self._xml_start_tag('c:barChart') # Write the c:barDir element. self._write_bar_dir() # Write the c:grouping element. self._write_grouping(subtype) # Write the c:ser elements. for data in series: self._write_ser(data) # Write the c:gapWidth element. if args['primary_axes']: self._write_gap_width(self.series_gap_1) else: self._write_gap_width(self.series_gap_2) # Write the c:overlap element. if args['primary_axes']: self._write_overlap(self.series_overlap_1) else: self._write_overlap(self.series_overlap_2) # Write the c:axId elements self._write_axis_ids(args) self._xml_end_tag('c:barChart') ########################################################################### # # XML methods. # ########################################################################### def _write_bar_dir(self): # Write the <c:barDir> element. val = 'col' attributes = [('val', val)] self._xml_empty_tag('c:barDir', attributes) def _write_err_dir(self, val): # Overridden from Chart class since it is not used in Column charts. pass
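ChartColumn is normally created through XlsxWriter's public Workbook.add_chart() call rather than instantiated directly. A minimal sketch follows; the filename and cell range are placeholders.

```python
import xlsxwriter

workbook = xlsxwriter.Workbook('column_chart.xlsx')  # placeholder filename
worksheet = workbook.add_worksheet()
worksheet.write_column('A1', [10, 40, 50, 20, 10, 50])

# 'column' maps to the ChartColumn class above; 'stacked' and
# 'percent_stacked' are the other supported subtypes.
chart = workbook.add_chart({'type': 'column', 'subtype': 'clustered'})
chart.add_series({'values': '=Sheet1!$A$1:$A$6'})
worksheet.insert_chart('C1', chart)
workbook.close()
```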
gpl-3.0
-2,085,466,959,176,181,800
26.061069
79
0.464598
false
4.44793
false
false
false
vvoland/py3status
py3status/modules/weather_owm.py
1
29150
# -*- coding: utf-8 -*- """ Display ultimately customizable weather. This module allows you to specify an icon for nearly every weather scenario imaginable. The default configuration options lump many of the icons into a few groups, and due to the limitations of UTF-8, this is really as expressive as it gets. This module uses Timezone API (https://timezoneapi.io) and OpenWeatherMap API (https://openweathermap.org). Requires an API key for OpenWeatherMap (OWM), but the free tier allows you enough requests/sec to get accurate weather even up to the minute. I would highly suggest you install an additional font, such as the incredible (and free!) Weather Icons font (https://erikflowers.github.io/weather-icons), which has icons for most weather scenarios. But, this will still work with the i3bar default font, Deja Vu Sans Mono font, which has Unicode support. You can see the (limited) weather icon support within Unicode in the defaults. For more information, see the documentation (https://openweathermap.org/weather-conditions) on what weather conditions are supported. See the configuration options for how to specify each weather icon. Configuration parameters: api_key: Your OpenWeatherMap API key See https://openweathermap.org/appid. Required! (default None) cache_timeout: The time between API polling in seconds It is recommended to keep this at a higher value to avoid rate limiting with the API's. (default 600) city: The city to display for location information. If set, implicitly disables the Timezone API for determining city name. (default None) country: The country to display for location information. If set, implicitly disables the Timezone API for determining country name. (default None) forecast_days: Number of days to include in the forecast, including today (regardless of the 'forecast_include_today' flag) (default 3) forecast_include_today: Include today in the forecast? (Boolean) (default False) forecast_text_separator: Separator between entries in the forecast (default ' ') format: How to display the weather This also dictates the type of forecast. The placeholders here refer to the format_[...] variables found below. Available placeholders: icon, city, clouds, rain, snow, wind, humidity, pressure, temperature, sunrise, sunset, main, description, forecast (default '{city} {icon} {temperature}[ {rain}], {description} {forecast}') format_clouds: Formatting for cloud coverage (percentage) Available placeholders: icon, coverage (default '{icon} {coverage}%') format_forecast: Formatting for future forecasts Available placeholders: See 'format' This is similar to the 'format' field, but contains information for future weather. Notably, this does not include information about sunrise or sunset times. (default '{icon}') format_humidity: Formatting for humidity (percentage) Available placeholders: icon, humidity (default '{icon} {humidity}%') format_pressure: Formatting for atmospheric pressure Available placeholders: icon, pressure, sea_level (default '{icon} {pressure} hPa') format_rain: Formatting for rain volume over the past 3 hours Available placeholders: icon, amount (default '[\?if=amount {icon} {amount:.0f} {unit}]') format_snow: Formatting for snow volume over the past 3 hours Available placeholders: icon, amount (default '[\?if=amount {icon} {amount:.0f} {unit}]') format_sunrise: Formatting for sunrise time Note that this format accepts strftime/strptime placeholders to populate the output with the time information. 
Available placeholders: icon (default '{icon} %-I:%M %p') format_sunset: Formatting for sunset time This format accepts strftime/strptime placeholders to populate the output with the time information. Available placeholders: icon (default '{icon} %-I:%M %p') format_temperature: Formatting for temperature Available placeholders: current, icon, max, min (default '{icon} [\?color=all {current:.0f}°{unit}]') format_wind: Formatting for wind degree and speed The 'gust' option represents the speed of wind gusts in the wind unit. Available placeholders: icon, degree, speed, gust (default '[\?if=speed {icon} {speed:.0f} {unit}]') icon_atmosphere: Icon for atmospheric conditions, like fog, smog, etc. (default '🌫') icon_cloud: Icon for clouds (default '☁') icon_extreme: Icon for extreme weather (default '⚠') icon_humidity: Icon for humidity (default '●') icon_pressure: Icon for pressure (default '◌') icon_rain: Icon for rain (default '🌧') icon_snow: Icon for snow (default '❄') icon_sun: Icon for sunshine (default '☼') icon_sunrise: Icon for sunrise (default '⇑') icon_sunset: Icon for sunset (default '⇓') icon_temperature: Icon for temperature (default '○') icon_thunderstorm: Icon for thunderstorms (default '⛈') icon_wind: Icon for wind or breeze (default '☴') icons: A dictionary relating weather code to icon See https://openweathermap.org/weather-conditions for a complete list of supported icons. This will fall-back to the listed icon if there is no specific icon present. However, options included here take precedent over the above 'icon_{...}' options. There are multiple ways to specify individual icons based on the id: * Use the key '601' to reference the condition with id = 601 (snow) * Use the key '230_232' to reference a span of conditions inclusive, in this case conditions (230, 231, 232) (thunderstorm with drizzle) (default None) lang: An ISO 639-1 code for your language (two letters) (default 'en') location: A tuple of floats describing the desired weather location The tuple should follow the form (latitude, longitude), and if set, implicitly disables the Timezone API for determining location. (default None) offset_gmt: A string describing the offset from GMT (UTC) The string should follow the format '+12:34', where the first character is either '+' or '-', followed by the offset in hours, then the offset in minutes. If this is set, it disables the automatic timezone detection from the Timezone API. (default None) request_timeout: The timeout in seconds for contacting the API's. (default 10) thresholds: Configure temperature colors based on limits The numbers specified inherit the unit of the temperature as configured. The default below is intended for Fahrenheit. If the set value is empty or None, the feature is disabled. You can specify this parameter using a dictionary: * Keys are names. You have the option of 'current', 'min', 'max', or 'all' to specify a threshold. The first three are tied to the various temperature values, the last sets the same threshold for all outputs. If both 'all' and one of the first three are set (lets say 'min' for this example), the threshold will default to be the value in 'min', not 'all'. 
This goes for any configuration * The values are lists of pairs, with temperature (in the configured unit) as the first and the color as the second * To use the thresholds color, place '\?color=all' in the formatting string for temperature, replacing 'all' with any of the valid threshold names for different coloring effects * To have smooth transitions between colors, consider setting the 'gradients' configuration parameter to 'True', either in the global configuration, or in the module configuration! (default {'all': [(-100, '#0FF'), (0, '#00F'), (50, '#0F0'), (150, '#FF0')]}) unit_rain: Unit for rain fall When specified, a unit may be any combination of upper and lower case, such as 'Ft', and still be considered valid as long as it is in the below options. Options: cm, ft, in, mm, m, yd (default 'in') unit_snow: Unit for snow fall Options: cm, ft, in, mm, m, yd (default 'in') unit_temperature: Unit for temperature Options: c, f, k (default 'F') unit_wind: Unit for wind speed Options: fsec, msec, mph, kmh (default 'mph') Format placeholders: All: {icon} The icon associated with a formatting section format_clouds: {coverage} Cloud coverage percentage format_humidity: {humidity} Humidity percentage format_pressure: {pressure} Current atmospheric pressure in Pascals {sea_level} Sea-level atmospheric pressure in Pascals. format_rain: {amount} Rainfall in the specified unit {unit} The unit specified format_snow: {amount} Snowfall in the specified unit {unit} The unit specified format_temperature: {current} Current temperature {max} Maximum temperature in the configured unit {min} Minimum temperature {unit} The unit specified format_wind: {degree} Current wind heading {gust} Wind gusts speed in the specified unit {speed} Wind speed {unit} The unit specified format only: {city} The name of the city where the weather is {country} The name of the country where the weather is {forecast} Output of format_forecast format, format_forecast: {clouds} Output of format_clouds {description} Natural description of the current weather {humidity} Output of format_humidity {main} Short description of the current weather {pressure} Output of format_pressure {snow} Output of format_snow {sunrise} Output of format_sunrise {sunset} Output of format_sunset {temperature} Output of format_temperature {wind} Output of format_wind Examples: ``` # change icons weather_owm { icons = { '200': "☔" '230_232': "🌧" } } ``` @author alexoneill @licence MIT SAMPLE OUTPUT {'full_text': 'New York 🌫 ○ 30°F, mist ☁ ☁ ☁'} diff_weather {'full_text': '🌫 ○ 59°F, foggy ⛅ ☼ 🌧'} """ import datetime # API information OWM_API = '2.5' OWM_CURR_ENDPOINT = 'http://api.openweathermap.org/data/%s/weather?' \ 'APPID=%s&lat=%f&lon=%f&lang=%s' OWM_FUTURE_ENDPOINT = 'http://api.openweathermap.org/data/%s/forecast?' 
\ 'APPID=%s&lat=%f&lon=%f&lang=%s&cnt=%%d' IP_ENDPOINT = 'https://timezoneapi.io/api/ip' # Paths of information to extract from JSON IP_CITY = '//data/city' IP_COUNTRY = '//data/country' IP_GMT_OFF = '//data/datetime/offset_gmt' IP_LOC = '//data/location' OWM_CLOUD_COVER = '//clouds/all' OWM_DESC = '//weather:0/main' OWM_DESC_LONG = '//weather:0/description' OWM_HUMIDITY = '//main/humidity' OWM_PRESSURE = '//main' OWM_RAIN = '//rain/3h' OWM_SNOW = '//snow/3h' OWM_SUNRISE = '//sys/sunrise' OWM_SUNSET = '//sys/sunset' OWM_TEMP = '//main' OWM_WEATHER_ICON = '//weather:0/id' OWM_WIND = '//wind' # Units constants RAIN_UNITS = set(['cm', 'ft', 'in', 'mm', 'm', 'yd']) SNOW_UNITS = RAIN_UNITS TEMP_UNITS = set(['c', 'f', 'k']) WIND_UNITS = set(['fsec', 'msec', 'mph', 'kmh']) # Conversion factors FT_FROM_METER = 3.28084 IN_FROM_MM = 0.0393701 KMH_FROM_MSEC = 0.277778 MPH_FROM_MSEC = 2.23694 # Thresholds options THRESHOLDS_ALL = 'all' THRESHOLDS_NAMES = set([THRESHOLDS_ALL, 'current', 'min', 'max']) # Thresholds defaults THRESHOLDS = dict([(THRESHOLDS_ALL, [ (-100, '#0FF'), (0, '#00F'), (50, '#0F0'), (150, '#FF0') ])]) class OWMException(Exception): pass class Py3status: """ """ api_key = None cache_timeout = 600 city = None country = None forecast_days = 3 forecast_include_today = False forecast_text_separator = ' ' format = '{city} {icon} {temperature}[ {rain}], {description} {forecast}' format_clouds = '{icon} {coverage}%' format_forecast = '{icon}' format_humidity = '{icon} {humidity}%' format_pressure = '{icon} {pressure} hPa' format_rain = '[\?if=amount {icon} {amount:.0f} {unit}]' format_snow = '[\?if=amount {icon} {amount:.0f} {unit}]' format_sunrise = '{icon} %-I:%M %p' format_sunset = '{icon} %-I:%M %p' format_temperature = u'{icon} [\?color=all {current:.0f}°{unit}]' format_wind = '[\?if=speed {icon} {speed:.0f} {unit}]' icon_atmosphere = u'🌫' icon_cloud = u'☁' icon_extreme = u'⚠' icon_humidity = u'●' icon_pressure = u'◌' icon_rain = u'🌧' icon_snow = u'❄' icon_sun = u'☼' icon_sunrise = u'⇑' icon_sunset = u'⇓' icon_temperature = u'○' icon_thunderstorm = u'⛈' icon_wind = u'☴' icons = None lang = 'en' location = None offset_gmt = None request_timeout = 10 thresholds = THRESHOLDS unit_rain = 'in' unit_snow = 'in' unit_temperature = 'F' unit_wind = 'mph' def _get_icons(self): if self.icons is None: self.icons = {} # Defaults for weather ranges defaults = { '200_299': self.icon_thunderstorm, '300_399': self.icon_rain, '500_599': self.icon_rain, '600_699': self.icon_snow, '700_799': self.icon_atmosphere, '800': self.icon_sun, '801_809': self.icon_cloud, '900_909': self.icon_extreme, '950_959': self.icon_wind, '960_999': self.icon_extreme, } # Handling ranges from OpenWeatherMap data = {} for source in (defaults, self.icons): for key in source: if not key.replace('_', '').isdigit(): raise Exception('Invalid icon id: (%s)' % key) if '_' in key: if key.count('_') != 1: raise Exception('Invalid icon range: %s' % key) # Populate each code (start, end) = tuple(map(int, key.split('_'))) for code in range(start, end + 1): data[code] = source[key] else: data[int(key)] = source[key] return data def post_config_hook(self): # Verify the API key if self.api_key is None: raise OWMException('API Key for OpenWeatherMap cannot be empty!' 
' Go to http://openweathermap.org/appid to' ' get an API Key.') # Generate our icon array self.icons = self._get_icons() # Verify the units configuration if self.unit_rain.lower() not in RAIN_UNITS: raise Exception('unit_rain is not recognized') if self.unit_snow.lower() not in SNOW_UNITS: raise Exception('unit_snow is not recognized') if self.unit_temperature.lower() not in TEMP_UNITS: raise Exception('unit_temperature is not recognized') if self.unit_wind.lower() not in WIND_UNITS: raise Exception('unit_wind is not recognized') # Check thresholds for validity if set(self.thresholds.keys()) > THRESHOLDS_NAMES: raise Exception('threshold name(s) are not recognized') # Copy thresholds if available if THRESHOLDS_ALL in self.thresholds: for name in (THRESHOLDS_NAMES - set([THRESHOLDS_ALL])): if name not in self.thresholds: self.thresholds[name] = self.thresholds[THRESHOLDS_ALL] def _get_req_url(self, base, coords): # Construct the url from the pattern params = [OWM_API, self.api_key] + list(coords) + [self.lang] return base % tuple(params) def _make_req(self, url): # Make a request expecting a JSON response req = self.py3.request(url, timeout=self.request_timeout) if req.status_code != 200: data = req.json() raise OWMException(data['message'] if ('message' in data) else 'API Error') return req.json() def _jpath(self, data, query, default=None): # Take the query expression and drill down into the given dictionary parts = query.strip('/').split('/') for part in parts: try: # This represents a key:index expression, representing first # selecting a key, then an index if ':' in part: (part, index) = tuple(part.split(':')) data = data[part] data = data[int(index)] # Select a portion of the dictionary by key in the path else: data = data[part] # Failed, so return the default except (KeyError, IndexError, TypeError): return default return data def _get_loc_tz_info(self): # Helper to parse a GMT offset def _parse_offset(offset): # Parse string (plus, rest) = ((offset[0] == '+'), offset[1:]) (hours, mins) = map(int, rest.split(':')) # Generate timedelta tz_offset = datetime.timedelta(hours=hours, minutes=mins) return (tz_offset if plus else -tz_offset) # Preference a user-set location if all(map(lambda x: x is not None, (self.location, self.city, self.country, self.offset_gmt))): return (self.location, self.city, self.country, _parse_offset(self.offset_gmt)) # Contact the Timezone API try: data = self._make_req(IP_ENDPOINT) except (self.py3.RequestException): return None except (self.py3.RequestURLError): return None # Extract location data lat_lng = self.location if self.location is None: location = self._jpath(data, IP_LOC, '0,0') lat_lng = tuple(map(float, location.split(','))) # Extract city city = self.city if self.city is None: city = self._jpath(data, IP_CITY, '') # Extract country country = self.country if self.country is None: country = self._jpath(data, IP_COUNTRY, '') # Extract timezone offset tz_offset = (_parse_offset(self.offset_gmt) if (self.offset_gmt is not None) else None) if self.offset_gmt is None: offset = self._jpath(data, IP_GMT_OFF, '+0:00') tz_offset = _parse_offset(offset) return (lat_lng, city, country, tz_offset) def _get_weather(self, coords): # Get and process the current weather url = self._get_req_url(OWM_CURR_ENDPOINT, coords) return self._make_req(url) def _get_forecast(self, coords): # Get the next few days if self.forecast_days == 0: return [] # Get raw data url = (self._get_req_url(OWM_FUTURE_ENDPOINT, coords) % (self.forecast_days + 1)) data = self._make_req(url) # 
Extract forecast weathers = data['list'] return weathers[:-1] if (self.forecast_include_today) else weathers[1:] def _get_icon(self, wthr): # Lookup the icon from the weather code (default sunny) return self.icons[self._jpath(wthr, OWM_WEATHER_ICON, 800)] def _format_clouds(self, wthr): # Format the cloud cover (default clear) return self.py3.safe_format(self.format_clouds, { 'icon': self.icon_cloud, 'coverage': self._jpath(wthr, OWM_CLOUD_COVER, 0), }) def _format_rain(self, wthr): # Format rain fall rain = self._jpath(wthr, OWM_RAIN, 0) # Data comes as mm inches = rain * IN_FROM_MM options = { 'mm': round(rain), 'cm': round(rain / 10), 'm': round(rain / 100), 'in': round(inches), 'ft': round(inches / 12), 'yd': round(inches / 36) } # Format the rain fall return self.py3.safe_format(self.format_rain, { 'icon': self.icon_rain, 'amount': options[self.unit_rain.lower()], 'unit': self.unit_rain, }) def _format_snow(self, wthr): # Format snow fall snow = self._jpath(wthr, OWM_SNOW, 0) # Data comes as mm inches = snow * IN_FROM_MM options = { 'mm': round(snow), 'cm': round(snow / 10), 'm': round(snow / 100), 'in': round(inches), 'ft': round(inches / 12), 'yd': round(inches / 36) } # Format the snow fall return self.py3.safe_format(self.format_snow, { 'icon': self.icon_snow, 'amount': options[self.unit_snow.lower()], 'unit': self.unit_snow, }) def _format_wind(self, wthr): wind = self._jpath(wthr, OWM_WIND, dict()) # Speed and Gust msec_speed = wind['speed'] if ('speed' in wind) else 0 msec_gust = wind['gust'] if ('gust' in wind) else 0 options = { 'fsec': { 'speed': msec_speed * FT_FROM_METER, 'gust': msec_gust * FT_FROM_METER}, 'msec': { 'speed': msec_speed, 'gust': msec_gust}, 'mph': { 'speed': msec_speed * MPH_FROM_MSEC, 'gust': msec_gust * MPH_FROM_MSEC}, 'kmh': { 'speed': msec_speed * KMH_FROM_MSEC, 'gust': msec_gust * KMH_FROM_MSEC}} # Get the choice and add more choice = options[self.unit_wind.lower()] choice['icon'] = self.icon_wind choice['degree'] = wind['deg'] if ('deg' in wind) else 0 choice['unit'] = self.unit_wind # Format the wind speed return self.py3.safe_format(self.format_wind, choice) def _format_humidity(self, wthr): # Format the humidity (default zero humidity) humidity = self._jpath(wthr, OWM_HUMIDITY, 0) return self.py3.safe_format(self.format_humidity, { 'icon': self.icon_humidity, 'humidity': humidity, }) def _format_pressure(self, wthr): # Get data and add the icon pressure = self._jpath(wthr, OWM_PRESSURE, dict()) pressure['icon'] = self.icon_pressure # Format the barometric pressure return self.py3.safe_format(self.format_pressure, pressure) def _format_temp(self, wthr): # Get Kelvin data (default absolute zero) kelvin = self._jpath(wthr, OWM_TEMP, 0) # Temperature conversion methods def kToC(val): return val - 273.15 def kToF(val): return val * (9.0 / 5.0) - 459.67 options = { 'c': { 'current': round(kToC(kelvin['temp'])), 'max': round(kToC(kelvin['temp_max'])), 'min': round(kToC(kelvin['temp_min'])) }, 'f': { 'current': round(kToF(kelvin['temp'])), 'max': round(kToF(kelvin['temp_max'])), 'min': round(kToF(kelvin['temp_min'])) }, 'k': { 'current': round(kelvin['temp']), 'max': round(kelvin['temp_max']), 'min': round(kelvin['temp_min']) } } # Get the choice and add more choice = options[self.unit_temperature.lower()] choice['icon'] = self.icon_temperature choice['unit'] = self.unit_temperature # Calculate thresholds for name in (THRESHOLDS_NAMES - set([THRESHOLDS_ALL])): # Try to apply the specific threshold if name in self.thresholds: 
self.py3.threshold_get_color(choice[name], name) # Format the temperature return self.py3.safe_format(self.format_temperature, choice) def _format_sunrise(self, wthr, tz_offset): # Get the time for sunrise (default is the start of time) dt = datetime.datetime.utcfromtimestamp( self._jpath(wthr, OWM_SUNRISE, 0)) dt += tz_offset # Format the sunrise replaced = dt.strftime(self.format_sunrise) return self.py3.safe_format(replaced, { 'icon': self.icon_sunrise, }) def _format_sunset(self, wthr, tz_offset): # Get the time for sunset (default is the start of time) dt = datetime.datetime.utcfromtimestamp( self._jpath(wthr, OWM_SUNSET, 0)) dt += tz_offset # Format the sunset replaced = dt.strftime(self.format_sunset) return self.py3.safe_format(replaced, { 'icon': self.icon_sunset, }) def _format_dict(self, wthr, city, country, tz_offset): data = { # Standard options 'icon': self._get_icon(wthr), 'clouds': self._format_clouds(wthr), 'rain': self._format_rain(wthr), 'snow': self._format_snow(wthr), 'wind': self._format_wind(wthr), 'humidity': self._format_humidity(wthr), 'pressure': self._format_pressure(wthr), 'temperature': self._format_temp(wthr), 'sunrise': self._format_sunrise(wthr, tz_offset), 'sunset': self._format_sunset(wthr, tz_offset), # Descriptions (defaults to empty) 'main': self._jpath(wthr, OWM_DESC, '').lower(), 'description': self._jpath(wthr, OWM_DESC_LONG, '').lower(), # Location information 'city': city, 'country': country, } return data def _format(self, wthr, fcsts, city, country, tz_offset): # Format all sections today = self._format_dict(wthr, city, country, tz_offset) # Insert forecasts forecasts = [] for day in fcsts: future = self._format_dict(day, city, country, tz_offset) forecasts.append(self.py3.safe_format(self.format_forecast, future)) # Give the final format today['forecast'] = self.py3.composite_join( self.forecast_text_separator, forecasts) return self.py3.safe_format(self.format, today) def weather_owm(self): # Get weather information loc_tz_info = self._get_loc_tz_info() text = '' if loc_tz_info is not None: (coords, city, country, tz_offset) = loc_tz_info wthr = self._get_weather(coords) fcsts = self._get_forecast(coords) text = self._format(wthr, fcsts, city, country, tz_offset) return { 'full_text': text, 'cached_until': self.py3.time_in(seconds=self.cache_timeout) } if __name__ == '__main__': """ Run module in test mode. """ import os from py3status.module_test import module_test # All possible outputs all_string = '/'.join([ '{clouds}', '{description}', '{main}', '{humidity}', '{pressure}', '{snow}', '{sunrise}', '{sunset}', '{temperature}', '{wind}' ]) module_test(Py3status, config={ 'api_key': os.getenv('OWM_API_KEY'), # Select icons 'icons': { '200': "☔", '230_232': "🌧", }, # Complete configuration 'format_clouds': '{icon} {coverage}%', 'format_humidity': '{icon} {humidity}%', 'format_pressure': '{icon} {pressure} Pa, sea: {sea_level} Pa', 'format_rain': '{icon} {amount:.0f} in', 'format_snow': '{icon} {amount:.0f} in', 'format_temperature': ('{icon}: max: [\?color=max {max:.0f}°F], ' 'min: [\?color=min {min:.0f}°F], ' 'current: [\?color=current {current:.0f}°F]'), 'format_wind': ('{icon} {degree}°, gust: {gust:.0f} mph, ' 'speed: {speed:.0f} mph'), 'format': ('{city}, {country}: {icon} ' + all_string + '//{forecast}'), 'format_forecast': ('{icon} ' + all_string), # Miscellaneous 'forecast_days': 1, 'forecast_text_separator': '//', })
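The _jpath() helper above implements a small '//key:index/key' query syntax over the JSON returned by the weather APIs. The standalone re-implementation below is for illustration only, and the sample data is made up.

```python
# Simplified, self-contained version of the path lookup used by the module.
def jpath(data, query, default=None):
    for part in query.strip('/').split('/'):
        try:
            if ':' in part:
                key, index = part.split(':')
                data = data[key][int(index)]
            else:
                data = data[part]
        except (KeyError, IndexError, TypeError):
            return default
    return data

sample = {'weather': [{'id': 500, 'main': 'Rain'}], 'main': {'humidity': 81}}
print(jpath(sample, '//weather:0/id'))        # 500
print(jpath(sample, '//main/humidity'))       # 81
print(jpath(sample, '//missing/key', 'n/a'))  # n/a
```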
bsd-3-clause
1,267,535,162,416,054,500
34.735547
85
0.576739
false
3.771647
true
false
false
collective/zettwerk.users
setup.py
1
1553
from setuptools import setup, find_packages import os version = '0.2.dev0' install_requires = [ 'setuptools', # -*- Extra requirements: -*- ] tests_require = [ 'mocker', ] setup(name='zettwerk.users', version=version, description="Additional user information for Plone", long_description=open("README.txt").read() + "\n" + open(os.path.join("docs", "HISTORY.txt")).read(), # Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ "Environment :: Web Environment", "Framework :: Plone", "Framework :: Zope2", "License :: OSI Approved :: GNU General Public License (GPL)", "Operating System :: OS Independent", "Programming Language :: Python", ], keywords='Zope CMF Plone Users', author='Christoph Glaubitz', author_email='cg@zettwerk.com', url='http://svn.plone.org/svn/collective/zettwerk.users', license='GPL', packages=find_packages(exclude=['ez_setup']), namespace_packages=['zettwerk'], include_package_data=True, zip_safe=False, install_requires=install_requires, tests_require=install_requires + tests_require, entry_points=""" # -*- Entry points: -*- [distutils.setup_keywords] paster_plugins = setuptools.dist:assert_string_list [egg_info.writers] paster_plugins.txt = setuptools.command.egg_info:write_arg """, paster_plugins = ["ZopeSkel"], )
gpl-2.0
-6,070,823,635,687,436,000
31.354167
84
0.607856
false
3.797066
false
false
false
iron-bun/python_scripts
scrape_your_plate/scrape_your_plate.py
1
7791
#!/usr/bin/env python3 # # Scrape recipes from pepperplate.com. # import requests from bs4 import BeautifulSoup import lxml.html import json import time import getpass import re import os class pepperplate_recipe: def __init__(self, id, html): self.id = id self.soup = BeautifulSoup(html) def get_id(self): return self.id def get_title(self): return self.soup.find(id='cphMiddle_cphMain_lblTitle').get_text().strip() def get_new_body(self): new_soup = BeautifulSoup('<html><head></head><body></body></html>') thumb = self.get_thumbnail() if thumb: hdr = new_soup.new_tag('img') hdr['src'] = './img/{}'.format(self.id + '.jpg') new_soup.body.append(hdr) #Title title = self.get_title() hdr = new_soup.new_tag('title') hdr.append(title) new_soup.head.append(hdr) hdr = new_soup.new_tag('h1') hdr.append(title) new_soup.body.append(hdr) #source source = self.soup.find(id='cphMiddle_cphMain_hlSource') if source: new_soup.body.append(source) #ingredients hdr = new_soup.new_tag('h3') hdr.append('Ingredients') new_soup.body.append(hdr) item = self.soup.find('ul', {'class':'inggroups'}) if item: new_soup.body.append(item) else: new_soup.body.append('No ingedients listed') #instructions hdr = new_soup.new_tag('h3') hdr.append('Instructions') new_soup.body.append(hdr) item = self.soup.find('ol', {'class':'dirgroupitems'}) if item: new_soup.body.append(item) else: new_soup.body.append('No instructions listed') #Notes hdr = new_soup.new_tag('h3') hdr.append('Notes') new_soup.body.append(hdr) notes = self.soup.find(id="cphMiddle_cphMain_lblNotes") if notes: hdr = new_soup.new_tag('pre') hdr.append(notes.get_text()) new_soup.append(hdr) #Tags hdr = new_soup.new_tag('h3') hdr.append('Tags') new_soup.body.append(hdr) tags = self.get_tags() if tags: hdr = new_soup.new_tag('span', id='tags') hdr.append(tags) new_soup.body.append(hdr) return new_soup.prettify('latin-1', formatter="html") def get_thumbnail(self): tmp = self.soup.find(id='cphMiddle_cphMain_imgRecipeThumb') if tmp: return tmp['src'] else: return None def get_tags(self): tmp = self.soup.find(id='cphMiddle_cphMain_pnlTags') if tmp: tmp = tmp.find('span').get_text().strip() return tmp else: return None class pepperplate: def __init__(self, hostname): self.hostname = hostname self.last_page = False self.session = requests.Session() def set_username(self, username): self.username = username def set_password(self, password): self.password = password def login(self): if self.username == None or self.password == None: print('No login details supplied') return False url = 'https://{}/login.aspx'.format(self.hostname) headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36"} self.session.headers.update(headers) r = self.session.get(url) login_page = lxml.html.fromstring(r.content) VIEWSTATE = login_page.xpath('//input[@id="__VIEWSTATE"]/@value')[0] EVENTVALIDATION = login_page.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0] login_data={"__VIEWSTATE":VIEWSTATE, "__EVENTVALIDATION":EVENTVALIDATION, "__EVENTARGUMENT":'', "__EVENTTARGET":'ctl00$cphMain$loginForm$ibSubmit', "ctl00$cphMain$loginForm$tbEmail":self.username, "ctl00$cphMain$loginForm$tbPassword":self.password, "ctl00$cphMain$loginForm$cbRememberMe":'on' } r = self.session.post(url, data=login_data) if r.url != 'http://{}/recipes/default.aspx'.format(self.hostname): print('Login failure') return False return True def get_page(self, page): url = 'http://{}/recipes/default.aspx/GetPageOfResults'.format(self.hostname) parameters = 
json.dumps({'pageIndex':page, 'pageSize':20, 'sort':4, 'tagIds': [], 'favoritesOnly':0}) headers={'Referer':'http://{}/recipes/default.aspx'.format(self.hostname) ,'Content-Type': 'application/json' ,'X-Requested-With': 'XMLHttpRequest' ,'DNT':'1' ,'Accept': 'application/json, text/javascript, */*; q=0.01' ,'Accept-Language': 'en,de;q=0.7,en-US;q=0.3' ,'Accept-Encoding': 'gzip, deflate'} r = self.session.request('POST', url, data=parameters, headers=headers) page = lxml.html.fromstring(r.json()['d']) self.page = [re.findall(r'id=(\d+)', a)[0] for a in page.xpath('//div[@class="item"]/p/a/@href')] self.last_page = len(self.page) < 20 return self.page def get_recipe(self, id): url = 'http://{}/recipes/view.aspx?id={}'.format(self.hostname, id) r = self.session.request('GET', url) return r.text def get_url(self, url): r = requests.get(url) return r.content def is_last_page(self): return self.last_page def is_logged_in(self): return self.session != None def save_recipe(recipe, savepath): filename = recipe.get_title().replace('/','_').replace('"', '').replace(':','').replace(' ','_') with open(savepath + '/{}.{}.html'.format(filename, recipe.get_id()), 'wb') as f: f.write(recipe.get_new_body()) def save_file(img, savepath): with open(savepath, 'wb') as f: f.write(img) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Scrape recipies from Pepperplate') parser.add_argument('username', help='Username to log in with') parser.add_argument('password', nargs="?", default=None, help='Password to log in with. If not provided on the command line it will be requested by the program') parser.add_argument('directory', nargs="?", default='recipes', help='Directory to which download everything. defaults to "recipes"') args = parser.parse_args() if not args.password: args.password = getpass.getpass('Please enter the password for account {}: '.format(args.username)) imgpath = os.path.join(args.directory, 'img', '{}') if not os.path.exists(imgpath.format("")): os.makedirs(imgpath, exist_ok = True) pp = pepperplate('www.pepperplate.com') pp.set_username(args.username) pp.set_password(args.password) if not pp.login(): exit(1) page = 0 while not pp.is_last_page(): print('Downloading page {}'.format(page+1)) for id in pp.get_page(page): time.sleep(1) #sleep 1 second between requests to not mash the server recipe = pepperplate_recipe(id, pp.get_recipe(id)) print('Downloaded {}'.format(recipe.get_title())) save_recipe(recipe, args.directory) if recipe.get_thumbnail(): save_file(pp.get_url(recipe.get_thumbnail()), imgpath.format(id + '.jpg')) page += 1
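The login() method above follows the usual ASP.NET WebForms flow: fetch the login page, pull out the __VIEWSTATE and __EVENTVALIDATION hidden fields, and post them back together with the credentials. A stripped-down sketch of that flow is shown below; the URL and form field names are placeholders, not Pepperplate's actual ones.

```python
# Generic ASP.NET WebForms login sketch (placeholder URL and field names).
import requests
import lxml.html

session = requests.Session()
login_url = 'https://example.com/login.aspx'  # placeholder
page = lxml.html.fromstring(session.get(login_url).content)
viewstate = page.xpath('//input[@id="__VIEWSTATE"]/@value')[0]
eventvalidation = page.xpath('//input[@id="__EVENTVALIDATION"]/@value')[0]
session.post(login_url, data={
    '__VIEWSTATE': viewstate,
    '__EVENTVALIDATION': eventvalidation,
    'tbEmail': 'user@example.com',  # field names differ per site
    'tbPassword': 'secret',
})
```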
unlicense
8,452,935,934,564,985,000
31.327801
165
0.566294
false
3.65089
false
false
false
tuxofil/sort-media
sort_media.py
1
14535
#!/usr/bin/env python """ This script helps to sort images and videos by date and time. It searches given source directory recursively for media files (images and movies), fetch date and time information from them and copies it (or moves, see '--move' command line option) to destination directory. New file location for arbitrary FILENAME will look like: "DESTINATION_DIR/YEAR/YEAR-MONTH-DAY/HOUR:MIN:SEC_FILENAME". The FILENAME also will be lowered in case and destination file will be chmoded (to 0644 by default, see '--chmod' command line option). Additional features: you can sort your files from cameras with badly supported date and time. You can define time shifting with command line options. The media files metainfo will NOT be affected by this shifting but only new file locations and names. Command line arguments and options. Invoke the script with a single '--help' option to see brief cheat-sheet. Dependencies: You need python-exif package to fetching date time from images to work. You need ffmpeg installed to be able to fetch date and time information from movies. """ import sys import getopt import os import os.path import time import datetime import shutil import subprocess import EXIF SUPPORTED_VIDEO_EXTENSIONS = ['avi', 'mpg', 'mp4', '3gp', 'mov', 'm4v'] SUPPORTED_IMAGE_EXTENSIONS = ['jpeg', 'jpg', 'png', 'tif', 'tiff'] # Unfortunately, there is no package like python-ffmpeg (or pyffmpeg # from Google) so I decided to call ffmpeg directly through the shell. # It's not a big overhead for time-to-time task, really. FETCH_VIDEO_DATETIME_CMD = \ """ ffmpeg -y -i "{0}" -f ffmetadata - 2>&1 \ | grep creation_time \ | head -1 \ | sed -r 's/^[^:]+:\s+//' \ | sed -r 's/^.+=//' \ | sed -r 's/(-|:)/ /g' """ # ---------------------------------------------------------------------- # internal definitions quiet = False dry_run = False debug = False files_mode = 0644 remove_cleared_dirs = True ACTION_COPY = 'copy' ACTION_MOVE = 'move' action = ACTION_COPY time_shift = { 'days' : 0, 'hours' : 0, 'minutes' : 0, 'seconds' : 0 } SUPPORTED_EXTENSIONS = \ SUPPORTED_VIDEO_EXTENSIONS + SUPPORTED_IMAGE_EXTENSIONS def usage(): """ Print short help meesage. """ print('Usage:') print(' ' + sys.argv[0] + ' --help') print(' ' + sys.argv[0] + ' [options] /src/dir/path /dst/dir/path') print('Options:') print(' --move move files (will remove source files);') print(' --quiet be quiet;') print(' --dry-run do nothing, only report files and dirs processing;') print(' --dnrcd do not remove cleared directories;') print(' --chmod=Octal permissions for new files. Default is 0644.') print('Time shifting options:') print(' --year-shift=Integer') print(' --month-shift=Integer') print(' --day-shift=Integer') print(' --hour-shift=Integer') print(' --minute-shift=Integer') print(' --second-shift=Integer') sys.exit(1) def err(message): """ Print the message to the stderr stream. :param message: the message to print :type message" string """ sys.stderr.write('Error: {0}\n'.format(message)) def warn(message): """ Print the message to the stderr stream. :param message: the message to print :type message" string """ sys.stderr.write('Warning: {0}\n'.format(message)) def info(message): """ Print the message to the stdout stream. If quiet mode is enabled, just do nothing. :param message: the message to print :type message" string """ if not quiet: sys.stdout.write(message) def dbg(message): """ Print the message to the stdout stream. If quiet mode is enabled, just do nothing. 
:param message: the message to print :type message" string """ if debug: sys.stdout.write('DEBUG: {0}\n'.format(message)) def process_dir(src_path, dst_path): """ Do process files from source directory (src_path) and move/copy them to destination directory (dst_dir). :param src_path: source directory path :type src_path: string :param dst_path: destination directory path :type dst_path: string """ info('entering {0}\n'.format(src_path)) (files, dirs) = listdir(src_path) items_count = len(files) + len(dirs) for i in files: abs_i = os.path.join(src_path, i) info(' processing {0}: '.format(abs_i)) dates = get_media_file_date_time(abs_i) if dates is not None: (orig_datetime, shifted_datetime) = dates dst_media_path = get_dst_media_path(dst_path, i, orig_datetime, shifted_datetime) if not dry_run: mkdirP(os.path.dirname(dst_media_path)) if action == ACTION_COPY: info('copying to {0}...'.format(dst_media_path)) if dry_run: info('OK (dry run)\n') else: try: shutil.copy(abs_i, dst_media_path) os.chmod(dst_media_path, files_mode) info('OK\n') except Exception as e: info('error: {0}\n'.format(e)) elif action == ACTION_MOVE: info('moving to {0}...'.format(dst_media_path)) if dry_run: info('OK (dry run)\n') else: try: shutil.move(abs_i, dst_media_path) os.chmod(dst_media_path, files_mode) info('OK\n') except Exception as e: info('error: {0}\n'.format(e)) for i in dirs: process_dir(os.path.join(src_path, i), dst_path) if remove_cleared_dirs and \ items_count > 0 and \ len(os.listdir(src_path)) == 0: info('removing empty directory: {0}\n'.format(src_path)) try: os.rmdir(src_path) except Exception as e: warn(e) else: info('leaving {0}\n'.format(src_path)) def listdir(path): """ List directory, filter supported files and return two lists: list of subdirectories and list of media files found. :param path: directory path :type path: string :rtype: tuple of two elements, where first element is list of media filenames (without path) and second element is list of subdirectories (without path). """ files = list() dirs = list() for i in os.listdir(path): abs_i = os.path.join(path, i) if os.path.exists(abs_i): if os.path.isdir(abs_i): dirs.append(i) elif is_media(abs_i): files.append(i) files.sort() dirs.sort() return (files, dirs) def is_media(path): """ Check if given file is supported by the script. :param path: file path :type path: string :rtype: boolean """ ext = os.path.splitext(path)[1].lstrip('.').lower() return ext in SUPPORTED_EXTENSIONS and os.path.isfile(path) def get_media_file_date_time(path): """ Read creation date and time from given media file metadata. Requested time shifting will be applyed automatically. Return tuple (orig_datetime, shifted_datetime) on success, where orig_datetime and shifted_datetime is instance of datetime.datetime class. Return None if no date/time info found. 
    :param path: media file path
    :type path: string
    :rtype: tuple or None
    """
    time_struct = None
    ext = os.path.splitext(path)[1].lstrip('.').lower()
    if ext in SUPPORTED_IMAGE_EXTENSIONS:
        with open(path, 'rb') as fd:
            exif_data = EXIF.process_file(fd)
        if not exif_data:
            info('no EXIF information found\n')
            return None
        # search for date and time
        for k in ['Image DateTime',
                  'EXIF DateTimeOriginal',
                  'EXIF DateTimeDigitized']:
            try:
                time_struct = time.strptime(exif_data[k].printable,
                                            '%Y:%m:%d %H:%M:%S')
                break
            except:
                pass
    elif ext in SUPPORTED_VIDEO_EXTENSIONS:
        try:
            raw_datetime = sh(FETCH_VIDEO_DATETIME_CMD.format(path)).strip()
            time_struct = time.strptime(raw_datetime, '%Y %m %d %H %M %S')
        except:
            pass
    dbg('time_struct: {0}'.format(time_struct))
    if time_struct is None:
        info('no date/time information found\n')
        return None
    dbg('time_shift: {0}'.format(time_shift))
    timedelta = datetime.timedelta(**time_shift)
    dbg('timedelta: {0}'.format(timedelta))
    orig_datetime = datetime.datetime.fromtimestamp(time.mktime(time_struct))
    shifted_datetime = orig_datetime + timedelta
    dbg('shifted result: {0}'.format(shifted_datetime))
    if is_in_future(shifted_datetime):
        warn('Shifted datetime for {0} is in the future ({1})'.format(
            path, shifted_datetime))
    return (orig_datetime, shifted_datetime)


def is_in_future(date_time):
    """
    Return True if the given datetime is in the future.

    :param date_time: tested datetime
    :type date_time: instance of the datetime.datetime class
    :rtype: boolean
    """
    return (datetime.datetime.now() - date_time).total_seconds() < 0


def sh(command):
    """
    Run an external command (with a shell) and return its stdout as a string.
    If the external command fails (retcode != 0), None is returned.

    :param command: external command to run
    :type command: string
    :rtype: string or None
    """
    p = subprocess.Popen([command], stdout = subprocess.PIPE,
                         stderr = subprocess.PIPE, shell = True,
                         env = {'LC_ALL' : 'C'})
    (stdout_data, stderr_data) = p.communicate()
    retcode = p.wait()
    if retcode == 0:
        return stdout_data
    info('\n')
    err('external command failed.\n'
        'The command was: {0}\n\n'
        'STDERR:\n{1}\n'.format(command, stderr_data))
    return None


def get_dst_media_path(rootdir_path, src_filename, orig_datetime,
                       shifted_datetime):
    """
    Create the absolute path of the new location for the given media file.

    :param rootdir_path: destination root directory path
    :type rootdir_path: string
    :param src_filename: source media file basename
    :type src_filename: string
    :param orig_datetime: date and time info for the media file (original)
    :type orig_datetime: instance of the datetime.datetime class
    :param shifted_datetime: date and time info for the media file (shifted)
    :type shifted_datetime: instance of the datetime.datetime class
    :rtype: string
    """
    dst_filename = src_filename.lower()
    # hack for files processed by the first version of the program
    if dst_filename.startswith(orig_datetime.strftime('%H:%M_')):
        dst_filename = dst_filename[6:]
    # use a file prefix based on time to sort files fetched
    # from various sources
    filename_prefix = shifted_datetime.strftime('%H:%M:%S_')
    if not dst_filename.startswith(filename_prefix):
        dst_filename = filename_prefix + dst_filename
    return os.path.join(
        rootdir_path,
        shifted_datetime.strftime('%Y'),
        shifted_datetime.strftime('%Y-%m-%d'),
        dst_filename)


def check_dir(path):
    """
    Check that the directory exists.
    Halt the script with an error if it does not.
    :param path: directory path
    :type path: string
    """
    if not os.path.exists(path):
        err('"{0}" does not exist'.format(path))
        sys.exit(1)
    if not os.path.isdir(path):
        err('"{0}" is not a directory'.format(path))
        sys.exit(1)


def str_to_shift(string):
    """
    Cast a string to a time shift (integer).

    :param string: textual representation of an integer
    :type string: string
    :rtype: int
    """
    try:
        return int(string)
    except:
        err('Bad integer: "{0}"'.format(string))
        sys.exit(1)


def mkdirP(path):
    """
    Analog of 'mkdir -p'.
    The implementation of os.makedirs() is inconsistent with its
    documentation: the latter says 'Like mkdir(), but makes all
    intermediate-level directories needed to contain the leaf directory',
    but in real life it creates *all* directories. I don't know what will
    be changed in the next Python 2.6 update - the documentation or the
    os.makedirs() implementation - so I decided not to use os.makedirs()
    at all.

    :param path: directory path
    :type path: string
    """
    if not path or os.path.isdir(path):
        return
    mkdirP(os.path.dirname(path))
    os.mkdir(path)


# ----------------------------------------------------------------------
# entry point

if __name__ == '__main__':
    """
    Script entry point
    """
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], '',
            ['move', 'help', 'dry-run', 'quiet', 'dnrcd', 'debug', 'chmod=',
             'year-shift=', 'month-shift=', 'day-shift=',
             'hour-shift=', 'minute-shift=', 'second-shift='])
    except getopt.GetoptError as e:
        err(e)
        usage()
    if len(args) == 0:
        usage()
    for o, v in opts:
        if o == '--help':
            usage()
        elif o == '--move':
            action = ACTION_MOVE
        elif o == '--quiet':
            quiet = True
        elif o == '--dry-run':
            dry_run = True
        elif o == '--debug':
            debug = True
        elif o == '--dnrcd':
            remove_cleared_dirs = False
        elif o == '--chmod':
            files_mode = int(v, 8)
        elif o == '--year-shift':
            time_shift['days'] += str_to_shift(v) * 365
        elif o == '--month-shift':
            time_shift['days'] += str_to_shift(v) * 30
        elif o == '--day-shift':
            time_shift['days'] += str_to_shift(v)
        elif o == '--hour-shift':
            time_shift['hours'] = str_to_shift(v)
        elif o == '--minute-shift':
            time_shift['minutes'] = str_to_shift(v)
        elif o == '--second-shift':
            time_shift['seconds'] = str_to_shift(v)
    if len(args) != 2:
        err('bad arguments')
        sys.exit(1)
    src_dir = args[0]
    dst_dir = args[1]
    check_dir(src_dir)
    check_dir(dst_dir)
    process_dir(src_dir, dst_dir)
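# --- Illustrative invocation sketch (not part of the original script; the
# --- script name "sort_media.py" below is only a placeholder) ----------------
# Based on usage() above, typical runs could look like:
#
#   $ ./sort_media.py --dry-run /media/camera /archive/photos
#   $ ./sort_media.py --move --chmod=0644 --hour-shift=2 /media/camera /archive/photos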
bsd-2-clause
-3,257,157,324,738,049,000
31.15708
77
0.581218
false
3.821983
false
false
false
alfa-addon/addon
plugin.video.alfa/channels/tnaflix.py
1
7036
# -*- coding: utf-8 -*- #------------------------------------------------------------ import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo else: import urlparse # Usamos el nativo de PY2 que es más rápido import re from platformcode import config, logger from core import scrapertools from core.item import Item from core import servertools from core import httptools from core import tmdb from core import jsontools host = 'https://www.tnaflix.com' def mainlist(item): logger.info() itemlist = [] itemlist.append(item.clone(title="Nuevas" , action="lista", url=host + "/new/?d=all&period=all")) itemlist.append(item.clone(title="Popular" , action="lista", url=host + "/popular/?d=all&period=all")) itemlist.append(item.clone(title="Mejor valorado" , action="lista", url=host + "/toprated/?d=all&period=month")) itemlist.append(item.clone(title="Canal" , action="catalogo", url=host + "/channels/all/top-rated/1/all")) itemlist.append(item.clone(title="PornStars" , action="categorias", url=host + "/pornstars")) itemlist.append(item.clone(title="Categorias" , action="categorias", url=host + "/categories/")) itemlist.append(item.clone(title="Buscar", action="search")) return itemlist def search(item, texto): logger.info() texto = texto.replace(" ", "+") item.url = "%s/search.php?what=%s&tab=" % (host, texto) try: return lista(item) except: import sys for line in sys.exc_info(): logger.error("%s" % line) return [] def catalogo(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '<div class="vidcountSp">(\d+)</div>.*?' patron += '<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?' patron += 'data-original="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedurl = urlparse.urljoin(item.url,scrapedurl) title = "%s (%s)" % (scrapedtitle,cantidad) scrapedplot = "" itemlist.append(item.clone(action="lista", title=title, url=scrapedurl, fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append(item.clone(action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) return itemlist def categorias(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data) if "pornstars" in item.url: data = scrapertools.find_single_match(data,'</i> Hall Of Fame Pornstars</h1>(.*?)</section>') patron = '<a class="thumb" href="([^"]+)">.*?' patron += '<img src="([^"]+)".*?' patron += '<div class="vidcountSp">(.*?)</div>.*?' 
patron += '<a class="categoryTitle".*?>([^"]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: scrapedplot = "" if not scrapedthumbnail.startswith("https"): scrapedthumbnail = "https:%s" % scrapedthumbnail scrapedurl = urlparse.urljoin(item.url,scrapedurl) if not scrapedurl.startswith("https"): scrapedurl = "https:%s" % scrapedurl if "profile" in scrapedurl: scrapedurl += "?section=videos" scrapedtitle = "%s (%s)" % (scrapedtitle,cantidad) itemlist.append(item.clone(action="lista", title=scrapedtitle , url=scrapedurl , fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) ) next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append(item.clone(action="categorias", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page_url , folder=True) ) return itemlist def lista(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data) patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?' patron += 'data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>(.*?)<div class=\'watchedInfo' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,quality in matches: url = urlparse.urljoin(item.url,scrapedurl) title = "[COLOR yellow]%s[/COLOR] %s" % (duracion, scrapedtitle) if quality: quality= scrapertools.find_single_match(quality, '>(\d+p)<') title = "[COLOR yellow]%s[/COLOR] [COLOR red]%s[/COLOR] %s" % (duracion, quality, scrapedtitle) contentTitle = title thumbnail = scrapedthumbnail plot = "" itemlist.append(item.clone(action="play" , title=title , url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = contentTitle)) next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url) itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page_url) ) return itemlist def ref(url): logger.info() itemlist = [] data = httptools.downloadpage(url).data VID = scrapertools.find_single_match(data,'id="VID" type="hidden" value="([^"]+)"') vkey = scrapertools.find_single_match(data,'id="vkey" type="hidden" value="([^"]+)"') thumb = scrapertools.find_single_match(data,'id="thumb" type="hidden" value="([^"]+)"') nkey= scrapertools.find_single_match(data,'id="nkey" type="hidden" value="([^"]+)"') url = "https://cdn-fck.tnaflix.com/tnaflix/%s.fid?key=%s&VID=%s&nomp4=1&catID=0&rollover=1&startThumb=%s" % (vkey, nkey, VID, thumb) url += "&embed=0&utm_source=0&multiview=0&premium=1&country=0user=0&vip=1&cd=0&ref=0&alpha" return url def play(item): logger.info() itemlist = [] url= ref(item.url) headers = {'Referer': item.url} data = httptools.downloadpage(url, headers=headers).data patron = '<res>(.*?)</res>.*?' patron += '<videoLink><([^<]+)></videoLink>' matches = scrapertools.find_multiple_matches(data, patron) for title, url in matches: url= url.replace("![CDATA[", "http:").replace("]]", "") itemlist.append([".mp4 %s" % (title), url]) # itemlist.reverse() return itemlist
gpl-3.0
-8,309,674,059,046,816,000
44.070513
139
0.61556
false
3.43143
false
false
false
wittrup/crap
whitespace/definitions.py
1
2242
whspchars = "\t\n " nonwhite = bytearray(set(range(0x00, 0x100)) - {9, 10, 32}) """http://compsoc.dur.ac.uk/whitespace/tutorial.html Whitespace tutorial The only lexical tokens in the whitespace language are Space (ASCII 32), Tab (ASCII 9) and Line Feed (ASCII 10). By only allowing line feed as a token, CR/LF problems are avoided across DOS/Unix file conversions. (Um, not sure. Maybe we'll sort this in a later version.). The language itself is an imperative, stack based language. Each command consists of a series of tokens, beginning with the Instruction Modification Parameter (IMP). These are listed in the table below.""" IMP = {} IMP[" "] = "Stack_Manipulation" IMP["\t "] = "Arithmetic" IMP["\t\t"] = "Heap_access" IMP["\n"] = "Flow_Control" IMP["\t\n"] = "I/O" """The virtual machine on which programs run has a stack and a heap. The programmer is free to push arbitrary width integers onto the stack (only integers, currently there is no implementation of floating point or real numbers). The heap can also be accessed by the user as a permanent store of variables and data structures. Many commands require numbers or labels as parameters. Numbers can be any number of bits wide, and are simply represented as a series of [Space] and [Tab], terminated by a [LF]. [Space] represents the binary digit 0, [Tab] represents 1. The sign of a number is given by its first character, [Space] for positive and [Tab] for negative. Note that this is not twos complement, it just indicates a sign. Labels are simply [LF] terminated lists of spaces and tabs. There is only one global namespace so all labels must be unique.""" ######################################################################################################################################################################################################################## """Stack Manipulation (IMP: [Space]) Stack manipulation is one of the more common operations, hence the shortness of the IMP [Space]. There are four stack instructions.""" SM = {} SM[" "] = "Push the number onto the stack - Parameters Number" SM["\n "] = "Duplicate the top item on the stack" SM["\n\t"] = "Swap the top two items on the stack" SM["\n\n"] = "Discard the top item on the stack"
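# --- Illustrative sketch (not part of the original module) -------------------
# The tutorial text above describes the number encoding: a sign character
# ([Space] = positive, [Tab] = negative) followed by binary digits in which
# [Space] is 0 and [Tab] is 1, terminated by [LF]. A minimal decoder based on
# that description could look like this:
def decode_number(tokens):
    """Decode a whitespace-encoded number, e.g. ' \t \t\n' -> 5."""
    sign = 1 if tokens[0] == ' ' else -1
    value = 0
    for ch in tokens[1:]:
        if ch == '\n':  # [LF] terminates the number
            break
        value = (value << 1) | (1 if ch == '\t' else 0)
    return sign * value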
mit
-2,076,799,982,940,555,000
58.026316
216
0.665031
false
4.03964
false
false
false
Nicotine-Plus/nicotine-plus
pynicotine/plugins/multipaste/__init__.py
1
2462
# COPYRIGHT (C) 2020-2021 Nicotine+ Team
# COPYRIGHT (C) 2009 Daelstorm <daelstorm@gmail.com>
# COPYRIGHT (C) 2008 Quinox <quinox@users.sf.net>
#
# GNU GENERAL PUBLIC LICENSE
#    Version 3, 29 June 2007
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from pynicotine.pluginsystem import BasePlugin
from pynicotine.pluginsystem import returncode


class Plugin(BasePlugin):

    __name__ = "Multi Paste"
    settings = {
        'maxpubliclines': 4,
        'maxprivatelines': 8,
    }
    metasettings = {
        'maxpubliclines': {"description": 'The maximum number of lines that will be pasted in public', 'type': 'int'},
        'maxprivatelines': {"description": 'The maximum number of lines that will be pasted in private', 'type': 'int'},
    }

    def OutgoingPrivateChatEvent(self, user, line):  # noqa
        lines = [x for x in line.splitlines() if x]

        if len(lines) > 1:
            if len(lines) > self.settings['maxprivatelines']:
                self.log("Posting " + str(self.settings['maxprivatelines']) + " of " + str(len(lines)) + " lines.")
            else:
                self.log("Splitting lines.")
            for split_line in lines[:self.settings['maxprivatelines']]:
                self.sayprivate(user, split_line)

            return returncode['zap']

        return None

    def OutgoingPublicChatEvent(self, room, line):  # noqa
        lines = [x for x in line.splitlines() if x]

        if len(lines) > 1:
            if len(lines) > self.settings['maxpubliclines']:
                self.log("Posting " + str(self.settings['maxpubliclines']) + " of " + str(len(lines)) + " lines.")
            else:
                self.log("Splitting lines.")
            for split_line in lines[:self.settings['maxpubliclines']]:
                self.saypublic(room, split_line)

            return returncode['zap']

        return None
gpl-3.0
7,843,400,834,001,445,000
35.746269
120
0.639318
false
3.817054
false
false
false
Dubrzr/dsfaker
dsfaker/generators/date.py
1
1107
import numpy from . import Generator, ScalingOperator, BoundedGenerator class RandomDatetime(Generator): def __init__(self, generator: BoundedGenerator, start: numpy.datetime64, end: numpy.datetime64, unit): """ A timezone-aware class to generate datetimes between start and end (inclusive) following a certain distribution :param start: The starting date (inclusive) :param end: The ending date (inclusive) :param unit: The time unit to use for the distribution ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'us', 'ms', 'ns', 'ps', 'fs', 'as') """ self.rnb = ScalingOperator(generator=generator, lb=0, ub=(end - start) / numpy.timedelta64(1, unit), dtype=numpy.float64) self.start = start self.end = end self.unit = unit self.td_unit = 'timedelta64[{}]'.format(unit) def get_single(self): return self.start + numpy.timedelta64(int(round(self.rnb.get_single())), self.unit) def get_batch(self, batch_size: int): return self.start + self.rnb.get_batch(batch_size=batch_size).astype(self.td_unit)
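# --- Illustrative sketch (plain numpy; not part of dsfaker) -------------------
# RandomDatetime above maps a bounded random value onto the timedelta between
# `start` and `end`. The same idea, expressed directly with numpy and assuming
# a uniform distribution at day resolution, looks roughly like this:
def _uniform_date_sketch():
    start = numpy.datetime64('2020-01-01')
    end = numpy.datetime64('2020-12-31')
    span_days = (end - start) / numpy.timedelta64(1, 'D')  # span as a float
    offset = int(round(numpy.random.uniform(0, float(span_days))))
    return start + numpy.timedelta64(offset, 'D')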
mit
7,846,631,893,601,746,000
43.28
134
0.64589
false
3.582524
false
false
false
Jaapp-/cloudomate
cloudomate/gateway/bitpay.py
1
2301
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os from math import pow import electrum.bitcoin as bitcoin from electrum import paymentrequest as pr from future import standard_library from future.moves.urllib import request from future.moves.urllib.parse import urlsplit, parse_qs from cloudomate.gateway.gateway import Gateway, PaymentInfo standard_library.install_aliases() class BitPay(Gateway): @staticmethod def get_name(): return "BitPay" @staticmethod def extract_info(url): """ Extracts amount and BitCoin address from a BitPay URL. :param url: the BitPay URL like "https://bitpay.com/invoice?id=J3qU6XapEqevfSCW35zXXX" :return: a tuple of the amount in BitCoin along with the address """ # https://bitpay.com/ or https://test.bitpay.com uspl = urlsplit(url) base_url = "{0.scheme}://{0.netloc}".format(uspl) print(base_url) invoice_id = uspl.query.split("=")[1] # On the browser, users have to select between Bitcoin and Bitcoin cash # trigger bitcoin selection for successful transaction trigger_url = "{}/invoice-noscript?id={}&buyerSelectedTransactionCurrency=BTC".format(base_url, invoice_id) print(trigger_url) request.urlopen(trigger_url) # Make the payment payment_url = "bitcoin:?r={}/i/{}".format(base_url, invoice_id) print(payment_url) # Check for testnet mode if os.getenv('TESTNET', '0') == '1' and uspl.netloc == 'test.bitpay.com': bitcoin.set_testnet() # get payment request using Electrum's lib pq = parse_qs(urlsplit(payment_url).query) out = {k: v[0] for k, v in pq.items()} payreq = pr.get_payment_request(out.get('r')).get_dict() # amount is in satoshis (1/10e8 Bitcoin) amount = float(payreq.get('amount')) / pow(10, 8) address = payreq.get('requestor') return PaymentInfo(amount, address) @staticmethod def get_gateway_fee(): """Get the BitPay gateway fee. See: https://bitpay.com/pricing :return: The BitPay gateway fee """ return 0.01
lgpl-3.0
-7,944,241,061,399,661,000
31.408451
115
0.646675
false
3.705314
false
false
false
wheeler-microfluidics/teensy-minimal-rpc
rename.py
1
2609
from __future__ import absolute_import
import sys

import pandas as pd
from path_helpers import path


def main(root, old_name, new_name):
    names = pd.Series([old_name, new_name], index=['old', 'new'])
    underscore_names = names.map(lambda v: v.replace('-', '_'))
    camel_names = names.str.split('-').map(lambda x: ''.join([y.title()
                                                              for y in x]))

    # Replace all occurrences of provided original name with new name, and all
    # occurrences where dashes (i.e., '-') are replaced with underscores.
    #
    # Dashes are used in Python package names, but underscores are used in
    # Python module names.
    for p in path(root).walkfiles():
        data = p.bytes()
        if '.git' not in p and (names.old in data or
                                underscore_names.old in data or
                                camel_names.old in data):
            p.write_bytes(data.replace(names.old, names.new)
                          .replace(underscore_names.old, underscore_names.new)
                          .replace(camel_names.old, camel_names.new))

    def rename_path(p):
        if '.git' in p:
            return
        if underscore_names.old in p.name:
            p.rename(p.parent.joinpath(p.name.replace(underscore_names.old,
                                                      underscore_names.new)))
        if camel_names.old in p.name:
            p.rename(p.parent.joinpath(p.name.replace(camel_names.old,
                                                      camel_names.new)))

    # Rename all files/directories containing original name with new name, and
    # all occurrences where dashes (i.e., '-') are replaced with underscores.
    #
    # Process list of paths in *reverse order* to avoid renaming parent
    # directories before children.
    for p in sorted(list(path(root).walkdirs()))[-1::-1]:
        rename_path(p)
    for p in path(root).walkfiles():
        rename_path(p)


def parse_args(args=None):
    """Parses arguments, returns (options, args)."""
    from argparse import ArgumentParser

    if args is None:
        args = sys.argv

    parser = ArgumentParser(description='Rename template project with '
                            'hyphen-separated <new name> (path names and in '
                            'files).')
    parser.add_argument('new_name', help='New project name (e.g., '
                        '`my-new-project`)')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    main('.', 'teensy-minimal-rpc', args.new_name)
gpl-3.0
-8,312,201,469,927,363,000
36.811594
78
0.558835
false
4.201288
false
false
false
jerjou/gssheet
gssheet/spreadsheet.py
1
9422
import cgi import csv import datetime import json import re import oauth2 class Cell: def __init__(self, xml): self.row = xml['gs$cell']['row'] self.col = xml['gs$cell']['col'] self.value = xml['gs$cell']['inputValue'] self.edit_url = (l['href'] for l in xml['link'] if 'edit' == l['rel']).next() def __repr__(self): return self.value class Cells: def __init__(self, oauth, cells_url, worksheet): self.oauth = oauth self.cells_url = cells_url self.worksheet = worksheet self.__cells = None self._row_count = None self._col_count = None def __iter__(self): cells = self._cells return iter( (cells.get((r+1, c+1)) for c in range(self.col_count)) for r in range(self.row_count)) def __len__(self): return len(self._cells.keys()) def __contains__(self, tup): return tup in self._cells def __getitem__(self, tup): """ Args: tup is a tuple of (row, col) """ #row, col = tup #return self._cells[row][col] return self._cells.get(tup) @property def col_count(self): if not self._col_count: self._get_cells() return self._col_count @property def row_count(self): if not self._row_count: self._get_cells() return self._row_count @property def _cells(self): if not self.__cells: self._get_cells() return self.__cells def _get_cells(self): data = self.oauth.get(self.cells_url, params={ 'min-row': 1, 'min-col': 1, 'return-empty': 'true', }).json self.__cells = dict( ((int(d['gs$cell']['row']), int(d['gs$cell']['col'])), Cell(d)) for d in data['feed']['entry']) self._col_count = int(data['feed']['gs$colCount']['$t']) self._row_count = int(data['feed']['gs$rowCount']['$t']) def append(self, value): self[self.row_count + 1, 1] = value def extend(self, value): self.append(value) def __setitem__(self, key, value): off_row, off_col = key args = {} if isinstance(value, list): if isinstance(value[0], list): cols = max(len(r) for r in value) rows = len(value) else: cols = len(value) rows = 1 else: cols = 1 rows = 1 if off_col + cols - 1 > self.col_count: args['col_count'] = off_col + cols - 1 if off_row + rows - 1 > self.row_count: args['row_count'] = off_row + rows - 1 if args: self.worksheet.modify(**args) # refresh self._get_cells() entry_template = """ <entry> <batch:id>%(id)s</batch:id> <batch:operation type="update"/> <id>%(base_url)s/R%(row)sC%(col)s</id> <link rel="edit" type="application/atom+xml" href="%(edit_url)s"/> <gs:cell row="%(row)s" col="%(col)s" inputValue="%(value)s"/> </entry> """ default_edit_url = '%s/R%%sC%%s' % self.cells_url def make_xml(row, row_offset=off_row): xml = [ entry_template % { 'id': str(c), 'row': row_offset, 'col': c, 'value': cgi.escape( v.strftime('%Y-%m-%d %H:%M:%S') if isinstance(v, datetime.datetime) else str(v), quote=True), 'base_url': self.cells_url, 'edit_url': self[(row_offset, c)].edit_url if ( row_offset,c) in self else default_edit_url % (row_offset, c), } for c, v in enumerate(row, start=off_col)] return '\n'.join(xml) if isinstance(value[0], list): #xml = '\n'.join(make_xml(row, r) for r, row in enumerate(value, start=off_row)) xml = '\n'.join(make_xml(row, row_offset=r) for r, row in enumerate(value, start=off_row)) else: xml = make_xml(value) r = self.oauth.post( self.cells_url + '/batch', data = """ <feed xmlns="http://www.w3.org/2005/Atom" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gs="http://schemas.google.com/spreadsheets/2006"> <id>%s</id> %s </feed> """ % (self.cells_url, xml)) self.__cells = None if 'Error' in r.text: raise Exception('Error updating cells:\n%s' % re.sub(r'<[^>]*>([^<]*)</[^>]*>', '\n\\1', r.text)) if 2 != r.status_code // 100: raise 
Exception('Error (%s) updating cells: %s' % (r.status_code, r.text)) class Worksheet: def __init__(self, oauth, sheet, parent): self.oauth = oauth links = dict((l['rel'][-9:], l['href']) for l in sheet['link']) self.name = sheet['title']['$t'] self.edit_url = links['edit'] self.cells_url = links['cellsfeed'] self.list_url = links['#listfeed'] self.row_count = sheet['gs$rowCount']['$t'] self.col_count = sheet['gs$colCount']['$t'] self.spreadsheet = parent self._cells = None @property def cells(self): if not self._cells: self._cells = Cells(self.oauth, self.cells_url, self) return self._cells @cells.setter def cells(self, value): # TODO(jerjou): reset the spreadsheet to the size of the value self.cells[1, 1] = value def modify(self, rename=None, row_count=None, col_count=None): data = """ <entry xmlns="http://www.w3.org/2005/Atom" xmlns:gs="http://schemas.google.com/spreadsheets/2006"> <id>%s</id> <updated>%s</updated> <title type="text">%s</title> <gs:rowCount>%s</gs:rowCount> <gs:colCount>%s</gs:colCount> </entry> """ % (self.edit_url, datetime.datetime.utcnow().isoformat(), cgi.escape(rename or self.name, quote=True), row_count or self.row_count, col_count or self.col_count) r = self.oauth.put( self.edit_url, data = data) if 2 == r.status_code // 100: self.__init__(self.oauth, r.json['entry'], self.spreadsheet) else: raise Exception('Error modifying sheet (%s): %s' % (rename or self.name, r.text)) # TODO(jerjou): update the edit_url return r def clear(self, header_rows=1): return self.modify(row_count=header_rows) def delete(self): return self.oauth.delete(self.edit_url) def copy_to(self, new_name): cells = self.cells new_sheet = self.spreadsheet.create_sheet(new_name, header=cells[0]) new_sheet.cells = cells @property def rows(self): raise NotImplementedError if self._rows: return self._rows rows = self.oauth.get(self.list_url).json class Spreadsheet: def __init__(self, oauth, entry): self._title = entry.get('title', {}).get('$t') self._url = (l['href'] for l in entry['link'] if l['rel'][-15:] == '#worksheetsfeed').next() self.use_cache = True self._worksheets = None self.oauth = oauth def __repr__(self): return '%s:%s' % (self._title, self._url) @property def title(self): """The 'title' property. TODO(jerjou): make this settable. 
""" return self._title @property def worksheets(self): if self.use_cache and self._worksheets: return self._worksheets obj = self.oauth.get(self._url).json self._worksheets = dict( (o.name, o) for o in [Worksheet(self.oauth, sheet, self) for sheet in obj['feed']['entry']]) return self._worksheets def create_sheet(self, title, header=[]): r = self.oauth.post( self._url, """ <entry xmlns="http://www.w3.org/2005/Atom" xmlns:gs="http://schemas.google.com/spreadsheets/2006"> <title>%s</title> <gs:colCount>%s</gs:colCount> <gs:rowCount>1</gs:rowCount> </entry> """ % (cgi.escape(title, quote=True), len(header))) if 2 != r.status_code // 100: raise Exception('Error creating sheet (%s): %s' % (title, r.text)) ws = Worksheet(self.oauth, r.json['entry'], self) ws.cells[(1, 1)] = [header] self._worksheets = None if 2 != r.status_code // 100: raise Exception('Error creating header (%s:%s): %s' % (title, header, r.text)) return self.worksheets[title] class SpreadsheetAPI: _SS_URL = 'https://spreadsheets.google.com/feeds' def __init__(self, client_id, client_secret, spreadsheet_id=None, _oauth=oauth2.OAuth2): """ Args: client_id The oauth2 client secret for your installed application, create at: https://code.google.com/apis/console/#:access client_secret The oauth2 client secret for your installed application. spreadsheet_id The alphanumeric id of the spreadsheet. If you give this a value, the object will have a 'spreadsheet' property, which will be the title-less Spreadsheet given.. """ self._worksheets = None self.oauth = _oauth( client_id, client_secret, default_params={'alt': 'json'}, default_headers={'Content-Type': 'application/atom+xml'}) self._list = [] if spreadsheet_id: href = self._SS_URL + '/worksheets/%s/private/full' % spreadsheet_id self.spreadsheet = Spreadsheet( self.oauth, { 'title': {'$t': ''}, 'link': [ {'href': href, 'rel': '#worksheetsfeed'} ] }) @property def spreadsheets(self): if not self._list: r = self.oauth.get( self._SS_URL + '/spreadsheets/private/full') if 2 == r.status_code // 100: entries = r.json['feed']['entry'] self._list = dict( (s.title, s) for s in ( Spreadsheet(self.oauth, e) for e in entries)) return self._list
bsd-2-clause
-3,269,858,196,076,954,000
27.638298
90
0.573445
false
3.308287
false
false
false
mcfletch/django-assets
django_assets/env.py
1
8523
import imp import threading from importlib import import_module from django.apps import apps from django.contrib.staticfiles import finders from django.conf import settings from webassets.env import ( BaseEnvironment, ConfigStorage, Resolver, url_prefix_join) from django_assets.glob import Globber, has_magic __all__ = ('register',) class DjangoConfigStorage(ConfigStorage): _mapping = { 'debug': 'ASSETS_DEBUG', 'cache': 'ASSETS_CACHE', 'updater': 'ASSETS_UPDATER', 'auto_build': 'ASSETS_AUTO_BUILD', 'url_expire': 'ASSETS_URL_EXPIRE', 'versions': 'ASSETS_VERSIONS', 'manifest': 'ASSETS_MANIFEST', 'load_path': 'ASSETS_LOAD_PATH', 'url_mapping': 'ASSETS_URL_MAPPING', } def _transform_key(self, key): if key.lower() == 'directory': if hasattr(settings, 'ASSETS_ROOT'): return 'ASSETS_ROOT' if getattr(settings, 'STATIC_ROOT', None): # Is None by default return 'STATIC_ROOT' return 'MEDIA_ROOT' if key.lower() == 'url': if hasattr(settings, 'ASSETS_URL'): return 'ASSETS_URL' if getattr(settings, 'STATIC_URL', None): # Is '' by default return 'STATIC_URL' return 'MEDIA_URL' return self._mapping.get(key.lower(), key.upper()) def __contains__(self, key): return hasattr(settings, self._transform_key(key)) def __getitem__(self, key): if self.__contains__(key): value = self._get_deprecated(key) if value is not None: return value return getattr(settings, self._transform_key(key)) else: raise KeyError("Django settings doesn't define %s" % self._transform_key(key)) def __setitem__(self, key, value): if not self._set_deprecated(key, value): setattr(settings, self._transform_key(key), value) def __delitem__(self, key): # This isn't possible to implement in Django without relying # on internals of the settings object, so just set to None. self.__setitem__(key, None) class StorageGlobber(Globber): """Globber that works with a Django storage.""" def __init__(self, storage): self.storage = storage def isdir(self, path): # No API for this, though we could a) check if this is a filesystem # storage, then do a shortcut, otherwise b) use listdir() and see # if we are in the directory set. # However, this is only used for the "sdf/" syntax, so by returning # False we disable this syntax and cause it no match nothing. return False def islink(self, path): # No API for this, just act like we don't know about links. return False def listdir(self, path): directories, files = self.storage.listdir(path) return directories + files def exists(self, path): try: return self.storage.exists(path) except NotImplementedError: return False class DjangoResolver(Resolver): """Adds support for staticfiles resolving.""" @property def use_staticfiles(self): return getattr(settings,'ASSETS_DEBUG',settings.DEBUG) and \ 'django.contrib.staticfiles' in settings.INSTALLED_APPS def glob_staticfiles(self, item): # The staticfiles finder system can't do globs, but we can # access the storages behind the finders, and glob those. for finder in finders.get_finders(): # Builtin finders use either one of those attributes, # though this does seem to be informal; custom finders # may well use neither. Nothing we can do about that. 
if hasattr(finder, 'storages'): storages = finder.storages.values() elif hasattr(finder, 'storage'): storages = [finder.storage] else: continue for storage in storages: globber = StorageGlobber(storage) for file in globber.glob(item): yield storage.path(file) def search_for_source(self, ctx, item): if not self.use_staticfiles: return Resolver.search_for_source(self, ctx, item) if has_magic(item): return list(self.glob_staticfiles(item)) else: f = finders.find(item) if f is not None: return f raise IOError( "'%s' not found (using staticfiles finders)" % item) def resolve_source_to_url(self, ctx, filepath, item): if not self.use_staticfiles: return Resolver.resolve_source_to_url(self, ctx, filepath, item) # With staticfiles enabled, searching the url mappings, as the # parent implementation does, will not help. Instead, we can # assume that the url is the root url + the original relative # item that was specified (and searched for using the finders). import os item = item.replace(os.sep, "/") return url_prefix_join(ctx.url, item) class DjangoEnvironment(BaseEnvironment): """For Django, we need to redirect all the configuration values this object holds to Django's own settings object. """ config_storage_class = DjangoConfigStorage resolver_class = DjangoResolver # Django has a global state, a global configuration, and so we need a # global instance of a asset environment. env = None env_lock = threading.RLock() def get_env(): # While the first request is within autoload(), a second thread can come # in and without the lock, would use a not-fully-loaded environment. with env_lock: global env if env is None: env = DjangoEnvironment() # Load application's ``assets`` modules. We need to do this in # a delayed fashion, since the main django_assets module imports # this, and the application ``assets`` modules we load will import # ``django_assets``, thus giving us a classic circular dependency # issue. autoload() return env def reset(): global env env = None # The user needn't know about the env though, we can expose the # relevant functionality directly. This is also for backwards-compatibility # with times where ``django-assets`` was a standalone library. def register(*a, **kw): return get_env().register(*a, **kw) _ASSETS_LOADED = False def autoload(): """Find assets by looking for an ``assets`` module within each installed application, similar to how, e.g., the admin autodiscover process works. This is were this code has been adapted from, too. Only runs once. """ global _ASSETS_LOADED if _ASSETS_LOADED: return False # Import this locally, so that we don't have a global Django # dependency. from django.conf import settings for app in apps.get_app_configs(): # For each app, we need to look for an assets.py inside that # app's package. We can't use os.path here -- recall that # modules may be imported different ways (think zip files) -- # so we need to get the app's __path__ and look for # admin.py on that path. # Step 1: find out the app's __path__ Import errors here will # (and should) bubble up, but a missing __path__ (which is # legal, but weird) fails silently -- apps that do weird things # with __path__ might need to roll their own registration. try: app_path = app.path except AttributeError: continue # Step 2: use imp.find_module to find the app's assets.py. # For some reason imp.find_module raises ImportError if the # app can't be found but doesn't actually try to import the # module. 
So skip this app if its assets.py doesn't exist try: imp.find_module('assets', [app_path]) except ImportError: continue # Step 3: import the app's assets file. If this has errors we # want them to bubble up. #app_name = deduce_app_name(app) import_module("{}.assets".format(app.name)) # Load additional modules. for module in getattr(settings, 'ASSETS_MODULES', []): import_module("%s" % module) _ASSETS_LOADED = True
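# --- Illustrative usage sketch (file names, filter and output are made up) ----
# register() above forwards to the webassets environment, so an application's
# assets.py would typically do something along these lines:
#
#   from django_assets import Bundle, register
#   register('js_all', Bundle('js/jquery.js', 'js/site.js',
#                             filters='jsmin', output='gen/packed.js'))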
bsd-2-clause
5,771,456,800,310,562,000
33.228916
78
0.611756
false
4.225583
true
false
false
JeromeParadis/django-activity-stream
actstream/migrations/0007_auto__add_field_follow_started.py
1
6331
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models try: # timezone support for django > 1.4 from django.utils import timezone tz = timezone except ImportError: tz = datetime.datetime class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Follow.started' db.add_column('actstream_follow', 'started', self.gf('django.db.models.fields.DateTimeField')(default=tz.now), keep_default=False) def backwards(self, orm): # Deleting field 'Follow.started' db.delete_column('actstream_follow', 'started') models = { 'actstream.action': { 'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'}, 'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}), 'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 25, 18, 9, 44, 904569)'}), 'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'actstream.follow': { 'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Follow'}, 'actor_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'started': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 25, 18, 9, 44, 906684)'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 25, 18, 9, 44, 903165)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 25, 18, 9, 44, 903031)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['actstream']
bsd-3-clause
8,487,151,324,711,568,000
71.770115
204
0.561365
false
3.715376
false
false
false
cedricbonhomme/Grenouille
watcher.py
1
2434
#! /usr/bin/env python # -*- coding: utf-8 -*- # Grenouille - An online service for weather data. # Copyright (C) 2014 Cédric Bonhomme - http://cedricbonhomme.org/ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from datetime import datetime import argparse import sys from yoctopuce.yocto_api import YAPI from yoctometeo.station import Station from yoctometeo import logger, __version__ def watch_station(delay=3600, verbose=True, loop=False): delay = delay * 1000 station = Station() def _get_data(): data = {"date": datetime.now()} for sensor, value, fmt_value in station.get_info(): data[sensor.split(".")[-1]] = value if verbose: print data if not loop: _get_data() return while True: _get_data() YAPI.Sleep(delay) def main(): parser = argparse.ArgumentParser(description="Grenouille watcher.") parser.add_argument( "--version", action="store_true", default=False, help="Displays version and exits.", ) parser.add_argument( "-d", "--delay", help="Delay in seconds between two calls.", type=int, default=3600.0, ) parser.add_argument( "-v", "--verbose", action="store_true", help="Verbose", default=False ) parser.add_argument( "-l", "--loop", action="store_true", help="Loop forever", default=False ) args = parser.parse_args() if args.version: yocto = YAPI.GetAPIVersion() print ("Grenouille v%s - Yoctopuce v%s" % (__version__, yocto)) sys.exit(0) try: watch_station(loop=args.loop, delay=args.delay, verbose=args.verbose) except KeyboardInterrupt: pass logger.info("Bye!") if __name__ == "__main__": main()
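# --- Illustrative invocation sketch (not part of the original script) ---------
# Based on the argparse options defined in main() above:
#
#   $ python watcher.py --loop --delay 600 --verbose   # poll every 10 minutes
#   $ python watcher.py --version                      # print versions and exit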
agpl-3.0
-7,473,430,504,389,210,000
25.445652
79
0.637896
false
3.748844
false
false
false
strogo/djpcms
djpcms/apps/cache.py
1
5941
from copy import deepcopy from django.core.cache import cache from django.contrib.sites.models import Site from django.db.models import signals from django.http import Http404 from djpcms import sites from djpcms.models import Page from djpcms.views import appsite from djpcms.views.baseview import pageview class PageCache(object): def __init__(self): self._domain = None self.applications_url = None def clear(self, request = None): cache.clear() if request: self.session(request)['application-urls-built'] = 0 def session(self, request): return getattr(request,'session',{}) @property def domain(self): if not self._domain: site = Site.objects.get_current() self._domain = site.domain return self._domain def idkey(self, id): return '%s:pagecache:id:%s' % (self.domain,id) def appkey(self, code): return '%s:pagecache:app:%s' % (self.domain,code) def urlkey(self, url): return '%s:pagecache:url:%s' % (self.domain,url) def build_app_urls(self, request, force = True): session = self.session(request) b = session.get('application-urls-built',0) if not self.applications_url or (force and not b): self.applications_url = get_urls() session['application-urls-built'] = 1 return self.applications_url def view_from_url(self, url): '''Get a view object given a url''' page = self.get_from_url(url) if page: return self.view_from_page(page, False) else: return None def view_from_page(self, page, site = None, docache = True): '''Retrive a view instance from a page instance. If the page is for an application view, site must be provided otherwise no search will be performed.''' force = False view = None if docache: force = self._set_if_not(self.urlkey(page.url),page) if page.application_view: if site: view = site.getapp(page.application_view) if not view: raise Http404 else: # Flat pages get created each time view = pageview(page) return view def get_from_id(self, id): key = self.idkey(id) page,created = self._get_and_cache(key, pk = id) return page def get_from_url(self, url): '''Get a page given a url''' key = self.urlkey(url) page = cache.get(key,None) if page: return page try: page = Page.objects.sitepage(url = url) cache.set(key, page) return page except: return None def get_for_application(self, code): '''Return an iterable of pages for a given application view code. 
Stre them into cache.''' key = self.appkey(code) pages, created = self._get_and_cache(key, application_view = code) if pages and not hasattr(pages,'__iter__'): pages = [pages] #if created: # for page in pages: # if page.application_view: # key = self.urlkey(page.url) # cache.set(key, page) return pages def _get_and_cache(self, key, **kwargs): pages = cache.get(key,None) if pages: return pages, False elif pages is None: try: pages = Page.objects.sitepage(**kwargs) cache.set(key, pages) return pages, True except: pages = Page.objects.sitepages(**kwargs) if pages: cache.set(key, pages) return pages, True else: cache.set(key, False) return None,False else: return None,False def _set_if_not(self, key, page, force = None): if force is None: p = cache.get(key,None) if not p: cache.set(key,page) return True elif force: cache.set(key,page) return True return False def get_children(self,page): key = '%s:pagecache:children:%s' % (self.domain,page.url) children = cache.get(key,None) if children is None: children = list(page.children.all().order_by('in_navigation')) cache.set(key,children) for child in children: cache.set(self.idkey(child.id), child) cache.set(self.urlkey(child.url), child) return children def sitemap(self): from djpcms.views import appsite key = '%s:pagecache:sitemap' % self.domain map = cache.get(key,None) if not map: pages = Page.objects.sitepages(is_published = True, requires_login = False, insitemap = True) map = [] for page in pages: if page.application_view: try: app = appsite.site.getapp(page.application_view) except: continue if app.insitemap and app.has_permission(): if not app.regex.targs: map.append(page) else: appmodel = getattr(app,'appmodel',None) if appmodel: map.extend(app.sitemapchildren()) else: map.append(page) cache.set(key,map) return map def clearcache(*args, **kwargs): sites.clearcache() signals.post_save.connect(clearcache, sender=Page) signals.post_delete.connect(clearcache, sender=Page)
bsd-3-clause
7,900,610,637,577,406,000
31.464481
105
0.529709
false
4.192661
false
false
false
nafitzgerald/allennlp
allennlp/modules/token_embedders/embedding.py
1
13971
import gzip import logging from overrides import overrides import numpy import torch from torch.nn.functional import embedding import h5py from allennlp.common import Params from allennlp.common.checks import ConfigurationError from allennlp.common.file_utils import cached_path from allennlp.data import Vocabulary from allennlp.modules.token_embedders.token_embedder import TokenEmbedder from allennlp.modules.time_distributed import TimeDistributed logger = logging.getLogger(__name__) # pylint: disable=invalid-name @TokenEmbedder.register("embedding") class Embedding(TokenEmbedder): """ A more featureful embedding module than the default in Pytorch. Adds the ability to: 1. embed higher-order inputs 2. pre-specify the weight matrix 3. use a non-trainable embedding 4. project the resultant embeddings to some other dimension (which only makes sense with non-trainable embeddings). 5. build all of this easily ``from_params`` Note that if you are using our data API and are trying to embed a :class:`~allennlp.data.fields.TextField`, you should use a :class:`~allennlp.modules.TextFieldEmbedder` instead of using this directly. Parameters ---------- num_embeddings :, int: Size of the dictionary of embeddings (vocabulary size). embedding_dim : int The size of each embedding vector. projection_dim : int, (optional, default=None) If given, we add a projection layer after the embedding layer. This really only makes sense if ``trainable`` is ``False``. weight : torch.FloatTensor, (optional, default=None) A pre-initialised weight matrix for the embedding lookup, allowing the use of pretrained vectors. padding_index : int, (optional, default=None) If given, pads the output with zeros whenever it encounters the index. trainable : bool, (optional, default=True) Whether or not to optimize the embedding parameters. max_norm : float, (optional, default=None) If given, will renormalize the embeddings to always have a norm lesser than this norm_type : float, (optional, default=2): The p of the p-norm to compute for the max_norm option scale_grad_by_freq : boolean, (optional, default=False): If given, this will scale gradients by the frequency of the words in the mini-batch. sparse : bool, (optional, default=False): Whether or not the Pytorch backend should use a sparse representation of the embedding weight. Returns ------- An Embedding module. 
""" def __init__(self, num_embeddings: int, embedding_dim: int, projection_dim: int = None, weight: torch.FloatTensor = None, padding_index: int = None, trainable: bool = True, max_norm: float = None, norm_type: float = 2., scale_grad_by_freq: bool = False, sparse: bool = False) -> None: super(Embedding, self).__init__() self.num_embeddings = num_embeddings self.padding_index = padding_index self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse self.output_dim = projection_dim or embedding_dim if weight is None: weight = torch.FloatTensor(num_embeddings, embedding_dim) self.weight = torch.nn.Parameter(weight, requires_grad=trainable) self.weight.data.normal_(0, 1) else: if weight.size() != (num_embeddings, embedding_dim): raise ConfigurationError("A weight matrix was passed with contradictory embedding shapes.") self.weight = torch.nn.Parameter(weight, requires_grad=trainable) if self.padding_index is not None: self.weight.data[self.padding_index].fill_(0) if projection_dim: self._projection = torch.nn.Linear(embedding_dim, projection_dim) else: self._projection = None @overrides def get_output_dim(self) -> int: return self.output_dim @overrides def forward(self, inputs): # pylint: disable=arguments-differ original_inputs = inputs if original_inputs.dim() > 2: inputs = inputs.view(-1, inputs.size(-1)) embedded = embedding(inputs, self.weight, max_norm=self.max_norm, norm_type=self.norm_type, scale_grad_by_freq=self.scale_grad_by_freq, sparse=self.sparse) if original_inputs.dim() > 2: view_args = list(original_inputs.size()) + [embedded.size(-1)] embedded = embedded.view(*view_args) if self._projection: projection = self._projection for _ in range(embedded.dim() - 2): projection = TimeDistributed(projection) embedded = projection(embedded) return embedded @classmethod def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding': """ We need the vocabulary here to know how many items we need to embed, and we look for a ``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use. If you know beforehand exactly how many embeddings you need, or aren't using a vocabulary mapping for the things getting embedded here, then you can pass in the ``num_embeddings`` key directly, and the vocabulary will be ignored. """ num_embeddings = params.pop('num_embeddings', None) vocab_namespace = params.pop("vocab_namespace", "tokens") if num_embeddings is None: num_embeddings = vocab.get_vocab_size(vocab_namespace) embedding_dim = params.pop('embedding_dim') pretrained_file = params.pop("pretrained_file", None) projection_dim = params.pop("projection_dim", None) trainable = params.pop("trainable", True) padding_index = params.pop('padding_index', None) max_norm = params.pop('max_norm', None) norm_type = params.pop('norm_type', 2.) scale_grad_by_freq = params.pop('scale_grad_by_freq', False) sparse = params.pop('sparse', False) params.assert_empty(cls.__name__) if pretrained_file: # If we're loading a saved model, we don't want to actually read a pre-trained # embedding file - the embeddings will just be in our saved weights, and we might not # have the original embedding file anymore, anyway. 
weight = _read_pretrained_embedding_file(pretrained_file, embedding_dim, vocab, vocab_namespace) else: weight = None return cls(num_embeddings=num_embeddings, embedding_dim=embedding_dim, projection_dim=projection_dim, weight=weight, padding_index=padding_index, trainable=trainable, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse) def _read_pretrained_embedding_file(embeddings_filename: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens") -> torch.FloatTensor: """ Reads a pre-trained embedding file and generates an Embedding layer that has weights initialized to the pre-trained embeddings. The Embedding layer can either be trainable or not. We use the ``Vocabulary`` to map from the word strings in the embeddings file to the indices that we need, and to know which words from the embeddings file we can safely ignore. Parameters ---------- embeddings_filename : str, required. The path to a file containing pretrained embeddings. We support two file formats, gzipped-word2vec and hdf5. If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise assume gzipped-word2vec format. vocab : Vocabulary, required. A Vocabulary object. namespace : str, (optional, default=tokens) The namespace of the vocabulary to find pretrained embeddings for. trainable : bool, (optional, default=True) Whether or not the embedding parameters should be optimized. Returns ------- A weight matrix with embeddings initialized from the read file. The matrix has shape ``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in the pretrained embedding file are initialized to the pretrained embedding value. """ if embeddings_filename[-3:] == '.h5' or embeddings_filename[-5:] == '.hdf5': return _read_pretrained_hdf5_format_embedding_file(embeddings_filename, embedding_dim, vocab, namespace) else: # default to word2vec return _read_pretrained_word2vec_format_embedding_file(embeddings_filename, embedding_dim, vocab, namespace) def _read_pretrained_word2vec_format_embedding_file(embeddings_filename: str, # pylint: disable=invalid-name embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens") -> torch.FloatTensor: """ Read from a gzipped-word2vec format file. The embeddings file is assumed to be gzipped and space delimited, e.g. [word] [dim 1] [dim 2] ... The remainder of the docstring is identical to ``_read_pretrained_embedding_file``. """ words_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values()) vocab_size = vocab.get_vocab_size(namespace) embeddings = {} # First we read the embeddings from the file, only keeping vectors for the words we need. logger.info("Reading embeddings from file") with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file: for line in embeddings_file: fields = line.decode('utf-8').strip().split(' ') if len(fields) - 1 != embedding_dim: # Sometimes there are funny unicode parsing problems that lead to different # fields lengths (e.g., a word with a unicode space character that splits # into more than one column). We skip those lines. Note that if you have # some kind of long header, this could result in all of your lines getting # skipped. It's hard to check for that here; you just have to look in the # embedding_misses_file and at the model summary to make sure things look # like they are supposed to. 
logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s", embedding_dim, len(fields) - 1, line) continue word = fields[0] if word in words_to_keep: vector = numpy.asarray(fields[1:], dtype='float32') embeddings[word] = vector if not embeddings: raise ConfigurationError("No embeddings of correct dimension found; you probably " "misspecified your embedding_dim parameter, or didn't " "pre-populate your Vocabulary") all_embeddings = numpy.asarray(list(embeddings.values())) embeddings_mean = float(numpy.mean(all_embeddings)) embeddings_std = float(numpy.std(all_embeddings)) # Now we initialize the weight matrix for an embedding layer, starting with random vectors, # then filling in the word vectors we just read. logger.info("Initializing pre-trained embedding layer") embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean, embeddings_std) for i in range(0, vocab_size): word = vocab.get_token_from_index(i, namespace) # If we don't have a pre-trained vector for this word, we'll just leave this row alone, # so the word has a random initialization. if word in embeddings: embedding_matrix[i] = torch.FloatTensor(embeddings[word]) else: logger.debug("Word %s was not found in the embedding file. Initialising randomly.", word) # The weight matrix is initialized, so we construct and return the actual Embedding. return embedding_matrix def _read_pretrained_hdf5_format_embedding_file(embeddings_filename: str, # pylint: disable=invalid-name embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens") -> torch.FloatTensor: """ Reads from a hdf5 formatted file. The embedding matrix is assumed to be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``. """ with h5py.File(embeddings_filename, 'r') as fin: embeddings = fin['embedding'][...] if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]: raise ConfigurationError( "Read shape {0} embeddings from the file, but expected {1}".format( list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim])) return torch.FloatTensor(embeddings)
apache-2.0
-4,872,697,086,478,548,000
46.359322
108
0.610622
false
4.495174
false
false
false
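A minimal usage sketch for the Embedding token embedder in the record above. The import paths, the 100-dimension config and the pre-built vocabulary are assumptions for illustration only; they are not part of the original file.

# Illustrative sketch only: building the Embedding defined above from a config dict.
# Import paths assume a typical AllenNLP layout.
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import Embedding

vocab = Vocabulary()                       # assume this was populated from a dataset beforehand
params = Params({
    "embedding_dim": 100,
    "vocab_namespace": "tokens",
    # "pretrained_file": "glove.txt.gz",   # optional gzipped word2vec-format file
    "trainable": True,
})
embedding = Embedding.from_params(vocab, params)
token_ids = torch.LongTensor([[0, 1, 1]])  # (batch, sequence); ids must be < vocab size
vectors = embedding(token_ids)             # -> (batch, sequence, embedding_dim)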
arannasousa/pagseguro_xml
exemplos/testes_assinatura.py
1
6513
# coding=utf-8 # --------------------------------------------------------------- # Desenvolvedor: Arannã Sousa Santos # Mês: 12 # Ano: 2015 # Projeto: pagseguro_xml # e-mail: asousas@live.com # --------------------------------------------------------------- import logging from pagseguro_xml.assinatura import ApiPagSeguroAssinatura_v2, CONST_v2 logger = logging.basicConfig(level=logging.DEBUG) PAGSEGURO_API_AMBIENTE = u'sandbox' PAGSEGURO_API_EMAIL = u'seu@email.com' PAGSEGURO_API_TOKEN_PRODUCAO = u'' PAGSEGURO_API_TOKEN_SANDBOX = u'' api = ApiPagSeguroAssinatura_v2(ambiente=CONST_v2.AMBIENTE.SANDBOX) PAGSEGURO_API_TOKEN = PAGSEGURO_API_TOKEN_SANDBOX def exemploRequisicaoAssinatura(): from pagseguro_xml.assinatura.v2.classes.requisicao import ClasseAssinaturaRequisicao, CONST as CONST_REQUISICAO xmlRequisicao = ClasseAssinaturaRequisicao() xmlRequisicao.redirectURL.valor = u'http://seusite.com.br' xmlRequisicao.reference.valor = u'REF0002' xmlRequisicao.sender.name.valor = u'Cliente de teste' xmlRequisicao.sender.email.valor = u'as1234231234e@sandbox.pagseguro.com.br' xmlRequisicao.sender.address.state.valor = u'TO' xmlRequisicao.preApproval.charge.valor = CONST_REQUISICAO.PREAPPROVAL.CHARGE.AUTO xmlRequisicao.preApproval.name.valor = u'Assinatura de 1 mes' xmlRequisicao.preApproval.amountPerPayment.valor = u'10.00' xmlRequisicao.preApproval.period.valor = CONST_REQUISICAO.PREAPPROVAL.PERIOD.MONTHLY from datetime import datetime xmlRequisicao.preApproval.finalDate.valor = datetime(2016, 01, 23) xmlRequisicao.preApproval.maxTotalAmount.valor = u'10.00' if xmlRequisicao.alertas: print u'erros antes de enviar' for a in xmlRequisicao.alertas: print a if not xmlRequisicao.alertas: ok, retorno = api.requisicao_assinatura_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, xmlRequisicao) if ok: print u'-' * 45, u'RESPOSTA', u'-' * 45 # visualizando o XML retornado print retorno.xml print u'-' * 100 # checando erros no XML retornado if retorno.alertas: print u'-' * 45, u'ALERTAS', u'-' * 46 for a in retorno.alertas: print a print u'-' * 100 CODIGO_REQUISICAO_PAGAMENTO = retorno.code.valor url_fluxo = api.gera_url_fluxo_v2(CODIGO_REQUISICAO_PAGAMENTO) # >> u'https://[sandbox.]pagseguro.uol.com.br/v2/pre-approvals/request.html?code=CODIGO-RETORNADO' print u'URL para o fluxo:', url_fluxo # -------------------------------------------------------------------------------- # no final do pagamento, a PagSeguro vai gerar a URL como a de baixo # # u'http://seusite.com.br/?code=CODIGO-NOTIFICACAO' # -------------------------------------------------------------------------------- else: if hasattr(retorno, u'xml'): print u'Motivo do erro:', retorno.xml else: print u'Motivo do erro:', retorno def exemploConsultaAssinaturaNotificacao(): CODIGO_NOTIFICACAO = u'' ok, retorno = api.consulta_assinatura_notificacao_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, CODIGO_NOTIFICACAO) if ok: print u'-' * 45, u'RESPOSTA', u'-' * 45 # visualizando o XML retornado print retorno.xml print u'-' * 100 print u'Status da Assinatura', retorno.status.valor # checando erros no XML retornado if retorno.alertas: print u'-' * 45, u'ALERTAS', u'-' * 46 for a in retorno.alertas: print a print u'-' * 100 else: print u'Motivo do erro:', retorno def exemploConsultaAssinatura(): # CODIGO_ASSINATURA = u'' CODIGO_ASSINATURA = u'' ok, retorno = api.consulta_assinatura_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, CODIGO_ASSINATURA) if ok: print u'-' * 45, u'RESPOSTA', u'-' * 45 # visualizando o XML retornado print retorno.xml print u'-' * 100 print 
u'Status da Assinatura', retorno.status.valor # checando erros no XML retornado if retorno.alertas: print u'-' * 45, u'ALERTAS', u'-' * 46 for a in retorno.alertas: print a print u'-' * 100 else: print u'Motivo do erro:', retorno def exemploConsultaNotificacaoPorDias(): ok, retorno = api.consulta_notificacao_por_dias_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, 30) if ok: print u'-' * 50 print retorno.xml print u'-' * 50 for preApproval in retorno.preApprovals: print preApproval.xml for a in retorno.alertas: print a else: if type(retorno) in (str, unicode, basestring): print u'Motivo do erro:', retorno else: print u'Motivo do erro:', retorno.xml def exemploConsultaPorData(): from datetime import datetime inicial = datetime(2015, 12, 9) final = datetime(2015, 12, 12) ok, retorno = api.consulta_por_data_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, inicial, final) if ok: print u'-' * 50 print retorno.xml print u'-' * 50 for preApproval in retorno.preApprovals: print preApproval.xml for a in retorno.alertas: print a else: if type(retorno) in (str, unicode, basestring): print u'Motivo do erro:', retorno else: print u'Motivo do erro:', retorno.xml def exemploCancelar(): codigo = u'' # codigo = u'' ok, retorno = api.cancela_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, codigo) if ok: print u'-' * 50 print retorno.xml print u'-' * 50 for a in retorno.alertas: print a else: if type(retorno) in (str, unicode, basestring): print u'Motivo do erro:', retorno else: print u'Motivo do erro:', retorno.xml print u'#' * 50 exemploRequisicaoAssinatura() print u'*' * 50 exemploConsultaAssinaturaNotificacao() print u'*' * 50 exemploConsultaAssinatura() print u'*' * 50 exemploConsultaNotificacaoPorDias() print u'*' * 50 exemploConsultaPorData() print u'*' * 50 exemploCancelar() print u'#' * 50
gpl-2.0
5,631,187,900,823,134,000
26.476793
118
0.590078
false
2.973059
false
false
false
sailorsenergy/windb2
bin/insert-mesowest-data.py
1
5096
#!/usr/bin/env python3 # # # Mike Dvorak # Postdoc # UC Berkeley # Civil and Environmental Engineering # dvorak@berkeley.edu # # Created: 2013-07-09 # Modified: 2016-01-22 # # # Description: Inserts CSV files from Mesowest (mesowest.utah.edu). The CSV files are the type you get when you save # the "CSV" files, which are actually html as "text" in Firefox. # # Add the WinDB2 lib import os import sys dir = os.path.dirname(__file__) sys.path.append(os.path.join(dir, '../')) import csv import re import time import sys import math from datetime import datetime, timedelta, tzinfo import pytz import urllib import tempfile from windb2 import windb2 from windb2.struct import winddata, insert import argparse from urllib.request import urlopen def parseLocation(row): """Parses the location string at the beginning of the file. Returns stationId, stationName, longitude, latitude""" # Rows look like this: # CALVP LOVELAND PASS 39.67472 -105.89389 3624 m CAIC r1 = re.match(b'^# (\w+) ([\(\)@\w /_-]+) ([0-9]+\.[0-9]+) ([-]*[0-9]+\.[0-9]+)', row) if r1 == None: raise ValueError("Location string didn't match: " + str(row)) else: return r1.group(1), r1.group(2), r1.group(4), r1.group(3) # # Main executable # # Parse the arguments parser = argparse.ArgumentParser() parser.add_argument('dbHost', help='Database hostname') parser.add_argument('dbUser', help='Database user') parser.add_argument('dbName', help='Database name') parser.add_argument("stationId", type=str, help="Mesowest code for observation.") parser.add_argument("year", type=int, help="Year to download") parser.add_argument("month", type=int, help="Month to download") parser.add_argument("-o", "--overwrite", help="Replace data if the data for the time exists in the WinDB2", action="store_true") parser.add_argument('-p', '--port', type=int, default='5432', help='Port for WinDB2 connection') args = parser.parse_args() # Connect to the WinDB windb2 = windb2.WinDB2(args.dbHost, args.dbName, args.dbUser, port=args.port) windb2.connect() # Set the times startTime = datetime(args.year, args.month, 1).strftime('%Y%m%d%H%M') if args.month > 12 or args.month < 1: raise ValueError('Illegal month num' + str(args.month)) elif args.month == 12: endTime = (datetime(int(args.year) + 1, args.month + 1, 1) - timedelta(seconds=1)).strftime('%Y%m%d%H%M') else: endTime = (datetime(args.year, args.month + 1, 1) - timedelta(seconds=1)).strftime('%Y%m%d%H%M') # Download the file tmpFile = tempfile.NamedTemporaryFile(mode='r+b',delete=False) tmpFileName = tmpFile.name url = "http://api.mesowest.net/v2/stations/timeseries?token=demotoken&stid={}&start={}&end={}&output=csv&units=temp|K,speed|kts,height|m,metric".format(args.stationId, startTime, endTime) print('Downloading: ', url) urlHandle = urlopen(url) try: reader = urlHandle.read() finally: # Write out the file print('Writing out the file to:', tmpFileName) tmpFile.write(reader) tmpFile.close() urlHandle.close() # Open the Mesowest file to read as a plain old file first print("Opening ", args.stationId) reader = open(tmpFileName, "r") # Get the location data stationId = re.match('# STATION: (\w+)', reader.readline()).group(1) stationName = re.match('# STATION NAME: (\w+)', reader.readline()).group(1) latitude = re.match('# LATITUDE: ([0-9\\.\\-]+)', reader.readline()).group(1) longitude = re.match('# LONGITUDE: ([0-9\\.\\-]+)', reader.readline()).group(1) elevationM = int(float(re.match('# ELEVATION \\[ft\\]: ([0-9]+)', reader.readline()).group(1))/3.3) state = re.match('# STATE: (\w+)', reader.readline()).group(1) # 
Info print('StationID: ', stationId) print('Station name:', stationName) print('Longitude: ', longitude) print('Latitude: ', latitude) # Make a dictionary of the column names as {'colName':colNum} colNames = re.split(',',reader.readline()) colDict = {} count = 0 for name in colNames: colDict[name.strip()] = count count += 1 # Burn the units line reader.readline() # Convert the regular file to a CSV file reader = csv.reader(reader) # Insert all the rows of data windData = [] count = 0 for row in reader: # Debug #print str(row) # Construct a timestamp, continuing on if there is a parse failure #t = datetime.strptime(row[colDict['Date_Time']], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=pytz.utc) #print(t) # Add data, if the wind speed and direction are not null if row[colDict['wind_speed_set_1']] != "" and row[colDict['wind_direction_set_1']] != "": windData.append(winddata.WindData(row[colDict['Date_Time']], elevationM, float(row[colDict['wind_speed_set_1']]), float(row[colDict['wind_direction_set_1']]))) # Increment count of valid data count += 1 # Info print('Downloaded ', count, ' times of weather data.') # Insert all of the data for that file. Necessary to do by file because of the large data sizes. print('Inserting the surface data...') insert.insertWindData(windb2, stationId, 'Mesowest', windData, longitude, latitude)
gpl-3.0
-6,013,748,427,673,454,000
31.458599
187
0.681515
false
3.153465
false
false
false
hansbrenna/NetCDF_postprocessor
area_calculator.py
1
1415
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 13:07:42 2015

@author: hanbre
"""
from __future__ import print_function
from math import *
import numpy as np

R = 6378*1000  # Earth's radius
lat1 = radians(12.31579)  # latitude and longitude boundaries for the calculation
lat2 = radians(16.10526)
lon1 = 268.5
lon2 = 272.5
P = 29.728  # Midlayer pressure
P1 = 26.8825  # lower and upper pressure boundary
P2 = 32.5735
Rl = 287  # Gas constant for dry air, representative for the stratosphere
T = 235  # Temperature
g = 9.81  # g

rho = P*100/(Rl*T)  # Calculation of density
dz = (P2-P1)*100/(rho*g)  # Elevation change between the pressure boundaries. Hydrostatic assumption

A = (pi/180)*R**2*(abs(sin(lat1)-sin(lat2))*abs(lon1-lon2))  # Area
print('A={0} m^2'.format(A))

V = dz*A  # Volume
print('V={0} m^3'.format(V))

M = rho*V  # Mass of air
print('M={0} kg'.format(M))

# HBr
dmol_frac = 32.65e-8
molar_mass = 80.91  # HBr=80.91, HCl=
mol_mass_air = 28.97
dmass_frac = dmol_frac*(molar_mass/mol_mass_air)
halog_frac = 0.987  # HBr=0.98, HCl =
dmass = dmass_frac*M
dhalog_mass = dmass*halog_frac
print('Added Bromine mass from modification={0:E}'.format(dhalog_mass))

# HCl
dmol_frac = 22.7e-5
molar_mass = 36.46  # HBr=80.91, HCl=
mol_mass_air = 28.97
dmass_frac = dmol_frac*(molar_mass/mol_mass_air)
halog_frac = 0.972  # HBr=0.98, HCl =
dmass = dmass_frac*M
dhalog_mass = dmass*halog_frac
print('Added Chlorine mass from modification={0:E}'.format(dhalog_mass))
gpl-3.0
-8,894,281,081,695,982,000
25.222222
96
0.708127
false
2.394247
false
false
false
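The area step in the script above is the standard formula for a patch on a sphere bounded by two parallels and two meridians. A small self-contained check of just that step; the function name is ours and the values simply mirror the constants used above.

from math import radians, sin

def patch_area(radius_m, lat1_deg, lat2_deg, lon1_deg, lon2_deg):
    """Area of the spherical patch between two latitudes and two longitudes.

    A = R^2 * |sin(lat1) - sin(lat2)| * |dlon|, with the longitude difference
    converted from degrees to radians (the pi/180 factor in the script above).
    """
    dlon_rad = radians(abs(lon1_deg - lon2_deg))
    return radius_m**2 * abs(sin(radians(lat1_deg)) - sin(radians(lat2_deg))) * dlon_rad

# Same numbers as the script above; prints the same A value
print(patch_area(6378*1000, 12.31579, 16.10526, 268.5, 272.5))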
cowlicks/odo
odo/into.py
1
4707
from __future__ import absolute_import, division, print_function import functools from toolz import merge from multipledispatch import Dispatcher from .convert import convert from .append import append from .resource import resource from .utils import ignoring import datashape from datashape import discover from datashape.dispatch import namespace from datashape.predicates import isdimension from .compatibility import unicode __all__ = 'into', if 'into' not in namespace: namespace['into'] = Dispatcher('into') into = namespace['into'] def validate(f): @functools.wraps(f) def wrapped(*args, **kwargs): dshape = kwargs.pop('dshape', None) if isinstance(dshape, (str, unicode)): dshape = datashape.dshape(dshape) if dshape is not None and not isinstance(dshape, datashape.DataShape): raise TypeError('dshape argument is not an instance of DataShape') kwargs['dshape'] = dshape return f(*args, **kwargs) return wrapped @into.register(type, object) @validate def into_type(a, b, dshape=None, **kwargs): with ignoring(NotImplementedError): if dshape is None: dshape = discover(b) return convert(a, b, dshape=dshape, **kwargs) @into.register(object, object) @validate def into_object(target, source, dshape=None, **kwargs): """ Push one dataset into another Parameters ---------- source: object or string The source of your data. Either an object (e.g. DataFrame), target: object or string or type The target for where you want your data to go. Either an object, (e.g. []), a type, (e.g. list) or a string (e.g. 'postgresql://hostname::tablename' raise_on_errors: bool (optional, defaults to False) Raise exceptions rather than reroute around them **kwargs: keyword arguments to pass through to conversion functions. Examples -------- >>> L = into(list, (1, 2, 3)) # Convert things into new things >>> L [1, 2, 3] >>> _ = into(L, (4, 5, 6)) # Append things onto existing things >>> L [1, 2, 3, 4, 5, 6] >>> into('myfile.csv', [('Alice', 1), ('Bob', 2)]) # doctest: +SKIP Explanation ----------- We can specify data with a Python object like a ``list``, ``DataFrame``, ``sqlalchemy.Table``, ``h5py.Dataset``, etc.. We can specify data with a string URI like ``'myfile.csv'``, ``'myfiles.*.json'`` or ``'sqlite:///data.db::tablename'``. These are matched by regular expression. See the ``resource`` function for more details on string URIs. We can optionally specify datatypes with the ``dshape=`` keyword, providing a datashape. This allows us to be explicit about types when mismatches occur or when our data doesn't hold the whole picture. See the ``discover`` function for more information on ``dshape``. >>> ds = 'var * {name: string, balance: float64}' >>> into('accounts.json', [('Alice', 100), ('Bob', 200)], dshape=ds) # doctest: +SKIP We can optionally specify keyword arguments to pass down to relevant conversion functions. For example, when converting a CSV file we might want to specify delimiter >>> into(list, 'accounts.csv', has_header=True, delimiter=';') # doctest: +SKIP These keyword arguments trickle down to whatever function ``into`` uses convert this particular format, functions like ``pandas.read_csv``. 
See Also -------- into.resource.resource - Specify things with strings datashape.discover - Get datashape of data into.convert.convert - Convert things into new things into.append.append - Add things onto existing things """ if isinstance(source, (str, unicode)): source = resource(source, dshape=dshape, **kwargs) with ignoring(NotImplementedError): if dshape is None: dshape = discover(source) return append(target, source, dshape=dshape, **kwargs) @into.register((str, unicode), object) @validate def into_string(uri, b, dshape=None, **kwargs): if dshape is None: dshape = discover(b) resource_ds = 0 * dshape.subshape[0] if isdimension(dshape[0]) else dshape a = resource(uri, dshape=resource_ds, expected_dshape=dshape, **kwargs) return into(a, b, dshape=dshape, **kwargs) @into.register((type, (str, unicode)), (str, unicode)) @validate def into_string_string(a, b, **kwargs): return into(a, resource(b, **kwargs), **kwargs) @into.register(object) @validate def into_curried(o, **kwargs1): def curried_into(other, **kwargs2): return into(o, other, **merge(kwargs2, kwargs1)) return curried_into
bsd-3-clause
-1,310,497,420,108,693,200
29.967105
90
0.656682
false
3.805174
false
false
false
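A short sketch of the curried single-argument dispatch registered above: calling `into` with only a target returns a partially-applied converter. Plain lists and tuples are used so the example does not depend on any optional backend; the import assumes the package layout shown above.

from odo import into

to_list = into(list)          # curried form: target fixed, source supplied later
print(to_list((1, 2, 3)))     # [1, 2, 3]

target = [0]
print(into(target, (4, 5)))   # appends onto the existing list -> [0, 4, 5]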
dgilland/alchy
setup.py
1
1592
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from setuptools import setup, find_packages


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


def parse_requirements(filename):
    return [line.strip()
            for line in read(filename).strip().split('\n')
            if line.strip()]


pkg = {}
exec(read('alchy/__pkg__.py'), pkg)

readme = read('README.rst')
changelog = read('CHANGELOG.rst')
requirements = parse_requirements('requirements.txt')

setup(
    name=pkg['__package_name__'],
    version=pkg['__version__'],
    url=pkg['__url__'],
    license=pkg['__license__'],
    author=pkg['__author__'],
    author_email=pkg['__email__'],
    description=pkg['__description__'],
    long_description=readme + '\n\n' + changelog,
    packages=find_packages(exclude=['tests', 'tasks']),
    install_requires=requirements,
    keywords='sqlalchemy databases orm declarative',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Topic :: Database :: Front-Ends',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ]
)
mit
8,434,848,589,541,545,000
29.615385
71
0.606784
false
4.082051
false
false
false
vivekpabani/projecteuler
python/012/problem_012.py
1
1116
#!/usr/bin/env python
"""
Problem Definition :
The sequence of triangle numbers is generated by adding the natural numbers.
So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
 1: 1
 3: 1,3
 6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""

import math


def find_triangle(i):
    return i*(i+1)/2


def find_count(i):
    number = 1
    count = 0
    while number < int(math.sqrt(i)):
        if i % number == 0:
            count += 2
        number += 1
    if math.sqrt(i) == number:
        count += 1
    return count


def main():
    final_count, triangle = 0, 0
    num = 0
    while final_count < 500:
        num += 1
        triangle = find_triangle(num)
        final_count = find_count(triangle)
    print triangle


if __name__ == '__main__':
    main()
apache-2.0
-4,799,293,626,819,717,000
18.578947
174
0.594086
false
3.117318
false
false
false
seakers/daphne_brain
AT/analysis/AnomalyDetectionAgreements.py
1
5289
import logging from django.shortcuts import render from django.http import Http404 from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status import pandas as pd import numpy as np import json def fill_neighbors(series, radius): n = series.shape[0] out = [] for i in range(n): out.append(max(series[max(0, i-radius):min(n, i+radius+1)])) return out class AgreementMethods(APIView): def post(self, request, format=None): methodOne = request.data['selectedAlgorithmOne'] methodTwo = request.data['selectedAlgorithmTwo'] typeOne = request.data['typeAlgorithmOne'] typeTwo = request.data['typeAlgorithmTwo'] variable = request.data['variable'] t = request.data['radius'] # The threshold imposed to consider related anomalies data = pd.read_json(request.data['data'], orient='records').set_index('timestamp') if typeOne == 'UniVariate': anomaliesOne = pd.read_json(json.dumps(request.data['detectedOneVarAnomalies'][variable][methodOne]), orient='records') else: anomaliesOne = pd.read_json(json.dumps(request.data['detectedMultiVarAnomalies'][methodOne]), orient='records') if typeTwo == 'UniVariate': anomaliesTwo = pd.read_json(json.dumps(request.data['detectedOneVarAnomalies'][variable][methodTwo]), orient='records') else: anomaliesTwo = pd.read_json(json.dumps(request.data['detectedMultiVarAnomalies'][methodTwo]), orient='records') anomaliesOne['FlagAnomalyOne'] = 1 anomaliesTwo['FlagAnomalyTwo'] = 1 anomaliesOne = anomaliesOne.set_index('timestamp') anomaliesTwo = anomaliesTwo.set_index('timestamp') # Merges the data with the anomalies, only keeps the flag which indicates which dataPoints # correspond with an anomaly result = pd.concat([data, anomaliesOne['FlagAnomalyOne'], anomaliesTwo['FlagAnomalyTwo']], axis=1) x = result.fillna(0) # Compares only the selected Data if request.data['useSelectedData']: selectedData = pd.read_json(json.dumps(request.data['selectedData']), orient='records') x['selectedData'] = selectedData.values if np.sum(x['selectedData']) == 0: return Response({'writtenResponse': [{'introduction': 'ERROR: NO SELECTED DATA', 'bulletPoints': []}]}) x = x[x['selectedData']] n = x.shape[0] x['FlagNeighborOne'] = fill_neighbors(x['FlagAnomalyOne'], t) x['FlagNeighborTwo'] = fill_neighbors(x['FlagAnomalyTwo'], t) x['Coincidence'] = x['FlagAnomalyOne'] * x['FlagAnomalyTwo'] x['RelatedOne'] = x['FlagAnomalyOne'] * x['FlagNeighborTwo'] x['RelatedTwo'] = x['FlagAnomalyTwo'] * x['FlagNeighborOne'] # Computes the scores sumOne= np.sum(np.min(np.asmatrix([np.asanyarray(x['Coincidence'] + x['RelatedOne'] * 0.5), np.ones(n)]), axis=0)) sumTwo = np.sum(np.min(np.asmatrix([np.asanyarray(x['Coincidence'] + x['RelatedTwo'] * 0.5), np.ones(n)]), axis=0)) sumFlagOne = sum(x['FlagAnomalyOne']) sumFlagTwo = sum(x['FlagAnomalyTwo']) score_One = 100 * sumOne / sumFlagOne score_Two = 100 * sumTwo / sumFlagTwo score_All = 100 * (sumOne + sumTwo) / (sumFlagOne + sumFlagTwo) writtenResponseGeneric = { 'introduction': 'There is a ' + str(score_All) + '% agreement between method ' + methodOne + ' and ' + methodTwo, 'bulletPoints': [ 'Total number of anomalies detected by ' + methodOne + ' is ' + str(sumFlagOne), 'Total number of anomalies detected by ' + methodTwo + ' is ' + str(sumFlagTwo) ] } writtenResponseOne = { 'introduction': 'There is a ' + str(score_One) + '% agreement of the anomalies detected with method ' + methodOne + ' to anomalies detected with' + methodTwo, 'bulletPoints': [ 'Total number of anomalies detected by ' + methodOne + ' is ' + 
str(sumFlagOne), 'The ' + str(100 * sum(x['Coincidence']) / sumFlagOne) + '% coincide with anomalies detected with method ' + methodTwo, 'The ' + str(100 * sum(x['RelatedOne']) / sumFlagOne) + '% are near to anomalies detected with method ' + methodTwo, ] } writtenResponseTwo = { 'introduction': 'There is a ' + str(score_Two) + '% agreement of the anomalies detected with method ' + methodTwo + ' to anomalies detected with' + methodOne, 'bulletPoints': [ 'Total number of anomalies detected by ' + methodTwo + ' is ' + str(sumFlagTwo), 'The ' + str(100 * sum(x['Coincidence']) / sumFlagTwo) + '% coincide with anomalies detected with method ' + methodOne, 'The ' + str(100 * sum(x['RelatedTwo']) / sumFlagTwo) + '% are near to anomalies detected with method ' + methodOne, ] } outJson = { 'writtenResponse': [writtenResponseGeneric, writtenResponseOne, writtenResponseTwo] } return Response(outJson)
mit
6,911,759,693,028,830,000
43.445378
135
0.615239
false
3.675469
false
false
false
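The agreement scores in the view above hinge on fill_neighbors, which dilates each anomaly flag by `radius` samples so that near-misses between the two detectors still count as related. A tiny standalone check of that behavior; the sample values are ours, not from the original record.

import numpy as np

def fill_neighbors(series, radius):
    # Same logic as above: each position takes the max over a +/- radius window.
    n = series.shape[0]
    out = []
    for i in range(n):
        out.append(max(series[max(0, i - radius):min(n, i + radius + 1)]))
    return out

flags = np.array([0, 0, 1, 0, 0, 0])
print(fill_neighbors(flags, 1))   # [0, 1, 1, 1, 0, 0]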
RocketRedNeck/PythonPlayground
pidSim2.py
1
20497
# -*- coding: utf-8 -*- """ pidSim2.py A simulation of a vision control to steering PID loop accounting for communication and processing latency and variation; demonstrates the impact of variation to successful control. THIS VERSION models the control as a 1st order input (velocity) and then integrates once to get position. In other words, the control variable (CV) has indirect control over the process variable (PV); for example this is the case when a motor controller is in the loop and effectively makes this loop a cascaded PID This allows students to experiment with how different elements in the scaling of a control loop affect performance, this focusing efforts on successful design. The model consists of a PID processing software with an asynchronous alignment with a camera frame which is also asynchronous to image processing software. Communication latency and jitter are planned as well as image processing impacts. A plot at the end shows a sample over some specified duration. The initial conditions of the file represents a case that won't work well until it is correct by improvements in the constants and image processing rates Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net RocketRedNeck and MIT Licenses RocketRedNeck hereby grants license for others to copy and modify this source code for whatever purpose other's deem worthy as long as RocketRedNeck is given credit where where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************************************** """ import matplotlib.pyplot as plot import numpy as np tmax_sec = 10.0 dt_sec = 0.001 ts_sec = np.arange(0.0, tmax_sec, 0.001) nmax = ts_sec.__len__() # round(tmax_sec/dt_sec) ns = range(0, nmax) kp = 1.2 # Proportional gain ki = 0.0 # Integral gain kd = 0.5 # Derivative gain kg = 1.0 # Plant (Process) gain tau_sec = 0.5 # This is the motor plus inertia time constant to reach velocity sp = np.zeros(nmax) # Will initialize after first image processed spStart = False; spPeriod = 1.0/4.0 err = np.zeros(nmax) intErr = np.zeros(nmax) derrdt = np.zeros(nmax) lastErr = 0.0 G = np.zeros(nmax) # Process output to be measured exp = np.exp(-dt_sec/tau_sec) # Define arrays to hold the kinematic values # In this case we will use simple names to represent either linear or rotation v = np.zeros(nmax) # linear or angular velocity p = np.zeros(nmax) # linear or angular position # Model of the pid task via a java util.timer # We add a random normal variation for task wakeup since the util.timer # can only assure that the task wakes up no earlier than scheduled. # Empirical measurement of the task latency is required for accurate # modeling, but for now we can just assume about a 10% average pidPeriod_sec = 0.02; pidPeriod_index = round(pidPeriod_sec / dt_sec) pidTimer_index = 0 pidStart_index = 0 # "time" that PID computation started pidDuration_sec = 0.001 # Time to complete PID calculation (models software latency) pidDuration_index = round(pidDuration_sec / dt_sec) pidEnd_index = pidStart_index + pidDuration_index # "time" that PID computation ended pidMinJitter_sec = 0.000 # Minimum Random task jitter pidMinJitter_index = round(pidMinJitter_sec / dt_sec) pidMaxJitter_sec = 0.0015 # Maximum Random task jitter pidMaxJitter_index = round(pidMaxJitter_sec / dt_sec) pidMeanJitter_index = round((pidMaxJitter_index + pidMinJitter_index)/2) pidStdDevJitter_index = round((pidMaxJitter_index - pidMinJitter_index) / 3) cvPid = np.zeros(nmax) # Initial value of cv coming from PID calculation # The first communication link is assumed to be a CAN bus # The bus overhead is assumed to be a total fixed time # not exceeding about 1 ms for up to four (4) messages going to four (4) # separate motors (including any increases for bit stuffing); in other words # we assume something like 100 bits per message all mastered from the same # location on a 1 Mbps bus. # The underlying software is assumed to be some queue processing task that # wakes upon a posted message. A complete review of the path is needed to # assess whether to task that actually receives the posted message awakens # immediately (higher priority) or must time slice with all other concurrent # tasks. 
If communication tasking is forced to wait for an available cycle # it is possible that an indeterminate delay may occur at the post-to-wire # boundary; also, the communication tasking must post all messages queued # to the wire in close sequence otherwise the motors will be out of phase # We can inject an estimate of communication jitter as a whole using a # simple normal distribution comm0Start_index = 0 # "time" that first communication bus starts comm0Delay_sec = 0.001 # Time to complete communication (MUST BE LESS THAN PID PERIOD) comm0Delay_index = round(comm0Delay_sec / dt_sec) comm0End_index = comm0Start_index + comm0Delay_index comm0MinJitter_sec = 0.000 comm0MinJitter_index = round(comm0MinJitter_sec / dt_sec) comm0MaxJitter_sec = 0.005 comm0MaxJitter_index = round(comm0MaxJitter_sec / dt_sec) comm0MeanJitter_index = round((comm0MaxJitter_index + comm0MinJitter_index)/2) comm0StdDevJitter_index = round((comm0MaxJitter_index - comm0MinJitter_index) / 3) cvComm0 = np.zeros(nmax) # cv value delayed for first communication bus camOffset_sec = 0.0 # Offset to represent asynchronous camera start camOffset_index = round(camOffset_sec / dt_sec) camStart_index = camOffset_index # "time" that camera runs camRate_Hz = 30 # Camera frame rate camPeriod_sec = 1.0/camRate_Hz camPeriod_index = round(camPeriod_sec / dt_sec) camEnd_index = camStart_index + camPeriod_index camImage_index = round((camStart_index + camEnd_index) / 2) # Time associated with center of image pvCam = np.zeros(nmax) # process variable delayed for camera framing # The second communication bus is polled by the imaging software # The time that the imaging software starts is asynchronous to the # other system components, and it will not execute again until the # image processing completes (which itself has some variation) comm1Start_index = 0 # "time" that second communication bus starts comm1Delay_sec = 0.020 # Time to complete communication comm1Delay_index = round(comm1Delay_sec / dt_sec) comm1End_index = comm1Start_index + comm1Delay_index comm1MinJitter_sec = 0.000 comm1MinJitter_index = round(comm1MinJitter_sec / dt_sec) comm1MaxJitter_sec = 0.002 comm1MaxJitter_index = round(comm1MaxJitter_sec / dt_sec) comm1MeanJitter_index = round((comm1MaxJitter_index + comm1MinJitter_index)/2) comm1StdDevJitter_index = round((comm1MaxJitter_index - comm1MinJitter_index) / 3) pvComm1 = np.zeros(nmax) # pv value delayed for second communication bus pvComm1StartTags = np.NaN * np.zeros(nmax) # Image processing consists of a bounded, but variable process # The content of the image and the operating environment will cause the # associated software to vary; we will use emprical estimates for a current # approach and will assume the variation has a normal distribution with a # 3-sigma distribution between the upper and lower limits pvImageStart_index = 0 pvImageMaxRate_Hz = 5.0 pvImageMinRate_Hz = 3.0 pvImageRateSigma = 3 pvImageMaxDuration_sec = 1.0 / pvImageMinRate_Hz pvImageMinDuration_sec = 1.0 / pvImageMaxRate_Hz pvImageMaxDuration_index = round(pvImageMaxDuration_sec / dt_sec) pvImageMinDuration_index = round(pvImageMinDuration_sec / dt_sec) pvImageMeanDuration_index = round((pvImageMinDuration_index + pvImageMaxDuration_index)/2) pvImageStdDevDuration_index = round((pvImageMaxDuration_index - pvImageMinDuration_index) / pvImageRateSigma) pvImageEnd_index = pvImageStart_index + 2*pvImageMaxDuration_index pvImage = np.zeros(nmax) # Final communication link between image processing and the PID comm2Start_index = 
2*pvImageMaxDuration_index # "time" that third communication bus starts (always after image processing) comm2Delay_sec = 0.020 # Time to complete communication comm2Delay_index = round(comm2Delay_sec / dt_sec) comm2End_index = comm2Start_index + comm1Delay_index comm2Jitter_sec = 0.0 # Later we will add a "random" jitter that delays communication comm2Jitter_index = round(comm2Jitter_sec / dt_sec) pvComm2 = np.zeros(nmax) # pv value delayed for third communication bus pvFinal = np.zeros(nmax) for n in ns: # Only run the PID calculation on a period boundary # i.e., this branch represents the task scheduled on a boundary # When jitter is enabled we will occasionally add a delay # representing a late task start (independent of measurement jitter) # We assume here that the task is delayed and not immediately preempted # and thus able to make full use of its time slice if (pidStdDevJitter_index == 0): pidJitter_index = 0 else: pidJitter_index = round(np.random.normal(pidMeanJitter_index, pidStdDevJitter_index)) if (pidJitter_index < 0): pidJitter_index = 0 if (n == pidTimer_index): lastPidStart_index = pidStart_index pidStart_index = pidTimer_index + pidJitter_index pidTimer_index += pidPeriod_index if (n == pidStart_index): deltaT = dt_sec * (pidStart_index - lastPidStart_index) # compute realized period this cycle #print("@ " + str(n) + " pid start = (" + str(pidPeriod_index) + ", " + str(pidJitter_index) + ") + deltaT = " + str(deltaT)) pidEnd_index = n + pidDuration_index # Once we get going, we can compute the error as the # difference of the setpoint and the latest output # of the process variable (delivered after all sensor and # communication delays) if (n > 0): err[n] = sp[n] - pvFinal[n-1] # Assume we currently have no way of directly measuring derr # so we use the err measurement to estimate the error rate # In this sense, the error rate is an average over the # previous interval of time since we last looked, thus the # error rate is in the past # # NOTE: Here we make an ASSUMPTION that the period is accurate # even though we are jittering actual task start. 
This will cause # rate of error to be, itself, in error; using this error rate with # the additional uncertainty makes use of derivative gain problematic # because of the apparent noise induced by the incorrect timing assumption derrdt[n] = (err[n] - err[n-1]) / pidPeriod_sec # Integrate the error (i.e., add it up) intErr[n] = intErr[n-1] + err[n] # Compute the control variable by summing the PID parts # When the pidEnd_index is reached, the output will be # forwarded to the communication sequence cvPid[n] = (kp * err[n]) + (ki * intErr[n]) + (kd * derrdt[n]) elif (n > 0): # Previous output is held until the next task wakeup time err[n] = err[n-1] derrdt[n] = derrdt[n-1] intErr[n] = intErr[n-1] cvPid[n] = cvPid[n-1] # Initiate communication delay if (n == pidEnd_index): #print("@ " + str(n) + " pid end = " + str(cvPid[n])) comm0Start_index = n if (comm0StdDevJitter_index == 0): comm0Jitter_index = 0 else: comm0Jitter_index = round(np.random.normal(comm0MeanJitter_index, comm0StdDevJitter_index)) comm0End_index = comm0Start_index + comm0Delay_index + comm0Jitter_index # When communication delay has been met, move the information along if (n == comm0End_index): cvComm0[comm0End_index] = cvPid[comm0Start_index] #print("@ " + str(n) + " comm0 end = " + str(cvComm0[comm0End_index])) elif (n > 0): # Otherwise, just hold the previous command cvComm0[n] = cvComm0[n-1] # Currently just model the motor, gears, and kinematics as a simple # time constant without limits # The kinematics (physics) runs "continuously" so we update it # every time step # First, model a simple time constant representing controlled process G[n] = (kg * cvComm0[n] * (1.0 - exp)) + (G[n-1] * exp) # Torque applied to the robot mass induced motion # We don't yet care about the specific (how much mass nor whether the # motion is linear or rotational); in this case all we want to demonstrate # is the process effects of integrating from force to a position representing # the process variable being compared to the set point # # The form is F = m a or M = I alpha, but we will just use simple names # here v[n] = G[n] # Integrate to a position # Here will use a simple trapezoidal rule; we can upgrade this to Runge-Kutta # or other methods later but if our time step is small enough compared to # the rate of change, then trapezoidal is fine if (n > 0): p[n] = p[n-1] + v[n-1]*dt_sec + (v[n] - v[n-1])*dt_sec/2 # Next is the sensor delay, communication, processing, and communication # on the return path # The process output will be sensed by a camera and delivered at the # camera frame rate; the frame interval is asynchronous to all other # processing periods. # We currently assume insignificant jitter in the camera rate # We also are neglecting any blur occuring due to motion # # However, we will pick a point midway in the frame to represent # the time of the relevant image data; depending on the simulation # time step and modeled frame rate for the camera can cause a jitter # of up to a time step if ((n % camPeriod_index) == camOffset_index): #print("@ " + str(n) + " camera start") camStart_index = n camEnd_index = camStart_index + camPeriod_index camImage_index = round((camStart_index + camEnd_index)/2) # Midpoint in time # This is a point in time associated with the center pixel of # the image. For now we will just assume that the item we will measure in the # image is at the same point in time as the image center. 
# Reality is that the difference is small and only important for # very rapid target motion # While the center point of the image time is important for averaging # state on the image data, the frame will not be deliverable until the # entire frame is ready for the next communication boundary (when the frame # can be fetched) if (n == (camEnd_index-1)): pvCam[camStart_index:camEnd_index] = p[camImage_index] #print("@ " + str(n) + " camera = " + str(G[camImage_index])) # Image processing is assumed to operate as fast as it can # but will have asynchronous start and duration will vary based on # image content with a well defined lower and upper limit. # # The modeling is a small communication delay followed by a variable # image processing delay; we will model a small normal distribution in # time but will not model imaging errors if (n == comm1Start_index): #print("@ " + str(n) + " COMM1 start") if (comm1StdDevJitter_index == 0): comm1Jitter_index = 0 else: comm1Jitter_index = round(np.random.normal(comm1MeanJitter_index, comm1StdDevJitter_index)) comm1End_index = comm1Start_index + comm1Delay_index + comm1Jitter_index # Whichever image frame is available will now be forwarded # We back up one camera period from when communication startsbecause the # image information won't be available while a frame is being sampled # The information is placed in the outgoing comm1 buffer at the end of # communication, effectively delaying the image information and keeping # the boundaries aynchronous to the resolution of the time step. if (n == comm1End_index): if (comm1Start_index >= camPeriod_index): pvComm1StartTags[comm1Start_index] = pvCam[comm1Start_index - camPeriod_index] pvComm1[comm1End_index] = pvComm1StartTags[comm1Start_index] else: pvComm1StartTags[comm1Start_index] = pvCam[comm1Start_index] pvComm1[comm1End_index] = pvComm1StartTags[comm1Start_index] #print("@ " + str(n) + " COMM1 end = " + str(pvComm1[comm1End_index])) # Now that communication has completed, the image processing # can start; here we represent a variable processing latency # as a normal distribution between a min and max time assumed # to be 3-sigma limit # This is not a precise model of the statistical variation # of actual image processing, but rather just enough variation # to observe the impact to a control loop (if any) pvImageStart_index = comm1End_index if (pvImageStdDevDuration_index == 0): pvImageJitter_index = pvImageMeanDuration_index else: pvImageJitter_index = round(np.random.normal(pvImageMeanDuration_index, pvImageStdDevDuration_index)) pvImageEnd_index = pvImageStart_index + pvImageJitter_index elif (n > 0): pvComm1[n] = pvComm1[n-1] # When image processing is complete, we can begin to send the result # to the final communication link and then restart the second comm link # to read the camera again if (n == pvImageEnd_index): pvImage[pvImageEnd_index] = pvComm1[comm1End_index] #print("@ " + str(n) + " IMAGE PROCESSING end = " + str(pvImage[pvImageEnd_index])) comm2Start_index = pvImageEnd_index elif (n > 0): pvImage[n] = pvImage[n-1] if (n == comm2Start_index): comm2End_index = comm2Start_index + comm2Delay_index #print("@ " + str(n) + " COMM2 start --> end = " + str(comm2End_index)) if (n == comm2End_index): pvComm2[comm2End_index] = pvImage[comm2Start_index] #print("@ " + str(n) + " COMM2 end = " + str(pvComm2[comm2End_index])) comm1Start_index = comm2End_index + 1 # Restart image processing immediately # Enforce causality spStart = True; elif (n > 0): pvComm2[n] = pvComm2[n-1] if (spStart == True): if 
((n+1) < nmax): sp[n+1] = np.sin(ts_sec[n+1] * spPeriod) sp[n+1] = sp[n+1]/np.abs(sp[n+1]) pvFinal[n] = pvComm2[n] plot.figure(1) plot.cla() plot.grid() plot.plot(ts_sec,sp,label='sp') plot.plot(ts_sec,err,label='err') #plot.plot(ts_sec,cvPid,label='cvPid') #plot.plot(ts_sec,cvComm0,'o',label='cvComm0') #plot.plot(ts_sec,G,label='G') plot.plot(ts_sec,p,label='p') plot.plot(ts_sec,pvCam,label='CameraFrame'), plot.plot(ts_sec,pvComm1StartTags,'o',label='CamCommStart') plot.plot(ts_sec,pvComm1,label='ImageProcessing') plot.plot(ts_sec,pvImage,label='NetworkTableStart') plot.plot(ts_sec,pvComm2,label='NetworkTableEnd') #plot.plot(ts_sec,pvFinal,label='pvFinal') #plot.legend(loc='best') plot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.) #plot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
mit
8,180,067,789,737,755,000
43.56087
133
0.694346
false
3.550494
false
false
false
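The plant in the simulation above is a discrete first-order lag driven by the commanded value and then integrated (trapezoidal rule) to a position. A minimal standalone sketch of just that plant update; the constants here are illustrative and not the ones used in the script.

import numpy as np

# First-order lag followed by trapezoidal integration, as in the G/v/p update above.
dt = 0.001          # time step, s
tau = 0.5           # plant time constant, s
kg = 1.0            # plant gain
decay = np.exp(-dt / tau)

steps = 5000
v = np.zeros(steps)   # velocity-like state (output of the lag)
p = np.zeros(steps)   # position-like state (integral of v)
u = 1.0               # constant commanded value

for n in range(1, steps):
    v[n] = kg * u * (1.0 - decay) + v[n - 1] * decay
    p[n] = p[n - 1] + v[n - 1] * dt + (v[n] - v[n - 1]) * dt / 2

print(v[-1])   # approaches kg * u after several time constants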
chebee7i/twitter
data/rsync.py
1
1182
""" Script to backup Twitter data using rsync. A lockfile ensures that this script does not run until the previous run has finished. """ from __future__ import print_function import errno import fcntl import glob import os import subprocess import sys import time import configparser config = configparser.ConfigParser() config.read('../project.cfg') BACKUP_PATH = config['Locations']['BACKUP_PATH'] MONGO_PREFIX = config['Prefixes']['MONGO_PREFIX'] def rsync(path=None): if path is None: path = BACKUP_PATH print() print("-----") subprocess.call('date') cmd = 'rsync --progress -zhtr *.gz *.log {0}* {1}' cmd = cmd.format(MONGO_PREFIX, path) print(cmd) subprocess.call(cmd, shell=True) def main(): with open('.lock_rsync', 'w') as f: try: fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError, e: if e.errno == errno.EAGAIN: msg = '[{0}] rsync script already running.\n' msg = msg.format(time.strftime('%c')) sys.stderr.write(msg) sys.exit(-1) raise rsync() if __name__ == '__main__': main()
unlicense
-8,303,988,705,077,570,000
21.730769
75
0.598139
false
3.636923
true
false
false
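The lockfile idiom in the script above (a non-blocking fcntl.lockf plus an EAGAIN check) is what prevents overlapping runs. A stripped-down sketch of just that pattern; the lockfile name and message are ours.

import errno
import fcntl
import sys

# Minimal sketch of the non-blocking lockfile pattern used above.
with open('.lock_example', 'w') as f:
    try:
        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)   # raises IOError/OSError if already locked
    except (IOError, OSError) as e:
        if e.errno in (errno.EAGAIN, errno.EACCES):
            sys.exit('another instance is already running')
        raise
    # ... long-running work goes here; the lock is released when the file closes ...
    print('lock acquired')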
cortesi/qtile
libqtile/widget/mpd2widget.py
1
5933
from . import base from libqtile.log_utils import logger from six import u, text_type from socket import error as socket_error from mpd import MPDClient, ConnectionError, CommandError # Shortcuts # TODO: Volume inc/dec support keys = { # Left mouse button "toggle": 1, # Right mouse button "stop": 3, # Scroll up "previous": 4, # Scroll down "next": 5, # User defined command "command": None } # To display mpd state play_states = { 'play': u('\u25b6'), 'pause': u('\u23F8'), 'stop': u('\u25a0'), } def option(char): def _convert(elements, key, space): if key in elements and elements[key] != '0': elements[key] = char else: elements[key] = space return _convert prepare_status = { 'repeat': option('r'), 'random': option('z'), 'single': option('1'), 'consume': option('c'), 'updating_db': option('U') } default_format = '{play_status} {artist}/{title} ' +\ '[{repeat}{random}{single}{consume}{updating_db}]' class Mpd2(base.ThreadPoolText): """A widget for Music Player Daemon (MPD) based on python-mpd2 This widget exists since python-mpd library is no more supported. Parameters ========== status_format : format string to display status Full list of values see in ``status`` and ``currentsong`` commands https://musicpd.org/doc/protocol/command_reference.html#command_status https://musicpd.org/doc/protocol/tags.html Default:: {play_status} {artist}/{title} [{repeat}{random}{single}{consume}{updating_db}] ``play_status`` is string from ``play_states`` dict Note that ``time`` property of song renamed to ``fulltime`` to prevent conflicts with status information during formating. prepare_status : dict of functions for replace values in status with custom ``f(status, key, space_element) => str`` """ orientations = base.ORIENTATION_HORIZONTAL defaults = [ ('update_interval', 1, 'Interval of update widget'), ('host', 'localhost', 'Host of mpd server'), ('port', 6600, 'Port of mpd server'), ('password', None, 'Password for auth on mpd server'), ('keys', keys, 'Shortcut keys'), ('play_states', play_states, 'Play state mapping'), ('command', None, 'Executable command by "command" shortcut'), ('timeout', 30, 'MPDClient timeout'), ('idletimeout', 5, 'MPDClient idle command timeout'), ('no_connection', 'No connection', 'Text when mpd is disconnected'), ('space', '-', 'Space keeper') ] def __init__(self, status_format=default_format, prepare_status=prepare_status, **config): super(Mpd2, self).__init__(None, **config) self.add_defaults(Mpd2.defaults) self.status_format = status_format self.prepare_status = prepare_status self.connected = False self.client = MPDClient() self.client.timeout = self.timeout self.client.idletimeout = self.idletimeout self.try_reconnect() def try_reconnect(self): if not self.connected: try: self.client.ping() except(socket_error, ConnectionError): try: self.client.connect(self.host, self.port) if self.password: self.client.password(self.password) self.connected = True except(socket_error, ConnectionError, CommandError): self.connected = False def poll(self): self.try_reconnect() if self.connected: return self.update_status() else: return self.no_connection def update_status(self): self.client.command_list_ok_begin() self.client.status() self.client.currentsong() status, current_song = self.client.command_list_end() return self.formatter(status, current_song) # TODO: Resolve timeouts on the method call def button_press(self, x, y, button): self.try_reconnect() if self.connected: self[button] def __getitem__(self, key): if key == self.keys["toggle"]: status = self.client.status() play_status = 
status['state'] if play_status == 'play': self.client.pause() else: self.client.play() if key == self.keys["stop"]: self.client.stop() if key == self.keys["previous"]: self.client.previous() if key == self.keys["next"]: self.client.next() if key == self.keys['command']: if self.command: self.command(self.client) self.update(self.update_status) def formatter(self, status, currentsong): play_status = self.play_states[status['state']] # Dirty hack to prevent keys conflict currentsong['fulltime'] = currentsong['time'] del currentsong['time'] self.prepare_formatting(status, currentsong) status.update(currentsong) fmt = self.status_format if not isinstance(fmt, text_type): fmt = u(fmt) try: formatted = fmt.format(play_status=play_status, **status) return formatted except KeyError as e: logger.exception("mpd client did not return status: {}".format(e.args[0])) return "ERROR" def prepare_formatting(self, status, currentsong): for key in self.prepare_status: self.prepare_status[key](status, key, self.space) def finalize(self): super(Mpd2, self).finalize() try: self.client.close() self.client.disconnect() except ConnectionError: pass
mit
8,775,754,530,389,253,000
28.517413
87
0.578797
false
4.114424
false
false
false
mitsuhiko/celery
celery/bin/celerybeat.py
1
4325
#!/usr/bin/env python """celerybeat .. program:: celerybeat .. cmdoption:: -s, --schedule Path to the schedule database. Defaults to ``celerybeat-schedule``. The extension ".db" will be appended to the filename. .. cmdoption:: -f, --logfile Path to log file. If no logfile is specified, ``stderr`` is used. .. cmdoption:: -l, --loglevel Logging level, choose between ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``, or ``FATAL``. """ import sys import optparse import traceback import celery from celery import conf from celery import platform from celery.log import emergency_error from celery.beat import ClockService from celery.utils import info STARTUP_INFO_FMT = """ Configuration -> . broker -> %(conninfo)s . schedule -> %(schedule)s . logfile -> %(logfile)s@%(loglevel)s """.strip() OPTION_LIST = ( optparse.make_option('-s', '--schedule', default=conf.CELERYBEAT_SCHEDULE_FILENAME, action="store", dest="schedule", help="Path to the schedule database. The extension \ '.db' will be appended to the filename. Default: %s" % ( conf.CELERYBEAT_SCHEDULE_FILENAME)), optparse.make_option('-f', '--logfile', default=conf.CELERYBEAT_LOG_FILE, action="store", dest="logfile", help="Path to log file."), optparse.make_option('-l', '--loglevel', default=conf.CELERYBEAT_LOG_LEVEL, action="store", dest="loglevel", help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL/FATAL."), ) class Beat(object): ClockService = ClockService def __init__(self, loglevel=conf.CELERYBEAT_LOG_LEVEL, logfile=conf.CELERYBEAT_LOG_FILE, schedule=conf.CELERYBEAT_SCHEDULE_FILENAME, **kwargs): """Starts the celerybeat task scheduler.""" self.loglevel = loglevel self.logfile = logfile self.schedule = schedule # Setup logging if not isinstance(self.loglevel, int): self.loglevel = conf.LOG_LEVELS[self.loglevel.upper()] def run(self): print("celerybeat %s is starting." % celery.__version__) self.init_loader() print(self.startup_info()) self.set_process_title() print("celerybeat has started.") self.start_scheduler() def start_scheduler(self): from celery.log import setup_logger logger = setup_logger(self.loglevel, self.logfile) beat = self.ClockService(logger, schedule_filename=self.schedule) try: self.install_sync_handler(beat) beat.start() except Exception, exc: emergency_error(self.logfile, "celerybeat raised exception %s: %s\n%s" % ( exc.__class__, exc, traceback.format_exc())) def init_loader(self): # Run the worker init handler. # (Usually imports task modules and such.) from celery.loaders import current_loader current_loader().init_worker() def startup_info(self): return STARTUP_INFO_FMT % { "conninfo": info.format_broker_info(), "logfile": self.logfile or "@stderr", "loglevel": conf.LOG_LEVELS[self.loglevel], "schedule": self.schedule, } def set_process_title(self): arg_start = "manage" in sys.argv[0] and 2 or 1 platform.set_process_title("celerybeat", info=" ".join(sys.argv[arg_start:])) def install_sync_handler(self, beat): """Install a ``SIGTERM`` + ``SIGINT`` handler that saves the celerybeat schedule.""" def _sync(signum, frame): beat.sync() raise SystemExit() platform.install_signal_handler("SIGTERM", _sync) platform.install_signal_handler("SIGINT", _sync) def parse_options(arguments): """Parse the available options to ``celeryd``.""" parser = optparse.OptionParser(option_list=OPTION_LIST) options, values = parser.parse_args(arguments) return options def run_celerybeat(**options): Beat(**options).run() def main(): options = parse_options(sys.argv[1:]) run_celerybeat(**vars(options)) if __name__ == "__main__": main()
bsd-3-clause
2,549,191,977,450,604,500
29.892857
77
0.602081
false
4.049625
false
false
false
yuxiang-zhou/AnimeTracker
AnimeFetcher/anime_updater_78land.py
1
2889
#!/usr/bin/python3 import sys import BaseHTTPServer import cgi import json import threading import urllib2 import time from bs4 import BeautifulSoup from pymongo import MongoClient import datetime reload(sys) sys.setdefaultencoding('utf-8') num_retry = 12 period = int(3600*12) con = MongoClient('178.62.38.12') db = con.animedb animelist = db.animelist animesDB = db.animes anime_base_url = 'http://wt.78land.com/ctlist/' anime_base_download = 'http://wt.78land.com/ctdown/' anime_list_url = anime_base_url + 'all.htm' def get_url_content(url): anime_doc = "" retry = num_retry while(retry > 0): try: anime_req = urllib2.Request(url) anime_doc = urllib2.urlopen(anime_req).read() retry = 0 except: retry = retry - 1 pass return anime_doc def parse_download_link(url): dl_doc = get_url_content(url) dlParse = BeautifulSoup(dl_doc) links = dlParse.find_all("a",href = True) linkList = [] for link in links: dl_link = link.get("href") if dl_link[:7] == "thunder": linkList.append(dl_link) return linkList def parse_anime(url, name, anime_id): anime_doc = get_url_content(url) animeParse = BeautifulSoup(anime_doc) animeVideos = animeParse.find_all("td", attrs={"width":"200", "height":"30"}) for videoParse in animeVideos: a_tag = videoParse.a video_name = a_tag.string video_download_url = anime_base_download + a_tag.get('href').rpartition('/')[-1] video_download_link = parse_download_link(video_download_url) video = animesDB.find_one({"name":video_name}) if video == None: animesDB.insert({"category":anime_id,"name":video_name,"dl_url":video_download_link,"update_at":datetime.datetime.now()}) print 'Updating Video: {}'.format(video_name) def run_task(): # retrive anime list html_doc = get_url_content(anime_list_url) # parse list htmlParser = BeautifulSoup(html_doc) animeListHtml = htmlParser.find_all("a",attrs={"target": "_blank"}) for animeHtml in animeListHtml: animeName = animeHtml.string animeUrl = anime_base_url + animeHtml.get('href') anime = animelist.find_one({"name":animeName}) animeID = 0 if anime == None: animeID = animelist.insert({"name":animeName, "url":animeUrl}) else: animeID = anime["_id"] print 'Updating {}'.format(animeName) parse_anime(animeUrl, animeName, animeID) while(True): # DBManager.connect(dbconfig['username'], dbconfig['password'], dbconfig['dbname']) print("Updating Anime List") run_task() print("Update Done") # DBManager.disconnect() time.sleep(period)
mit
-7,170,149,306,758,141,000
26.60396
133
0.610246
false
3.305492
false
false
false
yddgit/hello-python
send_email/send_email_html_plain.py
1
2564
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from email import encoders
from email.header import Header
from email.utils import parseaddr, formataddr
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import smtplib

# Format an email address; display names containing Chinese must be encoded via a Header object
def _format_addr(s):
    name, addr = parseaddr(s)
    return formataddr(( \
        Header(name, 'utf-8').encode(), \
        addr.encode('utf-8') if isinstance(addr, unicode) else addr))

# Prompt for the sender's email address and password
from_addr = raw_input('From: ')
password = raw_input('Password: ')
# Prompt for the SMTP server address
smtp_server = raw_input('SMTP Server: ')
# Prompt for the recipient address
to_addr = raw_input('To: ')

# When sending an HTML mail, also attach a plain-text part in case the recipient
# cannot display HTML; clients that cannot render HTML then automatically fall
# back to the plain-text version.
# MIMEMultipart with subtype 'alternative' combines the HTML and plain parts.
msg = MIMEMultipart('alternative')
# The subject and the displayed sender/recipient are not sent to the MTA through
# the SMTP protocol itself; they are part of the message text handed to the MTA,
# so From, To and Subject must be added to the message for it to be complete.
msg['From'] = _format_addr(u'Python爱好者 <%s>' % from_addr) # some providers (e.g. Sina Mail) require the sender to be set
# msg['To'] takes a string, not a list; separate multiple addresses with commas.
# The recipient name shown in the inbox may differ from the one set here ("管理员"),
# because many mail providers replace it with the name the user registered with;
# other recipients' names are displayed unchanged.
msg['To'] = _format_addr(u'管理员 <%s>' % to_addr)
msg['Subject'] = Header(u'来自SMTP的问候……', 'utf-8').encode()

# Attach both the plain-text and the HTML body
msg.attach(MIMEText('Hello Python', 'plain', 'utf-8'))
msg.attach(MIMEText('<html><body><h1>Hello</h1><p>send by <a href="http://www.python.org">Python</a>...</p></body></html>', 'html', 'utf-8'))

#server = smtplib.SMTP(smtp_server, 25) # the default SMTP port is 25
server = smtplib.SMTP_SSL(smtp_server, 465) # some providers (e.g. QQ Mail) require an SSL connection
# Print everything exchanged with the SMTP server, which makes the encoded
# headers visible, e.g. From: =?utf-8?b?UHl0aG9u54ix5aW96ICF?=...
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
apache-2.0
-8,638,957,130,049,513,000
34.098039
141
0.73352
false
1.625795
false
false
false
Ezhil-Language-Foundation/open-tamil
tamilmorse/tamil_morse_code.py
1
2127
## -*- coding: utf-8 -*- # (C) 2018 Muthiah Annamalai # This file is part of Open-Tamil project # You may use or distribute this file under terms of MIT license ## from __future__ import print_function import codecs import json from solthiruthi import resources from .huffman import huffman, print_huffman_code_cwl def TVU_morse_code(): # unigram data from Project Madurai unigram = TamilUnigramStats().unigram build_morse_code(unigram) def Madurai_morse_code(): # unigram data from Project Madurai unigram = MaduraiUnigramStats().unigram build_morse_code(unigram) def build_morse_code(unigram): v_keys = unigram.keys() p = [unigram[k] for k in v_keys] code, _ = huffman(v_keys, p) cwl, codelist = print_huffman_code_cwl(code, p, v_keys) tamilmorse = {} print(u"<ul>") descending_keys = [ x for _, x in sorted(zip(unigram.values(), v_keys), reverse=True) ] for k in descending_keys: v = code[k] v = v.replace("0", ".").replace("1", "-") tamilmorse[k] = v print(u"<li>%s -&gt; <b><kbd>%s</kbd></b></li>" % (k, v)) print(u"</ul>") with codecs.open("tamilmorse.json", "w", "utf-8") as fp: fp.write(json.dumps(tamilmorse)) return class UnigramStats: def __init__(self, filename): self.unigram = {} # Tamil letter -> probability of occurence self.unigram_file = resources.mk_path(filename) with codecs.open(self.unigram_file, "r", "utf-8") as fp: for L in fp.readlines(): a, b = L.split("-") a = a.strip() b = b.strip() self.unigram[a] = float(b) normalize = 1 + sum(self.unigram.values()) for k, v in self.unigram.items(): self.unigram[k] = v / normalize class TamilUnigramStats(UnigramStats): def __init__(self): UnigramStats.__init__(self, "tvu_unigram.txt") class MaduraiUnigramStats(UnigramStats): def __init__(self): UnigramStats.__init__(self, "madurai_unigram.txt") if __name__ == u"__main__": Madurai_morse_code()
mit
7,503,958,441,565,421,000
28.136986
73
0.594264
false
2.874324
false
false
false
neilLasrado/frappe
frappe/desk/search.py
1
6239
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # Search from __future__ import unicode_literals import frappe, json from frappe.utils import cstr, unique, cint from frappe.permissions import has_permission from frappe import _ from six import string_types import re def sanitize_searchfield(searchfield): blacklisted_keywords = ['select', 'delete', 'drop', 'update', 'case', 'and', 'or', 'like'] def _raise_exception(searchfield): frappe.throw(_('Invalid Search Field {0}').format(searchfield), frappe.DataError) if len(searchfield) == 1: # do not allow special characters to pass as searchfields regex = re.compile('^.*[=;*,\'"$\-+%#@()_].*') if regex.match(searchfield): _raise_exception(searchfield) if len(searchfield) >= 3: # to avoid 1=1 if '=' in searchfield: _raise_exception(searchfield) # in mysql -- is used for commenting the query elif ' --' in searchfield: _raise_exception(searchfield) # to avoid and, or and like elif any(' {0} '.format(keyword) in searchfield.split() for keyword in blacklisted_keywords): _raise_exception(searchfield) # to avoid select, delete, drop, update and case elif any(keyword in searchfield.split() for keyword in blacklisted_keywords): _raise_exception(searchfield) else: regex = re.compile('^.*[=;*,\'"$\-+%#@()].*') if any(regex.match(f) for f in searchfield.split()): _raise_exception(searchfield) # this is called by the Link Field @frappe.whitelist() def search_link(doctype, txt, query=None, filters=None, page_length=20, searchfield=None, ignore_user_permissions=False): search_widget(doctype, txt, query, searchfield=searchfield, page_length=page_length, filters=filters, ignore_user_permissions=ignore_user_permissions) frappe.response['results'] = build_for_autosuggest(frappe.response["values"]) del frappe.response["values"] # this is called by the search box @frappe.whitelist() def search_widget(doctype, txt, query=None, searchfield=None, start=0, page_length=10, filters=None, filter_fields=None, as_dict=False, ignore_user_permissions=False): if isinstance(filters, string_types): filters = json.loads(filters) meta = frappe.get_meta(doctype) if searchfield: sanitize_searchfield(searchfield) if not searchfield: searchfield = "name" standard_queries = frappe.get_hooks().standard_queries or {} if query and query.split()[0].lower()!="select": # by method frappe.response["values"] = frappe.call(query, doctype, txt, searchfield, start, page_length, filters, as_dict=as_dict) elif not query and doctype in standard_queries: # from standard queries search_widget(doctype, txt, standard_queries[doctype][0], searchfield, start, page_length, filters) else: if query: frappe.throw(_("This query style is discontinued")) # custom query # frappe.response["values"] = frappe.db.sql(scrub_custom_query(query, searchfield, txt)) else: if isinstance(filters, dict): filters_items = filters.items() filters = [] for f in filters_items: if isinstance(f[1], (list, tuple)): filters.append([doctype, f[0], f[1][0], f[1][1]]) else: filters.append([doctype, f[0], "=", f[1]]) if filters==None: filters = [] or_filters = [] # build from doctype if txt: search_fields = ["name"] if meta.title_field: search_fields.append(meta.title_field) if meta.search_fields: search_fields.extend(meta.get_search_fields()) for f in search_fields: fmeta = meta.get_field(f.strip()) if f == "name" or (fmeta and fmeta.fieldtype in ["Data", "Text", "Small Text", "Long Text", "Link", "Select", "Read Only", "Text Editor"]): or_filters.append([doctype, f.strip(), "like", 
"%{0}%".format(txt)]) if meta.get("fields", {"fieldname":"enabled", "fieldtype":"Check"}): filters.append([doctype, "enabled", "=", 1]) if meta.get("fields", {"fieldname":"disabled", "fieldtype":"Check"}): filters.append([doctype, "disabled", "!=", 1]) # format a list of fields combining search fields and filter fields fields = get_std_fields_list(meta, searchfield or "name") if filter_fields: fields = list(set(fields + json.loads(filter_fields))) formatted_fields = ['`tab%s`.`%s`' % (meta.name, f.strip()) for f in fields] # find relevance as location of search term from the beginning of string `name`. used for sorting results. formatted_fields.append("""locate("{_txt}", `tab{doctype}`.`name`) as `_relevance`""".format( _txt=frappe.db.escape((txt or "").replace("%", "")), doctype=frappe.db.escape(doctype))) # In order_by, `idx` gets second priority, because it stores link count from frappe.model.db_query import get_order_by order_by_based_on_meta = get_order_by(doctype, meta) order_by = "if(_relevance, _relevance, 99999), {0}, `tab{1}`.idx desc".format(order_by_based_on_meta, doctype) ignore_permissions = True if doctype == "DocType" else (cint(ignore_user_permissions) and has_permission(doctype)) values = frappe.get_list(doctype, filters=filters, fields=formatted_fields, or_filters = or_filters, limit_start = start, limit_page_length=page_length, order_by=order_by, ignore_permissions = ignore_permissions, as_list=not as_dict) # remove _relevance from results if as_dict: for r in values: r.pop("_relevance") frappe.response["values"] = values else: frappe.response["values"] = [r[:-1] for r in values] def get_std_fields_list(meta, key): # get additional search fields sflist = meta.search_fields and meta.search_fields.split(",") or [] title_field = [meta.title_field] if (meta.title_field and meta.title_field not in sflist) else [] sflist = ['name'] + sflist + title_field if not key in sflist: sflist = sflist + [key] return sflist def build_for_autosuggest(res): results = [] for r in res: out = {"value": r[0], "description": ", ".join(unique(cstr(d) for d in r if d)[1:])} results.append(out) return results def scrub_custom_query(query, key, txt): if '%(key)s' in query: query = query.replace('%(key)s', key) if '%s' in query: query = query.replace('%s', ((txt or '') + '%')) return query
mit
-7,745,082,749,918,084,000
34.248588
151
0.678314
false
3.184788
false
false
false
MDSLab/s4t-iotronic
iotronic/db/sqlalchemy/models.py
1
6812
# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for iot data. """ import json from oslo_config import cfg from oslo_db import options as db_options from oslo_db.sqlalchemy import models import six.moves.urllib.parse as urlparse from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import ForeignKey, Integer from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import schema from sqlalchemy import String from sqlalchemy.types import TypeDecorator, TEXT from iotronic.common import paths sql_opts = [ cfg.StrOpt('mysql_engine', default='InnoDB', help='MySQL engine to use.') ] _DEFAULT_SQL_CONNECTION = 'sqlite:///' + \ paths.state_path_def('iotronic.sqlite') cfg.CONF.register_opts(sql_opts, 'database') db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'iotronic.sqlite') def table_args(): engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme if engine_name == 'mysql': return {'mysql_engine': cfg.CONF.database.mysql_engine, 'mysql_charset': "utf8"} return None class JsonEncodedType(TypeDecorator): """Abstract base type serialized as json-encoded string in db.""" type = None impl = TEXT def process_bind_param(self, value, dialect): if value is None: # Save default value according to current type to keep the # interface the consistent. 
value = self.type() elif not isinstance(value, self.type): raise TypeError("%s supposes to store %s objects, but %s given" % (self.__class__.__name__, self.type.__name__, type(value).__name__)) serialized_value = json.dumps(value) return serialized_value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value class JSONEncodedDict(JsonEncodedType): """Represents dict serialized as json-encoded string in db.""" type = dict class JSONEncodedList(JsonEncodedType): """Represents list serialized as json-encoded string in db.""" type = list class IotronicBase(models.TimestampMixin, models.ModelBase): metadata = None def as_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d def save(self, session=None): import iotronic.db.sqlalchemy.api as db_api if session is None: session = db_api.get_session() super(IotronicBase, self).save(session) Base = declarative_base(cls=IotronicBase) class Conductor(Base): """Represents a conductor service entry.""" __tablename__ = 'conductors' __table_args__ = ( schema.UniqueConstraint('hostname', name='uniq_conductors0hostname'), table_args() ) id = Column(Integer, primary_key=True) hostname = Column(String(255), nullable=False) online = Column(Boolean, default=True) class WampAgent(Base): """Represents a wampagent service entry.""" __tablename__ = 'wampagents' __table_args__ = ( schema.UniqueConstraint('hostname', name='uniq_wampagentss0hostname'), table_args() ) id = Column(Integer, primary_key=True) hostname = Column(String(255), nullable=False) wsurl = Column(String(255), nullable=False) online = Column(Boolean, default=True) ragent = Column(Boolean, default=False) class Board(Base): """Represents a Board.""" __tablename__ = 'boards' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_boards0uuid'), schema.UniqueConstraint('code', name='uniq_boards0code'), table_args()) id = Column(Integer, primary_key=True) uuid = Column(String(36)) code = Column(String(25)) status = Column(String(15), nullable=True) name = Column(String(255), nullable=True) type = Column(String(255)) agent = Column(String(255), nullable=True) owner = Column(String(36)) project = Column(String(36)) mobile = Column(Boolean, default=False) config = Column(JSONEncodedDict) extra = Column(JSONEncodedDict) class Location(Base): """Represents a location of a board.""" __tablename__ = 'locations' __table_args__ = ( table_args()) id = Column(Integer, primary_key=True) longitude = Column(String(18), nullable=True) latitude = Column(String(18), nullable=True) altitude = Column(String(18), nullable=True) board_id = Column(Integer, ForeignKey('boards.id')) class SessionWP(Base): """Represents a session of a board.""" __tablename__ = 'sessions' __table_args__ = ( schema.UniqueConstraint( 'session_id', name='uniq_session_id0session_id'), schema.UniqueConstraint( 'board_uuid', name='uniq_board_uuid0board_uuid'), table_args()) id = Column(Integer, primary_key=True) valid = Column(Boolean, default=True) session_id = Column(String(15)) board_uuid = Column(String(36)) board_id = Column(Integer, ForeignKey('boards.id')) class Plugin(Base): """Represents a plugin.""" __tablename__ = 'plugins' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_plugins0uuid'), table_args()) id = Column(Integer, primary_key=True) uuid = Column(String(36)) name = Column(String(36)) owner = Column(String(36)) public = Column(Boolean, default=False) code = Column(TEXT) callable = Column(Boolean) parameters = Column(JSONEncodedDict) 
extra = Column(JSONEncodedDict) class InjectionPlugin(Base): """Represents an plugin injection on board.""" __tablename__ = 'injection_plugins' __table_args__ = ( table_args()) id = Column(Integer, primary_key=True) board_uuid = Column(String(36), ForeignKey('boards.uuid')) plugin_uuid = Column(String(36), ForeignKey('plugins.uuid')) onboot = Column(Boolean, default=False) status = Column(String(15))
apache-2.0
8,382,822,558,443,551,000
29.410714
78
0.642543
false
3.935298
false
false
false
chromium/chromium
tools/cygprofile/check_orderfile.py
7
2870
#!/usr/bin/env vpython # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Check that symbols are ordered into a binary as they appear in the orderfile. """ import logging import optparse import sys import symbol_extractor def _VerifySymbolOrder(orderfile_symbols, symbol_infos, threshold): """Verify symbol ordering. Checks that the non-section symbols in |orderfile_filename| are consistent with the offsets |symbol_infos|. Args: orderfile_symbols: ([str]) list of symbols from orderfile. symbol_infos: ([SymbolInfo]) symbol infos from binary. threshold: (int) The number of misordered symbols beyond which we error. Returns: True iff the ordering is consistent within |threshold|. """ last_offset = 0 name_to_offset = {si.name: si.offset for si in symbol_infos} missing_count = 0 misorder_count = 0 misordered_syms = [] for sym in orderfile_symbols: if '.' in sym: continue # sym is a section name. if sym not in name_to_offset: missing_count += 1 continue next_offset = name_to_offset[sym] if next_offset < last_offset: misorder_count += 1 misordered_syms.append((sym, next_offset, last_offset)) last_offset = next_offset logging.warning('Missing symbols in verification: %d', missing_count) if misorder_count: logging.warning('%d misordered symbols:\n %s', misorder_count, '\n '.join(str(x) for x in misordered_syms[:threshold])) if misorder_count > threshold: logging.error('%d misordered symbols over threshold %d, failing', misorder_count, threshold) return False return True def main(): parser = optparse.OptionParser(usage= 'usage: %prog [options] <binary> <orderfile>') parser.add_option('--target-arch', action='store', dest='arch', default='arm', choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'], help='The target architecture for the binary.') parser.add_option('--threshold', action='store', dest='threshold', default=80, type=int, help='The maximum allowed number of out-of-order symbols.') options, argv = parser.parse_args(sys.argv) if len(argv) != 3: parser.print_help() return 1 (binary_filename, orderfile_filename) = argv[1:] symbol_extractor.SetArchitecture(options.arch) symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename) if not _VerifySymbolOrder([sym.strip() for sym in file(orderfile_filename)], symbol_infos, options.threshold): return 1 if __name__ == '__main__': logging.basicConfig(level=logging.INFO) sys.exit(main())
bsd-3-clause
936,724,482,510,614,900
32.764706
80
0.650871
false
3.942308
false
false
false
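A minimal usage sketch for the orderfile check above (editor's illustration, not part of the Chromium tree). It assumes the script and its symbol_extractor dependency are importable as check_orderfile; SymbolInfo here is a hypothetical stand-in exposing only the name and offset fields that _VerifySymbolOrder reads.

# Stand-in SymbolInfo with just the fields _VerifySymbolOrder uses.
from collections import namedtuple

from check_orderfile import _VerifySymbolOrder  # assumed import path

SymbolInfo = namedtuple('SymbolInfo', ['name', 'offset'])

binary_symbols = [
    SymbolInfo(name='foo', offset=0x100),
    SymbolInfo(name='bar', offset=0x200),
    SymbolInfo(name='baz', offset=0x180),  # sits before 'bar' in the binary
]
orderfile = ['.text.startup', 'foo', 'bar', 'baz', 'missing_symbol']

# One misordered symbol ('baz') and one missing symbol ('missing_symbol'):
# the check passes with threshold=1 and fails with threshold=0.
assert _VerifySymbolOrder(orderfile, binary_symbols, threshold=1)
assert not _VerifySymbolOrder(orderfile, binary_symbols, threshold=0)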
woutervanwijk/mopidy
mopidy/utils/path.py
1
6823
from __future__ import unicode_literals import Queue as queue import logging import os import stat import string import threading import urllib import urlparse import glib logger = logging.getLogger(__name__) XDG_DIRS = { 'XDG_CACHE_DIR': glib.get_user_cache_dir(), 'XDG_CONFIG_DIR': glib.get_user_config_dir(), 'XDG_DATA_DIR': glib.get_user_data_dir(), 'XDG_MUSIC_DIR': glib.get_user_special_dir(glib.USER_DIRECTORY_MUSIC), } # XDG_MUSIC_DIR can be none, so filter out any bad data. XDG_DIRS = dict((k, v) for k, v in XDG_DIRS.items() if v is not None) def get_or_create_dir(dir_path): if not isinstance(dir_path, bytes): raise ValueError('Path is not a bytestring.') dir_path = expand_path(dir_path) if os.path.isfile(dir_path): raise OSError( 'A file with the same name as the desired dir, ' '"%s", already exists.' % dir_path) elif not os.path.isdir(dir_path): logger.info('Creating dir %s', dir_path) os.makedirs(dir_path, 0o755) return dir_path def get_or_create_file(file_path, mkdir=True, content=None): if not isinstance(file_path, bytes): raise ValueError('Path is not a bytestring.') file_path = expand_path(file_path) if mkdir: get_or_create_dir(os.path.dirname(file_path)) if not os.path.isfile(file_path): logger.info('Creating file %s', file_path) with open(file_path, 'w') as fh: if content: fh.write(content) return file_path def path_to_uri(path): """ Convert OS specific path to file:// URI. Accepts either unicode strings or bytestrings. The encoding of any bytestring will be maintained so that :func:`uri_to_path` can return the same bytestring. Returns a file:// URI as an unicode string. """ if isinstance(path, unicode): path = path.encode('utf-8') path = urllib.quote(path) return urlparse.urlunsplit((b'file', b'', path, b'', b'')) def uri_to_path(uri): """ Convert an URI to a OS specific path. Returns a bytestring, since the file path can contain chars with other encoding than UTF-8. If we had returned these paths as unicode strings, you wouldn't be able to look up the matching dir or file on your file system because the exact path would be lost by ignoring its encoding. """ if isinstance(uri, unicode): uri = uri.encode('utf-8') return urllib.unquote(urlparse.urlsplit(uri).path) def split_path(path): parts = [] while True: path, part = os.path.split(path) if part: parts.insert(0, part) if not path or path == b'/': break return parts def expand_path(path): # TODO: document as we want people to use this. if not isinstance(path, bytes): raise ValueError('Path is not a bytestring.') try: path = string.Template(path).substitute(XDG_DIRS) except KeyError: return None path = os.path.expanduser(path) path = os.path.abspath(path) return path def _find_worker(relative, hidden, done, work, results, errors): """Worker thread for collecting stat() results. :param str relative: directory to make results relative to :param bool hidden: whether to include files and dirs starting with '.' 
:param threading.Event done: event indicating that all work has been done :param queue.Queue work: queue of paths to process :param dict results: shared dictionary for storing all the stat() results :param dict errors: shared dictionary for storing any per path errors """ while not done.is_set(): try: entry = work.get(block=False) except queue.Empty: continue if relative: path = os.path.relpath(entry, relative) else: path = entry try: st = os.lstat(entry) if stat.S_ISDIR(st.st_mode): for e in os.listdir(entry): if hidden or not e.startswith(b'.'): work.put(os.path.join(entry, e)) elif stat.S_ISREG(st.st_mode): results[path] = st else: errors[path] = 'Not a file or directory' except os.error as e: errors[path] = str(e) finally: work.task_done() def _find(root, thread_count=10, hidden=True, relative=False): """Threaded find implementation that provides stat results for files. Note that we do _not_ handle loops from bad sym/hardlinks in any way. :param str root: root directory to search from, may not be a file :param int thread_count: number of workers to use, mainly useful to mitigate network lag when scanning on NFS etc. :param bool hidden: whether to include files and dirs starting with '.' :param bool relative: if results should be relative to root or absolute """ threads = [] results = {} errors = {} done = threading.Event() work = queue.Queue() work.put(os.path.abspath(root)) if not relative: root = None for i in range(thread_count): t = threading.Thread(target=_find_worker, args=(root, hidden, done, work, results, errors)) t.daemon = True t.start() threads.append(t) work.join() done.set() for t in threads: t.join() return results, errors def find_mtimes(root): results, errors = _find(root, hidden=False, relative=False) return dict((f, int(st.st_mtime)) for f, st in results.iteritems()) def check_file_path_is_inside_base_dir(file_path, base_path): assert not file_path.endswith(os.sep), ( 'File path %s cannot end with a path separator' % file_path) # Expand symlinks real_base_path = os.path.realpath(base_path) real_file_path = os.path.realpath(file_path) # Use dir of file for prefix comparision, so we don't accept # /tmp/foo.m3u as being inside /tmp/foo, simply because they have a # common prefix, /tmp/foo, which matches the base path, /tmp/foo. real_dir_path = os.path.dirname(real_file_path) # Check if dir of file is the base path or a subdir common_prefix = os.path.commonprefix([real_base_path, real_dir_path]) assert common_prefix == real_base_path, ( 'File path %s must be in %s' % (real_file_path, real_base_path)) # FIXME replace with mock usage in tests. class Mtime(object): def __init__(self): self.fake = None def __call__(self, path): if self.fake is not None: return self.fake return int(os.stat(path).st_mtime) def set_fake_time(self, time): self.fake = time def undo_fake(self): self.fake = None mtime = Mtime()
apache-2.0
1,331,943,284,067,081,500
29.324444
79
0.625531
false
3.6961
true
false
false
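A short illustration of the URI helpers above (editor's sketch, Python 2 to match the module); it assumes a mopidy checkout where mopidy.utils.path and its glib dependency can be imported.

from mopidy.utils import path  # assumed to be importable in a mopidy checkout

uri = path.path_to_uri(b'/music/a b.flac')
print uri                                              # file:///music/a%20b.flac
assert path.uri_to_path(uri) == b'/music/a b.flac'     # original bytes preserved
assert path.split_path(b'/music/a b.flac') == [b'music', b'a b.flac']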
paramsingh/lazycoin
funcs.py
1
1056
import socket def send_data(sock,data): sock.sendall(data) def receive_data(sock,size = 4096): data = bytes() while size: recv = sock.recv(size) if not recv: raise ConnectionError() data += recv size -= len(recv) return data def nDigit(s,size): s = str(s) if(len(s)<size): s = '0'*(size-len(s))+s return s def send_bytes(sock,data): size = nDigit(len(data),5).encode('utf-8') send_data(sock,size+data) def receive_bytes(sock): size = receive_data(sock,5).decode('utf-8') data = receive_data(sock,int(size)) return data def create_listening_socket(host,port,size): listening_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) listening_socket.bind((host,port)) listening_socket.listen(100) return listening_socket def receive_message(sock): size = receive_data(sock,5).decode('utf-8') msg = receive_data(sock,int(size)).decode('utf-8') return msg def send_message(sock,message): message = message.encode('utf-8') size = nDigit(len(message),5).encode('utf-8') message = size+message send_data(sock,message)
mit
6,221,665,157,644,711,000
21.956522
68
0.699811
false
2.633416
false
false
false
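The helpers above implement a tiny length-prefixed framing protocol: every payload is preceded by its size as a five-digit ASCII number. A quick way to see it in action (editor's illustration, assuming the file is importable as funcs) is to push messages through a local socket pair:

import socket

import funcs  # assumed module name for the file above

a, b = socket.socketpair()            # two connected sockets, no network needed
funcs.send_message(a, 'hello lazycoin')
print(funcs.receive_message(b))       # -> hello lazycoin

funcs.send_bytes(a, b'\x00\x01\x02')  # raw payloads use the same 5-digit prefix
print(funcs.receive_bytes(b))         # -> b'\x00\x01\x02'

a.close()
b.close()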
rarbg/ZeroNet
src/Worker/WorkerManager.py
1
7098
from Worker import Worker import gevent, time, logging, random MAX_WORKERS = 10 # Worker manager for site class WorkerManager: def __init__(self, site): self.site = site self.workers = {} # Key: ip:port, Value: Worker.Worker self.tasks = [] # {"evt": evt, "workers_num": 0, "site": self.site, "inner_path": inner_path, "done": False, "time_started": None, "time_added": time.time(), "peers": peers, "priority": 0, "failed": peer_ids} self.started_task_num = 0 # Last added task num self.running = True self.log = logging.getLogger("WorkerManager:%s" % self.site.address_short) self.process_taskchecker = gevent.spawn(self.checkTasks) def __str__(self): return "WorkerManager %s" % self.site.address_short def __repr__(self): return "<%s>" % self.__str__() # Check expired tasks def checkTasks(self): while self.running: tasks = task = worker = workers = None # Cleanup local variables time.sleep(15) # Check every 15 sec # Clean up workers for worker in self.workers.values(): if worker.task and worker.task["done"]: worker.skip() # Stop workers with task done if not self.tasks: continue tasks = self.tasks[:] # Copy it so removing elements wont cause any problem for task in tasks: if task["time_started"] and time.time() >= task["time_started"]+60: # Task taking too long time, skip it self.log.debug("Timeout, Skipping: %s" % task) # Skip to next file workers workers = self.findWorkers(task) if workers: for worker in workers: worker.skip() else: self.failTask(task) elif time.time() >= task["time_added"]+60 and not self.workers: # No workers left self.log.debug("Timeout, Cleanup task: %s" % task) # Remove task self.failTask(task) elif (task["time_started"] and time.time() >= task["time_started"]+15) or not self.workers: # Task started more than 15 sec ago or no workers self.log.debug("Task taking more than 15 secs, find more peers: %s" % task["inner_path"]) task["site"].announce() # Find more peers if task["peers"]: # Release the peer lock self.log.debug("Task peer lock release: %s" % task["inner_path"]) task["peers"] = [] self.startWorkers() break # One reannounce per loop self.log.debug("checkTasks stopped running") # Tasks sorted by this def taskSorter(self, task): if task["inner_path"] == "content.json": return 9999 # Content.json always prority if task["inner_path"] == "index.html": return 9998 # index.html also important priority = task["priority"] if task["inner_path"].endswith(".js") or task["inner_path"].endswith(".css"): priority += 1 # download js and css files first return priority-task["workers_num"] # Prefer more priority and less workers # Returns the next free or less worked task def getTask(self, peer): self.tasks.sort(key=self.taskSorter, reverse=True) # Sort tasks by priority and worker numbers for task in self.tasks: # Find a task if task["peers"] and peer not in task["peers"]: continue # This peer not allowed to pick this task if peer in task["failed"]: continue # Peer already tried to solve this, but failed return task # New peers added to site def onPeers(self): self.startWorkers() # Add new worker def addWorker(self, peer): key = peer.key if key not in self.workers and len(self.workers) < MAX_WORKERS: # We dont have worker for that peer and workers num less than max worker = Worker(self, peer) self.workers[key] = worker worker.key = key worker.start() return worker else: # We have woker for this peer or its over the limit return False # Start workers to process tasks def startWorkers(self, peers=None): if not self.tasks: return False # No task for workers if len(self.workers) >= 
MAX_WORKERS and not peers: return False # Workers number already maxed and no starting peers definied if not peers: peers = self.site.peers.values() # No peers definied, use any from site random.shuffle(peers) for peer in peers: # One worker for every peer if peers and peer not in peers: continue # If peers definied and peer not valid worker = self.addWorker(peer) if worker: self.log.debug("Added worker: %s, workers: %s/%s" % (peer.key, len(self.workers), MAX_WORKERS)) # Stop all worker def stopWorkers(self): for worker in self.workers.values(): worker.stop() tasks = self.tasks[:] # Copy for task in tasks: # Mark all current task as failed self.failTask(task) # Find workers by task def findWorkers(self, task): workers = [] for worker in self.workers.values(): if worker.task == task: workers.append(worker) return workers # Ends and remove a worker def removeWorker(self, worker): worker.running = False if worker.key in self.workers: del(self.workers[worker.key]) self.log.debug("Removed worker, workers: %s/%s" % (len(self.workers), MAX_WORKERS)) # Create new task and return asyncresult def addTask(self, inner_path, peer=None, priority = 0): self.site.onFileStart(inner_path) # First task, trigger site download started task = self.findTask(inner_path) if task: # Already has task for that file if peer and task["peers"]: # This peer also has new version, add it to task possible peers task["peers"].append(peer) self.log.debug("Added peer %s to %s" % (peer.key, task["inner_path"])) self.startWorkers([peer]) elif peer and peer in task["failed"]: task["failed"].remove(peer) # New update arrived, remove the peer from failed peers self.log.debug("Removed peer %s from failed %s" % (peer.key, task["inner_path"])) self.startWorkers([peer]) if priority: task["priority"] += priority # Boost on priority return task["evt"] else: # No task for that file yet evt = gevent.event.AsyncResult() if peer: peers = [peer] # Only download from this peer else: peers = None task = {"evt": evt, "workers_num": 0, "site": self.site, "inner_path": inner_path, "done": False, "time_added": time.time(), "time_started": None, "peers": peers, "priority": priority, "failed": []} self.tasks.append(task) self.started_task_num += 1 self.log.debug("New task: %s, peer lock: %s, priority: %s, tasks: %s" % (task["inner_path"], peers, priority, self.started_task_num)) self.startWorkers(peers) return evt # Find a task using inner_path def findTask(self, inner_path): for task in self.tasks: if task["inner_path"] == inner_path: return task return None # Not found # Mark a task failed def failTask(self, task): if task in self.tasks: task["done"] = True self.tasks.remove(task) # Remove from queue self.site.onFileFail(task["inner_path"]) task["evt"].set(False) if not self.tasks: self.started_task_num = 0 # Mark a task done def doneTask(self, task): task["done"] = True self.tasks.remove(task) # Remove from queue self.site.onFileDone(task["inner_path"]) task["evt"].set(True) if not self.tasks: self.started_task_num = 0 self.site.onComplete() # No more task trigger site complete
gpl-2.0
5,870,406,172,953,979,000
33.794118
210
0.67681
false
3.278522
false
false
false
ewels/MultiQC
multiqc/modules/biobambam2/biobambam2.py
1
1974
#!/usr/bin/env python """ MultiQC module to parse output from biobambam2 """ from __future__ import print_function from collections import OrderedDict import logging from multiqc.modules.base_module import BaseMultiqcModule from multiqc.modules.picard import MarkDuplicates # Initialise the logger log = logging.getLogger(__name__) class MultiqcModule(BaseMultiqcModule): """This module is super weird. The output from this tools is essentially identical to Picard MarkDuplicates, so we just hijack that module instead""" def __init__(self): # Initialise the parent object super(MultiqcModule, self).__init__( name="biobambam2", anchor="biobambam2", href="https://github.com/gt1/biobambam2", info="provides tools for early stage alignment file processing", ) # Set up class objects to hold parsed data self.general_stats_headers = OrderedDict() self.general_stats_data = dict() n = dict() n["bamsormadup"] = MarkDuplicates.parse_reports( self, log_key="biobambam2/bamsormadup", section_name="bamsormadup", section_anchor="biobambam2-bamsormadup", plot_title="biobambam2: bamsormadup deduplication stats", plot_id="biobambam2_bamsormadup_plot", data_filename="bamsormadup_bamsormadup", ) if n["bamsormadup"] > 0: log.info("Found {} bamsormadup reports".format(n["bamsormadup"])) # Exit if we didn't find anything if sum(n.values()) == 0: raise UserWarning # Add to the General Stats table (has to be called once per MultiQC module) self.general_stats_addcols(self.general_stats_data, self.general_stats_headers) # Helper functions def multiply_hundred(self, val): try: val = float(val) * 100 except ValueError: pass return val
gpl-3.0
4,941,387,552,553,416,000
32.457627
87
0.636778
false
3.825581
false
false
false
stevegt/librabinpoly
test/benchmark.py
1
1127
#!/usr/bin/python from ctypes import * import sys import timeit sys.path.append('..') import rabinpoly as lib fn = sys.argv[1] libc = CDLL("libc.so.6") fopen = libc.fopen fread = libc.fread feof = libc.feof fread.argtypes = [c_void_p, c_size_t, c_size_t, c_void_p] window_size = 32 min_block_size = 2**14 avg_block_size = 2**15 max_block_size = 2**16 buf_size = max_block_size*2 def run(): rp = lib.rp_init( window_size, avg_block_size, min_block_size, max_block_size) rpc = rp.contents buf = create_string_buffer(buf_size) fh = fopen(fn, "rb") total_size = 0 while True: if rpc.state & lib.RP_IN: fread_size = fread(buf, 1, buf_size, fh) rc = lib.rp_in(rp, buf, fread_size) assert rc == 1 if rpc.state & lib.RP_OUT: rc = lib.rp_out(rp) assert rc == 1 total_size += rpc.frag_size if rpc.state & lib.RP_RESET: assert feof(fh) break lib.rp_free(rp) print total_size print timeit.timeit('run()', setup="from __main__ import run", number=100) # run()
gpl-2.0
2,347,288,876,777,289,000
19.87037
67
0.573203
false
2.8175
false
false
false
DeStars/omniture_python
omniture_python/omniture_wrapper.py
1
2260
import binascii import urllib2 import json from hashlib import sha1 import base64 import datetime import calendar import time __author__ = 'DeStars' class OmnitureWrapper: def __init__(self, user_name, secret): self._user_name = user_name self._secret = secret def __create_header(self): """ Creates header for request :return: Header string """ utc_timestamp = datetime.datetime.utcnow() nonce = str(calendar.timegm(utc_timestamp.timetuple())) base64nonce = binascii.b2a_base64(binascii.a2b_qp(nonce)) created_on = utc_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ") sha_object = sha1(nonce + created_on + self._secret) password_64 = base64.b64encode(bytes(sha_object.digest())) return 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( self._user_name, password_64.strip(), base64nonce.strip(), created_on) def __get_request_data(self, request): request.add_header('X-WSSE', self.__create_header()) return json.loads(urllib2.urlopen(request).read(), encoding='utf-8') def send_request(self, method, request_data, retry_delay=15): """ Sends request to the endpoint :param method: String of method :param request_data: json object of request body :return: Response data """ request = urllib2.Request('https://api.omniture.com/admin/1.4/rest/?method=%s' % method, json.dumps(request_data)) try: return self.__get_request_data(request) except urllib2.HTTPError as e: print '{0}. Retrying in {1} seconds...'.format(e, retry_delay) time.sleep(retry_delay) return self.send_request(method, request_data) def retrieve_report(self, request, delay=5): """ Queues and retrieves the report :param request: json object of request body :return: Report data """ response = self.send_request(method='Report.Queue', request_data=request) time.sleep(delay) report = self.send_request(method='Report.Get', request_data={'reportID': response['reportID']}) return report
mit
-2,951,056,399,141,589,500
36.04918
104
0.615929
false
3.883162
false
false
false
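A hypothetical usage sketch for the wrapper above (editor's illustration; the report definition is an example request body, not something defined in this repository, and the import path omniture_python.omniture_wrapper is assumed):

from omniture_python.omniture_wrapper import OmnitureWrapper  # assumed import

client = OmnitureWrapper(user_name='api_user:company', secret='shared_secret')

# Example Report API 1.4 request body (illustrative values only).
report_description = {
    'reportDescription': {
        'reportSuiteID': 'examplesuite',
        'dateFrom': '2015-01-01',
        'dateTo': '2015-01-31',
        'metrics': [{'id': 'pageviews'}],
    }
}

# retrieve_report() queues the report (Report.Queue), waits `delay` seconds,
# then fetches it with Report.Get using the returned reportID.
report = client.retrieve_report(report_description, delay=10)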
idiotic/idiotic
idiotic/util/blocks/teapot.py
1
1895
from idiotic.util.resources import http from idiotic import block import logging import asyncio import aiohttp import time log = logging.getLogger(__name__) class Teapot(block.Block): def __init__(self, name, **config): super().__init__(name, **config) self.name = name self.config = {"address": "https://api.particle.io", "path": "/v1/devices/", "access_token": "", "device_id": "" } self.config.update(config) self.inputs = {"temperature": self.temperature, "hold": self.hold } self.require(http.HostReachable('api.particle.io', 443)) self.hold_start = 0 self.hold_duration = 0 async def temperature(self, value): log.debug("setting temp to %s", value) async with aiohttp.ClientSession() as client: async with client.post( "{}{}{}/set_temp".format(self.config['address'], self.config['path'], self.config['device_id']), data={'access_token': self.config['access_token'], 'args': str(value)} ) as request: await request.text() async def hold(self, value): log.debug("holding for %s", value) self.hold_start = time.time() self.hold_duration = value async def run(self): if (time.time() - self.hold_duration) < self.hold_start: async with aiohttp.ClientSession() as client: async with client.post( "{}{}{}/set_hold".format(self.config['address'], self.config['path'], self.config['device_id']), data={'access_token': self.config['access_token'], 'args': str(30)} ) as request: await request.text() await asyncio.sleep(5)
mit
-4,488,406,143,664,696,000
36.156863
120
0.530871
false
4.201774
true
false
false
mupi/escolamupi
payments/migrations/0001_initial.py
1
7873
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'UserPayments' db.create_table(u'payments_userpayments', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('payment_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.TimtecUser'])), ('payment_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), ('payment_status', self.gf('django.db.models.fields.CharField')(max_length=30)), )) db.send_create_signal(u'payments', ['UserPayments']) # Adding model 'Plans' db.create_table(u'payments_plans', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=30)), ('description', self.gf('django.db.models.fields.CharField')(max_length=300)), )) db.send_create_signal(u'payments', ['Plans']) # Adding model 'UserPlanData' db.create_table(u'payments_userplandata', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.TimtecUser'])), ('plan', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['payments.Plans'])), ('expiration_date', self.gf('django.db.models.fields.DateTimeField')(null=True)), ('last_payment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['payments.UserPayments'], null=True)), ('user_status', self.gf('django.db.models.fields.BooleanField')()), )) db.send_create_signal(u'payments', ['UserPlanData']) def backwards(self, orm): # Deleting model 'UserPayments' db.delete_table(u'payments_userpayments') # Deleting model 'Plans' db.delete_table(u'payments_plans') # Deleting model 'UserPlanData' db.delete_table(u'payments_userplandata') models = { u'accounts.timtecuser': { 'Meta': {'object_name': 'TimtecUser'}, 'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'picture': ('django.db.models.fields.files.ImageField', [], 
{'max_length': '100', 'blank': 'True'}), 'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'payments.plans': { 'Meta': {'object_name': 'Plans'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'payments.userpayments': { 'Meta': {'object_name': 'UserPayments'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'payment_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'payment_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"}) }, u'payments.userplandata': { 'Meta': {'object_name': 'UserPlanData'}, 'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.UserPayments']", 'null': 'True'}), 'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Plans']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"}), 'user_status': ('django.db.models.fields.BooleanField', [], {}) } } complete_apps = ['payments']
agpl-3.0
5,020,216,592,489,578,000
64.616667
195
0.573733
false
3.629783
false
false
false
wieden-kennedy/spring-jpa-hibernate-templator
generator/Generator.py
1
2025
from os import chdir,mkdir from os.path import join,exists class Generator: def __init__(self,generator_type,root_dir,basepackage,java_source_dir,model=None): if generator_type: self.type=generator_type if root_dir: self.root_dir=root_dir if basepackage: self.basepackage = basepackage self.basepackage_path=basepackage.replace('.','/') if root_dir and generator_type: self.template_dir=join(join(self.root_dir,'templates'),self.type) self.template_path=join(self.template_dir,'template.txt') if java_source_dir: self.java_source_dir = java_source_dir if model: self.model = model def generate_class(): pass def get_template_data(self): f=open(self.template_path,'r') template_data = f.readlines() f.close() return template_data def set_generator_type(self,generator_type): self.type=generator_type def set_out_path(self,path): self.out_path=path def set_template_dir(self,dir_path): self.template_dir=dir_path def set_licensing(self,license_text_path): self.licensing_path = license_text_path def write_license(self,out_file=None): if not out_file: out_file = self.out_file if hasattr(self,'licensing_path'): license_file = open(self.licensing_path,'r') license_data = license_file.readlines() license_file.close() for line in license_data: out_file.write(line) def generate(self): chdir(self.root_dir) chdir(self.java_source_dir) self.template_data = self.get_template_data() if not exists('./%s/%s' % (self.basepackage_path,self.type)): mkdir('./%s/%s' % (self.basepackage_path,self.type)) self.set_out_path('./%s/%s/%s.java' % (self.basepackage_path,self.type,self.model['model_name'])) self.generate_class()
bsd-3-clause
1,190,702,424,003,001,900
33.338983
105
0.601975
false
3.629032
false
false
false
youdar/work
work/Clashes/Old work/compare_delta.py
1
3072
from __future__ import division import Test_internal_clashscore import os,sys ''' Collect clash information from PROBE in resraints_manager and compare them ''' def get_files_data(): '''() -> list,list reads files RM_clash_results PROBE_clash_results in folder: C:\Phenix\Dev\Work\work\Clashes\junk Returns: RM_clash_dict,PROBE_clash_dict : two dictionaries containing the clash information from PROBE and resraints_manager clash_in_both,clash_only_MR,clash_only_PROBE : sets of clash keys ''' RM_clash_results = open('RM_clash_results','r').read().splitlines() PROBE_clash_results = open('PROBE_clash_results','r').read().splitlines() # RM_clash_dict = {} PROBE_clash_dict = {} clash_in_both = set() clash_only_MR = set() clash_only_PROBE = set() clash_MR = set() clash_PROBE = set() # for x in RM_clash_results: x = x.split('::') RM_clash_dict[x[0]] = [float(x[1]),float(x[2])] clash_MR.add(x[0]) for x in PROBE_clash_results: x = x.split('::') PROBE_clash_dict[x[0]] = float(x[1]) clash_PROBE.add(x[0]) # clash_in_both = clash_MR.intersection(clash_PROBE) clash_only_MR = clash_MR - clash_PROBE clash_only_PROBE = clash_PROBE - clash_MR return RM_clash_dict,PROBE_clash_dict,clash_in_both,clash_only_MR,clash_only_PROBE if __name__=='__main__': currentpath = os.getcwd() workpath = 'c:\\Phenix\\Dev\\Work\\work\\Clashes\\junk' os.chdir(workpath) # file_name = sys.argv[1] file_name = Test_internal_clashscore.get_new_file_name(file_name) nb_clashscore,clashscore_probe,time_internal,time_probe = Test_internal_clashscore.call_both_clashscores(file_name) output_file_name = Test_internal_clashscore.get_file_name(file_name) # RM_clash_dict,PROBE_clash_dict,clash_in_both,clash_only_MR,clash_only_PROBE = get_files_data() # Print clashes that are only in one of the methods: print '\nClash info for: {}'.format(output_file_name) print '='*80 print 'nonbonded_clashscore: {0:.3f}'.format(nb_clashscore[1]) print 'clashscore : {0:.3f}'.format(clashscore_probe) print '='*80 print 'Clashes that show up only in PROBE' print '-'*80 for rec in clash_only_PROBE: print '{0:30}{1:^14.3f}'.format(rec,PROBE_clash_dict[rec]) print '='*80 print 'Clashes that show up only in restraints_manager' print 'Note: those clashes do not include clashes due to symmetry operations' print '-'*80 for rec in clash_only_MR: print '{0:30}{1:^14.3f}'.format(rec,RM_clash_dict[rec][0]) print '='*80 # print 'Clashes in both' outstr = '{0:30}{1:^14.3f}{2:^14.3f}{3:^14.3f}{4:^14.3f}' print '{0:30}{1:^14}{2:^14}{3:^14}{4:^14}'.format('Clash','overlap RM','overlap RPROBE','diff','vdw') print '-'*80 for rec in clash_in_both: overlap_RM = RM_clash_dict[rec][0] vdw_RM = RM_clash_dict[rec][1] overlap_PROBE = PROBE_clash_dict[rec] print outstr.format(rec,overlap_RM,overlap_PROBE,overlap_RM-overlap_PROBE,vdw_RM) print '='*80 # os.chdir(currentpath) print 'Done'
mit
-8,545,692,545,632,096,000
32.769231
117
0.657552
false
2.860335
false
false
false
wannabegeek/PyFIX
pyfix/session.py
1
1093
import logging class FIXSession: def __init__(self, key, targetCompId, senderCompId): self.key = key self.senderCompId = senderCompId self.targetCompId = targetCompId self.sndSeqNum = 0 self.nextExpectedMsgSeqNum = 1 def validateCompIds(self, targetCompId, senderCompId): return self.senderCompId == senderCompId and self.targetCompId == targetCompId def allocateSndSeqNo(self): self.sndSeqNum += 1 return str(self.sndSeqNum) def validateRecvSeqNo(self, seqNo): if self.nextExpectedMsgSeqNum < int(seqNo): logging.warning("SeqNum from client unexpected (Rcvd: %s Expected: %s)" % (seqNo, self.nextExpectedMsgSeqNum)) return (False, self.nextExpectedMsgSeqNum) else: return (True, seqNo) def setRecvSeqNo(self, seqNo): # if self.nextExpectedMsgSeqNum != int(seqNo): # logging.warning("SeqNum from client unexpected (Rcvd: %s Expected: %s)" % (seqNo, self.nextExpectedMsgSeqNum)) self.nextExpectedMsgSeqNum = int(seqNo) + 1
cc0-1.0
8,573,742,107,478,617,000
35.433333
124
0.660567
false
3.583607
false
false
false
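The session object above tracks one sequence-number pair per CompID pair: outbound numbers are allocated as strings, inbound ones are validated against the next expected value. A small sketch of that behaviour (editor's illustration, assuming the class is importable from pyfix.session):

from pyfix.session import FIXSession  # assumed import path

session = FIXSession(key=1, targetCompId='BROKER', senderCompId='CLIENT')

assert session.validateCompIds('BROKER', 'CLIENT')
assert session.allocateSndSeqNo() == '1'      # outbound numbers start at 1
assert session.allocateSndSeqNo() == '2'

ok, seq = session.validateRecvSeqNo(1)        # first inbound message is fine
assert ok
session.setRecvSeqNo(1)                       # next expected becomes 2

ok, expected = session.validateRecvSeqNo(5)   # gap detected
assert not ok and expected == 2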
bladyjoker/python-oauth2
oauth2/messages.py
1
3611
## Authorization AuthorizationRequestParam = tuple AuthorizationRequestType = str AUTHCODE_AUTHREQTYPE = AuthorizationRequestType('code') IMPLICIT_AUTHREQTYPE = AuthorizationRequestType('token') class AuthorizationRequest(object): def __init__(self, request_type: AuthorizationRequestType, params: [AuthorizationRequestParam]): self.request_type = request_type self.params = params def code_auth_request(client_id, redirect_uri, scope, state): return AuthorizationRequest( AUTHCODE_AUTHREQTYPE, [ AuthorizationRequestParam(('client_id', client_id)), AuthorizationRequestParam(('redirect_uri', redirect_uri)), AuthorizationRequestParam(('scope', scope)), AuthorizationRequestParam(('state', state)) ] ) def implicit_auth_request(client_id, redirect_uri, scope, state): return AuthorizationRequest( IMPLICIT_AUTHREQTYPE, [ AuthorizationRequestParam(('client_id', client_id)), AuthorizationRequestParam(('redirect_uri', redirect_uri)), AuthorizationRequestParam(('scope', scope)), AuthorizationRequestParam(('state', state)) ] ) class CodeAuthorization(object): def __init__(self, code, state): self.code = code self.state = state class AuthorizationError(object): def __init__(self, error, error_description, error_uri, state): self.error = error self.error_description = error_description self.error_uri = error_uri self.state = state # Access token AccessTokenRequestParam = tuple AccessTokenRequestType = str AUTHCODE_TKNREQTYPE = AccessTokenRequestType('authorization_code') ROCREDS_TKNREQTYPE = AccessTokenRequestType('password') CLICREDS_TKNREQTYPE = AccessTokenRequestType('client_credentials') REFRESH_TKNREQTYPE = AccessTokenRequestType('refresh_token') class AccessTokenRequest(object): def __init__(self, request_type: AccessTokenRequestType, params: [AccessTokenRequestParam]): self.request_type = request_type self.params = params def code_tokenreq(code, redirect_uri, client_id): return AccessTokenRequest( AUTHCODE_TKNREQTYPE, [ AccessTokenRequestParam(('code', code)), AccessTokenRequestParam(('redirect_uri', redirect_uri)), AccessTokenRequestParam(('client_id', client_id)) ]) def resource_owner_pwd_creds_tokenreq(username, password, scope): return AccessTokenRequest( ROCREDS_TKNREQTYPE, [ AccessTokenRequestParam(('username', username)), AccessTokenRequestParam(('password', password)), AccessTokenRequestParam(('scope', scope)) ]) def client_credentials_tokenreq(scope): return AccessTokenRequest( CLICREDS_TKNREQTYPE, [AccessTokenRequestParam(('scope', scope))]) def refresh_tokenreq(refresh_token, scope): return AccessTokenRequest( REFRESH_TKNREQTYPE, [ AccessTokenRequestParam(('refresh_token', refresh_token)), AccessTokenRequestParam(('scope', scope)) ]) class AccessToken(object): def __init__(self, access_token: str, token_type: str, expires_in: int): self.access_token = access_token self.token_type = token_type self.expires_in = expires_in self.expires_on = 1#time!!! AccessTokenParam = tuple def refresh_tokenparams(refresh_token): return [AccessTokenParam(('refresh_token', refresh_token))] def state_tokenparams(state, scope): return [AccessTokenParam(('state', state)), AccessTokenParam(('scope', scope))] class AccessTokenError(object): def __init__(self, error: str, error_description: str, error_uri: str): self.error = error self.error_description = error_description self.error_uri = error_uri AccessTokenErrorParam = tuple def state_errorparams(state): return [AccessTokenErrorParam(('state', state))]
gpl-2.0
8,317,927,578,722,036,000
29.344538
97
0.751592
false
3.522927
false
false
false
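The module above is purely declarative, so building a request is just picking a constructor and reading back the parameter tuples. A minimal sketch (editor's illustration, assuming the file is importable as oauth2.messages):

from oauth2.messages import (AUTHCODE_AUTHREQTYPE, code_auth_request,
                             code_tokenreq)  # assumed import path

req = code_auth_request(client_id='my-client',
                        redirect_uri='https://example.com/callback',
                        scope='profile',
                        state='xyz123')
assert req.request_type == AUTHCODE_AUTHREQTYPE
assert dict(req.params)['client_id'] == 'my-client'   # params are (name, value) tuples

# After the redirect comes back with ?code=...&state=..., exchange it for a token:
token_req = code_tokenreq(code='auth-code-from-redirect',
                          redirect_uri='https://example.com/callback',
                          client_id='my-client')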
sandeepklr/gridmap
gridmap/job.py
1
35680
# -*- coding: utf-8 -*- # Written (W) 2008-2012 Christian Widmer # Written (W) 2008-2010 Cheng Soon Ong # Written (W) 2012-2014 Daniel Blanchard, dblanchard@ets.org # Copyright (C) 2008-2012 Max-Planck-Society, 2012-2014 ETS # This file is part of GridMap. # GridMap is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # GridMap is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with GridMap. If not, see <http://www.gnu.org/licenses/>. """ This module provides wrappers that simplify submission and collection of jobs, in a more 'pythonic' fashion. We use pyZMQ to provide a heart beat feature that allows close monitoring of submitted jobs and take appropriate action in case of failure. :author: Christian Widmer :author: Cheng Soon Ong :author: Dan Blanchard (dblanchard@ets.org) """ from __future__ import (absolute_import, division, print_function, unicode_literals) import inspect import logging import multiprocessing import os import smtplib import sys import traceback from datetime import datetime from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.image import MIMEImage from io import open from importlib import import_module from multiprocessing import Pool from socket import gethostname, gethostbyname, getaddrinfo, getfqdn from smtplib import (SMTPRecipientsRefused, SMTPHeloError, SMTPSenderRefused, SMTPDataError) import zmq from gridmap.conf import (CHECK_FREQUENCY, CREATE_PLOTS, DEFAULT_QUEUE, DRMAA_PRESENT, ERROR_MAIL_RECIPIENT, ERROR_MAIL_SENDER, HEARTBEAT_FREQUENCY, IDLE_THRESHOLD, MAX_IDLE_HEARTBEATS, MAX_TIME_BETWEEN_HEARTBEATS, NUM_RESUBMITS, SEND_ERROR_MAIL, SMTP_SERVER, USE_MEM_FREE, MAX_BOOTUP_TIME) from gridmap.data import zdumps, zloads from gridmap.runner import _heart_beat if DRMAA_PRESENT: from drmaa import (ExitTimeoutException, InvalidJobException, JobControlAction, JOB_IDS_SESSION_ALL, Session, TIMEOUT_NO_WAIT) # Python 2.x backward compatibility if sys.version_info < (3, 0): range = xrange # Setup back-end if we're using matplotlib if CREATE_PLOTS: import matplotlib matplotlib.use('AGG') import matplotlib.pyplot as plt # Placeholder string, since a job could potentially return None on purpose _JOB_NOT_FINISHED = '*@#%$*@#___GRIDMAP___NOT___DONE___@#%**#*$&*%' class JobException(Exception): ''' New exception type for when one of the jobs crashed. ''' pass class Job(object): """ Central entity that wraps a function and its data. Basically, a job consists of a function, its argument list, its keyword list and a field "ret" which is filled, when the execute method gets called. .. note:: This can only be used to wrap picklable functions (i.e., those that are defined at the module or class level). 
""" __slots__ = ('_f', 'args', 'id', 'kwlist', 'cleanup', 'ret', 'traceback', 'num_slots', 'mem_free', 'white_list', 'path', 'uniq_id', 'name', 'queue', 'environment', 'working_dir', 'cause_of_death', 'num_resubmits', 'home_address', 'log_stderr_fn', 'log_stdout_fn', 'timestamp', 'host_name', 'heart_beat', 'track_mem', 'track_cpu', 'submit_time') def __init__(self, f, args, kwlist=None, cleanup=True, mem_free="1G", name='gridmap_job', num_slots=1, queue=DEFAULT_QUEUE): """ Initializes a new Job. :param f: a function, which should be executed. :type f: function :param args: argument list of function f :type args: list :param kwlist: dictionary of keyword arguments for f :type kwlist: dict :param cleanup: flag that determines the cleanup of input and log file :type cleanup: boolean :param mem_free: Estimate of how much memory this job will need (for scheduling) :type mem_free: str :param name: Name to give this job :type name: str :param num_slots: Number of slots this job should use. :type num_slots: int :param queue: SGE queue to schedule job on. :type queue: str """ self.track_mem = [] self.track_cpu = [] self.heart_beat = None self.traceback = None self.host_name = '' self.timestamp = None self.log_stdout_fn = '' self.log_stderr_fn = '' self.home_address = '' self.num_resubmits = 0 self.cause_of_death = '' self.path = None self._f = None self.function = f self.args = args self.id = -1 self.kwlist = kwlist if kwlist is not None else {} self.cleanup = cleanup self.ret = _JOB_NOT_FINISHED self.num_slots = num_slots self.mem_free = mem_free self.white_list = [] self.name = name.replace(' ', '_') self.queue = queue # Save copy of environment variables self.environment = {} for env_var, value in os.environ.items(): try: if not isinstance(env_var, bytes): env_var = env_var.encode() if not isinstance(value, bytes): value = value.encode() except UnicodeEncodeError: logger = logging.getLogger(__name__) logger.warning('Skipping non-ASCII environment variable.') else: self.environment[env_var] = value self.working_dir = os.getcwd() self.submit_time = None @property def function(self): ''' Function this job will execute. ''' return self._f @function.setter def function(self, f): """ setter for function that carefully takes care of namespace, avoiding __main__ as a module """ m = inspect.getmodule(f) try: self.path = os.path.dirname(os.path.abspath( inspect.getsourcefile(f))) except TypeError: self.path = '' # if module is not __main__, all is good if m.__name__ != "__main__": self._f = f else: # determine real module name mn = os.path.splitext(os.path.basename(m.__file__))[0] # make sure module is present import_module(mn) # get module mod = sys.modules[mn] # set function from module self._f = getattr(mod, f.__name__) def execute(self): """ Executes function f with given arguments and writes return value to field ret. If an exception is encountered during execution, ret will contain a pickled version of it. Input data is removed after execution to save space. 
""" try: self.ret = self.function(*self.args, **self.kwlist) except Exception as exception: self.ret = exception self.traceback = traceback.format_exc() traceback.print_exc() @property def native_specification(self): """ define python-style getter """ ret = "-shell yes -b yes" if self.mem_free and USE_MEM_FREE: ret += " -l mem_free={}".format(self.mem_free) if self.num_slots and self.num_slots > 1: ret += " -pe smp {}".format(self.num_slots) if self.white_list: ret += " -l h={}".format('|'.join(self.white_list)) if self.queue: ret += " -q {}".format(self.queue) return ret ############################### # Job Submission and Monitoring ############################### class JobMonitor(object): """ Job monitor that communicates with other nodes via 0MQ. """ def __init__(self, session, temp_dir='/scratch', jobs=[], white_list="", max_processes=1): """ set up socket """ self.logger = logging.getLogger(__name__) context = zmq.Context() self.temp_dir = temp_dir self.socket = context.socket(zmq.REP) self.host_name = gethostname() self.ip_address = gethostbyname(self.host_name) for _, _, _, _, (ip, _) in getaddrinfo(getfqdn(), 0): if ip != '127.0.0.1': self.ip_address = ip self.interface = "tcp://%s" % (self.ip_address) break else: self.logger.warning('IP address for JobMonitor server is ' '127.0.0.1. Runners on other machines will be' ' unable to connect.') self.ip_address = '127.0.0.1' self.interface = "tcp://%s" % (self.ip_address) # bind to random port and remember it self.port = self.socket.bind_to_random_port(self.interface) self.home_address = "%s:%i" % (self.interface, self.port) self.logger.info("Setting up JobMonitor on %s", self.home_address) # uninitialized field (set in check method) self.jobs = jobs self.jobs_iter = iter(jobs) self.all_queued = False self.all_processed = False self.ids = [] self.session = session self.white_list = white_list self.max_processes = max_processes self.id_to_job = {} def __enter__(self): ''' Enable JobMonitor to be used as a context manager. ''' return self def __exit__(self, exc_type, exc_value, exc_tb): ''' Gracefully handle exceptions by terminating all jobs, and closing sockets. 
''' # Always close socket self.socket.close() # Clean up if we have a valid session if exc_type is not None: self.logger.info('Encountered %s, so killing all jobs.', exc_type.__name__) # try to kill off all old jobs try: self.session.control(JOB_IDS_SESSION_ALL, JobControlAction.TERMINATE) except InvalidJobException: self.self.logger.debug("Could not kill all jobs for " + "session.", exc_info=True) # Get rid of job info to prevent memory leak try: self.session.synchronize([JOB_IDS_SESSION_ALL], TIMEOUT_NO_WAIT, dispose=True) except ExitTimeoutException: pass def check(self): """ serves input and output data """ # save list of jobs # determines in which interval to check if jobs are alive self.logger.debug('Starting local hearbeat') local_heart = multiprocessing.Process(target=_heart_beat, args=(-1, self.home_address, -1, "", CHECK_FREQUENCY)) local_heart.start() try: self.logger.debug("Starting ZMQ event loop") # main loop while not self.all_jobs_done(): self.logger.debug('Waiting for message') msg_str = self.socket.recv() msg = zloads(msg_str) self.logger.debug('Received message: %s', msg) return_msg = "" job_id = msg["job_id"] # only if its not the local beat if job_id != -1: # If message is from a valid job, process that message if job_id in self.id_to_job: job = self.id_to_job[job_id] if msg["command"] == "fetch_input": return_msg = self.id_to_job[job_id] job.timestamp = datetime.now() self.logger.debug("Received input request from %s", job_id) if msg["command"] == "store_output": # be nice return_msg = "thanks" # store tmp job object if isinstance(msg["data"], Job): tmp_job = msg["data"] # copy relevant fields job.ret = tmp_job.ret job.traceback = tmp_job.traceback self.logger.info("Received output from %s", job_id) # Returned exception instead of job, so store that elif isinstance(msg["data"], tuple): job.ret, job.traceback = msg["data"] self.logger.info("Received exception from %s", job_id) else: self.logger.error(("Received message with " + "invalid data: %s"), msg) job.ret = msg["data"] job.timestamp = datetime.now() if msg["command"] == "heart_beat": job.heart_beat = msg["data"] # keep track of mem and cpu try: job.track_mem.append(job.heart_beat["memory"]) job.track_cpu.append(job.heart_beat["cpu_load"]) except (ValueError, TypeError): self.logger.error("Error decoding heart-beat", exc_info=True) return_msg = "all good" job.timestamp = datetime.now() if msg["command"] == "get_job": # serve job for display return_msg = job else: # update host name job.host_name = msg["host_name"] # If this is an unknown job, report it and reply else: self.logger.error(('Received message from unknown job' + ' with ID %s. Known job IDs are: ' + '%s'), job_id, list(self.id_to_job.keys())) return_msg = 'thanks, but no thanks' else: # run check self.check_if_alive() if msg["command"] == "get_jobs": # serve list of jobs for display return_msg = self.jobs # send back compressed response self.logger.debug('Sending reply: %s', return_msg) self.socket.send(zdumps(return_msg)) finally: # Kill child processes that we don't need anymore local_heart.terminate() def check_job_status(self, job): """ check if jobs should be removed from the queue. check if jobs are alive and determine cause of death if not. 
""" remove_from_queue = False # noting was returned yet if job.ret == _JOB_NOT_FINISHED: # exclude first-timers if job.timestamp is not None: # check heart-beats if there was a long delay current_time = datetime.now() time_delta = current_time - job.timestamp if time_delta.seconds > MAX_TIME_BETWEEN_HEARTBEATS: self.logger.debug("It has been %s seconds since we " + "received a message from job %s", time_delta.seconds, job.id) self.logger.error("Job died for unknown reason") job.cause_of_death = "unknown" elif (len(job.track_cpu) > MAX_IDLE_HEARTBEATS and all(cpu_load <= IDLE_THRESHOLD and not running for cpu_load, running in job.track_cpu[-MAX_IDLE_HEARTBEATS:])): self.logger.error('Job stalled for unknown reason.') job.cause_of_death = 'stalled' else: # Job was submitted a long time back but never got scheduled. Never received a message from Job. current_time = datetime.now() time_delta = current_time - job.submit_time if time_delta.seconds > MAX_BOOTUP_TIME: self.logger.debug("Job %s didn't spin up in %s", job.id, time_delta.seconds) self.logger.error("Job didn't spin up in time.") job.cause_of_death = "nospinup" # could have been an exception, we check right away elif isinstance(job.ret, Exception): job.cause_of_death = 'exception' # Send error email, in addition to raising and logging exception if SEND_ERROR_MAIL: send_error_mail(job) # Format traceback much like joblib does self.logger.error("-" * 80) self.logger.error("GridMap job traceback for %s:", job.name) self.logger.error("-" * 80) self.logger.error("Exception: %s", type(job.ret).__name__) self.logger.error("Job ID: %s", job.id) self.logger.error("Host: %s", job.host_name) self.logger.error("." * 80) self.logger.error(job.traceback) # raise job.ret else: # Job returned. Remove from queue. remove_from_queue = True # attempt to resubmit if job.cause_of_death: self.logger.info("Creating error report") # send report if SEND_ERROR_MAIL: send_error_mail(job) # try to resubmit old_id = job.id job.track_cpu = [] job.track_mem = [] try: handle_resubmit(self.session, job, temp_dir=self.temp_dir) except JobException: """ This job has already hit maximum number of resubmits. Remove from queue. 
""" remove_from_queue = True else: # Update job ID if successfully resubmitted self.logger.info('Resubmitted job %s; it now has ID %s', old_id, job.id) del self.id_to_job[old_id] self.id_to_job[job.id] = job return remove_from_queue def check_if_alive(self): self.logger.debug('Checking if jobs are alive') n = len(self.id_to_job) for jid, job in self.id_to_job.items(): remove_from_queue = self.check_job_status(job) if remove_from_queue: del self.id_to_job[jid] n -= 1 if self.all_queued and n == 0: self.all_processed = True elif not self.all_queued: while n < self.max_processes: try: job = next(self.jobs_iter) except StopIteration: self.all_queued = True break else: job.white_list = self.white_list job.home_address = self.home_address _append_job_to_session(self.session, job) self.id_to_job[job.id] = job n += 1 def all_jobs_done(self): return self.all_processed def send_error_mail(job): """ send out diagnostic email """ logger = logging.getLogger(__name__) # Connect to server try: s = smtplib.SMTP(SMTP_SERVER) except smtplib.SMTPConnectError: logger.error('Failed to connect to SMTP server to send error ' + 'email.', exc_info=True) return # create message msg = MIMEMultipart() msg["subject"] = "GridMap error {}".format(job.name) msg["From"] = ERROR_MAIL_SENDER msg["To"] = ERROR_MAIL_RECIPIENT # compose error message body_text = "" body_text += "Job {}\n".format(job.name) body_text += "Last timestamp: {}\n".format(job.timestamp) body_text += "Resubmissions: {}\n".format(job.num_resubmits) body_text += "Cause of death: {}\n".format(job.cause_of_death) if job.heart_beat: body_text += "Last memory usage: {}\n".format(job.heart_beat["memory"]) body_text += "Last cpu load: {}\n".format(job.heart_beat["cpu_load"][0]) body_text += ("Process was running at last check: " + "{}\n\n").format(job.heart_beat["cpu_load"][1]) body_text += "Host: {}\n\n".format(job.host_name) if isinstance(job.ret, Exception): body_text += "Job encountered exception: {}\n".format(job.ret) body_text += "Stacktrace: {}\n\n".format(job.traceback) logger.info('Email body: %s', body_text) body_msg = MIMEText(body_text) msg.attach(body_msg) # attach log file if job.heart_beat and "log_file" in job.heart_beat: log_file_attachement = MIMEText(job.heart_beat['log_file']) log_file_attachement.add_header('Content-Disposition', 'attachment', filename='{}_log.txt'.format(job.id)) msg.attach(log_file_attachement) # if matplotlib is installed if CREATE_PLOTS: #TODO: plot to cstring directly (some code is there) #imgData = cStringIO.StringIO() #plt.savefig(imgData, format='png') # rewind the data #imgData.seek(0) #plt.savefig(imgData, format="png") time = [HEARTBEAT_FREQUENCY * i for i in range(len(job.track_mem))] # attack mem plot img_mem_fn = os.path.join('/tmp', "{}_mem.png".format(job.id)) plt.figure(1) plt.plot(time, job.track_mem, "-o") plt.xlabel("time (s)") plt.ylabel("memory usage") plt.savefig(img_mem_fn) plt.close() with open(img_mem_fn, "rb") as img_mem: img_data = img_mem.read() img_mem_attachement = MIMEImage(img_data) img_mem_attachement.add_header('Content-Disposition', 'attachment', filename=os.path.basename(img_mem_fn)) msg.attach(img_mem_attachement) # attach cpu plot img_cpu_fn = os.path.join("/tmp", "{}_cpu.png".format(job.id)) plt.figure(2) plt.plot(time, [cpu_load for cpu_load, _ in job.track_cpu], "-o") plt.xlabel("time (s)") plt.ylabel("cpu load") plt.savefig(img_cpu_fn) plt.close() with open(img_cpu_fn, "rb") as img_cpu: img_data = img_cpu.read() img_cpu_attachement = MIMEImage(img_data) 
img_cpu_attachement.add_header('Content-Disposition', 'attachment', filename=os.path.basename(img_cpu_fn)) msg.attach(img_cpu_attachement) # Send mail try: s.sendmail(ERROR_MAIL_SENDER, ERROR_MAIL_RECIPIENT, msg.as_string()) except (SMTPRecipientsRefused, SMTPHeloError, SMTPSenderRefused, SMTPDataError): logger.error('Failed to send error email.', exc_info=True) # Clean up plot temporary files if CREATE_PLOTS: os.unlink(img_cpu_fn) os.unlink(img_mem_fn) s.quit() def handle_resubmit(session, job, temp_dir='/scratch/'): """ heuristic to determine if the job should be resubmitted side-effect: job.num_resubmits incremented job.id set to new ID """ # reset some fields job.timestamp = None job.heart_beat = None if job.num_resubmits < NUM_RESUBMITS: logger = logging.getLogger(__name__) logger.warning("Looks like job %s (%s) died an unnatural death, " + "resubmitting (previous resubmits = %i)", job.name, job.id, job.num_resubmits) # remove node from white_list node_name = '{}@{}'.format(job.queue, job.host_name) if job.white_list and node_name in job.white_list: job.white_list.remove(node_name) # increment number of resubmits job.num_resubmits += 1 job.cause_of_death = "" _resubmit(session, job, temp_dir) else: # Try killing off job just so you do not have a zombie job that returns later _killjob(session, job) raise JobException(("Job {0} ({1}) failed after {2} " + "resubmissions").format(job.name, job.id, NUM_RESUBMITS)) def _execute(job): """ Cannot pickle method instances, so fake a function. Used by _process_jobs_locally """ job.execute() return job.ret def _process_jobs_locally(jobs, max_processes=1): """ Local execution using the package multiprocessing, if present :param jobs: jobs to be executed :type jobs: list of Job :param max_processes: maximal number of processes :type max_processes: int :return: list of jobs, each with return in job.ret :rtype: list of Job """ logger = logging.getLogger(__name__) logger.info("using %i processes", max_processes) if max_processes == 1: # perform sequential computation for job in jobs: job.execute() else: pool = Pool(max_processes) result = pool.map(_execute, jobs) for ret_val, job in zip(result, jobs): job.ret = ret_val pool.close() pool.join() return jobs def _submit_jobs(jobs, home_address, temp_dir='/scratch', white_list=None, quiet=True): """ Method used to send a list of jobs onto the cluster. :param jobs: list of jobs to be executed :type jobs: list of `Job` :param home_address: Full address (including IP and port) of JobMonitor on submitting host. Running jobs will communicate with the parent process at that address via ZMQ. :type home_address: str :param temp_dir: Local temporary directory for storing output for an individual job. :type temp_dir: str :param white_list: List of acceptable nodes to use for scheduling job. If None, all are used. :type white_list: list of str :param quiet: When true, do not output information about the jobs that have been submitted. :type quiet: bool :returns: Session ID """ with Session() as session: for job in jobs: # set job white list job.white_list = white_list # remember address of submission host job.home_address = home_address # append jobs _append_job_to_session(session, job, temp_dir=temp_dir, quiet=quiet) sid = session.contact return sid def _append_job_to_session(temp_dir='/scratch/', quiet=True): """ For an active session, append new job based on information stored in job object. Also sets job.id to the ID of the job on the grid. :param session: The current DRMAA session with the grid engine. 
:type session: Session :param job: The Job to add to the queue. :type job: `Job` :param temp_dir: Local temporary directory for storing output for an individual job. :type temp_dir: str :param quiet: When true, do not output information about the jobs that have been submitted. :type quiet: bool """ global _append_job_to_session def _append_job_to_session(session, job): jt = session.createJobTemplate() logger = logging.getLogger(__name__) # logger.debug('{}'.format(job.environment)) jt.jobEnvironment = job.environment # Run module using python -m to avoid ImportErrors when unpickling jobs jt.remoteCommand = sys.executable jt.args = ['-m', 'gridmap.runner', '{}'.format(job.home_address), job.path] jt.nativeSpecification = job.native_specification jt.jobName = job.name jt.workingDirectory = job.working_dir jt.outputPath = ":{}".format(temp_dir) jt.errorPath = ":{}".format(temp_dir) # Create temp directory if necessary if not os.path.exists(temp_dir): try: os.makedirs(temp_dir) except OSError: logger.warning(("Failed to create temporary directory " + "{}. Your jobs may not start " + "correctly.").format(temp_dir)) job_id = session.runJob(jt) # set job fields that depend on the job_id assigned by grid engine job.id = job_id job.log_stdout_fn = os.path.join(temp_dir, '{}.o{}'.format(job.name, job_id)) job.log_stderr_fn = os.path.join(temp_dir, '{}.e{}'.format(job.name, job_id)) job.submit_time = datetime.now() if not quiet: print('Your job {} has been submitted with id {}'.format(job.name, job_id), file=sys.stderr) session.deleteJobTemplate(jt) def process_jobs(jobs, temp_dir='/scratch/', white_list=None, quiet=True, max_processes=1, local=False): """ Take a list of jobs and process them on the cluster. :param jobs: Jobs to run. :type jobs: list of Job :param temp_dir: Local temporary directory for storing output for an individual job. :type temp_dir: str :param white_list: If specified, limit nodes used to only those in list. :type white_list: list of str :param quiet: When true, do not output information about the jobs that have been submitted. :type quiet: bool :param max_processes: The maximum number of concurrent processes to use if processing jobs locally. :type max_processes: int :param local: Should we execute the jobs locally in separate processes instead of on the the cluster? :type local: bool :returns: List of Job results """ if (not local and not DRMAA_PRESENT): logger = logging.getLogger(__name__) logger.warning('Could not import drmaa. Processing jobs locally.') local = True if not local: # initialize _append_to_session function _append_job_to_session(temp_dir=temp_dir, quiet=quiet) # initialize session and monitor to get port number with Session() as session, JobMonitor(session=session, temp_dir=temp_dir, jobs=jobs, white_list=white_list, max_processes=max_processes) as monitor: # handling of inputs, outputs and heartbeats monitor.check() else: _process_jobs_locally(jobs, max_processes=max_processes) return [job.ret for job in jobs] def _killjob(session, job): logger = logging.getLogger(__name__) try: session.control(job.id, JobControlAction.TERMINATE) logger.info("zombie job killed") except Exception: logger.error("Could not kill job with SGE id %s", job.id, exc_info=True) def _resubmit(session, job, temp_dir): """ Resubmit a failed job. 
:returns: ID of new job """ logger = logging.getLogger(__name__) logger.info("starting resubmission process") if DRMAA_PRESENT: # append to session # try to kill off old job _killjob(session, job) # create new job _append_job_to_session(session, job) else: logger.error("Could not restart job because we're in local mode.") ##################### # MapReduce Interface ##################### def grid_map(f, args_list, cleanup=True, mem_free="1G", name='gridmap_job', num_slots=1, temp_dir='/scratch/', white_list=None, queue=DEFAULT_QUEUE, quiet=True, local=False, max_processes=1): """ Maps a function onto the cluster. .. note:: This can only be used with picklable functions (i.e., those that are defined at the module or class level). :param f: The function to map on args_list :type f: function :param args_list: List of arguments to pass to f :type args_list: list :param cleanup: Should we remove the stdout and stderr temporary files for each job when we're done? (They are left in place if there's an error.) :type cleanup: bool :param mem_free: Estimate of how much memory each job will need (for scheduling). (Not currently used, because our cluster does not have that setting enabled.) :type mem_free: str :param name: Base name to give each job (will have a number add to end) :type name: str :param num_slots: Number of slots each job should use. :type num_slots: int :param temp_dir: Local temporary directory for storing output for an individual job. :type temp_dir: str :param white_list: If specified, limit nodes used to only those in list. :type white_list: list of str :param queue: The SGE queue to use for scheduling. :type queue: str :param quiet: When true, do not output information about the jobs that have been submitted. :type quiet: bool :param local: Should we execute the jobs locally in separate processes instead of on the the cluster? :type local: bool :param max_processes: The maximum number of concurrent processes to use if processing jobs locally. :type max_processes: int :returns: List of Job results """ # construct jobs jobs = [Job(f, [args] if not isinstance(args, list) else args, cleanup=cleanup, mem_free=mem_free, name='{}{}'.format(name, job_num), num_slots=num_slots, queue=queue) for job_num, args in enumerate(args_list)] # process jobs job_results = process_jobs(jobs, temp_dir=temp_dir, white_list=white_list, quiet=quiet, local=local, max_processes=max_processes) return job_results
gpl-3.0
7,930,880,811,626,348,000
35.745623
112
0.544142
false
4.213012
false
false
false
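For a quick end-to-end check of the job machinery in gridmap/job.py, the simplest entry point is grid_map with local=True, which skips DRMAA and runs the jobs in local worker processes. A minimal sketch, assuming the gridmap package is installed and re-exports grid_map at package level (as released versions do):

```python
from gridmap import grid_map  # assumption: package-level re-export of gridmap.job.grid_map

def compute(x):
    # defined at module level so the job can be pickled for the worker processes
    return x * x

if __name__ == '__main__':
    # local=True uses multiprocessing instead of submitting to the grid engine
    results = grid_map(compute, [1, 2, 3, 4], local=True, max_processes=2, quiet=True)
    print(results)  # [1, 4, 9, 16]
```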
jmanoel7/my_dot_files
.local/share/gedit/plugins/xmlhelper.py
1
14182
# -*- coding: utf8 -*- # XML Helper for GEdit # # Copyright (c) 2007 Matej Cepl <matej@ceplovi.cz> # Additional Author : Ajith Sen G. S. <ajithsengs@gmail.com> # Additional Author : Oliver Sauder <os@esite.ch> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import gettext import sys from gi.repository import GObject, Gedit, Gio, Gtk from libxmlhelper import GUI, XML from lxml import etree Debug = False def print_debug(string): if Debug: print >> sys.stderr, string class Endness: end = 0 start = 1 single = 2 class XMLHelperAppActivatable(GObject.Object, Gedit.AppActivatable): __gtype_name__ = 'XMLHelperPlugin' app = GObject.property(type=Gedit.App) def __init__(self): GObject.Object.__init__(self) def do_activate(self): self.app.set_accels_for_action("win.end_tag", ['<Ctrl>e']) self.app.set_accels_for_action("win.last_tag", ['<Ctrl>r']) self.app.set_accels_for_action("win.indent_xml", ['<Ctrl><Shift>f']) # Translate actions below, hardcoding domain here to avoid # complications now lambda s: gettext.dgettext('devhelp', s) self.menu_ext = self.extend_menu("tools-section") menu_model = Gio.Menu() for menu_name, action in ( ('EndTag', 'end_tag'), ('LastTag', 'previous_tag'), ('ValidateXML', 'validate_xml'), ('ValidateSchema', 'validate_schema'), ('ConvertXML', 'convert_xml'), ('GenerateXML', 'generate_xml'), ('IndentXML', 'indent_xml'), ): item = Gio.MenuItem.new(_(menu_name), "win.%s" % action) menu_model.append_item(item) submenu = Gio.MenuItem.new_submenu('_XML Helper', menu_model) self.menu_ext.append_menu_item(submenu) def do_deactivate(self): self.app.set_accels_for_action("win.end_tag", []) self.app.set_accels_for_action("win.last_tag", []) self.app.set_accels_for_action("win.indent_xml", []) self.menu_ext = None class XMLHelperWindowActivatable(GObject.Object, Gedit.WindowActivatable): window = GObject.property(type=Gedit.Window) def __init__(self): GObject.Object.__init__(self) self._dialog = None def do_activate(self): action = Gio.SimpleAction(name="end_tag") action.connect('activate', self.end_tag) self.window.add_action(action) action = Gio.SimpleAction(name="last_tag") action.connect('activate', self.previous_tag) self.window.add_action(action) action = Gio.SimpleAction(name="validate_xml") action.connect('activate', self.validate_xml) self.window.add_action(action) action = Gio.SimpleAction(name="validate_schema") action.connect('activate', self.validate_schema) self.window.add_action(action) action = Gio.SimpleAction(name="convert_xml") action.connect('activate', self.convert_xml) 
self.window.add_action(action) action = Gio.SimpleAction(name="generate_xml") action.connect('activate', self.generate_xml) self.window.add_action(action) action = Gio.SimpleAction(name="indent_xml") action.connect('activate', self.indent_xml) self.window.add_action(action) # run whatever needs to be run FIXME self._update() def do_deactivate(self): pass def do_update_state(self): # update state self._update() def __get_tag(self, iter): if not(iter.forward_char()): raise RuntimeError("we are in trouble") search_ret = iter.forward_search(">", Gtk.TextSearchFlags.TEXT_ONLY, None) if search_ret: beg_end, end_end = search_ret ret_str = iter.get_text(beg_end) if (ret_str[-1] == "/") or (ret_str[:3] == "!--"): has_end_tag = Endness.single ret_str = ret_str.rstrip("/") elif ret_str[0] == "/": has_end_tag = Endness.end ret_str = ret_str.lstrip("/") else: has_end_tag = Endness.start # cut element's parameters ret_str = ret_str.split()[0] print_debug("tag found is %s and the value of has_end_tag is %s" % (ret_str, has_end_tag)) return ret_str, has_end_tag else: raise IOError("Never ending tag at line %d" % (iter.get_line()+1)) def find_last_endable_tag(self, position): tag_stack = [] res = position.backward_search("<", Gtk.TextSearchFlags.TEXT_ONLY, None) while res: start_match, end_match = res tag, is_end_tag = self.__get_tag(start_match) if is_end_tag == Endness.end: tag_stack.append(tag) print_debug("Push tag '%s'" % tag) elif is_end_tag == Endness.single: print_debug("Ignoring single tag '%s'" % tag) elif len(tag_stack) != 0: # stack not empty popped_tag = tag_stack.pop() print_debug("Popped tag '%s'" % popped_tag) if popped_tag != tag: raise IOError("mismatching tags.\nFound %s" + " and expecting %s." % (tag, popped_tag)) else: # stack is empty and this is not end tag == we found it print_debug("We found tag '%s'" % tag) return tag start_match.backward_char() res = start_match.backward_search("<", Gtk.TextSearchFlags.TEXT_ONLY, None) # not totally sure what following means, but doesn't look right to me if len(tag_stack) != 0: raise IOError("whatever") if not(res): # There is no open tag in the current buffer return None def end_tag(self, a, b): buffer = self.window.get_active_view().get_buffer() inp_mark = buffer.get_iter_at_mark(buffer.get_insert()) tagname = self.find_last_endable_tag(inp_mark) if tagname: buffer.insert(inp_mark, '</%s>' % tagname) def previous_tag(self, a, b): buffer = self.window.get_active_view().get_buffer() inp_mark = buffer.get_iter_at_mark(buffer.get_insert()) res = inp_mark.backward_search("<", Gtk.TextSearchFlags.TEXT_ONLY, None) if res: start_match, end_match = res tag, is_end_tag = self.__get_tag(start_match) if is_end_tag == Endness.end: buffer.insert(inp_mark, '<%s>' % tag) # # store per window data in the window object # windowdata = dict() # window.set_data("XMLHelperWindowDataKey", windowdata) # windowdata["ui_id"] = manager.add_ui_from_string(end_tag_str) def _remove_menu(self): manager = self.window.get_ui_manager() manager.remove_ui(self._ui_id) manager.remove_action_group(self._action_group) manager.ensure_update() def _update(self): tab = self.window.get_active_tab() # self._action_group.set_sensitive(tab is not None) if not tab and self._dialog and \ self._dialog.get_transient_for() == self.window: self._dialog.response(Gtk.ResponseType.CLOSE) def validate_xml(self, action): """Function for validating an XML document against a W3C schema""" g = GUI() msg = "Save before continuing" if g.msgBox(msg, cont=True) is False: return """Select a W3C schema""" msg 
= "Select Schema File" file_desc = "XML Schema" file_type = "*.xsd" schema_file = g.openFile(msg, file_desc, file_type) if schema_file is False: return xml_file = self.window.get_active_document().get_uri_for_display() """Validate the XML document against the W3C schema""" try: xmlschema_doc = etree.parse(schema_file) xmlschema = etree.XMLSchema(xmlschema_doc) except Exception, e: msg = schema_file+" is not a valid schema file\n\n"+str(e) else: try: inp_doc = etree.parse(xml_file) except Exception, e: error_message = "%s: %s" % (e.__class__.__name__, e.msg) msg = xml_file + " is not a valid XML file\n\n"+error_message else: try: xmlschema.assertValid(inp_doc) except Exception, e: msg = "This document is not valid for " + \ schema_file + "\n\n"+str(e) else: msg = "This document is valid for " + schema_file g.msgBox(msg) def validate_schema(self, action): """Function for validating a W3C schema""" g = GUI() schema_file = self.window.get_active_document().get_uri_for_display() """Validate the Schema against W3C standards""" try: xmlschema_doc = etree.parse(schema_file) etree.XMLSchema(xmlschema_doc) except Exception as e: msg = "Invalid Schema\n\n"+str(e) else: msg = "Valid Schema" g.msgBox(msg) def convert_xml(self, action): """Function for generating a schema file from an XML document""" g = GUI() x = XML() input_file = self.window.get_active_document().get_uri_for_display() """Validate the XML Document against W3C standards""" try: tree = etree.parse(open(input_file)) except Exception, e: error_message = "%s: %s" % (e.__class__.__name__, e.msg) msg = input_file + " is not a valid XML file\n\n" + error_message g.msgBox(msg) return msg = "Save before continuing" if g.msgBox(msg, cont=True) is False: return """Select a schema file to save to""" msg = "Save Schema File" file_desc = "XML Schema" file_type = "*.xsd" output_file = g.saveFile(msg, file_desc, file_type) if output_file is False: return x.root_schema_element = x.create_schema_element('schema', root_element=True) x.root_schema_element.attrib['elementFormDefault'] = "qualified" root_element = tree.getroot() x.xml_walk(root_element, x.xml_pass1) x.xml_walk(root_element, x.xml_pass2) x.xml_walk(root_element, x.xml_pass3) x.xml_walk(root_element, x.xml_pass4) doc = etree.ElementTree(x.root_schema_element) x.writeFile(output_file, doc) def generate_xml(self, action): """Function for generating an XML document from a W3C schema""" g = GUI() x = XML() """Validate the Schema against W3C standards""" input_file = self.window.get_active_document().get_uri_for_display() try: tree = etree.parse(open(input_file)) except Exception, e: error_message = "%s: %s" % (e.__class__.__name__, e.msg) msg = input_file + " is not a valid Schema file\n\n" + \ error_message g.msgBox(msg) return msg = "Save before continuing" if g.msgBox(msg, cont=True) is False: return """Select an XML file to save to""" msg = "Save XML File" file_desc = "XML Document" file_type = "*.xml" output_file = g.saveFile(msg, file_desc, file_type) if output_file is False: return root_element = tree.getroot() x.xml_walk(root_element, x.schema_preprocess) root = x.create_random_element(x.ROOT_ELEMENT) if root is False: return doc = etree.ElementTree(root) x.writeFile(output_file, doc) def indent_xml(self, action, b): """Function for indenting an XML document""" doc = self.window.get_active_document() text = doc.get_text(doc.get_start_iter(), doc.get_end_iter(), True) xml_document = etree.fromstring(text) encoding = doc.get_encoding().get_charset() xml = etree.tostring(xml_document, 
pretty_print=True, xml_declaration=True, encoding=encoding) doc.set_text(xml) def deactivate(self): windowdata = self.window.get_data("XMLHelperWindowDataKey") manager = self.window.get_ui_manager() manager.remove_ui(windowdata["ui_id"]) manager.remove_action_group(windowdata["action_group"]) def update_ui(self): view = self.window.get_active_view() windowdata = self.window.get_data("XMLHelperWindowDataKey") windowdata["action_group"].set_sensitive(bool(view and view.get_editable()))
gpl-3.0
-6,545,584,575,914,101,000
35.645995
78
0.570582
false
3.815443
false
false
false
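The indent_xml action in the plugin above is essentially a round trip through lxml with pretty_print enabled. The same transformation can be tried outside GEdit in a few lines; the sample document is invented for illustration:

```python
from lxml import etree

raw = b"<config><item name='a'>1</item><item name='b'/></config>"
doc = etree.fromstring(raw)
pretty = etree.tostring(doc, pretty_print=True, xml_declaration=True, encoding="UTF-8")
print(pretty.decode("utf-8"))
```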
monetario/core
monetario/views/api/decorators.py
1
4822
import functools from flask import jsonify as flask_jsonify from flask import request from flask import url_for def jsonify(exclude=None): """ This decorator generates a JSON response from a Python dictionary or a SQLAlchemy model. """ def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): rv = f(*args, **kwargs) status_or_headers = {} headers = None if isinstance(rv, tuple): rv, status_or_headers, headers = rv + (None,) * (3 - len(rv)) if isinstance(status_or_headers, (dict, list)): headers, status_or_headers = status_or_headers, None if not isinstance(rv, dict): # assume it is a model, call its to_json() method rv = rv.to_json(exclude=exclude).data rv = flask_jsonify(rv) if status_or_headers is not None: rv.status_code = status_or_headers if headers is not None: rv.headers.extend(headers) return rv return wrapped return decorator def _filter_query(model, query, filter_spec): filters = [f.split(',') for f in filter_spec.split(';')] for f in filters: if len(f) < 3 or (len(f) > 3 and f[1] != 'in'): continue if f[1] == 'in': f = [f[0], f[1], f[2:]] ops = {'eq': '__eq__', 'ne': '__ne__', 'lt': '__lt__', 'le': '__le__', 'gt': '__gt__', 'ge': '__ge__', 'in': 'in_', 'like': 'like'} if hasattr(model, f[0]) and f[1] in ops.keys(): column = getattr(model, f[0]) op = ops[f[1]] query = query.filter(getattr(column, op)(f[2])) return query def _sort_query(model, query, sort_spec): sort = [s.split(',') for s in sort_spec.split(';')] for s in sort: if hasattr(model, s[0]): column = getattr(model, s[0]) if len(s) == 2 and s[1] in ['asc', 'desc']: query = query.order_by(getattr(column, s[1])()) else: query = query.order_by(column.asc()) return query def collection(model, name=None, max_per_page=10, exclude=None): """ This decorator implements pagination, filtering, sorting and expanding for collections. The expected response from the decorated route is a SQLAlchemy query. 
""" if name is None: name = model.__tablename__ def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): query = f(*args, **kwargs) p, meta = get_pagination(model, query, max_per_page, **kwargs) expand = request.args.get('expand') if expand: items = [item.to_json(exclude=exclude).data for item in p.items] else: items = [item.resource_url for item in p.items] return {'objects': items, 'meta': meta} # return {name: items, 'meta': meta} return wrapped return decorator def get_pagination(model, query, max_per_page=10, **kwargs): # filtering and sorting filter = request.args.get('filter') if filter: query = _filter_query(model, query, filter) sort = request.args.get('sort') if sort: query = _sort_query(model, query, sort) # pagination page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', max_per_page, type=int), max_per_page) expand = request.args.get('expand') p = query.paginate(page, per_page) pages = {'page': page, 'per_page': per_page, 'total': p.total, 'pages': p.pages} if p.has_prev: pages['prev_url'] = url_for( request.endpoint, filter=filter, sort=sort, page=p.prev_num, per_page=per_page, expand=expand, _external=True, **kwargs ) else: pages['prev_url'] = None if p.has_next: pages['next_url'] = url_for( request.endpoint, filter=filter, sort=sort, page=p.next_num, per_page=per_page, expand=expand, _external=True, **kwargs ) else: pages['next_url'] = None pages['first_url'] = url_for( request.endpoint, filter=filter, sort=sort, page=1, per_page=per_page, expand=expand, _external=True, **kwargs ) pages['last_url'] = url_for( request.endpoint, filter=filter, sort=sort, page=p.pages or 1, per_page=per_page, expand=expand, _external=True, **kwargs ) return p, pages
bsd-3-clause
1,515,050,096,615,621,600
29.327044
80
0.518872
false
3.784929
false
false
false
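The collection decorator above is driven entirely by query-string parameters: filter uses a field,op,value grammar with ';' between clauses, sort takes field,asc|desc pairs, and page/per_page/expand control pagination. A hedged sketch of the kind of URL a client would build against such an endpoint (host and resource name are placeholders):

```python
from urllib.parse import urlencode

query = urlencode({
    "filter": "amount,gt,100;currency,eq,USD",  # field,op,value clauses joined by ';'
    "sort": "created,desc",                     # field,direction pairs joined by ';'
    "page": 2,
    "per_page": 20,
    "expand": 1,                                # full objects instead of resource URLs
})
print("https://api.example.com/records?" + query)
```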
dbuch/systemd
man/90-rearrange-path.py
1
1400
#!/usr/bin/python3 """ Proof-of-concept systemd environment generator that makes sure that bin dirs are always after matching sbin dirs in the path. (Changes /sbin:/bin:/foo/bar to /bin:/sbin:/foo/bar.) This generator shows how to override the configuration possibly created by earlier generators. It would be easier to write in bash, but let's have it in Python just to prove that we can, and to serve as a template for more interesting generators. """ import os import pathlib def rearrange_bin_sbin(path): """Make sure any pair of …/bin, …/sbin directories is in this order >>> rearrange_bin_sbin('/bin:/sbin:/usr/sbin:/usr/bin') '/bin:/sbin:/usr/bin:/usr/sbin' """ items = [pathlib.Path(p) for p in path.split(':')] for i in range(len(items)): if 'sbin' in items[i].parts: ind = items[i].parts.index('sbin') bin = pathlib.Path(*items[i].parts[:ind], 'bin', *items[i].parts[ind+1:]) if bin in items[i+1:]: j = i + 1 + items[i+1:].index(bin) items[i], items[j] = items[j], items[i] return ':'.join(p.as_posix() for p in items) if __name__ == '__main__': path = os.environ['PATH'] # This should be always set. # If it's not, we'll just crash, we is OK too. new = rearrange_bin_sbin(path) if new != path: print('PATH={}'.format(new))
gpl-2.0
-3,990,370,177,937,099,000
33.9
85
0.609599
false
3.371981
false
false
false
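Because the script above only acts as an environment generator when run as __main__, the easiest way to exercise it is to invoke it with a crafted PATH and read what it prints. A small sketch, assuming the generator has been saved as 90-rearrange-path.py in the current directory:

```python
import os
import subprocess

env = dict(os.environ, PATH="/usr/sbin:/usr/bin:/opt/tools/bin")
result = subprocess.run(["python3", "90-rearrange-path.py"],
                        env=env, capture_output=True, text=True)
# Prints "PATH=/usr/bin:/usr/sbin:/opt/tools/bin"; the script stays silent if no change is needed.
print(result.stdout, end="")
```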
he7d3r/revscoring
revscoring/features/wikitext/datasources/sentences.py
1
1893
from deltas.segmenters import MatchableSegment from revscoring.datasources import Datasource from revscoring.datasources.meta import indexable class Revision: def __init__(self, name, revision_datasources): super().__init__(name, revision_datasources) self.sentences = Datasource( self._name + ".sentences", psw2sentences, depends_on=[self.paragraphs_sentences_and_whitespace] ) """ A list of "sentences" extracted from the text. """ class Diff(): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.sentences_added_removed = Datasource( self._name + ".sentences_added_removed", set_diff, depends_on=[self.revision.sentences, self.revision.parent.sentences] ) self.sentences_added = indexable.index( 0, self.sentences_added_removed, name=self._name + ".sentences_added" ) """ A set of sentences that were added in this edit """ self.sentences_removed = indexable.index( 1, self.sentences_added_removed, name=self._name + ".sentences_removed" ) """ A set of sentences that were removed in this edit """ def psw2sentences(segments): sentences = [] for paragraph_or_whitespace in segments: if isinstance(paragraph_or_whitespace, MatchableSegment): paragraph = paragraph_or_whitespace # We have a paragraph for sentence_or_whitespace in paragraph: if isinstance(sentence_or_whitespace, MatchableSegment): sentence = sentence_or_whitespace # We have a sentence sentences.append(sentence) return sentences def set_diff(a, b): a, b = set(a), set(b) return (a - b, b - a)
mit
-941,986,875,014,066,800
29.047619
75
0.593767
false
4.273138
false
false
false
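The set_diff helper at the end of that module is what ultimately powers sentences_added and sentences_removed: it returns (a - b, b - a) over the two sentence collections. A standalone illustration with invented sentences:

```python
def set_diff(a, b):
    # same semantics as the helper in the snippet above
    a, b = set(a), set(b)
    return (a - b, b - a)

current = ["The cat sat.", "It was black.", "It purred."]
parent = ["The cat sat.", "It was grey."]

added, removed = set_diff(current, parent)
print(sorted(added))    # ['It purred.', 'It was black.']
print(sorted(removed))  # ['It was grey.']
```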
jimsize/PySolFC
pysollib/games/matriarchy.py
1
8074
#!/usr/bin/env python # -*- mode: python; coding: utf-8; -*- # ---------------------------------------------------------------------------## # # Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer # Copyright (C) 2003 Mt. Hood Playing Card Co. # Copyright (C) 2005-2009 Skomoroh # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # ---------------------------------------------------------------------------## # imports # PySol imports from pysollib.mygettext import _ from pysollib.gamedb import registerGame, GameInfo, GI from pysollib.game import Game from pysollib.layout import Layout from pysollib.hint import CautiousDefaultHint from pysollib.pysoltk import MfxCanvasText from pysollib.util import KING, QUEEN, VARIABLE_REDEALS from pysollib.stack import \ Stack, \ WasteStack, \ WasteTalonStack, \ SS_RowStack # ************************************************************************ # * Talon # ************************************************************************ class Matriarchy_Waste(WasteStack): def updateText(self): WasteStack.updateText(self) if self.game.s.talon._updateMaxRounds(): self.game.s.talon.updateText() class Matriarchy_Talon(WasteTalonStack): DEAL = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5) def _updateMaxRounds(self): # recompute max_rounds old = self.max_rounds self.max_rounds = 11 rows = self.game.s.rows for i in (0, 2, 4, 6): l1 = len(rows[i+0].cards) + len(rows[i+8].cards) l2 = len(rows[i+1].cards) + len(rows[i+9].cards) assert l1 + l2 <= 26 if l1 + l2 == 26: self.max_rounds = self.max_rounds + 2 elif l1 >= 13 or l2 >= 13: self.max_rounds = self.max_rounds + 1 if self.max_rounds == 19: # game is won self.max_rounds = 18 return old != self.max_rounds def canDealCards(self): if self._updateMaxRounds(): self.updateText() if not self.cards and not self.game.s.waste.cards: return False ncards = self.DEAL[self.round-1] assert ncards > 0 return len(self.cards) >= ncards or self.round < self.max_rounds def dealCards(self, sound=False): # get number of cards to deal ncards = self.DEAL[self.round-1] assert ncards > 0 # init waste = self.game.s.waste n = 0 update_flags = 1 # deal if self.cards: if sound and not self.game.demo: self.game.playSample("dealwaste") while n < ncards: # from self to waste while n < ncards: card = self.getCard() if not card: break assert not card.face_up self.game.flipMove(self) self.game.moveMove(1, self, waste, frames=3, shadow=0) n = n + 1 # turn from waste to self if n < ncards and len(waste.cards) > 0: assert len(self.cards) == 0 assert self.round < self.max_rounds or update_flags == 0 if sound: self.game.playSample("turnwaste", priority=20) self.game.turnStackMove(waste, self) if update_flags: self.game.nextRoundMove(self) # do not update self.round anymore in this deal update_flags = 0 assert self.round <= self.max_rounds assert n == ncards assert len(self.game.s.waste.cards) > 0 # done return n def updateText(self): if self.game.preview > 1: 
return WasteTalonStack.updateText(self, update_rounds=0) # t = "Round %d" % self.round t = _("Round %d/%d") % (self.round, self.max_rounds) self.texts.rounds.config(text=t) t = _("Deal %d") % self.DEAL[self.round-1] self.texts.misc.config(text=t) # ************************************************************************ # * Rows # ************************************************************************ class Matriarchy_UpRowStack(SS_RowStack): def __init__(self, x, y, game, suit): SS_RowStack.__init__(self, x, y, game, suit=suit, base_rank=KING, mod=13, dir=1, min_cards=1, max_cards=12) self.CARD_YOFFSET = -self.CARD_YOFFSET getBottomImage = Stack._getSuitBottomImage class Matriarchy_DownRowStack(SS_RowStack): def __init__(self, x, y, game, suit): SS_RowStack.__init__(self, x, y, game, suit=suit, base_rank=QUEEN, mod=13, dir=-1, min_cards=1, max_cards=12) getBottomImage = Stack._getSuitBottomImage # ************************************************************************ # * Matriarchy # ************************************************************************ class Matriarchy(Game): Hint_Class = CautiousDefaultHint # # game layout # def createGame(self): # create layout l, s = Layout(self), self.s # set window # (set piles so that at least 2/3 of a card is visible with 12 cards) h = max(2*l.YS, (12-1)*l.YOFFSET + l.CH*2//3) self.setSize(10*l.XS+l.XM, h + l.YM + h) # create stacks # center, c1, c2 = self.height // 2, h, self.height - h center = self.height // 2 c1, c2 = center-l.TEXT_HEIGHT//2, center+l.TEXT_HEIGHT//2 x, y = l.XM, c1 - l.CH for i in range(8): s.rows.append(Matriarchy_UpRowStack(x, y, self, i//2)) x = x + l.XS x, y = l.XM, c2 for i in range(8): s.rows.append(Matriarchy_DownRowStack(x, y, self, i//2)) x = x + l.XS x, y = x + l.XS // 2, c1 - l.CH // 2 - l.CH tx = x + l.CW // 2 s.waste = Matriarchy_Waste(x, y, self) l.createText(s.waste, "s") y = c2 + l.CH // 2 s.talon = Matriarchy_Talon(x, y, self, max_rounds=VARIABLE_REDEALS) l.createText(s.talon, "n") l.createRoundText(s.talon, 'ss') s.talon.texts.misc = MfxCanvasText( self.canvas, tx, center, anchor="center", font=self.app.getFont("canvas_large")) # define stack-groups l.defaultStackGroups() # # game overrides # def _shuffleHook(self, cards): # move Queens to top of the Talon (i.e. first cards to be dealt) return self._shuffleHookMoveToTop( cards, lambda c: (c.rank == 11, c.suit), 8) def startGame(self): self.startDealSample() self.s.talon.dealRow(self.s.rows[8:]) self.s.talon.dealCards() # deal first cards to WasteStack def isGameWon(self): return len(self.s.talon.cards) == 0 and len(self.s.waste.cards) == 0 def shallHighlightMatch(self, stack1, card1, stack2, card2): if card1.rank + card2.rank == QUEEN + KING: return False return (card1.suit == card2.suit and ((card1.rank + 1) % 13 == card2.rank or (card2.rank + 1) % 13 == card1.rank)) # register the game registerGame(GameInfo(17, Matriarchy, "Matriarchy", GI.GT_2DECK_TYPE, 2, VARIABLE_REDEALS, GI.SL_BALANCED))
gpl-3.0
-6,562,917,565,485,467,000
33.801724
79
0.523037
false
3.466724
false
false
false
GoogleCloudPlatform/gsutil
gslib/impersonation_credentials.py
1
2566
# -*- coding: utf-8 -*- # Copyright 2019 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of credentials that refreshes using the iamcredentials API.""" from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals import datetime from oauth2client import client from gslib.iamcredentials_api import IamcredentailsApi class ImpersonationCredentials(client.OAuth2Credentials): _EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ' def __init__(self, service_account_id, scopes, credentials, logger): self._service_account_id = service_account_id self.api = IamcredentailsApi(logger, credentials) response = self.api.GenerateAccessToken(service_account_id, scopes) self.access_token = response.accessToken self.token_expiry = self._ConvertExpiryTime(response.expireTime) super(ImpersonationCredentials, self).__init__(self.access_token, None, None, None, self.token_expiry, None, None, scopes=scopes) @property def service_account_id(self): return self._service_account_id def _refresh(self, http): # client.Oauth2Credentials converts scopes into a set, so we need to convert # back to a list before making the API request. response = self.api.GenerateAccessToken(self._service_account_id, list(self.scopes)) self.access_token = response.accessToken self.token_expiry = self._ConvertExpiryTime(response.expireTime) def _ConvertExpiryTime(self, value): return datetime.datetime.strptime(value, ImpersonationCredentials._EXPIRY_FORMAT)
apache-2.0
-9,101,236,373,902,733,000
39.730159
80
0.625877
false
4.623423
false
false
false
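The one fiddly detail in ImpersonationCredentials is converting the iamcredentials expireTime string into a datetime for oauth2client's expiry tracking. A small standalone check of that conversion, with an invented timestamp in the same shape:

```python
import datetime

_EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'   # same format string the class above uses

expire_time = '2024-05-01T12:30:00Z'    # invented; mimics GenerateAccessToken's expireTime
token_expiry = datetime.datetime.strptime(expire_time, _EXPIRY_FORMAT)
print(token_expiry.isoformat())         # 2024-05-01T12:30:00
```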
tytso/xfstests-bld
kvm-xfstests/test-appliance/files/usr/lib/python3/dist-packages/junitparser/cli.py
2
1595
from argparse import ArgumentParser from glob import iglob from itertools import chain from . import JUnitXml, version def merge(paths, output): """Merge xml report.""" result = JUnitXml() for path in paths: result += JUnitXml.fromfile(path) result.update_statistics() result.write(output) return 0 def _parser(prog_name=None): # pragma: no cover """Create the CLI arg parser.""" parser = ArgumentParser(description="Junitparser CLI helper.", prog=prog_name) parser.add_argument( "-v", "--version", action="version", version="%(prog)s " + version ) command_parser = parser.add_subparsers(dest="command", help="command") command_parser.required = True # command: merge merge_parser = command_parser.add_parser( "merge", help="Merge Junit XML format reports with junitparser." ) merge_parser.add_argument( "--glob", help="Treat original XML path(s) as glob(s).", dest="paths_are_globs", action="store_true", default=False, ) merge_parser.add_argument("paths", nargs="+", help="Original XML path(s).") merge_parser.add_argument("output", help="Merged XML Path.") return parser def main(args=None, prog_name=None): """CLI's main runner.""" args = args or _parser(prog_name=prog_name).parse_args() if args.command == "merge": return merge( chain.from_iterable(iglob(path) for path in args.paths) if args.paths_are_globs else args.paths, args.output, ) return 255
gpl-2.0
4,698,021,028,848,066,000
26.982456
82
0.626332
false
3.852657
false
false
false
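The merge subcommand above is a thin wrapper over junitparser's XML model, so the same merge can be done programmatically. A sketch, assuming the junitparser package is installed; the report directory and output name are placeholders:

```python
from glob import iglob
from junitparser import JUnitXml

merged = JUnitXml()
for path in iglob("reports/*.xml"):      # placeholder report directory
    merged += JUnitXml.fromfile(path)

merged.update_statistics()
merged.write("merged-results.xml")
```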
pmonta/GNSS-DSP-tools
acquire-galileo-e5aq.py
1
3231
#!/usr/bin/env python import optparse import numpy as np import scipy.signal import scipy.fftpack as fft import gnsstools.galileo.e5aq as e5aq import gnsstools.nco as nco import gnsstools.io as io import gnsstools.util as util # # Acquisition search # def search(x,prn,doppler_search,ms): fs = 3*10230000.0 n = 3*10230 # 1 ms coherent integration doppler_min, doppler_max, doppler_incr = doppler_search incr = float(e5aq.code_length)/n c = e5aq.code(prn,0,0,incr,n) # obtain samples of the E5b-Q code c = fft.fft(np.concatenate((c,np.zeros(n)))) m_metric,m_code,m_doppler = 0,0,0 for doppler in np.arange(doppler_min,doppler_max,doppler_incr): # doppler bins q = np.zeros(2*n) w = nco.nco(-doppler/fs,0,2*n) for block in range(ms): # incoherent sums b = x[(block*n):((block+2)*n)] b = b*w r = fft.ifft(c*np.conj(fft.fft(b))) q = q + np.absolute(r) idx = np.argmax(q) if q[idx]>m_metric: m_metric = q[idx] m_code = e5aq.code_length*(float(idx)/n) m_doppler = doppler m_code = m_code%e5aq.code_length return m_metric,m_code,m_doppler # # main program # parser = optparse.OptionParser(usage="""acquire-galileo-e5aq.py [options] input_filename sample_rate carrier_offset Acquire Galileo E5aq signals Examples: Acquire all Galileo PRNs using standard input with sample rate 69.984 MHz and carrier offset -15.191625 MHz: acquire-galileo-e5aq.py /dev/stdin 69984000 -15191625 Arguments: input_filename input data file, i/q interleaved, 8 bit signed sample_rate sampling rate in Hz carrier_offset offset to E5a carrier in Hz (positive or negative)""") parser.disable_interspersed_args() parser.add_option("--prn", default="1-50", help="PRNs to search, e.g. 1,3,7-14,31 (default %default)") parser.add_option("--doppler-search", metavar="MIN,MAX,INCR", default="-9000,9000,200", help="Doppler search grid: min,max,increment (default %default)") parser.add_option("--time", type="int", default=80, help="integration time in milliseconds (default %default)") (options, args) = parser.parse_args() filename = args[0] fs = float(args[1]) coffset = float(args[2]) prns = util.parse_list_ranges(options.prn) doppler_search = util.parse_list_floats(options.doppler_search) ms = options.time # read first portion of file ms_pad = ms + 5 n = int(fs*0.001*ms_pad) fp = open(filename,"rb") x = io.get_samples_complex(fp,n) # resample to 3*10.230 MHz fsr = 3*10230000.0/fs nco.mix(x,-coffset/fs,0) h = scipy.signal.firwin(161,12e6/(fs/2),window='hanning') x = scipy.signal.filtfilt(h,[1],x) xr = np.interp((1/fsr)*np.arange(ms_pad*3*10230),np.arange(len(x)),np.real(x)) xi = np.interp((1/fsr)*np.arange(ms_pad*3*10230),np.arange(len(x)),np.imag(x)) x = xr+(1j)*xi # iterate (in parallel) over PRNs of interest def worker(p): x,prn = p metric,code,doppler = search(x,prn,doppler_search,ms) return 'prn %2d doppler % 7.1f metric % 7.1f code_offset %6.1f' % (prn,doppler,metric,code) import multiprocessing as mp cpus = mp.cpu_count() results = mp.Pool(cpus).map(worker, map(lambda prn: (x,prn),prns)) for r in results: print(r)
mit
-4,065,531,073,665,987,000
30.067308
153
0.672547
false
2.622565
false
false
false
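The core of search() above is correlation in the frequency domain: multiply the FFT of the zero-padded code replica by the conjugate FFT of the Doppler-wiped samples and look for a peak, whose index gives the code offset. A self-contained toy version of that trick with a synthetic signal, no GNSS data required:

```python
import numpy as np

rng = np.random.default_rng(0)
code = rng.standard_normal(1024)      # stand-in for one period of the local code replica
samples = np.roll(code, -37)          # "received" signal: the same code, circularly shifted

# circular correlation via FFT, the same trick search() uses with its padded code
corr = np.fft.ifft(np.fft.fft(code) * np.conj(np.fft.fft(samples)))
print(np.argmax(np.abs(corr)))        # -> 37, the code offset in samples
```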
macosforge/ccs-calendarserver
contrib/tools/tables.py
1
9609
## # Copyright (c) 2009-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from sys import stdout import types class Table(object): """ Class that allows pretty printing ascii tables. The table supports multiline headers and footers, independent column formatting by row, alternative tab-delimited output. """ class ColumnFormat(object): """ Defines the format string, justification and span for a column. """ LEFT_JUSTIFY = 0 RIGHT_JUSTIFY = 1 CENTER_JUSTIFY = 2 def __init__(self, strFormat="%s", justify=LEFT_JUSTIFY, span=1): self.format = strFormat self.justify = justify self.span = span def __init__(self, table=None): self.headers = [] self.headerColumnFormats = [] self.rows = [] self.footers = [] self.footerColumnFormats = [] self.columnCount = 0 self.defaultColumnFormats = [] self.columnFormatsByRow = {} if table: self.setData(table) def setData(self, table): self.hasTitles = True self.headers.append(table[0]) self.rows = table[1:] self._getMaxColumnCount() def setDefaultColumnFormats(self, columnFormats): self.defaultColumnFormats = columnFormats def addDefaultColumnFormat(self, columnFormat): self.defaultColumnFormats.append(columnFormat) def setHeaders(self, rows, columnFormats=None): self.headers = rows self.headerColumnFormats = columnFormats if columnFormats else [None, ] * len(self.headers) self._getMaxColumnCount() def addHeader(self, row, columnFormats=None): self.headers.append(row) self.headerColumnFormats.append(columnFormats) self._getMaxColumnCount() def addHeaderDivider(self, skipColumns=()): self.headers.append((None, skipColumns,)) self.headerColumnFormats.append(None) def setFooters(self, row, columnFormats=None): self.footers = row self.footerColumnFormats = columnFormats if columnFormats else [None, ] * len(self.footers) self._getMaxColumnCount() def addFooter(self, row, columnFormats=None): self.footers.append(row) self.footerColumnFormats.append(columnFormats) self._getMaxColumnCount() def addRow(self, row=None, columnFormats=None): self.rows.append(row) if columnFormats: self.columnFormatsByRow[len(self.rows) - 1] = columnFormats self._getMaxColumnCount() def addDivider(self, skipColumns=()): self.rows.append((None, skipColumns,)) def printTable(self, os=stdout): maxWidths = self._getMaxWidths() self.printDivider(os, maxWidths, False) if self.headers: for header, format in zip(self.headers, self.headerColumnFormats): self.printRow(os, header, self._getHeaderColumnFormat(format), maxWidths) self.printDivider(os, maxWidths) for ctr, row in enumerate(self.rows): self.printRow(os, row, self._getColumnFormatForRow(ctr), maxWidths) if self.footers: self.printDivider(os, maxWidths, double=True) for footer, format in zip(self.footers, self.footerColumnFormats): self.printRow(os, footer, self._getFooterColumnFormat(format), maxWidths) self.printDivider(os, maxWidths, False) def printRow(self, os, row, format, maxWidths): if row is None or type(row) is tuple and row[0] is None: self.printDivider(os, maxWidths, skipColumns=row[1] if type(row) is tuple 
else ()) else: if len(row) != len(maxWidths): row = list(row) row.extend([""] * (len(maxWidths) - len(row))) t = "|" ctr = 0 while ctr < len(row): startCtr = ctr maxWidth = 0 for _ignore_span in xrange(format[startCtr].span if format else 1): maxWidth += maxWidths[ctr] ctr += 1 maxWidth += 3 * ((format[startCtr].span - 1) if format else 0) text = self._columnText(row, startCtr, format, width=maxWidth) t += " " + text + " |" t += "\n" os.write(t) def printDivider(self, os, maxWidths, intermediate=True, double=False, skipColumns=()): t = "|" if intermediate else "+" for widthctr, width in enumerate(maxWidths): if widthctr in skipColumns: c = " " else: c = "=" if double else "-" t += c * (width + 2) t += "+" if widthctr < len(maxWidths) - 1 else ("|" if intermediate else "+") t += "\n" os.write(t) def printTabDelimitedData(self, os=stdout, footer=True): if self.headers: titles = [""] * len(self.headers[0]) for row, header in enumerate(self.headers): for col, item in enumerate(header): titles[col] += (" " if row and item else "") + item self.printTabDelimitedRow(os, titles, self._getHeaderColumnFormat(self.headerColumnFormats[0])) for ctr, row in enumerate(self.rows): self.printTabDelimitedRow(os, row, self._getColumnFormatForRow(ctr)) if self.footers and footer: for footer in self.footers: self.printTabDelimitedRow(os, footer, self._getFooterColumnFormat(self.footerColumnFormats[0])) def printTabDelimitedRow(self, os, row, format): if row is None: row = [""] * self.columnCount if len(row) != self.columnCount: row = list(row) row.extend([""] * (self.columnCount - len(row))) textItems = [self._columnText(row, ctr, format) for ctr in xrange((len(row)))] os.write("\t".join(textItems) + "\n") def _getMaxColumnCount(self): self.columnCount = 0 if self.headers: for header in self.headers: self.columnCount = max(self.columnCount, len(header) if header else 0) for row in self.rows: self.columnCount = max(self.columnCount, len(row) if row else 0) if self.footers: for footer in self.footers: self.columnCount = max(self.columnCount, len(footer) if footer else 0) def _getMaxWidths(self): maxWidths = [0] * self.columnCount if self.headers: for header, format in zip(self.headers, self.headerColumnFormats): self._updateMaxWidthsFromRow(header, self._getHeaderColumnFormat(format), maxWidths) for ctr, row in enumerate(self.rows): self._updateMaxWidthsFromRow(row, self._getColumnFormatForRow(ctr), maxWidths) if self.footers: for footer, format in zip(self.footers, self.footerColumnFormats): self._updateMaxWidthsFromRow(footer, self._getFooterColumnFormat(format), maxWidths) return maxWidths def _updateMaxWidthsFromRow(self, row, format, maxWidths): if row and (type(row) is not tuple or row[0] is not None): ctr = 0 while ctr < len(row): text = self._columnText(row, ctr, format) startCtr = ctr for _ignore_span in xrange(format[startCtr].span if format else 1): maxWidths[ctr] = max(maxWidths[ctr], len(text) / (format[startCtr].span if format else 1)) ctr += 1 def _getHeaderColumnFormat(self, format): if format: return format else: justify = Table.ColumnFormat.CENTER_JUSTIFY if len(self.headers) == 1 else Table.ColumnFormat.LEFT_JUSTIFY return [Table.ColumnFormat(justify=justify)] * self.columnCount def _getFooterColumnFormat(self, format): if format: return format else: return self.defaultColumnFormats def _getColumnFormatForRow(self, ctr): if ctr in self.columnFormatsByRow: return self.columnFormatsByRow[ctr] else: return self.defaultColumnFormats def _columnText(self, row, column, format, width=0): if row is 
None or column >= len(row): return "" colData = row[column] if colData is None: colData = "" columnFormat = format[column] if format and column < len(format) else Table.ColumnFormat() if type(colData) in types.StringTypes: text = colData else: text = columnFormat.format % colData if width: if columnFormat.justify == Table.ColumnFormat.LEFT_JUSTIFY: text = text.ljust(width) elif columnFormat.justify == Table.ColumnFormat.RIGHT_JUSTIFY: text = text.rjust(width) elif columnFormat.justify == Table.ColumnFormat.CENTER_JUSTIFY: text = text.center(width) return text
apache-2.0
-7,932,836,984,542,855,000
33.941818
118
0.605058
false
3.939729
false
false
false
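A minimal usage sketch for the Table class in the record above. The module targets Python 2 (it relies on xrange and types.StringTypes), and the rows below are invented purely for illustration.

table = Table()
table.addHeader(["Component", "Requests", "Errors"])
table.addRow(["frontend", 1042, 3])
table.addRow(["backend", 873, 0])
table.addDivider()
table.addFooter(["Total", 1915, 3])
table.printTable()  # draws the boxed ascii table on stdout

The class also offers printTabDelimitedData() for tab-delimited output, as its docstring notes, which suits spreadsheet-bound output better than a terminal.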
yalewoosoft/shadowsocks
speedtest_thread.py
1
7027
#!/usr/bin/python # -*- coding: UTF-8 -*- import logging import time import sys import os import configloader import importloader from speedtest import speedtest from shadowsocks import common, shell class Speedtest(object): def __init__(self): import threading self.event = threading.Event() def speedtest_thread(self): if self.event.wait(600): return logging.info("Speedtest starting...You can't stop right now!") CTid = 0 speedtest_ct = speedtest.Speedtest() speedtest_ct.get_servers() servers_list = [] for _, servers in sorted(speedtest_ct.servers.items()): for server in servers: if server['country'].find( 'China') != -1 and server['sponsor'].find('Telecom') != -1: servers_list.append(server) speedtest_ct.get_best_server(servers_list) results_ct = speedtest_ct.results CTPing = str(results_ct.server['latency']) + ' ms' speedtest_ct.download() CTDLSpeed = str( round( (results_ct.download / 1000 / 1000), 2)) + " Mbit/s" speedtest_ct.upload() CTUpSpeed = str( round( (results_ct.upload / 1000 / 1000), 2)) + " Mbit/s" CUid = 0 speedtest_cu = speedtest.Speedtest() speedtest_cu.get_servers() servers_list = [] for _, servers in sorted(speedtest_cu.servers.items()): for server in servers: if server['country'].find( 'China') != -1 and server['sponsor'].find('Unicom') != -1: servers_list.append(server) speedtest_cu.get_best_server(servers_list) results_cu = speedtest_cu.results CUPing = str(results_cu.server['latency']) + ' ms' speedtest_cu.download() CUDLSpeed = str( round( (results_cu.download / 1000 / 1000), 2)) + " Mbit/s" speedtest_cu.upload() CUUpSpeed = str( round( (results_cu.upload / 1000 / 1000), 2)) + " Mbit/s" CMid = 0 speedtest_cm = speedtest.Speedtest() speedtest_cm.get_servers() servers_list = [] for _, servers in sorted(speedtest_cm.servers.items()): for server in servers: if server['country'].find( 'China') != -1 and server['sponsor'].find('Mobile') != -1: servers_list.append(server) speedtest_cm.get_best_server(servers_list) results_cm = speedtest_cm.results CMPing = str(results_cu.server['latency']) + ' ms' speedtest_cm.download() CMDLSpeed = str( round( (results_cm.download / 1000 / 1000), 2)) + " Mbit/s" speedtest_cm.upload() CMUpSpeed = str( round( (results_cm.upload / 1000 / 1000), 2)) + " Mbit/s" if configloader.get_config().API_INTERFACE == 'modwebapi': webapi.postApi('func/speedtest', {'node_id': configloader.get_config().NODE_ID}, {'data': [{'telecomping': CTPing, 'telecomeupload': CTUpSpeed, 'telecomedownload': CTDLSpeed, 'unicomping': CUPing, 'unicomupload': CUUpSpeed, 'unicomdownload': CUDLSpeed, 'cmccping': CMPing, 'cmccupload': CMUpSpeed, 'cmccdownload': CMDLSpeed}]}) else: import cymysql if configloader.get_config().MYSQL_SSL_ENABLE == 1: conn = cymysql.connect( host=configloader.get_config().MYSQL_HOST, port=configloader.get_config().MYSQL_PORT, user=configloader.get_config().MYSQL_USER, passwd=configloader.get_config().MYSQL_PASS, db=configloader.get_config().MYSQL_DB, charset='utf8', ssl={ 'ca': configloader.get_config().MYSQL_SSL_CA, 'cert': configloader.get_config().MYSQL_SSL_CERT, 'key': configloader.get_config().MYSQL_SSL_KEY}) else: conn = cymysql.connect( host=configloader.get_config().MYSQL_HOST, port=configloader.get_config().MYSQL_PORT, user=configloader.get_config().MYSQL_USER, passwd=configloader.get_config().MYSQL_PASS, db=configloader.get_config().MYSQL_DB, charset='utf8') conn.autocommit(True) cur = conn.cursor() cur.execute( "INSERT INTO `speedtest` (`id`, `nodeid`, `datetime`, `telecomping`, `telecomeupload`, `telecomedownload`, `unicomping`, 
`unicomupload`, `unicomdownload`, `cmccping`, `cmccupload`, `cmccdownload`) VALUES (NULL, '" + str( configloader.get_config().NODE_ID) + "', unix_timestamp(), '" + CTPing + "', '" + CTUpSpeed + "', '" + CTDLSpeed + "', '" + CUPing + "', '" + CUUpSpeed + "', '" + CUDLSpeed + "', '" + CMPing + "', '" + CMUpSpeed + "', '" + CMDLSpeed + "')") cur.close() conn.close() logging.info("Speedtest finished") @staticmethod def thread_db(obj): if configloader.get_config().SPEEDTEST == 0: return if configloader.get_config().API_INTERFACE == 'modwebapi': import webapi_utils global webapi webapi = webapi_utils.WebApi() global db_instance db_instance = obj() try: while True: try: db_instance.speedtest_thread() except Exception as e: import traceback trace = traceback.format_exc() logging.error(trace) #logging.warn('db thread except:%s' % e) if db_instance.event.wait(configloader.get_config().SPEEDTEST * 3600): break except KeyboardInterrupt as e: pass db_instance = None @staticmethod def thread_db_stop(): global db_instance db_instance.event.set()
apache-2.0
-3,055,601,498,901,799,000
35.984211
231
0.468336
false
4.343016
true
false
false
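The record above runs the speedtest-cli API three times, once per Chinese carrier. A condensed sketch of that pattern for a single carrier follows; it assumes the standalone speedtest-cli package is installed (the record itself imports a vendored copy via "from speedtest import speedtest").

import speedtest

st = speedtest.Speedtest()
st.get_servers()
# keep only China Telecom servers, mirroring the filtering loop in the record
candidates = [s for servers in st.servers.values() for s in servers
              if 'China' in s['country'] and 'Telecom' in s['sponsor']]
st.get_best_server(candidates)
st.download()
st.upload()
print('%s ms' % st.results.server['latency'])
print('%.2f / %.2f Mbit/s down/up' % (st.results.download / 1000 / 1000,
                                      st.results.upload / 1000 / 1000))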
lamthientruc/vac
benchmarks/Time/ASASPTIME_Hier/ASASPTIME/agtHiertoNonHierAdapt.py
2
10522
#!/usr/bin/env python import sys fn = sys.argv[1] #input file with temporal hierarchies #format input file #[Hierarchies] #1>2>t1 #3>4>t2 #[UA0] #u1 2 4 3 > t1 #u2 2 4 3 > t1 #u9 4 2 > t5 #rs 1 > t1 # for initial enabled roles #rs 2 > t1 #[Rules] #can_assign #can_revoke on = sys.argv[2] #output file with no hierarchies file = open(fn, 'r') o_file = open(on, 'w') #maximum roles for each var is 150 #11-6-2013: note, form A are for role enabling (RS), from a-$ are for TUA, z is for timer r2a = { 1 : 'a', 2 : 'b', 3 : 'c', 4 : 'd' , 5 : 'e' , 6 : 'f', 7 : 'g', 8 : 'h', 9 : 'i' , 10 : 'Z' , 11 : 'k' , 12 : 'l', 13 : 'm', 14 : 'n', 15 : 'o', 16 : 'p', 17 : 'q', 18 : 'r', 19 : 's', 20 : 't', 21 : 'u', 22 : 'v', 23 : 'w', 24 : '#', 25 : '$', 26 : 'z', 27 : 'A', 28 : 'B', 29 : 'C', 30 : 'D' , 31 : 'E' , 32 : 'F', 33 : 'G', 34 : 'H', 35 : 'I' , 36 : 'J' , 37 : 'K' , 38 : 'L', 39 : 'M', 40 : 'N', 41 : 'O', 42 : 'P', 43 : 'Q', 44 : 'R', 45 : 'S', 46 : 'T', 47 : 'U', 48 : 'V', 49 : 'W', 50 : 'X', 51 : 'Y', 52 : '@' } RelRoles = [] #initial role hierarchies RelRolesTr = [] #transitive closure of hierarchies for line in open(fn): if (line.find("[Hierarchies]") > -1): relField = True ruleField = False UA0Field = False continue elif (line.find("[UA0]") > -1): #compute transitive closure of RelRoles (role hierarchies) #... tempRelRolesTr = RelRolesTr firsttime = True while (firsttime or len(tempRelRolesTr) > len(RelRolesTr)): if (firsttime): firsttime = False RelRolesTr = tempRelRolesTr i = 0 while (i < len(RelRolesTr)): hier1 = RelRolesTr[i] tok_lstHier1 = hier1.split(">") j = 0 while (j < len(RelRolesTr)): if (i != j): hier2 = RelRolesTr[j] tok_lstHier2 = hier2.split(">") if (tok_lstHier1[2] == tok_lstHier2[2] and tok_lstHier1[0] == tok_lstHier2[1]): if (not ((tok_lstHier2[0] + ">" + tok_lstHier1[1] + ">" + tok_lstHier1[2]) in tempRelRolesTr) ): tempRelRolesTr.append(tok_lstHier2[0] + ">" + tok_lstHier1[1] + ">" + tok_lstHier1[2]) elif (tok_lstHier1[2] == tok_lstHier2[2] and tok_lstHier1[1] == tok_lstHier2[0]): if (not ((tok_lstHier1[0] + ">" + tok_lstHier2[1] + ">" + tok_lstHier1[2]) in tempRelRolesTr) ): tempRelRolesTr.append(tok_lstHier1[0] + ">" + tok_lstHier2[1] + ">" + tok_lstHier1[2]) j = j + 1 i = i + 1 print "TransitiveRoleHierarchies:=" print RelRolesTr o_file.write(line) UA0Field = True relField = False ruleField = False continue elif (line.find("[Rules]") > -1): o_file.write(line) relField = False UA0Field = False ruleField = True continue elif (line == "\n"): continue if (relField == True): #1>2>t1 RelRoles.append(str.strip(line)) RelRolesTr.append(str.strip(line)) elif (UA0Field == True): #print "UA0 can be not empty" o_file.write(line) elif (ruleField == True): #can_assign admin , ts1 , roles ; ts2 , role tok_lst = line.split() strRule= "" ts1 = "" ts2 = "" index = 0 nextTS = False targetRoles = "" for i,tok in enumerate(tok_lst): if (tok == "," or tok == ";"): index = index + 1 if (index == 1 or index == 3): nextTS = True continue elif (nextTS and index == 1): ts1 = tok nextTS = False elif (nextTS and index == 3): ts2 = tok nextTS = False elif (index == 4): targetRoles = targetRoles + " " + tok targetRoles = str.strip(targetRoles) index = 0 minusPreCond = [] for i,tok in enumerate(tok_lst): if (tok == "," or tok == ";"): if (len(minusPreCond) > 0): if (index == 0): tempminusPreCond = minusPreCond firsttime = True while (firsttime or len(tempminusPreCond) > len(minusPreCond)): if (firsttime): firsttime = False minusPreCond = tempminusPreCond j = 0 while (j < len(minusPreCond)): minusRole = 
minusPreCond[j] k = 0 while (k < len(RelRolesTr)): if (RelRolesTr[k].find(">" + minusRole + ">" + ts1) > -1): relRole_tok_lst = RelRolesTr[k].split(">") if (not (relRole_tok_lst[0] in tempminusPreCond)): tempminusPreCond.append(relRole_tok_lst[0]) k = k + 1 j = j + 1 elif (index == 2 and (tok_lst[0] == "can_revoke" or tok_lst[0] == "can_assign")): tempminusPreCond = minusPreCond firsttime = True while (firsttime or len(tempminusPreCond) > len(minusPreCond)): if (firsttime): firsttime = False minusPreCond = tempminusPreCond j = 0 while (j < len(minusPreCond)): minusRole = minusPreCond[j] k = 0 while (k < len(RelRolesTr)): if (RelRolesTr[k].find(">" + minusRole + ">" + ts2) > -1): relRole_tok_lst = RelRolesTr[k].split(">") if (not (relRole_tok_lst[0] in tempminusPreCond)): tempminusPreCond.append(relRole_tok_lst[0]) k = k + 1 j = j + 1 j = 0 while (j < len(minusPreCond)): strRule = strRule + "-" + minusPreCond[j] + " " j = j + 1 minusPreCond = [] index = index + 1 strRule = strRule + tok + " " elif (tok[0] != "-"): strRule = strRule + tok + " " else: if(not (tok[1:] in minusPreCond)): minusPreCond.append(tok[1:]) #work here for positive roles tok_lst = strRule.split() posRoleSeniorAd = [] posRoleSeniorCond = [] negRoleAd = [] negRoleCond = [] posRoleDisEnab = "" #Only for preCond of a disable, enable actions index = 0 for i,tok in enumerate(tok_lst): if (i < 1): continue if (tok == "," or tok == ";"): index = index + 1 elif (tok[0] != "-"): seniorRole = [] seniorRole.append(tok) if (index == 0): tempseniorRole = seniorRole firsttime = True while (firsttime or len(tempseniorRole) > len(seniorRole)): if (firsttime): firsttime = False seniorRole = tempseniorRole j = 0 while (j < len(seniorRole)): posRole = seniorRole[j] k = 0 while (k < len(RelRolesTr)): if (RelRolesTr[k].find( ">" + posRole + ">" + ts1) > -1): relRole_tok_lst = RelRolesTr[k].split(">") if (not (relRole_tok_lst[0] in tempseniorRole)): tempseniorRole.append(relRole_tok_lst[0]) k = k + 1 j = j + 1 posRoleSeniorAd.append(seniorRole) elif (index == 2 and (tok_lst[0] == "can_revoke" or tok_lst[0] == "can_assign")): tempseniorRole = seniorRole firsttime = True while (firsttime or len(tempseniorRole) > len(seniorRole)): if (firsttime): firsttime = False seniorRole = tempseniorRole j = 0 while (j < len(seniorRole)): posRole = seniorRole[j] k = 0 while (k < len(RelRolesTr)): if (RelRolesTr[k].find(">" + posRole + ">" + ts2) > -1): relRole_tok_lst = RelRolesTr[k].split(">") if (not (relRole_tok_lst[0] in tempseniorRole)): tempseniorRole.append(relRole_tok_lst[0]) k = k + 1 j = j + 1 posRoleSeniorCond.append(seniorRole) elif (index == 2 and (tok_lst[0] == "can_enable" or tok_lst[0] == "can_disable")): posRoleDisEnab = posRoleDisEnab + " " + tok else: if (index == 0): if(not (tok[1:] in negRoleAd)): negRoleAd.append(tok[1:]) elif (index == 2): if(not (tok[1:] in negRoleCond)): negRoleCond.append(tok[1:]) #compute cartersian product #For Admin cartesianAd = [] i = 0 while True: carte = "" j = i for l in posRoleSeniorAd: carte = carte + " " + l[j % len(l)] j /= len(l) if (not (str.strip(carte) in cartesianAd)): cartesianAd.append(str.strip(carte)) if j > 0: break i += 1 if (tok_lst[0] == "can_enable" or tok_lst[0] == "can_disable"): i = 0 while(i < len(cartesianAd)): if (cartesianAd[i] != "true" or cartesianAd[i] != "false"): carte_lst = cartesianAd[i].split(" ") isRemoved = False for k, tok in enumerate(carte_lst): if (tok in negRoleAd): #Pos and neg have same role -> remove isRemoved = True break if (isRemoved): i = i + 1 continue strRule 
= tok_lst[0] + " " + cartesianAd[i] for tok in negRoleAd: strRule = strRule + " -" + tok strRule = strRule + " , " + ts1 + " , " + str.strip(posRoleDisEnab) for tok in negRoleCond: strRule = strRule + " -" + tok strRule = strRule + " ; " + ts2 + " , " + targetRoles o_file.write(strRule + "\n") i = i + 1 continue # not need the following code #For condition (can_assign or can_revoke) cartesianCond = [] i = 0 while True: carte = "" j = i for l in posRoleSeniorCond: carte = carte + " " + l[j % len(l)] j /= len(l) if (not (str.strip(carte) in cartesianCond)): cartesianCond.append(str.strip(carte)) if j > 0: break i += 1 #Print to file additional actions i = 0 while(i < len(cartesianAd)): if (cartesianAd[i] != "true" or cartesianAd[i] != "false"): carte_lst = cartesianAd[i].split(" ") isRemoved = False for p,tok in enumerate(carte_lst): if (tok in negRoleAd): #Pos and neg have same role -> remove isRemoved = True break if (isRemoved): i = i + 1 continue j = 0 while(j < len(cartesianCond)): if (cartesianCond[j] != "true" or cartesianCond[j] != "false"): carte_lst = cartesianCond[j].split(" ") isRemoved = False for t,tok in enumerate(carte_lst): if (tok in negRoleCond): #Pos and neg have same role -> remove isRemoved = True break if (isRemoved): j = j + 1 continue strRule = tok_lst[0] + " " + cartesianAd[i] for tok in negRoleAd: strRule = strRule + " -" + tok strRule = strRule + " , " + ts1 + " , " + cartesianCond[j] for tok in negRoleCond: strRule = strRule + " -" + tok strRule = strRule + " ; " + ts2 + " , " + targetRoles o_file.write(strRule + "\n") j = j + 1 i = i + 1 o_file.close()
bsd-2-clause
6,503,462,406,943,212,000
24.916256
103
0.529177
false
2.769676
false
false
false
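Most of the script above is a fixed-point computation: the role hierarchy is a list of "senior>junior>timeslot" triples, and two triples chain whenever they share a timeslot and the junior of one is the senior of the other. A self-contained sketch of that transitive-closure step, with made-up triples (the script itself grows Python lists in place, but the idea is the same):

# 1>2>t1 and 2>3>t1 chain into 1>3>t1; 3>4>t2 lives in a different timeslot.
hier = ["1>2>t1", "2>3>t1", "3>4>t2"]
closure = set(hier)
changed = True
while changed:
    changed = False
    for a in list(closure):
        s1, j1, t1 = a.split(">")
        for b in list(closure):
            s2, j2, t2 = b.split(">")
            if t1 == t2 and j1 == s2:
                new = s1 + ">" + j2 + ">" + t1
                if new not in closure:
                    closure.add(new)
                    changed = True
print(sorted(closure))  # ['1>2>t1', '1>3>t1', '2>3>t1', '3>4>t2']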
JioCloud/oslo.vmware
oslo/vmware/common/loopingcall.py
2
4594
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import sys from eventlet import event from eventlet import greenthread from oslo.vmware.openstack.common.gettextutils import _ from oslo.vmware.openstack.common import timeutils LOG = logging.getLogger(__name__) class LoopingCallDone(Exception): """Exception to break out and stop a LoopingCall. The poll-function passed to LoopingCall can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCall.wait() """ def __init__(self, retvalue=True): """:param retvalue: Value that LoopingCall.wait() should return.""" self.retvalue = retvalue class LoopingCallBase(object): def __init__(self, f=None, *args, **kw): self.args = args self.kw = kw self.f = f self._running = False self.done = None def stop(self): self._running = False def wait(self): return self.done.wait() class FixedIntervalLoopingCall(LoopingCallBase): """A fixed interval looping call.""" def start(self, interval, initial_delay=None): self._running = True done = event.Event() def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: start = timeutils.utcnow() self.f(*self.args, **self.kw) end = timeutils.utcnow() if not self._running: break delay = interval - timeutils.delta_seconds(start, end) if delay <= 0: LOG.warn(_('task run outlasted interval by %s sec') % -delay) greenthread.sleep(delay if delay > 0 else 0) except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in fixed duration looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn_n(_inner) return self.done # TODO(mikal): this class name is deprecated in Havana and should be removed # in the I release LoopingCall = FixedIntervalLoopingCall class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. The function called should return how long to sleep for before being called again. """ def start(self, initial_delay=None, periodic_interval_max=None): self._running = True done = event.Event() def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: idle = self.f(*self.args, **self.kw) if not self._running: break if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) LOG.debug(_('Dynamic looping call sleeping for %.02f ' 'seconds'), idle) greenthread.sleep(idle) except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in dynamic looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn(_inner) return self.done
apache-2.0
-2,428,121,379,658,182,700
30.682759
78
0.585329
false
4.517207
false
false
false
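A usage sketch for FixedIntervalLoopingCall defined above: the looped function stops the call by raising LoopingCallDone, and waiting on the returned event yields the value carried by that exception. The import path is assumed from this file's location, and eventlet must be available since the loop runs in a greenthread.

from oslo.vmware.common.loopingcall import FixedIntervalLoopingCall, LoopingCallDone

state = {'ticks': 0}

def tick():
    state['ticks'] += 1
    if state['ticks'] >= 3:
        raise LoopingCallDone(retvalue=state['ticks'])

timer = FixedIntervalLoopingCall(tick)
print(timer.start(interval=0.1).wait())  # blocks on the eventlet event, prints 3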
gfitzp/sql-connections
sqlconnections_oracle.py
1
5552
# The MIT License (MIT) # # Copyright © 2015 Glenn Fitzpatrick # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import cx_Oracle import logging from tqdm import * verbose = False # dbConnect() # # INPUT # username, password, host IP, database schema # # OUTPUT # database connection object # # Opens a connection to the given database def dbConnect(DB_USER, DB_HOST, DB_SCHEMA, DB_PASSWORD = None): if not DB_PASSWORD: DB_PASSWORD = input("Enter password: ") # 'user/password@host:port/database' CONNECT_STRING = '{}/{}@{}/{}'.format(DB_USER, DB_PASSWORD, DB_HOST, DB_SCHEMA) print("Opening connection to database...", end=" ") try: connection = cx_Oracle.connect(CONNECT_STRING) print("connected!") print() except: print("Unable to connect!") return False else: return connection # dbQuery() # # INPUT # connection object, query statement # # OUTPUT # result = dictionary containing column names and results # result['columns'] = array of result column names # result['data'] = array of tuples of results # result['data'][0] = first result from query as a tuple # # Pass in a connection and an arbitrary SQL query, receive the results of that query for parsing def dbQuery(connection, query): result = {} if verbose: print() print("Opening cursor") logging.info("Opening cursor") cur = connection.cursor() if verbose: print("Running query...") print() print(query) logging.info("Running query...") logging.info(query) cur.execute(query) description = [] for d in cur.description: description.append(d[0]) result['columns'] = description reader = cur.fetchall() cur.close() data = [] for row in reader: data.append(row) if verbose: print() print("Cursor closed. Retrieved", str(len(data)), "rows.") print() logging.info("Cursor closed. 
Retrieved {} rows.".format(str(len(data)))) result['data'] = data return result # dbUpdate() # # INPUT # connection object, SQL update/insert statement, data to be inserted/updated as a list of lists # # OUTPUT # none # # Executes the update/insert statement with the provided data on the database associated with the given connection object def dbUpdate(connection, query, data = []): cursor = connection.cursor() if verbose: if query.startswith("INSERT"): print("Inserting data...") elif query.startswith("UPDATE"): print("Updating data...") else: print("Changing data...") if verbose: for item in tqdm(data, leave=True): print(query.format(*item)) cursor.execute(query.format(*item)) print() else: for item in tqdm(data): cursor.execute(query.format(*item)) logging.info(query.format(*item)) if verbose: if query.startswith("INSERT"): if len(data) == 1: print("1 row inserted") elif len(data) == None: print("0 rows inserted") else: print("{} rows inserted".format(str(len(data)))) elif query.startswith("UPDATE"): if len(data) == 1: print("1 row updated") elif len(data) == None: print("0 rows updated") else: print("{} rows updated".format(str(len(data)))) else: if len(data) == 1: print("1 row changed") elif len(data) == None: print("0 rows changed") else: print("{} rows changed".format(str(len(data)))) try: connection.commit() if verbose: print("Database commit.") print() except: print("Unable to commit!") print() return False else: cursor.close() return # dbClose() # # INPUT # database connection object # # OUTPUT # none # # Closes a given database connection def dbClose(connection): connection.close() print("Database connection closed.") logging.info("Database connection closed.") print() return
mit
-3,997,224,486,578,959,000
22.521186
121
0.590885
false
4.433706
false
false
false
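A hedged sketch of the intended call sequence for the helpers above; the credentials, host and statements are placeholders, and cx_Oracle plus tqdm must be installed.

conn = dbConnect('scott', '198.51.100.10', 'ORCL', DB_PASSWORD='tiger')
if conn:
    result = dbQuery(conn, "SELECT table_name FROM user_tables")
    print(result['columns'])          # column names of the result set
    for row in result['data']:        # each row is a tuple
        print(row)
    # hypothetical widgets table; dbUpdate fills the {} slots per item in the list
    dbUpdate(conn, "UPDATE widgets SET price = {} WHERE id = {}", [[9.99, 1]])
    dbClose(conn)

Note that dbUpdate interpolates values with str.format rather than bind variables, so anything passed in the data list has to be trusted input.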
flgiordano/netcash
+/google-cloud-sdk/lib/googlecloudsdk/calliope/cli_tree.py
1
9923
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A module for the Cloud SDK CLI tree external representation.""" import argparse import textwrap from googlecloudsdk.core.console import console_io def _NormalizeDescription(description): """Normalizes description text. argparse.SUPPRESS normalizes to None. Args: description: str, The text to be normalized. Returns: str, The normalized text. """ if description == argparse.SUPPRESS: description = None elif description: description = textwrap.dedent(description) return description or '' class Flag(object): """Flag info. Attributes: type: str, The flag value type name {'bool', 'int', 'float', 'string'}. name: str, The normalized flag name ('_' => '-'). hidden: bool, True if the flag is hidden. value: str, The flag value documentation name. countmin: int, The minimum number of flag values. countmax: int, The maximum number of flag values, 0 for unlimited. required: int, 1 if the flag must be specified, 0 otherwise. description: str, The help text. choices: list, The list of static choices. default: (self.type), The default flag value or None if no default. group: int, Mutually exclusive flag group id counting from 1, 0 if none. resource: str, Flag value resource identifier. """ def __init__(self, name, description='', default=None): self.type = 'string' self.name = name self.hidden = description == argparse.SUPPRESS self.value = '' self.countmin = 0 self.countmax = 0 self.required = 0 self.choices = [] self.default = default self.description = _NormalizeDescription(description) self.group = 0 self.resource = '' class Positional(object): """Positional info. Attributes: name: str, The normalized name ('_' => '-'). value: str, The positional value documentation name. countmin: int, The minimum number of positional values. countmax: int, The maximum number of positional values. required: int, 1 if the positional must be specified, 0 otherwise. description: str, The help text. resource: str, Positional value resource identifier. """ def __init__(self, name, description): self.name = name self.value = '' self.countmin = 0 self.countmax = 0 self.capsule = '' self.description = description self.resource = '' class Command(object): """Command and group info. Attributes: release: str, The command release name {'internal', 'alpha', 'beta', 'ga'}. name: str, The normalized name ('_' => '-'). hidden: bool, True if the command is hidden. capsule: str, The first line of the command docstring. description: str, The second and following lines of the command docstring. flags: {str:str}, Command flag dict, indexed by normalized flag name. positionals: [str], Command positionals list. sections: {str:str}, Optional section help dict, indexed by section name. 
""" def __init__(self, command, parent): self.release = command.ReleaseTrack().id self.name = command.name.replace('_', '-') self.hidden = command.IsHidden() self.flags = {} self.positionals = [] self.sections = {} parent_command = parent.name.replace('_', '-') if parent else '' self.release, capsule = self.__Release( command, self.release, getattr(command, 'short_help', '')) self.capsule = console_io.LazyFormat( _NormalizeDescription(capsule), command=self.name, parent_command=parent_command) self.release, description = self.__Release( command, self.release, getattr(command, 'long_help', '')) self.description = console_io.LazyFormat( _NormalizeDescription(description), command=self.name, index=self.capsule, parent_command=parent_command) sections = getattr(command, 'detailed_help', None) if sections: for s in sections: if s == 'brief': self.release, self.capsule = self.__Release( command, self.release, sections[s]) else: self.sections[s] = console_io.LazyFormat( _NormalizeDescription(sections[s]), command=self.name, index=self.capsule, description=self.description, parent_command=parent_command) self.commands = {} # _parent is explicitly private so it won't appear in serialized output. self._parent = parent if parent: parent.commands[self.name] = self args = command.ai # Initialize the mutually exclusive flag groups. group_count = {} group_name = {} for arg in args.flag_args: for name in arg.option_strings: if name.startswith('--'): name = name.replace('_', '-') if not self.__Ancestor(name): g = args.mutex_groups.get(arg.dest, None) if g: group_name[name] = g if g in group_count: group_count[g] += 1 else: group_count[g] = 1 group_id_count = 0 group_id = {} # Sorted iteration preserves group_id[] indices across separate invocations # where the mutex groups do not change. for _, g in sorted(group_name.iteritems()): if group_count[g] > 1: group_count[g] = 0 # Don't check this group again! group_id_count += 1 group_id[g] = group_id_count # Collect the flags. for arg in sorted(args.flag_args): for name in arg.option_strings: if name.startswith('--'): name = name.replace('_', '-') # Don't include ancestor flags. if not self.__Ancestor(name): flag = Flag(name, description=_NormalizeDescription(arg.help), default=arg.default) # ArgParse does not have an explicit Boolean flag type. By # convention a flag with arg.nargs=0 and action='store_true' or # action='store_false' is a Boolean flag. arg.type gives no hint # (arg.type=bool would have been so easy) and we don't have access # to args.action here. Even then the flag can take on non-Boolean # values. If arg.default is not specified then it will be None, but # it can be set to anything. So we do a conservative 'truthiness' # test here. if arg.nargs == 0: flag.type = 'bool' flag.default = True if arg.default else False else: if arg.type == int: flag.type = 'int' elif arg.type == float: flag.type = 'float' if arg.nargs == '*': pass elif arg.nargs == '?': flag.countmax = 1 elif arg.nargs == '+': flag.countmin = 1 elif type(arg.nargs) in (int, long): flag.countmin = arg.nargs flag.countmax = arg.nargs if arg.metavar: flag.value = arg.metavar else: flag.value = name[2:].upper() if arg.choices: choices = sorted(arg.choices) if choices == ['false', 'true']: flag.type = 'bool' else: flag.choices = choices if arg.required: flag.required = 1 flag.resource = getattr(arg, 'completion_resource', '') if name in group_name and group_name[name] in group_id: flag.group = group_id[group_name[name]] self.flags[flag.name] = flag # Collect the positionals. 
for arg in args.positional_args: name = arg.dest.replace('_', '-') positional = Positional(name, description=_NormalizeDescription(arg.help)) if arg.metavar: positional.value = arg.metavar if arg.nargs != 0: if arg.nargs == '*': pass elif arg.nargs == '?': positional.countmax = 1 elif arg.nargs == '+': positional.countmin = 1 elif type(arg.nargs) in (int, long): positional.countmin = arg.nargs positional.countmax = arg.nargs positional.resource = getattr(arg, 'completion_resource', '') self.positionals.append(positional) def __Ancestor(self, flag): """Determines if flag is provided by an ancestor command. Args: flag: str, The flag name (no leading '-'). Returns: bool, True if flag provided by an ancestor command, false if not. """ command = self._parent while command: if flag in command.flags: return True command = command._parent # pylint: disable=protected-access return False def __Release(self, command, release, description): """Determines the release type from the description text. Args: command: Command, The CLI command/group description. release: int, The default release type. description: str, The command description markdown. Returns: (release, description): (int, str), The actual release and description with release prefix omitted. """ description = _NormalizeDescription(description) path = command.GetPath() if len(path) >= 2 and path[1] == 'internal': release = 'INTERNAL' return release, description
bsd-3-clause
1,056,222,635,158,947,700
34.439286
80
0.612617
false
4.231557
false
false
false
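The Flag and Positional containers above are plain value objects filled in from argparse metadata. A small illustration of how a Flag is populated, with an invented flag name and help text:

import argparse

flag = Flag('--zone', description='Zone to operate in.', default='us-east1-b')
print('%s %s %r %d' % (flag.name, flag.type, flag.default, flag.required))
# --zone string 'us-east1-b' 0
hidden = Flag('--log-http', description=argparse.SUPPRESS)
print(hidden.hidden)  # True: SUPPRESS marks the flag hidden and blanks its description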
xmendez/wfuzz
src/wfuzz/plugins/payloads/shodanp.py
1
1602
from wfuzz.externals.moduleman.plugin import moduleman_plugin from wfuzz.plugin_api.payloadtools import ShodanIter from wfuzz.plugin_api.base import BasePayload from wfuzz.fuzzobjects import FuzzWordType @moduleman_plugin class shodanp(BasePayload): name = "shodanp" author = ("Xavi Mendez (@xmendez)",) version = "0.1" description = ("Queries the Shodan API",) summary = "Returns URLs of a given Shodan API search (needs api key)." category = ["default"] priority = 99 parameters = ( ("search", "", True, "Shodan search string."), ("page", "0", False, "Offset page, starting at zero."), ( "limit", "0", False, "Number of pages (1 query credit = 100 results). Zero for all.", ), ) default_parameter = "search" def __init__(self, params): BasePayload.__init__(self, params) search = params["search"] page = int(params["page"]) limit = int(params["limit"]) self._it = ShodanIter(search, page, limit) def count(self): return -1 def close(self): self._it._stop() def get_type(self): return FuzzWordType.WORD def get_next(self): match = next(self._it) port = match["port"] scheme = "https" if "ssl" in match or port == 443 else "http" if match["hostnames"]: for hostname in match["hostnames"]: return "{}://{}:{}".format(scheme, hostname, port) else: return "{}://{}:{}".format(scheme, match["ip_str"], port)
gpl-2.0
-1,685,633,677,487,755,800
26.152542
76
0.56804
false
3.787234
false
false
false
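The interesting part of the plugin above is how get_next() turns a Shodan match into a URL. The same logic applied to a hand-written match dict, so no API key is needed for the illustration:

match = {"port": 8443, "ssl": {}, "hostnames": ["example.org"], "ip_str": "203.0.113.7"}
port = match["port"]
scheme = "https" if "ssl" in match or port == 443 else "http"
if match["hostnames"]:
    print("{}://{}:{}".format(scheme, match["hostnames"][0], port))
else:
    print("{}://{}:{}".format(scheme, match["ip_str"], port))
# https://example.org:8443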
pgiri/asyncoro
py3/asyncoro/asyncfile.py
1
36873
"""This file is part of asyncoro; see http://asyncoro.sourceforge.net for details. This module provides API for asynchronous file and pipe processing. They work with Windows, Linux, OS X and likely other UNIX variants. Note that regular (on-disk) files don't support asynchronous I/O, as they are non-blocking and can't be used for polling to signal read/write events - they are always ready to be read/written. Under Windows, pipes must be opened with Popen in this module instead of Popen in subprocess module. See 'pipe_csum.py', 'pipe_grep.py' and 'socket_afile.py' for examples. """ import subprocess import os import sys import errno import platform from functools import partial as partial_func import asyncoro from asyncoro import _AsyncPoller, AsynCoro, Coro __author__ = "Giridhar Pemmasani (pgiri@yahoo.com)" __copyright__ = "Copyright (c) 2014 Giridhar Pemmasani" __license__ = "MIT" __url__ = "http://asyncoro.sourceforge.net" __all__ = ['AsyncFile', 'AsyncPipe'] if platform.system() == 'Windows': __all__ += ['pipe', 'Popen'] import itertools import win32file import win32pipe import win32event import win32con import winerror import winnt import pywintypes import msvcrt # pywin32 doesn't define FILE_FLAG_FIRST_PIPE_INSTANCE FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 _pipe_id = itertools.count() def pipe(bufsize=8192): """Creates overlapped (asynchronous) pipe. """ name = r'\\.\pipe\asyncoro-pipe-%d-%d' % (os.getpid(), next(_pipe_id)) openmode = (win32pipe.PIPE_ACCESS_INBOUND | win32file.FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE) pipemode = (win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE) rh = wh = None try: rh = win32pipe.CreateNamedPipe( name, openmode, pipemode, 1, bufsize, bufsize, win32pipe.NMPWAIT_USE_DEFAULT_WAIT, None) wh = win32file.CreateFile( name, win32file.GENERIC_WRITE | winnt.FILE_READ_ATTRIBUTES, 0, None, win32file.OPEN_EXISTING, win32file.FILE_FLAG_OVERLAPPED, None) overlapped = pywintypes.OVERLAPPED() # 'yield' can't be used in constructor so use sync wait # (in this case it is should be okay) overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None) rc = win32pipe.ConnectNamedPipe(rh, overlapped) if rc == winerror.ERROR_PIPE_CONNECTED: win32event.SetEvent(overlapped.hEvent) rc = win32event.WaitForSingleObject(overlapped.hEvent, 1000) overlapped = None if rc != win32event.WAIT_OBJECT_0: asyncoro.logger.warning('connect failed: %s' % rc) raise Exception(rc) return (rh, wh) except: if rh is not None: win32file.CloseHandle(rh) if wh is not None: win32file.CloseHandle(wh) raise class Popen(subprocess.Popen): """Asynchronous version of subprocess.Popen - stdin, stdout and stderr support overlapped I/O. 
""" def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwargs): self.stdin = self.stdout = self.stderr = None stdin_rh = stdin_wh = stdout_rh = stdout_wh = stderr_rh = stderr_wh = None if stdin == subprocess.PIPE: stdin_rh, stdin_wh = pipe() stdin_rfd = msvcrt.open_osfhandle(stdin_rh.Detach(), os.O_RDONLY) self.stdin_rh = stdin_rh else: stdin_rfd = stdin self.stdin_rh = None if stdout == subprocess.PIPE: stdout_rh, stdout_wh = pipe() stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) else: stdout_wfd = stdout if stderr == subprocess.PIPE: stderr_rh, stderr_wh = pipe() stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) elif stderr == subprocess.STDOUT: stderr_wfd = stdout_wfd else: stderr_wfd = stderr try: super(Popen, self).__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, stderr=stderr_wfd, **kwargs) except: for handle in (stdin_rh, stdin_wh, stdout_rh, stdout_wh, stderr_rh, stderr_wh): if handle is not None: win32file.CloseHandle(handle) raise else: if stdin_wh is not None: self.stdin = AsyncFile(stdin_wh, mode='w') if stdout_rh is not None: self.stdout = AsyncFile(stdout_rh, mode='r') if stderr_rh is not None: self.stderr = AsyncFile(stderr_rh, mode='r') finally: if stdin == subprocess.PIPE: os.close(stdin_rfd) if stdout == subprocess.PIPE: os.close(stdout_wfd) if stderr == subprocess.PIPE: os.close(stderr_wfd) def close(self): """It is advised to call 'close' on the pipe so both handles of pipe are closed. """ if isinstance(self.stdin, AsyncFile): self.stdin.close() self.stdin = None if self.stdin_rh: win32pipe.DisconnectNamedPipe(self.stdin_rh) win32file.CloseHandle(self.stdin_rh) self.stdin_rh = None if isinstance(self.stdout, AsyncFile): self.stdout.close() self.stdout = None if isinstance(self.stderr, AsyncFile): self.stderr.close() self.stderr = None def terminate(self): """Close pipe and terminate child process. """ self.close() super(Popen, self).terminate() def __del__(self): self.terminate() class _AsyncFile(object): """Asynchronous file interface. Under Windows asynchronous I/O works on regular (on-disk) files, but not very useful, as regular files are always ready to read / write. They are useful when used as file objects in asynchronous pipes. """ def __init__(self, path_handle, mode='r', share=None): """If 'path_handle' is a string, opens that file for asynchronous I/O; if it is a handle (pipe client / server, for example), sets up for asynchronous I/O. 'mode' is as per 'open' Python function, although limited to basic/common modes. """ self._overlap = pywintypes.OVERLAPPED() if isinstance(path_handle, str): self._path = path_handle if mode.startswith('r'): access = win32file.GENERIC_READ if share is None: share = win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE create = win32file.OPEN_EXISTING if '+' in mode: access |= win32file.GENERIC_WRITE elif mode.startswith('w'): access = win32file.GENERIC_WRITE if share is None: share = win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE create = win32file.CREATE_ALWAYS if '+' in mode: access |= win32file.GENERIC_READ elif mode.startswith('a'): access = win32file.GENERIC_WRITE if share is None: share = win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE create = win32file.OPEN_ALWAYS if '+' in mode: access |= win32file.GENERIC_READ # TODO: if reading, offset should be 0? 
sb = os.stat(path_handle) self._overlap.Offset = sb.st_size else: self._overlap = None raise ValueError('invalid mode "%s"' % mode) flags = win32file.FILE_FLAG_OVERLAPPED try: self._handle = win32file.CreateFile(path_handle, access, share, None, create, flags, None) except: self._overlap = None raise if mode.startswith('r'): flags = os.O_RDONLY elif mode.startswith('a'): flags = os.O_APPEND else: flags = 0 self._fileno = msvcrt.open_osfhandle(self._handle, flags) else: self._handle = path_handle # pipe mode should be either 'r' or 'w' flags = os.O_RDONLY if mode.startswith('r') else 0 self._fileno = msvcrt.open_osfhandle(self._handle, flags) self._buflist = [] self._read_result = None self._write_result = None self._timeout = None self._timeout_id = None self._asyncoro = AsynCoro.scheduler() if self._asyncoro: self._notifier = self._asyncoro._notifier self._notifier.register(self._handle) else: self._notifier = None def read(self, size=0, full=False, timeout=None): """Read at most 'size' bytes from file; if 'size' <= 0, all data up to EOF is read and returned. If 'full' is True, exactly 'size' bytes are returned (unless EOF or timeout occur before). If EOF is encountered before any more data is available, empty buffer is returned. If no data has been read before timeout, then IOError('timedout') will be thrown. If timeout is given and full is True and timeout expires before all the data could be read, it returns partial data read before timeout if any data has been read. Must be used in a coroutine with 'yield' as 'data = yield fd.read(1024)' """ def _read(size, full, rc, n): if rc or n == 0: if self._timeout: self._notifier._del_timeout(self) self._overlap.object = self._read_result = None if rc != winerror.ERROR_OPERATION_ABORTED: if (self._buflist or rc == winerror.ERROR_HANDLE_EOF or rc == winerror.ERROR_BROKEN_PIPE): buf, self._buflist = b''.join(self._buflist), [] self._read_coro._proceed_(buf) return self._read_coro.throw(IOError(rc, 'ReadFile', str(rc))) self._overlap.object = self._read_coro = self._read_result = None return buf = self._read_result[:n] if size > 0: size -= len(buf) assert size >= 0 if size == 0: full = False self._buflist.append(buf) self._overlap.Offset += n if full: self._overlap.object = partial_func(_read, size, full) try: rc, _ = win32file.ReadFile(self._handle, self._read_result, self._overlap) except pywintypes.error as exc: rc = exc.winerror if rc and rc != winerror.ERROR_IO_PENDING: buf, self._buflist = b''.join(self._buflist), [] self._overlap.object = self._read_result = None if self._timeout: self._notifier._del_timeout(self) self._read_coro._proceed_(buf) self._read_coro = None return if self._buflist: buf, self._buflist = b''.join(self._buflist), [] if self._timeout: self._notifier._del_timeout(self) self._overlap.object = self._read_result = None self._read_coro._proceed_(buf) self._read_coro = None if not self._asyncoro: self._asyncoro = AsynCoro.scheduler() self._notifier = self._asyncoro._notifier self._notifier.register(self._handle) if not size or size < 0: count = 16384 full = True else: if self._buflist: buf, self._buflist = b''.join(self._buflist), [] if len(buf) > size: buf, self._buflist = buf[:size], [buf[size:]] if (not full) or (len(buf) == size): return buf self._buflist = [buf] size -= len(buf) count = size self._read_result = win32file.AllocateReadBuffer(count) self._overlap.object = partial_func(_read, size, full) self._read_coro = AsynCoro.cur_coro(self._asyncoro) self._read_coro._await_() try: rc, _ = 
win32file.ReadFile(self._handle, self._read_result, self._overlap) except pywintypes.error as exc: if exc.winerror == winerror.ERROR_BROKEN_PIPE: buf, self._buflist = b''.join(self._buflist), [] self._read_coro._proceed_(buf) self._read_result = self._read_coro = self._overlap.object = None return else: rc = exc.winerror if rc and rc != winerror.ERROR_IO_PENDING: self._overlap.object = self._read_result = self._read_coro = None self._read_coro.throw(IOError(rc, 'ReadFile', str(rc))) if timeout: self._timeout = timeout self._notifier._add_timeout(self) def write(self, buf, full=False, timeout=None): """Write data in 'buf' to file. If 'full' is True, the function waits till all data in buf is written; otherwise, it waits until one write completes. It returns length of data written. If no data has been written before timeout, then IOError('timedout') will be thrown. If timeout is given and full is True and timeout expires before all the data could be written, it returns length of data written before timeout if any data has been written. Must be used with 'yield' as 'n = yield fd.write(buf)' to write (some) data in buf. """ def _write(written, rc, n): if rc or n == 0: if self._timeout: self._notifier._del_timeout(self) if rc != winerror.ERROR_OPERATION_ABORTED: if written: self._write_coro._proceed_(written) else: self._write_coro.throw(IOError(rc, 'WriteFile', str(rc))) self._write_result.release() self._overlap.object = self._write_coro = self._write_result = None return written += n self._overlap.Offset += n self._write_result = self._write_result[n:] if not full or len(self._write_result) == 0: self._write_result.release() self._overlap.object = self._write_result = None if self._timeout: self._notifier._del_timeout(self) self._write_coro._proceed_(written) self._write_coro = None return self._overlap.object = partial_func(_write, written) try: rc, _ = win32file.WriteFile(self._handle, self._write_result, self._overlap) except pywintypes.error as exc: rc = exc.winerror if rc and rc != winerror.ERROR_IO_PENDING: self._write_result.release() self._overlap.object = self._write_result = None if self._timeout: self._notifier._del_timeout(self) if written: self._write_coro._proceed_(written) else: self._write_coro.throw(IOError(rc, 'WriteFile', str(rc))) self._write_coro = None return if not self._asyncoro: self._asyncoro = AsynCoro.scheduler() self._notifier = self._asyncoro._notifier self._notifier.register(self._handle) self._write_result = memoryview(buf) self._overlap.object = partial_func(_write, 0) self._write_coro = AsynCoro.cur_coro(self._asyncoro) self._write_coro._await_() try: rc, _ = win32file.WriteFile(self._handle, self._write_result, self._overlap) except pywintypes.error as exc: if exc.winerror == winerror.ERROR_BROKEN_PIPE: self._write_result.release() self._write_coro._proceed_(0) self._write_result = self._write_coro = self._overlap.object = None return else: rc = exc.winerror if rc and rc != winerror.ERROR_IO_PENDING: self._write_result.release() self._overlap.object = self._write_result = self._write_coro = None self._write_coro._proceed_(None) raise IOError(rc, 'WriteFile', str(rc)) if timeout: self._timeout = timeout self._notifier._add_timeout(self) def seek(self, offset, whence=os.SEEK_SET): """Similar to 'seek' of file descriptor; works only for regular files. 
""" if whence == os.SEEK_SET: self._overlap.Offset = offset elif whence == os.SEEK_CUR: self._overlap.Offset += offset else: assert whence == os.SEEK_END if isinstance(self._path, str): sb = os.stat(self._path) self._overlap.Offset = sb.st_size + offset else: self._overlap.Offset = offset def tell(self): """Similar to 'tell' of file descriptor; works only for regular files. """ return self._overlap.Offset def fileno(self): """Similar to 'fileno' of file descriptor; works only for regular files. """ return self._fileno def close(self): """Similar to 'close' of file descriptor. """ if self._handle: try: flags = win32pipe.GetNamedPipeInfo(self._handle)[0] except: flags = 0 if flags & win32con.PIPE_SERVER_END: win32pipe.DisconnectNamedPipe(self._handle) # TODO: if pipe, need to call FlushFileBuffers? def _close_(rc, n): win32file.CloseHandle(self._handle) self._overlap = None if self._notifier: self._notifier.unregister(self._handle) self._handle = None self._read_result = self._write_result = None self._read_coro = self._write_coro = None self._buflist = [] if self._overlap.object: self._overlap.object = _close_ win32file.CancelIo(self._handle) else: _close_(0, 0) def _timed_out(self): """Internal use only. """ if self._read_coro: if self._buflist: buf, self._buflist = b''.join(self._buflist), [] self._read_coro._proceed_(buf) self._read_coro = None else: self._read_coro.throw(IOError('timedout')) self._read_coro = None win32file.CancelIo(self._handle) else: import fcntl class _AsyncFile(object): """Asynchronous interface for file-like objects in Linux and other Unix variants. Tested with AsyncPipe and sockets under Linux and OS X; it should work on other Unix variants. """ def __init__(self, fd): """'fd' is either a file object (e.g., obtained with 'open') or a file number (e.g., obtained with socket's fileno()). """ if hasattr(fd, 'fileno'): self._fd = fd self._fileno = fd.fileno() elif isinstance(fd, int): self._fd, self._fileno = None, self._fd else: raise ValueError('invalid file descriptor') self._asyncoro = AsynCoro.scheduler() if self._asyncoro: self._notifier = self._asyncoro._notifier if hasattr(fd, '_fileno'): # assume it is AsyncSocket self._notifier.unregister(fd) else: self._notifier = None self._timeout = None self._read_task = None self._write_task = None self._read_coro = None self._write_coro = None self._buflist = [] flags = fcntl.fcntl(self._fileno, fcntl.F_GETFL) fcntl.fcntl(self._fileno, fcntl.F_SETFL, flags | os.O_NONBLOCK) def read(self, size=0, full=False, timeout=None): """Read at most 'size' bytes from file; if 'size' <= 0, all data up to EOF is read and returned. If 'full' is True, exactly 'size' bytes are returned (unless EOF or timeout occur before). If EOF is encountered before any more data is available, empty buffer is returned. If no data has been read before timeout, then IOError('timedout') will be thrown. If timeout is given and full is True and timeout expires before all the data could be read, it returns partial data read before timeout if any data has been read. 
Must be used in a coroutine with 'yield' as 'data = yield fd.read(1024)' """ def _read(size, full): if size > 0: count = size else: count = 16384 try: buf = os.read(self._fileno, count) except (OSError, IOError) as exc: if exc.errno in (errno.EAGAIN, errno.EWOULDBLOCK): return else: raise except: self._notifier.clear(self, _AsyncPoller._Read) self._read_coro.throw(*sys.exc_info()) self._read_coro = self._read_task = None return if buf: if size > 0: size -= len(buf) # assert size >= 0 if size == 0: full = False self._buflist.append(buf) if full: self._read_task = partial_func(_read, size, full) return if self._buflist: buf, self._buflist = b''.join(self._buflist), [] self._notifier.clear(self, _AsyncPoller._Read) self._read_coro._proceed_(buf) self._read_coro = self._read_task = None if not self._asyncoro: self._asyncoro = AsynCoro.scheduler() self._notifier = self._asyncoro._notifier if hasattr(self._fd, '_fileno'): self._notifier.unregister(self._fd) if not size or size < 0: size = 0 full = True elif self._buflist: buf, self._buflist = b''.join(self._buflist), [] if len(buf) > size: buf, self._buflist = buf[:size], [buf[size:]] if (not full) or (len(buf) == size): return buf self._buflist = [buf] size -= len(buf) self._timeout = timeout self._read_coro = AsynCoro.cur_coro(self._asyncoro) self._read_coro._await_() self._read_task = partial_func(_read, size, full) self._notifier.add(self, _AsyncPoller._Read) def write(self, buf, full=False, timeout=None): """Write data in 'buf' to file. If 'full' is True, the function waits till all data in buf is written; otherwise, it waits until one write completes. It returns length of data written. If no data has been written before timeout, then IOError('timedout') will be thrown. If timeout is given and full is True and timeout expires before all the data could be written, it returns length of data written before timeout if any data has been written. Must be used with 'yield' as 'n = yield fd.write(buf)' to write (some) data in buf. """ def _write(view, written): try: n = os.write(self._fileno, view) except (OSError, IOError) as exc: if exc.errno in (errno.EAGAIN, errno.EINTR): n = 0 else: self._notifier.clear(self, _AsyncPoller._Write) if full: view.release() self._write_coro.throw(*sys.exc_info()) self._write_coro = self._write_task = None return written += n if n == len(view) or not full: self._notifier.clear(self, _AsyncPoller._Write) if full: view.release() self._write_coro._proceed_(written) self._write_coro = self._write_task = None else: view = view[n:] self._write_task = partial_func(_write, view, written) if not self._asyncoro: self._asyncoro = AsynCoro.scheduler() self._notifier = self._asyncoro._notifier if hasattr(self._fd, '_fileno'): self._notifier.unregister(self._fd) if full: view = memoryview(buf) else: view = buf self._timeout = timeout self._write_coro = AsynCoro.cur_coro(self._asyncoro) self._write_coro._await_() self._write_task = partial_func(_write, view, 0) self._notifier.add(self, _AsyncPoller._Write) def close(self): """Close file descriptor. """ if self._fileno: self._notifier.unregister(self) if self._fd: self._fd.close() self._fd = self._fileno = None self._read_coro = self._write_coro = None self._read_task = self._write_task = None self._buflist = [] def _eof(self): """Internal use only. """ if self._read_task and self._read_coro: self._read_task() def _timed_out(self): """Internal use only. 
""" if self._read_coro: if self._read_task and self._buflist: buf, self._buflist = b''.join(self._buflist), [] self._notifier.clear(self, _AsyncPoller._Read) self._read_coro._proceed_(buf) else: self._read_coro.throw(IOError('timedout')) self._read_coro = self._read_task = None if self._write_coro: written = 0 if self._write_task: written = self._write_task.args[2] if isinstance(self._write_task.args[1], memoryview): self._write_task.args[1].release() self._notifier.clear(self, _AsyncPoller._Write) self._write_coro._proceed_(written) self._write_coro = self._write_task = None class AsyncFile(_AsyncFile): """See _AsyncFile above. """ def readline(self, size=0, sizehint=100, timeout=None): """Read a line up to 'size' and return. 'size' and 'timeout' are as per 'read' method above. 'sizehint' indicates approximate number of bytes expected in a line. Too big/small value affects performance, otherwise has no effect. Must be used with 'yield' as 'line = yield fd.readline()' """ if not size or size < 0: size = 0 if self._buflist: buf, self._buflist = b''.join(self._buflist), [] else: buf = yield self.read(size=sizehint, timeout=timeout) if not buf: raise StopIteration(buf) buflist = [] while 1: if size > 0: pos = buf.find(b'\n', 0, size) size -= len(buf) if size <= 0 and pos < 0: pos = size + len(buf) - 1 else: pos = buf.find(b'\n') if pos >= 0: if buflist: buf = b''.join(buflist) + buf pos += sum(len(b) for b in buflist) if len(buf) > pos: buf, self._buflist = buf[:pos+1], [buf[pos+1:]] raise StopIteration(buf) buflist.append(buf) buf = yield self.read(size=sizehint, timeout=timeout) if not buf: buf = b''.join(buflist) raise StopIteration(buf) def __enter__(self): return self def __exit__(self, exc_type, exc_value, trace): self.close() return True class AsyncPipe(object): """Asynchronous interface for (connected) pipes. """ def __init__(self, first, last=None): """'first' is a Popen object. 'last', if given, is another Popen object that is the end of the joints to 'first'. 'write' operations send data to first's stdin and 'read' operations get data from last's stdout/stderr. """ if not last: last = first self.first = first self.last = last if platform.system() == 'Windows': if not isinstance(first, Popen) or not isinstance(last, Popen): raise ValueError('argument must be asyncfile.Popen object') if first.stdin: self.stdin = first.stdin else: self.stdin = None if last.stdout: self.stdout = last.stdout else: self.stdout = None if last.stderr: self.stderr = last.stderr else: self.stderr = None else: if not isinstance(first, subprocess.Popen) or not isinstance(last, subprocess.Popen): raise ValueError('argument must be subprocess.Popen object') if first.stdin: self.stdin = AsyncFile(first.stdin) else: self.stdin = None if last.stdout: self.stdout = AsyncFile(last.stdout) else: self.stdout = None if last.stderr: self.stderr = AsyncFile(last.stderr) else: self.stderr = None def __getattr__(self, name): if self.last: return getattr(self.last, name) elif self.first: return getattr(self.first, name) else: raise RuntimeError('AsyncPipe is invalid') def write(self, buf, full=False, timeout=None): """Write data in buf to stdin of pipe. See 'write' method of AsyncFile for details. """ yield self.stdin.write(buf, full=full, timeout=timeout) def read(self, size=0, timeout=None): """Read data from stdout of pipe. See 'read' method of AsyncFile for details. """ yield self.stdout.read(size=size, timeout=timeout) def readline(self, size=0, sizehint=100, timeout=None): """Read a line from stdout of pipe. 
See 'readline' method of AsyncFile for details. """ yield self.stdout.readline(size=size, sizehint=sizehint, timeout=timeout) def read_stderr(self, size=0, timeout=None): """Read data from stderr of pipe. See 'read' method of AsyncFile for details. """ yield self.stderr.read(size=size, timeout=timeout) def readline_stderr(self, size=0, sizehint=100, timeout=None): """Read a line from stderr of pipe. See 'readline' method of AsyncFile for details. """ yield self.stderr.readline(size=size, sizehint=sizehint, timeout=timeout) def communicate(self, input=None): """Similar to Popen's communicate. Must be used with 'yield' as 'stdout, stderr = yield async_pipe.communicate()' 'input' must be either data or an object with 'read' method (i.e., regular file object or AsyncFile object). """ def write_proc(fd, input, coro=None): size = 16384 if isinstance(input, str) or isinstance(input, bytes): n = yield fd.write(input, full=True) if n != len(input): raise IOError('write failed') else: # TODO: how to know if 'input' is file object for # on-disk file? if hasattr(input, 'seek') and hasattr(input, 'fileno'): read_func = partial_func(os.read, input.fileno()) else: read_func = input.read while 1: data = yield read_func(size) if not data: break if isinstance(data, str): data = data.encode() n = yield fd.write(data, full=True) if n != len(data): raise IOError('write failed') input.close() fd.close() def read_proc(fd, coro=None): size = 16384 buflist = [] while 1: buf = yield fd.read(size) if not buf: break buflist.append(buf) fd.close() data = b''.join(buflist) raise StopIteration(data) if self.stdout: stdout_coro = Coro(read_proc, self.stdout) if self.stderr: stderr_coro = Coro(read_proc, self.stderr) if input and self.stdin: stdin_coro = Coro(write_proc, self.stdin, input) yield stdin_coro.finish() raise StopIteration((yield stdout_coro.finish()) if self.stdout else None, (yield stderr_coro.finish()) if self.stderr else None) def poll(self): """Similar to 'poll' of Popen. """ if self.last: return self.last.poll() elif self.first: return self.first.poll() def close(self): """Close pipe. """ self.first = None self.last = None def __del__(self): self.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, trace): self.close() return True
mit
-483,532,685,367,652,300
39.12296
98
0.496488
false
4.453261
false
false
false
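The record above appears to be the asynchronous-file layer of the asyncoro library (AsyncFile/AsyncPipe). A minimal usage sketch, assuming asyncoro's documented entry points (asyncoro.Coro and asyncoro.asyncfile.AsyncPipe) and an 'ls' command on the host; untested here:

import subprocess
import asyncoro
from asyncoro.asyncfile import AsyncPipe

def list_dir(coro=None):
    # start a child process with a pipe on stdout and wrap it for async reads
    proc = subprocess.Popen(['ls', '-l'], stdout=subprocess.PIPE)
    pipe = AsyncPipe(proc)
    # per the class docstring, communicate() must be driven with 'yield'
    stdout, stderr = yield pipe.communicate()
    print(stdout.decode())

asyncoro.Coro(list_dir)

Because read/write/communicate are generator-based, every caller has to be a coroutine scheduled by asyncoro rather than a plain function call.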
jeffkaufman/diplomator
disp.py
1
16620
""" Usage: $ ls IMAGE_L.gif icons COORDINATES statusfile $ python disp.py datafilesdir statusfile tmp.png """ import sys import os.path from random import random import Image, ImageDraw from math import sqrt, acos, sin ILLEGAL_PLACEMENT = (5,5) # The special value 5,5 for coordinates indicates illegal placement IMAGE = "IMAGE_L.png" COORDS = "COORDINATES" ICONS="icons" use_images = True use_names = True use_flood_fill = True def all(s): """old versions of python don't have all""" for x in s: if not x: return False return True def parse_coords(COORDS): """ read the coordinates files and return {prov-name: (info)} The keys of the hash are upper case anmes of provinces The values are (x,y) pairs representing the coordinates of, in order, the place to draw the name, the place to draw an army, the place to draw a fleet, and the place to draw a fleet on the alternate coast. """ inf = open(COORDS) coords = {} for line in inf: line = line.strip() if not line or not line[0].isalpha(): continue n, c = line.split(None, 1) nX, nY, aX, aY, fX, fY, fsX, fsY = [2*int(z)+5 for z in c.split()] coords[n.upper()] = [(nX, nY), (aX, aY), (fX, fY), (fsX, fsY)] return coords # we hard code the colors to flood fill with colors = {"Klingon" : (255,180,170), # red "Cardassian" : (240,240,150), # yellow "Borg" : (180,180,180), # black "Federation" : (190,190,255), # blue "Dominion" : (243,190,240), # purple "Ferengi" : (230,190,140), # orange "Romulan" : (190,240,190)} # green def parse_status(status_fname, provinces): """ the status file has all the information needed for future turns Returns options, powers options are the things that show up before the first section representing a country. They can represent anything, from the season of the game to random information someone wanted to add. Unrecognized options are parsed but then ignored. options is a hash. The keys are the names of the options ('Season', 'Wormhole', ...), the values are tuples where each element of the tuple is one of the space separated components. Ugh. Example: options['Season'] = ('Spring', 'Moves', '2373') options['Wormhole'] = ('BLA', 'GOL') powers are the countries, and contain all the information about a country. There should only be the 7 normal ones, though it's fine if some are missing. powers is a list of power tuples each power tuple is in the form: (country, race, units, scs) country is the text name of the power: 'Russia' race is the text race of the power: 'Cardassian' units is a list of the units of that power, consisting of unit tuples. unit tuples are in the form: (unitname, (attr1, attr2, attr3)) unitname is the name of the province the unit is in, while attrs include all of the other information about the unit. The status lines: Bla Fleet Assimilated(Cardassian) Mos Army Cloaked Disloged(War) Knows(Klingon,Cardassian) translate to 'a fleet in the black sea that is now borg but was assimilated from the cardassians' and 'a cloaked army in moscow that was disloged from warsaw (and so cannot retreat there) and both the klingons and cardassians know about it)'. These would turn into the unit tuples: ('BLA', ('Fleet', 'Assimilated(Cardassian)')) ('MOS', ('Army', 'Cloaked', 'Disloged(War)', 'Knows(Klingon,Cardassian)')) scs is a list of the supply centers belonging to that power. This list consists of the text names of the supply centers. 
""" inf = open(status_fname) options = {} # {option_name ->(optionarg1, optionarg2)} powers = [] # [(country, race, units, scs)] # units : [(unitname, (attr1, attr2, attr3))] # scs : [sc1, sc2, ...] power = [] units = [] scs = [] for line in inf: line = line.strip() if not line or not line[0].isalpha(): continue if line.endswith(":"): if power: power.append(units) power.append(scs) powers.append(power) power, units, scs = [], [], [] line = line[:-1] # lose final : country, race = line.split(None) assert race.startswith("(") and race.endswith(")") race = race[1:-1] # lose parens power = [country, race] else: try: name, attrs = line.split(None, 1) except ValueError: name, attrs = line, "" attrs = attrs.split() if power: if not attrs or all(attr.upper() in provinces for attr in attrs): scs.append(name.upper()) scs.extend([attr.upper() for attr in attrs]) else: units.append((name.upper(), attrs)) else: options[name]=attrs if power: power.append(units) power.append(scs) powers.append(power) return options, powers def choose_loc(mode, coast, a, f, fs): if mode == "Fleet": if coast == "Secondary": return fs return f if mode == "Army": return a if a == ILLEGAL_PLACEMENT: return f return a def get_image_fname(datafilesdir, race, mode, enterprise, trader, cloaked, infiltrated, assimilated): """ given info on the unit, try and get a picture for it """ fn = "%s_%s" % (race, mode) if enterprise: fn += "_Enterprise" if trader: fn += "_Trader" if cloaked: fn += "_Cloaked" if infiltrated: fn += "_Infiltrated" if assimilated: fn += "_" + assimilated fn += ".png" fn = os.path.join(datafilesdir,ICONS, fn) if os.path.exists(fn): return fn print "Missing", fn return None def draw_powers(datafilesdir, powers, coords, draw, im): """ modify im to represent powers """ used = set() draw_fnames = {} debug_interpret_locs = {} for country, race, units, scs in powers: for unitname, attrs in units: n, a, f, fs = coords[unitname] coast, mode, enterprise, infiltrated, cloaked, trader, assimilated, disloged = None, None, False, False, False, False, None, None other_attrs = [] for attr in attrs: o_attr = attr attr = attr.lower() if attr in ["(sc)", "(wc)"]: coast = "Secondary" elif attr in ["(nc)", "(ec)"]: pass elif attr == "army": mode = "Army" elif attr == "fleet": mode = "Fleet" elif attr == "flarmy": mode = "Flarmy" elif attr == "infiltrated": infiltrated = True elif attr == "cloaked": cloaked = True elif attr == "trader": trader = True elif attr == "enterprise": enterprise = True elif o_attr.startswith("Assimilated("): assimilated = o_attr elif o_attr.startswith("Dislodged"): disloged = o_attr else: assert "Disloged" not in o_attr other_attrs.append(o_attr) loc = choose_loc(mode, coast, a, f, fs) color=colors[race] image_fname = None if use_images: image_fname = get_image_fname(datafilesdir, race, mode, enterprise, trader, cloaked, infiltrated, assimilated) if not image_fname: """ if we don't have some icons, draw ovals instead """ while loc in used: loc = add(loc, (12, 12)) used.add(loc) debug_interpret_locs[loc] = image_fname, unitname if mode == "Fleet": xy = [add(loc,(-5,-10)), add(loc,(5,10))] elif mode == "Army": xy = [add(loc,(-10,-5)), add(loc,(10,5))] else: xy = [add(loc,(-6,-6)), add(loc,(6,6))] if cloaked: draw.ellipse(xy, outline=color) else: draw.ellipse(xy, fill=color) if infiltrated: draw.ellipse([add(loc,(-1,-1)), add(loc,(1,1))], fill=(0,0,0)) if trader: draw.line([loc[0], loc[1], loc[0], loc[1]-14], fill=(0,0,0)) else: if loc not in draw_fnames: draw_fnames[loc] = ["","",""] debug_interpret_locs[loc] 
= image_fname, unitname sort = 0 #"normal" if trader: sort = 1 #"trader" elif disloged: sort = 2 #"disloged" draw_fnames[loc][sort] = image_fname if other_attrs: txt = "(%s)" % " ".join(attr[0].upper() for attr in other_attrs) draw.text(add(loc,(10,-5)),txt,fill=color) for loc, (normal, trader, disloged) in draw_fnames.items(): t_loc = loc if normal: t_loc = add(loc, (0, -28)) if trader: add_icon(im, trader, t_loc) if disloged: #assert normal add_icon(im, disloged, loc, offset=True) if normal: try: add_icon(im, normal, loc) except Exception: print loc, debug_interpret_locs[loc] raise def dot(a,b): x0,y0=a x1,y1=b return x0*x1+y0*y1 def add(a,b): x0,y0=a x1,y1=b return x0+x1,y0+y1 def sub(a,b): x0,y0=a x1,y1=b return x0-x1,y0-y1 def mul(s, pt): x0,y0=pt return x0*s,y0*s def perp(pt): # [x,y] . [ 0,1], # [-1,0] x,y=pt return (-y, x) def calculate_bezier(p, steps = 5000): """ from http://www.pygame.org/wiki/BezierCurve with only small modifications Calculate a bezier curve from 4 control points and return a list of the resulting points. The function uses the forward differencing algorithm described here: http://www.niksula.cs.hut.fi/~hkankaan/Homepages/bezierfast.html """ t = 1.0 / steps temp = t*t f = p[0] fd = mul(t, mul(3, sub(p[1], p[0]))) fdd_per_2 = mul(temp, mul(3, add(sub(p[0], mul(2, p[1])), p[2]))) fddd_per_2 = mul(t, mul(temp, mul(3, add(mul(3, sub(p[1], p[2])), sub(p[3], p[0]))))) fddd = add(fddd_per_2, fddd_per_2) fdd = add(fdd_per_2 , fdd_per_2) fddd_per_6 = mul(.33333, fddd_per_2) points = [] for x in range(steps): points.append(f) f = add(add(add(f, fd), fdd_per_2), fddd_per_6) fd = add(add(fd, fdd), fddd_per_2) fdd = add(fdd, fddd) fdd_per_2 = add(fdd_per_2, fddd_per_2) points.append(f) return points def distsq(a,b): """ the square of the distance from a to b """ return (a[0]-b[0])*(a[0]-b[0])+(a[1]-b[1])*(a[1]-b[1]) def mkint(pt): x,y = pt return int(x),int(y) def draw_wormhole(start,stop,img): """ make a bezier curve, color points near the bezier curve """ sys.stderr.write("\nWormholeing...") st_a = mul(.4, sub(start,stop)) st_b = mul(.2, sub(stop,start)) c1=add(start, add(st_b, perp(mul(.5,st_b)))) c2=add(stop, add(st_a, perp(mul(.5,st_a)))) control_points = [start, c1, c2, stop] # for each point in a 14x14 square centered on each point on the # bezier curve, compute the minimum distance from that point to the # curve and put that info in all_points. All points not in # all_pts.keys() should be left alone # all_pts = {} # pt -> dist to curve for x in range((len(control_points)-1)/3): b_points = calculate_bezier(control_points[3*x:3*x+4]) for pt in b_points: for xx in range(-6,6): for yy in range(-6,6): d=xx*xx+yy*yy npt=mkint(add(pt,(xx,yy))) if npt not in all_pts or all_pts[npt] > d: all_pts[npt]=d sys.stderr.write(".") sys.stderr.write("\n\n") # now we have points and their distances to the curve. 
color them # apropriately: no change right on the curve, darken the r and g as # we move away, then when we get too far fade back to no change for pt, d in all_pts.iteritems(): # d is the distance squared from pt to the curve # r,g,b are the colors of the output pixel # alpha is how much to darken r and g by (range 0-1) r,g,b=img.getpixel(pt) alpha = d/20.0 # get darker proportional to the distance to the # line squared, reaching 100% at sqrt(20) pixels # away if alpha > 1: # if we're all the way dark, go back towards the light alpha = 1-(alpha/2) if alpha < 0: # if we're all the way light, make no change alpha = 0 alpha = (alpha)/6 # instead of darkening all the way, darken only 1/6 assert 0<=alpha<=1 r,g,b=int(r-255*(alpha)), int(g-255*(alpha)), b img.putpixel(pt, (r,g,b)) sys.stderr.write("\n") def draw_background(coords, powers, draw, img, options): """ modify img to show sc ownership, province names, and the wormhole """ ownership = {} for country, race, units, scs in powers: for sc in scs: ownership[sc] = colors[race] if use_flood_fill: sys.stderr.write("\nFlood Filling") for name, (n, a, f, fs) in coords.items(): if name in ownership: color = ownership[name] flood_fill(img, n, color) sys.stderr.write(".") sys.stderr.write("\n") if "Wormhole" in options: a, b = options["Wormhole"] start = coords[a.upper()][0] stop = coords[b.upper()][0] draw_wormhole(start, stop, img) if use_names: for name, (n, a, f, fs) in coords.items(): color = (0,0,0) if name in ownership and not flood_fill: color = ownership[name] draw.text(n, name, fill=color) def alpha_paste(img_base, img_add, xyoffset): """ img.paste ignores the alpha channel, so we do it by hand """ from_x_max, from_y_max = img_add.size def blend(a_color, b_color, alpha): return (a_color*alpha + b_color*(255-alpha))/255 for x in range(from_x_max): for y in range(from_y_max): ar,ag,ab,aa = img_add.getpixel((x,y)) br,bg,bb = img_base.getpixel(add((x,y), xyoffset)) if aa < 5: # if it's almost all the way transparent, make it all the # way aa = 0 r,g,b,a = blend(ar,br,aa), blend(ag,bg,aa), blend(ab,bb,aa), 255 img_base.putpixel(add((x,y), xyoffset), (r,g,b,a)) def within(img, x, y): img_x, img_y = img.size return 0 <= x < img_x and 0 <= y <= img_y def flood_fill(image, loc, value): """ Flood fill on a region (not in old PIL) modified from http://article.gmane.org/gmane.comp.python.image/1753 """ x,y = loc if not within(image,x, y): return orig_color = image.getpixel((x, y)) if orig_color == value: return edge = [(x, y)] image.putpixel((x, y), value) while edge: newedge = [] for (x, y) in edge: for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)): if within(image, s, t) and image.getpixel((s, t)) == orig_color: image.putpixel((s, t), value) newedge.append((s, t)) edge = newedge def real_size(ico): """ compute the size of the part of the image having alpha > 5 """ x_max, y_max = ico.size rx_min, rx_max = x_max/2, x_max/2 ry_min, ry_max = y_max/2, y_max/2 for x in range(x_max): for y in range(y_max): r,g,b,a=ico.getpixel((x,y)) if a >= 5: if x < rx_min: rx_min = x if x > rx_max: rx_max = x if y < ry_min: ry_min = y if y > ry_max: ry_max = y return rx_max-rx_min, ry_max-ry_min def draw_standoffs(datafilesdir, coords, places, draw, im): for place in places: n, a, f, fs = coords[place.upper()] loc = choose_loc(None, None, a, f, fs) add_icon(im, os.path.join(datafilesdir,ICONS,"Standoff.png"), loc) def add_icon(im, iconfname, loc, offset=False): """ add the icon in iconfname to im at loc if offset, adjust position by 1/3 of the real width and height 
""" ico = Image.open(iconfname).convert() x,y = loc x_max, y_max = ico.size loc = x-x_max/2, y-y_max/2 if offset: real_w, real_h = real_size(ico) loc = loc[0]+real_w/3, loc[1]+real_h/3 alpha_paste(im, ico, loc) def start(datafilesdir, status_fname, img_out): coords = parse_coords(os.path.join(datafilesdir,COORDS)) options, powers = parse_status(status_fname, coords) im = Image.open(os.path.join(datafilesdir,IMAGE)).convert() draw = ImageDraw.Draw(im) draw_background(coords, powers, draw, im, options) draw_powers(datafilesdir, powers, coords, draw, im) if "PlacesCannotRetreatTo" in options: draw_standoffs(datafilesdir, coords, options["PlacesCannotRetreatTo"], draw, im) im.save(img_out) if __name__ == "__main__": start(*sys.argv[1:])
gpl-2.0
7,914,423,068,613,350,000
25.849758
135
0.581649
false
3.057395
false
false
false
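The flood_fill in the record above grows a frontier ("edge") list one ring at a time instead of recursing. The same technique on a plain 2-D list, kept free of the PIL dependency purely for illustration:

def flood_fill_grid(grid, x, y, value):
    # fill the connected region containing grid[y][x] with value, frontier by frontier
    orig = grid[y][x]
    if orig == value:
        return
    edge = [(x, y)]
    grid[y][x] = value
    while edge:
        newedge = []
        for (cx, cy) in edge:
            for (s, t) in ((cx + 1, cy), (cx - 1, cy), (cx, cy + 1), (cx, cy - 1)):
                if 0 <= t < len(grid) and 0 <= s < len(grid[t]) and grid[t][s] == orig:
                    grid[t][s] = value
                    newedge.append((s, t))
        edge = newedge

grid = [[0, 0, 1],
        [0, 1, 1],
        [1, 1, 1]]
flood_fill_grid(grid, 0, 0, 9)
# grid is now [[9, 9, 1], [9, 1, 1], [1, 1, 1]]

Keeping the whole frontier in a list avoids Python's recursion limit on large filled regions, which is presumably why the map provinces are filled this way.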
nicolasfauchereau/paleopy
paleopy/core/WR.py
1
7469
import os import sys import numpy as np import pandas as pd import h5py import json from ..markov import get_probs from ..utils import seasons_params from ..plotting import heatmap from ..plotting import bar from ..utils import make_sig class WR: """ base class for weather regimes calculations and plots takes either a proxy or ensemble instance and either 'SW Pacific Regimes' or 'Kidson Types' """ def __init__(self, obj, classification='Kidson Types'): super(WR, self).__init__() # the parent can be either an instance of a `proxy` or `ensemble` class self.parent = obj self.djsons = obj.djsons self.classification = classification # get the season, and the analog years self.season = self.parent.season self.analog_years = self.parent.analog_years self.detrend = self.parent.detrend self.climatology = self.parent.climatology if self.parent.description == 'proxy': self.sitename = self.parent.sitename def _get_WR_json(self): with open(os.path.join(self.djsons, 'WRs.json'), 'r') as f: dict_json = json.loads(f.read()) return dict_json def _get_WR_ts(self): if not(hasattr(self, 'dict_json')): self.dict_json = self._get_WR_json() csv = self.dict_json[self.classification]['WR_TS'] wr_ts = pd.read_csv(csv, parse_dates=True, index_col=0) return wr_ts def _get_WR_MC(self): if not(hasattr(self, 'dict_json')): self.dict_json = self._get_WR_json() f = h5py.File(self.dict_json[self.classification]['Markov Chains'], mode='r') MC_probs = f[self.season]['probs'].value MC_probs_classes = f[self.season]['probs'].attrs['classes'] f.close() MC_probs_classes = MC_probs_classes.split(',') MC_probs = pd.DataFrame(MC_probs, index=MC_probs_classes) # reindex so that the index of the MC simulations follows the # order of the types defined in the JSON file MC_probs = MC_probs.reindex(self.dict_json[self.classification]['types']) # The MC_probs contains the frequencies # of each type in the 1000 simulations return MC_probs def _get_season_ts(self): if not(hasattr(self,'wr_ts')): wr_ts = self._get_WR_ts() ts = wr_ts.copy() ts.loc[:,'month'] = ts.index.month sparams = seasons_params() m = list(range(1,13)) + list(range(1,13)) m = m[(sparams[self.season][1]-sparams[self.season][0]+12):(sparams[self.season][1]+12)] # selects the season ts_seas = ts[ts['month'].isin(m)] ts_seas = ts_seas.drop('month', axis=1) return ts_seas def _get_clim_probs(self): if not(hasattr(self, 'ts_seas')): ts_seas = self._get_season_ts() ts = ts_seas.ix[str(self.climatology[0]): str(self.climatology[1])].copy() types = self.dict_json[self.classification]['types'] clim_probs = get_probs(ts['type'], types) # create a pandas.Series, index are the types clim_probs = pd.Series(clim_probs, index=types) return clim_probs def _get_compos_probs(self, analog_years): """ Arguments --------- analog_years : list list of analog years Return ------ obs_probs : pandas.Series observed probabilities """ if not(hasattr(self, 'ts_seas')): ts_seas = self._get_season_ts() ayears = list(map(str, analog_years)) ts = ts_seas.copy() ts = pd.concat([ts.ix[l] for l in ayears]) types = self.dict_json[self.classification]['types'] obs_probs = get_probs(ts['type'], types) obs_probs = pd.Series(obs_probs, index=types) return obs_probs def probs_anomalies(self, kind='one', test=True): """ Arguments --------- kind : string if kind == 'one': either for a `proxy` or for all the years in an `ensemble` as a whole if kind == 'many': for each proxy record in an `ensemble` object Return ------ anoms_probs : pandas.Series probabilities anomalies """ # get the climatological probabilities, 
those are always the same clim_probs = self._get_clim_probs() if kind == 'one': obs_probs = self._get_compos_probs(self.analog_years) anoms = obs_probs - clim_probs if self.parent.description == 'proxy': self.df_anoms = pd.DataFrame(anoms, columns=[self.sitename]) self.df_probs = pd.DataFrame(obs_probs, columns=[self.sitename]) else: self.df_anoms = pd.DataFrame(anoms, columns=['ensemble']) self.df_probs = pd.DataFrame(obs_probs, columns=['ensemble']) # if test, the percentiles values are added as columns to # the df_probs pandas.DataFrame if test: MC_probs = self._get_WR_MC() for tval in [0.1, 0.9, 0.05, 0.95, 0.01, 0.99]: c = str(int(tval * 100)) self.df_probs.loc[:,c] = MC_probs.T.quantile(tval) if kind == 'many': """ we can only calculate `many` anomalies if the object passed to the WR instance is an `ensemble` object, raise an exception if that is not the case """ if self.parent.description != 'ensemble': print(""" ERROR! cannot calculate `many` anomalies with a proxy object: you need to pass an `ensemble` object to the `WR` class """) raise Exception("KIND ERROR") else: d_anoms = {} d_probs = {} d = self.parent.dict_proxies for k in d.keys(): obs_probs = self._get_compos_probs(analog_years = d[k]['analog_years']) d_anoms[k] = obs_probs - clim_probs d_probs[k] = obs_probs # df_probs contains the ANOMALIES in frequencies (probabilities) # for the composite years for each proxy in the ensemble # index = types # columns = proxies self.df_anoms = pd.DataFrame(d_anoms) # df_probs contains the OBSERVED frequencies (probabilities) # for the composite years for each proxy in the ensemble # index = types # columns = proxies self.df_probs = pd.DataFrame(d_probs) # if test, we add another DataFrame to the object, containing the # percentile values coming from the MC simulation if test: MC_probs = self._get_WR_MC() df_probs_MC = pd.DataFrame() for tval in [0.1, 0.9, 0.05, 0.95, 0.01, 0.99]: c = str(int(tval * 100)) df_probs_MC.loc[:,c] = MC_probs.T.quantile(tval) self.df_probs_MC = df_probs_MC def plot_heatmap(self): f = heatmap(self) return f def plot_bar(self, sig=10): f = bar(self, sig) return f
mit
420,959,520,509,369,000
35.612745
96
0.543312
false
3.822416
false
false
false
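WR._get_compos_probs above relies on a get_probs helper imported from the package's markov module, which is not part of this record. A plausible minimal stand-in (the name and behaviour here are assumptions, not the package's actual code) illustrates the frequency-anomaly idea:

import pandas as pd

def get_probs(series, types):
    # frequency of each regime type, returned in the order given by `types`
    counts = series.value_counts(normalize=True)
    return counts.reindex(types).fillna(0.0).values

types = ['T', 'SW', 'TNW', 'H']
clim = pd.Series(['T', 'SW', 'H', 'T', 'H'] * 20)      # stand-in climatological record
comp = pd.Series(['T', 'T', 'T', 'SW', 'H'])           # stand-in analog-year composite

clim_probs = pd.Series(get_probs(clim, types), index=types)
obs_probs = pd.Series(get_probs(comp, types), index=types)
anoms = obs_probs - clim_probs   # positive means the type is over-represented in analog years
print(anoms)

The class then compares such anomalies against Monte-Carlo percentiles (df_probs_MC) to judge which are significant.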
nickattree/Minijets
mj_morpho.py
1
3063
# -*- coding: utf-8 -*- """ Created on Sat Feb 20 10:51:13 2016 @author: noa """ # Theoretical time evolution of minijet gradient def theory(M): return np.arctan((1.0 - np.cos(M))/(2.0 * np.sin(M) - 1.5 * M)) * 180.0 / np.pi # Currently uses graphical comparison method - inefficient and slow # Also accuracy dependdet on atol parameter chosen but data is not v accurate anyway # Alternatives are: solving symbolically with sympy solve but this only does linear # fsolve or Brent method from scipy for single non-linear eqn but this requiers limits a, b with # f(a,b) of different signs. Func def would have to have extra offset term = -gradient # and solve repeately between 0<M<1.27, 1.27<M<2pi, 2pi<M<4pi, etc. # import scipy.optimize # root = scipy.optimize.brentq(theory, 1.27, 2.0 * np.pi) # root = scipy.optimize.fsolve(theory,6) #from sympy import * #q=solvers.nsolve(((1.0 - cos(M))/(2.0 * sin(M) - 1.5 * M)) + 1.9 , 10) # Setup graphical comparison method x = np.arange(0, 251, 0.01) # Roughly every 0.5deg for 40 cycles y1 = theory(x) data['rads'] = np.zeros(shape=(len(data))) data['dela'] = np.zeros(shape=(len(data))) data['res'] = np.zeros(shape=(len(data))) # Loop to start here for i in range(len(data)): if np.isnan(data.angle[i]): data.rads[i] = np.nan data.res[i] = np.nan data.dela[i] = np.nan else: y2 = np.full(len(x), data.angle[i]) idx = np.argwhere(np.isclose(y1,y2, atol=1.0)).reshape(-1) deltaa = data.r[i]/(1.0 - np.cos(x[idx])) epi = 2.0 * np.sin(x[idx]) * deltaa kep = l[i] - epi mkep = -kep / (1.5 * deltaa) residual = abs(mkep - x[idx]) # Select best fit solution sol = residual.argmin() sol = sol[~np.isnan(sol)] data.rads[i] = float(x[idx[sol]]) data.dela[i] = float(deltaa[sol]) data.res[i] = float(residual[sol]) #plt.plot(x, y1) #plt.plot([0, 20], [data.angle[3], data.angle[3]]) #plt.plot(x[idx], y1[idx], 'ro') #plt.xlim(0, 20) data['res'] /= (2.0 * np.pi) data['cycles'] = data.rads/(2.0 * np.pi) data['days'] = data.cycles * 0.6196 # Orbital period F ring # Histogram of the radial sizes of minijets plt.figure(figsize=(8, 6), dpi=80) # Cut out unrealistically large solutions for now temp = data.dela[abs(data.dela)<200] temp.plot(kind = 'hist', bins =80) plt.xlabel('$\Delta$a (km)') plt.ylabel('Features') plt.xlim(-200, 200) plt.savefig('plot_dela.pdf') # Histogram of the angles minijets plt.figure(figsize=(8, 6), dpi=90) data.angle[(data.Class=='Classic') | (data.Class=='Classic - bright head')].plot(kind = 'hist', bins =80) plt.xlabel('Gradient ($^{\circ}$)') plt.ylabel('Features') plt.xlim(-90, 90) plt.savefig('plot_grad.pdf') """ # Alternative using numpy plt.figure(figsize=(8, 6), dpi=80) bins = np.arange(-200, 201, 5) hist = np.histogram(r[data.Class == 'Classic'], bins=bins)[0] bins = 0.5*(bins[1:] + bins[:-1]) plt.bar(bins, hist, width = 5, fill = False) plt.xlim(-200, 200) plt.xlabel('r (km)') plt.ylabel('Features') plt.savefig('plot_r.pdf') """
mit
3,180,090,542,250,781,700
33.426966
105
0.634019
false
2.661164
false
false
false
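mj_morpho.py inverts the theory(M) gradient curve by brute force: evaluate it on a dense grid of mean anomalies and keep the grid points whose gradient is within atol of the measurement. A self-contained sketch of that step, with an example angle chosen purely for illustration:

import numpy as np

def theory(M):
    # theoretical minijet gradient (degrees) as a function of mean anomaly M (radians)
    return np.arctan((1.0 - np.cos(M)) / (2.0 * np.sin(M) - 1.5 * M)) * 180.0 / np.pi

x = np.arange(0.01, 251, 0.01)        # dense grid, starting past M=0 to avoid 0/0
measured_angle = -30.0                # example measured gradient in degrees
candidates = x[np.isclose(theory(x), measured_angle, atol=1.0)]
print(candidates[:5])                 # mean anomalies consistent with the measured angle

Multiple candidates per angle are expected, since the curve repeats each orbit; that is why the script afterwards keeps the solution with the smallest Keplerian residual.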
kikocorreoso/brython
www/src/Lib/interpreter.py
1
14285
import sys import tb as traceback from browser import console, document, window, html, DOMNode from browser.widgets.dialog import Dialog _credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands for supporting Python development. See www.python.org for more information.""" _copyright = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com All Rights Reserved. Copyright (c) 2001-2013 Python Software Foundation. All Rights Reserved. Copyright (c) 2000 BeOpen.com. All Rights Reserved. Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved. Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved.""" _license = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ class Info: def __init__(self, msg): self.msg = msg def __repr__(self): return self.msg # execution namespace editor_ns = { 'credits': Info(_credits), 'copyright': Info(_copyright), 'license': Info(_license) } # default style for console textarea style_sheet = """ .brython-interpreter { background-color: #000; color: #fff; font-family: consolas, courier; } """ class Trace: def __init__(self): self.buf = "" def write(self, data): self.buf += str(data) def format(self): """Remove calls to function in this script from the traceback.""" lines = self.buf.split("\n") stripped = [lines[0]] for i in range(1, len(lines), 2): if __file__ in lines[i]: continue stripped += lines[i: i+2] return "\n".join(stripped) class Interpreter: """Add a Python interactive interpreter in a textarea.""" def __init__(self, elt_id=None, title="Interactive Interpreter", globals=None, locals=None, rows=30, cols=84, default_css=True): """ Create the interpreter. - "elt_id" is the id of a textarea in the document. If not set, a new popup window is added with a textarea. 
- "globals" and "locals" are the namespaces the RPEL runs in """ if default_css: # Insert default CSS stylesheet if not already loaded for stylesheet in document.styleSheets: if stylesheet.ownerNode.id == "brython-interpreter": break else: document <= html.STYLE(style_sheet, id="brython-interpreter") if elt_id is None: self.dialog = Dialog(title=title, top=10, left=10, default_css=default_css) self.zone = html.TEXTAREA(rows=rows, cols=cols, Class="brython-interpreter") self.dialog.panel <= self.zone else: if isinstance(elt_id, str): try: elt = document[elt_id] if elt.tagName != "TEXTAREA": raise ValueError( f"element {elt_id} is a {elt.tagName}, " + "not a TEXTAREA") self.zone = elt except KeyError: raise KeyError(f"no element with id '{elt_id}'") elif isinstance(elt_id, DOMNode): if elt_id.tagName == "TEXTAREA": self.zone = elt_id else: raise ValueError("element is not a TEXTAREA") else: raise ValueError("element should be a string or " + f"a TEXTAREA, got '{elt_id.__class__.__name__}'") v = sys.implementation.version self.zone.value = (f"Brython {v[0]}.{v[1]}.{v[2]} on " + f"{window.navigator.appName} {window.navigator.appVersion}\n>>> ") self.cursor_to_end() self._status = "main" self.current = 0 self.history = [] self.globals = {} if globals is None else globals self.globals.update(editor_ns) self.locals = self.globals if locals is None else locals self.buffer = '' sys.stdout.write = sys.stderr.write = self.write sys.stdout.__len__ = sys.stderr.__len__ = lambda: len(self.buffer) self.zone.bind('keypress', self.keypress) self.zone.bind('keydown', self.keydown) self.zone.bind('mouseup', self.mouseup) self.zone.focus() def cursor_to_end(self, *args): pos = len(self.zone.value) self.zone.setSelectionRange(pos, pos) self.zone.scrollTop = self.zone.scrollHeight def get_col(self): # returns the column num of cursor sel = self.zone.selectionStart lines = self.zone.value.split('\n') for line in lines[:-1]: sel -= len(line) + 1 return sel def keypress(self, event): if event.keyCode == 9: # tab key event.preventDefault() self.zone.value += " " elif event.keyCode == 13: # return sel_start = self.zone.selectionStart sel_end = self.zone.selectionEnd if sel_end > sel_start: # If text was selected by the mouse, copy to clipboard document.execCommand("copy") self.cursor_to_end() event.preventDefault() # don't insert line feed return src = self.zone.value if self._status == "main": currentLine = src[src.rfind('>>>') + 4:] elif self._status == "3string": currentLine = src[src.rfind('>>>') + 4:] currentLine = currentLine.replace('\n... ', '\n') else: currentLine = src[src.rfind('...') + 4:] if self._status == 'main' and not currentLine.strip(): self.zone.value += '\n>>> ' event.preventDefault() return self.zone.value += '\n' self.history.append(currentLine) self.current = len(self.history) if self._status in ["main", "3string"]: try: _ = editor_ns['_'] = eval(currentLine, self.globals, self.locals) self.flush() if _ is not None: self.write(repr(_) + '\n') self.flush() self.zone.value += '>>> ' self._status = "main" except IndentationError: self.zone.value += '... ' self._status = "block" except SyntaxError as msg: if str(msg) == 'invalid syntax : triple string end not found' or \ str(msg).startswith('Unbalanced bracket'): self.zone.value += '... 
' self._status = "3string" elif str(msg) == 'eval() argument must be an expression': try: exec(currentLine, self.globals, self.locals) except: self.print_tb() self.flush() self.zone.value += '>>> ' self._status = "main" elif str(msg) == 'decorator expects function': self.zone.value += '... ' self._status = "block" else: self.syntax_error(msg.args) self.zone.value += '>>> ' self._status = "main" except: # the full traceback includes the call to eval(); to # remove it, it is stored in a buffer and the 2nd and 3rd # lines are removed self.print_tb() self.zone.value += '>>> ' self._status = "main" elif currentLine == "": # end of block block = src[src.rfind('\n>>>') + 5:].splitlines() block = [block[0]] + [b[4:] for b in block[1:]] block_src = '\n'.join(block) # status must be set before executing code in globals() self._status = "main" try: _ = exec(block_src, self.globals, self.locals) if _ is not None: print(repr(_)) except: self.print_tb() self.flush() self.zone.value += '>>> ' else: self.zone.value += '... ' self.cursor_to_end() event.preventDefault() def keydown(self, event): if event.keyCode == 37: # left arrow sel = self.get_col() if sel < 5: event.preventDefault() event.stopPropagation() elif event.keyCode == 36: # line start pos = self.zone.selectionStart col = self.get_col() self.zone.setSelectionRange(pos - col + 4, pos - col + 4) event.preventDefault() elif event.keyCode == 38: # up if self.current > 0: pos = self.zone.selectionStart col = self.get_col() # remove current line self.zone.value = self.zone.value[:pos - col + 4] self.current -= 1 self.zone.value += self.history[self.current] event.preventDefault() elif event.keyCode == 40: # down if self.current < len(self.history) - 1: pos = self.zone.selectionStart col = self.get_col() # remove current line self.zone.value = self.zone.value[:pos - col + 4] self.current += 1 self.zone.value += self.history[self.current] event.preventDefault() elif event.keyCode == 8: # backspace src = self.zone.value lstart = src.rfind('\n') if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6): event.preventDefault() event.stopPropagation() elif event.ctrlKey and event.keyCode == 65: # ctrl+a src = self.zone.value pos = self.zone.selectionStart col = get_col() self.zone.setSelectionRange(pos - col + 4, len(src)) event.preventDefault() elif event.keyCode in [33, 34]: # page up, page down event.preventDefault() def mouseup(self, ev): """If nothing was selected by the mouse, set cursor to prompt.""" sel_start = self.zone.selectionStart sel_end = self.zone.selectionEnd if sel_end == sel_start: self.cursor_to_end() def write(self, data): self.buffer += str(data) def flush(self): self.zone.value += self.buffer self.buffer = '' def print_tb(self): trace = Trace() traceback.print_exc(file=trace) self.zone.value += trace.format() def syntax_error(self, args): info, filename, lineno, offset, line = args print(f" File {filename}, line {lineno}") print(" " + line) print(" " + offset * " " + "^") print("SyntaxError:", info) self.flush() class Inspector(Interpreter): def __init__(self, title="Frames inspector", rows=30, cols=84, default_css=True): frame = sys._getframe().f_back super().__init__(None, title, globals=frame.f_globals.copy(), locals=frame.f_locals.copy(), rows=rows, cols=cols, default_css=default_css) frames_sel = html.SELECT() self.frames = [] while frame: self.frames.append([frame.f_globals.copy(), frame.f_locals.copy()]) name = frame.f_code.co_name name = name.replace("<", "&lt;").replace(">", "&gt;") frames_sel <= html.OPTION(name) frame 
= frame.f_back frames_sel.bind("change", self.change_frame) frame_div = html.DIV("Frame " + frames_sel) panel_style = window.getComputedStyle(self.dialog.panel) frame_div.style.paddingLeft = panel_style.paddingLeft frame_div.style.paddingTop = panel_style.paddingTop self.dialog.insertBefore(frame_div, self.dialog.panel) def change_frame(self, ev): self.globals, self.locals = self.frames[ev.target.selectedIndex]
bsd-3-clause
-2,024,490,416,110,576,000
37.608108
91
0.533567
false
4.293658
false
false
false
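A rough sketch of how the Interpreter class above is meant to be used from a Brython page; the module name and element id are illustrative assumptions and this is not checked against Brython's documentation:

# inside a <script type="text/python"> block on a page that loads Brython
from interpreter import Interpreter

# open the interpreter in its own draggable dialog...
repl = Interpreter(title="Python console", rows=20, cols=80)

# ...or attach it to an existing <textarea id="console"> element instead
# repl = Interpreter("console")

Passing elt_id=None takes the dialog branch of __init__, while passing a string or a TEXTAREA node reuses an element already present in the page.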
raqqun/tweetcommander
packages/requests_oauthlib/core.py
1
2328
# -*- coding: utf-8 -*- from __future__ import unicode_literals from ..oauthlib.common import extract_params from ..oauthlib.oauth1 import (Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER) CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' import sys if sys.version > "3": unicode = str # OBS!: Correct signing of requests are conditional on invoking OAuth1 # as the last step of preparing a request, or at least having the # content-type set properly. class OAuth1(object): """Signs the request using OAuth 1 (RFC5849)""" def __init__(self, client_key, client_secret=None, resource_owner_key=None, resource_owner_secret=None, callback_uri=None, signature_method=SIGNATURE_HMAC, signature_type=SIGNATURE_TYPE_AUTH_HEADER, rsa_key=None, verifier=None, decoding='utf-8'): try: signature_type = signature_type.upper() except AttributeError: pass self.client = Client(client_key, client_secret, resource_owner_key, resource_owner_secret, callback_uri, signature_method, signature_type, rsa_key, verifier, decoding=decoding) def __call__(self, r): """Add OAuth parameters to the request. Parameters may be included from the body if the content-type is urlencoded, if no content type is set a guess is made. """ # Overwriting url is safe here as request will not modify it past # this point. content_type = r.headers.get('Content-Type'.encode('utf-8'), '') if not isinstance(content_type, unicode): content_type = content_type.decode('utf-8') is_form_encoded = (CONTENT_TYPE_FORM_URLENCODED in content_type) if is_form_encoded or extract_params(r.body): r.headers['Content-Type'] = CONTENT_TYPE_FORM_URLENCODED r.url, r.headers, r.body = self.client.sign( unicode(r.url), unicode(r.method), r.body or '', r.headers) else: # Omit body data in the signing of non form-encoded requests r.url, r.headers, _ = self.client.sign( unicode(r.url), unicode(r.method), None, r.headers) return r
gpl-3.0
6,963,956,816,960,503,000
37.163934
82
0.629296
false
3.854305
false
false
false
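The module above is a vendored copy of requests-oauthlib's OAuth1 auth hook; with the upstream package installed, typical use attaches it to a requests call (all credential strings below are placeholders):

import requests
from requests_oauthlib import OAuth1

auth = OAuth1(
    client_key='YOUR_APP_KEY',
    client_secret='YOUR_APP_SECRET',
    resource_owner_key='USER_TOKEN',
    resource_owner_secret='USER_TOKEN_SECRET',
)
resp = requests.get('https://api.twitter.com/1.1/account/verify_credentials.json', auth=auth)
print(resp.status_code)

As the comment in the module warns, signing happens in __call__ as the last step of request preparation, so the Content-Type must already be correct when the hook runs.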
GBPeters/upair
bot/gatwick.py
1
3495
""" Module for harvesting data from the Gatwick Aviation Society (GAS) aircraft database DO NOT USE """ # Imports import requests from bs4 import BeautifulSoup from db.pghandler import Connection # Constants GAS_URL = "http://www.gatwickaviationsociety.org.uk/modeslookup.asp" GAS_FIELDS = {"Registration": "registration", "DICAOType": "icaotype", "DType": "type", "DSerial": "serial", "DOperator": "operator", "DICAOOperator": "icaooperator", "DSubOperator": "suboperator"} def downloadGASPage(icao24): """ Search the GAS db for a specific transponder code :param icao24: The ICAO24 Mode S transponder code :return: The response object """ data = {"MSC": icao24, "Submit2": "Find"} for key in GAS_FIELDS: data[key] = "" headers = {"Host": "www.gatwickaviationsociety.org.uk", "Accept": "text/static,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Language": "en-US;q=0.7,en;q=0.3", "Accept-Encoding": "gzip, deflate", "Referer": "http://www.gatwickaviationsociety.org.uk/modeslookup.asp"} r = requests.post(GAS_URL, headers=headers, data=data) return r def getMissingICAO24Codes(): """ Create a list of codes not yet included in the aircraft database :return: A list if ICAO24 Mode S transponder codes """ sql = "SELECT icao24 FROM aircraft WHERE registration IS NULL" with Connection() as con: codes = [code[0] for code in con.selectAll(sql)] return codes def extractData(rawtext): """ extract values from raw HTML :param rawtext: The text to extract from :return: a dictionary with the GAS keys and values found in the HTML """ soup = BeautifulSoup(rawtext, "lxml") values = {} for key in GAS_FIELDS: value = soup.find("input", id=key) values[key] = value["value"] return values def storeData(icao24, data): """ Store aircraft data into the database :param icao24: The ICAO24 Mode S transponder code :param data: Dictionary with corresponding data :return: """ values = "" for key in GAS_FIELDS: name = GAS_FIELDS[key] value = data[key] if value == '' and key != "Registration": value = "NULL" else: value = "'%s'" % value values += "%s=%s," % (name, value) values = values[:-1] sql = "UPDATE aircraft SET %s WHERE icao24='%s'" % (values, icao24) with Connection(autocommit=True) as con: con.execute(sql) def harvestGAS(): """ GAS Harvest Base function, for use in bot.app.main :return: A dictionary with keys success (boolean) and message (string) """ codes = getMissingICAO24Codes() if len(codes) > 0: code = codes[0] r = downloadGASPage(code) data = extractData(r.text) storeData(code, data) if data["Registration"] == "Not Found": message = "No aircraft found for ICAO24 code %s" % code else: message = "Aircraft %s found for ICAO24 code %s." % (data["Registration"], code) result = {"success": True, "message": message} return result else: result = {"success": True, "message": "All aircraft already stored in database."} return result if __name__ == "__main__": harvestGAS()
gpl-3.0
6,033,836,778,019,814,000
29.391304
93
0.596567
false
3.548223
false
false
false
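extractData above reads the GAS lookup result out of hidden form inputs. A tiny self-contained illustration of that BeautifulSoup pattern (using the stdlib html.parser instead of lxml to keep it dependency-light):

from bs4 import BeautifulSoup

html = """
<form>
  <input id="Registration" value="G-ABCD">
  <input id="DICAOType" value="A320">
</form>
"""
soup = BeautifulSoup(html, "html.parser")
for key in ("Registration", "DICAOType"):
    tag = soup.find("input", id=key)
    print(key, "->", tag["value"])
# Registration -> G-ABCD
# DICAOType -> A320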
pyfa-org/Pyfa
gui/builtinMarketBrowser/pfSearchBox.py
1
9376
# noinspection PyPackageRequirements import wx import gui.utils.color as colorUtils import gui.utils.draw as drawUtils from gui.utils.helpers_wxPython import HandleCtrlBackspace SearchButton, EVT_SEARCH_BTN = wx.lib.newevent.NewEvent() CancelButton, EVT_CANCEL_BTN = wx.lib.newevent.NewEvent() TextEnter, EVT_TEXT_ENTER = wx.lib.newevent.NewEvent() TextTyped, EVT_TEXT = wx.lib.newevent.NewEvent() class PFSearchBox(wx.Window): def __init__(self, parent, id=wx.ID_ANY, value="", pos=wx.DefaultPosition, size=wx.Size(-1, 24), style=0): wx.Window.__init__(self, parent, id, pos, size, style=style) self.isSearchButtonVisible = False self.isCancelButtonVisible = False self.descriptiveText = "Search" self.searchBitmap = None self.cancelBitmap = None self.bkBitmap = None self.resized = True self.searchButtonX = 0 self.searchButtonY = 0 self.searchButtonPressed = False self.cancelButtonX = 0 self.cancelButtonY = 0 self.cancelButtonPressed = False self.editX = 0 self.editY = 0 self.padding = 4 self._hl = False w, h = size self.EditBox = wx.TextCtrl(self, wx.ID_ANY, "", wx.DefaultPosition, (-1, h - 2 if 'wxGTK' in wx.PlatformInfo else -1), wx.TE_PROCESS_ENTER | (wx.BORDER_NONE if 'wxGTK' in wx.PlatformInfo else 0)) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBk) self.Bind(wx.EVT_SIZE, self.OnSize) self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown) self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp) # self.EditBox.ChangeValue(self.descriptiveText) self.EditBox.Bind(wx.EVT_SET_FOCUS, self.OnEditSetFocus) self.EditBox.Bind(wx.EVT_KILL_FOCUS, self.OnEditKillFocus) self.EditBox.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress) self.EditBox.Bind(wx.EVT_TEXT, self.OnText) self.EditBox.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter) self.SetBackgroundStyle(wx.BG_STYLE_PAINT) self.SetMinSize(size) def OnText(self, event): wx.PostEvent(self, TextTyped()) event.Skip() def OnTextEnter(self, event): wx.PostEvent(self, TextEnter()) event.Skip() @staticmethod def OnEditSetFocus(event): # value = self.EditBox.GetValue() # if value == self.descriptiveText: # self.EditBox.ChangeValue("") event.Skip() def OnEditKillFocus(self, event): if self.EditBox.GetValue() == "": self.Clear() event.Skip() def OnKeyPress(self, event): if event.RawControlDown() and event.GetKeyCode() == wx.WXK_BACK: HandleCtrlBackspace(self.EditBox) else: event.Skip() def Clear(self): self.EditBox.Clear() # self.EditBox.ChangeValue(self.descriptiveText) def Focus(self): self.EditBox.SetFocus() def SetValue(self, value): self.EditBox.SetValue(value) def ChangeValue(self, value): self.EditBox.ChangeValue(value) def GetValue(self): return self.EditBox.GetValue() def GetLineText(self, lineno): return self.EditBox.GetLineText(lineno) def HitTest(self, target, position, area): x, y = target px, py = position aX, aY = area if (x < px < x + aX) and (y < py < y + aY): return True return False def GetButtonsPos(self): btnpos = [ (self.searchButtonX, self.searchButtonY), (self.cancelButtonX, self.cancelButtonY) ] return btnpos def GetButtonsSize(self): btnsize = [] if self.searchBitmap: sw = self.searchBitmap.GetWidth() sh = self.searchBitmap.GetHeight() else: sw = 0 sh = 0 if self.cancelBitmap: cw = self.cancelBitmap.GetWidth() ch = self.cancelBitmap.GetHeight() else: cw = 0 ch = 0 btnsize.append((sw, sh)) btnsize.append((cw, ch)) return btnsize def OnLeftDown(self, event): btnpos = self.GetButtonsPos() btnsize = self.GetButtonsSize() self.CaptureMouse() for btn in range(2): if self.HitTest(btnpos[btn], event.GetPosition(), btnsize[btn]): if btn == 0: if 
not self.searchButtonPressed: self.searchButtonPressed = True self.Refresh() if btn == 1: if not self.cancelButtonPressed: self.cancelButtonPressed = True self.Refresh() def OnLeftUp(self, event): btnpos = self.GetButtonsPos() btnsize = self.GetButtonsSize() if self.HasCapture(): self.ReleaseMouse() for btn in range(2): if self.HitTest(btnpos[btn], event.GetPosition(), btnsize[btn]): if btn == 0: if self.searchButtonPressed: self.searchButtonPressed = False self.Refresh() self.SetFocus() wx.PostEvent(self, SearchButton()) if btn == 1: if self.cancelButtonPressed: self.cancelButtonPressed = False self.Refresh() self.SetFocus() wx.PostEvent(self, CancelButton()) else: if btn == 0: if self.searchButtonPressed: self.searchButtonPressed = False self.Refresh() if btn == 1: if self.cancelButtonPressed: self.cancelButtonPressed = False self.Refresh() def OnSize(self, event): self.resized = True self.Refresh() def OnEraseBk(self, event): pass def UpdateElementsPos(self, dc): rect = self.GetRect() if self.searchBitmap and self.isSearchButtonVisible: sw = self.searchBitmap.GetWidth() sh = self.searchBitmap.GetHeight() else: sw = 0 sh = 0 if self.cancelBitmap and self.isCancelButtonVisible: cw = self.cancelBitmap.GetWidth() ch = self.cancelBitmap.GetHeight() else: cw = 0 ch = 0 cwidth = rect.width cheight = rect.height self.searchButtonX = self.padding self.searchButtonY = (cheight - sh) / 2 self.cancelButtonX = cwidth - self.padding - cw self.cancelButtonY = (cheight - ch) / 2 self.editX = self.searchButtonX + self.padding + sw editWidth, editHeight = self.EditBox.GetSize() self.editY = (cheight - editHeight) / 2 self.EditBox.SetPosition((self.editX, self.editY)) self.EditBox.SetSize((self.cancelButtonX - self.padding - self.editX, -1)) def OnPaint(self, event): dc = wx.AutoBufferedPaintDC(self) bkColor = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW) sepColor = colorUtils.GetSuitable(bkColor, 0.2) rect = self.GetRect() if self.resized: self.bkBitmap = drawUtils.RenderGradientBar(bkColor, rect.width, rect.height, 0.1, 0.1, 0.2, 2) self.UpdateElementsPos(dc) self.resized = False dc.DrawBitmap(self.bkBitmap, 0, 0) if self.isSearchButtonVisible: if self.searchBitmap: if self.searchButtonPressed: spad = 1 else: spad = 0 dc.DrawBitmap(self.searchBitmapShadow, self.searchButtonX + 1, self.searchButtonY + 1) dc.DrawBitmap(self.searchBitmap, self.searchButtonX + spad, self.searchButtonY + spad) if self.isCancelButtonVisible: if self.cancelBitmap: if self.cancelButtonPressed: cpad = 1 else: cpad = 0 dc.DrawBitmap(self.cancelBitmapShadow, self.cancelButtonX + 1, self.cancelButtonY + 1) dc.DrawBitmap(self.cancelBitmap, self.cancelButtonX + cpad, self.cancelButtonY + cpad) dc.SetPen(wx.Pen(sepColor, 1)) dc.DrawLine(0, rect.height - 1, rect.width, rect.height - 1) def SetSearchBitmap(self, bitmap): self.searchBitmap = bitmap self.searchBitmapShadow = drawUtils.CreateDropShadowBitmap(bitmap, 0.2) def SetCancelBitmap(self, bitmap): self.cancelBitmap = bitmap self.cancelBitmapShadow = drawUtils.CreateDropShadowBitmap(bitmap, 0.2) def IsSearchButtonVisible(self): return self.isSearchButtonVisible def IsCancelButtonVisible(self): return self.isCancelButtonVisible def ShowSearchButton(self, show=True): self.isSearchButtonVisible = show def ShowCancelButton(self, show=True): self.isCancelButtonVisible = show def SetDescriptiveText(self, text): self.descriptiveText = text def GetDescriptiveText(self): return self.descriptiveText
gpl-3.0
4,457,695,793,184,732,700
30.783051
111
0.575405
false
3.893688
false
false
false
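PFSearchBox.HitTest above treats each bitmap button as an axis-aligned box around its top-left corner. The same check, pulled out as a plain function so the geometry is easy to see (illustration only):

def hit_test(target, position, area):
    # True if `position` falls strictly inside the box anchored at `target` with size `area`
    x, y = target
    px, py = position
    w, h = area
    return (x < px < x + w) and (y < py < y + h)

print(hit_test((10, 10), (15, 12), (24, 24)))   # True: inside a 24x24 button at (10, 10)
print(hit_test((10, 10), (10, 12), (24, 24)))   # False: the left edge itself does not count

Note the strict inequalities: clicks landing exactly on a button edge are ignored, matching the widget's behaviour.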
b4ux1t3/piday2017
runtests.py
1
2166
""" This is just a way to see which method alluded to in the README will produce the best result. Think of it as a test, I guess. """ import randompi import statistics import time print("Starting timer") start_time = time.time() # Basically, I'm going to make a bunch of lists holding a thousand (1000) of values of pi as # calculated via various parameters. # Baseline, method works as shown in the original video. # Iterations = 500 # Max random value = 120 baseline = [randompi.run_calculation(501, max_random_number=121) for i in range(1000)] baseline_average = statistics.mean(baseline) print(" Baseline average estimation for pi:\t{}".format(baseline_average)) print(" Execution time:\t{}".format(time.time() - start_time)) elapsed_time = time.time() #Baseline, but done a million times # Iterations = 1000000 # Max random value = 120 million_baseline = [randompi.run_calculation(1000001, max_random_number=121) for i in range(1000)] million_baseline_average = statistics.mean(million_baseline) print(" Million baseline average estimation for pi:\t{}".format(million_baseline_average)) print(" Execution time:\t{}".format(time.time() - elapsed_time)) elapsed_time = time.time() # Iterations = 500 # Default max value (sys.maxsize) baseline_max_value = [randompi.run_calculation(501) for i in range(1000)] baseline_max_value_average = statistics.mean(baseline_max_value) print(" 500 tries at max value average estimation for pi:\t{}".format(baseline_max_value_average)) print(" Execution time:\t{}".format(time.time() - elapsed_time)) elapsed_time = time.time() # Iterations = 1000000 # Default max value (sys.maxsize) million_max_value = [randompi.run_calculation(1000001) for i in range(1000)] million_max_value_average = statistics.mean(million_max_value) print("A million tries at max value average estimation for pi:\t{}".format(million_max_value_average)) print(" Execution time:\t{}".format(time.time() - elapsed_time)) elapsed_time = time.time()
mit
-6,844,707,654,209,961,000
36.344828
103
0.677747
false
3.622074
false
false
false
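runtests.py drives a randompi module that is not part of this record. One plausible implementation of the estimator it appears to exercise, based on the fact that two random integers are coprime with probability 6/pi**2; the function name and parameters mirror the calls above but are assumptions:

import math
import random

def run_calculation(iterations, max_random_number=None):
    # estimate pi from the share of coprime pairs among random integer pairs
    if max_random_number is None:
        max_random_number = 2 ** 31          # stand-in for sys.maxsize
    coprime = 0
    for _ in range(1, iterations):           # range(1, 501) gives the "500 iterations" in the comments
        a = random.randint(1, max_random_number - 1)
        b = random.randint(1, max_random_number - 1)
        if math.gcd(a, b) == 1:
            coprime += 1
    probability = coprime / (iterations - 1)
    return math.sqrt(6.0 / probability)

print(run_calculation(501, max_random_number=121))   # coarse estimate, like the baseline call above
print(run_calculation(1000001))                      # typically lands close to 3.14

With only 120 possible values the estimate is much noisier, which is exactly the spread the averages in runtests.py are probing.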
lgarren/spack
var/spack/repos/builtin/packages/r-multtest/package.py
1
1752
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RMulttest(RPackage): """Resampling-based multiple hypothesis testing""" homepage = "https://www.bioconductor.org/packages/multtest/" url = "https://git.bioconductor.org/packages/multtest" version('2.32.0', git='https://git.bioconductor.org/packages/multtest', commit='c5e890dfbffcc3a3f107303a24b6085614312f4a') depends_on('r@3.4.0:3.4.9', when='@2.32.0') depends_on('r-biocgenerics', type=('build', 'run')) depends_on('r-biobase', type=('build', 'run'))
lgpl-2.1
2,030,379,986,413,075,500
45.105263
126
0.674087
false
3.808696
false
false
false
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_local_network_gateways_operations.py
1
24340
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class LocalNetworkGatewaysOperations: """LocalNetworkGatewaysOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_11_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_or_update_initial( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.LocalNetworkGateway", **kwargs ) -> "_models.LocalNetworkGateway": cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'LocalNetworkGateway') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.LocalNetworkGateway", **kwargs ) -> AsyncLROPoller["_models.LocalNetworkGateway"]: """Creates or updates a local network gateway in the specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :param parameters: Parameters supplied to the create or update local network gateway operation. :type parameters: ~azure.mgmt.network.v2020_11_01.models.LocalNetworkGateway :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.LocalNetworkGateway] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def get( self, resource_group_name: str, local_network_gateway_name: str, **kwargs ) -> "_models.LocalNetworkGateway": """Gets the specified local network gateway in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. 
:type local_network_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: LocalNetworkGateway, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_11_01.models.LocalNetworkGateway :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, local_network_gateway_name: str, **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def begin_delete( self, resource_group_name: str, local_network_gateway_name: str, **kwargs ) -> AsyncLROPoller[None]: """Deletes the specified local network gateway. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, local_network_gateway_name=local_network_gateway_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore async def update_tags( self, resource_group_name: str, local_network_gateway_name: str, parameters: "_models.TagsObject", **kwargs ) -> "_models.LocalNetworkGateway": """Updates a local network gateway tags. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param local_network_gateway_name: The name of the local network gateway. :type local_network_gateway_name: str :param parameters: Parameters supplied to update local network gateway tags. :type parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject :keyword callable cls: A custom type or function that will be passed the direct response :return: LocalNetworkGateway, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_11_01.models.LocalNetworkGateway :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_tags.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('LocalNetworkGateway', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore def list( self, resource_group_name: str, **kwargs ) -> AsyncIterable["_models.LocalNetworkGatewayListResult"]: """Gets all the local network gateways in a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.LocalNetworkGatewayListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-11-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
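# A minimal usage sketch for the operations class above, assuming the aio
# NetworkManagementClient from azure-mgmt-network and DefaultAzureCredential from
# azure-identity (the usual way these generated operation groups are reached);
# the subscription id, resource group and gateway names are placeholders.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient


async def example() -> None:
    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, "<subscription-id>") as client:
        # begin_create_or_update returns an AsyncLROPoller; awaiting result() waits
        # for the long-running PUT to finish and yields the LocalNetworkGateway model.
        poller = await client.local_network_gateways.begin_create_or_update(
            "example-rg",
            "example-local-gateway",
            {
                "location": "westus",
                "gateway_ip_address": "203.0.113.10",
                "local_network_address_space": {"address_prefixes": ["10.1.0.0/16"]},
            },
        )
        gateway = await poller.result()
        print(gateway.provisioning_state)

        # list() returns an AsyncItemPaged, so it is consumed with `async for`.
        async for gw in client.local_network_gateways.list("example-rg"):
            print(gw.name, gw.gateway_ip_address)
    await credential.close()

# asyncio.run(example())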
mit
1,005,753,911,172,090,200
50.350211
209
0.656163
false
4.347981
true
false
false
ViGLug/django-phpbb-extender
api/views.py
1
2642
#from django.contrib.auth.models import User, Group from rest_framework import filters, generics from .serializers import ( UserSerializer, ForumSerializer, TopicSerializer, PostSerializer, ) class UsersReadView(generics.ListAPIView): serializer_class = UserSerializer model = serializer_class.Meta.model queryset = model.objects.exclude(user_type=2) # exclude bot paginate_by = 100 class UserReadView(generics.RetrieveAPIView): serializer_class = UserSerializer model = serializer_class.Meta.model lookup_field = 'user_id' class UserPostsReadView(generics.ListAPIView): serializer_class = PostSerializer model = serializer_class.Meta.model paginate_by = 100 def get_queryset(self): poster_id = self.kwargs['poster_id'] queryset = self.model.objects.filter(poster_id=poster_id) return queryset.order_by('-post_time') class ForumsReadView(generics.ListAPIView): serializer_class = ForumSerializer model = serializer_class.Meta.model filter_backends = (filters.OrderingFilter,) ordering = ('left_id',) class ForumReadView(generics.RetrieveAPIView): serializer_class = ForumSerializer model = serializer_class.Meta.model lookup_field = 'forum_id' class ForumTopicsReadView(generics.ListAPIView): serializer_class = TopicSerializer model = serializer_class.Meta.model paginate_by = 100 def get_queryset(self): forum_id = self.kwargs['forum_id'] queryset = self.model.objects.filter(forum_id=forum_id) return queryset.order_by('-topic_last_post_time') class TopicsReadView(generics.ListAPIView): serializer_class = TopicSerializer model = serializer_class.Meta.model queryset = model.objects.order_by('-topic_time') paginate_by = 100 class TopicReadView(generics.RetrieveAPIView): serializer_class = TopicSerializer model = serializer_class.Meta.model lookup_field = 'topic_id' class TopicPostsReadView(generics.ListAPIView): serializer_class = PostSerializer model = serializer_class.Meta.model paginate_by = 100 def get_queryset(self): topic_id = self.kwargs['topic_id'] queryset = self.model.objects.filter(topic_id=topic_id) return queryset.order_by('-post_time') class PostsReadView(generics.ListAPIView): serializer_class = PostSerializer model = serializer_class.Meta.model queryset = model.objects.order_by('-post_time') paginate_by = 100 class PostReadView(generics.RetrieveAPIView): serializer_class = PostSerializer model = serializer_class.Meta.model lookup_field = 'post_id'
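# A hedged sketch of how these read-only views might be exposed in a urls.py; the
# regex patterns and the "api.views" import path are assumptions for illustration,
# not taken from the repository itself. The keyword argument names match the
# lookup_field / get_queryset kwargs used by the views above.
from django.conf.urls import url

from api import views

urlpatterns = [
    url(r'^users/$', views.UsersReadView.as_view()),
    url(r'^users/(?P<user_id>\d+)/$', views.UserReadView.as_view()),
    url(r'^users/(?P<poster_id>\d+)/posts/$', views.UserPostsReadView.as_view()),
    url(r'^forums/$', views.ForumsReadView.as_view()),
    url(r'^forums/(?P<forum_id>\d+)/$', views.ForumReadView.as_view()),
    url(r'^forums/(?P<forum_id>\d+)/topics/$', views.ForumTopicsReadView.as_view()),
    url(r'^topics/$', views.TopicsReadView.as_view()),
    url(r'^topics/(?P<topic_id>\d+)/$', views.TopicReadView.as_view()),
    url(r'^topics/(?P<topic_id>\d+)/posts/$', views.TopicPostsReadView.as_view()),
    url(r'^posts/$', views.PostsReadView.as_view()),
    url(r'^posts/(?P<post_id>\d+)/$', views.PostReadView.as_view()),
]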
agpl-3.0
-8,175,664,467,557,067,000
32.443038
65
0.723694
false
3.758179
false
false
false
dropzonemathmo/SocialMediaLinksRecommend
TwitterScraper/TwitterURLs.py
1
4012
""" TwitterURLs.py is a class that can be used to obtain the URLs of user tweets """ #import all secret keys for twitter access from Secret import * #import TwitterAPI from TwitterAPI import TwitterAPI #Global Printing Variable VERBOSE = False class TwitterURLs(): """ Twitter URLs enables access to the URLs posted by the authorised user and the followers of that user """ def __init__(self,responseCount=40): #set response count self.responseCount = responseCount #configure TwitterAPI AUTH self.api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET) #set class variables to store responses self.tweets = {} self.urls = {} self.followers = [] def setFollowers(self): """ setFollowers sets the class variable to a list of user IDs that are following the authorised user """ self.followers = [] response = self.api.request('followers/list',{'skip_status':'true','include_user_entities':'false','count':self.responseCount}) for item in response: for user in item['users']: self.followers.append(user['screen_name']) def setTweets(self,screenName='owner'): """ setTweets adds a dictionary key value pair to the class variable tweets where the key is a screenName (or owner if authorised user) and the value are tweets """ if (screenName == 'owner'): response = self.api.request('statuses/user_timeline',{'count':self.responseCount}) else: response = self.api.request('statuses/user_timeline',{'screen_name':screenName,'count':self.responseCount}) self.tweets[screenName] = [] for item in response: self.tweets[screenName].append(item) def setURLs(self,screenName='owner'): """ setURLS adds a key value pair to the urls class variable where the key is a screenName and the value is a list of recent URLs they have tweeted """ if (screenName not in self.tweets.keys()): self.setTweets(screenName) self.urls[screenName] = [] for tweet in self.tweets[screenName]: try: urls = tweet['entities']['urls'] except KeyError: print "Key Error for user {}".format(screenName) urls = [] for url in urls: self.urls[screenName].append(url['expanded_url']) def getFollowers(self): "printFollowers prints all followers in the class variable followers" if (len(self.followers) == 0): self.setFollowers() if VERBOSE: for follower in self.followers: print follower return self.followers def getTweets(self,screenName='owner'): "printTweets prints all the tweet text for the given screenName" if (screenName not in self.tweets.keys()): self.setTweets(screenName) tweets = [] for tweet in self.tweets[screenName]: if VERBOSE: print tweet['text'] tweets.append(tweet['text']) return tweets def getURLs(self,screenName='owner'): "printURLs prints all the URLs shared by the given screenName" if (screenName not in self.urls.keys()): self.setURLs(screenName) if VERBOSE: for url in self.urls[screenName]: print url return self.urls[screenName] def getAllURLs(self): "getAllURLs gets all the the URLs shared by a users followers" if (len(self.followers) == 0): self.setFollowers() #set the urls for owner self.setURLs() #get the urls for all owners for follower in self.followers: self.setURLs(follower) #return the urls dictionary object return self.urls if (__name__ == "__main__"): VERBOSE = True twitterURLs = TwitterURLs() #Get list of twitter followers twitterURLs.getFollowers() #Get tweets and URLs for AUTH user twitterURLs.getTweets() twitterURLs.getURLs() #Get tweets and URLs for user with screenName 2815238795 twitterURLs.getTweets('JamesDolman') twitterURLs.getURLs('JamesDolman')
gpl-2.0
8,145,693,527,491,492,000
25.746667
131
0.667498
false
3.853987
false
false
false
xapple/plumbing
plumbing/graphs/__init__.py
1
11246
# Built-in modules # import os, time, inspect, getpass from collections import OrderedDict # Internal modules # from plumbing.common import split_thousands, camel_to_snake from plumbing.cache import property_cached # First party modules # from autopaths import Path from autopaths.file_path import FilePath ################################################################################ class Graph(object): """ A nice class to make graphs with matplotlib. Example usage: from plumbing.graphs import Graph class RegressionGraph(Graph): formats = ('pdf', 'svg') def plot(self, **kwargs): fig = pyplot.figure() seaborn.regplot(self.x_data, self.y_data, fit_reg=True); self.save_plot(fig, **kwargs) for x_name in x_names: graph = PearsonGraph(short_name = x_name) graph.title = "Regression between y and '%s'" % (x_name) graph.x_data = x_data[x_name] graph.y_data = y_data graph.plot() """ default_params = OrderedDict(( ('width' , None), ('height' , None), ('bottom' , None), ('top' , None), ('left' , None), ('right' , None), ('x_grid' , None), # Vertical lines ('y_grid' , None), # Horizontal lines ('x_scale', None), ('y_scale', None), ('x_label', None), ('y_label', None), ('x_labels_rot', None), ('x_labels_size', None), ('y_labels_size', None), ('title' , None), ('y_lim_min', None), # Minimum (ymax - ymin) after autoscale ('x_lim_min', None), # Minimum (xmax - xmin) after autoscale ('sep' , ()), ('formats', ('pdf',)), ('close' , True), ('dpi' , None), ('bbox' , None), ('remove_frame', None), )) def __repr__(self): return '<%s graph "%s">' % (self.__class__.__name__, self.short_name) def __bool__(self): return bool(self.path) __nonzero__ = __bool__ def __init__(self, parent=None, base_dir=None, short_name=None): # Save parent # self.parent = parent # If we got a file as base_dir # if isinstance(base_dir, FilePath): self.base_dir = base_dir.directory short_name = base_dir.short_prefix # If no parent and no directory given get the calling script # if base_dir is None and parent is None: file_name = os.path.abspath((inspect.stack()[1])[1]) self.base_dir = os.path.dirname(os.path.abspath(file_name)) + '/' self.base_dir = Path(self.base_dir) # If no directory given but a parent is present we can guess # if base_dir is None: if hasattr(self.parent, 'p'): self.base_dir = self.parent.p.graphs_dir elif hasattr(self.parent, 'paths'): self.base_dir = self.parent.paths.graphs_dir elif hasattr(self.parent, 'autopaths'): self.base_dir = self.parent.autopaths.graphs_dir elif hasattr(self.parent, 'graphs_dir'): self.base_dir = self.parent.graphs_dir else: raise Exception("Please specify a `base_dir` for this graph.") else: self.base_dir = Path(base_dir) # Make sure the directory exists # self.base_dir.create_if_not_exists() # Short name # if short_name: self.short_name = short_name # Use the parents name or the base class name # if not hasattr(self, 'short_name'): if hasattr(self.parent, 'short_name'): self.short_name = self.parent.short_name else: self.short_name = camel_to_snake(self.__class__.__name__) @property_cached def path(self): return Path(self.base_dir + self.short_name + '.pdf') def __call__(self, *args, **kwargs): """ Plot the graph if it doesn't exist. Then return the path to it. Force the re-runing with rerun=True. 
""" if not self or kwargs.get('rerun'): self.plot(*args, **kwargs) return self.path def save_plot(self, fig=None, axes=None, **kwargs): # Import # from matplotlib import pyplot # Missing figure # if fig is None: fig = pyplot.gcf() # Missing axes # if axes is None: axes = pyplot.gca() # Parameters # self.params = {} for key in self.default_params: if key in kwargs: self.params[key] = kwargs[key] elif hasattr(self, key): self.params[key] = getattr(self, key) elif self.default_params[key] is not None: self.params[key] = self.default_params[key] # Backwards compatibility # if kwargs.get('x_log', False): self.params['x_scale'] = 'symlog' if kwargs.get('y_log', False): self.params['y_scale'] = 'symlog' # Log # if 'x_scale' in self.params: axes.set_xscale(self.params['x_scale']) if 'y_scale' in self.params: axes.set_yscale(self.params['y_scale']) # Axis limits # if 'x_min' in self.params: axes.set_xlim(self.params['x_min'], axes.get_xlim()[1]) if 'x_max' in self.params: axes.set_xlim(axes.get_xlim()[0], self.params['x_max']) if 'y_min' in self.params: axes.set_ylim(self.params['y_min'], axes.get_ylim()[1]) if 'y_max' in self.params: axes.set_ylim(axes.get_ylim()[0], self.params['y_max']) # Minimum delta on axis limits # if 'y_lim_min' in self.params: top, bottom = axes.get_ylim() minimum = self.params['y_lim_min'] delta = top - bottom if delta < minimum: center = bottom + delta/2 axes.set_ylim(center - minimum/2, center + minimum/2) # Title # title = self.params.get('title', False) if title: axes.set_title(title) # Axes labels # if self.params.get('x_label'): axes.set_xlabel(self.params['x_label']) if self.params.get('y_label'): axes.set_ylabel(self.params['y_label']) # Set height and width # if self.params.get('width'): fig.set_figwidth(self.params['width']) if self.params.get('height'): fig.set_figheight(self.params['height']) # Adjust # if self.params.get('bottom'): fig.subplots_adjust(hspace=0.0, bottom = self.params['bottom'], top = self.params['top'], left = self.params['left'], right = self.params['right']) # Grid # if 'x_grid' in self.params: if self.params['x_grid']: axes.xaxis.grid(True, linestyle=':') else: axes.xaxis.grid(False) if 'y_grid' in self.params: if self.params['y_grid']: axes.yaxis.grid(True, linestyle=':') else: axes.yaxis.grid(False) # Frame # if 'remove_frame' in self.params: axes.spines["top"].set_visible(False) axes.spines["right"].set_visible(False) # Data and source extra text # if hasattr(self, 'dev_mode') and self.dev_mode is True: fig.text(0.99, 0.98, time.asctime(), horizontalalignment='right') job_name = os.environ.get('SLURM_JOB_NAME', 'Unnamed') user_msg = 'user: %s, job: %s' % (getpass.getuser(), job_name) fig.text(0.01, 0.98, user_msg, horizontalalignment='left') # Nice digit grouping # import matplotlib if 'x' in self.params['sep']: separate = lambda x,pos: split_thousands(x) axes.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(separate)) if 'y' in self.params['sep']: separate = lambda y,pos: split_thousands(y) axes.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(separate)) # Add custom labels # if 'x_labels' in self.params: axes.set_xticklabels(self.params['x_labels']) if 'x_labels_rot' in self.params: pyplot.setp(axes.xaxis.get_majorticklabels(), rotation=self.params['x_labels_rot']) # Adjust font size # if 'x_labels_size' in self.params: pyplot.setp(axes.xaxis.get_majorticklabels(), fontsize=self.params['x_labels_size']) if 'y_labels_size' in self.params: pyplot.setp(axes.yaxis.get_majorticklabels(), 
fontsize=self.params['y_labels_size']) # Possibility to overwrite path # if 'path' in self.params: path = FilePath(self.params['path']) elif hasattr(self, 'path'): path = FilePath(self.path) else: path = FilePath(self.short_name + '.pdf') # The arguments to save # save_args = {} if 'dpi' in self.params: save_args['dpi'] = self.params['dpi'] if 'bbox' in self.params: save_args['bbox_inches'] = self.params['bbox'] # Save it as different formats # for ext in self.params['formats']: fig.savefig(path.replace_extension(ext), **save_args) # Close it # if self.params['close']: pyplot.close(fig) def plot_and_save(self, **kwargs): """ Used when the plot method defined does not create a figure nor calls save_plot Then the plot method has to use self.fig. """ from matplotlib import pyplot self.fig = pyplot.figure() self.plot() self.axes = pyplot.gca() self.save_plot(self.fig, self.axes, **kwargs) pyplot.close(self.fig) def plot(self, bins=250, **kwargs): """An example plot function. You have to subclass this method.""" # Import # import numpy # Data # counts = [sum(map(len, b.contigs)) for b in self.parent.bins] # Linear bins in logarithmic space # if 'log' in kwargs.get('x_scale', ''): start, stop = numpy.log10(1), numpy.log10(max(counts)) bins = list(numpy.logspace(start=start, stop=stop, num=bins)) bins.insert(0, 0) # Plot # from matplotlib import pyplot fig = pyplot.figure() pyplot.hist(counts, bins=bins, color='gray') axes = pyplot.gca() # Information # title = 'Distribution of the total nucleotide count in the bins' axes.set_title(title) axes.set_xlabel('Number of nucleotides in a bin') axes.set_ylabel('Number of bins with that many nucleotides in them') # Save it # self.save_plot(fig, axes, **kwargs) pyplot.close(fig) # For convenience # return self def save_anim(self, fig, animate, init, bitrate=10000, fps=30): """Not functional -- TODO.""" from matplotlib import animation anim = animation.FuncAnimation(fig, animate, init_func=init, frames=360, interval=20) FFMpegWriter = animation.writers['ffmpeg'] writer = FFMpegWriter(bitrate= bitrate, fps=fps) # Save # self.avi_path = self.base_dir + self.short_name + '.avi' anim.save(self.avi_path, writer=writer, codec='x264')
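# A minimal, self-contained sketch of subclassing Graph, in the spirit of the example
# in the class docstring above; it assumes matplotlib and numpy are installed and that
# the plumbing package is importable. The output directory, short name and random data
# are placeholders for illustration.
import numpy
from matplotlib import pyplot

from plumbing.graphs import Graph

class ValueHistogram(Graph):
    formats = ('pdf', 'png')

    def plot(self, **kwargs):
        fig = pyplot.figure()
        pyplot.hist(self.values, bins=30, color='gray')
        axes = pyplot.gca()
        # save_plot() picks up title, x_label, y_label, etc. from the attributes set below.
        self.save_plot(fig, axes, **kwargs)

graph = ValueHistogram(base_dir='/tmp/graphs/', short_name='value_distribution')
graph.title   = 'Distribution of simulated values'
graph.x_label = 'Value'
graph.y_label = 'Count'
graph.values  = numpy.random.normal(size=1000)
graph.plot()
print(graph.path)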
mit
-97,055,972,161,673,570
42.758755
105
0.557265
false
3.673963
false
false
false
bitxbay/BitXBay
pywa.py
1
8297
#!/usr/bin/env python #-*- coding: utf-8 -*- pywversion="2.1.7" never_update=False # # jackjack's pywallet.py # https://github.com/jackjack-jj/pywallet # forked from Joric's pywallet.py # beta_version = ('a' in pywversion.split('-')[0]) or ('b' in pywversion.split('-')[0]) missing_dep = [] from bsddb.db import * import os, sys, time, re pyw_filename = os.path.basename(__file__) pyw_path = os.path.dirname(os.path.realpath(__file__)) try: import json except: try: import simplejson as json except: print("Json or simplejson package is needed") import logging import struct import StringIO import traceback import socket import types import string import exceptions import hashlib import random import urllib import math from datetime import datetime from subprocess import * import os import os.path import platform max_version = 81000 addrtype = 0 json_db = {} private_keys = [] private_hex_keys = [] passphrase = "" global_merging_message = ["",""] balance_site = 'http://jackjack.alwaysdata.net/balance/index.php?address' aversions = {}; for i in range(256): aversions[i] = "version %d" % i; aversions[0] = 'Bitcoin'; aversions[48] = 'Litecoin'; aversions[52] = 'Namecoin'; aversions[111] = 'Testnet'; wallet_dir = "" wallet_name = "" ko = 1e3 kio = 1024 Mo = 1e6 Mio = 1024 ** 2 Go = 1e9 Gio = 1024 ** 3 To = 1e12 Tio = 1024 ** 4 prekeys = ["308201130201010420".decode('hex'), "308201120201010420".decode('hex')] postkeys = ["a081a530".decode('hex'), "81a530".decode('hex')] def delete_from_wallet(db_env, walletfile, typedel, kd): db = open_wallet(db_env, walletfile, True) kds = BCDataStream() vds = BCDataStream() deleted_items = 0 if not isinstance(kd, list): kd=[kd] if typedel=='tx' and kd!=['all']: for keydel in kd: db.delete('\x02\x74\x78'+keydel.decode('hex')[::-1]) deleted_items+=1 else: for i,keydel in enumerate(kd): for (key, value) in db.items(): kds.clear(); kds.write(key) vds.clear(); vds.write(value) type = kds.read_string() if typedel == "tx" and type == "tx": db.delete(key) deleted_items+=1 elif typedel == "key": if type == "key" or type == "ckey": if keydel == public_key_to_bc_address(kds.read_bytes(kds.read_compact_size())): db.delete(key) deleted_items+=1 elif type == "pool": vds.read_int32() vds.read_int64() if keydel == public_key_to_bc_address(vds.read_bytes(vds.read_compact_size())): db.delete(key) deleted_items+=1 elif type == "name": if keydel == kds.read_string(): db.delete(key) deleted_items+=1 db.close() return deleted_items def open_wallet(db_env, walletfile, writable=False): db = DB(db_env) if writable: DB_TYPEOPEN = DB_CREATE else: DB_TYPEOPEN = DB_RDONLY flags = DB_THREAD | DB_TYPEOPEN try: r = db.open(walletfile, "main", DB_BTREE, flags) except DBError: r = True if r is not None: logging.error("Couldn't open wallet.dat/main. 
Try quitting Bitcoin and running this again.") sys.exit(1) return db class BCDataStream(object): def __init__(self): self.input = None self.read_cursor = 0 def clear(self): self.input = None self.read_cursor = 0 def write(self, bytes): # Initialize with string of bytes if self.input is None: self.input = bytes else: self.input += bytes def read_bytes(self, length): try: result = self.input[self.read_cursor:self.read_cursor+length] self.read_cursor += length return result except IndexError: raise SerializationError("attempt to read past end of buffer") return '' def read_string(self): # Strings are encoded depending on length: # 0 to 252 : 1-byte-length followed by bytes (if any) # 253 to 65,535 : byte'253' 2-byte-length followed by bytes # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes # ... and the Bitcoin client is coded to understand: # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string # ... but I don't think it actually handles any strings that big. if self.input is None: raise SerializationError("call write(bytes) before trying to deserialize") try: length = self.read_compact_size() except IndexError: raise SerializationError("attempt to read past end of buffer") return self.read_bytes(length) def write_string(self, string): # Length-encoded as with read-string self.write_compact_size(len(string)) self.write(string) def read_boolean(self): return self.read_bytes(1)[0] != chr(0) def read_int16(self): return self._read_num('<h') def read_uint16(self): return self._read_num('<H') def read_int32(self): return self._read_num('<i') def read_uint32(self): return self._read_num('<I') def read_int64(self): return self._read_num('<q') def read_uint64(self): return self._read_num('<Q') def write_boolean(self, val): return self.write(chr(bool_to_int(val))) def write_int16(self, val): return self._write_num('<h', val) def write_uint16(self, val): return self._write_num('<H', val) def write_int32(self, val): return self._write_num('<i', val) def write_uint32(self, val): return self._write_num('<I', val) def write_int64(self, val): return self._write_num('<q', val) def write_uint64(self, val): return self._write_num('<Q', val) def read_compact_size(self): size = ord(self.input[self.read_cursor]) self.read_cursor += 1 if size == 253: size = self._read_num('<H') elif size == 254: size = self._read_num('<I') elif size == 255: size = self._read_num('<Q') return size def write_compact_size(self, size): if size < 0: raise SerializationError("attempt to write size < 0") elif size < 253: self.write(chr(size)) elif size < 2**16: self.write('\xfd') self._write_num('<H', size) elif size < 2**32: self.write('\xfe') self._write_num('<I', size) elif size < 2**64: self.write('\xff') self._write_num('<Q', size) def _read_num(self, format): (i,) = struct.unpack_from(format, self.input, self.read_cursor) self.read_cursor += struct.calcsize(format) return i def _write_num(self, format, num): s = struct.pack(format, num) self.write(s) def hash_160(public_key): md = hashlib.new('ripemd160') md.update(hashlib.sha256(public_key).digest()) return md.digest() def public_key_to_bc_address(public_key, v=None): if v==None: v=addrtype h160 = hash_160(public_key) return hash_160_to_bc_address(h160, v) def hash_160_to_bc_address(h160, v=None): if v==None: v=addrtype vh160 = chr(v) + h160 h = Hash(vh160) addr = vh160 + h[0:4] return b58encode(addr) def bc_address_to_hash_160(addr): bytes = b58decode(addr, 25) return bytes[1:21] def Hash(data): return 
hashlib.sha256(hashlib.sha256(data).digest()).digest() __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) def b58encode(v): """ encode v, which is a string of bytes, to base58. """ long_value = 0L for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result
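# A small round-trip sketch of the compact-size/string encoding documented in
# BCDataStream.read_string above; it only uses classes and constants defined in this
# file and, like the rest of the script, assumes a Python 2 interpreter.
stream = BCDataStream()
stream.write_compact_size(3)      # sizes below 253 are a single length byte: '\x03'
stream.write_string('abc')        # length-prefixed: '\x03' + 'abc'
stream.write_uint32(max_version)  # little-endian '<I' encoding of 81000

reader = BCDataStream()
reader.write(stream.input)
assert reader.read_compact_size() == 3
assert reader.read_string() == 'abc'
assert reader.read_uint32() == max_version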
gpl-3.0
9,194,209,195,237,866,000
29.284672
103
0.579246
false
3.417216
false
false
false