Dataset columns:
  code — string, length 2 to 1.05M
  repo_name — string, length 5 to 104
  path — string, length 4 to 251
  language — string, 1 class
  license — string, 15 classes
  size — int32, 2 to 1.05M
from mission.framework.search import SpiralSearch
from mission.framework.helpers import ConsistencyCheck
from mission.missions.old.bins import BinsTask as Bins
from mission.missions.old.recovery import OptimalRecovery as Recovery
from mission.framework.task import Task
from mission.missions.old.torpedoes import LocateBoard, Torpedoes
from mission.missions.old.hydrophones import FindPinger
from mission.framework.combinators import Sequential
from mission.framework.primitive import Log
from mission.framework.timing import Timer

import shm

NONE = 0
TORPEDOES = 1
RECOVERY = 2

class RandomPinger(Task):
    def desiredModules(self):
        if hasattr(self, "after_pinger") and hasattr(self.after_pinger, "selected_mission"):
            selected_mission = self.after_pinger.selected_mission
            if selected_mission is None or selected_mission == "torpedoes":
                return [shm.vision_modules.Torpedoes]
            elif selected_mission == "recovery":
                return [shm.vision_modules.Recovery]
            elif selected_mission == "bins":
                return [shm.vision_modules.Bins]
        else:
            return [shm.vision_modules.Recovery]

    def on_first_run(self):
        random_task = shm.mission_state.random_task.get()
        find_pinger = FindPinger()
        if random_task == TORPEDOES:
            self.logi("Making FindPinger with safe elevations because we are going from Bins to Recovery!")
            find_pinger = FindPinger(safe_elevations=True)
        elif random_task == RECOVERY:
            self.logi("Making FindPinger with look_for_recovery=False because we are going from Recovery to Bins!")
            find_pinger = FindPinger(look_for_recovery=False)

        self.after_pinger = AfterPinger(find_pinger)
        self.task = Sequential(find_pinger, self.after_pinger)
        self.has_made_progress = True

    def on_run(self):
        # Always report pinger tracking has made progress
        # self.has_made_progress = self.after_pinger.selected_task.has_made_progress if self.after_pinger.selected_task is not None else True

        if self.task.finished:
            self.finish()
        else:
            self.task()

class AfterPinger(Task):
    task = None
    selected_task = None
    selected_mission = None

    def on_first_run(self, find_pinger=None):
        self.torpedoes = Torpedoes()
        self.recovery = Recovery()
        self.bins = Bins()

        self.random_task = shm.mission_state.random_task.get()

        # We need to identify where we are.
        if self.random_task == NONE:
            self.search = LocateBoard()

        self.selected_mission = None
        self.selected_task = None
        self.has_made_progress = True

    def at_recovery(self):
        if not self.recovery.finished:
            self.recovery()
            self.selected_mission = "recovery"
            self.selected_task = self.recovery
        else:
            self.finish()

    def at_bins_torpedoes(self):
        if not self.torpedoes.finished:
            self.selected_mission = "torpedoes"
            self.selected_task = self.torpedoes
            self.torpedoes()
        elif not self.bins.finished:
            self.selected_mission = "bins"
            self.selected_task = self.bins
            self.bins()
        else:
            self.finish()

    def on_run(self, find_pinger=None):
        # self.has_made_progress = self.selected_task.has_made_progress if self.selected_task is not None else False

        if self.random_task == NONE and self.search.finished:
            if self.search.found_board:
                shm.mission_state.random_task.set(TORPEDOES)
                self.at_bins_torpedoes()
            else:
                shm.mission_state.random_task.set(RECOVERY)
                self.at_recovery()
        elif self.random_task == TORPEDOES:
            shm.mission_state.random_task.set(RECOVERY)
            self.at_recovery()
        elif find_pinger is not None and find_pinger.found_recovery:
            shm.mission_state.random_task.set(RECOVERY)
            self.at_recovery()
        elif self.random_task == RECOVERY:
            shm.mission_state.random_task.set(TORPEDOES)
            self.at_bins_torpedoes()
        else:
            self.search()

class AfterPingerBinsRecovery(Task):
    def recovery_validator(self):
        results = shm.recovery_vision.get()
        stacks_visible = False
        stack_visible_vars = [results.stack_1_visible, results.stack_2_visible,
                              results.stack_3_visible, results.stack_4_visible]
        num_visible_stacks = 0
        for stack_visible in stack_visible_vars:
            if stack_visible:
                num_visible_stacks += 1
        if num_visible_stacks >= 2:
            stacks_visible = True

        return stacks_visible or results.green_mark_visible or \
               results.red_mark_visible or results.table_visible

    def bins_validator(self):
        return shm.bin_cover.probability.get() > 0.0 or \
               shm.bin_yellow_1.probability.get() > 0.0 or \
               shm.bin_yellow_2.probability.get() > 0.0

    def check_bins_visible(self):
        print("check bins")
        if self.bins_watcher.has_changed():
            bins_val = self.bins_validator()
            print("bins validator returned", bins_val)
            self.bins_visible = self.bins_check(bins_val)

    def check_recovery_visible(self):
        if self.recovery_watcher.has_changed():
            self.recovery_visible = self.recovery_check(self.recovery_validator())

    def on_first_run(self):
        self.search = SpiralSearch(optimize_heading=True, min_spin_radius=2.0)
        self.recovery_check = ConsistencyCheck(5, 5)
        self.bins_check = ConsistencyCheck(5, 5)

        self.bins_watcher = shm.watchers.watcher()
        self.bins_watcher.watch(shm.bin_cover)
        self.bins_watcher.watch(shm.bin_yellow_1)
        self.bins_watcher.watch(shm.bin_yellow_2)
        self.bins_visible = False

        self.recovery_watcher = shm.watchers.watcher()
        self.recovery_watcher.watch(shm.recovery_vision)
        self.recovery_visible = False

        self.bins = Bins()
        self.recovery = Recovery()
        self.task = None

    def on_run(self):
        self.check_recovery_visible()
        self.check_bins_visible()
        print("bins visible is", self.bins_visible)

        if self.task is None:
            if self.bins_visible:
                self.logi("Found bins!")
                self.task = self.bins
            elif self.recovery_visible:
                self.logi("Found recovery!")
                self.task = self.recovery
            else:
                self.search()
        else:
            self.task()

random_pinger = RandomPinger()
after_pinger = AfterPinger()
cuauv/software
mission/missions/old/2017/random_pinger.py
Python
bsd-3-clause
6,914
import gevent
import pytest

from gevent import coros

from boto.s3 import bucket
from boto.s3 import key

from fast_wait import fast_wait

from wal_e import exception
from wal_e.worker.s3 import s3_deleter

assert fast_wait

ISO8601 = '%Y-%m-%dT%H:%M:%SZ'


class BucketDeleteKeysCollector(object):
    """A callable to stand-in for bucket.delete_keys

    Used to test that given keys are bulk-deleted.

    Also can inject an exception.
    """
    def __init__(self):
        self.deleted_keys = []
        self.aborted_keys = []
        self.exc = None

        # Protect exc, since some paths test it and then use it, which
        # can run afoul race conditions.
        self._exc_protect = coros.RLock()

    def inject(self, exc):
        self._exc_protect.acquire()
        self.exc = exc
        self._exc_protect.release()

    def __call__(self, keys):
        self._exc_protect.acquire()

        try:
            if self.exc:
                self.aborted_keys.extend(keys)

                # Prevent starvation/livelock with a polling process
                # by yielding.
                gevent.sleep(0.1)

                raise self.exc
        finally:
            self._exc_protect.release()

        self.deleted_keys.extend(keys)


@pytest.fixture
def collect(monkeypatch):
    """Instead of performing bulk delete, collect key names deleted.

    This is to test invariants, as to ensure deleted keys are passed
    to boto properly.
    """
    collect = BucketDeleteKeysCollector()
    monkeypatch.setattr(bucket.Bucket, 'delete_keys', collect)

    return collect


@pytest.fixture
def b():
    return bucket.Bucket(name='test-bucket-name')


@pytest.fixture(autouse=True)
def never_use_single_delete(monkeypatch):
    """Detect any mistaken uses of single-key deletion.

    Older wal-e versions used one-at-a-time deletions.  This is just
    to help ensure that use of this API (through the nominal boto
    symbol) is detected.
    """
    def die():
        assert False

    monkeypatch.setattr(key.Key, 'delete', die)
    monkeypatch.setattr(bucket.Bucket, 'delete_key', die)


def make_key(*args, **kwargs):
    from datetime import datetime
    from datetime import timedelta

    k = key.Key(*args, **kwargs)
    last_modified = (datetime.now() - timedelta(days=60)).strftime(ISO8601)
    k.last_modified = last_modified
    return k


def test_construction():
    """The constructor basically works."""
    s3_deleter.Deleter()


def test_close_error():
    """Ensure that attempts to use a closed Deleter results in an error."""
    d = s3_deleter.Deleter()
    d.close()

    with pytest.raises(exception.UserCritical):
        d.delete('no value should work')


def test_processes_one_deletion(b, collect):
    # Mock up a key and bucket
    key_name = 'test-key-name'
    k = make_key(bucket=b, name=key_name)

    d = s3_deleter.Deleter()
    d.delete(k)
    d.close()

    assert collect.deleted_keys == [key_name]


def test_processes_many_deletions(b, collect):
    # Generate a target list of keys in a stable order
    target = sorted(['test-key-' + str(x) for x in range(20001)])

    # Construct boto S3 Keys from the generated names and delete them
    # all.
    keys = [make_key(bucket=b, name=key_name) for key_name in target]

    d = s3_deleter.Deleter()
    for k in keys:
        d.delete(k)
    d.close()

    # Sort the deleted key names to obtain another stable order and
    # then ensure that everything was passed for deletion
    # successfully.
    assert sorted(collect.deleted_keys) == target


def test_retry_on_normal_error(b, collect):
    """Ensure retries are processed for most errors."""
    key_name = 'test-key-name'
    k = make_key(bucket=b, name=key_name)

    collect.inject(Exception('Normal error'))
    d = s3_deleter.Deleter()
    d.delete(k)

    # Since delete_keys will fail over and over again, aborted_keys
    # should grow quickly.
    while len(collect.aborted_keys) < 2:
        gevent.sleep(0.1)

    # Since delete_keys has been failing repeatedly, no keys should
    # be successfully deleted.
    assert not collect.deleted_keys

    # Turn off fault injection and flush/synchronize with close().
    collect.inject(None)
    d.close()

    # The one enqueued job should have been processed.
    assert collect.deleted_keys == [key_name]


def test_no_retry_on_keyboardinterrupt(b, collect):
    """Ensure that KeyboardInterrupts are forwarded."""
    key_name = 'test-key-name'
    k = make_key(bucket=b, name=key_name)

    # If vanilla KeyboardInterrupt is used, then sending SIGINT to
    # the test can cause it to pass improperly, so use a subtype
    # instead.
    class MarkedKeyboardInterrupt(KeyboardInterrupt):
        pass

    collect.inject(MarkedKeyboardInterrupt('SIGINT, probably'))
    d = s3_deleter.Deleter()

    with pytest.raises(MarkedKeyboardInterrupt):
        d.delete(k)

        # Exactly when coroutines are scheduled is non-deterministic,
        # so spin while yielding to provoke the
        # MarkedKeyboardInterrupt being processed within the
        # pytest.raises context manager.
        while True:
            gevent.sleep(0.1)

    # Only one key should have been aborted, since the purpose is to
    # *not* retry when processing KeyboardInterrupt.
    assert collect.aborted_keys == [key_name]

    # Turn off fault injection and flush/synchronize with close().
    collect.inject(None)
    d.close()

    # Since there is no retrying, no keys should be deleted.
    assert not collect.deleted_keys
heroku/wal-e
tests/test_s3_deleter.py
Python
bsd-3-clause
5,541
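The tests above exercise wal-e's s3_deleter.Deleter without showing it. As a rough orientation, the pattern under test, a background greenlet that drains a queue and issues bulk delete_keys calls, retrying ordinary errors but never KeyboardInterrupt, can be sketched as follows. This is an illustrative reimplementation based only on what the assertions above imply, not wal-e's actual code; the class name SketchDeleter and the sentinel-based shutdown are inventions.

import gevent
import gevent.queue

_SHUTDOWN = object()  # sentinel telling the worker to flush and exit


class SketchDeleter(object):
    """Bulk-delete boto keys from a background greenlet (illustration only)."""

    def __init__(self):
        self._queue = gevent.queue.Queue()
        self._closed = False
        self._worker = gevent.spawn(self._run)

    def delete(self, key):
        if self._closed:
            raise RuntimeError('delete() called on a closed deleter')
        self._queue.put(key)

    def _run(self):
        running = True
        while running:
            head = self._queue.get()  # block until at least one key arrives
            if head is _SHUTDOWN:
                return
            batch = [head]
            # Drain whatever else is already queued so deletion is bulk.
            while not self._queue.empty():
                item = self._queue.get()
                if item is _SHUTDOWN:
                    running = False
                    break
                batch.append(item)
            while True:
                try:
                    batch[0].bucket.delete_keys([k.name for k in batch])
                    break
                except KeyboardInterrupt:
                    raise  # interrupts are never retried
                except Exception:
                    gevent.sleep(0.1)  # yield, then retry ordinary errors

    def close(self):
        # Like the close() used above: stop accepting work, then block
        # until every queued key has been flushed.
        self._closed = True
        self._queue.put(_SHUTDOWN)
        self._worker.join()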
import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
import load_data
import realtime_augmentation as ra
import time
import sys
import json
from custom_for_keras import input_generator
from datetime import datetime, timedelta
import csv
import os
import cPickle as pickle
import matplotlib.pyplot as plt
from termcolor import colored

from custom_keras_model_and_fit_capsels import kaggle_winsol

starting_time = time.time()

copy_to_ram_beforehand = False

debug = True

get_winsol_weights = False

BATCH_SIZE = 256  # keep in mind

NUM_INPUT_FEATURES = 3

TRAIN_LOSS_SF_PATH = "trainingNmbrs_keras_modular_includeFlip_and_37relu.txt"
# TARGET_PATH = "predictions/final/try_convnet.csv"
WEIGHTS_PATH = "analysis/final/try_convent_keras_modular_includeFlip_and_37relu.h5"

input_sizes = [(69, 69), (69, 69)]
PART_SIZE = 45

N_INPUT_VARIATION = 2

DO_VALID = True  # disable this to not bother with the validation set evaluation
DO_VALID_CORR = False  # not implemented yet
DO_VALID_SCATTER = True

VALID_CORR_OUTPUT_FILTER = np.zeros((37))
VALID_CORR_OUTPUT_FILTER[2] = 1   # star or artifact
VALID_CORR_OUTPUT_FILTER[3] = 1   # edge on yes
VALID_CORR_OUTPUT_FILTER[4] = 1   # edge on no
VALID_CORR_OUTPUT_FILTER[5] = 1   # bar feature yes
VALID_CORR_OUTPUT_FILTER[7] = 1   # spiral arms yes
VALID_CORR_OUTPUT_FILTER[14] = 1  # anything odd? no
VALID_CORR_OUTPUT_FILTER[18] = 1  # ring
VALID_CORR_OUTPUT_FILTER[19] = 1  # lens
VALID_CORR_OUTPUT_FILTER[20] = 1  # disturbed
VALID_CORR_OUTPUT_FILTER[21] = 1  # irregular
VALID_CORR_OUTPUT_FILTER[22] = 1  # other
VALID_CORR_OUTPUT_FILTER[23] = 1  # merger
VALID_CORR_OUTPUT_FILTER[24] = 1  # dust lane

N_Corr_Filter_Images = np.sum(VALID_CORR_OUTPUT_FILTER)

DO_VALIDSTUFF_ON_TRAIN = True

DO_TEST = False  # disable this to not generate predictions on the testset

DO_PRINT_FILTERS = False

IMAGE_OUTPUT_PATH = "images_wColorbar_newYear2_realValid"

output_names = ["smooth", "featureOrdisk", "NoGalaxy", "EdgeOnYes", "EdgeOnNo",
                "BarYes", "BarNo", "SpiralYes", "SpiralNo", "BulgeNo",
                "BulgeJust", "BulgeObvious", "BulgDominant", "OddYes", "OddNo",
                "RoundCompletly", "RoundBetween", "RoundCigar", "Ring",
                "Lense", "Disturbed", "Irregular", "Other", "Merger",
                "DustLane", "BulgeRound", "BlulgeBoxy", "BulgeNo2",
                "SpiralTight", "SpiralMedium", "SpiralLoose", "Spiral1Arm",
                "Spiral2Arm", "Spiral3Arm", "Spiral4Arm", "SpiralMoreArms",
                "SpiralCantTell"]

target_filename = os.path.basename(WEIGHTS_PATH).replace(".h5", ".npy.gz")
target_path_valid = os.path.join(
    "predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join(
    "predictions/final/augmented/test", target_filename)

if copy_to_ram_beforehand:
    ra.myLoadFrom_RAM = True
    import copy_data_to_shm

y_train = np.load("data/solutions_train.npy")
ra.y_train = y_train

# split training data into training + a small validation set
ra.num_train = y_train.shape[0]

# integer division, is defining validation size
ra.num_valid = ra.num_train // 10
ra.num_train -= ra.num_valid

# training num check for EV usage
if ra.num_train != 55420:
    print "num_train = %s not %s" % (ra.num_train, 55420)

ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]

load_data.num_train = y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")

ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids

ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]

train_ids = load_data.train_ids
test_ids = load_data.test_ids

num_train = ra.num_train
num_test = len(test_ids)

num_valid = ra.num_valid

y_valid = ra.y_valid
y_train = ra.y_train

valid_ids = ra.valid_ids
train_ids = ra.train_ids

train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)

N_TRAIN = num_train
N_VALID = num_valid

print("validation sample contains %s images. \n" % (ra.num_valid))

print 'initiate winsol class'
winsol = kaggle_winsol(BATCH_SIZE=BATCH_SIZE,
                       NUM_INPUT_FEATURES=NUM_INPUT_FEATURES,
                       PART_SIZE=PART_SIZE,
                       input_sizes=input_sizes,
                       LOSS_PATH=TRAIN_LOSS_SF_PATH,
                       WEIGHTS_PATH=WEIGHTS_PATH)

print "Build model"

if debug:
    print("input size: %s x %s x %s x %s" %
          (input_sizes[0][0], input_sizes[0][1], NUM_INPUT_FEATURES,
           BATCH_SIZE))

winsol.init_models()

# print 'output cuda_0'
# print np.shape(winsol.get_layer_output(layer='cuda_0'))

# print 'output maxout_2'
# max_out = winsol.get_layer_output(layer='maxout_2')
# print np.shape(max_out)

print 'end of testing.py'
garbersc/keras-galaxies
testing.py
Python
bsd-3-clause
4,774
#!/usr/bin/env python

from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

d = generate_distutils_setup(
    packages=['vigir_behavior_sinusoide_joint_control_test'],
    package_dir={'': 'src'}
)

setup(**d)
team-vigir/vigir_behaviors
behaviors/vigir_behavior_sinusoide_joint_control_test/setup.py
Python
bsd-3-clause
256
#!/usr/bin/env python
import sys, rospy
from pimouse_ros.msg import LightSensorValues

if __name__ == '__main__':
    devfile = '/dev/rtlightsensor0'
    rospy.init_node('lightsensors')
    pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        try:
            with open(devfile, 'r') as f:
                data = f.readline().split()
                data = [int(e) for e in data]
                d = LightSensorValues()
                d.right_forward = data[0]
                d.right_side = data[1]
                d.left_forward = data[2]
                d.left_side = data[3]
                d.sum_all = sum(data)
                d.sum_forward = data[0] + data[2]
                pub.publish(d)
        except IOError:
            rospy.logerr("cannot read from " + devfile)
        rate.sleep()
ishigem/pimouse_ros
scripts/lightsensors2.py
Python
bsd-3-clause
885
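A minimal companion node for the publisher above, assuming the same lightsensors topic and the pimouse_ros message package are available on the system; the node name and log text here are made up for the example.

#!/usr/bin/env python
import rospy
from pimouse_ros.msg import LightSensorValues


def callback(msg):
    # Log the combined forward reading computed by the publisher node.
    rospy.loginfo("forward light level: %d", msg.sum_forward)


if __name__ == '__main__':
    rospy.init_node('lightsensors_listener')
    rospy.Subscriber('lightsensors', LightSensorValues, callback)
    rospy.spin()  # process callbacks until shutdown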
'''
Cryptotrade
-----------

This module provides wrappers for the Bitcoin trading APIs available
from MtGox and Tradehill. In the future more trading platforms may be
added.
'''

from setuptools import setup

setup(
    name='Cryptotrade',
    version='0.1.3',
    url='https://github.com/maxcountryman/cryptotrade',
    license='BSD',
    author='Max Countryman',
    author_email='maxc@me.com',
    description='Cryptocurrency trading API wrapper; MtGox and Tradehill',
    long_description=__doc__,
    zip_safe=False,
    platforms='any',
    test_suite='test_cryptotrade'
)
maxcountryman/cryptotrade
setup.py
Python
bsd-3-clause
582
from setuptools import setup, find_packages
import sys, os

version = '1.2.6'

setup(name='django-cached-field',
      version=version,
      description="Celery-deferred, cached fields on Django ORM for expensive-to-calculate data",
      long_description="""Celery-deferred, cached fields on Django ORM for expensive-to-calculate data""",
      classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
      keywords='django caching',
      author='Martin Chase',
      author_email='outofculture@gmail.com',
      url='https://github.com/aquameta/django-cached-field',
      license='BSD',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'django>=1.3.1',
          'celery>=3.0',
          'django-celery>=3.0',
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,
      )
aquametalabs/django-cached-field
setup.py
Python
bsd-3-clause
939
import json

from jsonobject.exceptions import BadValueError

from corehq.apps.userreports.exceptions import BadSpecError
from django.utils.translation import ugettext as _

from corehq.apps.userreports.reports.filters.specs import ReportFilter
from corehq.apps.userreports.reports.specs import PieChartSpec, \
    MultibarAggregateChartSpec, MultibarChartSpec, \
    FieldColumn, PercentageColumn, ExpandedColumn, AggregateDateColumn, \
    OrderBySpec, LocationColumn, ExpressionColumn


class ReportFactory(object):

    @classmethod
    def from_spec(cls, spec, include_prefilters=False):
        from corehq.apps.userreports.reports.data_source import ConfigurableReportDataSource
        order_by = [(o['field'], o['order']) for o in spec.sort_expression]
        filters = spec.filters if include_prefilters else spec.filters_without_prefilters
        return ConfigurableReportDataSource(
            domain=spec.domain,
            config_or_config_id=spec.config_id,
            filters=[ReportFilter.wrap(f) for f in filters],
            aggregation_columns=spec.aggregation_columns,
            columns=spec.report_columns,
            order_by=order_by,
        )


class ReportColumnFactory(object):
    class_map = {
        'aggregate_date': AggregateDateColumn,
        'expanded': ExpandedColumn,
        'field': FieldColumn,
        'percent': PercentageColumn,
        'location': LocationColumn,
        'expression': ExpressionColumn,
    }

    @classmethod
    def from_spec(cls, spec):
        column_type = spec.get('type') or 'field'
        if column_type not in cls.class_map:
            raise BadSpecError(
                'Unknown or missing column type: {} must be in [{}]'.format(
                    column_type, ', '.join(cls.class_map.keys())
                )
            )
        try:
            return cls.class_map[column_type].wrap(spec)
        except BadValueError as e:
            raise BadSpecError(_(
                'Problem creating column from spec: {}, message is: {}'
            ).format(
                json.dumps(spec, indent=2),
                str(e),
            ))


class ChartFactory(object):
    spec_map = {
        'pie': PieChartSpec,
        'multibar': MultibarChartSpec,
        'multibar-aggregate': MultibarAggregateChartSpec,
    }

    @classmethod
    def from_spec(cls, spec):
        if spec.get('type') not in cls.spec_map:
            raise BadSpecError(_('Illegal chart type: {0}, must be one of the following choices: ({1})').format(
                spec.get('type', _('(missing from spec)')),
                ', '.join(cls.spec_map.keys())
            ))
        try:
            return cls.spec_map[spec['type']].wrap(spec)
        except BadValueError as e:
            raise BadSpecError(_('Problem creating chart from spec: {}, message is: {}').format(
                json.dumps(spec, indent=2),
                str(e),
            ))


class ReportOrderByFactory(object):

    @classmethod
    def from_spec(cls, spec):
        return OrderBySpec.wrap(spec)
qedsoftware/commcare-hq
corehq/apps/userreports/reports/factory.py
Python
bsd-3-clause
3,066
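ReportColumnFactory and ChartFactory above share one idea: a class-level dict mapping a 'type' key in the spec to the class that handles it. A stripped-down sketch of that dispatch pattern, using hypothetical TextWidget/NumberWidget stand-ins rather than the corehq classes:

class TextWidget(object):
    def __init__(self, spec):
        self.spec = spec


class NumberWidget(object):
    def __init__(self, spec):
        self.spec = spec


class WidgetFactory(object):
    # Same shape as ReportColumnFactory.class_map: spec type -> handler class.
    class_map = {
        'text': TextWidget,
        'number': NumberWidget,
    }

    @classmethod
    def from_spec(cls, spec):
        widget_type = spec.get('type') or 'text'  # fallback, like 'field' above
        if widget_type not in cls.class_map:
            raise ValueError('Unknown widget type: %s, must be in [%s]'
                             % (widget_type, ', '.join(cls.class_map)))
        return cls.class_map[widget_type](spec)


print(type(WidgetFactory.from_spec({'type': 'number', 'value': 3})).__name__)
# -> NumberWidget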
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

art.process_dataset(N = 1024, FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12);
antoinecarme/pyaf
tests/artificial/transf_Logit/trend_Lag1Trend/cycle_5/ar_12/test_artificial_1024_Logit_Lag1Trend_5_12_20.py
Python
bsd-3-clause
262
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    Fabric deployment file.

    :copyright: (c) 2011 Julen Ruiz Aizpuru.
    :license: BSD, see LICENSE for more details.
"""
from fabric.api import cd, env
from fabric.context_managers import hide, prefix, settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.operations import put, run, sudo

from configs import fabric

#
# Load server settings from the configuration module
#
env.hosts = fabric.HOSTS
env.user = fabric.USER

env.project_name = fabric.PROJECT_NAME
env.project_path = fabric.PROJECT_PATH
env.project_repo = fabric.PROJECT_REPO
env.project_url = fabric.PROJECT_URL
env.project_settings = fabric.PROJECT_SETTINGS

env.vhost_dir = fabric.VHOST_DIR
env.vhost_file = fabric.VHOST_FILE

env.wsgi_file = fabric.WSGI_FILE
env.wsgi_user = fabric.WSGI_USER
env.wsgi_group = fabric.WSGI_GROUP


def bootstrap():
    """Creates initial directories and virtual environment."""
    if (exists('%(project_path)s' % env) and \
        confirm('%(project_path)s already exists. Do you want to continue?' \
                % env, default=False)) or not exists('%(project_path)s' % env):

        print('Bootstrapping initial directories...')

        with settings(hide('warnings', 'running', 'stdout', 'stderr')):
            sudo('mkdir -p %(project_path)s' % env)
            sudo('chown %(user)s:%(user)s %(project_path)s' % env)

            with cd(env.project_path):
                run('git clone %(project_repo)s .' % env)
                run('virtualenv --no-site-packages env')

            with settings(warn_only=True):
                run('mkdir -m a+w logs')
                run('mkdir -m g+w tzos/dbs')
                run('mkdir -m g+w tzos/dbs/dbxml')
    else:
        print('Aborting.')


def deploy():
    """Updates the code and installs the production site."""
    print('Deploying the site...')

    update_code()
    install_site()


def update_code():
    """Updates the code used in the production environment."""
    print('Getting the latest code and dependencies...')

    with settings(hide('warnings', 'running', 'stdout', 'stderr')):
        with cd(env.project_path):
            run('git pull')
            run('pip install -E env/ -r requirements.txt')


def install_site():
    """Updates the configuration and enables the site."""
    print('Configuring and installing site...')

    update_config()
    enable_site()


def update_config():
    """Updates configuration files (Apache, WSGI, app)."""
    with settings(hide('warnings', 'running', 'stdout', 'stderr')):
        # Configure VirtualHost
        upload_template('configs/virtualhost.conf', '%(vhost_file)s' % env,
                        context=env, use_jinja=True, use_sudo=True)

        # Configure WSGI application
        upload_template('configs/tzos.wsgi', '%(wsgi_file)s' % env,
                        context=env, use_jinja=True)

        # Configure and install production settings
        upload_template('configs/production.py', '%(project_settings)s' % env,
                        context=env, use_jinja=True)


def enable_site():
    """Enables the site."""
    _switch_site(True)


def disable_site():
    """Disables the site."""
    _switch_site(False)


def _switch_site(enable):
    """Switches site's status to enabled or disabled."""
    action = "Enabling" if enable else "Disabling"
    print('%s site...' % action)

    with settings(hide('warnings', 'running', 'stdout', 'stderr')):
        env.apache_command = 'a2ensite' if enable else 'a2dissite'
        sudo('%(apache_command)s %(project_name)s' % env)

        with settings(warn_only=True):
            sudo('/etc/init.d/apache2 reload')


def touch():
    """Runs 'touch' on the WSGI file to reload daemon processes."""
    print('Running touch...')
    run('touch %(wsgi_file)s' % env)


def copy_local_dbs():
    """Copies local db data to play with the application."""
    print('Copying local dbs to the server...')
    with settings(warn_only=True):
        put('tzos/dbs/*', env.project_path + '/tzos/dbs/', mode=0664)
        put('tzos/dbs/dbxml/*', env.project_path + '/tzos/dbs/dbxml/',
            mode=0664)


def compile_translations():
    """Compiles PO translations."""
    print('Compiling translations...')
    with cd(env.project_path):
        with prefix('source env/bin/activate' % env):
            run('pybabel compile -d tzos/translations')
julen/tzos
fabfile.py
Python
bsd-3-clause
4,547
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Game.extra_data'
        db.add_column('competition_game', 'extra_data',
                      self.gf('django.db.models.fields.TextField')(null=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Game.extra_data'
        db.delete_column('competition_game', 'extra_data')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'competition.avatar': {
            'Meta': {'object_name': 'Avatar'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'image_height': ('django.db.models.fields.IntegerField', [], {}),
            'image_width': ('django.db.models.fields.IntegerField', [], {}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'thumbnail_height': ('django.db.models.fields.IntegerField', [], {}),
            'thumbnail_width': ('django.db.models.fields.IntegerField', [], {})
        },
        'competition.competition': {
            'Meta': {'ordering': "['-is_running', '-is_open', '-start_time']", 'object_name': 'Competition'},
            'avatar': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['competition.Avatar']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'cost': ('django.db.models.fields.FloatField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_num_team_members': ('django.db.models.fields.IntegerField', [], {}),
            'min_num_team_members': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'payment_option': ('django.db.models.fields.CharField', [], {'default': "'T'", 'max_length': '1'}),
            'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['competition.RegistrationQuestion']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'competition.game': {
            'Meta': {'object_name': 'Game'},
            'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {}),
            'extra_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'competition.invitation': {
            'Meta': {'ordering': "['-sent']", 'object_name': 'Invitation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'receiver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'received_invitations'", 'to': "orm['auth.User']"}),
            'response': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_invitations'", 'to': "orm['auth.User']"}),
            'sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Team']"})
        },
        'competition.organizer': {
            'Meta': {'object_name': 'Organizer'},
            'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'role': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['competition.OrganizerRole']", 'symmetrical': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'competition.organizerrole': {
            'Meta': {'object_name': 'OrganizerRole'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'competition.registration': {
            'Meta': {'object_name': 'Registration'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'signup_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'competition.registrationquestion': {
            'Meta': {'object_name': 'RegistrationQuestion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.TextField', [], {}),
            'question_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
        },
        'competition.registrationquestionchoice': {
            'Meta': {'object_name': 'RegistrationQuestionChoice'},
            'choice': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_choice_set'", 'to': "orm['competition.RegistrationQuestion']"})
        },
        'competition.registrationquestionresponse': {
            'Meta': {'object_name': 'RegistrationQuestionResponse'},
            'agreed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'choices': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'response_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['competition.RegistrationQuestionChoice']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'response_set'", 'to': "orm['competition.RegistrationQuestion']"}),
            'registration': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'response_set'", 'to': "orm['competition.Registration']"}),
            'text_response': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'competition.score': {
            'Meta': {'object_name': 'Score'},
            'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Game']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Team']"})
        },
        'competition.team': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('competition', 'slug'),)", 'object_name': 'Team'},
            'avatar': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['competition.Avatar']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'eligible_to_win': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'time_paid': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['competition']
michaelwisely/django-competition
src/competition/migrations/0005_auto__add_field_game_extra_data.py
Python
bsd-3-clause
12,613
import os

import numpy as np
from scipy import io as sio
import SimpleITK as sitk


def loadimg(file, target_resolution):
    if file.endswith('.mat'):
        filecont = sio.loadmat(file)
        img = filecont['img']
        for z in range(img.shape[-1]):  # Flip the image upside down
            img[:, :, z] = np.flipud(img[:, :, z])
        img = np.swapaxes(img, 0, 1)
    elif file.endswith('.tif'):
        img = loadtiff3d(file)
    elif file.endswith('.mhd'):
        from scipy.ndimage.interpolation import zoom
        mhd = sitk.ReadImage(file)
        img = sitk.GetArrayFromImage(mhd)

        # Resample the image to isotropic resolution
        print('Resample Image to isotropic resolution 1mmx1mmx1mm')
        sx, sy, sz = mhd.GetSpacing()
        img = zoom(img, (sz / target_resolution, sy / target_resolution,
                         sx / target_resolution), order=0)
        img = np.transpose(img, (2, 1, 0))
    elif file.endswith('.nii') or file.endswith('.nii.gz'):
        import nibabel as nib
        img = nib.load(file)
        img = img.get_data()
    else:
        raise IOError("The extension of " + file +
                      ' is not supported. File extension supported are: *.tif, *.mat, *.nii')

    return img


def loadtiff3d(filepath):
    """Load a tiff file into 3D numpy array"""
    from libtiff import TIFF
    tiff = TIFF.open(filepath, mode='r')
    stack = []
    for sample in tiff.iter_images():
        stack.append(np.rot90(np.fliplr(np.flipud(sample))))

    out = np.dstack(stack)
    tiff.close()

    return out


def writetiff3d(filepath, block):
    from libtiff import TIFF
    try:
        os.remove(filepath)
    except OSError:
        pass

    tiff = TIFF.open(filepath, mode='w')
    block = np.swapaxes(block, 0, 1)

    for z in range(block.shape[2]):
        tiff.write_image(np.flipud(block[:, :, z]), compression=None)
    tiff.close()


def loadswc(filepath):
    '''
    Load swc file as a N X 7 numpy array
    '''
    swc = []
    with open(filepath) as f:
        lines = f.read().split("\n")
        for l in lines:
            if not l.startswith('#'):
                cells = l.split(' ')
                if len(cells) == 7:
                    cells = [float(c) for c in cells]
                    # cells[2:5] = [c-1 for c in cells[2:5]]
                    swc.append(cells)
    return np.array(swc)


def saveswc(filepath, swc):
    if swc.shape[1] > 7:
        swc = swc[:, :7]

    with open(filepath, 'w') as f:
        for i in range(swc.shape[0]):
            print('%d %d %.3f %.3f %.3f %.3f %d' %
                  tuple(swc[i, :].tolist()), file=f)


def crop(img, thr):
    """Crop a 3D block with value > thr"""
    ind = np.argwhere(img > thr)
    x = ind[:, 0]
    y = ind[:, 1]
    z = ind[:, 2]
    xmin = max(x.min() - 10, 0)
    xmax = min(x.max() + 10, img.shape[0])
    ymin = max(y.min() - 10, 1)
    ymax = min(y.max() + 10, img.shape[1])
    zmin = max(z.min() - 10, 2)
    zmax = min(z.max() + 10, img.shape[2])

    return img[xmin:xmax, ymin:ymax, zmin:zmax], np.array(
        [[xmin, xmax], [ymin, ymax], [zmin, zmax]])


def world2ras(voxpos):
    '''Get the vox2ras-tkr transform. Inspired by get_vox2ras_tkr in
    https://discourse.slicer.org/t/building-the-ijk-to-ras-transform-from-a-nrrd-file/1513
    '''
    x, y, z = voxpos
    lps_to_ras = np.diag([-1, -1, 1, 1])
    p = lps_to_ras.dot(np.asarray([x, y, z, 1]).T)
    return np.squeeze(p)[:3]


def swc2world(swc, mhd, spacing, slicer=False):
    # First transfer the image coordinates to the original image size
    sp = mhd.GetSpacing()
    swc[:, 2] *= spacing[0] / sp[0]
    swc[:, 3] *= spacing[1] / sp[1]
    swc[:, 4] *= spacing[2] / sp[2]

    # Use SimpleITK to transform back to physical coordinates
    for i in range(swc.shape[0]):
        swc[i, 2:5] = mhd.TransformContinuousIndexToPhysicalPoint(swc[i, 2:5])
        swc[i, 5] *= spacing[0]

    # Transform to RAS spacing coordinates that can be rendered in
    # 3D Slicer if requested
    if slicer:
        print('Converting the vtk coordinates to RAS space')
        for i in range(swc.shape[0]):
            swc[i, 2:5] = world2ras(swc[i, 2:5])

    return swc


def swc2vtk(swc, outvtkpath):
    swc_arr = swc.get_array()
    nnode = swc_arr.shape[0]
    vtkstr = '# vtk DataFile Version 2.0\n'
    vtkstr += 'Generated with Rivuletpy\n'
    vtkstr += 'ASCII\n'
    vtkstr += 'DATASET POLYDATA\n'

    vtkstr += 'POINTS {} float\n'.format(nnode)
    id2vtkidx = {}
    for i in range(nnode):
        vtkstr += '{} {} {}\n'.format(swc_arr[i, 2], swc_arr[i, 3],
                                      swc_arr[i, 4])
        id2vtkidx[int(swc_arr[i, 0])] = i

    linectr = 0
    vtklinestr = ''
    for i in range(nnode):
        id, pid = swc_arr[i, 0], swc_arr[i, -1]
        if pid >= 0 and int(pid) in id2vtkidx:
            linectr += 1
            vtklinestr += '{} {} {}\n'.format(2, id2vtkidx[int(id)],
                                              id2vtkidx[int(pid)])

    vtkstr += 'LINES {} {}\n'.format(linectr, linectr * 3)
    vtkstr += vtklinestr

    vtkstr += "POINT_DATA {}\n".format(nnode)
    vtkstr += "SCALARS contourArray double\n"
    vtkstr += "LOOKUP_TABLE default\n"
    for i in range(nnode):
        vtkstr += '{}\n'.format(swc_arr[i, -2])

    vtkstr += "SCALARS indicatorArray char\n"
    vtkstr += "LOOKUP_TABLE default\n"
    for i in range(nnode):
        vtkstr += '0\n'

    with open(outvtkpath, 'w') as f:
        f.write(vtkstr)
RivuletStudio/rivuletpy
rivuletpy/utils/io.py
Python
bsd-3-clause
5,620
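A small round-trip through loadswc/saveswc, under the assumption that the module is importable as rivuletpy.utils.io (the path given in this record); the toy two-node neuron and the output filename are made up for the example.

import numpy as np
from rivuletpy.utils.io import loadswc, saveswc  # import path assumed

# Columns of an SWC row: [id, type, x, y, z, radius, parent_id].
swc = np.asarray([
    [1, 2, 0.0, 0.0, 0.0, 1.0, -1],  # root node (parent -1)
    [2, 2, 5.0, 0.0, 0.0, 1.0, 1],   # child of node 1
])
saveswc('toy.swc', swc)

# loadswc parses each non-comment line with exactly 7 fields back
# into a float array, so the shape survives the round trip.
roundtrip = loadswc('toy.swc')
assert roundtrip.shape == (2, 7)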
#!/usr/bin/env python

import coverage_utils
coverage_utils.cov_start()

import rospkg
import sys

rospack = rospkg.RosPack()
mission_control_path = rospack.get_path('mission_control')
sys.path.append("%s/src" % mission_control_path)

import rospy
import behaviour
from mission_control_utils_constants import Constants
from std_msgs.msg import String


def main():
    rospy.init_node('behaviour_variable', anonymous=True)

    beha = behaviour.Behaviour()
    beha.set_debug_level(rospy.get_param('~debug', 0))
    beha.set_priority(rospy.get_param('~priority'))
    beha.set_active(rospy.get_param('~active'))
    beha.set_executable(rospy.get_param('~script'))

    """ We need to make a little pause after start-up so some of the
    token release/request do not go missing """
    rospy.sleep(float(rospy.get_param('~wait_before_startup', 1)))

    test_counter6_foo_pub = rospy.Publisher("/mission_control/test/variable/test_counter6_foo", String, queue_size=Constants.QUEUE_SIZE)
    test_counter6_bar_pub = rospy.Publisher("/mission_control/test/variable/test_counter6_bar", String, queue_size=Constants.QUEUE_SIZE)
    test_counter3_foo_pub = rospy.Publisher("/mission_control/test/variable/test_counter3_foo", String, queue_size=Constants.QUEUE_SIZE)
    test_default_pub = rospy.Publisher("/mission_control/test/variable/test_default", String, queue_size=Constants.QUEUE_SIZE)

    rate = rospy.Rate(2)
    while not rospy.is_shutdown():
        beha.spin()
        rate.sleep()

        if beha._token:
            # Own first state
            test_counter6_foo = beha.get_var('test_counter6_foo')
            test_counter6_foo_pub.publish(str(test_counter6_foo))

            # Own second state
            test_counter6_bar = beha.get_var('test_counter6_bar')
            test_counter6_bar_pub.publish(str(test_counter6_bar))

            # Other nodes
            test_counter3_foo = beha.get_var('test_counter3_foo')
            test_counter3_foo_pub.publish(str(test_counter3_foo))

            # Default value
            test_default = beha.get_var('some_random_val', 777)
            test_default_pub.publish(str(test_default))

    coverage_utils.cov_stop()


if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
mission-control-ros/mission_control
test/behaviour_variable_node.py
Python
bsd-3-clause
2,286
from helper import *

def doTest():
    pass
wangjeaf/CSSCheckStyle
tests/unit/todos/TryParseKeyFrames.py
Python
bsd-3-clause
45
#!/usr/bin/env python
import math
import numpy as np
import scipy
import scipy.linalg as la
import os
from sklearn import linear_model


class LinearPredictor:
    def __init__(self):
        self.m_w = 0
        self.m_mean_x = 0
        self.m_mean_y = 0
        self.m_std_x = 1

    # ----------------------------------------------------------------------
    # load predictor from file
    # ----------------------------------------------------------------------
    def load(self, filename):
        A = np.load(filename)  # load numpy array stored in filename
        self.m_w = A[0, :]
        self.m_mean_x = A[1, :]
        self.m_std_x = A[2, :]
        self.m_mean_y = A[3, 0]

    # ----------------------------------------------------------------------
    # compute output prediction given input features
    # ----------------------------------------------------------------------
    def predict(self, feat_array):
        # renormalize features
        xtmp = (feat_array - self.m_mean_x) / self.m_std_x
        # compute dot product between features and predictor
        return np.dot(xtmp, self.m_w) + self.m_mean_y

    def to_string(self):
        print self.m_w


def load(filename):
    pred = LinearPredictor()
    pred.load(filename)
    return pred


def train(X, y, filename, options, feature_weight=np.array([1.0]),
          sample_weight_type="None", print_flag=0):
    mean_x = X.mean(0)
    std_x = X.std(0)
    mean_y = y.mean(0)
    n = mean_x.size

    # hack to keep bias feature when removing mean and renormalizing with std
    #mean_x[n - 1] = 0;
    #std_x[n - 1] = 1;

    for index, x in enumerate(std_x):
        if x == 0.0:
            std_x[index] = 1.0
            print "WARNING: Failing index with zero stddev = %d" % index

    # renormalize features
    X = (X - mean_x) / std_x
    (r, c) = X.shape

    # solve ridge regression
    if options.size == 0:
        options = np.array([1])

    # compute sample weights
    y = y
    m = y.size
    sample_weights = np.ones(m)
    X_sub = np.array([])
    y_sub = np.array([])
    nonzero_val = 0.01
    if sample_weight_type == "weighted":
        nb_nonzero = np.sum(abs(y) > nonzero_val)
        weight_nonzero = m / (2.0 * nb_nonzero)
        weight_zero = m / (2.0 * (m - nb_nonzero))
        sample_weights[abs(y) > nonzero_val] = weight_nonzero
        sample_weights[abs(y) <= nonzero_val] = weight_zero
    elif sample_weight_type == "subsample":
        nb_nonzero = np.sum(abs(y) > nonzero_val)
        if nb_nonzero < m - nb_nonzero:
            X_sub = X[abs(y) > nonzero_val, :]
            y_sub = y[abs(y) > nonzero_val]
            Xtmp = X[abs(y) <= nonzero_val, :]
            ytmp = y[abs(y) <= nonzero_val]
            X_sub = np.vstack((X_sub, Xtmp[range(0, m - nb_nonzero, int((m - nb_nonzero) / nb_nonzero)), :]))
            y_sub = np.append(y_sub, ytmp[range(0, m - nb_nonzero, int((m - nb_nonzero) / nb_nonzero))])
            y_sub = y_sub - mean_y
        else:
            X_sub = X[abs(y) <= nonzero_val, :]
            y_sub = y[abs(y) <= nonzero_val]
            Xtmp = X[abs(y) > nonzero_val, :]
            ytmp = y[abs(y) > nonzero_val]
            X_sub = np.vstack((X_sub, Xtmp[range(0, nb_nonzero, int(nb_nonzero / (m - nb_nonzero))), :]))
            y_sub = np.append(y_sub, ytmp[range(0, nb_nonzero, int(nb_nonzero / (m - nb_nonzero)))])
            y_sub = y_sub - mean_y

    y = y - mean_y

    A = np.zeros((4, n))
    A[1, :] = mean_x
    A[2, :] = std_x
    A[3, :] = mean_y

    for i in range(options.size):
        #print "[DAgger] Training with Regularizer %f" % (options[i])
        reg = math.sqrt(r) * options[i]
        outname, outext = os.path.splitext(filename)
        fname = "%s-%f%s" % (outname, options[i], outext)
        reg_algo = linear_model.Ridge(alpha=reg, fit_intercept=False)
        #reg_algo = linear_model.Lasso(alpha=reg/math.sqrt(r), fit_intercept=False)
        if sample_weight_type == "None":
            reg_algo.fit(X, y)
            w = reg_algo.coef_
        elif sample_weight_type == "subsample":
            reg_algo.fit(X_sub, y_sub)
            w = reg_algo.coef_
        if print_flag == 1:
            print "[DAgger] learned weights for reg ", options[i], ": "
            print w
        A[0, :] = w
        np.save(fname, A)
icoderaven/slytherin_dagger
src/linear_predictor.py
Python
bsd-3-clause
4,306
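train() above persists a 4-row parameter array per regularizer and load() reads it back. A usage sketch, under the assumption that the module is importable as linear_predictor (per this record's path); all numbers and file names are made up.

import numpy as np
from linear_predictor import LinearPredictor  # import path assumed

# Hand-build the 4-row array that LinearPredictor.load() expects.
n_features = 3
A = np.zeros((4, n_features))
A[0, :] = [0.5, -0.2, 1.0]   # row 0: weights w
A[1, :] = 0.0                # row 1: feature means
A[2, :] = 1.0                # row 2: feature stddevs
A[3, 0] = 2.0                # row 3: target mean (only column 0 is read)
np.save('predictor.npy', A)

pred = LinearPredictor()
pred.load('predictor.npy')
# (x - mean) / std dotted with w, plus the target mean:
print(pred.predict(np.array([1.0, 2.0, 3.0])))  # -> 5.1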
import argparse
import inspect
import logging

from compago import Option

logger = logging.getLogger(__name__)


class CommandError(Exception):
    pass


class Command(object):

    def __init__(self, target, parent=None):
        self.target = target
        self.name = target.__name__
        if parent:
            self.prog = '%s %s' % (parent.name, target.__name__)
            self.parents = [parent.parser]
        else:
            self.prog = target.__name__
            self.parents = []
        self.description = target.__doc__ or ''
        self.options = []

    def run(self, *args):
        self.options += self.default_options()
        cmd_ns = self.parser.parse_args(args)
        logger.debug('Parsed command namespace: %s' % cmd_ns.__dict__)
        kwargs = {}
        for k, v in cmd_ns.__dict__.items():
            if k in self.args:
                kwargs[k] = v
        try:
            logger.debug('Running target:%s' % self.target)
            return self.target(**kwargs)
        except TypeError, e:
            raise CommandError('Invalid command args: %s' % e)

    def default_options(self):
        options = []
        logger.debug('self.args:%s' % str(self.args))
        logger.debug('self.kwargs:%s' % str(self.kwargs))
        for arg in self.args:
            if arg in self.kwargs:
                default = self.kwargs[arg]
                if isinstance(default, bool):
                    if default:
                        action = 'store_false'
                    else:
                        action = 'store_true'
                else:
                    action = 'store'
                option = Option('-%s' % arg[0], '--%s' % arg, action=action,
                                dest=arg, required=False, default=default)
            else:
                option = Option(arg, type=unicode)
            if not option.dest in [o.dest for o in self.options]:
                logger.debug('Option:%s not already found in options:%s' % (
                    option, self.options))
                options.append(option)
            else:
                logger.debug('Option:%s already in options:%s' % (
                    option, self.options))
        return options

    def add_option(self, *args, **kwargs):
        option = Option(*args, **kwargs)
        logger.debug('Adding option:%s' % option)
        self.options.append(option)

    @property
    def parser(self):
        parser = argparse.ArgumentParser(prog=self.prog,
                                         description=self.description,
                                         parents=self.parents)
        for option in self.options:
            logger.debug('Adding argument:%s to parser.' % option)
            parser.add_argument(*option.args, **option.kwargs)
        return parser

    @property
    def usage(self):
        return self.parser.format_help()

    @property
    def args(self):
        args, varargs, keywords, defaults = inspect.getargspec(self.target)
        return args

    @property
    def kwargs(self):
        args, varargs, keywords, defaults = inspect.getargspec(self.target)
        kwargs = dict(zip(*[reversed(l) for l in (args, defaults or [])]))
        return kwargs
jmohr/compago
compago/command.py
Python
bsd-3-clause
3,203
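Command.kwargs above recovers keyword defaults by zipping the reversed argument list with the reversed defaults tuple, since defaults always align with the trailing arguments. The trick in isolation, with a made-up target function (getargspec is the Python 2 API this module uses; getfullargspec is its Python 3 successor):

import inspect


def deploy(host, port=8000, verbose=False):
    """Hypothetical command target."""


spec = inspect.getargspec(deploy)
# args = ['host', 'port', 'verbose'], defaults = (8000, False); reversing
# both lists before zipping pairs each default with its trailing argument.
defaults = dict(zip(*[reversed(l) for l in (spec.args, spec.defaults or [])]))
print(defaults)  # {'verbose': False, 'port': 8000}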
# Copyright (C) 2014, 2015 University of Vienna
# All rights reserved.
# BSD license.
# Author: Ali Baharev <ali.baharev@gmail.com>
from __future__ import print_function
from sys import stderr
from codegen import gen_column_permutation
from py3compat import irange
from utils import has_matplotlib, DATADIR, create_dir


def gen_r_c_color(rows, cols, rowp, colp, colors):
    to_str = {1: 'black', 2: 'red', 3: 'gray'}
    for k in irange(0, len(colors)):
        yield rowp[rows[k]], colp[cols[k]], to_str[colors[k]]


def plot_dm(name, rows, cols, rowp, colp, colors, sccs, show=True, msg=''):
    from matplotlib import pyplot as plt
    _, ax = setup(plt, len(rowp), len(colp))
    for r, c, color in gen_r_c_color(rows, cols, rowp, colp, colors):
        ax.add_artist(square(r, c, facecolor=color))
    # Mark the border of SCC blocks yellow
    for r, c, size in sccs:
        ax.add_artist(square(r, c, size=size, facecolor='none',
                             edgecolor='yellow', linewidth=3.0))
    beautify_axes(ax)
    if show:
        ax.set_title(name)
        plt.tight_layout(pad=1.00)
        plt.show()
    else:
        plt.tight_layout(pad=1.00)
        plt.title(msg)
        # TODO Hard-coded path
        plt.savefig('/tmp/pics/'+name+'.pdf', bbox_inches='tight',
                    pad_inches=0.05)
        plt.close()

#-------------------------------------------------------------------------------

def to_pdf(g, rowp, colp, msg='', fname='', path='/tmp/pics/'):
    # shows the plot if no file name is given
    # FIXME Make it configurable whether we want to use Agg (e.g. on a remote
    # server without X).
    #import matplotlib as mpl
    #mpl.use('Agg')
    #mpl.rcParams['axes.linewidth'] = 0.25
    ##mpl.rcParams['lines.linewidth'] = 0.0
    ##mpl.rcParams['patch.linewidth'] = 0.0
    from matplotlib import pyplot as plt
    indexof = {name: i for i, name in enumerate(colp)}
    rows = [[indexof[c] for c in g[r]] for r in rowp]
    _, ax = setup(plt, len(rows), len(colp))
    for r, row in enumerate(rows):
        for c in row:
            # r and c must be swapped: row -> y axis, col -> x axis
            rect = plt.Rectangle((c, r), 1, 1, facecolor='black',
                                 edgecolor='0.7')
            ax.add_artist(rect)
    beautify_axes(ax)
    if not fname:
        ax.set_title(msg)
        plt.tight_layout(pad=1.00)
        plt.show()  # if fails, the Agg backend does not have show
    else:
        plt.tight_layout(pad=1.00)
        plt.rcParams.update({'axes.titlesize': 'small'})
        plt.title(msg)
        create_dir(path)
        plt.savefig(path+fname+'.pdf', bbox_inches='tight', pad_inches=0.05)
        plt.close()

#-------------------------------------------------------------------------------

def plot_hessenberg(g, rowp, colp, partitions, msg, mark_red=[]):
    # Compare with plot_sparsity
    from matplotlib import pyplot as plt
    _, ax = setup(plt, len(rowp), len(colp))
    for r, c in gen_entries(g, rowp, colp):
        ax.add_artist(square(r, c))
    for r, c in partitions:
        draw_partition(ax, r, c)
    for r, c in mark_red:
        ax.add_artist(square(r, c, facecolor='r'))
    beautify_axes(ax)
    ax.set_title(msg)
    plt.show()


def gen_entries(g, rowp, colp):
    indexof = {name: i for i, name in enumerate(colp)}
    rows = [[indexof[c] for c in g[r]] for r in rowp]
    return ((r, c) for r, row in enumerate(rows) for c in row)


def square(r, c, size=1, facecolor='k', edgecolor='0.7', linewidth=None):
    from matplotlib.pyplot import Rectangle
    # r and c must be swapped: row -> y axis, col -> x axis
    return Rectangle((c, r), size, size, facecolor=facecolor,
                     edgecolor=edgecolor, linewidth=linewidth)

#-------------------------------------------------------------------------------

def plot_bipartite(g, forbidden, row_perm, col_perm):
    # The row and column identifiers are in permuted order
    indexof = {name: i for i, name in enumerate(col_perm)}
    rows = []
    for r in row_perm:
        cols = g[r]
        rows.append([(indexof[c], (r, c) not in forbidden) for c in cols])
    #
    plot_sparsity(rows, len(col_perm))

#-------------------------------------------------------------------------------

def get_spiked_form_rowwise(blocks):
    col_perm = list(gen_column_permutation(blocks))
    indexof = {name: i for i, name in enumerate(col_perm)}
    rows = []
    for blk in blocks:
        for eq in blk.eqs:
            row = [(indexof[name], name in eq.elims) for name in eq.names]
            rows.append(row)
        for y, x, _ in blk.conn_triples:
            rows.append([(indexof[x], True), (indexof[y], True)])
    return rows, col_perm


def plot_ordering(blocks):
    #from utils import serialize
    #serialize(blocks, 'blocks.pkl')
    #return
    rows, col_perm = get_spiked_form_rowwise(blocks)
    def func_draw_partitions(ax):
        draw_row_and_col_partitions(ax, blocks)
    #
    plot_sparsity(rows, len(col_perm), func_draw_partitions)


def plot_sparsity(rows, n_cols, func_draw_partitions=None):
    #import matplotlib as mpl
    ##mpl.use('Agg')
    #mpl.rcParams['axes.linewidth'] = 0.45
    #mpl.rcParams['lines.linewidth'] = 0.00
    #mpl.rcParams['patch.linewidth'] = 0.00
    from matplotlib import pyplot as plt
    fig, ax = setup(plt, len(rows), n_cols)
    for r, row in enumerate(rows):
        for c, allowed in row:
            # r and c must be swapped: row -> y axis, col -> x axis
            clr = get_color(r, c, allowed)
            rect = plt.Rectangle((c, r), 1, 1, facecolor=clr, edgecolor='0.7')
            ax.add_artist(rect)
    if func_draw_partitions:
        # used to be:
        func_draw_partitions(ax)
        # draw_row_and_col_partitions(ax, blocks)
    beautify_axes(ax)
    fig.tight_layout()
    plt.show()


def get_color(r, c, allowed):
    # r and c must be swapped: row -> y axis, col -> x axis
    if c <= r:  # below or on the diagonal
        clr = 'black' if allowed else 'grey'
    else:
        clr = 'red'
    return clr


def draw_row_and_col_partitions(ax, blocks):
    pos = 0
    for blk in blocks:
        if blk.eqs:
            pos += len(blk.eqs)
            draw_partition(ax, pos, pos)
        if blk.conn_triples:
            pos += len(blk.conn_triples)
            draw_partition(ax, pos, pos)


def draw_partition(ax, r, c):
    line_color, line_width = 'blue', 1
    ax.axhline(r, c=line_color, lw=line_width)
    ax.axvline(c, c=line_color, lw=line_width)

#-------------------------------------------------------------------------------

def plot_bipartite_no_red_greedy_order(g, eqs, forbidden, msg=''):
    from heap_md import min_degree as greedy_order
    rowp, colp = greedy_order(g, eqs, forbidden)[0:2]
    _plot_bipartite(g, forbidden, rowp, colp, msg)


def _plot_bipartite(g, forbidden, row_perm, col_perm, msg=''):
    # The row and column identifiers are in permuted order
    indexof = {name: i for i, name in enumerate(col_perm)}
    rows = []
    for r in row_perm:
        cols = g[r]
        rows.append([(indexof[c], (r, c) not in forbidden) for c in cols])
    #
    _plot_sparsity(rows, len(col_perm), msg)


def _plot_sparsity(rows, n_cols, msg):
    from matplotlib import pyplot as plt
    _, ax = setup(plt, len(rows), n_cols)
    for r, row in enumerate(rows):
        for c, allowed in row:
            # r and c must be swapped: row -> y axis, col -> x axis
            clr = 'black' if allowed else 'grey'
            rect = plt.Rectangle((c, r), 1, 1, facecolor=clr, edgecolor='0.7')
            ax.add_artist(rect)
    beautify_axes(ax)
    ax.set_title(msg)
    plt.tight_layout(pad=1.00)
    plt.show()

#-------------------------------------------------------------------------------
# Compare plotting with SDOPT

def setup(plt, nrows, ncols):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    mng = plt.get_current_fig_manager()
    mng.resize(1865, 1025)
    #mng.full_screen_toggle()
    plt.axis('scaled')
    ax.set_xlim([0, ncols])
    ax.set_ylim([0, nrows])
    return fig, ax


def beautify_axes(ax):
    ax.invert_yaxis()
    ax.set_xticks([])
    ax.set_yticks([])


def no_matplotlib(*args, **kwargs):
    stderr.write('Plotting requires a working matplotlib installation.\n')

if not has_matplotlib():
    plot_dm = no_matplotlib
    plot_sparsity = no_matplotlib
    to_pdf = no_matplotlib


def main():
    from utils import deserialize
    blocks = deserialize(DATADIR + 'blocks.pkl.gz')
    plot_ordering(blocks)

if __name__ == '__main__':
    main()
baharev/sdopt-tearing
plot_ordering.py
Python
bsd-3-clause
8,578
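Every plotting helper in this file draws one matplotlib Rectangle per matrix entry with row and column swapped (row on the y axis, column on the x axis) and then inverts the y axis. A self-contained toy version of that core loop; the 3x4 sparsity pattern is made up:

import matplotlib.pyplot as plt

rows = [[0, 2], [1], [0, 3]]  # column indices per row (assumed data)
n_cols = 4

fig, ax = plt.subplots()
ax.set_xlim(0, n_cols)
ax.set_ylim(0, len(rows))
for r, row in enumerate(rows):
    for c in row:
        # row -> y axis, col -> x axis, exactly as in the comments above
        ax.add_artist(plt.Rectangle((c, r), 1, 1,
                                    facecolor='black', edgecolor='0.7'))
ax.invert_yaxis()  # put row 0 at the top, like a matrix
ax.set_xticks([])
ax.set_yticks([])
plt.show()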
import subprocess

subprocess.call(["coverage", "run", "--source", "toyplot",
                 "--omit", "toyplot/testing.py", "-m", "nose",
                 "--exclude-dir", "toyplot"])
subprocess.call(["coverage", "run", "--append", "--source", "toyplot",
                 "--omit", "toyplot/testing.py", "-m", "behave"])
subprocess.call(["coverage", "report"])
subprocess.call(["coverage", "html", "--directory", ".cover"])
cmorgan/toyplot
regression.py
Python
bsd-3-clause
410
from helper import *

def doTest():
    _combine_should_not_make_mistake()
    _totally_same_ruleset()
    do_not_touch_background_position()
    _do_not_change_comment()

def _combine_should_not_make_mistake():
    css = '''.a {width:0px}
.a, .b{width:1px}
.b{width:0px}'''
    expected = '''.a {
    width: 0;
}

.a, .b {
    width: 1px;
}

.b {
    width: 0;
}'''
    fixer, msg = doFix(css, '')
    equal(msg, expected, 'do not make mistake when combine rulesets')

def do_not_touch_background_position():
    css = '''.a {background-position: 0 0}
.test {width:1}
.b {background-position: 0 0}
'''
    expected = '''.a {
    background-position: 0 0;
}

.test {
    width: 1;
}

.b {
    background-position: 0 0;
}'''
    fixer, msg = doFix(css, '')
    equal(msg, expected, 'do not combine background-position')

def _totally_same_ruleset():
    css = '''/*fdafda*/
.page-title {
    width: 100px;
    padding: 0px 1px;
}

.page-title {
    width: 100px;
    padding: 0px 1px;
}'''
    expected = '''/* fdafda */
.page-title {
    width: 100px;
    padding: 0 1px;
}'''
    fixer, msg = doFix(css, '')
    equal(msg, expected, 'it is the same ruleset')

def _do_not_change_comment():
    css = '''/*fdafda, fda,fda,fdas */
.page-title {
    width: 100px;
    padding: 0px 1px;
}

.page-title {
    width: 100px;
    padding: 0px 1px;
}'''
    expected = '''/*fdafda, fda,fda,fdas */
.page-title {
    width: 100px;
    padding: 0 1px;
}'''
    fixer, msg = doFix(css, '')
    equal(msg, expected, 'do not change comment is ok')
wangjeaf/CSSCheckStyle
tests/unit/fix/FEDCombineSameRuleSets.py
Python
bsd-3-clause
1,544
import os

from setuptools import setup

setup(
    name="django-recurrence",
    use_scm_version=True,
    license="BSD",
    description="Django utility wrapping dateutil.rrule",
    long_description="\n".join(
        [
            open("README.rst", encoding="utf-8").read(),
            open("CHANGES.rst", encoding="utf-8").read(),
        ]
    ),
    author="Tamas Kemenczy",
    author_email="tamas.kemenczy@gmail.com",
    url="https://github.com/django-recurrence/django-recurrence",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Environment :: Plugins",
        "Framework :: Django",
        "Framework :: Django :: 2.2",
        "Framework :: Django :: 3.2",
        "Framework :: Django :: 4.0",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    python_requires=">=3.7",
    install_requires=["django>=2.2", "python-dateutil"],
    setup_requires=["setuptools_scm"],
    packages=["recurrence", "recurrence.migrations"],
    package_dir={"recurrence": "recurrence"},
    package_data={
        "recurrence": [
            os.path.join("static", "*.css"),
            os.path.join("static", "*.png"),
            os.path.join("static", "*.js"),
            os.path.join("locale", "*.po"),
            os.path.join("locale", "*.mo"),
        ]
    },
    zip_safe=False,
    include_package_data=True,
)
django-recurrence/django-recurrence
setup.py
Python
bsd-3-clause
2,011
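For context, the package built by this setup.py exposes a model field; a minimal usage sketch (the Course model is hypothetical, and the RecurrenceField import path follows the django-recurrence documentation):

from django.db import models
from recurrence.fields import RecurrenceField

class Course(models.Model):
    title = models.CharField(max_length=100)
    # stores a dateutil.rrule-style recurrence pattern, as wrapped by the package
    recurrences = RecurrenceField()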
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Jason Davies
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name = 'CouchDB-FUSE',
    version = '0.1',
    description = 'CouchDB FUSE module',
    long_description = \
"""This is a Python FUSE module for CouchDB. It allows CouchDB document
attachments to be mounted on a virtual filesystem and edited directly.""",
    author = 'Jason Davies',
    author_email = 'jason@jasondavies.com',
    license = 'BSD',
    url = 'http://code.google.com/p/couchdb-fuse/',
    zip_safe = True,
    py_modules = ['couchmount'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Database :: Front-Ends',
    ],
    entry_points = {
        'console_scripts': [
            'couchmount = couchmount:main',
        ],
    },
    install_requires = ['CouchDB>=0.9'],
)
jasondavies/couchdb-fuse
setup.py
Python
bsd-3-clause
1,251
#!/usr/bin/env python # Copyright (c) 2014, Paessler AG <support@paessler.com> # All rights reserved. # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions # and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions # and the following disclaimer in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse # or promote products derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # PRTG Python Miniprobe # Miniprobe needs at least Python 2.7 because of "importlib" # If older python version is used you will have to install "importlib" # import general modules import sys import hashlib import importlib import gc import logging import subprocess import os import requests import warnings from requests.packages.urllib3 import exceptions # import own modules sys.path.append('./') try: import sensors except Exception as e: print(e) class MiniProbe(object): """ Main class for the Python Mini Probe """ def __init__(self, http): gc.enable() self.http = http logging.basicConfig( filename="./logs/probe.log", filemode="a", level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s", datefmt='%m/%d/%Y %H:%M:%S' ) def get_import_sensors(self): """ import available sensor modules and return list of sensor objects """ sensor_objects = [] for mod in sensors.__all__: try: sensor_objects.append(self.load_class("sensors.%s.%s" % (mod.lower(), mod))) except Exception as import_error: logging.error("Sensor Import Error! Error message: %s" % import_error) return sensor_objects @staticmethod def load_class(full_class_string): """ dynamically load a class from a string """ class_data = full_class_string.split(".") module_path = ".".join(class_data[:-1]) class_str = class_data[-1] module = importlib.import_module(module_path) return getattr(module, class_str) def read_config(self, path): """ read configuration file and write data to dict """ config = {} try: conf_file = open(path) for line in conf_file: if not (line == '\n'): if not (line.startswith('#')): config[line.split(':')[0]] = line.split(':')[1].rstrip() conf_file.close() return config except Exception as read_error: logging.error("No config found! Error Message: %s Exiting!" 
% read_error) sys.exit() @staticmethod def hash_access_key(key): """ create hash of probes access key """ key = key.encode('utf-8') return hashlib.sha1(key).hexdigest() def create_parameters(self, config, jsondata, i=None): """ create URL parameters for announce, task and data requests """ if i == 'announce': return {'gid': config['gid'], 'key': self.hash_access_key(config['key']), 'protocol': config['protocol'], 'name': config['name'], 'baseinterval': config['baseinterval'], 'sensors': jsondata} else: return {'gid': config['gid'], 'key': self.hash_access_key(config['key']), 'protocol': config['protocol']} def create_url(self, config, i=None, http=False): """ creating the actual URL """ prefix = "https" if http: prefix = "http" if not (i is None) and (i != "data"): return "%s://%s:%s/probe/%s" % ( prefix, config['server'], config['port'], i) elif i == "data": return "%s://%s:%s/probe/%s?gid=%s&protocol=%s&key=%s" % (prefix, config['server'], config['port'], i, config['gid'], config['protocol'], self.hash_access_key(config['key'])) pass else: return "No method given" def build_announce(self, sensor_list): """ build json for announce request """ sensors_avail = [] for sensor in sensor_list: if not sensor.get_sensordef() == "": sensors_avail.append(sensor.get_sensordef()) return sensors_avail def build_task(self, config): """ build data payload for task request. """ task = { 'gid': config['gid'], 'protocol': config['protocol'], 'key': self.hash_access_key(config['key']) } return task def request_to_core(self, req_type, data, config): """ perform different request types to the core """ url = self.create_url(config, req_type, self.http) try: with warnings.catch_warnings(): warnings.simplefilter("ignore", exceptions.InsecureRequestWarning) request_to_core = requests.post(url, data=data, verify=False, timeout=30) logging.info("%s request successfully sent to PRTG Core Server at %s:%s." % (req_type, config["server"], config["port"])) logging.debug("Connecting to %s:%s" % (config["server"], config["port"])) logging.debug("Status Code: %s | Message: %s" % (request_to_core.status_code, request_to_core.text)) return request_to_core except requests.exceptions.Timeout: logging.error("%s Timeout: %s" % (req_type, str(data))) raise except Exception as req_except: logging.error("Exception %s!" % req_except) raise def split_json_response(self, json_response, size=None): """ split up response from task request into predefined chunk sizes """ if not size: size = "10" return [json_response[i:i + int(size)] for i in range(0, len(json_response), int(size))] @staticmethod def clean_mem(): """Ugly brute force method to clean up Mem""" subprocess.call("sync", shell=False) os.popen("sysctl vm.drop_caches=1") os.popen("sysctl vm.drop_caches=2") os.popen("sysctl vm.drop_caches=3")
PaesslerAG/PythonMiniProbe
miniprobe/miniprobe.py
Python
bsd-3-clause
7,571
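A small sketch exercising two helpers from the MiniProbe class above. The config values are placeholders, and the constructor assumes a ./logs directory exists, since it configures file logging there:

probe = MiniProbe(http=False)
config = {'server': 'prtg.example.com', 'port': '443', 'gid': '1',
          'protocol': '1', 'key': 'secret'}

print(MiniProbe.hash_access_key(config['key']))  # sha1 hex digest of the access key
print(probe.create_url(config, 'announce'))      # https://prtg.example.com:443/probe/announce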
""" A module which handles Matrix Expressions """ from .slice import MatrixSlice from .blockmatrix import BlockMatrix, BlockDiagMatrix, block_collapse, blockcut from .funcmatrix import FunctionMatrix from .inverse import Inverse from .matadd import MatAdd from .matexpr import (Identity, MatrixExpr, MatrixSymbol, ZeroMatrix, OneMatrix, matrix_symbols) from .matmul import MatMul from .matpow import MatPow from .trace import Trace, trace from .determinant import Determinant, det from .transpose import Transpose from .adjoint import Adjoint from .hadamard import hadamard_product, HadamardProduct, hadamard_power, HadamardPower from .diagonal import DiagonalMatrix, DiagonalOf, DiagMatrix, diagonalize_vector from .dotproduct import DotProduct from .kronecker import kronecker_product, KroneckerProduct, combine_kronecker
kaushik94/sympy
sympy/matrices/expressions/__init__.py
Python
bsd-3-clause
847
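A short demonstration of the matrix-expression classes re-exported above, using only documented sympy behavior:

from sympy import MatrixSymbol, Identity, Transpose

n = 3
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)

print(Transpose(A * B).doit())   # B.T*A.T: transposing reverses the product
print((A * Identity(n)).doit())  # A: the identity factor is absorbed
print((A * B).shape)             # (3, 3): shapes propagate symbolically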
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-29 16:56
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('jobs', '0033_job_config'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='job',
            name='configs',
        ),
    ]
hotosm/osm-export-tool2
jobs/migrations/0034_remove_job_configs.py
Python
bsd-3-clause
377
from django.utils import timezone from rest_framework import serializers from shop.conf import app_settings from shop.models.cart import CartModel from shop.models.order import OrderModel from shop.modifiers.pool import cart_modifiers_pool from shop.rest.money import MoneyField class OrderListSerializer(serializers.ModelSerializer): number = serializers.CharField( source='get_number', read_only=True, ) url = serializers.URLField( source='get_absolute_url', read_only=True, ) status = serializers.CharField( source='status_name', read_only=True, ) subtotal = MoneyField() total = MoneyField() class Meta: model = OrderModel fields = ['number', 'url', 'created_at', 'updated_at', 'subtotal', 'total', 'status', 'shipping_address_text', 'billing_address_text'] # TODO: these fields are not part of the base model read_only_fields = ['shipping_address_text', 'billing_address_text'] class OrderDetailSerializer(OrderListSerializer): items = app_settings.ORDER_ITEM_SERIALIZER( many=True, read_only=True, ) extra = serializers.DictField(read_only=True) amount_paid = MoneyField(read_only=True) outstanding_amount = MoneyField(read_only=True) cancelable = serializers.BooleanField(read_only=True) is_partially_paid = serializers.SerializerMethodField( method_name='get_partially_paid', help_text="Returns true, if order has been partially paid", ) annotation = serializers.CharField( write_only=True, required=False, ) reorder = serializers.BooleanField( write_only=True, default=False, ) cancel = serializers.BooleanField( write_only=True, default=False, ) active_payment_method = serializers.SerializerMethodField() active_shipping_method = serializers.SerializerMethodField() class Meta: model = OrderModel exclude = ['id', 'customer', 'stored_request', '_subtotal', '_total'] read_only_fields = ['shipping_address_text', 'billing_address_text'] # TODO: not part of OrderBase def get_partially_paid(self, order): return order.amount_paid > 0 def get_active_payment_method(self, order): modifier = cart_modifiers_pool.get_active_payment_modifier(order.extra.get('payment_modifier')) value, label = modifier.get_choice() if modifier else (None, "") return {'value': value, 'label': label} def get_active_shipping_method(self, order): modifier = cart_modifiers_pool.get_active_shipping_modifier(order.extra.get('shipping_modifier')) value, label = modifier.get_choice() if modifier else (None, "") return {'value': value, 'label': label} def update(self, order, validated_data): order.extra.setdefault('addendum', []) if validated_data.get('annotation'): timestamp = timezone.now().isoformat() order.extra['addendum'].append((timestamp, validated_data['annotation'])) order.save() if validated_data['reorder'] is True: cart = CartModel.objects.get_from_request(self.context['request']) order.readd_to_cart(cart) if validated_data['cancel'] is True and order.cancelable(): order.cancel_order() order.save(with_notification=True) return order
awesto/django-shop
shop/serializers/order.py
Python
bsd-3-clause
3,455
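The write-only fields above ('annotation', 'reorder', 'cancel') exist only to drive OrderDetailSerializer.update(). A hypothetical client payload, for illustration:

payload = {
    "annotation": "Customer asked for a delivery update.",  # appended to order.extra['addendum'] with a timestamp
    "reorder": False,  # True copies the order's items back into the active cart
    "cancel": False,   # True cancels the order, provided order.cancelable() allows it
}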
#Copyright (c) 2015, Matthew P. Grosvenor #All rights reserved. See LICENSE for more details import sys import os import subprocess import datetime import thread import threading import Queue import fcntl import select import time import signal #(optionaly) Make pretty logs of everything that we do def log(logfile,msg,tostdout=False,tostderr=False, timestamp=True): timestr = "" if timestamp: timestr += datetime.datetime.now().strftime("%Y%m%dT%H%M%S.%f ") footer = "" if len(msg) > 0 and msg[-1] != "\n": footer = "\n" msg = "%s%s%s" % (timestr, msg,footer) logfile.write(msg) logfile.flush() if tostdout: sys.stdout.write(msg) if tostderr: sys.stderr.write(msg) ###################################################################################################################### #This is a giant work around the completely brain dead subprocess stdin/stdout/communicate behaviour class CThread (threading.Thread): def __init__(self, parent, cmd, returnout, result, tostdout): threading.Thread.__init__(self) self.parent = parent self.result = result self.cmd = cmd self.returnout = returnout self.tostdout = tostdout self.daemon = False self.subproc = None def kill_subproc(self): p = self.subproc p.poll() if p.returncode is None: os.killpg(p.pid, signal.SIGTERM) #Give the process some time to clean up timestep = 0.1 #seconds timeout = 10 #seconds i = 0 while(p.returncode is None and i < (timeout/timestep)): p.poll() time.sleep(timestep) i += 1 if p.returncode is None: os.killpg(p.pid, signal.SIGKILL) #see https://www.youtube.com/watch?v=Up1hGZhvjzs p.wait() return p.returncode def run(self): usereadline = True #Python docs warn that this could break, I've never seen it but am skeptical usereadline = False #Python docs warn that this could break, I've never seen it but am skeptical p = subprocess.Popen(self.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid) self.subproc = p if usereadline: fl = fcntl.fcntl(p.stdout, fcntl.F_GETFL) fcntl.fcntl(p.stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK) fl = fcntl.fcntl(p.stderr, fcntl.F_GETFL) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fl | os.O_NONBLOCK) while True: stdout = None stderr = None if usereadline: #Check if process has died p.poll() if p.returncode is not None: thread.exit() #Unreachable try: stdout = p.stdout.readline() except: None try: stderr = p.stderr.readline() except: None else: try: (stdout,stderr) = p.communicate() if stdout == None or stderr == None: #Does this ever happen?? print "Communicate process is dead..." self.kill_subproc() thread.exit() except: #Communicate throws an exception if the subprocess dies #Ouput our illgotten gains if stdout and stdout != "": self.parent.log(stdout, tostdout=self.tostdout) if stderr and stderr != "": self.parent.log(stderr, tostdout=self.tostdout) if stdout and self.returnout: self.result.put(stdout) if stderr and self.returnout: self.result.put(stderr) thread.exit() #Ouput our illgotten gains if stdout and stdout != "": self.parent.log(stdout, tostdout=self.tostdout) if stderr and stderr != "": self.parent.log(stderr, tostdout=self.tostdout) if stdout and self.returnout: self.result.put(stdout) if stderr and self.returnout: self.result.put(stderr) def __del__(self): #Does this even work? 
Have never seen it happen self.kill_subproc() thread.exit() ###################################################################################################################### #Defines a remote host class Host: def __init__(self, redo, name,uname,logging, init, logfilename): self.pidcount = -1 self.pid2thread = {} #maps PIDs to threads self.name = name self.uname = uname self.redo_main = redo #Populating these should be ported to some general infrastructure at some point self.mac = -1 self.cpu_count = -1 self.ram_space = -1 self.disk_space = -1 self.pinned_cpus = -1 self.free_cpus = -1 self.workdir = self.redo_main.workdir self.logging = logging self.logfilename = logfilename + "-" + self.name self.logfile = open(self.logfilename + ".log","w") #if(init): # self.inithost() #populate host info def log(self,msg,tostdout=False,tostderr=False, timestamp=True): log(self.logfile,msg,tostdout,tostderr,timestamp) def makepid(self): self.pidcount += 1 return "%s-%s" % (self.name,self.pidcount) def cd(self,path): self.workdir = path #This is in the wrong place and does the wrong thing. Beacuse it's blocking, it forces #everyone to wait to do the thing that has probably already been done, except when it hasnt. #going to turn it off for the moment, but there does need to be an init phase at somepoint #def inithost(self): # self.run("mkdir -p %s" % (self.redo_main.workdir)) #Run a command on a remote host return a pid for the command #cmd: Text string of the command to run #timeout: Time in seconds to wait for the command to run, otherwise kill it #blocking: Wait for the the command to finish before continuing. Either wait infinitely, or timeout seconds #pincpu: Pin the command to a single CPU and run it as realtime prioirty def run(self, cmd, timeout=None,block=True, pincpu=-1, realtime=False, returnout=True, tostdout=False ): escaped = cmd.replace("\"","\\\"") escaped = escaped.replace("$","\$") if timeout > 0 and not block: escaped = "timeout %i %s" % (timeout,escaped) workdir_cmd = "mkdir -p %s && cd %s; %s" % (self.workdir, self.workdir,escaped) ssh_cmd = "ssh %s@%s \"%s\"" %(self.uname,self.name,workdir_cmd) pid = self.makepid() self.log("REDO [%s] Running ssh command \"%s\" with pid %s" % (self.name,ssh_cmd,pid), tostdout=tostdout) result = Queue.Queue() ssh_thread = CThread(self, ssh_cmd, returnout, result, tostdout) self.pid2thread[pid] = ssh_thread ssh_thread.start() #Give the thread a litte time to get going while(ssh_thread.subproc is None): None if(block): self.log("REDO [%s]: Waiting for thread to th pid %s terminate..." % (self.name,pid)) ssh_thread.join(timeout) if ssh_thread.isAlive(): self.log("REDO [%s]: Killing thread running pid \"%s\" after timeout..." % (self.name, pid)) ssh_thread.kill_subproc() self.log("REDO [%s]: Waiting for thread to die..." % (self.name)) ssh_thread.join() self.log("REDO [%s]: Thread and process is dead" % (self.name)) else: self.log("REDO [%s]: Thread with pid %s just terminated" % (self.name,pid)) #None return pid def getoutput(self,pid, block=False, timeout=None): results_q = self.pid2thread[pid].result if results_q.empty(): return None return results_q.get(block,timeout) def isalive(self,pid): return self.pid2thread[pid].isAlive() #Wait on a command on a remote host finishing def wait(self, pid, timeout=None, kill=False): procthread = self.pid2thread[pid] #Wait for the thread to start up if it hasn't self.log("REDO [%s]: Waiting for thread with pid \"%s\" to terminate..." 
% (self.name,pid)) procthread.join(timeout) if procthread.isAlive(): if not kill: return None #Timedout, and not going to kill self.log("REDO [%s]: Killing pid \"%s\" after timeout..." % (self.name,pid)) procthread.kill_subproc() #self.log("REDO [%s]: Waiting for thread running pid \"%s\" to die..." % (self.name,pid)) procthread.join() #self.log("REDO [%s]: Thread and running pid \"%s\" is dead" % (self.name,pid)) if procthread.subproc.returncode is not None: self.log("REDO [%s]: Process with pid \"%s\" terminated with return code \"%i\" ..." % (self.name,pid,procthread.subproc.returncode)) else: self.log("REDO [%s]: Process with pid \"%s\" has not yet terminated ..." % (self.name,pid)) return procthread.subproc.returncode #Stop the remote process by sending a signal def kill(self,pid): self.log("REDO [%s]: Killing thread with pid \"%s\" " % (self.name, pid)) proc = self.pid2thread[pid] proc.kill_subproc() self.log("REDO [%s]: Waiting for thread to exit.." %(self.name)) proc.join() self.log("REDO [%s]: Thread has exited.." %(self.name)) return proc.subproc.returncode def docopy(self,copy_cmd,timeout,block,returnout,tostdout): pid = self.makepid() self.log("REDO [%s] - Running copy command \"%s\" with pid %s" % (self.name,copy_cmd,pid), tostdout=tostdout) result = Queue.Queue() copy_thread = CThread(self, copy_cmd, returnout, result, tostdout) self.pid2thread[pid] = copy_thread copy_thread.start() #Give the thread a little time to get going while(copy_thread.subproc is None): None if(block): self.log("REDO [%s]: Waiting for thread with pid %s to terminate..." % (self.name,pid)) copy_thread.join(timeout) if copy_thread.isAlive(): self.log("REDO [%s]: Killing thread running pid \"%s\" after timeout..." % (self.name, pid)) copy_thread.kill_subproc() self.log("REDO [%s]: Waiting for thread to die..." % (self.name)) copy_thread.join() self.log("REDO [%s]: Thread and process is dead" % (self.name)) else: self.log("REDO [%s]: Thread with pid %s just terminated" % (self.name,pid)) #None return pid #Copy data to the remote host with scp def copy_to(self,src,dst,timeout=None,block=True,returnout=True,tostdout=False): scp_cmd = "scp -r %s %s@%s:%s" %(src,self.uname,self.name,dst) return self.docopy(scp_cmd,timeout,block,returnout,tostdout) #Copy data from the remote host with scp def copy_from(self,src,dst,timeout=None,block=True,returnout=True,tostdout=False): scp_cmd = "scp -r %s@%s:%s %s" %(self.uname,self.name,src,dst) return self.docopy(scp_cmd,timeout,block,returnout,tostdout) #Use rsync to minimise copying def sync_to(self, src, dst,timeout=None,block=True,returnout=True,tostdout=False): sync_cmd = "rsync -rv %s %s@%s:%s" %(src,self.uname,self.name,dst) return self.docopy(sync_cmd,timeout,block,returnout,tostdout) #Use rsync to minimise copying def sync_from(self,src,dst,timeout=None,block=True,returnout=True,tostdout=False): sync_cmd = "rsync -rv %s@%s:%s %s" %(self.uname,self.name,src,dst) return self.docopy(sync_cmd,timeout,block,returnout,tostdout) #Nice string representation def __str__(self): return "'%s'" % self.name def __unicode__(self): return unicode("'%s'" % self.name) def __repr__(self): return "'%s'" % self.name def __del__(self): self.redo_main.log("REDO [%s]: Destroying host %s" % (self.name,self.name)) for pid in self.pid2thread: procthread = self.pid2thread[pid] if procthread.isAlive(): #self.redo_main.log("REDO [%s]: Killing thread in destructor..." % (self.name)) procthread.kill_subproc() #self.redo_main.log("REDO [%s]: Waiting for thread to die..."
% (self.name)) procthread.join() #self.redo_main.log("REDO [%s]: Thread is dead" % (self.main)) ###################################################################################################################### #Operate on a list of hosts #Expose the same interface as a single host, take lists of arguments whereever sensible #This is syntactic sugar over map, but useful to minimize code overhead in derivitve apps class Hosts: def __init__(self,hostlist): self.hostlist = hostlist #Run a command on a remote host return a pid for the command #cmd: Text string of the command to run #timeout: Time in seconds to wait for the command to run, otherwise kill it #blocking: Wait for the the command to finish before continuing. Either wait infinitely, or timeout seconds #pincpu: Pin the command to a single CPU and run it as realtime prioirty def run(self, cmds, timeout=None, block=True, pincpu=-1, realtime=False, returnout=True, tostdout=False): if type(cmds) is not list: cmds = [cmds] * len(self.hostlist) #This one is special, we want things to run in parallell. So we don't pass the blocking through pids = map( (lambda (cmd,host): host.run(cmd,timeout,False,pincpu,realtime,returnout,tostdout)), zip(cmds,self.hostlist)) if block == True: self.wait(pids,timeout=None) return pids def cd(self,paths): if type(paths) is not list: paths = [paths] * len(self.hostlist) return map( (lambda (host,path): host.cd(path)), zip(self.hostlist,paths)) def getoutput(self,pids, block=False, timeout=None): return map( (lambda (host,pid): host.getoutput(pid,block,timeout)), zip(self.hostlist,pids)) #Wait on a command on a remote host finishing #Bug or feautre this waits for timeout seconds on all pids. Which is potentially much bigger than timeout... def wait(self,pids, timeout=None, kill=False): #Try wating the timeout, if that works, keep doing it, but decrement the time to wait, otherwise turn off the timeout if timeout is not None: now = time.time() exp = now + timeout for (host,pid) in zip(self.hostlist,pids): now = time.time() if now > exp: print "Time is up, no timeout anymore.." return map( (lambda (host,pid): host.wait(pid,0,kill)), zip(self.hostlist,pids)) left = exp - now print "Waiting for %f seconds for process %s to exit..." % (left,pid) host.wait(pid,left,kill) print "All hosts exited, returning results..." 
return map( (lambda (host,pid): host.wait(pid,0,kill)), zip(self.hostlist,pids)) else: return map( (lambda (host,pid): host.wait(pid,timeout,kill)), zip(self.hostlist,pids)) #Stop the remote process by sending a signal def kill(self,pids): return map( (lambda (host,pid): host.kill(pid)), zip(self.hostlist,pids)) #Copy data to the remote host with scp def copy_to(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): if type(srcs) is not list: srcs = [srcs] * len(self.hostlist) if type(dsts) is not list: dsts = [dsts] * len(self.hostlist) pids = map( (lambda (host,src,dst): host.copy_to(src,dst,timeout,False,returnout,tostdout)), zip(self.hostlist,srcs, dsts)) if block: self.wait(pids,timeout=None) return pids #Copy data from the remote host with scp def copy_from(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): if type(srcs) is not list: srcs = [srcs] * len(self.hostlist) if type(dsts) is not list: dsts = [dsts] * len(self.hostlist) pids = map( (lambda (host,src,dst): host.copy_from(src,dst,timeout,False,returnout,tostdout)), zip(self.hostlist,srcs, dsts)) if block: self.wait(pids,timeout=None) return pids #Use rsync to minimise copying def sync_to(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): if type(srcs) is not list: srcs = [srcs] * len(self.hostlist) if type(dsts) is not list: dsts = [dsts] * len(self.hostlist) pids = map( (lambda (host,src,dst): host.sync_to(src,dst,timeout,False,returnout,tostdout)), zip(self.hostlist,srcs, dsts)) if block: self.wait(pids,timeout=None) return pids #Use rsync to minimise copying def sync_from(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): if type(srcs) is not list: srcs = [srcs] * len(self.hostlist) if type(dsts) is not list: dsts = [dsts] * len(self.hostlist) pids = map( (lambda (host,src,dst): host.sync_from(src,dst,timeout,False,returnout,tostdout)), zip(self.hostlist,srcs, dsts)) if block: self.wait(pids,timeout=None) return pids #Nice string representation def __str__(self): return str(self.hostlist) def __unicode__(self): return unicode(str(self.hostlist)) def __repr__(self): return str(self.hostlist) ###################################################################################################################### class Redo: def __init__(self, hostnames, unames, workdir="/tmp/redo/",logging=True): self.pid2thread = {} self.pidcount = 0 self.logging = logging self.workdir = workdir if not os.path.exists(self.workdir): os.makedirs(self.workdir) os.chdir(self.workdir) if logging: self.logfilename = self.workdir + "/redo_%s" % (datetime.datetime.now().strftime("%Y%m%dT%H%M%S.%f")) self.logfile = open(self.logfilename + ".log","w") #Make a list of empty host structures if type(hostnames) != list: hostnames = [hostnames] init = True if type(unames) == list: self.hostlist = [ Host(self,host,uname, logging, init, self.logfilename) for host,uname in zip(hostnames,unames) ] else: self.hostlist = [ Host(self,host,unames, logging, init, self.logfilename) for host in hostnames ] self.hosts = Hosts(self.hostlist) #Get a range of hosts def gethosts(self, start, stop): result = [] first = False for host in self.hostlist: if host.name == start: first = True #if start == stop: #return host if first: result.append(host) if host.name == stop: break return Hosts(result) #Allows us to use slice notation with both integer and string indexes.
Neat, but tricky def __getitem__(self,key): start = None stop = None if type(key) is slice: if type(key.start) is int: start = self.hostlist[key.start].name if type(key.start) is str: start = key.start if key.start is None: start = self.hostlist[0].name if type(key.stop) is int: idx = key.stop if key.stop > len(self.hostlist): idx = len(self.hostlist) -1 stop = self.hostlist[idx].name if type(key.stop) is str: stop = key.stop if key.stop is None: stop = self.hostlist[-1].name if type(key) is int: start = self.hostlist[key].name stop = self.hostlist[key].name if type(key) is str: start = key stop = key return self.gethosts(start,stop) def __len__(self): return len(self.hostlist) #Run a command on a remote host return a pid for the command #cmd: Text string of the command to run #timeout: Time in seconds to wait for the command to run, otherwise kill it #blocking: Wait for the the command to finish before continuing. Either wait infinitely, or timeout seconds #pincpu: Pin the command to a single CPU and run it as realtime prioirty def run(self, cmds, timeout=None,block=True, pincpu=-1, realtime=False, returnout=True, tostdout=False): return self.hosts.run(cmds,timeout,block,pincpu,realtime,returnout,tostdout) def cd(self,path): return self.hosts.cd(path) def getoutput(self,pid, block=False, timeout=None): return self.hosts.getoutput(pid,block,timeout) #Wait on a command on a remote host finishing def wait(self,pids, timeout=None, kill=False): return self.hosts.wait(pids,timeout,kill) #Stop the remote process by sending a signal def kill(self,pids): return self.hosts.kill(pids) #Copy data to the remote host with scp def copy_to(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): return self.hosts.copy_to(srcs,dsts,timeout,block,returnout,tostdout) def copy_from(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): return self.hosts.copy_from(srcs,dsts,timeout,block,returnout,tostdout) def sync_to(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): return self.hosts.sync_to(srcs,dsts,timeout,block,returnout,tostdout) def sync_from(self,srcs,dsts,timeout=None,block=True,returnout=True,tostdout=False): return self.hosts.sync_from(srcs,dsts,timeout,block,returnout,tostdout) def makepid(self): self.pidcount += 1 return "%s-%s" % ("local",self.pidcount) def local_run(self, cmd, timeout=None,block=True, returnout=True, tostdout=False ): escaped = cmd.replace("\"","\\\"") if timeout > 0 and not block:#Won't work on mac... :-( escaped = "timeout %i %s" % (timeout,escaped) local_cmd = "%s" %(escaped) pid = self.makepid() self.log("REDO [main]: Running command \"%s\" with pid %s" % (local_cmd,pid), tostdout=tostdout) self.log("REDO [main]: %s" % (local_cmd), tostdout=tostdout) result = Queue.Queue() run_thread = CThread(self, local_cmd, returnout, result, tostdout) self.pid2thread[pid] = run_thread run_thread.start() #Give the thread a litte time to get going while(run_thread.subproc is None): None if(block): #print "Waiting for thread to th pid %s terminate..." 
% (pid) run_thread.join(timeout) if run_thread.isAlive(): self.log("REDO [main]: Killing thread after timeout...") run_thread.kill_subproc() #self.log("REDO [main]: Waiting for thread to die...") run_thread.join() #self.log("REDO [main]: Thread and process is dead") else: None self.log("REDO [main]: Thread with pid %s just terminated" % (pid)) return pid def local_getoutput(self,pid, block=False, timeout=None): results_q = self.pid2thread[pid].result if results_q.empty(): return None return results_q.get(block,timeout) def local_isalive(self,pid): return self.pid2thread[pid].isAlive() #Wait on a command on a remote host finishing def local_wait(self, pid, timeout=None, kill=False): procthread = self.pid2thread[pid] #Wait for the thread to start up if it hasn't self.log("REDO [main]: Waiting for thread to th pid %s terminate..." % (pid)) procthread.join(timeout) if procthread.isAlive(): if not kill: return None #Timedout, and not going to kill self.log("REDO [main]: Killing subprocess after timeout...") procthread.kill_subproc() self.log("REDO [main]: Waiting for thread to die...") procthread.join() self.log("REDO [main]: Thread and process is dead") return procthread.subproc.returncode #Stop the remote process by sending a signal def local_kill(self,pid): self.log("REDO [main]: Killing thread") proc = self.pid2thread[pid] proc.kill_subproc() self.log("REDO [main]: Waiting for thread to exit..") proc.join() self.log("REDO [main]: Thread has exited..") return proc.subproc.returncode def local_cd(self,path): os.chdir(os.path.expanduser(path)) def log(self,msg,tostdout=False,tostderr=False, timestamp=True): log(self.logfile,msg,tostdout,tostderr,timestamp)
mgrosvenor/redo
redo.py
Python
bsd-3-clause
26,098
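A minimal usage sketch of the Redo API defined above; the hostnames and username are placeholders. Note the module is Python 2 code (print statements, the thread and Queue modules, tuple-unpacking lambdas):

r = Redo(["host1", "host2"], "ubuntu", workdir="/tmp/redo/")
pids = r.run("uname -a", block=True)      # run on every host and wait for completion
print r.getoutput(pids)                   # one queued output entry per host
r.copy_to("./results.txt", "/tmp/redo/")  # scp the file to every host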
"""Tests for spelling.able_atable check.""" from proselint.checks.spelling import able_atable as chk from .check import Check class TestCheck(Check): """The test class for spelling.able_atable.""" __test__ = True @property def this_check(self): """Boilerplate.""" return chk def test_smoke(self): """Basic smoke test for spelling.able_atable.""" assert self.passes("""Smoke phrase with nothing flagged.""") assert not self.passes("""There was a demonstratable difference.""")
amperser/proselint
tests/test_spelling_able_atable.py
Python
bsd-3-clause
543
# Copyright (C) 2003-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import dns.rdtypes.mxbase

class AFSDB(dns.rdtypes.mxbase.UncompressedMX):
    """AFSDB record

    @ivar subtype: the subtype value
    @type subtype: int
    @ivar hostname: the hostname name
    @type hostname: dns.name.Name object"""

    # Use the property mechanism to make "subtype" an alias for the
    # "preference" attribute, and "hostname" an alias for the "exchange"
    # attribute.
    #
    # This lets us inherit the UncompressedMX implementation but lets
    # the caller use appropriate attribute names for the rdata type.
    #
    # We probably lose some performance vs. a cut-and-paste
    # implementation, but this way we don't copy code, and that's
    # good.

    def get_subtype(self):
        return self.preference

    def set_subtype(self, subtype):
        self.preference = subtype

    subtype = property(get_subtype, set_subtype)

    def get_hostname(self):
        return self.exchange

    def set_hostname(self, hostname):
        self.exchange = hostname

    hostname = property(get_hostname, set_hostname)
liyongyue/dnsspider
dns/rdtypes/ANY/AFSDB.py
Python
isc
1,841
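The aliasing trick in AFSDB above is plain Python: property() re-exposes inherited attributes under rdata-appropriate names. A self-contained sketch of the same pattern (the class names here are illustrative, not dnspython API):

class MXBase(object):
    def __init__(self, preference, exchange):
        self.preference = preference
        self.exchange = exchange

class AFSDBLike(MXBase):
    subtype = property(lambda self: self.preference,
                       lambda self, value: setattr(self, 'preference', value))
    hostname = property(lambda self: self.exchange,
                        lambda self, value: setattr(self, 'exchange', value))

rec = AFSDBLike(1, 'afsdb.example.com.')
assert rec.subtype == rec.preference == 1  # both names reach the same attribute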
from sleekxmpp.test import * from sleekxmpp.xmlstream.stanzabase import ElementBase class TestElementBase(SleekTest): def testFixNs(self): """Test fixing namespaces in an XPath expression.""" e = ElementBase() ns = "http://jabber.org/protocol/disco#items" result = e._fix_ns("{%s}foo/bar/{abc}baz/{%s}more" % (ns, ns)) expected = "/".join(["{%s}foo" % ns, "{%s}bar" % ns, "{abc}baz", "{%s}more" % ns]) self.failUnless(expected == result, "Incorrect namespace fixing result: %s" % str(result)) def testExtendedName(self): """Test element names of the form tag1/tag2/tag3.""" class TestStanza(ElementBase): name = "foo/bar/baz" namespace = "test" stanza = TestStanza() self.check(stanza, """ <foo xmlns="test"> <bar> <baz /> </bar> </foo> """) def testGetStanzaValues(self): """Test getStanzaValues using plugins and substanzas.""" class TestStanzaPlugin(ElementBase): name = "foo2" namespace = "foo" interfaces = set(('bar', 'baz')) plugin_attrib = "foo2" class TestSubStanza(ElementBase): name = "subfoo" namespace = "foo" interfaces = set(('bar', 'baz')) class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) subitem = set((TestSubStanza,)) register_stanza_plugin(TestStanza, TestStanzaPlugin) stanza = TestStanza() stanza['bar'] = 'a' stanza['foo2']['baz'] = 'b' substanza = TestSubStanza() substanza['bar'] = 'c' stanza.append(substanza) values = stanza.getStanzaValues() expected = {'bar': 'a', 'baz': '', 'foo2': {'bar': '', 'baz': 'b'}, 'substanzas': [{'__childtag__': '{foo}subfoo', 'bar': 'c', 'baz': ''}]} self.failUnless(values == expected, "Unexpected stanza values:\n%s\n%s" % (str(expected), str(values))) def testSetStanzaValues(self): """Test using setStanzaValues with substanzas and plugins.""" class TestStanzaPlugin(ElementBase): name = "pluginfoo" namespace = "foo" interfaces = set(('bar', 'baz')) plugin_attrib = "plugin_foo" class TestStanzaPlugin2(ElementBase): name = "pluginfoo2" namespace = "foo" interfaces = set(('bar', 'baz')) plugin_attrib = "plugin_foo2" class TestSubStanza(ElementBase): name = "subfoo" namespace = "foo" interfaces = set(('bar', 'baz')) class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) subitem = set((TestSubStanza,)) register_stanza_plugin(TestStanza, TestStanzaPlugin) register_stanza_plugin(TestStanza, TestStanzaPlugin2) stanza = TestStanza() values = {'bar': 'a', 'baz': '', 'plugin_foo': {'bar': '', 'baz': 'b'}, 'plugin_foo2': {'bar': 'd', 'baz': 'e'}, 'substanzas': [{'__childtag__': '{foo}subfoo', 'bar': 'c', 'baz': ''}]} stanza.setStanzaValues(values) self.check(stanza, """ <foo xmlns="foo" bar="a"> <pluginfoo baz="b" /> <pluginfoo2 bar="d" baz="e" /> <subfoo bar="c" /> </foo> """) def testGetItem(self): """Test accessing stanza interfaces.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz', 'qux')) sub_interfaces = set(('baz',)) def getQux(self): return 'qux' class TestStanzaPlugin(ElementBase): name = "foobar" namespace = "foo" plugin_attrib = "foobar" interfaces = set(('fizz',)) TestStanza.subitem = (TestStanza,) register_stanza_plugin(TestStanza, TestStanzaPlugin) stanza = TestStanza() substanza = TestStanza() stanza.append(substanza) stanza.setStanzaValues({'bar': 'a', 'baz': 'b', 'qux': 42, 'foobar': {'fizz': 'c'}}) # Test non-plugin interfaces expected = {'substanzas': [substanza], 'bar': 'a', 'baz': 'b', 'qux': 'qux', 'meh': ''} for interface, value in expected.items(): result = stanza[interface] self.failUnless(result == value, 
"Incorrect stanza interface access result: %s" % result) # Test plugin interfaces self.failUnless(isinstance(stanza['foobar'], TestStanzaPlugin), "Incorrect plugin object result.") self.failUnless(stanza['foobar']['fizz'] == 'c', "Incorrect plugin subvalue result.") def testSetItem(self): """Test assigning to stanza interfaces.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz', 'qux')) sub_interfaces = set(('baz',)) def setQux(self, value): pass class TestStanzaPlugin(ElementBase): name = "foobar" namespace = "foo" plugin_attrib = "foobar" interfaces = set(('foobar',)) register_stanza_plugin(TestStanza, TestStanzaPlugin) stanza = TestStanza() stanza['bar'] = 'attribute!' stanza['baz'] = 'element!' stanza['qux'] = 'overridden' stanza['foobar'] = 'plugin' self.check(stanza, """ <foo xmlns="foo" bar="attribute!"> <baz>element!</baz> <foobar foobar="plugin" /> </foo> """) def testDelItem(self): """Test deleting stanza interface values.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz', 'qux')) sub_interfaces = set(('bar',)) def delQux(self): pass class TestStanzaPlugin(ElementBase): name = "foobar" namespace = "foo" plugin_attrib = "foobar" interfaces = set(('foobar',)) register_stanza_plugin(TestStanza, TestStanzaPlugin) stanza = TestStanza() stanza['bar'] = 'a' stanza['baz'] = 'b' stanza['qux'] = 'c' stanza['foobar']['foobar'] = 'd' self.check(stanza, """ <foo xmlns="foo" baz="b" qux="c"> <bar>a</bar> <foobar foobar="d" /> </foo> """) del stanza['bar'] del stanza['baz'] del stanza['qux'] del stanza['foobar'] self.check(stanza, """ <foo xmlns="foo" qux="c" /> """) def testModifyingAttributes(self): """Test modifying top level attributes of a stanza's XML object.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) stanza = TestStanza() self.check(stanza, """ <foo xmlns="foo" /> """) self.failUnless(stanza._get_attr('bar') == '', "Incorrect value returned for an unset XML attribute.") stanza._set_attr('bar', 'a') stanza._set_attr('baz', 'b') self.check(stanza, """ <foo xmlns="foo" bar="a" baz="b" /> """) self.failUnless(stanza._get_attr('bar') == 'a', "Retrieved XML attribute value is incorrect.") stanza._set_attr('bar', None) stanza._del_attr('baz') self.check(stanza, """ <foo xmlns="foo" /> """) self.failUnless(stanza._get_attr('bar', 'c') == 'c', "Incorrect default value returned for an unset XML attribute.") def testGetSubText(self): """Test retrieving the contents of a sub element.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar',)) def setBar(self, value): wrapper = ET.Element("{foo}wrapper") bar = ET.Element("{foo}bar") bar.text = value wrapper.append(bar) self.xml.append(wrapper) def getBar(self): return self._get_sub_text("wrapper/bar", default="not found") stanza = TestStanza() self.failUnless(stanza['bar'] == 'not found', "Default _get_sub_text value incorrect.") stanza['bar'] = 'found' self.check(stanza, """ <foo xmlns="foo"> <wrapper> <bar>found</bar> </wrapper> </foo> """) self.failUnless(stanza['bar'] == 'found', "_get_sub_text value incorrect: %s." 
% stanza['bar']) def testSubElement(self): """Test setting the contents of a sub element.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) def setBaz(self, value): self._set_sub_text("wrapper/baz", text=value) def getBaz(self): return self._get_sub_text("wrapper/baz") def setBar(self, value): self._set_sub_text("wrapper/bar", text=value) def getBar(self): return self._get_sub_text("wrapper/bar") stanza = TestStanza() stanza['bar'] = 'a' stanza['baz'] = 'b' self.check(stanza, """ <foo xmlns="foo"> <wrapper> <bar>a</bar> <baz>b</baz> </wrapper> </foo> """) stanza._set_sub_text('wrapper/bar', text='', keep=True) self.check(stanza, """ <foo xmlns="foo"> <wrapper> <bar /> <baz>b</baz> </wrapper> </foo> """, use_values=False) stanza['bar'] = 'a' stanza._set_sub_text('wrapper/bar', text='') self.check(stanza, """ <foo xmlns="foo"> <wrapper> <baz>b</baz> </wrapper> </foo> """) def testDelSub(self): """Test removing sub elements.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) def setBar(self, value): self._set_sub_text("path/to/only/bar", value); def getBar(self): return self._get_sub_text("path/to/only/bar") def delBar(self): self._del_sub("path/to/only/bar") def setBaz(self, value): self._set_sub_text("path/to/just/baz", value); def getBaz(self): return self._get_sub_text("path/to/just/baz") def delBaz(self): self._del_sub("path/to/just/baz") stanza = TestStanza() stanza['bar'] = 'a' stanza['baz'] = 'b' self.check(stanza, """ <foo xmlns="foo"> <path> <to> <only> <bar>a</bar> </only> <just> <baz>b</baz> </just> </to> </path> </foo> """) del stanza['bar'] del stanza['baz'] self.check(stanza, """ <foo xmlns="foo"> <path> <to> <only /> <just /> </to> </path> </foo> """, use_values=False) stanza['bar'] = 'a' stanza['baz'] = 'b' stanza._del_sub('path/to/only/bar', all=True) self.check(stanza, """ <foo xmlns="foo"> <path> <to> <just> <baz>b</baz> </just> </to> </path> </foo> """) def testMatch(self): """Test matching a stanza against an XPath expression.""" class TestSubStanza(ElementBase): name = "sub" namespace = "baz" interfaces = set(('attrib',)) class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar','baz', 'qux')) sub_interfaces = set(('qux',)) subitem = (TestSubStanza,) def setQux(self, value): self._set_sub_text('qux', text=value) def getQux(self): return self._get_sub_text('qux') class TestStanzaPlugin(ElementBase): name = "plugin" namespace = "http://test/slash/bar" interfaces = set(('attrib',)) register_stanza_plugin(TestStanza, TestStanzaPlugin) stanza = TestStanza() self.failUnless(stanza.match("foo"), "Stanza did not match its own tag name.") self.failUnless(stanza.match("{foo}foo"), "Stanza did not match its own namespaced name.") stanza['bar'] = 'a' self.failUnless(stanza.match("foo@bar=a"), "Stanza did not match its own name with attribute value check.") stanza['baz'] = 'b' self.failUnless(stanza.match("foo@bar=a@baz=b"), "Stanza did not match its own name with multiple attributes.") stanza['qux'] = 'c' self.failUnless(stanza.match("foo/qux"), "Stanza did not match with subelements.") stanza['qux'] = '' self.failUnless(stanza.match("foo/qux") == False, "Stanza matched missing subinterface element.") self.failUnless(stanza.match("foo/bar") == False, "Stanza matched nonexistent element.") stanza['plugin']['attrib'] = 'c' self.failUnless(stanza.match("foo/plugin@attrib=c"), "Stanza did not match with plugin and attribute.") 
self.failUnless(stanza.match("foo/{http://test/slash/bar}plugin"), "Stanza did not match with namespaced plugin.") substanza = TestSubStanza() substanza['attrib'] = 'd' stanza.append(substanza) self.failUnless(stanza.match("foo/sub@attrib=d"), "Stanza did not match with substanzas and attribute.") self.failUnless(stanza.match("foo/{baz}sub"), "Stanza did not match with namespaced substanza.") def testComparisons(self): """Test comparing ElementBase objects.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) stanza1 = TestStanza() stanza1['bar'] = 'a' self.failUnless(stanza1, "Stanza object does not evaluate to True") stanza2 = TestStanza() stanza2['baz'] = 'b' self.failUnless(stanza1 != stanza2, "Different stanza objects incorrectly compared equal.") stanza1['baz'] = 'b' stanza2['bar'] = 'a' self.failUnless(stanza1 == stanza2, "Equal stanzas incorrectly compared inequal.") def testKeys(self): """Test extracting interface names from a stanza object.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) plugin_attrib = 'qux' register_stanza_plugin(TestStanza, TestStanza) stanza = TestStanza() self.failUnless(set(stanza.keys()) == set(('bar', 'baz')), "Returned set of interface keys does not match expected.") stanza.enable('qux') self.failUnless(set(stanza.keys()) == set(('bar', 'baz', 'qux')), "Incorrect set of interface and plugin keys.") def testGet(self): """Test accessing stanza interfaces using get().""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) stanza = TestStanza() stanza['bar'] = 'a' self.failUnless(stanza.get('bar') == 'a', "Incorrect value returned by stanza.get") self.failUnless(stanza.get('baz', 'b') == 'b', "Incorrect default value returned by stanza.get") def testSubStanzas(self): """Test manipulating substanzas of a stanza object.""" class TestSubStanza(ElementBase): name = "foobar" namespace = "foo" interfaces = set(('qux',)) class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) subitem = (TestSubStanza,) stanza = TestStanza() substanza1 = TestSubStanza() substanza2 = TestSubStanza() substanza1['qux'] = 'a' substanza2['qux'] = 'b' # Test appending substanzas self.failUnless(len(stanza) == 0, "Incorrect empty stanza size.") stanza.append(substanza1) self.check(stanza, """ <foo xmlns="foo"> <foobar qux="a" /> </foo> """, use_values=False) self.failUnless(len(stanza) == 1, "Incorrect stanza size with 1 substanza.") stanza.append(substanza2) self.check(stanza, """ <foo xmlns="foo"> <foobar qux="a" /> <foobar qux="b" /> </foo> """, use_values=False) self.failUnless(len(stanza) == 2, "Incorrect stanza size with 2 substanzas.") # Test popping substanzas stanza.pop(0) self.check(stanza, """ <foo xmlns="foo"> <foobar qux="b" /> </foo> """, use_values=False) # Test iterating over substanzas stanza.append(substanza1) results = [] for substanza in stanza: results.append(substanza['qux']) self.failUnless(results == ['b', 'a'], "Iteration over substanzas failed: %s." 
% str(results)) def testCopy(self): """Test copying stanza objects.""" class TestStanza(ElementBase): name = "foo" namespace = "foo" interfaces = set(('bar', 'baz')) stanza1 = TestStanza() stanza1['bar'] = 'a' stanza2 = stanza1.__copy__() self.failUnless(stanza1 == stanza2, "Copied stanzas are not equal to each other.") stanza1['baz'] = 'b' self.failUnless(stanza1 != stanza2, "Divergent stanza copies incorrectly compared equal.") suite = unittest.TestLoader().loadTestsFromTestCase(TestElementBase)
skinkie/SleekXMPP--XEP-0080-
tests/test_stanza_element.py
Python
mit
19,610
from conans.errors import NotFoundException, RequestErrorException, RecipeNotFoundException, \ PackageNotFoundException from conans.server.service.common.common import CommonService class ConanService(CommonService): """Handles authorization and expose methods for REST API""" def __init__(self, authorizer, server_store, auth_user): self._authorizer = authorizer self._server_store = server_store self._auth_user = auth_user def get_recipe_snapshot(self, ref): """Gets a dict with file paths and the md5: {filename: md5} """ self._authorizer.check_read_conan(self._auth_user, ref) latest_ref = self._get_latest_ref(ref) snap = self._server_store.get_recipe_snapshot(latest_ref) if not snap: raise RecipeNotFoundException(latest_ref) return snap def get_conanfile_download_urls(self, ref, files_subset=None): """Gets a dict with filepaths and the urls: {filename: url} """ self._authorizer.check_read_conan(self._auth_user, ref) latest_ref = self._get_latest_ref(ref) urls = self._server_store.get_download_conanfile_urls(latest_ref, files_subset, self._auth_user) if not urls: raise RecipeNotFoundException(latest_ref) return urls def get_conanfile_upload_urls(self, ref, filesizes): _validate_conan_reg_filenames(list(filesizes.keys())) self._authorizer.check_write_conan(self._auth_user, ref) latest_ref = self._get_latest_ref(ref) urls = self._server_store.get_upload_conanfile_urls(latest_ref, filesizes, self._auth_user) return urls # Package methods def get_package_snapshot(self, pref): """Gets a list with filepaths and the urls and md5: [filename: {'url': url, 'md5': md5}] """ self._authorizer.check_read_package(self._auth_user, pref) pref = self._get_latest_pref(pref) snap = self._server_store.get_package_snapshot(pref) return snap def get_package_download_urls(self, pref, files_subset=None): """Gets a list with filepaths and the urls and md5: [filename: {'url': url, 'md5': md5}] """ new_pref = self._get_latest_pref(pref) self._authorizer.check_read_package(self._auth_user, new_pref) urls = self._server_store.get_download_package_urls(new_pref, files_subset=files_subset) return urls def get_package_upload_urls(self, pref, filesizes): """ :param pref: PackageReference :param filesizes: {filepath: bytes} :return {filepath: url} """ new_pref = self._get_latest_pref(pref) try: self._server_store.get_recipe_snapshot(new_pref.ref) except NotFoundException: raise PackageNotFoundException(new_pref) self._authorizer.check_write_package(self._auth_user, new_pref) urls = self._server_store.get_upload_package_urls(new_pref, filesizes, self._auth_user) return urls def _validate_conan_reg_filenames(files): message = "Invalid conans request" # Could be partial uploads, so we can't expect for all files to be present # # conanfile and digest in files # if CONANFILE not in files: # # Log something # raise RequestErrorException("Missing %s" % CONANFILE) # if CONAN_MANIFEST not in files: # # Log something # raise RequestErrorException("Missing %s" % CONAN_MANIFEST) # All contents in same directory (from conan_id) for filename in files: if ".." in filename: # Log something raise RequestErrorException(message)
memsharded/conan
conans/server/service/v1/service.py
Python
mit
3,783
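_validate_conan_reg_filenames() above is a path-traversal guard: any upload path containing ".." is rejected before upload URLs are handed out. For illustration (RequestErrorException is imported from conans.errors at the top of the file):

_validate_conan_reg_filenames(["conanfile.py", "conanmanifest.txt"])  # passes silently
try:
    _validate_conan_reg_filenames(["../../etc/passwd"])
except RequestErrorException:
    print("rejected: '..' is not allowed in uploaded file names")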
""" Computes, plots, and saves the 2D vorticity field from a PetIBM simulation at saved time-steps. """ from snake.petibm.simulation import PetIBMSimulation simulation = PetIBMSimulation() simulation.read_grid() for time_step in simulation.get_time_steps(): simulation.read_fields('vorticity', time_step) simulation.plot_contour('vorticity', field_range=(-5.0, 5.0, 101), filled_contour=True, view=[-2.0, -5.0, 15.0, 5.0], style='mesnardo', width=8.0)
barbagroup/cuIBM
external/snake-0.3/examples/petibm/plotVorticity.py
Python
mit
590
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase


class TestWidgetStyles(MpfMcTestCase):
    def get_machine_path(self):
        return 'tests/machine_files/widget_styles'

    def get_config_file(self):
        return 'test_widget_styles.yaml'

    def get_widget(self, index=0):
        return self.mc.targets['default'].current_slide.widgets[index].widget

    def test_style_loading(self):
        self.assertIn('text_default', self.mc.machine_config['widget_styles'])

    def test_style(self):
        self.mc.events.post('slide1')
        self.advance_time()

        # Test named style
        # font size set in style
        self.assertEqual(self.get_widget().font_size, 100)
        # halign set in style and widget, widget should win
        self.assertEqual(self.get_widget().halign, 'right')

        # second widget has no style set, so it should get the default
        self.assertEqual(self.get_widget(1).font_size, 21)

    def test_default_style(self):
        self.mc.events.post('slide3')
        self.advance_time()
        self.assertEqual(self.get_widget().color, [1, 0, 0, 1])

    def test_invalid_style(self):
        self.mc.events.post('slide4')
        with self.assertRaises(Exception) as e:
            self.advance_time()
        self.assertIsInstance(e.exception.__cause__, ValueError)

    def test_local_setting_overrides_style(self):
        self.mc.events.post('slide5')
        self.advance_time()
        self.assertEqual(self.get_widget().font_size, 50)

    def test_stacked_styles(self):
        self.mc.events.post('slide6')
        self.advance_time()
        self.assertEqual(self.get_widget().font_size, 100)
        self.assertEqual(self.get_widget().color, [0.0, 0.0, 1.0, 1])

    def test_stacked_order_overrides(self):
        self.mc.events.post('slide7')
        self.advance_time()
        self.assertEqual(self.get_widget().font_size, 21)
        self.assertEqual(self.get_widget().color, [0.0, 0.0, 1.0, 1])

    # todo some future release
    # def test_mode_style(self):
    #     self.mc.modes['mode1'].start()
    #     self.advance_time()
    #
    #     self.mc.events.post('slide2')
    #     self.advance_time()
    #
    #     # widget with no style, should pickup default out of the mode
    #     # text_strings, rather than the machine wide one
    #     self.assertEqual(self.get_widget().font_size, 50)
    #
    #     # mode widget with style from machine wide config
    #     self.assertEqual(self.get_widget(1).font_size, 100)
    #
    #     # mode widget with style name that's not valid, so it should
    #     # pickup the default
    #     self.assertEqual(self.get_widget(2).font_size, 50)
missionpinball/mpf_mc
mpfmc/tests/test_WidgetStyles.py
Python
mit
2,684
import torch
import torch.nn as nn
import argparse
from sklearn.metrics import classification_report

from losses import CenterLoss
from mnist_net import Net
import mnist_loader


def main():
    args = parse_args()

    # Device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Dataset
    train_loader, test_loader, classes = mnist_loader.load_dataset(args.dataset_dir, img_show=False)

    # Model
    model = Net().to(device)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    print(model)

    # Loss
    nllloss = nn.NLLLoss().to(device)  # CrossEntropyLoss = log_softmax + NLLLoss
    loss_weight = 1
    centerloss = CenterLoss(10, 2).to(device)

    # Test a model.
    print('Testing a trained model....')
    test_acc, test_loss = test(device, test_loader, model, nllloss, loss_weight, centerloss)
    stdout_temp = 'test acc: {:<8}, test loss: {:<8}'
    print(stdout_temp.format(test_acc, test_loss))


def test(device, test_loader, model, nllloss, loss_weight, centerloss):
    model = model.eval()

    # Prediction
    running_loss = 0.0
    pred_list = []
    label_list = []
    with torch.no_grad():
        for i, (imgs, labels) in enumerate(test_loader):
            # Set batch data.
            imgs, labels = imgs.to(device), labels.to(device)
            # Predict labels.
            ip1, pred = model(imgs)
            # Calculate loss.
            loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
            # Append predictions and labels.
            running_loss += loss.item()
            pred_list += [int(p.argmax()) for p in pred]
            label_list += [int(l) for l in labels]

    # Calculate accuracy.
    result = classification_report(pred_list, label_list, output_dict=True)
    test_acc = round(result['weighted avg']['f1-score'], 6)
    test_loss = round(running_loss / len(test_loader.dataset), 6)

    return test_acc, test_loss


def parse_args():
    arg_parser = argparse.ArgumentParser(description="parser for focus one")
    arg_parser.add_argument("--dataset_dir", type=str, default='D:/workspace/datasets')
    arg_parser.add_argument("--model_path", type=str, default='../outputs/models/mnist_original_softmax_center_epoch_099.pth')
    args = arg_parser.parse_args()
    return args


if __name__ == "__main__":
    main()
iShoto/testpy
codes/20200106_metric_learning_cifar10/src/test.py
Python
mit
2,170
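The script above imports CenterLoss from a local losses module that is not included here. As a point of reference, here is a minimal sketch of what such a module could contain, assuming the canonical center-loss formulation and the call signature used above (CenterLoss(num_classes, feat_dim) invoked as centerloss(labels, features)); this is an illustration, not the repository's actual losses.py:

# losses.py (hypothetical sketch, not the original module)
import torch
import torch.nn as nn

class CenterLoss(nn.Module):
    """Mean squared distance of each feature vector to its class center."""

    def __init__(self, num_classes, feat_dim):
        super(CenterLoss, self).__init__()
        # One learnable center per class in the embedding space.
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, labels, features):
        # Pick the center belonging to each sample's label.
        centers_batch = self.centers.index_select(0, labels.long())
        # 1/2 * mean squared Euclidean distance to the class centers.
        return ((features - centers_batch) ** 2).sum(dim=1).mean() / 2.0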
# -*- coding: utf-8 -*- # # ScribleMark documentation build configuration file, created by # sphinx-quickstart on Sun Feb 22 06:46:26 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'ScribleMark' copyright = u'2015, Scribe Inc' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ScribleMarkdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ScribleMark.tex', u'ScribleMark Documentation', u'Scribe Inc', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'scriblemark', u'ScribleMark Documentation', [u'Scribe Inc'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ScribleMark', u'ScribleMark Documentation', u'Scribe Inc', 'ScribleMark', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'ScribleMark' epub_author = u'Scribe Inc' epub_publisher = u'Scribe Inc' epub_copyright = u'2015, Scribe Inc' # The basename for the epub file. It defaults to the project name. #epub_basename = u'ScribleMark' # The HTML theme for the epub output. Since the default themes are not optimized # for small screen space, using the same theme for HTML and epub output is # usually not wise. This defaults to 'epub', a theme designed to save visual # space. #epub_theme = 'epub' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Choose between 'default' and 'includehidden'. #epub_tocscope = 'default' # Fix unsupported image types using the PIL. #epub_fix_images = False # Scale large images. #epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. #epub_show_urls = 'inline' # If false, no index is generated. #epub_use_index = True
scribenet/jabiru
docs/conf.py
Python
mit
10,227
import os, sys sys.path.append('.\libraries') #from pabuehle_utilities_CV_v1 import * from pabuehle_utilities_general_v0 import * import requests, json, base64, datetime, time, threading from flask import Flask, jsonify #################################### # Parameters #################################### #imgDir = "C:/Users/pabuehle/Desktop/apiCallImages/" imgDir = "C:/workspace_BostonDSEng/teamTJ/wildnature/WebProject1/examples/" apiUrl = "http://localhost:64054/api" #apiUrl = "http://hendrick2.azurewebsites.net/faceDetectionAPI" #no need to change these boThreaded = False outDir = imgDir + "apiOutput/" #################################### # Helper Functions #################################### def runWorker(imgIndex,imgFilename): print "\n*** Processing image " + str(imgIndex) + ": " + imgFilename + ".. ***" imgPath = imgDir + imgFilename #call API upload route with image print "Making API call.." tstart = datetime.datetime.now() files = {'file': (imgFilename, open(imgPath, 'rb'), 'image/jpeg', {'Expires': '0'})} rv = requests.post(apiUrl, files = files) durationMs = (datetime.datetime.now()-tstart).total_seconds() * 1000 print "Done API call (time = {0}[ms])".format(durationMs) #parse output and save to disk print rv.content response = json.loads(rv.content) boGiraffeFound = response['boGiraffeFound'] == 'True' confidence = response['confidence'] debugLog = response['debugLog'] processingTimeMs = response['processingTimeMs'] #resultImg = base64.b64decode(response['resultImg']) #writeBinaryFile(outDir + imgFilename[:-3] + "resultImg.jpg", resultImg); if boGiraffeFound: left = int(response['left']) top = int(response['top']) right = int(response['right']) bottom = int(response['bottom']) print "Giraffe location: left = {0}, top = {1}, right = {2}, bottom = {3}".format(left, top, right, bottom) print "Processing time = " + str(processingTimeMs) print "Overhead from API call = {0} [ms]".format(str(durationMs - float(processingTimeMs))) print "confidence = " + str(confidence) print "debugLog = " for s in debugLog.split('<br>'): print " " + s print "\n*** DONE with image " + str(imgIndex) + ": " + imgFilename + ".. ***" #################################### # Code #################################### makeDirectory(outDir) imgFilenames = getFilesInDirectory(imgDir, ".jpg") tstartAll = datetime.datetime.now() if boThreaded == False: for imgIndex,imgFilename in enumerate(imgFilenames): runWorker(imgIndex,imgFilename) else: #start threads threads = [] for imgIndex,imgFilename in enumerate(imgFilenames): t = threading.Thread(target=runWorker, name=None, args=(imgIndex,imgFilename)) threads.append(t) t.start() #time.sleep(0.1) #wait until all threads are done for i in range(len(threads)): threads[i].join() print "Done with all API calls (time = {0}[ms])".format((datetime.datetime.now()-tstartAll).total_seconds() * 1000)
Edeleon4/PoolShark
flask/wildnature/WebProject1/obj/Release/Package/PackageTmp/apiCall.py
Python
mit
3,096
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: theme_specifics.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) DESCRIPTOR = _descriptor.FileDescriptor( name='theme_specifics.proto', package='sync_pb', serialized_pb='\n\x15theme_specifics.proto\x12\x07sync_pb\"\xa4\x01\n\x0eThemeSpecifics\x12\x18\n\x10use_custom_theme\x18\x01 \x01(\x08\x12#\n\x1buse_system_theme_by_default\x18\x02 \x01(\x08\x12\x19\n\x11\x63ustom_theme_name\x18\x03 \x01(\t\x12\x17\n\x0f\x63ustom_theme_id\x18\x04 \x01(\t\x12\x1f\n\x17\x63ustom_theme_update_url\x18\x05 \x01(\tB\x04H\x03`\x01') _THEMESPECIFICS = _descriptor.Descriptor( name='ThemeSpecifics', full_name='sync_pb.ThemeSpecifics', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='use_custom_theme', full_name='sync_pb.ThemeSpecifics.use_custom_theme', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='use_system_theme_by_default', full_name='sync_pb.ThemeSpecifics.use_system_theme_by_default', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='custom_theme_name', full_name='sync_pb.ThemeSpecifics.custom_theme_name', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='custom_theme_id', full_name='sync_pb.ThemeSpecifics.custom_theme_id', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='custom_theme_update_url', full_name='sync_pb.ThemeSpecifics.custom_theme_update_url', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=35, serialized_end=199, ) DESCRIPTOR.message_types_by_name['ThemeSpecifics'] = _THEMESPECIFICS class ThemeSpecifics(_message.Message): __metaclass__ = _reflection.GeneratedProtocolMessageType DESCRIPTOR = _THEMESPECIFICS # @@protoc_insertion_point(class_scope:sync_pb.ThemeSpecifics) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003`\001') # @@protoc_insertion_point(module_scope)
valurhrafn/chrome-sync-server
protocol/theme_specifics_pb2.py
Python
mit
3,431
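Generated protobuf modules such as this one are meant to be used through the message class they define, never edited by hand. A brief usage sketch for round-tripping a ThemeSpecifics message; the field names come from the descriptor above, the methods are the standard protobuf message API, and the values are illustrative:

from theme_specifics_pb2 import ThemeSpecifics

msg = ThemeSpecifics()
msg.use_custom_theme = True
msg.custom_theme_name = u"midnight"   # illustrative value
data = msg.SerializeToString()        # encode to wire-format bytes

parsed = ThemeSpecifics()
parsed.ParseFromString(data)          # decode back into a message
assert parsed.custom_theme_name == u"midnight"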
""" String Substitution for a Mad Lib Adapted from code by Kirby Urner """ story = """ Once upon a time, deep in an ancient jungle, there lived a %(animal)s. This %(animal)s liked to eat %(food)s, but the jungle had very little %(food)s to offer. One day, an explorer found the %(animal)s and discovered it liked %(food)s. The explorer took the %(animal)s back to %(city)s, where it could eat as much %(food)s as it wanted. However, the %(animal)s became homesick, so the explorer brought it back to the jungle, leaving a large supply of %(food)s. The End """ def tellStory(): userPicks = dict() addPick('animal', userPicks) addPick('food', userPicks) addPick('city', userPicks) print story % userPicks def addPick(cue, dictionary): prompt = "Enter a specific example for %s: " % cue dictionary[cue] = raw_input(prompt) tellStory() raw_input("Press Enter to end the program.")
deniederhut/programming-fundamentals
madlib.py
Python
mit
1,400
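The mad lib above uses Python 2's print statement, raw_input, and %(name)s mapping interpolation. For comparison, a minimal Python 3 port of the same technique (hypothetical, not part of the original file):

# Python 3 sketch: named placeholders filled from a dict of user picks.
story = "Once upon a time, a {animal} in {city} ate {food}. The End."

def tell_story():
    picks = {cue: input("Enter a specific example for %s: " % cue)
             for cue in ("animal", "food", "city")}
    print(story.format(**picks))

if __name__ == "__main__":
    tell_story()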
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8

from setuptools import setup, find_packages

NAME = "autoresthttpinfrastructuretestservice"
VERSION = "1.0.0"

# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools

REQUIRES = ["msrest>=0.1.0"]

setup(
    name=NAME,
    version=VERSION,
    description="AutoRestHttpInfrastructureTestService",
    author_email="",
    url="",
    keywords=["Swagger", "AutoRestHttpInfrastructureTestService"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
    Test Infrastructure for AutoRest
    """
)
jkonecki/autorest
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Http/setup.py
Python
mit
1,139
"""Snappy libmemcached wrapper pylibmc is a Python wrapper around TangentOrg's libmemcached library. The interface is intentionally made as close to python-memcached as possible, so that applications can drop-in replace it. Example usage ============= Create a connection and configure it:: >>> import pylibmc >>> m = pylibmc.Client(["10.0.0.1"], binary=True) >>> m.behaviors = {"tcp_nodelay": True, "ketama": True} Nevermind this doctest shim:: >>> from pylibmc.test import make_test_client >>> mc = make_test_client(behaviors=m.behaviors) Basic operation:: >>> mc.set("some_key", "Some value") True >>> value = mc.get("some_key") >>> value 'Some value' >>> mc.set("another_key", 3) True >>> mc.delete("another_key") True >>> mc.set("key", "1") # str or int is fine True Atomic increments and decrements:: >>> mc.incr("key") 2L >>> mc.decr("key") 1L Batch operation:: >>> mc.get_multi(["key", "another_key"]) {'key': '1'} >>> mc.set_multi({"cats": ["on acid", "furry"], "dogs": True}) [] >>> mc.get_multi(["cats", "dogs"]) {'cats': ['on acid', 'furry'], 'dogs': True} >>> mc.delete_multi(["cats", "dogs", "nonextant"]) False >>> mc.add_multi({"cats": ["on acid", "furry"], "dogs": True}) [] >>> mc.get_multi(["cats", "dogs"]) {'cats': ['on acid', 'furry'], 'dogs': True} >>> mc.add_multi({"cats": "not set", "dogs": "definitely not set", "bacon": "yummy"}) ['cats', 'dogs'] >>> mc.get_multi(["cats", "dogs", "bacon"]) {'cats': ['on acid', 'furry'], 'bacon': 'yummy', 'dogs': True} >>> mc.delete_multi(["cats", "dogs", "bacon"]) True Further Reading =============== See http://sendapatch.se/projects/pylibmc/ """ import _pylibmc from .consts import hashers, distributions from .client import Client from .pools import ClientPool, ThreadMappedPool libmemcached_version = _pylibmc.libmemcached_version support_compression = _pylibmc.support_compression support_sasl = _pylibmc.support_sasl __version__ = _pylibmc.__version__ def build_info(): return ("pylibmc %s for libmemcached %s (compression=%s, sasl=%s)" % (__version__, libmemcached_version, support_compression, support_sasl)) __all__ = ["hashers", "distributions", "Client", "ClientPool", "ThreadMappedPool"]
alcance/blog_ng
build/pylibmc/src/pylibmc/__init__.py
Python
mit
2,410
# -*- coding: utf-8 -*-
import random

from twitterapi import TwitterAPI
from tweets import tweets

twitter = TwitterAPI()
last_tweets = twitter.timeline('abadbot', 5)

def get_random_tweet():
    return random.choice(tweets)

def get_new_tweet():
    new_tweet = get_random_tweet()
    for tweet in last_tweets:
        # Check if this was posted in one of the last 5 tweets
        if new_tweet.get('text', '') in tweet.text:
            return get_new_tweet()
    return new_tweet

if __name__ == "__main__":
    new_tweet = get_new_tweet()
    if new_tweet.get('type') == 'text':
        twitter.tweet_text(new_tweet.get('text'))
    elif new_tweet.get('type') == 'image':
        twitter.tweet_image(new_tweet.get('image'), new_tweet.get('text'))
    print new_tweet
juanrossi/abadbot
bot.py
Python
mit
786
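get_new_tweet above retries by calling itself, which can exceed the recursion limit (or loop indefinitely) if every candidate's text appears in the recent timeline. A hedged alternative sketch that bounds the retries iteratively; get_random_tweet and last_tweets are the names from the bot above, get_new_tweet_iterative is hypothetical:

def get_new_tweet_iterative(max_tries=50):
    # Try a bounded number of candidates instead of recursing.
    for _ in range(max_tries):
        candidate = get_random_tweet()
        if not any(candidate.get('text', '') in t.text for t in last_tweets):
            return candidate
    # Give up on uniqueness rather than recurse without limit.
    return get_random_tweet()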
## \namespace orbit::parsers
## \brief Accelerator lattice parsers.
##
## Classes:
## - mad_parser - MAD-8 parser
## - sad_parser - SAD parser

from mad_parser import MAD_Parser
from mad_parser import MAD_LattElement
from mad_parser import MAD_LattLine
from madx_parser import MADX_Parser
from madx_parser import MADX_LattElement
from sad_parser import SAD_Parser
from sad_parser import SAD_LattElement
from sad_parser import SAD_LattLine
from field_parser import Field_Parser3D

__all__ = []
__all__.append("MAD_Parser")
__all__.append("MAD_LattElement")
__all__.append("MAD_LattLine")
__all__.append("MADX_Parser")
__all__.append("MADX_LattElement")
__all__.append("SAD_Parser")
__all__.append("SAD_LattElement")
__all__.append("SAD_LattLine")
__all__.append("Field_Parser3D")
azukov/py-orbit
py/orbit/parsers/__init__.py
Python
mit
781
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL2Tiles, Google Summer of Code 2007 & 2008 # Global Map Tiles Classes # Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326, # generate a simple HTML viewers based on Google Maps and OpenLayers # Author: Klokan Petr Pridal, klokan at klokan dot cz # Web: http://www.klokan.cz/projects/gdal2tiles/ # ############################################################################### # Copyright (c) 2008 Klokan Petr Pridal. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### """ globalmaptiles.py Global Map Tiles as defined in Tile Map Service (TMS) Profiles ============================================================== Functions necessary for generation of global tiles used on the web. It contains classes implementing coordinate conversions for: - GlobalMercator (based on EPSG:900913 = EPSG:3785) for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles - GlobalGeodetic (based on EPSG:4326) for OpenLayers Base Map and Google Earth compatible tiles More info at: http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation http://msdn.microsoft.com/en-us/library/bb259689.aspx http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates Created by Klokan Petr Pridal on 2008-07-03. Google Summer of Code 2008, project GDAL2Tiles for OSGEO. In case you use this class in your product, translate it to another language or find it usefull for your project please let me know. My email: klokan at klokan dot cz. I would like to know where it was used. Class is available under the open-source GDAL license (www.gdal.org). """ import math class GlobalMercator(object): """ TMS Global Mercator Profile --------------------------- Functions necessary for generation of tiles in Spherical Mercator projection, EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001. Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps, UK Ordnance Survey OpenSpace API, ... and you can overlay them on top of base maps of those web mapping applications. Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left). 
What coordinate conversions do we need for TMS Global Mercator tiles:: LatLon <-> Meters <-> Pixels <-> Tile WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid lat/lon XY in metres XY pixels Z zoom XYZ from TMS EPSG:4326 EPSG:900913 .----. --------- -- TMS / \ <-> | | <-> /----/ <-> Google \ / | | /--------/ QuadTree ----- --------- /------------/ KML, public WebMapService Web Clients TileMapService What is the coordinate extent of Earth in EPSG:900913? [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244] Constant 20037508.342789244 comes from the circumference of the Earth in meters, which is 40 thousand kilometers, the coordinate origin is in the middle of extent. In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0 $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913 Polar areas with abs(latitude) bigger then 85.05112878 are clipped off. What are zoom level constants (pixels/meter) for pyramid with EPSG:900913? whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile, every lower zoom level resolution is always divided by two initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062 What is the difference between TMS and Google Maps/QuadTree tile name convention? The tile raster itself is the same (equal extent, projection, pixel size), there is just different identification of the same raster tile. Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ. Google placed the origin [0,0] to the top-left corner, reference is XYZ. Microsoft is referencing tiles by a QuadTree name, defined on the website: http://msdn2.microsoft.com/en-us/library/bb259689.aspx The lat/lon coordinates are using WGS84 datum, yeh? Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum. Well, the web clients like Google Maps are projecting those coordinates by Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if the were on the WGS84 ellipsoid. From MSDN documentation: To simplify the calculations, we use the spherical form of projection, not the ellipsoidal form. Since the projection is used only for map display, and not for displaying numeric coordinates, we don't need the extra precision of an ellipsoidal projection. The spherical projection causes approximately 0.33 percent scale distortion in the Y direction, which is not visually noticable. How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4? You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform. All of the tools supports -t_srs 'epsg:900913'. For other GIS programs check the exact definition of the projection: More info at http://spatialreference.org/ref/user/google-projection/ The same projection is degined as EPSG:3785. WKT definition is in the official EPSG database. 
Proj4 Text: +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs Human readable WKT format of EPGS:900913: PROJCS["Google Maps Global Mercator", GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.2572235630016, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0], UNIT["degree",0.0174532925199433], AUTHORITY["EPSG","4326"]], PROJECTION["Mercator_1SP"], PARAMETER["central_meridian",0], PARAMETER["scale_factor",1], PARAMETER["false_easting",0], PARAMETER["false_northing",0], UNIT["metre",1, AUTHORITY["EPSG","9001"]]] """ def __init__(self, tileSize=256): "Initialize the TMS Global Mercator pyramid" self.tileSize = tileSize self.initialResolution = 2 * math.pi * 6378137 / self.tileSize # 156543.03392804062 for tileSize 256 pixels self.originShift = 2 * math.pi * 6378137 / 2.0 # 20037508.342789244 def LatLonToMeters(self, lat, lon): "Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913" mx = lon * self.originShift / 180.0 my = math.log( math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0) my = my * self.originShift / 180.0 return mx, my def MetersToLatLon(self, mx, my): "Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum" lon = (mx / self.originShift) * 180.0 lat = (my / self.originShift) * 180.0 lat = 180 / math.pi * ( 2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0) return lat, lon def PixelsToMeters(self, px, py, zoom): "Converts pixel coordinates in given zoom level of pyramid to EPSG:900913" res = self.Resolution(zoom) mx = px * res - self.originShift my = py * res - self.originShift return mx, my def MetersToPixels(self, mx, my, zoom): "Converts EPSG:900913 to pyramid pixel coordinates in given zoom level" res = self.Resolution(zoom) px = (mx + self.originShift) / res py = (my + self.originShift) / res return px, py def PixelsToTile(self, px, py): "Returns a tile covering region in given pixel coordinates" tx = int(math.ceil(px / float(self.tileSize)) - 1) ty = int(math.ceil(py / float(self.tileSize)) - 1) return tx, ty def PixelsToRaster(self, px, py, zoom): "Move the origin of pixel coordinates to top-left corner" mapSize = self.tileSize << zoom return px, mapSize - py def MetersToTile(self, mx, my, zoom): "Returns tile for given mercator coordinates" px, py = self.MetersToPixels(mx, my, zoom) return self.PixelsToTile(px, py) def TileBounds(self, tx, ty, zoom): "Returns bounds of the given tile in EPSG:900913 coordinates" minx, miny = self.PixelsToMeters( tx * self.tileSize, ty * self.tileSize, zoom) maxx, maxy = self.PixelsToMeters( (tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom) return minx, miny, maxx, maxy def TileLatLonBounds(self, tx, ty, zoom): "Returns bounds of the given tile in latutude/longitude using WGS84 datum" bounds = self.TileBounds(tx, ty, zoom) minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1]) maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3]) return minLat, minLon, maxLat, maxLon def Resolution(self, zoom): "Resolution (meters/pixel) for given zoom level (measured at Equator)" zoom = int(zoom) return self.initialResolution / (2 ** zoom) def ZoomForPixelSize(self, pixelSize): "Maximal scaledown zoom of the pyramid closest to the pixelSize." 
for i in range(30): if pixelSize > self.Resolution(i): return i - 1 if i != 0 else 0 # We don't want to scale up @staticmethod def GoogleTile(tx, ty, zoom): "Converts TMS tile coordinates to Google Tile coordinates" # coordinate origin is moved from bottom-left to top-left corner of the # extent return tx, (2 ** zoom - 1) - ty @staticmethod def QuadTree(tx, ty, zoom): "Converts TMS tile coordinates to Microsoft QuadTree" quadKey = "" ty = (2 ** zoom - 1) - ty for i in range(zoom, 0, -1): digit = 0 mask = 1 << (i - 1) if (tx & mask) != 0: digit += 1 if (ty & mask) != 0: digit += 2 quadKey += str(digit) return quadKey #--------------------- class GlobalGeodetic(object): """ TMS Global Geodetic Profile --------------------------- Functions necessary for generation of global tiles in Plate Carre projection, EPSG:4326, "unprojected profile". Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters) and you can overlay the tiles on top of OpenLayers base map. Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left). What coordinate conversions do we need for TMS Global Geodetic tiles? Global Geodetic tiles are using geodetic coordinates (latitude,longitude) directly as planar coordinates XY (it is also called Unprojected or Plate Carre). We need only scaling to pixel pyramid and cutting to tiles. Pyramid has on top level two tiles, so it is not square but rectangle. Area [-180,-90,180,90] is scaled to 512x256 pixels. TMS has coordinate origin (for pixels and tiles) in bottom-left corner. Rasters are in EPSG:4326 and therefore are compatible with Google Earth. LatLon <-> Pixels <-> Tiles WGS84 coordinates Pixels in pyramid Tiles in pyramid lat/lon XY pixels Z zoom XYZ from TMS EPSG:4326 .----. ---- / \ <-> /--------/ <-> TMS \ / /--------------/ ----- /--------------------/ WMS, KML Web Clients, Google Earth TileMapService """ def __init__(self, tileSize=256): self.tileSize = tileSize @staticmethod def LatLonToPixels(lat, lon, zoom): "Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid" res = 180 / 256.0 / 2 ** zoom px = (180 + lat) / res py = (90 + lon) / res return px, py def PixelsToTile(self, px, py): "Returns coordinates of the tile covering region in pixel coordinates" tx = int(math.ceil(px / float(self.tileSize)) - 1) ty = int(math.ceil(py / float(self.tileSize)) - 1) return tx, ty @staticmethod def Resolution(zoom): "Resolution (arc/pixel) for given zoom level (measured at Equator)" return 180 / 256.0 / 2 ** zoom # return 180 / float( 1 << (8+zoom) ) @staticmethod def TileBounds(tx, ty, zoom): "Returns bounds of the given tile" res = 180 / 256.0 / 2 ** zoom return ( tx * 256 * res - 180, ty * 256 * res - 90, (tx + 1) * 256 * res - 180, (ty + 1) * 256 * res - 90 )
geometalab/OSM-Crosswalk-Detection
src/base/globalmaptiles.py
Python
mit
14,459
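A short usage sketch chaining the conversions the docstring above describes: WGS84 lat/lon to Spherical Mercator metres, metres to the covering TMS tile at a zoom level, then TMS to Google and QuadTree addressing. The coordinates are illustrative; only classes and methods defined above are used:

mercator = GlobalMercator(tileSize=256)

lat, lon = 47.3769, 8.5417                     # illustrative point
mx, my = mercator.LatLonToMeters(lat, lon)     # EPSG:900913 metres
tx, ty = mercator.MetersToTile(mx, my, 12)     # TMS tile at zoom 12

gx, gy = mercator.GoogleTile(tx, ty, 12)       # flip Y: TMS -> Google/XYZ
quadkey = mercator.QuadTree(tx, ty, 12)        # Microsoft QuadTree name
minx, miny, maxx, maxy = mercator.TileBounds(tx, ty, 12)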
import numpy as np from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK from cea.technologies.boiler import cond_boiler_op_cost from cea.technologies.cogeneration import calc_cop_CCGT from cea.technologies.constants import BOILER_MIN from cea.technologies.furnace import furnace_op_cost from cea.technologies.heatpumps import GHP_op_cost, HPSew_op_cost, HPLake_op_cost from cea.technologies.pumps import calc_water_body_uptake_pumping __author__ = "Sreepathi Bhargava Krishna" __copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich" __credits__ = ["Sreepathi Bhargava Krishna", "Jimeno Fonseca"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "Daren Thomas" __email__ = "thomas@arch.ethz.ch" __status__ = "Production" def heating_source_activator(Q_therm_req_W, master_to_slave_vars, Q_therm_GHP_W, TretGHPArray_K, TretLakeArray_K, Q_therm_Lake_W, Q_therm_Sew_W, TretsewArray_K, tdhsup_K, tdhret_req_K): """ :param Q_therm_req_W: :param hour: :param context: :type Q_therm_req_W: float :type hour: int :type context: list :return: cost_data_centralPlant_op, source_info, Q_source_data, E_coldsource_data, E_PP_el_data, E_gas_data, E_wood_data, Q_excess :rtype: """ ## initializing unmet heating load Q_heat_unmet_W = Q_therm_req_W # ACTIVATE THE COGEN if master_to_slave_vars.CC_on == 1 and Q_heat_unmet_W > 0.0: CC_op_cost_data = calc_cop_CCGT(master_to_slave_vars.CCGT_SIZE_W, tdhsup_K, "NG") # create cost information Q_used_prim_CC_fn_W = CC_op_cost_data['q_input_fn_q_output_W'] q_output_CC_min_W = CC_op_cost_data['q_output_min_W'] Q_output_CC_max_W = CC_op_cost_data['q_output_max_W'] eta_elec_interpol = CC_op_cost_data['eta_el_fn_q_input'] if Q_heat_unmet_W >= q_output_CC_min_W: # operation Possible if above minimal load if Q_heat_unmet_W <= Q_output_CC_max_W: # Normal operation Possible within partload regime Q_CHP_gen_W = Q_heat_unmet_W NG_CHP_req_W = Q_used_prim_CC_fn_W(Q_CHP_gen_W) E_CHP_gen_W = np.float(eta_elec_interpol(NG_CHP_req_W)) * NG_CHP_req_W else: # Only part of the demand can be delivered as 100% load achieved Q_CHP_gen_W = Q_output_CC_max_W NG_CHP_req_W = Q_used_prim_CC_fn_W(Q_CHP_gen_W) E_CHP_gen_W = np.float(eta_elec_interpol(NG_CHP_req_W)) * NG_CHP_req_W else: NG_CHP_req_W = 0.0 E_CHP_gen_W = 0.0 Q_CHP_gen_W = 0.0 Q_heat_unmet_W = Q_heat_unmet_W - Q_CHP_gen_W else: NG_CHP_req_W = 0.0 E_CHP_gen_W = 0.0 Q_CHP_gen_W = 0.0 # WET FURNACE if master_to_slave_vars.Furnace_wet_on == 1 and Q_heat_unmet_W > 0.0: # Activate Furnace if its there. # Operate only if its above minimal load if Q_heat_unmet_W > master_to_slave_vars.WBFurnace_Q_max_W: if Q_heat_unmet_W > master_to_slave_vars.WBFurnace_Q_max_W: Q_Furnace_wet_gen_W = master_to_slave_vars.WBFurnace_Q_max_W # scale down if above maximum load, Furnace operates at max. capacity DryBiomass_Furnace_req_W, E_Furnace_wet_gen_W = furnace_op_cost(Q_Furnace_wet_gen_W, master_to_slave_vars.WBFurnace_Q_max_W, tdhret_req_K, "wet") else: # Normal Operation Possible Q_Furnace_wet_gen_W = Q_heat_unmet_W DryBiomass_Furnace_req_W, E_Furnace_wet_gen_W = furnace_op_cost(Q_Furnace_wet_gen_W, master_to_slave_vars.WBFurnace_Q_max_W, tdhret_req_K, "wet") else: E_Furnace_wet_gen_W = 0.0 DryBiomass_Furnace_req_W = 0.0 Q_Furnace_wet_gen_W = 0.0 Q_heat_unmet_W = Q_heat_unmet_W - Q_Furnace_wet_gen_W else: E_Furnace_wet_gen_W = 0.0 DryBiomass_Furnace_req_W = 0.0 Q_Furnace_wet_gen_W = 0.0 # DRY FURNACE if master_to_slave_vars.Furnace_dry_on == 1 and Q_heat_unmet_W > 0.0: # Activate Furnace if its there. 
# Operate only if its above minimal load if Q_heat_unmet_W > master_to_slave_vars.DBFurnace_Q_max_W: if Q_heat_unmet_W > master_to_slave_vars.DBFurnace_Q_max_W: Q_Furnace_dry_gen_W = master_to_slave_vars.DBFurnace_Q_max_W # scale down if above maximum load, Furnace operates at max. capacity WetBiomass_Furnace_req_W, E_Furnace_dry_gen_W = furnace_op_cost(Q_Furnace_dry_gen_W, master_to_slave_vars.DBFurnace_Q_max_W, tdhret_req_K, "dry") else: # Normal Operation Possible Q_Furnace_dry_gen_W = Q_heat_unmet_W WetBiomass_Furnace_req_W, E_Furnace_dry_gen_W = furnace_op_cost(Q_Furnace_dry_gen_W, master_to_slave_vars.DBFurnace_Q_max_W, tdhret_req_K, "dry") else: E_Furnace_dry_gen_W = 0.0 WetBiomass_Furnace_req_W = 0.0 Q_Furnace_dry_gen_W = 0.0 Q_heat_unmet_W = Q_heat_unmet_W - Q_Furnace_dry_gen_W else: E_Furnace_dry_gen_W = 0.0 WetBiomass_Furnace_req_W = 0.0 Q_Furnace_dry_gen_W = 0.0 if (master_to_slave_vars.HPSew_on) == 1 and Q_heat_unmet_W > 0.0 and not np.isclose(tdhsup_K, tdhret_req_K): # activate if its available if Q_heat_unmet_W > Q_therm_Sew_W: Q_HPSew_gen_W = Q_therm_Sew_W mdot_DH_to_Sew_kgpers = Q_HPSew_gen_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K)) else: Q_HPSew_gen_W = Q_heat_unmet_W mdot_DH_to_Sew_kgpers = Q_HPSew_gen_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K)) E_HPSew_req_W, \ Q_coldsource_HPSew_W, \ Q_HPSew_gen_W = HPSew_op_cost(mdot_DH_to_Sew_kgpers, tdhsup_K, tdhret_req_K, TretsewArray_K, Q_HPSew_gen_W ) Q_heat_unmet_W = Q_heat_unmet_W - Q_HPSew_gen_W else: E_HPSew_req_W = 0.0 Q_HPSew_gen_W = 0.0 if (master_to_slave_vars.HPLake_on) == 1 and Q_heat_unmet_W > 0.0 and not np.isclose(tdhsup_K, tdhret_req_K): if Q_heat_unmet_W > Q_therm_Lake_W: # Scale down Load, 100% load achieved Q_HPLake_gen_W = Q_therm_Lake_W else: # regular operation possible Q_HPLake_gen_W = Q_heat_unmet_W E_HPLake_req_W, Q_coldsource_HPLake_W, Q_HPLake_gen_W = HPLake_op_cost(Q_HPLake_gen_W, tdhsup_K, tdhret_req_K, TretLakeArray_K ) E_pump_req_W = calc_water_body_uptake_pumping(Q_HPLake_gen_W, tdhret_req_K, tdhsup_K) E_HPLake_req_W += E_pump_req_W Q_heat_unmet_W = Q_heat_unmet_W - Q_HPLake_gen_W else: E_HPLake_req_W = 0.0 Q_HPLake_gen_W = 0.0 if (master_to_slave_vars.GHP_on) == 1 and Q_heat_unmet_W > 0.0 and not np.isclose(tdhsup_K, tdhret_req_K): if Q_heat_unmet_W > Q_therm_GHP_W: Q_GHP_gen_W = Q_therm_GHP_W mdot_DH_to_GHP_kgpers = Q_GHP_gen_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K)) else: # regular operation possible, demand is covered Q_GHP_gen_W = Q_heat_unmet_W mdot_DH_to_GHP_kgpers = Q_GHP_gen_W / (HEAT_CAPACITY_OF_WATER_JPERKGK * (tdhsup_K - tdhret_req_K)) E_GHP_req_W, Q_coldsource_GHP_W, Q_GHP_gen_W = GHP_op_cost(mdot_DH_to_GHP_kgpers, tdhsup_K, tdhret_req_K, TretGHPArray_K, Q_GHP_gen_W) Q_heat_unmet_W = Q_heat_unmet_W - Q_GHP_gen_W else: E_GHP_req_W = 0.0 Q_GHP_gen_W = 0.0 if (master_to_slave_vars.Boiler_on) == 1 and Q_heat_unmet_W > 0: if Q_heat_unmet_W >= BOILER_MIN * master_to_slave_vars.Boiler_Q_max_W: # Boiler can be activated? if Q_heat_unmet_W >= master_to_slave_vars.Boiler_Q_max_W: # Boiler above maximum Load? 
Q_BaseBoiler_gen_W = master_to_slave_vars.Boiler_Q_max_W else: Q_BaseBoiler_gen_W = Q_heat_unmet_W NG_BaseBoiler_req_W, E_BaseBoiler_req_W = cond_boiler_op_cost(Q_BaseBoiler_gen_W, master_to_slave_vars.Boiler_Q_max_W, tdhret_req_K) else: Q_BaseBoiler_gen_W = 0.0 NG_BaseBoiler_req_W = 0.0 E_BaseBoiler_req_W = 0.0 Q_heat_unmet_W = Q_heat_unmet_W - Q_BaseBoiler_gen_W else: Q_BaseBoiler_gen_W = 0.0 NG_BaseBoiler_req_W = 0.0 E_BaseBoiler_req_W = 0.0 if master_to_slave_vars.BoilerPeak_on == 1 and Q_heat_unmet_W > 0: if Q_heat_unmet_W >= BOILER_MIN * master_to_slave_vars.BoilerPeak_Q_max_W: # Boiler can be activated? if Q_heat_unmet_W > master_to_slave_vars.BoilerPeak_Q_max_W: # Boiler above maximum Load? Q_PeakBoiler_gen_W = master_to_slave_vars.BoilerPeak_Q_max_W else: Q_PeakBoiler_gen_W = Q_heat_unmet_W NG_PeakBoiler_req_W, E_PeakBoiler_req_W = cond_boiler_op_cost(Q_PeakBoiler_gen_W, master_to_slave_vars.BoilerPeak_Q_max_W, tdhret_req_K) else: Q_PeakBoiler_gen_W = 0.0 NG_PeakBoiler_req_W = 0 E_PeakBoiler_req_W = 0.0 Q_heat_unmet_W = Q_heat_unmet_W - Q_PeakBoiler_gen_W else: Q_PeakBoiler_gen_W = 0.0 NG_PeakBoiler_req_W = 0 E_PeakBoiler_req_W = 0.0 if Q_heat_unmet_W > 1.0E-3: Q_uncovered_W = Q_heat_unmet_W # this will become the back-up boiler else: Q_uncovered_W = 0.0 return Q_HPSew_gen_W, \ Q_HPLake_gen_W, \ Q_GHP_gen_W, \ Q_CHP_gen_W, \ Q_Furnace_dry_gen_W, \ Q_Furnace_wet_gen_W, \ Q_BaseBoiler_gen_W, \ Q_PeakBoiler_gen_W, \ Q_uncovered_W, \ E_HPSew_req_W, \ E_HPLake_req_W, \ E_BaseBoiler_req_W, \ E_PeakBoiler_req_W, \ E_GHP_req_W, \ E_CHP_gen_W, \ E_Furnace_dry_gen_W, \ E_Furnace_wet_gen_W, \ NG_CHP_req_W, \ NG_BaseBoiler_req_W, \ NG_PeakBoiler_req_W, \ WetBiomass_Furnace_req_W, \ DryBiomass_Furnace_req_W
architecture-building-systems/CEAforArcGIS
cea/optimization/slave/heating_resource_activation.py
Python
mit
12,639
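Each block in heating_source_activator follows the same merit-order pattern: if the technology is selected and load remains, generate up to its installed capacity (subject to a minimum part load), then subtract the generation from the unmet demand. The pattern reduced to a hedged sketch; the names, capacities, and thresholds here are hypothetical, not CEA's API:

def dispatch(q_req_w, technologies):
    """technologies: (name, q_max_w, q_min_w) tuples in merit order."""
    plan, unmet = {}, q_req_w
    for name, q_max_w, q_min_w in technologies:
        if unmet <= 0.0 or unmet < q_min_w:
            continue                      # off, or below minimum part load
        q_gen_w = min(unmet, q_max_w)     # cap at installed capacity
        plan[name] = q_gen_w
        unmet -= q_gen_w
    return plan, max(unmet, 0.0)          # remainder goes to the back-up boiler

plan, q_uncovered_w = dispatch(5e6, [("chp", 3e6, 1e6), ("boiler", 2e6, 0.0)])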
""" test_meta: ========== A module intended for use with Nose. """ from __future__ import absolute_import import random import string from unittest import skip from chart_studio import plotly as py from chart_studio.exceptions import PlotlyRequestError from chart_studio.grid_objs import Column, Grid from chart_studio.tests.utils import PlotlyTestCase class MetaTest(PlotlyTestCase): _grid = grid = Grid([Column([1, 2, 3, 4], "first column")]) _meta = {"settings": {"scope1": {"model": "Unicorn Finder", "voltage": 4}}} def setUp(self): super(MetaTest, self).setUp() py.sign_in("PythonTest", "xnyU0DEwvAQQCwHVseIL") def random_filename(self): random_chars = [random.choice(string.ascii_uppercase) for _ in range(5)] unique_filename = "Valid Grid with Meta " + "".join(random_chars) return unique_filename def test_upload_meta(self): unique_filename = self.random_filename() grid_url = py.grid_ops.upload(self._grid, unique_filename, auto_open=False) # Add some Metadata to that grid py.meta_ops.upload(self._meta, grid_url=grid_url) def test_upload_meta_with_grid(self): c1 = Column([1, 2, 3, 4], "first column") Grid([c1]) unique_filename = self.random_filename() py.grid_ops.upload( self._grid, unique_filename, meta=self._meta, auto_open=False ) @skip( "adding this for now so test_file_tools pass, more info" + "https://github.com/plotly/python-api/issues/263" ) def test_metadata_to_nonexistent_grid(self): non_exist_meta_url = "https://local.plotly.com/~GridTest/999999999" with self.assertRaises(PlotlyRequestError): py.meta_ops.upload(self._meta, grid_url=non_exist_meta_url)
plotly/python-api
packages/python/chart-studio/chart_studio/tests/test_plot_ly/test_meta/test_meta.py
Python
mit
1,805
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *

def create(kernel):
    result = Tangible()

    result.template = "object/tangible/ship/components/weapon_capacitor/shared_cap_mandal_tuned_powermaster_mk2.iff"
    result.attribute_template_id = 8
    result.stfName("space/space_item","cap_mandal_tuned_powermaster_mk2_n")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
anhstudios/swganh
data/scripts/templates/object/tangible/ship/components/weapon_capacitor/shared_cap_mandal_tuned_powermaster_mk2.py
Python
mit
519
# -*- coding:utf-8 -*- import os, sys current_path = os.path.dirname(os.path.abspath(__file__)) root_path = os.path.abspath( os.path.join(current_path, os.pardir)) sys.path.append(root_path) import threading import time import unittest from socket import error as sock_error from ftplib import FTP, FTP_TLS, error_temp, error_perm from ossftp import ftpserver from util import * class LoginTest(unittest.TestCase): def setUp(self): self.host = get_value_from_config("ftpconfig", "host") self.ftp_port = int(get_value_from_config("ftpconfig", "ftp_port")) self.username = get_value_from_config("ftpconfig", "normal_id") + '/' + get_value_from_config("ftpconfig", "normal_bucket") self.password = get_value_from_config("ftpconfig", "normal_key") def tearDown(self): pass def ftp_login(self, username, password): try: ftp = FTP() ftp.connect(self.host, self.ftp_port) ftp.login(username, password) ftp.quit() except (error_temp, error_perm, sock_error) as e: return False else: return True def test_normal(self): self.assertTrue(self.ftp_login(self.username, self.password)) #below login will hit cache self.assertTrue(self.ftp_login(self.username, self.password)) #test wrong access_key_sercrete self.assertFalse(self.ftp_login(self.username, self.password+"qwerasdf")) self.assertTrue(self.ftp_login(self.username, self.password)) def test_specified(self): normal_id = get_value_from_config("ftpconfig", "normal_id") normal_key = get_value_from_config("ftpconfig", "normal_key") specified_bucket = get_value_from_config("ftpconfig", "specified_bucket") username_specified = normal_id + "/" + specified_bucket self.assertTrue(self.ftp_login(username_specified, normal_key)) wrong_user = "qwerasdf/%s" % specified_bucket wrong_pwd = "adfasdfqer" self.assertFalse(self.ftp_login(wrong_user, wrong_pwd)) def test_child_account(self): normal_bucket = get_value_from_config("ftpconfig", "normal_bucket") specified_bucket = get_value_from_config("ftpconfig", "specified_bucket") child_id = get_value_from_config("ftpconfig", "child_id") child_key = get_value_from_config("ftpconfig", "child_key") self.assertFalse(self.ftp_login(child_id+"/"+normal_bucket, child_key)) self.assertTrue(self.ftp_login(child_id+"/"+specified_bucket, child_key)) def test_update_ak(self): #first login with normal ak self.assertTrue(self.ftp_login(self.username, self.password)) #then login with new ak username_new = get_value_from_config("ftpconfig", "normal_id_new") + '/' + get_value_from_config("ftpconfig", "normal_bucket") password_new = get_value_from_config("ftpconfig", "normal_key_new") self.assertTrue(self.ftp_login(username_new, password_new)) def test_error_input(self): user_info = { "":"", "qqqqqq":"", "":"aaaaaaaa", "asdfasdf":"asdfsdf", "asdfasdf/":"aaaaaaaa", "/qwerdf":"aaaaaaaa" } for username, password in user_info.items(): self.assertFalse(self.ftp_login(username, password)) def test_no_such_bucket(self): user_info = { "adsfasdf/test-bucket-name-unittest":"randomaccesskey" } for username, password in user_info.items(): self.assertFalse(self.ftp_login(username, password)) def test_wrong_ak(self): normal_bucket = get_value_from_config("ftpconfig", "normal_bucket") user_info = { "adsfasdf/%s"%normal_bucket:"randomaccesskey" } for username, password in user_info.items(): self.assertFalse(self.ftp_login(username, password)) if __name__ == '__main__': specified_url = get_value_from_config("ftpconfig", "specified_url") t = myThread("thread_id_1", "", "127.0.0.1", 2048, "DEBUG", specified_url) t.daemon = True t.start() #wait for ossftp ready 
time.sleep(5) unittest.main()
johnkeepmoving/oss-ftp
test/login.py
Python
mit
4,240
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models from referral.compat import AUTH_USER_MODEL class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Campaign.pattern' db.add_column('referral_campaign', 'pattern', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Campaign.pattern' db.delete_column('referral_campaign', 'pattern') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, AUTH_USER_MODEL: { 'Meta': {'object_name': AUTH_USER_MODEL.split('.')[-1]}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'referral.campaign': { 'Meta': {'ordering': "['name']", 'object_name': 'Campaign'}, 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'pattern': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}) }, 'referral.referrer': { 'Meta': {'ordering': "['name']", 'object_name': 'Referrer'}, 'campaign': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'referrers'", 'null': 'True', 'to': "orm['referral.Campaign']"}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'referral.userreferrer': { 'Meta': {'ordering': "['referrer__name']", 'object_name': 'UserReferrer'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['referral.Referrer']"}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'referrer'", 'unique': 'True', 'to': "orm['%s']" % AUTH_USER_MODEL}) } } complete_apps = ['referral']
Chris7/django-referral
referral/south_migrations/0002_auto__add_field_campaign_pattern.py
Python
mit
5,531
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-04-22 11:28 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django_pgjson.fields class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('events', '0001_initial'), ] operations = [ migrations.CreateModel( name='RSVP', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('_data', django_pgjson.fields.JsonField(default={})), ('going', models.BooleanField(default=True)), ('created_datetime', models.DateTimeField(auto_now_add=True)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='event', name='ticketing_type', field=models.CharField(choices=[('T', 'tickets'), ('R', 'rsvp')], default='T', max_length=1), ), migrations.AddField( model_name='rsvp', name='event', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvps', to='events.Event'), ), migrations.AddField( model_name='rsvp', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvps', to=settings.AUTH_USER_MODEL), ), ]
jscott1989/happening
src/events/migrations/0002_auto_20170422_1128.py
Python
mit
1,610
import babel.numbers import babel.dates import decimal import locale from datetime import datetime from urlparse import urlparse from slugify import slugify from grano.core import app, url_for from grano.model import Schema, Project, Entity STOPWORDS = ['of', 'the', 'for', 'die', 'der', 'das', 'at', 'de', 'et', 'en', 'in', 'le', 'la', 'in', 'to', 'and'] def facet_schema_list(obj, facets): results = [] project = Project.by_slug('openinterests') for facet in facets: schema = Schema.by_name(project, facet.get('term')) if schema is not None and not schema.hidden: results.append((schema, facet.get('count'))) return results def url_slug(text): if text is None: return '' parts = [] for part in slugify(text).split('-'): if part not in STOPWORDS: parts.append(part) text = '-'.join(parts) return text[:255] @app.template_filter('format_eur') def format_eur(num): if num is None or (isinstance(num, basestring) and not len(num)): return '-' try: num = decimal.Decimal(num) num = babel.numbers.format_currency(num, "EUR", locale="en_US") return num.replace('.00', '') except Exception, e: raise return '-' @app.template_filter('domain_name') def domain_name(url): if url is None: return None url = urlparse(url) dom = url.hostname.lower() if dom.startswith('www.'): dom = dom[4:] return dom @app.template_filter('entity_link') def entity_link(entity, **kwargs): if isinstance(entity, Entity): prop = entity['name'] id, name = entity.id, prop.value if prop else 'forward' else: prop = entity.get('properties', {}).get('name', {}) id, name = entity.get('id'), prop.get('value', 'forward') return url_for('entities.view', id=id, slug=url_slug(name), **kwargs) @app.template_filter('render_value') def render_value(value): if isinstance(value, basestring): return render_value_text(value) if isinstance(value, int): return '<span class="numeric-value int-value">%d</span>' % value if isinstance(value, float): return '<span class="numeric-value float-value">%.2f</span>' % value if isinstance(value, bool): if value: return '<i class="fa fa-plus-square"></i> true' else: return '<i class="fa fa-minus-square"></i> false' if isinstance(value, datetime): return babel.dates.format_date(value, locale='en_US') return value def render_value_text(text): if text is None: return '' texts = text.split('\n') if len(text) < 300: return '<p>' + '</p>\n<p>'.join(texts) + '</p>' LT = """<div class="longtext"><div class="snippet">%s <a class="expand" href="#">Read more...</a></div> <div class="full"><p>%s</p></div></div>""" snippet = texts[0] if len(texts[0]) < 350 else texts[0][:250] + '...' return LT % (snippet, '</p>\n<p>'.join(texts))
IuliiSe/openinterests.eu
openinterests/views/util.py
Python
mit
3,044
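A quick interactive check of the two pure helpers above (Python 2, matching the module's urlparse/basestring usage). The import path is inferred from the file location, and exact currency rendering depends on the installed Babel version.

from openinterests.views.util import url_slug, format_eur

print url_slug('The Ministry of the Interior')  # 'ministry-interior' (stopwords dropped)
print format_eur('1234567.00')                  # u'\u20ac1,234,567' with the trailing '.00' stripped
print format_eur(None)                          # '-' fallback for missing values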
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Building() result.template = "object/building/player/city/shared_garden_corellia_sml_03.iff" result.attribute_template_id = -1 result.stfName("building_name","garden") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
obi-two/Rebelion
data/scripts/templates/object/building/player/city/shared_garden_corellia_sml_03.py
Python
mit
458
"""Experiment data storage. There are two main use cases for the functionality in this module: reading/writing data during an experiment session, and reading data once an experiment is complete (i.e. for analysis). See the :ref:`user guide <storage>` for information on these use cases/api.jpeg/api.jpeg/api.jpeg. """ import os import h5py import numpy import pandas import zipfile import shutil import pickle import logging # # Highest layer. Used by tasks to obtain task readers/writers # class Storage(object): """Top-level data storage maintainer. See the :ref:`user guide <storage>` for more information. Parameters ---------- root : str, optional Path to the root of the data storage filestructure. By default, 'data' is used. If the directory doesn't exist, it is created. allow_overwrite : bool, optional Specifies whether or not the storage interface allows you to overwrite a task's data for a subject if it already exists. """ def __init__(self, root='data', allow_overwrite=False): self.root = root self.allow_overwrite = allow_overwrite makedirs(root, exist_ok=True) self._subject_id = None @property def subject_ids(self): """Generate subject IDs found in storage sorted in alphabetical order. Returns ------- subject_id : str ID of the subject found. """ ls = os.listdir(self.root) for name in sorted(ls): path = os.path.join(self.root, name) if os.path.isdir(path): yield name @property def subject_id(self): """The current subject ID. When setting the subject ID for a new subject (i.e. one that doesn't exist already), storage for that subject is created. """ return self._subject_id @subject_id.setter def subject_id(self, val): makedirs(os.path.join(self.root, val), exist_ok=True) self._subject_id = val @property def task_ids(self): """Generate names of tasks found for the current subject. Note that there may be no tasks found if the `subject_id` has not been set or if the subject hasn't started any tasks. In this case, nothing is yielded. """ if self.subject_id is None: return subj_path = os.path.join(self.root, self.subject_id) ls = os.listdir(subj_path) for name in sorted(ls): path = os.path.join(subj_path, name) if os.path.isdir(path): yield name def create_task(self, task_id): """Create a task for the current subject. Parameters ---------- task_id : str The ID of the task to add. The name must not have been used for another task for the current subject. Returns ------- writer : TaskWriter A new TaskWriter for storing task data. """ path = self._task_path(task_id) try: makedirs(path) except OSError: if self.allow_overwrite: shutil.rmtree(path) makedirs(path) else: raise ValueError( "Subject {} has already started \"{}\". Only unique task " "names are allowed.".format(self.subject_id, task_id)) return TaskWriter(path) def require_task(self, task_id): """Retrieves a task for the current subject. Parameters ---------- task_id : str The ID of the task to look for. The task must have already been run with the current subject. Returns ------- reader : TaskReader A new TaskReader for working with the existing task data. """ if task_id not in self.task_ids: raise ValueError( "Subject {} has not started \"{}\" yet. Use `create_task` to " "create it first.".format(self.subject_id, task_id)) path = self._task_path(task_id) return TaskReader(path) def to_zip(self, outfile): """Create a ZIP archive from a data storage hierarchy. For more information, see :func:`storage_to_zip`. 
""" storage_to_zip(self.root, outfile) def _task_path(self, task_id): return os.path.join(self.root, self.subject_id, task_id) # # Middle layer. Used by tasks to read/write data. # class TaskWriter(object): """The main interface for storing data from a task. Usually you get a :class:`Taskwriter` from :class:`Storage`, so you don't normally need to create one yourself. Parameters ---------- root : str Path to the task root (e.g. 'data/subject_1/taskname'). Attributes ---------- trials : TrialWriter :class:`TrialWriter` for storing trial data. """ def __init__(self, root): self.root = root self.trials = TrialWriter(_trials_path(self.root)) def write(self, trial): """Write trial data. This must be the last thing done for the current trial. That is, make sure all arrays have accumulated all data required. This method flushes trial and array data to files for you. **Important note**: The trial's arrays are cleared after writing. Parameters ---------- trial : Trial Tral data. See :meth:`TrialWriter.write` and :class:`Trial` for details. """ logging.info('saving trial {}:{}\n{}'.format( trial.attrs['block'], trial.attrs['trial'], str(trial))) self.trials.write(trial.attrs) ind = self.trials.df.index[-1] for name, array in trial.arrays.items(): path = _array_path(self.root, name) write_hdf5(path, array.data, dataset=str(ind)) array.clear() def pickle(self, obj, name): """Write a generic object to storage. This can be useful to persist an object from one task to another, or to store something that doesn't easily fit into the AxoPy storage model (trial attributes and arrays). Be cautious, however, as pickles are not the best way to store things long-term nor securely. See the advice given here, for example: http://scikit-learn.org/stable/modules/model_persistence.html Parameters ---------- obj : object The object to pickle. name : str Name of the pickle to save (no extension). """ with open(_pickle_path(self.root, name), 'wb') as f: pickle.dump(obj, f) class TaskReader(object): """High-level interface to task storage. Parameters ---------- root : str Path to task's root directory. This is the directory specific to a task which contains a ``trials.csv`` file and HDF5 array files. """ def __init__(self, root): self.root = root self._trials = None @property def trials(self): """A Pandas DataFrame representing the trial data.""" if self._trials is None: self._trials = pandas.read_csv(_trials_path(self.root)) return self._trials def iterarray(self, name): """Iteratively retrieve an array for each trial. Parameters ---------- name : str Name of the array type. """ for ind in self.trials.index: dset = str(ind) yield read_hdf5(_array_path(self.root, name), dataset=dset) def array(self, name): """Retrieve an array type's data for all trials.""" return numpy.vstack(self.iterarray(name)) def pickle(self, name): """Load a pickled object from storage. Parameters ---------- name : str Name of the pickled object (no extension). """ with open(_pickle_path(self.root, name), 'rb') as f: obj = pickle.load(f) return obj # # Lowest layer. Used by TaskReader/TaskWriter. # class TrialWriter(object): """Writes trial data to a CSV file line by line. Parameters ---------- filepath : str Path to the file to create. Attributes ---------- data : dict Dictionary containing all trial data written so far. """ def __init__(self, filepath): self.filepath = filepath self.data = {} def write(self, data): """Add a single row to the trials dataset. Data is immediately added to the file on disk. 
Parameters ---------- data : dict Data values to add. """ for col, val in data.items(): if col not in self.data: self.data[col] = [] self.data[col].append(val) self.df = pandas.DataFrame(self.data) self.df.to_csv(self.filepath, index=False) # # Utilities # def _trials_path(taskroot): return os.path.join(taskroot, 'trials.csv') def _array_path(taskroot, arrayname): return os.path.join(taskroot, '{}.hdf5'.format(arrayname)) def _pickle_path(taskroot, picklename): return os.path.join(taskroot, '{}.pkl'.format(picklename)) def read_hdf5(filepath, dataset='data'): """Read the contents of a dataset. This function assumes the dataset in the HDF5 file exists at the root of the file (i.e. at '/'). It is primarily for internal usage but you may find it useful for quickly grabbing an array from an HDF5 file. Parameters ---------- filepath : str Path to the file to read from. dataset : str, optional Name of the dataset to retrieve. By default, 'data' is used. Returns ------- data : ndarray The data (read into memory) as a NumPy array. The dtype, shape, etc. is all determined by whatever is in the file. """ with h5py.File(filepath, 'r') as f: return f.get('/{}'.format(dataset))[:] def write_hdf5(filepath, data, dataset='data'): """Write data to an hdf5 file. The data is written to a new file with a single dataset called "data" in the root group. It is primarily for internal usage but you may find it useful for quickly writing an array to an HDF5 file. Parameters ---------- filepath : str Path to the file to be written. data : ndarray NumPy array containing the data to write. The dtype, shape, etc. of the resulting dataset in storage is determined by this array directly. dataset : str, optional Name of the dataset to create. Default is 'data'. """ with h5py.File(filepath, 'a') as f: f.create_dataset(dataset, data=data) def storage_to_zip(path, outfile=None): """Create a ZIP archive from a data storage hierarchy. The contents of the data storage hierarchy are all placed in the archive, with the top-level folder in the archive being the data storage root folder itself. That is, all paths within the ZIP file are relative to the dataset root folder. Parameters ---------- path : str Path to the root of the dataset. outfile : str, optional Name of the ZIP file to create. If not specified, the file is created in the same directory as the data root with the same name as the dataset root directory (with ".zip" added). Returns ------- outfile : str The name of the ZIP file created. """ datapath, datadir = os.path.split(path) if outfile is None: # absolute path to parent of data root + dataset name + .zip outfile = os.path.join(datapath, datadir + '.zip') with zipfile.ZipFile(outfile, 'w') as zipf: for root, dirs, files in os.walk(path): for f in files: # write as *relative* path from data root, preserving subdirectories fullpath = os.path.join(root, f) relpath = os.path.relpath(fullpath, path) zipf.write(fullpath, arcname=os.path.join(datadir, relpath)) return outfile def makedirs(path, exist_ok=False): """Recursively create directories. This is needed for Python versions earlier than 3.2, otherwise ``os.makedirs(path, exist_ok=True)`` would suffice. Parameters ---------- path : str Path to directory to create. exist_ok : bool, optional If `exist_ok` is False (default), an OSError is raised if the directory already exists. Set to True if it is acceptable that the directory already exists. """ try: os.makedirs(path) except OSError: if not exist_ok: raise
ucdrascal/axopy
axopy/storage.py
Python
mit
12,668
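An end-to-end sketch of the storage API above: create a subject and task, write one trial plus an array, and read both back. axopy's own Trial type lives elsewhere, so a SimpleNamespace and a minimal array stand-in are used here; `TaskWriter.write` only needs `.attrs`, `.arrays`, and per-array `.data`/`.clear()`.

import numpy as np
from types import SimpleNamespace
from axopy.storage import Storage


class ArrayStandIn(object):
    """Minimal stand-in for axopy's array buffer."""
    def __init__(self, data):
        self.data = data

    def clear(self):
        self.data = None


store = Storage(root='demo_data', allow_overwrite=True)
store.subject_id = 'subject_0'
writer = store.create_task('reaching')
trial = SimpleNamespace(attrs={'block': 0, 'trial': 0, 'rt_ms': 412},
                        arrays={'emg': ArrayStandIn(np.random.randn(8, 100))})
writer.write(trial)

reader = store.require_task('reaching')
print(reader.trials)               # one-row DataFrame read back from trials.csv
print(reader.array('emg').shape)   # (8, 100)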
import os from pyqode.qt import QtCore from pyqode.qt import QtWidgets from pyqode.qt.QtTest import QTest import datetime from pyqode.core import modes from test.helpers import editor_open file_path = os.path.join( os.getcwd(), 'test', 'test_modes', 'file_to_watch.txt') def setup_module(): with open(file_path, 'w') as f: f.write("test file initial") def teardown_module(): os.remove(file_path) def get_mode(editor): return editor.modes.get(modes.FileWatcherMode) @editor_open(file_path) def test_enabled(editor): mode = get_mode(editor) assert mode.enabled mode.enabled = False mode.enabled = True def accept_mbox(): widgets = QtWidgets.QApplication.instance().topLevelWidgets() for w in widgets: if isinstance(w, QtWidgets.QMessageBox): QTest.keyPress(w, QtCore.Qt.Key_Space) def reject_mbox(): widgets = QtWidgets.QApplication.instance().topLevelWidgets() for w in widgets: if isinstance(w, QtWidgets.QMessageBox): QTest.keyPress(w, QtCore.Qt.Key_Escape) @editor_open(file_path) def test_modif_autoreload(editor): mode = get_mode(editor) mode.auto_reload = False mode.auto_reload = True with open(file_path, 'r') as f: with open(file_path, 'w') as f2: f2.write("test file %s" % datetime.datetime.now()) QTest.qWait(1000) @editor_open(file_path) def test_delete(editor): mode = get_mode(editor) mode.auto_reload = False os.remove(file_path) QTest.qWait(1000) with open(file_path, 'w') as f: f.write("test file initial") editor.file.open(file_path) @editor_open(file_path) def test_none_filepath(editor): mode = get_mode(editor) mode.auto_reload = False p = editor.file.path editor.file._path = None mode._update_mtime() editor.file._path = p @editor_open(file_path) def test_non_existing_file_path(editor): mode = get_mode(editor) mode.auto_reload = False p = editor.file.path editor.file._path = '/usr/blah/foo/bar.txt' mode._update_mtime() editor.file._path = p
zwadar/pyqode.core
test/test_modes/test_filewatcher.py
Python
mit
2,194
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Static() result.template = "object/static/structure/general/shared_data_terminal_wall_s2.iff" result.attribute_template_id = -1 result.stfName("obj_n","unknown_object") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
obi-two/Rebelion
data/scripts/templates/object/static/structure/general/shared_data_terminal_wall_s2.py
Python
mit
459
from __future__ import print_function, unicode_literals, division import unittest import os import re from subprocess import check_output from tempfile import mkdtemp from pyrouge import Rouge155 from pyrouge.utils.file_utils import str_from_file, xml_equal module_path = os.path.dirname(__file__) os.chdir(module_path) add_data_path = lambda p: os.path.join('data', p) check_output_clean = lambda c: check_output(c).decode("UTF-8").strip() class PyrougeTest(unittest.TestCase): def test_paths(self): rouge = Rouge155() def get_home_from_settings(): with open(rouge.settings_file) as f: for line in f.readlines(): if line.startswith("home_dir"): rouge_home = line.split("=")[1].strip() return rouge_home self.assertEqual(rouge.home_dir, get_home_from_settings()) self.assertTrue(os.path.exists(rouge.bin_path)) self.assertTrue(os.path.exists(rouge.data_dir)) wrong_path = "/nonexisting/path/rewafafkljaerearjafankwe3" with self.assertRaises(Exception) as context: rouge.system_dir = wrong_path self.assertEqual( str(context.exception), "Cannot set {} directory because the path {} does not " "exist.".format("system", wrong_path)) right_path = add_data_path("systems") rouge.system_dir = right_path self.assertEqual(rouge.system_dir, right_path) with self.assertRaises(Exception) as context: rouge.model_dir = wrong_path self.assertEqual( str(context.exception), "Cannot set {} directory because the path {} does not " "exist.".format("model", wrong_path)) right_path = add_data_path("models") rouge.model_dir = right_path self.assertEqual(rouge.model_dir, right_path) def test_wrong_system_pattern(self): wrong_regexp = "adfdas454fd" rouge = Rouge155() rouge.system_dir = add_data_path("systems") rouge.model_dir = add_data_path("models") #rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html" rouge.system_filename_pattern = wrong_regexp rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html" with self.assertRaises(Exception) as context: rouge.evaluate() self.assertEqual( str(context.exception), "Did not find any files matching the pattern {} in the system " "summaries directory {}.".format(wrong_regexp, rouge.system_dir)) def test_wrong_model_pattern(self): rouge = Rouge155() rouge.system_dir = add_data_path("systems") rouge.model_dir = add_data_path("models_plain") rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html" rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html" with self.assertRaises(Exception) as context: rouge.evaluate() match_string = ( r"Could not find any model summaries for the system " r"summary with ID " + "(\d+)" + r". 
Specified model filename " r"pattern was: " + re.escape(rouge.model_filename_pattern)) try: assert_regex = self.assertRegex except AttributeError: assert_regex = self.assertRegexpMatches assert_regex(str(context.exception), re.compile(match_string)) def test_text_conversion(self): rouge = Rouge155() text = str_from_file(add_data_path("spl_test_doc")) html = rouge.convert_text_to_rouge_format(text, "D00000.M.100.A.C") target = str_from_file(add_data_path("spl_test_doc.html")) self.assertEqual(html, target) # only run this test if BeautifulSoup is installed try: from bs4 import BeautifulSoup def test_get_plain_text(self): input_dir = add_data_path("SL2003_models_rouge_format") output_dir = mkdtemp() target_dir = add_data_path("SL2003_models_plain_text") command = ( "pyrouge_convert_rouge_format_to_plain_text " "-i {} -o {}".format(input_dir, output_dir)) check_output(command.split()) filenames = os.listdir(input_dir) for filename in filenames: output_file = os.path.join(output_dir, filename) output = str_from_file(output_file) target_file = os.path.join(target_dir, filename) target = str_from_file(target_file) self.assertEqual(output, target) except ImportError: pass def test_convert_summaries(self): input_dir = add_data_path("SL2003_models_plain_text") output_dir = mkdtemp() target_dir = add_data_path("SL2003_models_rouge_format") command = ( "pyrouge_convert_plain_text_to_rouge_format -i {} -o {}".format( input_dir, output_dir)) check_output(command.split()) filenames = os.listdir(input_dir) for filename in filenames: output_file = os.path.join(output_dir, filename) output = str_from_file(output_file) target_file = os.path.join(target_dir, filename) target = str_from_file(target_file) filename = filename.replace(".html", "") target = target.replace(filename, "dummy title") self.assertEqual(output, target, filename) def test_config_file(self): rouge = Rouge155() rouge.system_dir = add_data_path("systems") rouge.model_dir = add_data_path("models") rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html" rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html" rouge.config_file = add_data_path("config_test.xml") rouge.write_config(system_id=11) self.assertTrue(xml_equal( rouge.config_file, add_data_path("ROUGE-test_11.xml"))) os.remove(rouge.config_file) def test_evaluation(self): rouge = Rouge155() rouge.system_dir = add_data_path("systems") rouge.model_dir = add_data_path("models") rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html" rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html" pyrouge_output = rouge.evaluate(system_id=11).strip() rouge_command = ( "{bin} -e {data} -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 " "-a -m {xml}".format( bin=rouge.bin_path, data=rouge.data_dir, xml=add_data_path("ROUGE-test_11.xml"))) orig_rouge_output = check_output_clean(rouge_command.split()) self.assertEqual(pyrouge_output, orig_rouge_output) def test_rouge_for_plain_text(self): model_dir = add_data_path("models_plain") system_dir = add_data_path("systems_plain") pyrouge_command = ( "pyrouge_evaluate_plain_text_files -m {} -s {} -sfp " "D(\d+).M.100.T.A -mfp D#ID#.M.100.T.[A-Z] -id 1".format( model_dir, system_dir)) pyrouge_output = check_output_clean(pyrouge_command.split()) rouge = Rouge155() config_file = add_data_path("config_test2.xml") rouge_command = ( "{bin} -e {data} -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 " "-a -m {xml}".format( bin=rouge.bin_path, data=rouge.data_dir, xml=config_file)) orig_rouge_output = check_output_clean(rouge_command.split()) 
self.assertEqual(pyrouge_output, orig_rouge_output) def test_write_config(self): system_dir = add_data_path("systems") model_dir = add_data_path("models") system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html" model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html" config_file = os.path.join(mkdtemp(), "config_test.xml") command = ( "pyrouge_write_config_file -m {m} -s {s} " "-mfp {mfp} -sfp {sfp} -c {c}".format( m=model_dir, s=system_dir, mfp=model_filename_pattern, sfp=system_filename_pattern, c=config_file)) check_output(command.split()) target_xml = add_data_path("config_test.xml") print(config_file, target_xml) self.assertTrue(xml_equal(config_file, target_xml)) def test_options(self): rouge = Rouge155() model_dir = add_data_path("models_plain") system_dir = add_data_path("systems_plain") config_file = add_data_path("config_test2.xml") command_part1 = ( "pyrouge_evaluate_plain_text_files -m {} -s {} -sfp " "D(\d+).M.100.T.A -mfp D#ID#.M.100.T.[A-Z] -id 1 -rargs".format( model_dir, system_dir)) command_part2 = [ "\"-e {data} -c 90 -2 -1 -U -r 1000 -n 2 -w 1.2 " "-a -m {xml}\"".format( data=rouge.data_dir, xml=config_file)] pyrouge_command = command_part1.split() + command_part2 pyrouge_output = check_output_clean(pyrouge_command) rouge_command = ( "{bin} -e {data} -c 90 -2 -1 -U -r 1000 -n 2 -w 1.2 " "-a -m {xml}".format( bin=rouge.bin_path, data=rouge.data_dir, xml=config_file)) orig_rouge_output = check_output_clean(rouge_command.split()) self.assertEqual(pyrouge_output, orig_rouge_output) def main(): unittest.main() if __name__ == "__main__": main()
bheinzerling/pyrouge
pyrouge/tests/Rouge155_test.py
Python
mit
9,629
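For orientation, this is the plain usage pattern the tests above exercise; the directories and filename patterns are the test fixtures', and a local ROUGE-1.5.5 installation configured for pyrouge is assumed.

from pyrouge import Rouge155

rouge = Rouge155()
rouge.system_dir = 'data/systems'
rouge.model_dir = 'data/models'
rouge.system_filename_pattern = r'SL.P.10.R.11.SL062003-(\d+).html'
rouge.model_filename_pattern = 'SL.P.10.R.[A-D].SL062003-#ID#.html'

output = rouge.evaluate(system_id=11)
print(output)                        # raw ROUGE report, as compared in test_evaluation
print(rouge.output_to_dict(output))  # the same scores parsed into a dict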
from __future__ import division import numpy as np from six.moves import range def normalize(theta, start=0): """ Normalize an angle to be in the range :math:`[0, 2\pi]` Parameters ----------- theta : float input angle to normalize start: float input start angle (optional, default: 0.0) Returns -------- res : float normalized angle or :math:`\infty` """ if theta < np.inf: while theta >= start + 2 * np.pi: theta -= 2 * np.pi while theta < start: theta += 2 * np.pi return theta else: return np.inf def addangles(alpha, beta): """ Add two angles Parameters ---------- alpha : float Augend (in radians) beta : float Addend (in radians) Returns ------- sum : float Sum (in radians, normalized to [0, 2pi]) """ return normalize(alpha + beta, start=0) def subangles(alpha, beta): """ Subtract one angle from another Parameters ---------- alpha : float Minuend (in radians) beta : float Subtrahend (in radians) Returns ------- delta : float Difference (in radians, normalized to [0, 2pi]) """ delta = 0 if alpha < np.inf and beta < np.inf: alpha = normalize(alpha, start=0) beta = normalize(beta, start=0) delta = alpha - beta if alpha > beta: while delta > np.pi: delta -= 2 * np.pi elif beta > alpha: while delta < -np.pi: delta += 2 * np.pi else: delta = np.inf return delta def edist(v1, v2): """ Euclidean distance between the two poses Parameters ----------- v1, v2 : array-like vector of poses Returns ----------- dist : float distance between v1 and v2 """ return np.hypot((v1[0] - v2[0]), (v1[1] - v2[1])) def adist(focal_agent, other_agent, ak=2.48, bk=1.0, lambda_=0.4, rij=0.9): """ Anisotropic distance between two oriented poses Anisotropic distance based on the Social Force Model (SFM) [TODO - cite] model of pedestrian dynamics. .. math:: a \cdot b \exp{\left(\\frac{r_{ij} - d_{ij}}{b}\\right)} \mathbf{n}_{ij} \left(\lambda + (1 - \lambda) \\frac{1 + \cos(\\varphi_{ij})}{2}\\right) Parameters ----------- focal_agent, other_agent : array-like Vector of poses (including orientation information as vx, vy) ak, bk, lambda_, rij : float Parameters of the anisotropic model Returns ---------- dist : float Distance between the two poses """ ei = np.array([-focal_agent[2], -focal_agent[3]]) length_ei = np.linalg.norm(ei) if length_ei > 1e-24: ei = ei / length_ei phi = np.arctan2(other_agent[1] - focal_agent[1], other_agent[0] - focal_agent[0]) dij = edist(focal_agent, other_agent) nij = np.array([np.cos(phi), np.sin(phi)]) ns = 2 alpha = ak * np.exp((rij - dij) / bk) * nij beta_ = np.tile(np.ones(shape=(1, ns)) * lambda_ + ((1 - lambda_) * (np.ones(shape=(1, ns)) - (np.dot(nij.T, ei)).T) / 2.), [1, 1]) curve = np.multiply(alpha, beta_).T dc = np.hypot(curve[0], curve[1]) return dc def distance_to_segment(point, line_segment): """ Distance from a point to a line segment Compute the distance from a point to a line segment all in 2D. 
Additionally return a flag indicating if the point lies within the boundary of the two perpendicular lines at the line segment ends Parameters ----------- point : array-like Point in 2D, (x, y) line_segment : tuple of array-like Pair (line_start, line_end) with the coordinates of the start and end points of the line segment in 2D Returns -------- dist : float or None Float value if the point is 'inside' the line segment, else None inside : bool Flag indicating if the point is 'inside' the line segment """ line_start, line_end = line_segment xa, ya = line_start[0], line_start[1] xb, yb = line_end[0], line_end[1] xp, yp = point[0], point[1] # x-coordinates A = xb-xa B = yb-ya C = yp*B+xp*A a = 2*((B*B)+(A*A)) b = -4*A*C+(2*yp+ya+yb)*A*B-(2*xp+xa+xb)*(B*B) c = 2*(C*C)-(2*yp+ya+yb)*C*B+(yp*(ya+yb)+xp*(xa+xb))*(B*B) if b*b < 4*a*c: return None, False x1 = (-b + np.sqrt((b*b)-4*a*c))/(2*a) x2 = (-b - np.sqrt((b*b)-4*a*c))/(2*a) # y-coordinates A = yb-ya B = xb-xa C = xp*B+yp*A a = 2*((B*B)+(A*A)) b = -4*A*C+(2*xp+xa+xb)*A*B-(2*yp+ya+yb)*(B*B) c = 2*(C*C)-(2*xp+xa+xb)*C*B+(xp*(xa+xb)+yp*(ya+yb))*(B*B) if b*b < 4*a*c: return None, False y1 = (-b + np.sqrt((b*b)-4*a*c))/(2*a) y2 = (-b - np.sqrt((b*b)-4*a*c))/(2*a) # Put point candidates together (all four x/y combinations) candidates = ((x1, y1), (x2, y2), (x1, y2), (x2, y1)) distances = (edist(candidates[0], point), edist(candidates[1], point), edist(candidates[2], point), edist(candidates[3], point)) max_index = np.argmax(distances) cand = candidates[max_index] dist = distances[max_index] start_cand = (line_start[0]-cand[0], line_start[1]-cand[1]) end_cand = (line_end[0]-cand[0], line_end[1]-cand[1]) dotp = (start_cand[0] * end_cand[0]) + (start_cand[1] * end_cand[1]) inside = False if dotp <= 0.0: inside = True return dist, inside def extract_relations(persons, groups): """Extract relation links from grouping information Given poses of persons and grouping information in form of person ids per group, this method extracts line segments representing the relation links between the persons. Parameters ---------- persons : dict Dictionary of person poses indexed by id groups : array-like 2D array with each row containing ids of a pairwise grouping. For groups with more than 2 persons, multiple rows are used to represent every pairing possible Returns -------- elines : array-like An array of line segments, each represented by a tuple of start and end points """ min_id = np.amin(groups) elines = [] for [i, j] in groups: line = ((persons[i-min_id][0], persons[i-min_id][1]), (persons[j-min_id][0], persons[j-min_id][1])) elines.append(line) return elines def dtw(x, y, dist=lambda x, y: np.linalg.norm(x - y, ord=1)): """ Computes the dtw between two signals. Adapted from: https://github.com/pierre-rouanet/dtw/blob/master/dtw.py """ x = np.array(x) if len(x.shape) == 1: x = x.reshape(-1, 1) y = np.array(y) if len(y.shape) == 1: y = y.reshape(-1, 1) r, c = len(x), len(y) D = np.zeros((r + 1, c + 1)) D[0, 1:] = np.inf D[1:, 0] = np.inf for i in range(r): for j in range(c): D[i+1, j+1] = dist(x[i], y[j]) for i in range(r): for j in range(c): D[i+1, j+1] += min(D[i, j], D[i, j+1], D[i+1, j]) D = D[1:, 1:] dist = D[-1, -1] / sum(D.shape) return dist, D, _track_back(D) def _track_back(D): i, j = np.array(D.shape) - 1 p, q = [i], [j] while i > 0 and j > 0: tb = np.argmin((D[i-1, j-1], D[i-1, j], D[i, j-1])) if tb == 0: i -= 1 j -= 1 elif tb == 1: i -= 1 elif tb == 2: j -= 1 p.insert(0, i) q.insert(0, j) p.insert(0, 0) q.insert(0, 0) return (np.array(p), np.array(q))
srl-freiburg/navmet
navmet/utils.py
Python
mit
7,719
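A few self-contained checks of the helpers above; everything here uses only functions defined in this module, so the printed values follow directly from the definitions.

import numpy as np
from navmet.utils import normalize, subangles, edist, dtw

print(normalize(-np.pi / 2))             # 4.712..., i.e. folded into [0, 2*pi)
print(subangles(0.1, 2 * np.pi - 0.1))   # ~0.2, the shortest signed difference
print(edist((0, 0), (3, 4)))             # 5.0

a = [0, 1, 2, 3, 2, 1]
b = [0, 0, 1, 2, 3, 2, 1, 0]
cost, D, (px, py) = dtw(a, b)
print(round(cost, 3))                    # normalized alignment cost between the signals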
#!/usr/bin/env python3 # # linearize-hashes.py: List blocks in a linear, no-fork version of the chain. # # Copyright (c) 2013-2016 The Bitsend Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # from __future__ import print_function try: # Python 3 import http.client as httplib except ImportError: # Python 2 import httplib import json import re import base64 import sys settings = {} ##### Switch endian-ness ##### def hex_switchEndian(s): """ Switches the endianness of a hex string (in pairs of hex chars) """ pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)] return b''.join(pairList[::-1]).decode() class BitsendRPC: def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) authpair = authpair.encode('utf-8') self.authhdr = b"Basic " + base64.b64encode(authpair) self.conn = httplib.HTTPConnection(host, port=port, timeout=30) def execute(self, obj): try: self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) except ConnectionRefusedError: print('RPC connection refused. Check RPC settings and the server status.', file=sys.stderr) return None resp = self.conn.getresponse() if resp is None: print("JSON-RPC: no response", file=sys.stderr) return None body = resp.read().decode('utf-8') resp_obj = json.loads(body) return resp_obj @staticmethod def build_request(idx, method, params): obj = { 'version' : '1.1', 'method' : method, 'id' : idx } if params is None: obj['params'] = [] else: obj['params'] = params return obj @staticmethod def response_is_error(resp_obj): return 'error' in resp_obj and resp_obj['error'] is not None def get_block_hashes(settings, max_blocks_per_call=10000): rpc = BitsendRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpassword']) height = settings['min_height'] while height < settings['max_height']+1: num_blocks = min(settings['max_height']+1-height, max_blocks_per_call) batch = [] for x in range(num_blocks): batch.append(rpc.build_request(x, 'getblockhash', [height + x])) reply = rpc.execute(batch) if reply is None: print('Cannot continue. 
Program will halt.') return None for x,resp_obj in enumerate(reply): if rpc.response_is_error(resp_obj): print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr) exit(1) assert(resp_obj['id'] == x) # assume replies are in-sequence if settings['rev_hash_bytes'] == 'true': resp_obj['result'] = hex_switchEndian(resp_obj['result']) print(resp_obj['result']) height += num_blocks if __name__ == '__main__': if len(sys.argv) != 2: print("Usage: linearize-hashes.py CONFIG-FILE") sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 8332 if 'min_height' not in settings: settings['min_height'] = 0 if 'max_height' not in settings: settings['max_height'] = 313000 if 'rev_hash_bytes' not in settings: settings['rev_hash_bytes'] = 'false' if 'rpcuser' not in settings or 'rpcpassword' not in settings: print("Missing username and/or password in cfg file", file=sys.stderr) sys.exit(1) settings['port'] = int(settings['port']) settings['min_height'] = int(settings['min_height']) settings['max_height'] = int(settings['max_height']) # Force hash byte format setting to be lowercase to make comparisons easier. settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower() get_block_hashes(settings)
madzebra/BitSend
contrib/linearize/linearize-hashes.py
Python
mit
3,974
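The script expects a simple key=value CONFIG-FILE; here is an example with every key the parser above recognizes. All values are placeholders, and the port and credentials in particular must match your own node's RPC settings.

# linearize.cfg -- example configuration (placeholder values)
host=127.0.0.1
port=8332
rpcuser=your-rpc-user
rpcpassword=your-rpc-password
min_height=0
max_height=313000
rev_hash_bytes=false

Then run it as: python3 linearize-hashes.py linearize.cfg > hashlist.txt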
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Result', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('key', models.IntegerField(blank=True)), ('data', models.TextField(blank=True)), ], ), ]
nalabelle/druid-django
frontend/migrations/0001_initial.py
Python
mit
536
from random import randint board = [] for x in range(5): board.append(["O"] * 5) def print_board(board): for row in board: print " ".join(row) print "Let's play Battleship!" print_board(board) def random_row(board): return randint(0, len(board) - 1) def random_col(board): return randint(0, len(board[0]) - 1) ship_row = random_row(board) ship_col = random_col(board) print ship_row print ship_col for turn in range(4): print "Turn", turn + 1 guess_row = int(raw_input("Guess Row:")) guess_col = int(raw_input("Guess Col:")) if guess_row == ship_row and guess_col == ship_col: print "Congratulations! You sunk my battleship!" break else: if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4): print "Oops, that's not even in the ocean." elif(board[guess_row][guess_col] == "X"): print "You guessed that one already." else: print "You missed my battleship!" board[guess_row][guess_col] = "X" print_board(board) if turn == 3: print "Game Over"
vpstudios/Codecademy-Exercise-Answers
Language Skills/Python/Unit 7/2-Battleship!/You Sunk my Battleship/18-To Your Battle Stations.py
Python
mit
1,124
from numpy import array from numpy.testing import assert_allclose from glimix_core.cov import FreeFormCov def test_freeformcov(): cov = FreeFormCov(3) L = array([[1.5, 0, 0], [2.5, 0.8, 0], [-0.3, 0.4, 2.1]], float) cov.L = L assert_allclose(cov.value(), L @ L.T, rtol=1e-4) assert_allclose(cov._check_grad(), 0, atol=1e-5) assert_allclose(cov.logdet(), 1.8486202109176444) cov = FreeFormCov(2) L = array([[1, 0], [2.5, 1]], float) cov.L = L assert_allclose(cov.value(), L @ L.T, rtol=1e-4) assert_allclose(cov._check_grad(), 0, atol=1e-5) assert_allclose(cov.logdet(), 0.00012292724603080174) cov = FreeFormCov(1) L = array([[0.5]], float) cov.L = L assert_allclose(cov.value(), L @ L.T, rtol=1e-4) assert_allclose(cov._check_grad(), 0, atol=1e-5) assert_allclose(cov.logdet(), -1.3862347582514014)
limix/glimix-core
glimix_core/cov/test/test_cov_free.py
Python
mit
878
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/loot/quest/shared_valarian_dagger.iff" result.attribute_template_id = -1 result.stfName("item_n","valarian_dagger") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
anhstudios/swganh
data/scripts/templates/object/tangible/loot/quest/shared_valarian_dagger.py
Python
mit
452
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.core.validators import django.contrib.auth.models from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('auth', '0006_require_contenttypes_0002'), ('bots', '0001_initial'), ('accounts', '0001_initial'), ] operations = [ migrations.AlterModelManagers( name='user', managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.AddField( model_name='membership', name='channel', field=models.ForeignKey(default=1, to='bots.Channel'), preserve_default=False, ), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL), preserve_default=False, ), migrations.AddField( model_name='user', name='groups', field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'), ), migrations.AddField( model_name='user', name='user_permissions', field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions'), ), migrations.AlterField( model_name='user', name='email', field=models.EmailField(max_length=254, verbose_name='email address', blank=True), ), migrations.AlterField( model_name='user', name='last_login', field=models.DateTimeField(null=True, verbose_name='last login', blank=True), ), migrations.AlterField( model_name='user', name='username', field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'), ), migrations.AlterUniqueTogether( name='membership', unique_together=set([('user', 'channel')]), ), ]
metabrainz/botbot-web
botbot/apps/accounts/migrations/0002_auto_20150630_1459.py
Python
mit
2,751
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Direct unit tests for L{twisted.trial.unittest.TestCase}. """ from twisted.trial.unittest import TestCase class TestCaseTests(TestCase): """ L{TestCase} tests. """ class MyTestCase(TestCase): """ Some test methods which can be used to test behaviors of L{TestCase}. """ def test_1(self): pass def setUp(self): """ Create a couple instances of C{MyTestCase}, each for the same test method, to be used in the test methods of this class. """ self.first = self.MyTestCase('test_1') self.second = self.MyTestCase('test_1') def test_equality(self): """ In order for one test method to be runnable twice, two TestCase instances with the same test method name must not compare as equal. """ self.assertTrue(self.first == self.first) self.assertTrue(self.first != self.second) self.assertFalse(self.first == self.second) def test_hashability(self): """ In order for one test method to be runnable twice, two TestCase instances with the same test method name should not have the same hash value. """ container = {} container[self.first] = None container[self.second] = None self.assertEqual(len(container), 2)
Varriount/Colliberation
libs/twisted/trial/test/test_testcase.py
Python
mit
1,449
import unittest import checksieve class TestStrings(unittest.TestCase): def test_multiline_strings(self): sieve = ''' require "vacation"; vacation :mime "Content-Type: multipart/alternative; boundary=foo --foo I'm at the beach relaxing. Mmmm, surf... --foo Content-Type: text/html; charset=us-ascii <!DOCTYPE HTML PUBLIC \\"-//W3C//DTD HTML 4.0//EN\\" \\"http://www.w3.org/TR/REC-html40/strict.dtd\\"> <HTML><HEAD><TITLE>How to relax</TITLE> <BASE HREF=\\"http://home.example.com/pictures/\\"></HEAD> <BODY><P>I'm at the <A HREF=\\"beach.gif\\">beach</A> relaxing. Mmmm, <A HREF=\\"ocean.gif\\">surf</A>... </BODY></HTML> --foo-- "; ''' self.assertFalse(checksieve.parse_string(sieve, False)) if __name__ == '__main__': unittest.main()
dburkart/check-sieve
test/3028/strings_test.py
Python
mit
973
#! /usr/bin/env python # -*- coding: utf8 -*- import os,sys predoi="10.1021/" pos1str='<notes><style face="normal" font="default" size="100%">' pos1len=len(pos1str) pos2str='Times Cited' pos2len=len(pos2str) doistr='<electronic-resource-num><style face="normal" font="default" size="100%">' doistrlen=len(doistr) substr=False def processdoi(stri): pos1=stri.find(doistr) pos2=stri.find('</style></electronic-resource-num>') if (pos1 == -1 or pos2 == -1): return stri dois=stri[pos1+doistrlen:pos2] pos3=dois.find("10.") if (pos3 >= 0): newdoi=dois[pos3:].lower().strip() return stri[:pos1+doistrlen]+newdoi+stri[pos2:] else: return stri if (__name__ == '__main__'): fname=sys.argv[1] fnamelist=os.path.splitext(fname) fwname=fnamelist[0]+"_new"+fnamelist[1] fr=open(fname) all=fr.read() fr.close() fw=open(fwname,'w') length=len(all) prepos1=0; pos1=0;pos2=0 while True: prepos1=pos1; pos1=all.find(pos1str,pos2) writestr="" if (pos1 == -1): break elif ((pos1-pos2)>50): fw.write(processdoi(all[pos2:pos1+pos1len])) else: fw.write(processdoi(all[prepos1:pos1+pos1len])) try: pos2=all.find(pos2str,pos1) if (substr): #oristr=all[pos1+pos1len:pos2] fw.write(substr) except: pass #fw.write(all[pos1+pos1len:pos2]) #last part fw.write(processdoi(all[pos2:])) fw.close()
OAPDF/oapdftools
oapdf/GrepPDF/modifyXML.py
Python
mit
1,356
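A behaviour check for processdoi() above: it lowercases and trims whatever sits between the EndNote XML markers, leaving the rest of the record untouched. The record text is invented for illustration.

sample = ('<electronic-resource-num><style face="normal" font="default" size="100%">'
          '10.1021/JA00123A456 </style></electronic-resource-num>')
print(processdoi(sample))
# -> the same markup with the DOI normalized to '10.1021/ja00123a456'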
# -*- coding: utf-8 -*- import sys from PyQt4 import QtCore, QtGui from pygments import highlight from pygments.lexers import * from pygments.formatter import Formatter import time # Copyright (C) 2008 Christophe Kibleur <kib2@free.fr> # # This file is part of WikiParser (http://thewikiblog.appspot.com/). # def hex2QColor(c): r=int(c[0:2],16) g=int(c[2:4],16) b=int(c[4:6],16) return QtGui.QColor(r,g,b) class QFormatter(Formatter): def __init__(self): Formatter.__init__(self) self.data=[] # Create a dictionary of text styles, indexed # by pygments token names, containing QTextCharFormat # instances according to pygments' description # of each style self.styles={} for token, style in self.style: qtf=QtGui.QTextCharFormat() if style['color']: qtf.setForeground(hex2QColor(style['color'])) if style['bgcolor']: qtf.setBackground(hex2QColor(style['bgcolor'])) if style['bold']: qtf.setFontWeight(QtGui.QFont.Bold) if style['italic']: qtf.setFontItalic(True) if style['underline']: qtf.setFontUnderline(True) self.styles[str(token)]=qtf def format(self, tokensource, outfile): # We ignore outfile, keep output in a buffer self.data=[] # Just store a list of styles, one for each character # in the input. Obviously a smarter thing with # offsets and lengths is a good idea! for ttype, value in tokensource: l=len(value) t=str(ttype) self.data.extend([self.styles[t],]*l) class Highlighter(QtGui.QSyntaxHighlighter): def __init__(self, parent, mode): QtGui.QSyntaxHighlighter.__init__(self, parent) self.tstamp=time.time() # Keep the formatter and lexer, initializing them # may be costly. self.formatter=QFormatter() self.lexer=get_lexer_by_name(mode) self.enabled=True # Connect the document's contentChanged to our # reparse slot self.connect(parent,QtCore.SIGNAL("contentsChanged()"), self.reparse) self.reparse() def reparse(self): # The \n is not really needed, but sometimes # you are in an empty last block, so your position is # **after** the end of the document. text=str(self.document().toPlainText())+'\n' # Yes, re-highlight the whole document. # There **must** be some optimization possibilities # but it seems fast enough. highlight(text,self.lexer,self.formatter) def highlightBlock(self, text): """Takes a block and applies formatting to the document according to what's in it. """ if not self.enabled: return # I need to know where in the document we are, # because our formatting info is global to # the document cb = self.currentBlock() p = cb.position() # Just apply the formatting to this block. # For titles, it may be necessary to backtrack # and format a couple of blocks **earlier**. for i in range(len(str(text))): try: self.setFormat(i,1,self.formatter.data[p+i]) except IndexError: pass # I may need to do something about this being called # too quickly. self.tstamp=time.time() if __name__ == "__main__": app = QtGui.QApplication(sys.argv) rst = QtGui.QPlainTextEdit() rst.setWindowTitle('reSt') hl=Highlighter(rst.document(),"rest") rst.show() python = QtGui.QPlainTextEdit() python.setWindowTitle('python') hl=Highlighter(python.document(),"python") python.show() sys.exit(app.exec_())
aquavitae/rst2pdf
gui/highlighter.py
Python
mit
4,089
#!/usr/bin/env python # Accelerator for pip, the Python package manager. # # Author: Peter Odding <peter.odding@paylogic.com> # Last Change: May 17, 2016 # URL: https://github.com/paylogic/pip-accel """Setup script for the `pip-accel` package.""" # Standard library modules. import codecs import os import re # De-facto standard solution for Python packaging. from setuptools import setup, find_packages def get_readme(): """Get the contents of the ``README.rst`` file as a Unicode string.""" with codecs.open(get_absolute_path('README.rst'), 'r', 'utf-8') as handle: return handle.read() def get_version(*args): """Get the package's version (by extracting it from the source code).""" module_path = get_absolute_path(*args) with open(module_path) as handle: for line in handle: match = re.match(r'^__version__\s*=\s*["\']([^"\']+)["\']$', line) if match: return match.group(1) raise Exception("Failed to extract version from %s!" % module_path) def get_requirements(*args): """Get requirements from pip requirement files.""" requirements = set() with open(get_absolute_path(*args)) as handle: for line in handle: # Strip comments. line = re.sub(r'^#.*|\s#.*', '', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\s+', '', line)) return sorted(requirements) def get_absolute_path(*args): """Transform relative pathnames into absolute pathnames.""" directory = os.path.dirname(os.path.abspath(__file__)) return os.path.join(directory, *args) setup(name='pip-accel', version=get_version('pip_accel', '__init__.py'), description='Accelerator for pip, the Python package manager', long_description=get_readme(), author='Peter Odding', author_email='peter.odding@paylogic.com', url='https://github.com/paylogic/pip-accel', packages=find_packages(), entry_points={ 'console_scripts': ['pip-accel = pip_accel.cli:main'], 'pip_accel.cache_backends': [ # The default cache backend (uses the local file system). 'local = pip_accel.caches.local', # An optional cache backend that uses Amazon S3. 's3 = pip_accel.caches.s3 [s3]', ], }, extras_require={'s3': 'boto >= 2.32'}, package_data={'pip_accel.deps': ['*.ini']}, install_requires=get_requirements('requirements.txt'), test_suite='pip_accel.tests', tests_require=get_requirements('requirements-testing.txt'), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux', 'Operating System :: Unix', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Archiving :: Packaging', 'Topic :: System :: Installation/Setup', 'Topic :: System :: Software Distribution', ])
pombredanne/pip-accel
setup.py
Python
mit
3,671
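The version-extraction regex in get_version() above is easy to sanity-check in isolation:

import re

line = "__version__ = '0.43'"
match = re.match(r'^__version__\s*=\s*["\']([^"\']+)["\']$', line)
print(match.group(1))  # -> 0.43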
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Intangible() result.template = "object/intangible/pet/shared_malkloc_hue.iff" result.attribute_template_id = -1 result.stfName("","") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
anhstudios/swganh
data/scripts/templates/object/intangible/pet/shared_malkloc_hue.py
Python
mit
424
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ManagedClusterUpgradeProfile(Model): """The list of available upgrades for compute pools. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Id of upgrade profile. :vartype id: str :ivar name: Name of upgrade profile. :vartype name: str :ivar type: Type of upgrade profile. :vartype type: str :param control_plane_profile: The list of available upgrade versions for the control plane. :type control_plane_profile: ~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile :param agent_pool_profiles: The list of available upgrade versions for agent pools. :type agent_pool_profiles: list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'control_plane_profile': {'required': True}, 'agent_pool_profiles': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'control_plane_profile': {'key': 'properties.controlPlaneProfile', 'type': 'ManagedClusterPoolUpgradeProfile'}, 'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterPoolUpgradeProfile]'}, } def __init__(self, control_plane_profile, agent_pool_profiles): super(ManagedClusterUpgradeProfile, self).__init__() self.id = None self.name = None self.type = None self.control_plane_profile = control_plane_profile self.agent_pool_profiles = agent_pool_profiles
lmazuel/azure-sdk-for-python
azure-mgmt-containerservice/azure/mgmt/containerservice/models/managed_cluster_upgrade_profile.py
Python
mit
2,281
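Constructing the model directly, for illustration only; in practice the SDK deserializes these from service responses, and the placeholder dicts below would be ManagedClusterPoolUpgradeProfile instances. The import path is assumed from this file's location.

from azure.mgmt.containerservice.models import ManagedClusterUpgradeProfile

pool = {'kubernetes_version': '1.9.6', 'upgrades': ['1.10.3']}  # placeholder payload
profile = ManagedClusterUpgradeProfile(control_plane_profile=pool,
                                       agent_pool_profiles=[pool])
print(profile.control_plane_profile)
# The read-only id/name/type attributes stay None until populated by the server.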
def entrada(lista): while True: ad = (int(input('Enter a value: '))) if ad not in lista: lista.append(ad) print('Added successfully!') else: print('Duplicate value. Addition refused.') while True: ask = str(input('Continue? [Y/N] ')).strip().upper()[0] if ask == 'Y': break elif ask == 'N': return lista = [] entrada(lista) print(lista) #https://pt.stackoverflow.com/q/352398/101
bigown/SOpt
Python/Algorithm/WhileBreak.py
Python
mit
533
import json import httplib from django.http import HttpResponse from Bio.Alphabet import IUPAC from hippo import clean_sequence import features import orfs import gb def _post(params, is_ajax): """ Post a sequence and run the sequence through blast and orf detection. Expects: db and sequence Response: JSON list of features """ from hippo.models import Feature_Database is_gb = False db_name = params['db'].strip() db = Feature_Database.objects.get(name=db_name) sequence = params['sequence'] gb_features = [] # parse genbank if sequence.strip().startswith('LOCUS'): is_gb = True try: sequence, gb_features = gb.parse_genbank(sequence.lstrip()) except Exception as e: sequence = "" gb_features = [] # clean sequence input_type = params['input'] if 'input' in params else 'dna' if input_type in ['protein']: sequence = clean_sequence(sequence, alphabet=IUPAC.protein) else: sequence = clean_sequence(sequence) feature_list = gb_features gbonly = 'gbonly' in params and params['gbonly'] in ['1', 'true', 'True'] blastonly = 'blastonly' in params and params['blastonly'] in ['1', 'true', 'True'] if not is_gb or gbonly is False: args = {} if 'identity_threshold' in params: args['identity_threshold'] = float(params['identity_threshold']) if 'feature_threshold' in params: args['feature_threshold'] = float(params['feature_threshold']) circular = True if 'circular' in params and str(params['circular']).strip().lower() in ['false', 0, '0']: circular = False # feature detection feature_list += features.blast(sequence, db, input_type=input_type, protein=False, circular=circular, **args) feature_list += features.blast(sequence, db, input_type=input_type, protein=True, circular=circular, **args) if input_type == 'dna' and blastonly is False: # restriction site search feature_list += features.find_restriction_sites(sequence, circular=circular) # ORFs and tags orf_list, tag_list = orfs.detect_orfs_and_tags(sequence, circular=circular) feature_list += orf_list feature_list += tag_list res = [x.to_dict() for x in feature_list] # print 'returning %s' % (res,) # now sort everything by start res.sort(cmp=lambda x,y:cmp(int(x['query_start']),int(y['query_start']))) res = [len(sequence),res,sequence] j = json.JSONEncoder().encode(res) if 'jsonp' in params: j = params['jsonp']+'('+j+')' http_res = HttpResponse(j,mimetype="text/javascript",status=httplib.OK) else: # technically we should be returning "application/json", but in that # case browsers force user to download into a file, and for debugging # we want to be able to see the JSON list in browser. looks like most # browsers will handle JSON sent back as text/html anyways. if is_ajax: http_res = HttpResponse(j,mimetype="application/json",status=httplib.OK) else: http_res = HttpResponse(j,status=httplib.OK) # allow cross origin API calls http_res['Access-Control-Allow-Origin'] = '*' http_res['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS' http_res['Access-Control-Max-Age'] = 1000 return http_res def post(request): if 'sequence' in request.REQUEST: params = request.REQUEST else: params = json.loads(request.body) return _post(params, request.is_ajax()) def _blast2(params, is_ajax): """ Post query and subject sequences, returns alignment of the two using blastn. 
Expects: query, subject Response: JSON dictionary with subject and query strings """ if (not 'subject' in params) or (not 'query' in params): res = [] else: subject = clean_sequence(params['subject']) query = clean_sequence(params['query']) res = features.blast2(subject, query) j = json.JSONEncoder().encode(res) if 'jsonp' in params: j = params['jsonp']+'('+j+')' http_res = HttpResponse(j,mimetype="text/javascript",status=httplib.OK) else: # technically we should be returning "application/json", but in that # case browsers force user to download into a file, and for debugging # we want to be able to see the JSON list in browser. looks like most # browsers will handle JSON sent back as text/html anyways. if is_ajax: http_res = HttpResponse(j,mimetype="application/json",status=httplib.OK) else: http_res = HttpResponse(j,status=httplib.OK) # allow cross origin API calls http_res['Access-Control-Allow-Origin'] = '*' http_res['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS' http_res['Access-Control-Max-Age'] = 1000 return http_res def blast2(request): try: if 'subject' in request.REQUEST: params = request.REQUEST else: params = json.loads(request.body) return _blast2(params, request.is_ajax()) except Exception as e: print str(e) raise(e)
UndeadBlow/giraffe
src/giraffe/views.py
Python
mit
5,133
''' * TeleStax, Open Source Cloud Communications * Copyright 2011-2016, Telestax Inc and individual contributors * by the @authors tag. * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this software; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA, or see the FSF site: http://www.fsf.org. This code was generated by : Name: Md Sharique Email : nukles1.07@gmail.com ''' import requests import json class client(object): def __init__(self, Sid, AuthToken, BaseUrl): self.Sid = Sid self.AuthToken = AuthToken self.BaseUrl = BaseUrl class UssdPush(object): def __init__(self, From, To, AppName, client): self.From = From self.To = To self.AppName = AppName self.Sid = client.Sid self.AuthToken = client.AuthToken self.BaseUrl = client.BaseUrl def Push(self): try: PushUrl = 'https://cloud.restcomm.com/restcomm-rvd/services/apps/'+self.AppName+'/controller' Url = self.BaseUrl+'/Accounts/'+self.Sid+'/UssdPush' data = {'From':self.From, 'To':self.To, 'Url':PushUrl} r = requests.post(Url, data=data, auth=(self.Sid,self.AuthToken)) if r.status_code == 401: return "Authentication Error! Please Enter Valid Account Sid and Authentication Token" elif r.status_code == 404: return "Base Url is Incorrect! Please verify and try again" elif r.status_code == 400: return "Invalid option" else: content = json.loads(r.text) return content except requests.HTTPError: return ("HTTP ERROR") except requests.ConnectionError: return ("CONNECTION ERROR! Please check and try again") except requests.Timeout: return ("TIMEOUT ERROR") except requests.RequestException: return ("Invalid Url! Please check and try again")
RestComm/restcomm-sdk-python
Restcomm_Python_SDk/Restcomm/UssdPush/UssdPush.py
Python
mit
2,591
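Hypothetical usage of the two classes above; the Sid, AuthToken, BaseUrl, phone numbers, and application name are placeholders, so the request will only succeed against a real Restcomm account.

from Restcomm.UssdPush.UssdPush import client, UssdPush  # import path per this repo's layout (assumed)

api = client(Sid='ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
             AuthToken='your-auth-token',
             BaseUrl='https://cloud.restcomm.com/restcomm/2012-04-24')
ussd = UssdPush(From='+15551230001', To='+15551230002',
                AppName='my-ussd-app', client=api)
print(ussd.Push())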
try: from setuptools import setup, find_packages except ImportError: import sys print >>sys.stderr, "Error: setuptools is required to install this package." sys.exit(1) __version__ = '1.4.1' __author__ = 'Christopher Roach' __email__ = 'croach@freshplum.com' __license__ = 'MIT' setup( name='django-simple-rest', version=__version__, author=__author__, author_email=__email__, description='A drop dead simple package for creating RESTful APIs on top of Django', long_description=open('README.rst').read(), url='https://github.com/freshplum/django-simple-rest', packages=find_packages(), install_requires=['setuptools', 'mimeparse'], zip_safe=False, keywords='rest,django,api', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
croach/django-simple-rest
setup.py
Python
mit
1,291
from req import WebRequestHandler
from req import Service
import tornado


class Web404Handler(WebRequestHandler):
    @tornado.gen.coroutine
    def get(self):
        self.write_error(404)
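# A sketch of wiring this up as the application-wide fallback handler via
# tornado's standard default_handler_class setting; the empty route table is
# hypothetical, since this project's real routing is defined elsewhere:
if __name__ == '__main__':
    import tornado.web
    app = tornado.web.Application([], default_handler_class=Web404Handler)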
allenwhalecs03/nctu_hackathon
backend/web/error.py
Python
mit
191
# elected_office/admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-

from django.contrib import admin

# Register your models here.
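# A sketch of what a registration would look like here; the ElectedOffice
# model name is inferred from the app name and is an assumption, which is
# why the lines stay commented out:
#
#     from .models import ElectedOffice
#     admin.site.register(ElectedOffice)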
jainanisha90/WeVoteServer
elected_office/admin.py
Python
mit
152
''' Text ==== An abstraction of text creation. Depending of the selected backend, the accuracy of text rendering may vary. .. versionchanged:: 1.5.0 :attr:`LabelBase.line_height` added. .. versionchanged:: 1.0.7 The :class:`LabelBase` does not generate any texture if the text has a width <= 1. This is the backend layer for getting text out of different text providers, you should only be using this directly if your needs aren't fulfilled by the :class:`~kivy.uix.label.Label`. Usage example:: from kivy.core.label import Label as CoreLabel ... ... my_label = CoreLabel() my_label.text = 'hello' # the label is usually not drawn until needed, so force it to draw. my_label.refresh() # Now access the texture of the label and use it wherever and # however you may please. hello_texture = my_label.texture ''' __all__ = ('LabelBase', 'Label') import re import os from functools import partial from copy import copy from kivy import kivy_data_dir from kivy.utils import platform from kivy.graphics.texture import Texture from kivy.core import core_select_lib from kivy.core.text.text_layout import layout_text, LayoutWord from kivy.resources import resource_find, resource_add_path from kivy.compat import PY2 from kivy.setupconfig import USE_SDL2 DEFAULT_FONT = 'DroidSans' FONT_REGULAR = 0 FONT_ITALIC = 1 FONT_BOLD = 2 FONT_BOLDITALIC = 3 class LabelBase(object): '''Core text label. This is the abstract class used by different backends to render text. .. warning:: The core text label can't be changed at runtime. You must recreate one. :Parameters: `font_size`: int, defaults to 12 Font size of the text `font_name`: str, defaults to DEFAULT_FONT Font name of the text `bold`: bool, defaults to False Activate "bold" text style `italic`: bool, defaults to False Activate "italic" text style `text_size`: tuple, defaults to (None, None) Add constraint to render the text (inside a bounding box). If no size is given, the label size will be set to the text size. `padding`: float, defaults to None If it's a float, it will set padding_x and padding_y `padding_x`: float, defaults to 0.0 Left/right padding `padding_y`: float, defaults to 0.0 Top/bottom padding `halign`: str, defaults to "left" Horizontal text alignment inside the bounding box `valign`: str, defaults to "bottom" Vertical text alignment inside the bounding box `shorten`: bool, defaults to False Indicate whether the label should attempt to shorten its textual contents as much as possible if a `size` is given. Setting this to True without an appropriately set size will lead to unexpected results. `shorten_from`: str, defaults to `center` The side from which we should shorten the text from, can be left, right, or center. E.g. if left, the ellipsis will appear towards the left side and it will display as much text starting from the right as possible. `split_str`: string, defaults to `' '` (space) The string to use to split the words by when shortening. If empty, we can split after every character filling up the line as much as possible. `max_lines`: int, defaults to 0 (unlimited) If set, this indicate how maximum line are allowed to render the text. Works only if a limitation on text_size is set. `mipmap` : bool, defaults to False Create a mipmap for the texture `strip` : bool, defaults to False Whether each row of text has its leading and trailing spaces stripped. If `halign` is `justify` it is implicitly True. `strip_reflow` : bool, defaults to True Whether text that has been reflowed into a second line should be striped, even if `strip` is False. 
This is only in effect when `size_hint_x` is not None, because otherwise lines are never split. `unicode_errors` : str, defaults to `'replace'` How to handle unicode decode errors. Can be `'strict'`, `'replace'` or `'ignore'`. .. versionchanged:: 1.9.0 `strip`, `strip_reflow`, `shorten_from`, `split_str`, and `unicode_errors` were added. .. versionchanged:: 1.9.0 `padding_x` and `padding_y` has been fixed to work as expected. In the past, the text was padded by the negative of their values. .. versionchanged:: 1.8.0 `max_lines` parameters has been added. .. versionchanged:: 1.0.8 `size` have been deprecated and replaced with `text_size`. .. versionchanged:: 1.0.7 The `valign` is now respected. This wasn't the case previously so you might have an issue in your application if you have not considered this. ''' __slots__ = ('options', 'texture', '_label', '_text_size') _cached_lines = [] _fonts = {} _fonts_cache = {} _fonts_dirs = [] _texture_1px = None def __init__( self, text='', font_size=12, font_name=DEFAULT_FONT, bold=False, italic=False, halign='left', valign='bottom', shorten=False, text_size=None, mipmap=False, color=None, line_height=1.0, strip=False, strip_reflow=True, shorten_from='center', split_str=' ', unicode_errors='replace', **kwargs): # Include system fonts_dir in resource paths. # This allows us to specify a font from those dirs. LabelBase.get_system_fonts_dir() options = {'text': text, 'font_size': font_size, 'font_name': font_name, 'bold': bold, 'italic': italic, 'halign': halign, 'valign': valign, 'shorten': shorten, 'mipmap': mipmap, 'line_height': line_height, 'strip': strip, 'strip_reflow': strip_reflow, 'shorten_from': shorten_from, 'split_str': split_str, 'unicode_errors': unicode_errors} options['color'] = color or (1, 1, 1, 1) options['padding'] = kwargs.get('padding', (0, 0)) if not isinstance(options['padding'], (list, tuple)): options['padding'] = (options['padding'], options['padding']) options['padding_x'] = kwargs.get('padding_x', options['padding'][0]) options['padding_y'] = kwargs.get('padding_y', options['padding'][1]) if 'size' in kwargs: options['text_size'] = kwargs['size'] else: if text_size is None: options['text_size'] = (None, None) else: options['text_size'] = text_size self._text_size = options['text_size'] self._text = options['text'] self._internal_size = 0, 0 # the real computed text size (inclds pad) self._cached_lines = [] self.options = options self.texture = None self.resolve_font_name() @staticmethod def register(name, fn_regular, fn_italic=None, fn_bold=None, fn_bolditalic=None): '''Register an alias for a Font. .. versionadded:: 1.1.0 If you're using a ttf directly, you might not be able to use the bold/italic properties of the ttf version. If the font is delivered in multiple files (one regular, one italic and one bold), then you need to register these files and use the alias instead. All the fn_regular/fn_italic/fn_bold parameters are resolved with :func:`kivy.resources.resource_find`. If fn_italic/fn_bold are None, fn_regular will be used instead. 
''' fonts = [] for font_type in fn_regular, fn_italic, fn_bold, fn_bolditalic: if font_type is not None: font = resource_find(font_type) if font is None: raise IOError('File {0}s not found'.format(font_type)) else: fonts.append(font) else: fonts.append(fonts[-1]) # add regular font to list again LabelBase._fonts[name] = tuple(fonts) def resolve_font_name(self): options = self.options fontname = options['font_name'] fonts = self._fonts fontscache = self._fonts_cache # is the font is registered ? if fontname in fonts: # return the prefered font for the current bold/italic combinaison italic = int(options['italic']) if options['bold']: bold = FONT_BOLD else: bold = FONT_REGULAR options['font_name_r'] = fonts[fontname][italic | bold] elif fontname in fontscache: options['font_name_r'] = fontscache[fontname] else: filename = resource_find(fontname) if not filename: fontname = fontname + \ ('' if fontname.endswith('.ttf') else '.ttf') filename = resource_find(fontname) if filename is None: # XXX for compatibility, check directly in the data dir filename = os.path.join(kivy_data_dir, fontname) if not os.path.exists(filename): raise IOError('Label: File %r not found' % fontname) fontscache[fontname] = filename options['font_name_r'] = filename @staticmethod def get_system_fonts_dir(): '''Return the Directory used by the system for fonts. ''' if LabelBase._fonts_dirs: return LabelBase._fonts_dirs fdirs = [] if platform == 'linux': fdirs = [ '/usr/share/fonts/truetype', '/usr/local/share/fonts', os.path.expanduser('~/.fonts'), os.path.expanduser('~/.local/share/fonts')] elif platform == 'macosx': fdirs = ['/Library/Fonts', '/System/Library/Fonts', os.path.expanduser('~/Library/Fonts')] elif platform == 'win': fdirs = [os.environ['SYSTEMROOT'] + os.sep + 'Fonts'] elif platform == 'ios': fdirs = ['/System/Library/Fonts'] elif platform == 'android': fdirs = ['/system/fonts'] if fdirs: fdirs.append(kivy_data_dir + os.sep + 'fonts') # let's register the font dirs rdirs = [] for _dir in fdirs: if os.path.exists(_dir): resource_add_path(_dir) rdirs.append(_dir) LabelBase._fonts_dirs = rdirs return rdirs raise Exception("Unknown Platform {}".format(platform)) def get_extents(self, text): '''Return a tuple (width, height) indicating the size of the specified text''' return (0, 0) def get_cached_extents(self): '''Returns a cached version of the :meth:`get_extents` function. :: >>> func = self._get_cached_extents() >>> func <built-in method size of pygame.font.Font object at 0x01E45650> >>> func('a line') (36, 18) .. warning:: This method returns a size measuring function that is valid for the font settings used at the time :meth:`get_cached_extents` was called. Any change in the font settings will render the returned function incorrect. You should only use this if you know what you're doing. .. versionadded:: 1.9.0 ''' return self.get_extents def _render_begin(self): pass def _render_text(self, text, x, y): pass def _render_end(self): pass def shorten(self, text, margin=2): ''' Shortens the text to fit into a single line by the width specified by :attr:`text_size` [0]. If :attr:`text_size` [0] is None, it returns text text unchanged. :attr:`split_str` and :attr:`shorten_from` determines how the text is shortened. :params: `text` str, the text to be shortened. `margin` int, the amount of space to leave between the margins and the text. This is in addition to :attr:`padding_x`. :retruns: the text shortened to fit into a single line. 
''' textwidth = self.get_cached_extents() uw = self.text_size[0] if uw is None or not text: return text opts = self.options uw = max(0, int(uw - opts['padding_x'] * 2 - margin)) # if larger, it won't fit so don't even try extents chr = type(text) text = text.replace(chr('\n'), chr(' ')) if len(text) <= uw and textwidth(text)[0] <= uw: return text c = opts['split_str'] offset = 0 if len(c) else 1 dir = opts['shorten_from'][0] elps = textwidth('...')[0] if elps > uw: if textwidth('..')[0] <= uw: return '..' else: return '.' uw -= elps f = partial(text.find, c) f_rev = partial(text.rfind, c) # now find the first and last word e1, s2 = f(), f_rev() if dir != 'l': # center or right # no split, or the first word doesn't even fit if e1 != -1: l1 = textwidth(text[:e1])[0] l2 = textwidth(text[s2 + 1:])[0] if e1 == -1 or l1 + l2 > uw: if len(c): opts['split_str'] = '' res = self.shorten(text, margin) opts['split_str'] = c return res # at this point we do char by char so e1 must be zero if l1 <= uw: return chr('{0}...').format(text[:e1]) return chr('...') # both word fits, and there's at least on split_str if s2 == e1: # there's only on split_str return chr('{0}...{1}').format(text[:e1], text[s2 + 1:]) # both the first and last word fits, and they start/end at diff pos if dir == 'r': ee1 = f(e1 + 1) while l2 + textwidth(text[:ee1])[0] <= uw: e1 = ee1 if e1 == s2: break ee1 = f(e1 + 1) else: while True: if l1 <= l2: ee1 = f(e1 + 1) l1 = textwidth(text[:ee1])[0] if l2 + l1 > uw: break e1 = ee1 if e1 == s2: break else: ss2 = f_rev(0, s2 - offset) l2 = textwidth(text[ss2 + 1:])[0] if l2 + l1 > uw: break s2 = ss2 if e1 == s2: break else: # left # no split, or the last word doesn't even fit if s2 != -1: l2 = textwidth(text[s2 + (1 if len(c) else -1):])[0] l1 = textwidth(text[:max(0, e1)])[0] # if split_str if s2 == -1 or l2 + l1 > uw: if len(c): opts['split_str'] = '' res = self.shorten(text, margin) opts['split_str'] = c return res return chr('...') # both word fits, and there's at least on split_str if s2 == e1: # there's only on split_str return chr('{0}...{1}').format(text[:e1], text[s2 + 1:]) # both the first and last word fits, and they start/end at diff pos ss2 = f_rev(0, s2 - offset) while l1 + textwidth(text[ss2 + 1:])[0] <= uw: s2 = ss2 if s2 == e1: break ss2 = f_rev(0, s2 - offset) return chr('{0}...{1}').format(text[:e1], text[s2 + 1:]) def _render_real(self): lines = self._cached_lines options = None for line in lines: if len(line.words): # get opts from first line, first word options = line.words[0].options break if not options: # there was no text to render self._render_begin() data = self._render_end() assert(data) if data is not None and data.width > 1: self.texture.blit_data(data) return render_text = self._render_text get_extents = self.get_cached_extents() uw, uh = options['text_size'] xpad, ypad = options['padding_x'], options['padding_y'] x, y = xpad, ypad # pos in the texture iw, ih = self._internal_size # the real size of text, not texture if uw is not None: uww = uw - 2 * xpad # real width of just text w, h = self.size sw = options['space_width'] halign = options['halign'] valign = options['valign'] split = re.split pat = re.compile('( +)') self._render_begin() if valign == 'bottom': y = h - ih + ypad elif valign == 'middle': y = int((h - ih) / 2 + ypad) for layout_line in lines: # for plain label each line has only one str lw, lh = layout_line.w, layout_line.h line = '' assert len(layout_line.words) < 2 if len(layout_line.words): last_word = layout_line.words[0] line = 
last_word.text x = xpad if halign[0] == 'c': # center x = int((w - lw) / 2.) elif halign[0] == 'r': # right x = max(0, int(w - lw - xpad)) # right left justify # divide left over space between `spaces` # TODO implement a better method of stretching glyphs? if (uw is not None and halign[-1] == 'y' and line and not layout_line.is_last_line): # number spaces needed to fill, and remainder n, rem = divmod(max(uww - lw, 0), sw) n = int(n) words = None if n or rem: # there's no trailing space when justify is selected words = split(pat, line) if words is not None and len(words) > 1: space = type(line)(' ') # words: every even index is spaces, just add ltr n spaces for i in range(n): idx = (2 * i + 1) % (len(words) - 1) words[idx] = words[idx] + space if rem: # render the last word at the edge, also add it to line ext = get_extents(words[-1]) word = LayoutWord(last_word.options, ext[0], ext[1], words[-1]) layout_line.words.append(word) last_word.lw = uww - ext[0] # word was stretched render_text(words[-1], x + last_word.lw, y) last_word.text = line = ''.join(words[:-2]) else: last_word.lw = uww # word was stretched last_word.text = line = ''.join(words) layout_line.w = uww # the line occupies full width if len(line): layout_line.x = x layout_line.y = y render_text(line, x, y) y += lh # get data from provider data = self._render_end() assert(data) # If the text is 1px width, usually, the data is black. # Don't blit that kind of data, otherwise, you have a little black bar. if data is not None and data.width > 1: self.texture.blit_data(data) def render(self, real=False): '''Return a tuple (width, height) to create the image with the user constraints. (width, height) includes the padding. ''' if real: return self._render_real() options = copy(self.options) options['space_width'] = self.get_extents(' ')[0] options['strip'] = strip = (options['strip'] or options['halign'][-1] == 'y') uw, uh = options['text_size'] = self._text_size text = self.text if strip: text = text.strip() if uw is not None and options['shorten']: text = self.shorten(text) self._cached_lines = lines = [] if not text: return 0, 0 if uh is not None and options['valign'][-1] == 'e': # middle center = -1 # pos of newline if len(text) > 1: middle = int(len(text) // 2) l, r = text.rfind('\n', 0, middle), text.find('\n', middle) if l != -1 and r != -1: center = l if center - l <= r - center else r elif l != -1: center = l elif r != -1: center = r # if a newline split text, render from center down and up til uh if center != -1: # layout from center down until half uh w, h, clipped = layout_text(text[center + 1:], lines, (0, 0), (uw, uh / 2), options, self.get_cached_extents(), True, True) # now layout from center upwards until uh is reached w, h, clipped = layout_text(text[:center + 1], lines, (w, h), (uw, uh), options, self.get_cached_extents(), False, True) else: # if there's no new line, layout everything w, h, clipped = layout_text(text, lines, (0, 0), (uw, None), options, self.get_cached_extents(), True, True) else: # top or bottom w, h, clipped = layout_text(text, lines, (0, 0), (uw, uh), options, self.get_cached_extents(), options['valign'][-1] == 'p', True) self._internal_size = w, h if uw: w = uw if uh: h = uh if h > 1 and w < 2: w = 2 return int(w), int(h) def _texture_refresh(self, *l): self.refresh() def _texture_fill(self, texture): # second pass, render for real self.render(real=True) def refresh(self): '''Force re-rendering of the text ''' self.resolve_font_name() # first pass, calculating width/height sz = self.render() 
self._size_texture = sz self._size = (sz[0], sz[1]) # if no text are rendered, return nothing. width, height = self._size if width <= 1 or height <= 1: self.texture = self.texture_1px return # create a delayed texture texture = self.texture if texture is None or \ width != texture.width or \ height != texture.height: texture = Texture.create(size=(width, height), mipmap=self.options['mipmap'], callback=self._texture_fill) texture.flip_vertical() texture.add_reload_observer(self._texture_refresh) self.texture = texture else: texture.ask_update(self._texture_fill) def _get_text(self): if PY2: try: if isinstance(self._text, unicode): return self._text return self._text.decode('utf8') except AttributeError: # python 3 support return str(self._text) except UnicodeDecodeError: return self._text else: return self._text def _set_text(self, text): if text != self._text: self._text = text text = property(_get_text, _set_text, doc='Get/Set the text') label = property(_get_text, _set_text, doc='Get/Set the text') @property def texture_1px(self): if LabelBase._texture_1px is None: tex = Texture.create(size=(1, 1), colorfmt='rgba') tex.blit_buffer(b'\x00\x00\x00\x00', colorfmt='rgba') LabelBase._texture_1px = tex return LabelBase._texture_1px @property def size(self): return self._size @property def width(self): return self._size[0] @property def height(self): return self._size[1] @property def content_width(self): '''Return the content width; i.e. the width of the text without any padding.''' if self.texture is None: return 0 return self.texture.width - 2 * self.options['padding_x'] @property def content_height(self): '''Return the content height; i.e. the height of the text without any padding.''' if self.texture is None: return 0 return self.texture.height - 2 * self.options['padding_y'] @property def content_size(self): '''Return the content size (width, height)''' if self.texture is None: return (0, 0) return (self.content_width, self.content_height) @property def fontid(self): '''Return a unique id for all font parameters''' return str([self.options[x] for x in ( 'font_size', 'font_name_r', 'bold', 'italic')]) def _get_text_size(self): return self._text_size def _set_text_size(self, x): self._text_size = x text_size = property(_get_text_size, _set_text_size, doc='''Get/set the (width, height) of the ' 'contrained rendering box''') usersize = property(_get_text_size, _set_text_size, doc='''(deprecated) Use text_size instead.''') # Load the appropriate provider label_libs = [] if USE_SDL2: label_libs += [('sdl2', 'text_sdl2', 'LabelSDL2')] else: label_libs += [('pygame', 'text_pygame', 'LabelPygame')] label_libs += [ ('sdlttf', 'text_sdlttf', 'LabelSDLttf'), ('pil', 'text_pil', 'LabelPIL')] Label = core_select_lib('text', label_libs) if 'KIVY_DOC' not in os.environ: if not Label: from kivy.logger import Logger import sys Logger.critical('App: Unable to get a Text provider, abort.') sys.exit(1) # For the first initalization, register the default font Label.register('DroidSans', 'data/fonts/DroidSans.ttf', 'data/fonts/DroidSans-Italic.ttf', 'data/fonts/DroidSans-Bold.ttf', 'data/fonts/DroidSans-BoldItalic.ttf')
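# A short sketch of the register() alias mechanism documented above; the
# 'MyFont' name and the four .ttf paths are placeholders for fonts shipped
# with an application, not files provided by Kivy:
if __name__ == '__main__':
    Label.register('MyFont',
                   'fonts/MyFont-Regular.ttf',
                   'fonts/MyFont-Italic.ttf',
                   'fonts/MyFont-Bold.ttf',
                   'fonts/MyFont-BoldItalic.ttf')
    demo = Label(text='hello', font_name='MyFont')
    demo.refresh()  # force rendering; demo.texture now holds the result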
JohnHowland/kivy
kivy/core/text/__init__.py
Python
mit
27,271
from hearthbreaker.cards.minions.neutral import ( BloodfenRaptor, IronbeakOwl, NoviceEngineer, StonetuskBoar, WarGolem, MogushanWarden, FaerieDragon, KoboldGeomancer, ElvenArcher, ArgentSquire, SilvermoonGuardian, TwilightDrake, MagmaRager, DireWolfAlpha, WorgenInfiltrator, Archmage, DalaranMage, Malygos, AzureDrake, OgreMagi, Spellbreaker, BloodmageThalnos, LootHoarder, LeperGnome, IronforgeRifleman, GnomishInventor, GoldshireFootman, FrostwolfGrunt, IronfurGrizzly, LordOfTheArena, MurlocRaider, ManaAddict, OasisSnapjaw, RecklessRocketeer, RiverCrocolisk, SenjinShieldmasta, ScarletCrusader, Shieldbearer, SilverbackPatriarch, JunglePanther, RavenholdtAssassin, StormpikeCommando, StormwindKnight, StranglethornTiger, Sunwalker, ThrallmarFarseer, WindfuryHarpy, YoungDragonhawk, Wolfrider, BootyBayBodyguard, BoulderfistOgre, ChillwindYeti, CoreHound, VoodooDoctor, EarthenRingFarseer, ArcaneGolem, PriestessOfElune, DarkscaleHealer, ArgentCommander, BluegillWarrior, Wisp, Nightblade, ShatteredSunCleric, TheBlackKnight, AbusiveSergeant, DarkIronDwarf, Abomination, AmaniBerserker, SilverHandKnight, FenCreeper, VentureCoMercenary, StormwindChampion, Deathwing, Alexstrasza, EmperorCobra, CrazedAlchemist, AcidicSwampOoze, AncientBrewmaster, YouthfulBrewmaster, BaronGeddon, AngryChicken, RagingWorgen, TaurenWarrior, SpitefulSmith, BloodKnight, FrostwolfWarlord, RaidLeader, DragonlingMechanic, MurlocTidehunter, RazorfenHunter, KnifeJuggler, CairneBloodhoof, HarvestGolem, TheBeast, SylvanasWindrunner, StampedingKodo, FrostElemental, Demolisher, Doomsayer, Gruul, Hogger, ImpMaster, InjuredBlademaster, MasterSwordsmith, NatPagle, Nozdormu, RagnarosTheFirelord, ColdlightOracle, ColdlightSeer, GrimscaleOracle, MurlocWarleader, AncientWatcher, BigGameHunter, BloodsailCorsair, BloodsailRaider, CaptainGreenskin, HungryCrab, MadBomber, ManaWraith, MindControlTech, MurlocTidecaller, Onyxia, SouthseaCaptain, SouthseaDeckhand, YoungPriestess, AcolyteOfPain, CultMaster, Secretkeeper, VioletTeacher, GadgetzanAuctioneer, IllidanStormrage, Lightwarden, FlesheatingGhoul, QuestingAdventurer, GurubashiBerserker, AncientMage, DefenderOfArgus, SunfuryProtector, HarrisonJones, KingMukla, LeeroyJenkins, SeaGiant, MoltenGiant, MountainGiant, DreadCorsair, CaptainsParrot, TinkmasterOverspark, AlarmoBot, EliteTaurenChieftain, MillhouseManastorm, PintSizedSummoner, OldMurkEye, Ysera, GelbinMekkatorque, LorewalkerCho, WildPyromancer, FacelessManipulator, NerubianEgg, Maexxna, HauntedCreeper, NerubarWeblord, UnstableGhoul, Loatheb, StoneskinGargoyle, SludgeBelcher, BaronRivendare, DancingSwords, Deathlord, SpectralKnight, Undertaker, WailingSoul, ZombieChow, Feugen, Stalagg, MadScientist, EchoingOoze, ShadeOfNaxxramas, KelThuzad, PilotedShredder, PilotedSkyGolem, SneedsOldShredder, AntiqueHealbot, AnnoyoTron, ArcaneNullifierX21, Blingtron3000, BombLobber, BurlyRockjawTrogg, Mechwarper, Frog, ClockworkGiant, ClockworkGnome, BoomBot, DoctorBoom, TargetDummy, ExplosiveSheep, Puddlestomper, MicroMachine, MechanicalYeti, SpiderTank, GilblinStalker, ShipsCannon, OgreBrute, MogorTheOgre, Toshley, ForceTankMAX, FelReaver, MadderBomber, Gazlowe, MiniMage, SaltyDog, GnomereganInfantry, FlyingMachine, LostTallstrider, HemetNesingwary, Illuminator, MekgineerThermaplugg, StonesplinterTrogg, TroggzorTheEarthinator, Hobgoblin, Cogmaster, GoblinSapper, TinkertownTechnician, Junkbot, Jeeves, Recombobulator, LilExorcist, EnhanceoMechano, FoeReaper4000, KezanMystic, MimironsHead, GnomishExperimenter, HungryDragon, GrimPatron, BlackwingTechnician, 
EmperorThaurissan, MajordomoExecutus, VolcanicDrake, BlackwingCorruptor, DrakonidCrusher, DragonEgg, Chromaggus, DragonkinSorcerer, RendBlackhand, Nefarian, ) from hearthbreaker.cards.minions.druid import ( KeeperOfTheGrove, DruidOfTheClaw, AncientOfLore, AncientOfWar, IronbarkProtector, Cenarius, AnodizedRoboCub, MechBearCat, DruidOfTheFang, Malorne, GroveTender, DruidOfTheFlame, VolcanicLumberer, ) from hearthbreaker.cards.minions.hunter import ( TimberWolf, SavannahHighmane, Houndmaster, KingKrush, StarvingBuzzard, TundraRhino, ScavengingHyena, Webspinner, Hound, Huffer, Misha, Leokk, Snake, MetaltoothLeaper, KingOfBeasts, Gahzrilla, SteamwheedleSniper, CoreRager, ) from hearthbreaker.cards.minions.mage import ( ManaWyrm, SorcerersApprentice, KirinTorMage, EtherealArcanist, WaterElemental, ArchmageAntonidas, Snowchugger, GoblinBlastmage, SootSpewer, WeeSpellstopper, FlameLeviathan, Flamewaker ) from hearthbreaker.cards.minions.paladin import ( AldorPeacekeeper, ArgentProtector, GuardianOfKings, TirionFordring, CobaltGuardian, SilverHandRecruit, ShieldedMinibot, Quartermaster, ScarletPurifier, BolvarFordragon, DragonConsort, ) from hearthbreaker.cards.minions.priest import ( AuchenaiSoulpriest, CabalShadowPriest, Lightspawn, Lightwell, NorthshireCleric, ProphetVelen, TempleEnforcer, DarkCultist, Shrinkmeister, UpgradedRepairBot, Shadowbomber, Shadowboxer, Voljin, TwilightWhelp, ) from hearthbreaker.cards.minions.rogue import ( AnubarAmbusher, DefiasRingleader, EdwinVanCleef, Kidnapper, MasterOfDisguise, PatientAssassin, SI7Agent, OneeyedCheat, IronSensei, OgreNinja, TradePrinceGallywix, GoblinAutoBarber, DarkIronSkulker, ) from hearthbreaker.cards.minions.shaman import ( AlAkirTheWindlord, DustDevil, EarthElemental, FireElemental, FlametongueTotem, ManaTideTotem, UnboundElemental, Windspeaker, HealingTotem, SearingTotem, StoneclawTotem, WrathOfAirTotem, SpiritWolf, VitalityTotem, SiltfinSpiritwalker, WhirlingZapomatic, DunemaulShaman, Neptulon, FireguardDestroyer, ) from hearthbreaker.cards.minions.warlock import ( FlameImp, PitLord, Voidwalker, DreadInfernal, Felguard, Doomguard, Succubus, SummoningPortal, BloodImp, LordJaraxxus, VoidTerror, Voidcaller, AnimaGolem, WorthlessImp, FelCannon, MalGanis, FloatingWatcher, MistressOfPain, ImpGangBoss, ) from hearthbreaker.cards.minions.warrior import ( ArathiWeaponsmith, Armorsmith, CruelTaskmaster, FrothingBerserker, GrommashHellscream, KorkronElite, WarsongCommander, Warbot, Shieldmaiden, SiegeEngine, IronJuggernaut, ScrewjankClunker, AxeFlinger, )
slaymaker1907/hearthbreaker
hearthbreaker/cards/minions/__init__.py
Python
mit
7,607
'''
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).

:author: Tobias Houska

This example implements the external hydrological model HYMOD into SPOTPY.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np
try:
    import spotpy
except ImportError:
    import sys
    sys.path.append(".")
    import spotpy
import os
import multiprocessing as mp
from distutils.dir_util import copy_tree, remove_tree
#from shutil import rmtree
import sys


class spot_setup(object):
    def __init__(self, parallel='seq'):
        self.params = [spotpy.parameter.Uniform('cmax', low=1.0, high=500, optguess=412.33),
                       spotpy.parameter.Uniform('bexp', low=0.1, high=2.0, optguess=0.1725),
                       spotpy.parameter.Uniform('alpha', low=0.1, high=0.99, optguess=0.8127),
                       spotpy.parameter.Uniform('Ks', low=0.0, high=0.10, optguess=0.0404),
                       spotpy.parameter.Uniform('Kq', low=0.1, high=0.99, optguess=0.5592)]
        self.curdir = os.getcwd()
        self.owd = os.path.dirname(os.path.realpath(__file__))
        self.hymod_path = self.owd + os.sep + 'hymod_unix'
        self.evals = list(np.genfromtxt(self.hymod_path + os.sep + 'bound.txt',
                                        skip_header=65)[:, 3])[:730]
        self.Factor = 1944 * (1000 * 1000) / (1000 * 60 * 60 * 24)
        self.parallel = parallel

    def parameters(self):
        return spotpy.parameter.generate(self.params)

    def simulation(self, x):
        if self.parallel == 'seq':
            call = ''
        elif self.parallel == 'mpi':
            # Running n parallel, care has to be taken when files are read or written
            # Therefore we check the ID of the current computer core
            call = str(int(os.environ['OMPI_COMM_WORLD_RANK']) + 2)
            # And generate a new folder with all underlying files
            copy_tree(self.hymod_path, self.hymod_path + call)
        elif self.parallel == 'mpc':
            # Running n parallel, care has to be taken when files are read or written
            # Therefore we check the ID of the current computer core
            call = str(os.getpid())
            # And generate a new folder with all underlying files
            copy_tree(self.hymod_path, self.hymod_path + call)
        else:
            raise RuntimeError('No call variable was assigned')

        os.chdir(self.hymod_path + call)
        try:
            if sys.version_info.major == 2:
                params = file('Param.in', 'w')
            elif sys.version_info.major == 3:
                params = open('Param.in', 'w')
            for i in range(len(x)):
                if i == len(x) - 1:
                    params.write(str(round(x[i], 5)))
                else:
                    params.write(str(round(x[i], 5)) + ' ')
            params.close()

            os.system('./hymod_%s.%s' % (sys.version_info.major, sys.version_info.minor))

            if sys.version_info.major == 2:
                SimRR = file('Q.out', 'r')
            elif sys.version_info.major == 3:
                SimRR = open('Q.out', 'r')
            else:
                raise Exception("Your python is too old for this example")

            simulations = []
            for i in range(64):
                SimRR.readline()
            for i in range(730):
                val = SimRR.readline()
                simulations.append(float(val) * self.Factor)
            SimRR.close()
        except Exception:
            # Model has failed
            simulations = [np.nan] * 795  # Assign bad values - model might have crashed
        os.chdir(self.curdir)
        if self.parallel == 'mpi' or self.parallel == 'mpc':
            remove_tree(self.hymod_path + call)
        return simulations

    def evaluation(self):
        return self.evals

    def objectivefunction(self, simulation, evaluation, params=None):
        # Just an example, please choose an appropriate objective function
        # depending on the used algorithm
        like = spotpy.objectivefunctions.nashsutcliffe(evaluation, simulation)
        return like
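# A minimal sketch of driving this setup with one of spotpy's samplers; the
# dbname and repetition count are illustrative values, not taken from this
# file:
if __name__ == '__main__':
    setup = spot_setup(parallel='seq')
    sampler = spotpy.algorithms.mc(setup, dbname='hymod_mc', dbformat='csv')
    sampler.sample(10)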
thouska/spotpy
spotpy/examples/spot_setup_hymod_unix.py
Python
mit
4,291
# *****************************************************************************
# Copyright (c) 2015, 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************

import argparse
import json
import sys
import os
import logging

try:
    import wiotp.sdk.application
except ImportError:
    # This part is only required to run the sample from within the samples
    # directory when the module itself is not installed.
    import os
    import inspect

    cmd_subfolder = os.path.realpath(
        os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../src"))
    )
    if cmd_subfolder not in sys.path:
        sys.path.insert(0, cmd_subfolder)
    import wiotp.sdk.application


def exportTypes(destination):
    global client, cliArgs
    print("Exporting Device Types ...")
    with open(destination, "a") as out_file:
        for deviceType in client.registry.devicetypes:
            export = {
                "id": deviceType.id,
                "classId": deviceType.classId,
                "description": deviceType.description,
                "deviceInfo": deviceType.deviceInfo,
                "metadata": deviceType.metadata,
            }
            out_file.write(json.dumps(export) + "\n")


def exportDevices(destination):
    global client, cliArgs
    print("Exporting Devices ...")
    with open(destination, "a") as out_file:
        for device in client.registry.devices:
            export = {
                "typeId": device.typeId,
                "deviceId": device.deviceId,
                "deviceInfo": device.deviceInfo,
                "metadata": device.metadata,
            }
            out_file.write(json.dumps(export) + "\n")


def importTypes(source):
    # There is no bulk type registration in the API (yet)
    with open(source, "r") as in_file:
        for line in in_file:
            data = json.loads(line)
            client.api.registry.devicetypes.create(data)


def importDevices(source):
    deviceArray = []
    with open(source, "r") as in_file:
        for line in in_file:
            data = json.loads(line)
            deviceArray.append(data)
    result = client.api.registry.devices.create(deviceArray)


if __name__ == "__main__":
    # Initialize the properties we need
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mode", required=True)
    parser.add_argument("-d", "--directory", required=True)
    args, unknown = parser.parse_known_args()

    client = None
    options = wiotp.sdk.application.parseEnvVars()
    client = wiotp.sdk.application.ApplicationClient(options)
    client.logger.setLevel(logging.DEBUG)

    # Note that we do not need to call connect to make API calls
    devicesFilePath = args.directory + "/devices.txt"
    typesFilePath = args.directory + "/types.txt"

    if args.mode == "import":
        importTypes(typesFilePath)
        importDevices(devicesFilePath)
    elif args.mode == "export":
        if os.path.isfile(typesFilePath):
            os.remove(typesFilePath)
        exportTypes(typesFilePath)
        if os.path.isfile(devicesFilePath):
            os.remove(devicesFilePath)
        exportDevices(devicesFilePath)
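# Typical invocations; credentials are read from environment variables by
# parseEnvVars(), so none are passed on the command line:
#
#     python exportTool.py --mode export --directory ./backup
#     python exportTool.py --mode import --directory ./backup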
ibm-watson-iot/iot-python
samples/exportTool/exportTool.py
Python
epl-1.0
3,508
# pylint: disable=W0223 import textwrap import warnings import numpy as np from pandas.compat import range, zip import pandas.compat as compat from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries from pandas.core.dtypes.common import ( is_integer_dtype, is_integer, is_float, is_list_like, is_sequence, is_iterator, is_scalar, is_sparse, _is_unorderable_exception, _ensure_platform_int) from pandas.core.dtypes.missing import isna, _infer_fill_value from pandas.core.index import Index, MultiIndex import pandas.core.common as com from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe, is_null_slice, is_full_slice, _values_from_object) # the supported indexers def get_indexers_list(): return [ ('ix', _IXIndexer), ('iloc', _iLocIndexer), ('loc', _LocIndexer), ('at', _AtIndexer), ('iat', _iAtIndexer), ] # "null slice" _NS = slice(None, None) # the public IndexSlicerMaker class _IndexSlice(object): """ Create an object to more easily perform multi-index slicing Examples -------- >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) >>> columns = ['foo', 'bar'] >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), index=midx, columns=columns) Using the default slice command: >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 Using the IndexSlice class for a more intuitive command: >>> idx = pd.IndexSlice >>> dfmi.loc[idx[:, 'B0':'B1'], :] foo bar A0 B0 0 1 B1 2 3 A1 B0 8 9 B1 10 11 """ def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingError(Exception): pass class _NDFrameIndexer(object): _valid_types = None _exception = KeyError axis = None def __init__(self, obj, name): self.obj = obj self.ndim = obj.ndim self.name = name def __call__(self, axis=None): # we need to return a copy of ourselves new_self = self.__class__(self.obj, self.name) new_self.axis = axis return new_self def __iter__(self): raise NotImplementedError('ix is not iterable') def __getitem__(self, key): if type(key) is tuple: key = tuple(com._apply_if_callable(x, self.obj) for x in key) try: values = self.obj.get_value(*key) if is_scalar(values): return values except Exception: pass return self._getitem_tuple(key) else: key = com._apply_if_callable(key, self.obj) return self._getitem_axis(key, axis=0) def _get_label(self, label, axis=0): if self.ndim == 1: # for perf reasons we want to try _xs first # as its basically direct indexing # but will fail when the index is not present # see GH5667 try: return self.obj._xs(label, axis=axis) except: return self.obj[label] elif isinstance(label, tuple) and isinstance(label[axis], slice): raise IndexingError('no slices here, handle elsewhere') return self.obj._xs(label, axis=axis) def _get_loc(self, key, axis=0): return self.obj._ixs(key, axis=axis) def _slice(self, obj, axis=0, kind=None): return self.obj._slice(obj, axis=axis, kind=kind) def _get_setitem_indexer(self, key): if self.axis is not None: return self._convert_tuple(key, is_setter=True) axis = self.obj._get_axis(0) if isinstance(axis, MultiIndex): try: return axis.get_loc(key) except Exception: pass if isinstance(key, tuple): try: return self._convert_tuple(key, is_setter=True) except IndexingError: pass if isinstance(key, range): return self._convert_range(key, is_setter=True) try: return self._convert_to_indexer(key, is_setter=True) except TypeError as e: # invalid indexer type vs 'other' indexing errors if 'cannot do' in str(e): raise raise IndexingError(key) def __setitem__(self, 
key, value): if isinstance(key, tuple): key = tuple(com._apply_if_callable(x, self.obj) for x in key) else: key = com._apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) def _has_valid_type(self, k, axis): raise NotImplementedError() def _has_valid_tuple(self, key): """ check the key for valid keys across my indexer """ for i, k in enumerate(key): if i >= self.obj.ndim: raise IndexingError('Too many indexers') if not self._has_valid_type(k, i): raise ValueError("Location based indexing can only have [%s] " "types" % self._valid_types) def _should_validate_iterable(self, axis=0): """ return a boolean whether this axes needs validation for a passed iterable """ ax = self.obj._get_axis(axis) if isinstance(ax, MultiIndex): return False elif ax.is_floating(): return False return True def _is_nested_tuple_indexer(self, tup): if any([isinstance(ax, MultiIndex) for ax in self.obj.axes]): return any([is_nested_tuple(tup, ax) for ax in self.obj.axes]) return False def _convert_tuple(self, key, is_setter=False): keyidx = [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) for i in range(self.ndim): if i == axis: keyidx.append(self._convert_to_indexer( key, axis=axis, is_setter=is_setter)) else: keyidx.append(slice(None)) else: for i, k in enumerate(key): if i >= self.obj.ndim: raise IndexingError('Too many indexers') idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter) keyidx.append(idx) return tuple(keyidx) def _convert_range(self, key, is_setter=False): """ convert a range argument """ return list(key) def _convert_scalar_indexer(self, key, axis): # if we are accessing via lowered dim, use the last dim ax = self.obj._get_axis(min(axis, self.ndim - 1)) # a scalar return ax._convert_scalar_indexer(key, kind=self.name) def _convert_slice_indexer(self, key, axis): # if we are accessing via lowered dim, use the last dim ax = self.obj._get_axis(min(axis, self.ndim - 1)) return ax._convert_slice_indexer(key, kind=self.name) def _has_valid_setitem_indexer(self, indexer): return True def _has_valid_positional_setitem_indexer(self, indexer): """ validate that an positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally """ if isinstance(indexer, dict): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) else: if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) for ax, i in zip(self.obj.axes, indexer): if isinstance(i, slice): # should check the stop slice? pass elif is_list_like_indexer(i): # should check the elements? pass elif is_integer(i): if i >= len(ax): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) elif isinstance(i, dict): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) return True def _setitem_with_indexer(self, indexer, value): self._has_valid_setitem_indexer(indexer) # also has the side effect of consolidating in-place # TODO: Panel, DataFrame are not imported, remove? 
from pandas import Panel, DataFrame, Series # noqa info_axis = self.obj._info_axis_number # maybe partial set take_split_path = self.obj._is_mixed_type # if there is only one block/type, still have to take split path # unless the block is one-dimensional or it can hold the value if not take_split_path and self.obj._data.blocks: blk, = self.obj._data.blocks if 1 < blk.ndim: # in case of dict, keys are indices val = list(value.values()) if isinstance(value, dict) else value take_split_path = not blk._can_hold_element(val) if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): for i, ax in zip(indexer, self.obj.axes): # if we have any multi-indexes that have non-trivial slices # (not null slices) then we must take the split path, xref # GH 10360 if (isinstance(ax, MultiIndex) and not (is_integer(i) or is_null_slice(i))): take_split_path = True break if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): if isinstance(idx, dict): # reindex the axis to the new value # and set inplace key, _ = convert_missing_indexer(idx) # if this is the items axes, then take the main missing # path first # this correctly sets the dtype and avoids cache issues # essentially this separates out the block that is needed # to possibly be modified if self.ndim > 1 and i == self.obj._info_axis_number: # add the new item, and set the value # must have all defined axes if we have a scalar # or a list-like on the non-info axes if we have a # list-like len_non_info_axes = [ len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i ] if any([not l for l in len_non_info_axes]): if not is_list_like_indexer(value): raise ValueError("cannot set a frame with no " "defined index and a scalar") self.obj[key] = value return self.obj # add a new item with the dtype setup self.obj[key] = _infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes) self._setitem_with_indexer(new_indexer, value) return self.obj # reindex the axis # make sure to clear the cache because we are # just replacing the block manager here # so the object is the same index = self.obj._get_axis(i) labels = index.insert(len(index), key) self.obj._data = self.obj.reindex_axis(labels, i)._data self.obj._maybe_update_cacher(clear=True) self.obj.is_copy = None nindexer.append(labels.get_loc(key)) else: nindexer.append(idx) indexer = tuple(nindexer) else: indexer, missing = convert_missing_indexer(indexer) if missing: # reindex the axis to the new value # and set inplace if self.ndim == 1: index = self.obj.index new_index = index.insert(len(index), indexer) # we have a coerced indexer, e.g. a float # that matches in an Int64Index, so # we will not create a duplicate index, rather # index to that element # e.g. 
0.0 -> 0 # GH12246 if index.is_unique: new_indexer = index.get_indexer([new_index[-1]]) if (new_indexer != -1).any(): return self._setitem_with_indexer(new_indexer, value) # this preserves dtype of the value new_values = Series([value])._values if len(self.obj._values): try: new_values = np.concatenate([self.obj._values, new_values]) except TypeError: new_values = np.concatenate([self.obj.asobject, new_values]) self.obj._data = self.obj._constructor( new_values, index=new_index, name=self.obj.name)._data self.obj._maybe_update_cacher(clear=True) return self.obj elif self.ndim == 2: # no columns and scalar if not len(self.obj.columns): raise ValueError("cannot set a frame with no defined " "columns") # append a Series if isinstance(value, Series): value = value.reindex(index=self.obj.columns, copy=True) value.name = indexer # a list-list else: # must have conforming columns if is_list_like_indexer(value): if len(value) != len(self.obj.columns): raise ValueError("cannot set a row with " "mismatched columns") value = Series(value, index=self.obj.columns, name=indexer) self.obj._data = self.obj.append(value)._data self.obj._maybe_update_cacher(clear=True) return self.obj # set using setitem (Panel and > dims) elif self.ndim >= 3: return self.obj.__setitem__(indexer, value) # set item_labels = self.obj._get_axis(info_axis) # align and set the values if take_split_path: if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) if isinstance(value, ABCSeries): value = self._align_series(indexer, value) info_idx = indexer[info_axis] if is_integer(info_idx): info_idx = [info_idx] labels = item_labels[info_idx] # if we have a partial multiindex, then need to adjust the plane # indexer here if (len(labels) == 1 and isinstance(self.obj[labels[0]].axes[0], MultiIndex)): item = labels[0] obj = self.obj[item] index = obj.index idx = indexer[:info_axis][0] plane_indexer = tuple([idx]) + indexer[info_axis + 1:] lplane_indexer = length_of_indexer(plane_indexer[0], index) # require that we are setting the right number of values that # we are indexing if is_list_like_indexer(value) and np.iterable( value) and lplane_indexer != len(value): if len(obj[idx]) != len(value): raise ValueError("cannot set using a multi-index " "selection indexer with a different " "length than the value") # make sure we have an ndarray value = getattr(value, 'values', value).ravel() # we can directly set the series here # as we select a slice indexer on the mi idx = index._convert_slice_indexer(idx) obj._consolidate_inplace() obj = obj.copy() obj._data = obj._data.setitem(indexer=tuple([idx]), value=value) self.obj[item] = obj return # non-mi else: plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:] if info_axis > 0: plane_axis = self.obj.axes[:info_axis][0] lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis) else: lplane_indexer = 0 def setter(item, v): s = self.obj[item] pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer # perform the equivalent of a setitem on the info axis # as we have a null slice or a slice with full bounds # which means essentially reassign to the columns of a # multi-dim object # GH6149 (null slice), GH10408 (full bounds) if (isinstance(pi, tuple) and all(is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi)): s = v else: # set the item, possibly having a dtype change s._consolidate_inplace() s = s.copy() s._data = s._data.setitem(indexer=pi, value=v) s._maybe_update_cacher(clear=True) # reset the sliced object if unique self.obj[item] = s def 
can_do_equal_len(): """ return True if we have an equal len settable """ if not len(labels) == 1 or not np.iterable(value): return False l = len(value) item = labels[0] index = self.obj[item].index # equal len list/ndarray if len(index) == l: return True elif lplane_indexer == l: return True return False # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0: # we have an equal len Frame if isinstance(value, ABCDataFrame) and value.ndim > 1: sub_indexer = list(indexer) multiindex_indexer = isinstance(labels, MultiIndex) for item in labels: if item in value: sub_indexer[info_axis] = item v = self._align_series( tuple(sub_indexer), value[item], multiindex_indexer) else: v = np.nan setter(item, v) # we have an equal len ndarray/convertible to our labels elif np.array(value).ndim == 2: # note that this coerces the dtype if we are mixed # GH 7551 value = np.array(value, dtype=object) if len(labels) != value.shape[1]: raise ValueError('Must have equal len keys and value ' 'when setting with an ndarray') for i, item in enumerate(labels): # setting with a list, recoerces setter(item, value[:, i].tolist()) # we have an equal len list/ndarray elif can_do_equal_len(): setter(labels[0], value) # per label values else: if len(labels) != len(value): raise ValueError('Must have equal len keys and value ' 'when setting with an iterable') for item, v in zip(labels, value): setter(item, v) else: # scalar for item in labels: setter(item, value) else: if isinstance(indexer, tuple): indexer = maybe_convert_ix(*indexer) # if we are setting on the info axis ONLY # set using those methods to avoid block-splitting # logic here if (len(indexer) > info_axis and is_integer(indexer[info_axis]) and all(is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis) and item_labels.is_unique): self.obj[item_labels[indexer[info_axis]]] = value return if isinstance(value, (ABCSeries, dict)): value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame): value = self._align_frame(indexer, value) if isinstance(value, ABCPanel): value = self._align_panel(indexer, value) # check for chained assignment self.obj._check_is_chained_assignment_possible() # actually do the set self.obj._consolidate_inplace() self.obj._data = self.obj._data.setitem(indexer=indexer, value=value) self.obj._maybe_update_cacher(clear=True) def _align_series(self, indexer, ser, multiindex_indexer=False): """ Parameters ---------- indexer : tuple, slice, scalar The indexer used to get the locations that will be set to `ser` ser : pd.Series The values to assign to the locations specified by `indexer` multiindex_indexer : boolean, optional Defaults to False. Should be set to True if `indexer` was from a `pd.MultiIndex`, to avoid unnecessary broadcasting. Returns: -------- `np.array` of `ser` broadcast to the appropriate shape for assignment to the locations selected by `indexer` """ if isinstance(indexer, (slice, np.ndarray, list, Index)): indexer = tuple([indexer]) if isinstance(indexer, tuple): # flatten np.ndarray indexers ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) aligners = [not is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.obj.ndim == 2 is_panel = self.obj.ndim >= 3 obj = self.obj # are we a single alignable value on a non-primary # dim (e.g. panel: 1,2, or frame: 0) ? 
# hence need to align to a single axis dimension # rather that find all valid dims # frame if is_frame: single_aligner = single_aligner and aligners[0] # panel elif is_panel: single_aligner = (single_aligner and (aligners[1] or aligners[2])) # we have a frame, with multiple indexers on both axes; and a # series, so need to broadcast (see GH5206) if (sum_aligners == self.ndim and all([is_sequence(_) for _ in indexer])): ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values # single indexer if len(indexer) > 1 and not multiindex_indexer: l = len(indexer[1]) ser = np.tile(ser, l).reshape(l, -1).T return ser for i, idx in enumerate(indexer): ax = obj.axes[i] # multiple aligners (or null slices) if is_sequence(idx) or isinstance(idx, slice): if single_aligner and is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): new_ix = Index([new_ix]) else: new_ix = Index(new_ix) if ser.index.equals(new_ix) or not len(new_ix): return ser._values.copy() return ser.reindex(new_ix)._values # 2 dims elif single_aligner and is_frame: # reindex along index ax = self.obj.axes[1] if ser.index.equals(ax) or not len(ax): return ser._values.copy() return ser.reindex(ax)._values # >2 dims elif single_aligner: broadcast = [] for n, labels in enumerate(self.obj._get_plane_axes(i)): # reindex along the matching dimensions if len(labels & ser.index): ser = ser.reindex(labels) else: broadcast.append((n, len(labels))) # broadcast along other dims ser = ser._values.copy() for (axis, l) in broadcast: shape = [-1] * (len(broadcast) + 1) shape[axis] = l ser = np.tile(ser, l).reshape(shape) if self.obj.ndim == 3: ser = ser.T return ser elif is_scalar(indexer): ax = self.obj._get_axis(1) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values raise ValueError('Incompatible indexer with Series') def _align_frame(self, indexer, df): is_frame = self.obj.ndim == 2 is_panel = self.obj.ndim >= 3 if isinstance(indexer, tuple): aligners = [not is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) # TODO: single_aligner is not used single_aligner = sum_aligners == 1 # noqa idx, cols = None, None sindexers = [] for i, ix in enumerate(indexer): ax = self.obj.axes[i] if is_sequence(ix) or isinstance(ix, slice): if isinstance(ix, np.ndarray): ix = ix.ravel() if idx is None: idx = ax[ix] elif cols is None: cols = ax[ix] else: break else: sindexers.append(i) # panel if is_panel: # need to conform to the convention # as we are not selecting on the items axis # and we have a single indexer # GH 7763 if len(sindexers) == 1 and sindexers[0] != 0: df = df.T if idx is None: idx = df.index if cols is None: cols = df.columns if idx is not None and cols is not None: if df.index.equals(idx) and df.columns.equals(cols): val = df.copy()._values else: val = df.reindex(idx, columns=cols)._values return val elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame): ax = self.obj.index[indexer] if df.index.equals(ax): val = df.copy()._values else: # we have a multi-index and are trying to align # with a particular, level GH3738 if (isinstance(ax, MultiIndex) and isinstance(df.index, MultiIndex) and ax.nlevels != df.index.nlevels): raise TypeError("cannot align on a multi-index with out " "specifying the join levels") val = df.reindex(index=ax)._values return val elif is_scalar(indexer) and is_panel: idx = self.obj.axes[1] cols = self.obj.axes[2] # by definition we are indexing on the 0th axis # a passed in dataframe which is actually a transpose # of what is 
needed if idx.equals(df.index) and cols.equals(df.columns): return df.copy()._values return df.reindex(idx, columns=cols)._values raise ValueError('Incompatible indexer with DataFrame') def _align_panel(self, indexer, df): # TODO: is_frame, is_panel are unused is_frame = self.obj.ndim == 2 # noqa is_panel = self.obj.ndim >= 3 # noqa raise NotImplementedError("cannot set using an indexer with a Panel " "yet!") def _getitem_tuple(self, tup): try: return self._getitem_lowerdim(tup) except IndexingError: pass # no multi-index, so validate all of the indexers self._has_valid_tuple(tup) # ugly hack for GH #836 if self._multi_take_opportunity(tup): return self._multi_take(tup) # no shortcut needed retval = self.obj for i, key in enumerate(tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') if is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) return retval def _multi_take_opportunity(self, tup): from pandas.core.generic import NDFrame # ugly hack for GH #836 if not isinstance(self.obj, NDFrame): return False if not all(is_list_like_indexer(x) for x in tup): return False # just too complicated for indexer, ax in zip(tup, self.obj._data.axes): if isinstance(ax, MultiIndex): return False elif is_bool_indexer(indexer): return False elif not ax.is_unique: return False return True def _multi_take(self, tup): """ create the reindex map for our objects, raise the _exception if we can't create the indexer """ try: o = self.obj d = dict( [(a, self._convert_for_reindex(t, axis=o._get_axis_number(a))) for t, a in zip(tup, o._AXIS_ORDERS)]) return o.reindex(**d) except(KeyError, IndexingError): raise self._exception def _convert_for_reindex(self, key, axis=0): labels = self.obj._get_axis(axis) if is_bool_indexer(key): key = check_bool_indexer(labels, key) return labels[key] else: if isinstance(key, Index): keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird keyarr = _asarray_tuplesafe(key) if is_integer_dtype(keyarr): # Cast the indexer to uint64 if possible so # that the values returned from indexing are # also uint64. keyarr = labels._convert_arr_indexer(keyarr) if not labels.is_integer(): keyarr = _ensure_platform_int(keyarr) return labels.take(keyarr) return keyarr def _handle_lowerdim_multi_index_axis0(self, tup): # we have an axis0 multi-index, handle or raise try: # fast path for series or for tup devoid of slices return self._get_label(tup, axis=0) except TypeError: # slices are unhashable pass except Exception as e1: if isinstance(tup[0], (slice, Index)): raise IndexingError("Handle elsewhere") # raise the error if we are not sorted ax0 = self.obj._get_axis(0) if not ax0.is_lexsorted_for_tuple(tup): raise e1 return None def _getitem_lowerdim(self, tup): # we can directly get the axis result since the axis is specified if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) # we may have a nested tuples indexer here if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) # we maybe be using a tuple to represent multiple dimensions here ax0 = self.obj._get_axis(0) # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) if isinstance(ax0, MultiIndex) and self.name != 'iloc': result = self._handle_lowerdim_multi_index_axis0(tup) if result is not None: return result if len(tup) > self.obj.ndim: raise IndexingError("Too many indexers. 
handle elsewhere") # to avoid wasted computation # df.ix[d1:d2, 0] -> columns first (True) # df.ix[0, ['C', 'B', A']] -> rows first (False) for i, key in enumerate(tup): if is_label_like(key) or isinstance(key, tuple): section = self._getitem_axis(key, axis=i) # we have yielded a scalar ? if not is_list_like_indexer(section): return section elif section.ndim == self.ndim: # we're in the middle of slicing through a MultiIndex # revise the key wrt to `section` by inserting an _NS new_key = tup[:i] + (_NS,) + tup[i + 1:] else: new_key = tup[:i] + tup[i + 1:] # unfortunately need an odious kludge here because of # DataFrame transposing convention if (isinstance(section, ABCDataFrame) and i > 0 and len(new_key) == 2): a, b = new_key new_key = b, a if len(new_key) == 1: new_key, = new_key # Slices should return views, but calling iloc/loc with a null # slice returns a new object. if is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc/etc' return getattr(section, self.name)[new_key] raise IndexingError('not applicable') def _getitem_nested_tuple(self, tup): # we have a nested tuple so have at least 1 multi-index level # we should be able to match up the dimensionaility here # we have too many indexers for our dim, but have at least 1 # multi-index dimension, try to see if we have something like # a tuple passed to a series with a multi-index if len(tup) > self.ndim: result = self._handle_lowerdim_multi_index_axis0(tup) if result is not None: return result # this is a series with a multi-index specified a tuple of # selectors return self._getitem_axis(tup, axis=0) # handle the multi-axis by taking sections and reducing # this is iterative obj = self.obj axis = 0 for i, key in enumerate(tup): if is_null_slice(key): axis += 1 continue current_ndim = obj.ndim obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) axis += 1 # if we have a scalar, we are done if is_scalar(obj) or not hasattr(obj, 'ndim'): break # has the dim of the obj changed? # GH 7199 if obj.ndim < current_ndim: # GH 7516 # if had a 3 dim and are going to a 2d # axes are reversed on a DataFrame if i >= 1 and current_ndim == 3 and obj.ndim == 2: obj = obj.T axis -= 1 return obj def _getitem_axis(self, key, axis=0): if self._should_validate_iterable(axis): self._has_valid_type(key, axis) labels = self.obj._get_axis(axis) if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) elif (is_list_like_indexer(key) and not (isinstance(key, tuple) and isinstance(labels, MultiIndex))): if hasattr(key, 'ndim') and key.ndim > 1: raise ValueError('Cannot index with multidimensional key') return self._getitem_iterable(key, axis=axis) else: # maybe coerce a float scalar to integer key = labels._maybe_cast_indexer(key) if is_integer(key): if axis == 0 and isinstance(labels, MultiIndex): try: return self._get_label(key, axis=axis) except (KeyError, TypeError): if self.obj.index.levels[0].is_integer(): raise # this is the fallback! 
(for a non-float, non-integer index) if not labels.is_floating() and not labels.is_integer(): return self._get_loc(key, axis=axis) return self._get_label(key, axis=axis) def _getitem_iterable(self, key, axis=0): if self._should_validate_iterable(axis): self._has_valid_type(key, axis) labels = self.obj._get_axis(axis) if is_bool_indexer(key): key = check_bool_indexer(labels, key) inds, = key.nonzero() return self.obj.take(inds, axis=axis, convert=False) else: # Have the index compute an indexer or return None # if it cannot handle; we only act on all found values indexer, keyarr = labels._convert_listlike_indexer( key, kind=self.name) if indexer is not None and (indexer != -1).all(): return self.obj.take(indexer, axis=axis) # existing labels are unique and indexer are unique if labels.is_unique and Index(keyarr).is_unique: try: return self.obj.reindex_axis(keyarr, axis=axis) except AttributeError: # Series if axis != 0: raise AssertionError('axis must be 0') return self.obj.reindex(keyarr) # existing labels are non-unique else: # reindex with the specified axis if axis + 1 > self.obj.ndim: raise AssertionError("invalid indexing error with " "non-unique index") new_target, indexer, new_indexer = labels._reindex_non_unique( keyarr) if new_indexer is not None: result = self.obj.take(indexer[indexer != -1], axis=axis, convert=False) result = result._reindex_with_indexers( {axis: [new_target, new_indexer]}, copy=True, allow_dups=True) else: result = self.obj.take(indexer, axis=axis, convert=False) return result def _convert_to_indexer(self, obj, axis=0, is_setter=False): """ Convert indexing key into something we can use to do actual fancy indexing on an ndarray Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? 'In the face of ambiguity, refuse the temptation to guess.' raise AmbiguousIndexError with integer labels? 
- No, prefer label-based indexing """ labels = self.obj._get_axis(axis) if isinstance(obj, slice): return self._convert_slice_indexer(obj, axis) # try to find out correct indexer, if not type correct raise try: obj = self._convert_scalar_indexer(obj, axis) except TypeError: # but we will allow setting if is_setter: pass # see if we are positional in nature is_int_index = labels.is_integer() is_int_positional = is_integer(obj) and not is_int_index # if we are a label return me try: return labels.get_loc(obj) except LookupError: if isinstance(obj, tuple) and isinstance(labels, MultiIndex): if is_setter and len(obj) == labels.nlevels: return {'key': obj} raise except TypeError: pass except (ValueError): if not is_int_positional: raise # a positional if is_int_positional: # if we are setting and its not a valid location # its an insert which fails by definition if is_setter: # always valid if self.name == 'loc': return {'key': obj} # a positional if (obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex)): raise ValueError("cannot set by positional indexing with " "enlargement") return obj if is_nested_tuple(obj, labels): return labels.get_locs(obj) elif is_list_like_indexer(obj): if is_bool_indexer(obj): obj = check_bool_indexer(labels, obj) inds, = obj.nonzero() return inds else: # Have the index compute an indexer or return None # if it cannot handle indexer, objarr = labels._convert_listlike_indexer( obj, kind=self.name) if indexer is not None: return indexer # unique index if labels.is_unique: indexer = check = labels.get_indexer(objarr) # non-unique (dups) else: (indexer, missing) = labels.get_indexer_non_unique(objarr) # 'indexer' has dupes, create 'check' using 'missing' check = np.zeros_like(objarr) check[missing] = -1 mask = check == -1 if mask.any(): raise KeyError('%s not in index' % objarr[mask]) return _values_from_object(indexer) else: try: return labels.get_loc(obj) except LookupError: # allow a not found key only if we are a setter if not is_list_like_indexer(obj) and is_setter: return {'key': obj} raise def _tuplify(self, loc): tup = [slice(None, None) for _ in range(self.ndim)] tup[0] = loc return tuple(tup) def _get_slice_axis(self, slice_obj, axis=0): obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) indexer = self._convert_slice_indexer(slice_obj, axis) if isinstance(indexer, slice): return self._slice(indexer, axis=axis, kind='iloc') else: return self.obj.take(indexer, axis=axis, convert=False) class _IXIndexer(_NDFrameIndexer): """A primarily label-location based indexer, with integer position fallback. ``.ix[]`` supports mixed integer and label based access. It is primarily label based, but will fall back to integer positional access unless the corresponding axis is of integer type. ``.ix`` is the most general indexer and will support any of the inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating point label schemes. ``.ix`` is exceptionally useful when dealing with mixed positional and label based hierachical indexes. However, when an axis is integer based, ONLY label based access and not positional access is supported. Thus, in such cases, it's usually better to be explicit and use ``.iloc`` or ``.loc``. See more at :ref:`Advanced Indexing <advanced>`. """ def __init__(self, obj, name): _ix_deprecation_warning = textwrap.dedent(""" .ix is deprecated. 
Please use .loc for label based indexing or .iloc for positional indexing See the documentation here: http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""") # noqa warnings.warn(_ix_deprecation_warning, DeprecationWarning, stacklevel=3) super(_IXIndexer, self).__init__(obj, name) def _has_valid_type(self, key, axis): if isinstance(key, slice): return True elif is_bool_indexer(key): return True elif is_list_like_indexer(key): return True else: self._convert_scalar_indexer(key, axis) return True class _LocationIndexer(_NDFrameIndexer): _exception = Exception def __getitem__(self, key): if type(key) is tuple: key = tuple(com._apply_if_callable(x, self.obj) for x in key) try: if self._is_scalar_access(key): return self._getitem_scalar(key) except (KeyError, IndexError): pass return self._getitem_tuple(key) else: key = com._apply_if_callable(key, self.obj) return self._getitem_axis(key, axis=0) def _is_scalar_access(self, key): raise NotImplementedError() def _getitem_scalar(self, key): raise NotImplementedError() def _getitem_axis(self, key, axis=0): raise NotImplementedError() def _getbool_axis(self, key, axis=0): labels = self.obj._get_axis(axis) key = check_bool_indexer(labels, key) inds, = key.nonzero() try: return self.obj.take(inds, axis=axis, convert=False) except Exception as detail: raise self._exception(detail) def _get_slice_axis(self, slice_obj, axis=0): """ this is pretty simple as we just have to deal with labels """ obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step, kind=self.name) if isinstance(indexer, slice): return self._slice(indexer, axis=axis, kind='iloc') else: return self.obj.take(indexer, axis=axis, convert=False) class _LocIndexer(_LocationIndexer): """Purely label-location based indexer for selection by label. ``.loc[]`` is primarily label based, but may also be used with a boolean array. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index). - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'`` (note that contrary to usual python slices, **both** the start and the stop are included!). - A boolean array. - A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing (one of the above) ``.loc`` will raise a ``KeyError`` when the items are not found. See more at :ref:`Selection by Label <indexing.label>` """ _valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! 
Can be slices of integers if the " "index is integers), listlike of labels, boolean") _exception = KeyError def _has_valid_type(self, key, axis): ax = self.obj._get_axis(axis) # valid for a label where all labels are in the index # slice of lables (where start-end in labels) # slice of integers (only if in the lables) # boolean if isinstance(key, slice): return True elif is_bool_indexer(key): return True elif is_list_like_indexer(key): # mi is just a passthru if isinstance(key, tuple) and isinstance(ax, MultiIndex): return True # TODO: don't check the entire key unless necessary if (not is_iterator(key) and len(key) and np.all(ax.get_indexer_for(key) < 0)): raise KeyError("None of [%s] are in the [%s]" % (key, self.obj._get_axis_name(axis))) return True else: def error(): if isna(key): raise TypeError("cannot use label indexing with a null " "key") raise KeyError("the label [%s] is not in the [%s]" % (key, self.obj._get_axis_name(axis))) try: key = self._convert_scalar_indexer(key, axis) if not ax.contains(key): error() except TypeError as e: # python 3 type errors should be raised if _is_unorderable_exception(e): error() raise except: error() return True def _is_scalar_access(self, key): # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if not hasattr(key, '__len__'): return False if len(key) != self.ndim: return False for i, k in enumerate(key): if not is_scalar(k): return False ax = self.obj.axes[i] if isinstance(ax, MultiIndex): return False if not ax.is_unique: return False return True def _getitem_scalar(self, key): # a fast-path to scalar access # if not, raise values = self.obj.get_value(*key) return values def _get_partial_string_timestamp_match_key(self, key, labels): """Translate any partial string timestamp matches in key, returning the new key (GH 10331)""" if isinstance(labels, MultiIndex): if isinstance(key, compat.string_types) and \ labels.levels[0].is_all_dates: # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) key = tuple([key] + [slice(None)] * (len(labels.levels) - 1)) if isinstance(key, tuple): # Convert (..., '2016-01-01', ...) in tuple to # (..., slice('2016-01-01', '2016-01-01', None), ...) 
new_key = [] for i, component in enumerate(key): if isinstance(component, compat.string_types) and \ labels.levels[i].is_all_dates: new_key.append(slice(component, component, None)) else: new_key.append(component) key = tuple(new_key) return key def _getitem_axis(self, key, axis=0): labels = self.obj._get_axis(axis) key = self._get_partial_string_timestamp_match_key(key, labels) if isinstance(key, slice): self._has_valid_type(key, axis) return self._get_slice_axis(key, axis=axis) elif is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): # convert various list-like indexers # to a list of keys # we will use the *values* of the object # and NOT the index if its a PandasObject if isinstance(labels, MultiIndex): if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1: # Series, or 0,1 ndim ndarray # GH 14730 key = list(key) elif isinstance(key, ABCDataFrame): # GH 15438 raise NotImplementedError("Indexing a MultiIndex with a " "DataFrame key is not " "implemented") elif hasattr(key, 'ndim') and key.ndim > 1: raise NotImplementedError("Indexing a MultiIndex with a " "multidimensional key is not " "implemented") if (not isinstance(key, tuple) and len(key) > 1 and not isinstance(key[0], tuple)): key = tuple([key]) # an iterable multi-selection if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): if hasattr(key, 'ndim') and key.ndim > 1: raise ValueError('Cannot index with multidimensional key') return self._getitem_iterable(key, axis=axis) # nested tuple slicing if is_nested_tuple(key, labels): locs = labels.get_locs(key) indexer = [slice(None)] * self.ndim indexer[axis] = locs return self.obj.iloc[tuple(indexer)] # fall thru to straight lookup self._has_valid_type(key, axis) return self._get_label(key, axis=axis) class _iLocIndexer(_LocationIndexer): """Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean array. Allowed inputs are: - An integer, e.g. ``5``. - A list or array of integers, e.g. ``[4, 3, 0]``. - A slice object with ints, e.g. ``1:7``. - A boolean array. - A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing (one of the above) ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). 
See more at :ref:`Selection by Position <indexing.integer>` """ _valid_types = ("integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array") _exception = IndexError def _has_valid_type(self, key, axis): if is_bool_indexer(key): if hasattr(key, 'index') and isinstance(key.index, Index): if key.index.inferred_type == 'integer': raise NotImplementedError("iLocation based boolean " "indexing on an integer type " "is not available") raise ValueError("iLocation based boolean indexing cannot use " "an indexable as a mask") return True if isinstance(key, slice): return True elif is_integer(key): return self._is_valid_integer(key, axis) elif is_list_like_indexer(key): return self._is_valid_list_like(key, axis) return False def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) def _is_scalar_access(self, key): # this is a shortcut accessor to both .loc and .iloc # that provide the equivalent access of .at and .iat # a) avoid getting things via sections and (to minimize dtype changes) # b) provide a performant path if not hasattr(key, '__len__'): return False if len(key) != self.ndim: return False for i, k in enumerate(key): if not is_integer(k): return False ax = self.obj.axes[i] if not ax.is_unique: return False return True def _getitem_scalar(self, key): # a fast-path to scalar access # if not, raise values = self.obj.get_value(*key, takeable=True) return values def _is_valid_integer(self, key, axis): # return a boolean if we have a valid integer indexer ax = self.obj._get_axis(axis) l = len(ax) if key >= l or key < -l: raise IndexError("single positional indexer is out-of-bounds") return True def _is_valid_list_like(self, key, axis): # return a boolean if we are a valid list-like (e.g. 
that we don't # have out-of-bounds values) # a tuple should already have been caught by this point # so don't treat a tuple as a valid indexer if isinstance(key, tuple): raise IndexingError('Too many indexers') # coerce the key to not exceed the maximum size of the index arr = np.array(key) ax = self.obj._get_axis(axis) l = len(ax) if (hasattr(arr, '__len__') and len(arr) and (arr.max() >= l or arr.min() < -l)): raise IndexError("positional indexers are out-of-bounds") return True def _getitem_tuple(self, tup): self._has_valid_tuple(tup) try: return self._getitem_lowerdim(tup) except: pass retval = self.obj axis = 0 for i, key in enumerate(tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') if is_null_slice(key): axis += 1 continue retval = getattr(retval, self.name)._getitem_axis(key, axis=axis) # if the dim was reduced, then pass a lower-dim the next time if retval.ndim < self.ndim: axis -= 1 # try to get for the next axis axis += 1 return retval def _get_slice_axis(self, slice_obj, axis=0): obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) slice_obj = self._convert_slice_indexer(slice_obj, axis) if isinstance(slice_obj, slice): return self._slice(slice_obj, axis=axis, kind='iloc') else: return self.obj.take(slice_obj, axis=axis, convert=False) def _get_list_axis(self, key, axis=0): """ Return Series values by list or array of integers Parameters ---------- key : list-like positional indexer axis : int (can only be zero) Returns ------- Series object """ try: return self.obj.take(key, axis=axis, convert=False) except IndexError: # re-raise with different error message raise IndexError("positional indexers are out-of-bounds") def _getitem_axis(self, key, axis=0): if isinstance(key, slice): self._has_valid_type(key, axis) return self._get_slice_axis(key, axis=axis) if isinstance(key, list): try: key = np.asarray(key) except TypeError: # pragma: no cover pass if is_bool_indexer(key): self._has_valid_type(key, axis) return self._getbool_axis(key, axis=axis) # a list of integers elif is_list_like_indexer(key): return self._get_list_axis(key, axis=axis) # a single integer else: key = self._convert_scalar_indexer(key, axis) if not is_integer(key): raise TypeError("Cannot index by location index with a " "non-integer key") # validate the location self._is_valid_integer(key, axis) return self._get_loc(key, axis=axis) def _convert_to_indexer(self, obj, axis=0, is_setter=False): """ much simpler as we only have to deal with our valid types """ # make need to convert a float key if isinstance(obj, slice): return self._convert_slice_indexer(obj, axis) elif is_float(obj): return self._convert_scalar_indexer(obj, axis) elif self._has_valid_type(obj, axis): return obj raise ValueError("Can only index by location with a [%s]" % self._valid_types) class _ScalarAccessIndexer(_NDFrameIndexer): """ access scalars quickly """ def _convert_key(self, key, is_setter=False): return list(key) def __getitem__(self, key): if not isinstance(key, tuple): # we could have a convertible item here (e.g. 
Timestamp) if not is_list_like_indexer(key): key = tuple([key]) else: raise ValueError('Invalid call for scalar access (getting)!') key = self._convert_key(key) return self.obj.get_value(*key, takeable=self._takeable) def __setitem__(self, key, value): if isinstance(key, tuple): key = tuple(com._apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple key = com._apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = self._tuplify(key) if len(key) != self.obj.ndim: raise ValueError('Not enough indexers for scalar access ' '(setting)!') key = list(self._convert_key(key, is_setter=True)) key.append(value) self.obj.set_value(*key, takeable=self._takeable) class _AtIndexer(_ScalarAccessIndexer): """Fast label-based scalar accessor Similarly to ``loc``, ``at`` provides **label** based scalar lookups. You can also set using these indexers. """ _takeable = False def _convert_key(self, key, is_setter=False): """ require they keys to be the same type as the index (so we don't fallback) """ # allow arbitrary setting if is_setter: return list(key) for ax, i in zip(self.obj.axes, key): if ax.is_integer(): if not is_integer(i): raise ValueError("At based indexing on an integer index " "can only have integer indexers") else: if is_integer(i): raise ValueError("At based indexing on an non-integer " "index can only have non-integer " "indexers") return key class _iAtIndexer(_ScalarAccessIndexer): """Fast integer location scalar accessor. Similarly to ``iloc``, ``iat`` provides **integer** based lookups. You can also set using these indexers. """ _takeable = True def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) def _convert_key(self, key, is_setter=False): """ require integer args (and convert to label arguments) """ for a, i in zip(self.obj.axes, key): if not is_integer(i): raise ValueError("iAt based indexing can only have integer " "indexers") return key # 32-bit floating point machine epsilon _eps = 1.1920929e-07 def length_of_indexer(indexer, target=None): """return the length of a single non-tuple indexer which could be a slice """ if target is not None and isinstance(indexer, slice): l = len(target) start = indexer.start stop = indexer.stop step = indexer.step if start is None: start = 0 elif start < 0: start += l if stop is None or stop > l: stop = l elif stop < 0: stop += l if step is None: step = 1 elif step < 0: step = -step return (stop - start + step - 1) // step elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)): return len(indexer) elif not is_list_like_indexer(indexer): return 1 raise AssertionError("cannot find the length of the indexer") def convert_to_index_sliceable(obj, key): """if we are index sliceable, then return my slicer, otherwise return None """ idx = obj.index if isinstance(key, slice): return idx._convert_slice_indexer(key, kind='getitem') elif isinstance(key, compat.string_types): # we are an actual column if obj._data.items.contains(key): return None # We might have a datetimelike string that we can translate to a # slice here via partial string indexing if idx.is_all_dates: try: return idx._get_string_slice(key) except (KeyError, ValueError, NotImplementedError): return None return None def is_index_slice(obj): def _is_valid_index(x): return (is_integer(x) or is_float(x) and np.allclose(x, int(x), rtol=_eps, atol=0)) def _crit(v): return v is None or _is_valid_index(v) both_none = obj.start is None and obj.stop is None return not both_none and (_crit(obj.start) and 
                              _crit(obj.stop))


def check_bool_indexer(ax, key):
    # boolean indexing, need to check that the data are aligned, otherwise
    # disallowed

    # this function assumes that is_bool_indexer(key) == True
    result = key
    if isinstance(key, ABCSeries) and not key.index.equals(ax):
        result = result.reindex(ax)
        mask = isna(result._values)
        if mask.any():
            raise IndexingError('Unalignable boolean Series provided as '
                                'indexer (index of the boolean Series and of '
                                'the indexed object do not match)')
        result = result.astype(bool)._values
    elif is_sparse(result):
        result = result.to_dense()
        result = np.asarray(result, dtype=bool)
    else:
        # is_bool_indexer has already checked for nulls in the case of an
        # object array key, so no check needed here
        result = np.asarray(result, dtype=bool)

    return result


def convert_missing_indexer(indexer):
    """ reverse convert a missing indexer, which is a dict
    return the scalar indexer and a boolean indicating if we converted
    """

    if isinstance(indexer, dict):

        # a missing key (but not a tuple indexer)
        indexer = indexer['key']

        if isinstance(indexer, bool):
            raise KeyError("cannot use a single bool to index into setitem")
        return indexer, True

    return indexer, False


def convert_from_missing_indexer_tuple(indexer, axes):
    """ create a filtered indexer that doesn't have any missing indexers """

    def get_indexer(_i, _idx):
        return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
                _idx)

    return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])


def maybe_convert_indices(indices, n):
    """ if we have negative indices, translate to positive here
    if we have indices that are out-of-bounds, raise an IndexError
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if len(indices) == 0:
            # If list is empty, np.array will return float and cause indexing
            # errors.
            return np.empty(0, dtype=np.int_)

    mask = indices < 0
    if mask.any():
        indices[mask] += n
    mask = (indices >= n) | (indices < 0)
    if mask.any():
        raise IndexError("indices are out-of-bounds")
    return indices


def maybe_convert_ix(*args):
    """
    We likely want to take the cross-product
    """

    ixify = True
    for arg in args:
        if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
            ixify = False

    if ixify:
        return np.ix_(*args)
    else:
        return args


def is_nested_tuple(tup, labels):
    # check for a compatible nested tuple and multiindexes among the axes
    if not isinstance(tup, tuple):
        return False

    # are we a nested tuple of: tuple, list, slice
    for i, k in enumerate(tup):

        if isinstance(k, (tuple, list, slice)):
            return isinstance(labels, MultiIndex)

    return False


def is_list_like_indexer(key):
    # allow a list_like, but exclude NamedTuples which can be indexers
    return is_list_like(key) and not (isinstance(key, tuple) and
                                      type(key) is not tuple)


def is_label_like(key):
    # select a label or row
    return not isinstance(key, slice) and not is_list_like_indexer(key)


def need_slice(obj):
    return (obj.start is not None or
            obj.stop is not None or
            (obj.step is not None and obj.step != 1))


def maybe_droplevels(index, key):
    # drop levels
    original_index = index
    if isinstance(key, tuple):
        for _ in key:
            try:
                index = index.droplevel(0)
            except:
                # we have dropped too much, so back out
                return original_index
    else:
        try:
            index = index.droplevel(0)
        except:
            pass

    return index


def _non_reducing_slice(slice_):
    """
    Ensure that a slice doesn't reduce to a Series or Scalar.

    Any user-passed `subset` should have this called on it to make sure
    we're always working with DataFrames.
    """
    # default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlice[:, ['A', 'B']]
    kinds = tuple(list(compat.string_types) + [ABCSeries,
                                               np.ndarray, Index, list])
    if isinstance(slice_, kinds):
        slice_ = IndexSlice[:, slice_]

    def pred(part):
        # true when slice does *not* reduce
        return isinstance(part, slice) or is_list_like(part)

    if not is_list_like(slice_):
        if not isinstance(slice_, slice):
            # a 1-d slice, like df.loc[1]
            slice_ = [[slice_]]
        else:
            # slice(a, b, c)
            slice_ = [slice_]  # to tuplize later
    else:
        slice_ = [part if pred(part) else [part] for part in slice_]
    return tuple(slice_)


def _maybe_numeric_slice(df, slice_, include_bool=False):
    """
    want nice defaults for background_gradient that don't break
    with non-numeric data. But if slice_ is passed go with that.
    """
    if slice_ is None:
        dtypes = [np.number]
        if include_bool:
            dtypes.append(bool)
        slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
    return slice_
Ziqi-Li/bknqgis
pandas/pandas/core/indexing.py
Python
gpl-2.0
73,643
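# --- Illustration (not part of the file above) ----------------------------
# A minimal sketch of the indexer semantics documented in the _LocIndexer /
# _iLocIndexer / _ScalarAccessIndexer docstrings: .loc label slices include
# BOTH endpoints, .iloc integer slices exclude the stop, and .at/.iat are
# the fast scalar accessors by label and by position respectively.
import pandas as pd

df = pd.DataFrame({'x': [10, 20, 30]}, index=['a', 'b', 'c'])

assert list(df.loc['a':'b', 'x']) == [10, 20]  # both slice endpoints included
assert list(df.iloc[0:2, 0]) == [10, 20]       # stop position excluded
assert df.at['c', 'x'] == 30                   # label-based scalar access
assert df.iat[2, 0] == 30                      # position-based scalar access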
# -*- coding: utf-8 -*- """ :created: 9 Jan 2014 :author: kimon :copyright: © 2014 Kimon Tsitsikas, Delmic This file is part of Odemis. .. license:: Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. """ from __future__ import division import logging from odemis import model import odemis from odemis.acq import stream, leech from odemis.util import test import os import time import unittest # logging.basicConfig(format=" - %(levelname)s \t%(message)s") logging.getLogger().setLevel(logging.DEBUG) # _frm = "%(asctime)s %(levelname)-7s %(module)-15s: %(message)s" # logging.getLogger().handlers[0].setFormatter(logging.Formatter(_frm)) CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/" SECOM_CONFIG = CONFIG_PATH + "sim/secom-sim.odm.yaml" class TestDriftStream(unittest.TestCase): backend_was_running = False @classmethod def setUpClass(cls): try: test.start_backend(SECOM_CONFIG) except LookupError: logging.info("A running backend is already found, skipping tests") cls.backend_was_running = True return except IOError as exp: logging.error(str(exp)) raise # find components by their role cls.ebeam = model.getComponent(role="e-beam") cls.sed = model.getComponent(role="se-detector") cls.ccd = model.getComponent(role="ccd") cls.light = model.getComponent(role="light") cls.light_filter = model.getComponent(role="filter") @classmethod def tearDownClass(cls): if cls.backend_was_running: return test.stop_backend() def setUp(self): if self.backend_was_running: self.skipTest("Running backend found") # @unittest.skip("skip") def test_drift_stream(self): escan = self.ebeam detector = self.sed ccd = self.ccd # Create the stream sems = stream.SEMStream("test sem", detector, detector.data, escan) ars = stream.ARSettingsStream("test ar", ccd, ccd.data, escan) sas = stream.SEMARMDStream("test sem-ar", [sems, ars]) # Long acquisition ccd.exposureTime.value = 1e-02 # s dc = leech.AnchorDriftCorrector(escan, detector) dc.period.value = 5 dc.roi.value = (0.525, 0.525, 0.6, 0.6) dc.dwellTime.value = 1e-04 sems.leeches.append(dc) escan.dwellTime.value = 1e-02 ars.roi.value = (0.4, 0.4, 0.6, 0.6) ars.repetition.value = (5, 5) start = time.time() for l in sas.leeches: l.series_start() f = sas.acquire() x = f.result() for l in sas.leeches: l.series_complete(x) dur = time.time() - start logging.debug("Acquisition took %g s", dur) self.assertTrue(f.done()) def on_done(self, future): self.done += 1 def on_progress_update(self, future, past, left): self.past = past self.left = left self.updates += 1 if __name__ == "__main__": unittest.main()
delmic/odemis
src/odemis/acq/test/stream_drift_test.py
Python
gpl-2.0
3,620
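# --- Illustration (not part of the file above) ----------------------------
# One way to drive the drift-correction test above programmatically rather
# than executing the module directly; like the test itself, this assumes the
# simulated SECOM backend described by SECOM_CONFIG is installed and that
# odemis is importable.
import unittest

suite = unittest.TestLoader().loadTestsFromName(
    'odemis.acq.test.stream_drift_test.TestDriftStream.test_drift_stream')
unittest.TextTestRunner(verbosity=2).run(suite)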
# Copyright (c) 2006 by Aurelien Foret <orelien@chez.com>
# Copyright (c) 2006-2018 Pacman Development Team <pacman-dev@archlinux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os

import pmtest
import tap


class pmenv(object):
    """Environment object
    """

    testcases = []
    passed = 0
    failed = 0
    expectedfail = 0
    unexpectedpass = 0

    def __init__(self, root="root"):
        self.root = os.path.abspath(root)
        self.pacman = {
            "bin": None,
            "bindir": ["/usr/bin/"],
            "debug": 0,
            "gdb": 0,
            "valgrind": 0,
            "nolog": 0
        }

    def __str__(self):
        return "root = %s\n" \
               "pacman = %s" \
               % (self.root, self.pacman)

    def addtest(self, testcase):
        """Register a test case, checking first that its file exists.
        """
        if not os.path.isfile(testcase):
            raise IOError("test file %s not found" % testcase)
        self.testcases.append(testcase)

    def run(self):
        """Load, generate and run every registered test case, reporting
        the results in TAP format.
        """
        tap.plan(len(self.testcases))

        for testcase in self.testcases:
            t = pmtest.pmtest(testcase, self.root)
            tap.diag("Running '%s'" % t.testname)

            t.load()
            t.generate(self.pacman)
            t.run(self.pacman)

            tap.diag("==> Checking rules")
            tap.todo = t.expectfailure
            tap.subtest(lambda: t.check(), t.description)
kylon/pacman-fakeroot
test/pacman/pmenv.py
Python
gpl-2.0
2,029
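# --- Illustration (not part of the file above) ----------------------------
# A minimal sketch of driving pmenv by hand; the test-case path below is
# made up, and pmenv is normally driven by pacman's pactest harness.
env = pmenv(root="/tmp/pactest-root")
env.pacman["bin"] = "/usr/bin/pacman"  # the binary under test
env.addtest("tests/upgrade001.py")     # raises IOError if the file is missing
env.run()                              # loads, runs and TAP-reports each case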
from conf.defaults import mysql

class settings():
    apikey = "INVALID-API-KEY"
    channel = "#BrokenBots"
    callSign = "unbot"
    name = "UnNamedBot"
    manOpList = ["Oper1", "Oper2", "Oper3"]
    commandPrefix = "!"  # leave as "" to disallow commands entirely
    filterBotMessages = True
    # optional message formatting codes, disabled by default:
    # textPrefix = "\x0306,99"
    # textPostfix = "\x03\x02"
    version = 1
MrMindImplosion/Slack-Bots
conf/defaults/__init__.py
Python
gpl-2.0
378
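# --- Illustration (not part of the file above) ----------------------------
# A hypothetical consumer of the settings class (no such helper exists in
# this package), showing the commandPrefix convention: an empty prefix
# disables command parsing entirely.
from conf.defaults import settings

def looks_like_command(text):
    prefix = settings.commandPrefix
    return bool(prefix) and text.startswith(prefix)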
# coding: utf8 # Copyright (c) 2014 Adafruit Industries # Author: Tony DiCola # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import division import logging import time # BMP085 default address. BMP085_I2CADDR = 0x77 # Operating Modes BMP085_ULTRALOWPOWER = 0 BMP085_STANDARD = 1 BMP085_HIGHRES = 2 BMP085_ULTRAHIGHRES = 3 # BMP085 Registers BMP085_CAL_AC1 = 0xAA # R Calibration data (16 bits) BMP085_CAL_AC2 = 0xAC # R Calibration data (16 bits) BMP085_CAL_AC3 = 0xAE # R Calibration data (16 bits) BMP085_CAL_AC4 = 0xB0 # R Calibration data (16 bits) BMP085_CAL_AC5 = 0xB2 # R Calibration data (16 bits) BMP085_CAL_AC6 = 0xB4 # R Calibration data (16 bits) BMP085_CAL_B1 = 0xB6 # R Calibration data (16 bits) BMP085_CAL_B2 = 0xB8 # R Calibration data (16 bits) BMP085_CAL_MB = 0xBA # R Calibration data (16 bits) BMP085_CAL_MC = 0xBC # R Calibration data (16 bits) BMP085_CAL_MD = 0xBE # R Calibration data (16 bits) BMP085_CONTROL = 0xF4 BMP085_TEMPDATA = 0xF6 BMP085_PRESSUREDATA = 0xF6 # Commands BMP085_READTEMPCMD = 0x2E BMP085_READPRESSURECMD = 0x34 class BMP085(object): def __init__(self, mode=BMP085_STANDARD, address=BMP085_I2CADDR, i2c=None, **kwargs): self._logger = logging.getLogger('Adafruit_BMP.BMP085') # Check that mode is valid. if mode not in [BMP085_ULTRALOWPOWER, BMP085_STANDARD, BMP085_HIGHRES, BMP085_ULTRAHIGHRES]: raise ValueError('Unexpected mode value {0}. Set mode to one of BMP085_ULTRALOWPOWER, BMP085_STANDARD, BMP085_HIGHRES, or BMP085_ULTRAHIGHRES'.format(mode)) self._mode = mode # Create I2C device. if i2c is None: import Adafruit_GPIO.I2C as I2C i2c = I2C self._device = i2c.get_i2c_device(address, **kwargs) # Load calibration values. 
self._load_calibration() def _load_calibration(self): self.cal_AC1 = self._device.readS16BE(BMP085_CAL_AC1) # INT16 self.cal_AC2 = self._device.readS16BE(BMP085_CAL_AC2) # INT16 self.cal_AC3 = self._device.readS16BE(BMP085_CAL_AC3) # INT16 self.cal_AC4 = self._device.readU16BE(BMP085_CAL_AC4) # UINT16 self.cal_AC5 = self._device.readU16BE(BMP085_CAL_AC5) # UINT16 self.cal_AC6 = self._device.readU16BE(BMP085_CAL_AC6) # UINT16 self.cal_B1 = self._device.readS16BE(BMP085_CAL_B1) # INT16 self.cal_B2 = self._device.readS16BE(BMP085_CAL_B2) # INT16 self.cal_MB = self._device.readS16BE(BMP085_CAL_MB) # INT16 self.cal_MC = self._device.readS16BE(BMP085_CAL_MC) # INT16 self.cal_MD = self._device.readS16BE(BMP085_CAL_MD) # INT16 self._logger.debug('AC1 = {0:6d}'.format(self.cal_AC1)) self._logger.debug('AC2 = {0:6d}'.format(self.cal_AC2)) self._logger.debug('AC3 = {0:6d}'.format(self.cal_AC3)) self._logger.debug('AC4 = {0:6d}'.format(self.cal_AC4)) self._logger.debug('AC5 = {0:6d}'.format(self.cal_AC5)) self._logger.debug('AC6 = {0:6d}'.format(self.cal_AC6)) self._logger.debug('B1 = {0:6d}'.format(self.cal_B1)) self._logger.debug('B2 = {0:6d}'.format(self.cal_B2)) self._logger.debug('MB = {0:6d}'.format(self.cal_MB)) self._logger.debug('MC = {0:6d}'.format(self.cal_MC)) self._logger.debug('MD = {0:6d}'.format(self.cal_MD)) def _load_datasheet_calibration(self): # Set calibration from values in the datasheet example. Useful for debugging the # temp and pressure calculation accuracy. self.cal_AC1 = 408 self.cal_AC2 = -72 self.cal_AC3 = -14383 self.cal_AC4 = 32741 self.cal_AC5 = 32757 self.cal_AC6 = 23153 self.cal_B1 = 6190 self.cal_B2 = 4 self.cal_MB = -32767 self.cal_MC = -8711 self.cal_MD = 2868 def read_raw_temp(self): """Reads the raw (uncompensated) temperature from the sensor.""" self._device.write8(BMP085_CONTROL, BMP085_READTEMPCMD) time.sleep(0.005) # Wait 5ms raw = self._device.readU16BE(BMP085_TEMPDATA) self._logger.debug('Raw temp 0x{0:X} ({1})'.format(raw & 0xFFFF, raw)) return raw def read_raw_pressure(self): """Reads the raw (uncompensated) pressure level from the sensor.""" self._device.write8(BMP085_CONTROL, BMP085_READPRESSURECMD + (self._mode << 6)) if self._mode == BMP085_ULTRALOWPOWER: time.sleep(0.005) elif self._mode == BMP085_HIGHRES: time.sleep(0.014) elif self._mode == BMP085_ULTRAHIGHRES: time.sleep(0.026) else: time.sleep(0.008) msb = self._device.readU8(BMP085_PRESSUREDATA) lsb = self._device.readU8(BMP085_PRESSUREDATA+1) xlsb = self._device.readU8(BMP085_PRESSUREDATA+2) raw = ((msb << 16) + (lsb << 8) + xlsb) >> (8 - self._mode) self._logger.debug('Raw pressure 0x{0:04X} ({1})'.format(raw & 0xFFFF, raw)) return raw def read_temperature(self): """Gets the compensated temperature in degrees celsius.""" UT = self.read_raw_temp() # Datasheet value for debugging: #UT = 27898 # Calculations below are taken straight from section 3.5 of the datasheet. X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15 X2 = (self.cal_MC << 11) // (X1 + self.cal_MD) B5 = X1 + X2 temp = ((B5 + 8) >> 4) / 10.0 self._logger.debug('Calibrated temperature {0} C'.format(temp)) return temp def read_pressure(self): """Gets the compensated pressure in Pascals.""" UT = self.read_raw_temp() UP = self.read_raw_pressure() # Datasheet values for debugging: #UT = 27898 #UP = 23843 # Calculations below are taken straight from section 3.5 of the datasheet. # Calculate true temperature coefficient B5. 
X1 = ((UT - self.cal_AC6) * self.cal_AC5) >> 15 X2 = (self.cal_MC << 11) // (X1 + self.cal_MD) B5 = X1 + X2 self._logger.debug('B5 = {0}'.format(B5)) # Pressure Calculations B6 = B5 - 4000 self._logger.debug('B6 = {0}'.format(B6)) X1 = (self.cal_B2 * (B6 * B6) >> 12) >> 11 X2 = (self.cal_AC2 * B6) >> 11 X3 = X1 + X2 B3 = (((self.cal_AC1 * 4 + X3) << self._mode) + 2) // 4 self._logger.debug('B3 = {0}'.format(B3)) X1 = (self.cal_AC3 * B6) >> 13 X2 = (self.cal_B1 * ((B6 * B6) >> 12)) >> 16 X3 = ((X1 + X2) + 2) >> 2 B4 = (self.cal_AC4 * (X3 + 32768)) >> 15 self._logger.debug('B4 = {0}'.format(B4)) B7 = (UP - B3) * (50000 >> self._mode) self._logger.debug('B7 = {0}'.format(B7)) if B7 < 0x80000000: p = (B7 * 2) // B4 else: p = (B7 // B4) * 2 X1 = (p >> 8) * (p >> 8) X1 = (X1 * 3038) >> 16 X2 = (-7357 * p) >> 16 p = p + ((X1 + X2 + 3791) >> 4) self._logger.debug('Pressure {0} Pa'.format(p)) return p def read_altitude(self, sealevel_pa=101325.0): """Calculates the altitude in meters.""" # Calculation taken straight from section 3.6 of the datasheet. pressure = float(self.read_pressure()) altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0/5.255))) self._logger.debug('Altitude {0} m'.format(altitude)) return altitude def read_sealevel_pressure(self, altitude_m=0.0): """Calculates the pressure at sealevel when given a known altitude in meters. Returns a value in Pascals.""" pressure = float(self.read_pressure()) p0 = pressure / pow(1.0 - altitude_m/44330.0, 5.255) self._logger.debug('Sealevel pressure {0} Pa'.format(p0)) return p0
makerplane/FIX-Gateway
fixgw/plugins/rpi_bmp085/Adafruit_BMP/BMP085.py
Python
gpl-2.0
9,066
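# --- Illustration (not part of the file above) ----------------------------
# A minimal usage sketch for the driver above; it needs a BMP085/BMP180
# wired at the default I2C address and the Adafruit_GPIO package installed.
sensor = BMP085(mode=BMP085_ULTRAHIGHRES)
print('Temp      = {0:0.2f} C'.format(sensor.read_temperature()))
print('Pressure  = {0:0.2f} Pa'.format(sensor.read_pressure()))
print('Altitude  = {0:0.2f} m'.format(sensor.read_altitude()))
print('Sea level = {0:0.2f} Pa'.format(sensor.read_sealevel_pressure()))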
import random import string import md5 import re import time import json import urllib import urlparse _domain="cartoonhd.website" domain="http://%s"%_domain search="evokjaqbb8" slk="0A6ru35yyi5yn4THYpJqy0X82tE95btV" encoding="utf-8" def iframe(src): ret=None for sep in ['"',"'"]: res=re.findall("<iframe*.?src="+sep+"(.*?)"+sep,src,re.IGNORECASE) if len(res): ret=res[0] break return ret def caesar(plaintext, shift): lower = string.ascii_lowercase lower_trans = lower[shift:] + lower[:shift] alphabet = lower + lower.upper() shifted = lower_trans + lower_trans.upper() return unicode(str(plaintext).translate(string.maketrans(alphabet, shifted)).encode(encoding)) def nopad(text): for i in range(4): if text[-1]=="=":text=text[:-1] return text def run(ump): globals()['ump'] = ump i=ump.info is_serie=ump.info["mediatype"] == ump.defs.MT_EPISODE if not (ump.subscribe("movie") or ump.subscribe("tvshow")): return token=re.findall("var\s*tok\s*=\s*'(.*?)'",ump.get_page(domain,encoding))[0] found=False names=ump.getnames(3) for name in names: if found:break ump.add_log("cartoonhd is searching %s"%names[0]) set="".join([random.choice(string.ascii_letters) for k in range(25)]) d={ "q":name, "limit":100, "timestamp":int(time.time() * 1000), "verifiedCheck":token, "set":set, "rt":caesar(token+set,13), "sl":md5.new(slk.encode("base-64")[:-1]+search).hexdigest() } for result in json.loads(ump.get_page(domain+"/api/v2/cautare/"+search,encoding,data=d).encode("ascii","replace")): meta=result["meta"].lower() if ump.is_same(name,result["title"]): if (is_serie and "show" in meta) or (not is_serie and "movie" in meta and ump.is_same(str(i["year"]),str(result["year"]))): found=True break if found: ump.add_log("cartoonhd has matched %s"%names[0]) else: ump.add_log("cartoonhd can't match %s"%names[0]) return if is_serie: sourcepage=domain+result["permalink"]+"/season/%01d/episode/%01d"%(int(i["season"]),int(i["episode"])) else: sourcepage=domain+result["permalink"] page=ump.get_page(sourcepage,encoding) header={'X-Requested-With':'XMLHttpRequest'} data={} header["Authorization"]="Bearer false" for cookie in ump.cj: if _domain in cookie.domain and cookie.name == "__utmx": header["Authorization"]="Bearer %s"%cookie.val if is_serie: data["action"]="getEpisodeEmb" mname="%s %dx%d %s" % (i["tvshowtitle"],i["season"],i["episode"],i["title"]) else: data["action"]="getMovieEmb" mname=names[0] data["elid"]=urllib.quote(str(int(time.time())).encode("base-64")[:-1]) data["token"]=re.findall("var\s*tok\s*\=\s*'(.*?)'", page)[0] data["idEl"]= re.findall('elid\s*=\s*"(.*?)"', page)[0] for name,source in json.loads(ump.get_page(domain+"/ajax/nembeds.php",encoding,data=data,header=header)).iteritems(): sname=source["type"].lower() link=iframe(source["embed"]) if not link:continue upname=None if "google" in sname: upname="google" hash={"video":link} elif "openload" in sname: upname="openload" paths=link.split("/") hash=None for path in range(len(paths)): if paths[path]=="embed": hash=paths[path+1] break if not hash : continue elif "allmyvideos" in sname: upname="allmyvideos" path=urlparse.urlparse(link).path path=path.replace(".html","") path=path.replace(".htm","") hash=path.split("-")[1] elif "vidspot" in sname: upname="vidspot" path=urlparse.urlparse(link).path path=path.replace(".html","") path=path.replace(".htm","") hash=path.split("-")[1] if upname: parts=[{"url_provider_name":upname,"url_provider_hash":hash,"testid":name}] ump.add_mirror(parts,mname)
boogiekodi/plugin.program.ump
lib/providers/video_link_cartoonhd.py
Python
gpl-2.0
3,714
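# --- Illustration (not part of the file above) ----------------------------
# caesar() above is a plain alphabetic shift over ASCII letters; shift=13
# makes it ROT13 (digits and punctuation pass through unchanged), which is
# how the "rt" search parameter is derived from token+set. Round trip:
tok = "AbC123"
shifted = caesar(tok, 13)          # -> "NoP123"
assert caesar(shifted, 13) == tok  # applying a shift of 13 twice is identity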
# =========================================================================== # eXe # Copyright 2004-2006, University of Auckland # Copyright 2004-2008 eXe Project, http://eXeLearning.org # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # =========================================================================== """ TrueFalseElement is responsible for a block of question. Used by TrueFalseBlock. """ import logging from exe.webui import common from exe.webui.element import TextAreaElement log = logging.getLogger(__name__) # =========================================================================== class TrueFalseElement(object): """ TrueFalseElement is responsible for a block of question. Used by TrueFalseBlock. """ def __init__(self, index, idevice, question): """ Initialize """ self.index = index self.id = unicode(index) + "b" + idevice.id self.idevice = idevice self.question = question # also split out each part for a separate TextAreaElement: # but first... # to compensate for the strange unpickling timing when objects are # loaded from an elp, ensure that proper idevices are set: if question.questionTextArea.idevice is None: question.questionTextArea.idevice = idevice if question.feedbackTextArea.idevice is None: question.feedbackTextArea.idevice = idevice if question.hintTextArea.idevice is None: question.hintTextArea.idevice = idevice # self.question_question = TextAreaElement(question.questionTextArea) self.question_feedback = TextAreaElement(question.feedbackTextArea) self.question_hint = TextAreaElement(question.hintTextArea) # note, question.isCorrect is left as it was, and not split out. # because there are low-level mechanisms in place somewhere # with the radio buttons or ??? expecting that as such. self.questionId = "question"+ unicode(index) + "b" + idevice.id self.question_question.id = self.questionId self.feedbackId = "feedback" + unicode(index) + "b" + idevice.id self.question_feedback.id = self.feedbackId self.hintId = "hint" + unicode(index) + "b" + idevice.id self.question_hint.id = self.hintId self.keyId = "Key" + unicode(index) + "b" + idevice.id def process(self, request): """ Process arguments from the web server. Return any which apply to this element. 
""" log.debug("process " + repr(request.args)) is_cancel = common.requestHasCancel(request) if self.questionId in request.args \ and not is_cancel: self.question_question.process(request) if self.hintId in request.args \ and not is_cancel: self.question_hint.process(request) if self.keyId in request.args \ and not is_cancel: if request.args[self.keyId][0] == "true": self.question.isCorrect = True log.debug("question " + repr(self.question.isCorrect)) else: self.question.isCorrect = False if self.feedbackId in request.args \ and not is_cancel: self.question_feedback.process(request) if "action" in request.args and request.args["action"][0] == self.id: # before deleting the question object, remove any internal anchors: for q_field in self.question.getRichTextFields(): q_field.ReplaceAllInternalAnchorsLinks() q_field.RemoveAllInternalLinks() self.idevice.questions.remove(self.question) # disable Undo once a question has been deleted: self.idevice.undo = False def renderEdit(self): """ Returns an XHTML string for editing this option element """ html = self.question_question.renderEdit() html += _("True") + " " html += common.option(self.keyId, self.question.isCorrect, "true") html += _("False") + " " html += common.option(self.keyId, not self.question.isCorrect, "false") html += "<br/><br/>\n" html += common.elementInstruc(self.idevice.keyInstruc) html += self.question_feedback.renderEdit() html += self.question_hint.renderEdit() html += common.submitImage(self.id, self.idevice.id, "/images/stock-cancel.png", _("Delete question")) html += "<br/><br/>\n" return html def renderQuestionView(self): """ Returns an XHTML string for viewing this question element """ is_preview = 0 html = self.renderQuestion(is_preview) if self.question.hintTextArea.content.strip() != "": html += u'<span ' html += u'style="background-image:url(\'panel-amusements.png\');">' html += u'\n<a onmousedown="Javascript:updateCoords(event);' html += u'showMe(\'%s\', 350, 100);" ' % self.hintId html += u'style="cursor:help;align:center;vertical-align:middle;" ' html += u'title="%s" \n' % _(u"Hint") html += u'href="javascript:void(0);">&nbsp;&nbsp;&nbsp;&nbsp;</a>' html += u'</span>' html += u'<div id="'+self.hintId+'" ' html += u'style="display:none; z-index:99;">' html += u'<div style="float:right;" >' html += u'<img alt="%s" ' % _('Close') html += u'src="stock-stop.png" title="%s"' % _('Close') html += u" onmousedown=\"Javascript:hideMe();\"/></div>" html += u'<div class="popupDivLabel">' html += _(u"Hint") html += u'</div>\n' html += self.question_hint.renderView() html += u"</div>\n" return html def renderQuestionPreview(self): #TODO merge renderQuestionView and renderQuestionPreview """ Returns an XHTML string for previewing this question element """ is_preview = 1 html = self.renderQuestion(is_preview) html += common.elementInstruc(self.question_hint.field.content, "panel-amusements.png", "Hint") return html def renderQuestion(self, is_preview): """ Returns an XHTML string for viewing and previewing this question element """ log.debug("renderPreview called in the form of renderQuestion") # JR # html = u"<br/><br/>" # # if is_preview: # html += self.question_question.renderPreview() + "<br/>" # else: # html += self.question_question.renderView() + "<br/>" if is_preview: html = self.question_question.renderPreview() else: html = self.question_question.renderView() html += _("True") + " " html += self.__option(0, 2, "true") + " \n" html += _("False") + " " html += self.__option(1, 2, "false") + "\n" return html def 
__option(self, index, length, true): """Add a option input""" html = u'<input type="radio" name="option%s" ' % self.id html += u'id="%s%s" ' % (true, self.id) html += u'onclick="getFeedback(%d,%d,\'%s\',\'truefalse\')"/>' % ( index, length, self.id) return html def renderFeedbackPreview(self): """ Merely a front-end to renderFeedbackView(), setting preview mode. Note: this won't really matter all that much, since these won't yet show up in exported printouts, BUT the image paths will be correct. """ return self.renderFeedbackView(is_preview=True) def renderFeedbackView(self, is_preview=False): """ return xhtml string for display this option's feedback """ feedbackStr1 = _(u"Correct!") + " " feedbackStr2 = _(u"Incorrect!") + " " # embed a score_representation as well, even==true, # so that Correct/Incorrect doesn't need to be un-translated # upon bursting from a CC export. # start off with a sorta random looking number: to_even1 = int(self.idevice.id)+5 if to_even1 % 2: # ensure that to_even1 is indeed even, correct: to_even1 += 1 # and ensure that to_even2 is odd, incorrect: to_even2 = to_even1 + 1 if not self.question.isCorrect: feedbackStr1, feedbackStr2 = feedbackStr2, feedbackStr1 to_even1, to_even2 = to_even2, to_even1 feedbackId1 = "0" + "b" + self.id feedbackId2 = "1" + "b" + self.id html = u'<div id="s%s" class="feedback"' % feedbackId1 # JR if is_preview: aux = self.question_feedback.field.content_w_resourcePaths else: aux = self.question_feedback.field.content_wo_resourcePaths # html += u'display: none;" even_steven="%s">' % (str(to_even1)) html += u'style="display: none;"><strong>' html += feedbackStr1 + '</strong> ' + aux + '</div>\n' html += u'<div id="s%s" class="feedback"' % feedbackId2 # html += u'display: none;" even_steven="%s">' % (str(to_even2)) html += u'style="display: none;"><strong>' html += feedbackStr2 + '</strong> ' + aux + '</div>\n' # html += u'<div id="sfbk%s" class="feedback"' % self.id # html += u'style="display: none;">' # if is_preview: # html += self.question_feedback.renderPreview() # else: # html += self.question_feedback.renderView() # html += u'</div>\n' return html # JR: Anadimos la etiqueta noscript def renderNoscript(self, is_preview=False): html = u'<noscript><div class="feedback">' if is_preview: aux = self.question_feedback.field.content_w_resourcePaths else: aux = self.question_feedback.field.content_wo_resourcePaths if self.question.isCorrect: isCorrect = _("True") else: isCorrect = _("False") html += '<strong>' + isCorrect + '</strong> ' + aux + '</div></noscript>\n' return html # ===========================================================================
luisgg/iteexe
exe/webui/truefalseelement.py
Python
gpl-2.0
11,157
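# --- Illustration (not part of the file above) ----------------------------
# A standalone sketch of the even/odd "score representation" bookkeeping
# that renderFeedbackView() uses so Correct/Incorrect can survive bursting
# from a CC export without being un-translated: the marker for the correct
# feedback is forced even, the incorrect one odd.
def feedback_parity(idevice_id, is_correct):
    to_even = int(idevice_id) + 5
    if to_even % 2:        # ensure the "correct" marker is even
        to_even += 1
    return to_even if is_correct else to_even + 1

assert feedback_parity("7", True) % 2 == 0
assert feedback_parity("7", False) % 2 == 1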
# -*- coding: utf-8 -*- """ *************************************************************************** OTBUtils.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya (C) 2013 by CS Systemes d'information (CS SI) Email : volayaf at gmail dot com otb at c-s dot fr (CS SI) Contributors : Victor Olaya Julien Malik, Oscar Picas (CS SI) - add functions to manage xml tree Alexia Mondot (CS SI) - add a trick for OTBApplication SplitImages *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os import re from PyQt4.QtCore import QCoreApplication from qgis.core import QgsApplication import subprocess from processing.core.ProcessingConfig import ProcessingConfig from processing.core.ProcessingLog import ProcessingLog from processing.tools.system import isMac, isWindows import logging import xml.etree.ElementTree as ET import traceback from processing.gui.SilentProgress import SilentProgress OTB_FOLDER = "OTB_FOLDER" OTB_LIB_FOLDER = "OTB_LIB_FOLDER" OTB_SRTM_FOLDER = "OTB_SRTM_FOLDER" OTB_GEOID_FILE = "OTB_GEOID_FILE" def findOtbPath(): folder = None #try to configure the path automatically if isMac(): testfolder = os.path.join(unicode(QgsApplication.prefixPath()), "bin") if os.path.exists(os.path.join(testfolder, "otbcli")): folder = testfolder else: testfolder = "/usr/local/bin" if os.path.exists(os.path.join(testfolder, "otbcli")): folder = testfolder elif isWindows(): testfolder = os.path.join(os.path.dirname(QgsApplication.prefixPath()), os.pardir, "bin") if os.path.exists(os.path.join(testfolder, "otbcli.bat")): folder = testfolder else: testfolder = "/usr/bin" if os.path.exists(os.path.join(testfolder, "otbcli")): folder = testfolder return folder def otbPath(): folder = findOtbPath() if folder is None: folder = ProcessingConfig.getSetting(OTB_FOLDER) return folder def findOtbLibPath(): folder = None #try to configure the path automatically if isMac(): testfolder = os.path.join(unicode(QgsApplication.prefixPath()), "lib/otb/applications") if os.path.exists(testfolder): folder = testfolder else: testfolder = "/usr/local/lib/otb/applications" if os.path.exists(testfolder): folder = testfolder elif isWindows(): testfolder = os.path.join(os.path.dirname(QgsApplication.prefixPath()), "orfeotoolbox", "applications") if os.path.exists(testfolder): folder = testfolder else: testfolder = "/usr/lib/otb/applications" if os.path.exists(testfolder): folder = testfolder return folder def otbLibPath(): folder = findOtbLibPath() if folder is None: folder = ProcessingConfig.getSetting(OTB_LIB_FOLDER) return folder def otbSRTMPath(): folder = ProcessingConfig.getSetting(OTB_SRTM_FOLDER) if folder is None: folder = "" return folder def otbGeoidPath(): filepath = ProcessingConfig.getSetting(OTB_GEOID_FILE) if filepath is None: filepath = "" return filepath def otbDescriptionPath(): return os.path.join(os.path.dirname(__file__), "description") _installedVersion = None _installedVersionFound = False def 
getInstalledVersion(runOtb=False): global _installedVersion global _installedVersionFound if _installedVersionFound and not runOtb: return _installedVersion if otbPath() is None: _installedVersionFound = False return None commands = [os.path.join(otbPath(), "otbcli_Smoothing")] progress = SilentProgress() out = executeOtb(commands, progress, False) for line in out: if "version" in line: _installedVersionFound = True _installedVersion = line.split("version")[-1].strip() break return _installedVersion def compatibleDescriptionPath(version): supportedVersions = {"5.0.0": "5.0.0"} if version is None: return None if version not in supportedVersions: lastVersion = sorted(supportedVersions.keys())[-1] if version > lastVersion: version = lastVersion else: return None return os.path.join(otbDescriptionPath(), supportedVersions[version]) def executeOtb(commands, progress, addToLog=True): loglines = [] loglines.append(tr("OTB execution console output")) os.putenv('ITK_AUTOLOAD_PATH', otbLibPath()) fused_command = ''.join(['"%s" ' % re.sub(r'^"|"$', '', c) for c in commands]) proc = subprocess.Popen(fused_command, shell=True, stdout=subprocess.PIPE, stdin=open(os.devnull), stderr=subprocess.STDOUT, universal_newlines=True).stdout for line in iter(proc.readline, ""): if "[*" in line: idx = line.find("[*") perc = int(line[idx - 4:idx - 2].strip(" ")) if perc != 0: progress.setPercentage(perc) else: loglines.append(line) progress.setConsoleInfo(line) if addToLog: ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines) return loglines def tr(string, context=''): if context == '': context = 'OTBUtils' return QCoreApplication.translate(context, string) def get_choices_of(doc, parameter): choices = [] try: t5 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter] choices = [item.text for item in t5[0].findall('options/choices/choice')] except: logger = logging.getLogger('OTBGenerator') logger.warning(traceback.format_exc()) return choices def remove_dependent_choices(doc, parameter, choice): choices = get_choices_of(doc, parameter) choices.remove(choice) for a_choice in choices: t4 = [item for item in doc.findall('.//parameter') if '.%s' % a_choice in item.find('key').text] for t5 in t4: doc.remove(t5) def renameValueField(doc, textitem, field, newValue): t4 = [item for item in doc.findall('.//parameter') if item.find('key').text == textitem] for t5 in t4: t5.find(field).text = newValue def remove_independent_choices(doc, parameter, choice): choices = [] choices.append(choice) for a_choice in choices: t4 = [item for item in doc.findall('.//parameter') if '.%s' % a_choice in item.find('key').text] for t5 in t4: doc.remove(t5) def remove_parameter_by_key(doc, parameter): t4 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter] for t5 in t4: doc.remove(t5) def remove_other_choices(doc, parameter, choice): t5 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter] if len(t5) > 0: choices = [item for item in t5[0].findall('options/choices/choice') if item.text != choice] choice_root = t5[0].findall('options/choices')[0] for a_choice in choices: choice_root.remove(a_choice) def remove_choice(doc, parameter, choice): t5 = [item for item in doc.findall('.//parameter') if item.find('key').text == parameter] if len(t5) > 0: choices = [item for item in t5[0].findall('options/choices/choice') if item.text == choice] choice_root = t5[0].findall('options/choices')[0] for a_choice in choices: choice_root.remove(a_choice) def 
split_by_choice(doc, parameter): """ splits the given doc into several docs according to the given parameter returns a dictionary of documents """ result = {} choices = get_choices_of(doc, parameter) import copy for choice in choices: #creates a new copy of the document working_copy = copy.deepcopy(doc) remove_dependent_choices(working_copy, parameter, choice) #remove all other choices except the current one remove_other_choices(working_copy, parameter, choice) #set a new name according to the choice old_app_name = working_copy.find('key').text working_copy.find('key').text = '%s-%s' % (old_app_name, choice) working_copy.find('longname').text = '%s (%s)' % (old_app_name, choice) #add it to the dictionary result[choice] = working_copy return result def remove_parameter_by_criteria(doc, criteria): t4 = [item for item in doc.findall('./parameter') if criteria(item)] for t5 in t4: doc.getroot().remove(t5) def defaultWrite(available_app, original_dom_document): fh = open("description/%s.xml" % available_app, "w") the_root = original_dom_document ET.ElementTree(the_root).write(fh) fh.close() def defaultSplit(available_app, original_dom_document, parameter): the_root = original_dom_document split = split_by_choice(the_root, parameter) the_list = [] for key in split: defaultWrite('%s-%s' % (available_app, key), split[key]) the_list.append(split[key]) return the_list
sebastic/QGIS
python/plugins/processing/algs/otb/OTBUtils.py
Python
gpl-2.0
10,080
#!/usr/bin/env python import sys, os, getpass, datetime # PyGithub >= 1.13 is required https://pypi.python.org/pypi/PyGithub from github import Github from github_issues import GithubIssuesBase, GithubIssues # You could use OAuth here too for unattended access # see http://developer.github.com/v3/oauth/#create-a-new-authorization print "Enter github username:" username = sys.stdin.readline().strip() print password = getpass.getpass('Enter github password: ') gh = Github(login_or_token=username, password=password, user_agent='PyGithub/Python') # needed to fetch fresh rate_limiting data repo = gh.get_repo('autotest/virt-test') # Requests for logged in users are limited to 5000 per hour # or about 1 request every 0.7 seconds start = gh.rate_limiting # Open up cache and repository issues = GithubIssues(gh, 'autotest/virt-test') print "Issue #125: ", # Any issue can be referenced by number print issues[125] end = gh.rate_limiting print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Pull requests are treated as issues issues = GithubIssues(gh, 'autotest/virt-test') start = end print "Pull #526: ", print issues[526] end = gh.rate_limiting print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Listing issues requires finding the last issue # this takes a while when the cache is empty issues = GithubIssues(gh, 'autotest/virt-test') start = end print "Total number of issues (this could take a while):" # This len() is used to force the slower binary-search print GithubIssuesBase.__len__(issues) end = gh.rate_limiting print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Searches are supported and return lists of issue-numbers issues = GithubIssues(gh, 'autotest/virt-test') start = end print "Open issues last few days without any label (could take 2-10 minutes):" two_days = datetime.timedelta(days=2) last_week = datetime.datetime.now() - two_days # Search criteria is put into a dictionary # state - str - 'open', 'closed' # assignee - list of str (login), "none" or "*" # mentioned - str (login) # labels - list of str (label name) # sort - str - 'created', 'updated', 'comments' # direction - str - 'asc', 'desc' # since - datetime.datetime criteria = {'state':'open', 'since':last_week} # Search results are cached for 10-minutes, otherwise searches can be slow for number in issues.search(criteria): issue = issues[number] # some items must be searched/compared manually if len(issue['labels']) < 1: print ('https://github.com/autotest/virt-test/issues/%s\t"%s"' % (issue['number'], issue['summary'])) print print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Now that cache is populated, this will be very fast issues = GithubIssues(gh, 'autotest/virt-test') start = end print "Total number of issues (this should be a lot faster):" # This length uses a cached issue count plus a 'since' criteria search print len(issues) end = gh.rate_limiting print "Final Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) del issues
rbbratta/virt-test
tools/github/example.py
Python
gpl-2.0
3,402
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*- # # Copyright (C) 2006 - Ed Catmur <ed@catmur.co.uk> # Copyright (C) 2009 - Jonathan Matthew <jonathan@d14n.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # The Rhythmbox authors hereby grant permission for non-GPL compatible # GStreamer plugins to be used and distributed together with GStreamer # and Rhythmbox. This permission is above and beyond the permissions granted # by the GPL license by which Rhythmbox is covered. If you modify this code # you may extend this exception to your version of the code, but you are not # obligated to do so. If you do not wish to do so, delete this exception # statement from your version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. import os import rhythmdb import rb import gobject import gio IMAGE_NAMES = ["cover", "album", "albumart", ".folder", "folder"] ITEMS_PER_NOTIFICATION = 10 ART_SAVE_NAME = 'Cover.jpg' ART_SAVE_FORMAT = 'jpeg' ART_SAVE_SETTINGS = {"quality": "100"} IGNORED_SCHEMES = ('http', 'cdda', 'daap', 'mms') def file_root (f_name): return os.path.splitext (f_name)[0].lower () def shared_prefix_length (a, b): l = 0 while a[l] == b[l]: l = l+1 return l class LocalCoverArtSearch: def __init__ (self): pass def _enum_dir_cb(self, fileenum, result, (results, on_search_completed_cb, entry, args)): try: files = fileenum.next_files_finish(result) if files is None or len(files) == 0: print "okay, done; got %d files" % len(results) on_search_completed_cb(self, entry, results, *args) return for f in files: ct = f.get_attribute_string("standard::fast-content-type") if ct.startswith("image/") and f.get_attribute_boolean("access::can-read"): results.append(f.get_name()) # hm fileenum.next_files_async(ITEMS_PER_NOTIFICATION, callback = self._enum_dir_cb, user_data=(results, on_search_completed_cb, entry, args)) except Exception, e: print "okay, probably done: %s" % e on_search_completed_cb(self, entry, results, *args) def search (self, db, entry, is_playing, on_search_completed_cb, *args): self.file = gio.File(entry.get_playback_uri()) if self.file.get_uri_scheme() in IGNORED_SCHEMES: print 'not searching for local art for %s' % (self.file.get_uri()) on_search_completed_cb (self, entry, [], *args) return self.artist = db.entry_get (entry, rhythmdb.PROP_ARTIST) self.album = db.entry_get (entry, rhythmdb.PROP_ALBUM) print 'searching for local art for %s' % (self.file.get_uri()) parent = self.file.get_parent() enumfiles = parent.enumerate_children(attributes="standard::fast-content-type,access::can-read,standard::name") enumfiles.next_files_async(ITEMS_PER_NOTIFICATION, callback = self._enum_dir_cb, user_data=([], on_search_completed_cb, entry, args)) def search_next (self): return False def get_result_pixbuf (self, results): return None def get_best_match_urls (self, results): parent = self.file.get_parent() # Compare lower case, without file extension for name in [file_root 
(self.file.get_basename())] + IMAGE_NAMES: for f_name in results: if file_root (f_name) == name: yield parent.resolve_relative_path(f_name).get_uri() # look for file names containing the artist and album (case-insensitive) # (mostly for jamendo downloads) artist = self.artist.lower() album = self.album.lower() for f_name in results: f_root = file_root (f_name).lower() if f_root.find (artist) != -1 and f_root.find (album) != -1: yield parent.resolve_relative_path(f_name).get_uri() # if that didn't work, look for the longest shared prefix # only accept matches longer than 2 to avoid weird false positives match = (2, None) for f_name in results: pl = shared_prefix_length(f_name, self.file.get_basename()) if pl > match[0]: match = (pl, f_name) if match[1] is not None: yield parent.resolve_relative_path(match[1]).get_uri() def _pixbuf_save (self, pixbuf, uri): def pixbuf_cb(buf, stream): # can't be bothered doing this asynchronously.. stream.write(buf) def replace_cb(file, result, pixbuf): try: stream = file.replace_finish(result) pixbuf.save_to_callback(pixbuf_cb, ART_SAVE_FORMAT, ART_SAVE_SETTINGS, user_data=stream) stream.close() except Exception,e : print "error creating %s: %s" % (file.get_uri(), e) f = gio.File(uri) f.replace_async(replace_cb, user_data=pixbuf) def _save_dir_cb (self, enum, result, (db, entry, dir, pixbuf)): artist, album = [db.entry_get (entry, x) for x in [rhythmdb.PROP_ARTIST, rhythmdb.PROP_ALBUM]] try: files = enum.next_files_finish(result) if len(files) == 0: art_file = dir.resolve_relative_path(ART_SAVE_NAME) print "saving local art to \"%s\"" % art_file.get_uri() self._pixbuf_save (pixbuf, art_file.get_uri ()) enum.close() return for f in files: ct = f.get_attribute_string("standard::fast-content-type") if ct.startswith("image/") or ct.startswith("x-directory/"): continue uri = dir.resolve_relative_path(f.get_name()).get_uri() u_entry = db.entry_lookup_by_location (uri) if u_entry: u_artist, u_album = [db.entry_get (u_entry, x) for x in [rhythmdb.PROP_ARTIST, rhythmdb.PROP_ALBUM]] if album != u_album: print "Not saving local art; encountered media with different album (%s, %s, %s)" % (uri, u_artist, u_album) enum.close() return continue print "Not saving local art; encountered unknown file (%s)" % uri enum.close() return enum.next_files_async(ITEMS_PER_NOTIFICATION, callback = self._save_dir_cb, user_data=(db, entry, dir, pixbuf)) except Exception, e: print "Error reading \"%s\": %s" % (dir, e) def save_pixbuf (self, db, entry, pixbuf): uri = entry.get_playback_uri() if uri is None or uri == '': return f = gio.File(uri) if f.get_uri_scheme() in IGNORED_SCHEMES: print "not saving local art for %s" % uri return print 'checking whether to save local art for %s' % uri parent = f.get_parent() try: enumfiles = parent.enumerate_children(attributes="standard::fast-content-type,access::can-read,standard::name") enumfiles.next_files_async(ITEMS_PER_NOTIFICATION, callback = self._save_dir_cb, user_data=(db, entry, parent, pixbuf)) except Exception, e: print "unable to scan directory %s: %s" % (parent.get_uri(), e)
paulbellamy/Rhythmbox-iPod-Plugin
plugins/artdisplay/artdisplay/LocalCoverArtSearchGIO.py
Python
gpl-2.0
6,993
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui/rastertransparencydockwidgetbase.ui' # # Created: Mon Jul 29 13:57:02 2013 # by: PyQt4 UI code generator 4.9.1 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s class Ui_RasterTransparencyDockWidget(object): def setupUi(self, RasterTransparencyDockWidget): RasterTransparencyDockWidget.setObjectName(_fromUtf8("RasterTransparencyDockWidget")) RasterTransparencyDockWidget.resize(302, 182) self.dockWidgetContents = QtGui.QWidget() self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents")) self.verticalLayout = QtGui.QVBoxLayout(self.dockWidgetContents) self.verticalLayout.setObjectName(_fromUtf8("verticalLayout")) self.label = QtGui.QLabel(self.dockWidgetContents) self.label.setObjectName(_fromUtf8("label")) self.verticalLayout.addWidget(self.label) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.sliderStart = QtGui.QSlider(self.dockWidgetContents) self.sliderStart.setProperty("value", 0) self.sliderStart.setOrientation(QtCore.Qt.Horizontal) self.sliderStart.setTickPosition(QtGui.QSlider.TicksBelow) self.sliderStart.setObjectName(_fromUtf8("sliderStart")) self.horizontalLayout.addWidget(self.sliderStart) self.spinStart = QtGui.QSpinBox(self.dockWidgetContents) self.spinStart.setMaximum(100) self.spinStart.setObjectName(_fromUtf8("spinStart")) self.horizontalLayout.addWidget(self.spinStart) self.verticalLayout.addLayout(self.horizontalLayout) self.label_2 = QtGui.QLabel(self.dockWidgetContents) self.label_2.setObjectName(_fromUtf8("label_2")) self.verticalLayout.addWidget(self.label_2) self.horizontalLayout_2 = QtGui.QHBoxLayout() self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2")) self.sliderEnd = QtGui.QSlider(self.dockWidgetContents) self.sliderEnd.setOrientation(QtCore.Qt.Horizontal) self.sliderEnd.setTickPosition(QtGui.QSlider.TicksBelow) self.sliderEnd.setObjectName(_fromUtf8("sliderEnd")) self.horizontalLayout_2.addWidget(self.sliderEnd) self.spinEnd = QtGui.QSpinBox(self.dockWidgetContents) self.spinEnd.setObjectName(_fromUtf8("spinEnd")) self.horizontalLayout_2.addWidget(self.spinEnd) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout_3 = QtGui.QHBoxLayout() self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3")) self.chkManualUpdate = QtGui.QCheckBox(self.dockWidgetContents) self.chkManualUpdate.setObjectName(_fromUtf8("chkManualUpdate")) self.horizontalLayout_3.addWidget(self.chkManualUpdate) self.btnRefresh = QtGui.QPushButton(self.dockWidgetContents) self.btnRefresh.setObjectName(_fromUtf8("btnRefresh")) self.horizontalLayout_3.addWidget(self.btnRefresh) self.verticalLayout.addLayout(self.horizontalLayout_3) RasterTransparencyDockWidget.setWidget(self.dockWidgetContents) self.retranslateUi(RasterTransparencyDockWidget) QtCore.QMetaObject.connectSlotsByName(RasterTransparencyDockWidget) def retranslateUi(self, RasterTransparencyDockWidget): RasterTransparencyDockWidget.setWindowTitle(QtGui.QApplication.translate("RasterTransparencyDockWidget", "Raster Slider", None, QtGui.QApplication.UnicodeUTF8)) self.label.setText(QtGui.QApplication.translate("RasterTransparencyDockWidget", "Values min/max", None, QtGui.QApplication.UnicodeUTF8)) self.label_2.setText(QtGui.QApplication.translate("RasterTransparencyDockWidget", "Values max/min", None, 
QtGui.QApplication.UnicodeUTF8)) self.chkManualUpdate.setText(QtGui.QApplication.translate("RasterTransparencyDockWidget", "Manual update", None, QtGui.QApplication.UnicodeUTF8)) self.btnRefresh.setText(QtGui.QApplication.translate("RasterTransparencyDockWidget", "Refresh", None, QtGui.QApplication.UnicodeUTF8))
alfanugraha/LUMENS-repo
processing/raster_transparency/ui/ui_rastertransparencydockwidgetbase.py
Python
gpl-2.0
4,316
import struct import socket import datetime import random import hashlib import proto class Connection(object): def __init__(self, host, port): self.host = host self.port = port self._sock = socket.create_connection((host, port)) def sendall(self, data): return self._sock.sendall(data) def recvall(self, totalsize): data = '' received = 0 while received < totalsize: bufsize = min(4096, totalsize - received) buf = self._sock.recv(bufsize) received += len(buf) data += buf return data class Inode(object): fmt = '<256s256sQQQQQBBBBLLLL4092x1048576L2097152L' size = struct.calcsize(fmt) packer = struct.Struct(fmt) def __init__(self, data): assert self.size == len(data) self.data = data pieces = self.packer.unpack(data) name = pieces[0] length = name.index('\0') self.name = name[:length] self.tag = pieces[1] self.create_time = pieces[2] >> 32 self.snap_ctime = pieces[3] >> 32 self.vm_clock_nsec = pieces[4] self.vdi_size = pieces[5] self.vm_state_size = pieces[6] self.copy_policy = pieces[7] self.store_policy = pieces[8] self.nr_copies = pieces[9] self.block_size_shift = pieces[10] self.snap_id = pieces[11] self.vdi_id = pieces[12] self.parent_vdi_id = pieces[13] self.btree_counter = pieces[14] self.data_vdi_id = pieces[15:1048591] self.generation_reference = pieces[1048591:3145728] class VDIState(object): # 40 = sizeof(struct node_id) # 31 = SD_MAX_COPIES # 1240 = sizeof(struct node_id) * SD_MAX_COPIES fmt = '<LBBBBB3xLL40xL31L1240x' size = struct.calcsize(fmt) packer = struct.Struct(fmt) def __init__(self, data): assert self.size == len(data) self.data = data pieces = self.packer.unpack(data) self.vid = pieces[0] self.nr_copies = pieces[1] self.snapshot = pieces[2] self.deleted = pieces[3] self.copy_policy = pieces[4] self.block_size_shift = pieces[5] self.parent_vid = pieces[6] self.lock_state = pieces[7] self.nr_participants = pieces[8] self.participants_state = pieces[9:40] class Request(object): fmt = '<BBHLLL32x' size = struct.calcsize(fmt) packer = struct.Struct(fmt) OBJ_OPS = ( proto.SD_OP_CREATE_AND_WRITE_OBJ, proto.SD_OP_READ_OBJ, proto.SD_OP_WRITE_OBJ, proto.SD_OP_REMOVE_OBJ, proto.SD_OP_DISCARD_OBJ, proto.SD_OP_CREATE_AND_WRITE_PEER, proto.SD_OP_READ_PEER, proto.SD_OP_WRITE_PEER, proto.SD_OP_REMOVE_PEER, ) VDI_OPS = ( proto.SD_OP_NEW_VDI, proto.SD_OP_LOCK_VDI, proto.SD_OP_RELEASE_VDI, proto.SD_OP_GET_VDI_INFO, proto.SD_OP_READ_VDIS, proto.SD_OP_FLUSH_VDI, proto.SD_OP_DEL_VDI, proto.SD_OP_GET_CLUSTER_DEFAULT, ) def __init__(self): self.proto_ver = proto.SD_PROTO_VER self.opcode = 0x00 self.flags = 0x00 self.epoch = 0 self.id = 0 self.data_length = 0 self.obj = self.Object() self.vdi = self.VDI() self.data = '' def serialize(self): data = self.packer.pack( self.proto_ver, self.opcode, self.flags, self.epoch, self.id, self.data_length) if self.opcode in self.OBJ_OPS: data = data[:16] + self.obj.serialize() elif self.opcode in self.VDI_OPS: data = data[:16] + self.vdi.serialize() if self.flags & proto.SD_FLAG_CMD_WRITE != 0: data += self.data return data class Object(object): fmt = '<QQBBBBLLL' size = struct.calcsize(fmt) packer = struct.Struct(fmt) def __init__(self): self.oid = 0x00000000 self.cow_oid = 0x00000000 self.copies = 0 self.copy_policy = 0 self.ec_index = 0 self.tgt_epoch = 0 self.offset = 0 def serialize(self): return self.packer.pack( self.oid, self.cow_oid, self.copies, self.copy_policy, self.ec_index, 0, self.tgt_epoch, self.offset, 0) class VDI(object): fmt = '<QLBBBBLL8x' size = struct.calcsize(fmt) packer = struct.Struct(fmt) def __init__(self): self.vdi_size = 0 
self.base_vdi_id = 0 self.copies = 0 self.copy_policy = 0 self.store_policy = 0 self.block_size_shift = 0 self.snapid = 0 self.type = 0 def serialize(self): return self.packer.pack( self.vdi_size, self.base_vdi_id, self.copies, self.copy_policy, self.store_policy, self.block_size_shift, self.snapid, self.type, ) class Response(object): fmt = '<BBHLLLL28x' size = struct.calcsize(fmt) packer = struct.Struct(fmt) def __init__(self, data): pieces = self.packer.unpack(data) self.proto_ver = pieces[0] self.opcode = pieces[1] self.flags = pieces[2] self.epoch = pieces[3] self.id = pieces[4] self.data_length = pieces[5] self.result = pieces[6] self.data = '' self.vdi = self.VDI(data[20:48]) class VDI(object): fmt = '<4xLLBB2x12x' size = struct.calcsize(fmt) packer = struct.Struct(fmt) def __init__(self, data): pieces = self.packer.unpack(data) self.vdi_id = pieces[0] self.attr_id = pieces[1] self.copies = pieces[2] self.block_size_shift = pieces[3] class SheepdogClient(object): UINT32_MAX = 2 ** 32 def __init__(self, host="127.0.0.1", port=7000): self._conn = Connection(host, port) self._seq_id = random.randint(1, self.UINT32_MAX - 1) def _call(self, req): self._seq_id = (self._seq_id + 1) % self.UINT32_MAX req.id = self._seq_id self._conn.sendall(req.serialize()) rsp = Response(self._conn.recvall(Response.size)) assert req.id == rsp.id if rsp.result == proto.SD_RES_SUCCESS: rsp.data = self._conn.recvall(rsp.data_length) else: raise Exception(hex(rsp.result)) return rsp def _parse_vid_bitmap(self, data): vids = set() for i, c in enumerate(data): if c != '\x00': (b,) = struct.unpack('<B', c) j = 0 while b & 255 > 0: if b & 1 == 1: vids.add(i * 8 + j) j += 1 b = b >> 1 return vids def _parse_vids(self, data): length = len(data) / 8 fmt = '<%dQ' % length vids = struct.unpack(fmt, data) def _vid_to_str(vid): return '%016x' % vid return map(_vid_to_str, vids) def _parse_vdi_state(self, data): assert len(data) % 1428 == 0 nr_vdis = len(data) / 1428 status = [] for i in range(nr_vdis): head = 1428 * i tail = 1428 * (i + 1) status.append(VDIState(data[head:tail])) return status def get_vids(self): req = Request() req.opcode = proto.SD_OP_READ_VDIS req.data_length = proto.SD_NR_VDIS / 8 rsp = self._call(req) return self._parse_vid_bitmap(rsp.data) def get_del_vids(self): req = Request() req.opcode = proto.SD_OP_READ_DEL_VDIS req.proto_ver = proto.SD_SHEEP_PROTO_VER req.data_length = proto.SD_NR_VDIS / 8 rsp = self._call(req) return self._parse_vid_bitmap(rsp.data) def read_obj(self, oid, offset, size): req = Request() req.opcode = proto.SD_OP_READ_OBJ req.data_length = size req.obj.oid = oid req.obj.offset = offset return self._call(req) def get_vdi_copies(self, epoch): req = Request() req.opcode = proto.SD_OP_GET_VDI_COPIES req.proto_ver = proto.SD_SHEEP_PROTO_VER req.data_length = 1428 * 512 req.epoch = epoch rsp = self._call(req) return self._parse_vdi_state(rsp.data) def get_inode(self, vid): rsp = self.read_obj(proto.vid_to_vdi_oid(vid), 0, Inode.size) return Inode(rsp.data) def get_inodes(self): inodes = [] for vid in self.get_vids(): inode = self.get_inode(vid) if not inode.name: continue inodes.append(inode) return inodes def find_inode(self, vdiname, tagname=''): req = Request() req.opcode = proto.SD_OP_GET_VDI_INFO req.flags = proto.SD_FLAG_CMD_WRITE req.data_length = 512 req.data = struct.pack('<256s256s', vdiname, tagname) rsp = self._call(req) return self.get_inode(rsp.vdi.vdi_id) def find_vdi(self, vdiname): return SheepdogVDI(self, self.find_inode(vdiname)) def get_obj_list(self, 
data_length, epoch): req = Request() req.opcode = proto.SD_OP_GET_OBJ_LIST req.proto_ver = proto.SD_SHEEP_PROTO_VER req.data_length = data_length req.epoch = epoch rsp = self._call(req) return self._parse_vids(rsp.data) def create_and_write_obj(self, oid, data, offset): req = Request() req.opcode = proto.SD_OP_CREATE_AND_WRITE_OBJ req.proto_ver = proto.SD_PROTO_VER req.flags = proto.SD_FLAG_CMD_WRITE req.obj.oid = oid req.data = data req.data_length = len(data) req.obj.offset = offset return self._call(req) def write_obj(self, oid, data, offset): req = Request() req.opcode = proto.SD_OP_WRITE_OBJ req.proto_ver = proto.SD_PROTO_VER req.flags = proto.SD_FLAG_CMD_WRITE req.obj.oid = oid req.data = data req.data_length = len(data) req.obj.offset = offset return self._call(req) def remove_obj(self, oid): req = Request() req.opcode = proto.SD_OP_REMOVE_OBJ req.proto_ver = proto.SD_PROTO_VER req.obj.oid = oid return self._call(req) def create_and_write_peer(self, oid, data, epoch, ec_index): req = Request() req.opcode = proto.SD_OP_CREATE_AND_WRITE_PEER req.proto_ver = proto.SD_SHEEP_PROTO_VER req.flags = proto.SD_FLAG_CMD_WRITE req.obj.oid = oid req.data = data req.data_length = len(data) req.epoch = epoch req.obj.ec_index = ec_index return self._call(req) def write_peer(self, oid, data, epoch, ec_index): req = Request() req.opcode = proto.SD_OP_WRITE_PEER req.proto_ver = proto.SD_SHEEP_PROTO_VER req.flags = proto.SD_FLAG_CMD_WRITE req.obj.oid = oid req.data = data req.data_length = len(data) req.epoch = epoch req.obj.ec_index = ec_index return self._call(req) def read_peer(self, oid, size, epoch, ec_index): req = Request() req.opcode = proto.SD_OP_READ_PEER req.proto_ver = proto.SD_SHEEP_PROTO_VER req.data_length = size req.obj.oid = oid req.epoch = epoch req.obj.ec_index = ec_index return self._call(req) def remove_peer(self, oid, epoch, ec_index): req = Request() req.opcode = proto.SD_OP_REMOVE_PEER req.proto_ver = proto.SD_SHEEP_PROTO_VER req.obj.oid = oid req.epoch = epoch req.obj.ec_index = ec_index return self._call(req) class SheepdogVDI(object): def __init__(self, client, inode): self.client = client self.inode = inode self.object_size = 1 << self.inode.block_size_shift def read(self, offset, length): data = '' iterator = self.OffsetIterator(offset, length, self.object_size) for idx, offset, length in iterator: vdi_id = self.inode.data_vdi_id[idx] if vdi_id == 0: data = '\0' * length continue oid = (vdi_id << proto.VDI_SPACE_SHIFT) + idx rsp = self.client.read_obj(oid, offset, length) data += rsp.data return data class OffsetIterator(object): def __init__(self, offset, length, object_size): self.idx = int(offset / object_size) self.offset = offset % object_size self.total = length self.done = 0 self.object_size = object_size def __iter__(self): return self def next(self): if self.total <= self.done: raise StopIteration() length = min(self.total - self.done, self.object_size - self.offset) ret = (self.idx, self.offset, length) self.offset = 0 self.idx += 1 self.done += length return ret
matsu777/sheepdog
tests/operation/sheep.py
Python
gpl-2.0
13,447
import os import Components.Task from twisted.internet import reactor, threads, task class FailedPostcondition(Components.Task.Condition): def __init__(self, exception): self.exception = exception def getErrorMessage(self, task): return str(self.exception) def check(self, task): return self.exception is None class CopyFileTask(Components.Task.PythonTask): def openFiles(self, fileList): self.callback = None self.fileList = fileList self.handles = [(open(fn[0], 'rb'), open(fn[1], 'wb')) for fn in fileList] self.end = 0 for src,dst in fileList: try: self.end += os.stat(src).st_size except: print "Failed to stat", src if not self.end: self.end = 1 print "[CopyFileTask] size:", self.end def work(self): print "[CopyFileTask] handles ", len(self.handles) try: for src, dst in self.handles: while 1: if self.aborted: print "[CopyFileTask] aborting" raise Exception, "Aborted" d = src.read(65536) if not d: src.close() dst.close() # EOF break dst.write(d) self.pos += len(d) except: # In any event, close all handles for src, dst in self.handles: src.close() dst.close() for s,d in self.fileList: # Remove incomplete data. try: os.unlink(d) except: pass raise def copyFiles(fileList, name): name = _("Copy") + " " + name job = Components.Task.Job(name) task = CopyFileTask(job, name) task.openFiles(fileList) Components.Task.job_manager.AddJob(job)
libo/Enigma2
lib/python/Screens/CopyFiles.py
Python
gpl-2.0
1,518
#!/usr/bin/env python # -*- coding: UTF-8 -*- # REF [site] >> https://github.com/pymupdf/PyMuPDF import fitz # REF [site] >> https://pymupdf.readthedocs.io/en/latest/tutorial.html def tutorial(): pdf_filepath = '/path/to/sample.pdf' try: # Open a document. doc = fitz.open(pdf_filepath) #doc = fitz.Document(pdf_filepath) except RuntimeError as ex: print('RuntimeError raised in {}: {}.'.format(pdf_filepath, ex)) return print('#pages = {}.'.format(doc.page_count)) print('#chapters = {}.'.format(doc.chapter_count)) print('Metadata = {}.'.format(doc.metadata)) print('Form fonts = {}.'.format(doc.FormFonts)) print('doc.name = {}.'.format(doc.name)) print('doc.needs_pass = {}.'.format(doc.needs_pass)) print('doc.outline = {}.'.format(doc.outline)) print('doc.permissions = {}.'.format(doc.permissions)) print('doc.is_closed = {}.'.format(doc.is_closed)) print('doc.is_dirty = {}.'.format(doc.is_dirty)) print('doc.is_encrypted = {}.'.format(doc.is_encrypted)) print('doc.is_form_pdf = {}.'.format(doc.is_form_pdf)) print('doc.is_pdf = {}.'.format(doc.is_pdf)) print('doc.is_reflowable = {}.'.format(doc.is_reflowable)) print('doc.is_repaired = {}.'.format(doc.is_repaired)) print('doc.last_location = {}.'.format(doc.last_location)) print('doc.has_annots() = {}.'.format(doc.has_annots())) print('doc.has_links() = {}.'.format(doc.has_links())) print('ToC:\n{}.'.format(doc.get_toc())) try: page_no = 0 assert page_no < doc.page_count print('doc.get_page_fonts(pno={}, full=False) = {}.'.format(page_no, doc.get_page_fonts(pno=page_no, full=False))) # A list of (xref, ext, type, basefont, name, encoding, referencer (optional)). print('doc.get_page_images(pno={}) = {}.'.format(page_no, doc.get_page_images(pno=page_no))) print('doc.get_page_xobjects(pno={}) = {}.'.format(page_no, doc.get_page_xobjects(pno=page_no))) print('doc.get_page_pixmap(pno={}) = {}.'.format(page_no, doc.get_page_pixmap(pno=page_no))) print('doc.get_page_labels() = {}.'.format(doc.get_page_labels())) print('doc.get_page_numbers(label="label", only_one=False) = {}.'.format(doc.get_page_numbers(label='label', only_one=False))) print('doc.get_sigflags() = {}.'.format(doc.get_sigflags())) print('doc.get_xml_metadata() = {}.'.format(doc.get_xml_metadata())) print('doc.get_page_text(pno={}, option="text") = {}.'.format(page_no, doc.get_page_text(pno=page_no, option='text', clip=None, flags=None))) except IndexError as ex: print('IndexError raised: {}.'.format(ex)) try: # Loads page number 'page_no' of the document (0-based). 
page_no = 1 page = doc.load_page(page_id=page_no) #page = doc[page_no] except ValueError as ex: print('ValueError raised: {}.'.format(ex)) except IndexError as ex: print('IndexError raised: {}.'.format(ex)) for page in doc: print('page.number = {}.'.format(page.number)) print('page.parent = {}.'.format(page.parent)) print('page.rect = {}.'.format(page.rect)) print('page.rotation = {}.'.format(page.rotation)) print('page.xref = {}.'.format(page.xref)) print('page.first_annot = {}.'.format(page.first_annot)) print('page.first_link = {}.'.format(page.first_link)) print('page.first_widget = {}.'.format(page.first_widget)) print('page.mediabox = {}.'.format(page.mediabox)) print('page.mediabox_size = {}.'.format(page.mediabox_size)) print('page.cropbox = {}.'.format(page.mediabox)) print('page.cropbox_position = {}.'.format(page.cropbox_position)) print('page.transformation_matrix = {}.'.format(page.transformation_matrix)) print('page.rotation_matrix = {}.'.format(page.rotation_matrix)) print('page.derotation_matrix = {}.'.format(page.derotation_matrix)) #links = page.get_links() # All links of a page. links = page.links() # A generator over the page’s links. annotations = page.annots() # A generator over the page's annotations. fields = page.widgets() # A generator over the page's form fields. label = page.get_label() draw_cmds = page.get_drawings() fonts = page.get_fonts(full=False) images = page.get_images(full=False) for item in images: image_bboxes = page.get_image_bbox(item) # REF [function] >> pixmap_example(). pix = page.get_pixmap() # fitz.Pixmap. #image_str = page.get_svg_image(matrix=fitz.Identity, text_as_path=True) # REF [function] >> text_extraction_example(). text = page.get_text(option='text', clip=None, flags=None) # {'text', 'blocks', 'words', 'html', 'xhtml', 'xml', 'dict', 'json', 'rawdict', 'rawjson'}. #text = page.get_textbox(rect) #text_page = page.get_textpage(clip=None, flags=3) # fitz.TextPage. rects = page.search_for('mupdf') # A list of objects of fitz.Rect. if rects: print('bl = {}, br = {}, tl = {}, tr = {}.'.format(rects[0].bottom_left, rects[0].bottom_right, rects[0].top_left, rects[0].top_right)) #-------------------- if False: # Convert an XPS file to PDF. xps_filepath = '/path/to/infile.xps' xps_doc = fitz.open(xps_filepath) pdf_bytes = xps_doc.convert_to_pdf() pdf_filepath = '/path/to/outfile.pdf' if True: pdf = fitz.open('pdf', pdf_bytes) pdf.save(pdf_filepath) else: pdf_out = open(pdf_filepath, 'wb') pdf_out.tobytes(pdf_bytes) pdf_out.close() # Copy image files to PDF pages. # Each page will have image dimensions. image_filepaths = [ '/path/to/image1.png', '/path/to/image2.png', ] pdf_filepath = '/path/to/outfile.pdf' doc = fitz.open() # New PDF. for fpath in image_filepaths: img_doc = fitz.open(fpath) # Open each image as a document. pdf_bytes = img_doc.convert_to_pdf() # Make a 1-page PDF. img_pdf = fitz.open('pdf', pdf_bytes) doc.insert_pdf(img_pdf) # Insert the image PDF. doc.save(pdf_filepath) #-------------------- """ doc.new_page(pno=-1, width=595, height=842) doc.insert_page(pno, text=None, fontsize=11, width=595, height=842, fontname="helv", fontfile=None, color=None) doc.copy_page(pno, to=-1) doc.fullcopy_page(pno, to=-1) doc.move_page(pno, to=-1) doc.delete_page(pno=-1) doc.delete_pages(from_page=-1, to_page=-1) doc.select(sequence) # The sequence of page numbers (zero-based) to be included. pdf_filepath = '/path/to/new.pdf' doc2 = fitz.open() # New empty PDF. doc2.insert_pdf(doc1, to_page=9) # First 10 pages. 
doc2.insert_pdf(doc1, from_page=len(doc1) - 10) # Last 10 pages. doc2.save(pdf_filepath) """ def text_extraction_example(): pdf_filepath = '/path/to/sample.pdf' try: # Open a document. doc = fitz.open(pdf_filepath) #doc = fitz.Document(pdf_filepath) except RuntimeError as ex: print('RuntimeError raised in {}: {}.'.format(pdf_filepath, ex)) return for page in doc: # page.get_text(): option = {'text', 'blocks', 'words', 'html', 'xhtml', 'xml', 'dict', 'json', 'rawdict', 'rawjson'}. if True: text = page.get_text(option='text', clip=None, flags=None) # Extracts texts that are completely contained within the rectangle. #text = page.get_text(option='text', clip=(x0, y0, x1, y1), flags=None) print('-------------------------------------------------- Text.') print(text) if False: # A list of text lines grouped by block. (x0, y0, x1, y1, "lines in blocks", block_no, block_type). # block_type is 1 for an image block, 0 for text. blocks = page.get_text(option='blocks', clip=None, flags=None) print('-------------------------------------------------- Blocks.') for idx, block in enumerate(blocks): print('-------------------------------------------------- {}-th block.'.format(idx)) print(block) if False: # A list of single words with bbox information. (x0, y0, x1, y1, "word", block_no, line_no, word_no). # Everything wrapped in spaces is treated as a "word" with this method. words = page.get_text(option='words', clip=None, flags=None) print('-------------------------------------------------- Words.') print(words) print('#words = {}.'.format(len(words))) if False: html = page.get_text(option='html', clip=None, flags=None) print('-------------------------------------------------- HTML.') print(html) if False: xhtml = page.get_text(option='xhtml', clip=None, flags=None) print('-------------------------------------------------- XHTML.') print(xhtml) if False: xml = page.get_text(option='xml', clip=None, flags=None) print('-------------------------------------------------- XML.') print(xml) if False: page_dict = page.get_text(option='dict', clip=None, flags=None) print('-------------------------------------------------- dict.') print('Image width = {}, image height = {}.'.format(page_dict['width'], page_dict['height'])) for idx, block in enumerate(page_dict['blocks']): print('-------------------------------------------------- {}-th block.'.format(idx)) print(block['number']) # Block no. print(block['type']) # Block type: 1 for an image block, 0 for text. print(block['bbox']) # Bounding box. print(block['lines']) # Text lines. if False: import json page_json = page.get_text(option='json', clip=None, flags=None) print('-------------------------------------------------- JSON.') page_dict = json.loads(page_json) print('Image width = {}, image height = {}.'.format(page_dict['width'], page_dict['height'])) for idx, block in enumerate(page_dict['blocks']): print('-------------------------------------------------- {}-th block.'.format(idx)) print(block['number']) # Block no. print(block['type']) # Block type: 1 for an image block, 0 for text. print(block['bbox']) # Bounding box. print(block['lines']) # Text lines. if False: page_dict = page.get_text(option='rawdict', clip=None, flags=None) print('-------------------------------------------------- Raw dict.') print('Image width = {}, image height = {}.'.format(page_dict['width'], page_dict['height'])) for idx, block in enumerate(page_dict['blocks']): print('-------------------------------------------------- {}-th block.'.format(idx)) print(block['number']) # Block no. 
print(block['type']) # Block type: 1 for an image block, 0 for text. print(block['bbox']) # Bounding box. for lidx, line in enumerate(block['lines']): # Text lines. {'spans', 'wmode', 'dir', 'bbox'}. print('\t------------------------------ {}-th line.'.format(lidx)) print('\t', line['wmode']) print('\t', line['dir']) print('\t', line['bbox']) for sidx, span in enumerate(line['spans']): # {'size', 'flags', 'font', 'color', 'ascender', 'descender', 'chars', 'origin', 'bbox'}. print('\t\t-------------------- {}-th span.'.format(sidx)) print('\t\t', span['size']) print('\t\t', span['flags']) print('\t\t', span['font']) print('\t\t', span['color']) print('\t\t', span['ascender']) print('\t\t', span['descender']) print('\t\t', span['origin']) print('\t\t', span['bbox']) for cidx, ch in enumerate(span['chars']): # {'origin', 'bbox', 'c'}. print('\t\t\t---------- {}-th char.'.format(cidx)) print('\t\t\t', ch['origin']) print('\t\t\t', ch['bbox']) print('\t\t\t', ch['c']) # Char. if False: import json page_json = page.get_text(option='rawjson', clip=None, flags=None) print('-------------------------------------------------- Raw JSON.') page_dict = json.loads(page_json) print('Image width = {}, image height = {}.'.format(page_dict['width'], page_dict['height'])) for idx, block in enumerate(page_dict['blocks']): print('-------------------------------------------------- {}-th block.'.format(idx)) print(block['number']) # Block no. print(block['type']) # Block type: 1 for an image block, 0 for text. print(block['bbox']) # Bounding box. for lidx, line in enumerate(block['lines']): # Text lines. {'spans', 'wmode', 'dir', 'bbox'}. print('\t------------------------------ {}-th line.'.format(lidx)) print('\t', line['wmode']) print('\t', line['dir']) print('\t', line['bbox']) for sidx, span in enumerate(line['spans']): # {'size', 'flags', 'font', 'color', 'ascender', 'descender', 'chars', 'origin', 'bbox'}. print('\t\t-------------------- {}-th span.'.format(sidx)) print('\t\t', span['size']) print('\t\t', span['flags']) print('\t\t', span['font']) print('\t\t', span['color']) print('\t\t', span['ascender']) print('\t\t', span['descender']) print('\t\t', span['origin']) print('\t\t', span['bbox']) for cidx, ch in enumerate(span['chars']): # {'origin', 'bbox', 'c'}. print('\t\t\t---------- {}-th char.'.format(cidx)) print('\t\t\t', ch['origin']) print('\t\t\t', ch['bbox']) print('\t\t\t', ch['c']) # Char. 
def pixmap_example(): pdf_filepath = '/path/to/infile.pdf' png_filepath = '/path/to/outfile.png' try: doc = fitz.open(pdf_filepath) except RuntimeError as ex: print('RuntimeError raised in {}: {}.'.format(pdf_filepath, ex)) return try: page = doc.load_page(page_id=0) except ValueError as ex: print('ValueError raised: {}.'.format(ex)) return #-------------------- #pix = page.get_pixmap(matrix=fitz.Identity, colorspace=fitz.csRGB, clip=None, alpha=False, annots=True) pix = page.get_pixmap() pix.writePNG(png_filepath) #pix.writeImage(png_filepath) def drawing_example(): doc = fitz.open() page = doc.new_page() shape = page.new_shape() #shape = fitz.utils.Shape(page) if True: shape.shape.draw_line((100, 100), (300, 300)) """ shape.draw_polyline(points) shape.draw_rect(rect) shape.draw_quad(quad) shape.draw_circle(center, radius) shape.draw_oval(tetra) shape.draw_sector(center, point, angle, fullSector=True) shape.draw_curve(p1, p2, p3) shape.draw_bezier(p1, p2, p3, p4) shape.insert_text(point, text, fontsize=11, fontname='helv', fontfile=None, set_simple=False, encoding=TEXT_ENCODING_LATIN, color=None, lineheight=None, fill=None, render_mode=0, border_width=1, rotate=0, morph=None, stroke_opacity=1, fill_opacity=1, oc=0) shape.insert_textbox(rect, buffer, fontsize=11, fontname='helv', fontfile=None, set_simple=False, encoding=TEXT_ENCODING_LATIN, color=None, fill=None, render_mode=0, border_width=1, expandtabs=8, align=TEXT_ALIGN_LEFT, rotate=0, morph=None, stroke_opacity=1, fill_opacity=1, oc=0) """ if False: r = fitz.Rect(100, 100, 300, 200) shape.draw_squiggle(r.tl, r.tr, breadth=2) shape.draw_squiggle(r.tr, r.br, breadth=2) shape.draw_squiggle(r.br, r.tl, breadth=2) if False: r = fitz.Rect(100, 100, 300, 200) shape.draw_zigzag(r.tl, r.tr, breadth=2) shape.draw_zigzag(r.tr, r.br, breadth=2) shape.draw_zigzag(r.br, r.tl, breadth=2) # fitz.utils.Shape.finish(width=1, color=None, fill=None, lineCap=0, lineJoin=0, dashes=None, closePath=True, even_odd=False, morph=(fixpoint, matrix), stroke_opacity=1, fill_opacity=1, oc=0) shape.finish(width=1, color=(0, 0, 1), fill=(1, 1, 0)) shape.commit() doc.save('./drawing.pdf') def transformation_example(): if False: #mat = fitz.Matrix() # (0.0, 0.0, 0.0, 0.0, 0.0, 0.0). mat = fitz.Matrix(fitz.Identity) #mat = fitz.Matrix(1, 2, 3, 4, 5, 6) print('mat.a = {}.'.format(mat.a)) print('mat.b = {}.'.format(mat.b)) print('mat.c = {}.'.format(mat.c)) print('mat.d = {}.'.format(mat.d)) print('mat.e = {}.'.format(mat.e)) print('mat.f = {}.'.format(mat.f)) print('mat.isRectilinear = {}.'.format(mat.isRectilinear)) print('mat.norm() = {}.'.format(mat.norm())) print('mat.preRotate(theta) = {}.'.format(mat.preRotate(theta=30))) # [deg]. print('mat.preScale(sx, sy) = {}.'.format(mat.preScale(sx=2, sy=1))) print('mat.preShear(h, v) = {}.'.format(mat.preShear(h=1, v=0))) print('mat.preTranslate(tx, ty) = {}.'.format(mat.preTranslate(tx=50, ty=100))) mat1, mat2 = fitz.Matrix(1, 0, 0, 1, 1, 0), fitz.Matrix(1, 0, 0, 1, 0, 2) print('mat.concat(m1, m2) = {}.'.format(mat.concat(mat1, mat2))) # Matrix multiplication, m1 * m2. 
retval = mat.invert(mat1) print('mat.invert(m) = {} (retval = {}).'.format(mat, 'Invertible' if retval == 0 else 'Not invertible')) #-------------------- pdf_filepath = '/path/to/sample.pdf' try: doc = fitz.open(pdf_filepath) except RuntimeError as ex: print('RuntimeError raised in {}: {}.'.format(pdf_filepath, ex)) return try: page = doc.load_page(page_id=0) except ValueError as ex: print('ValueError raised: {}.'.format(ex)) return print('page.transformation_matrix = {}.'.format(page.transformation_matrix)) # This matrix translates coordinates from the PDF space to the MuPDF space. print('page.rotation_matrix = {}.'.format(page.rotation_matrix)) print('page.derotation_matrix = {}.'.format(page.derotation_matrix)) if False: # Rotate a page. print('page.rect = {} (before).'.format(page.rect)) page.set_rotation(90) print('page.rect = {} (after).'.format(page.rect)) # Rotate a point. pt = fitz.Point(0, 0) print('pt * page.rotation_matrix = {}.'.format(pt * page.rotation_matrix)) else: mat = fitz.Matrix(fitz.Identity) #mat.preRotate(theta=30) # [deg]. #mat.preScale(sx=2, sy=0.5) mat.preShear(h=0, v=1) #mat.preTranslate(tx=50, ty=100) pix = page.get_pixmap(matrix=mat) # Visualize. # REF [site] >> https://pymupdf.readthedocs.io/en/latest/tutorial.html from PIL import Image import matplotlib.pyplot as plt mode = 'RGBA' if pix.alpha else 'RGB' img = Image.frombytes(mode, [pix.width, pix.height], pix.samples) plt.figure() plt.imshow(img) plt.axis('off') plt.tight_layout() plt.show() def main(): # The coordinate system: # Origin: top-left, +X-axis: rightward, +Y-axis: downward. #-------------------- #tutorial() text_extraction_example() #pixmap_example() #drawing_example() #transformation_example() #-------------------- # Intersection of pdfminer's text boxes and PyMuPDF's blocks. # REF [function] >> intersection_of_pdfminer_and_pymupdf() in pdfminer_test.py. #-------------------------------------------------------------------- if '__main__' == __name__: main()
sangwook236/general-development-and-testing
sw_dev/python/ext/test/documentation/pymupdf_test.py
Python
gpl-2.0
18,130
#!/usr/bin/env python3 import requests import sys import subprocess import argparse def get_commits(pr): try: res = requests.get('https://api.github.com/repos/PowerDNS/pdns/pulls/' '{}/commits'.format(pr)).json() return [c['sha'] for c in res] except (ValueError, requests.exceptions.HTTPError) as e: print(e) sys.exit(1) def run_command(cmd): try: subprocess.check_call(cmd) except subprocess.CalledProcessError as e: print(e) sys.exit(1) a = argparse.ArgumentParser() action = a.add_mutually_exclusive_group(required=True) action.add_argument( '-b', '--backport-unto', metavar='REF', nargs=1, help='Backport, using ' 'cherry-pick, all commits from PULL_REQUEST onto REF. This is done on a ' 'branch called "backport-PULL_REQUEST". When the cherry-pick fails, solve ' 'the conflict as usual and run "git cherry-pick --continue --allow-empty"') action.add_argument( '-m', '--merge-into', metavar='REF', nargs=1, help='Take the backport-' 'PULL_REQUEST branch and merge it into REF') a.add_argument( 'pull_request', metavar='PULL_REQUEST', type=int, help='The PR number to backport') args = a.parse_args() if args.backport_unto: command = ['git', 'checkout', '-b', 'backport-{}'.format(args.pull_request), args.backport_unto[0]] run_command(command) commits = get_commits(args.pull_request) command = ['git', 'cherry-pick', '-x', '--allow-empty'] + commits run_command(command) if args.merge_into: command = ['git', 'checkout', args.merge_into[0]] run_command(command) command = ['git', 'merge', '--no-ff', 'backport-{}'.format(args.pull_request), '-m', 'Backport #{}'.format(args.pull_request)] run_command(command)
oridistor/pdns
build-scripts/cherry-pick-pr.py
Python
gpl-2.0
1,840
from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class TicketsConfig(AppConfig): name = 'default_set.tickets' verbose_name = _('Тикеты')
null-none/OpenGain
default_set/tickets/apps.py
Python
gpl-2.0
194
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import os import sys import tempfile import numpy as np import pytest import matplotlib as mpl from matplotlib import pyplot as plt from matplotlib import animation class NullMovieWriter(animation.AbstractMovieWriter): """ A minimal MovieWriter. It doesn't actually write anything. It just saves the arguments that were given to the setup() and grab_frame() methods as attributes, and counts how many times grab_frame() is called. This class doesn't have an __init__ method with the appropriate signature, and it doesn't define an isAvailable() method, so it cannot be added to the 'writers' registry. """ frame_size_can_vary = True def setup(self, fig, outfile, dpi, *args): self.fig = fig self.outfile = outfile self.dpi = dpi self.args = args self._count = 0 def grab_frame(self, **savefig_kwargs): self.savefig_kwargs = savefig_kwargs self._count += 1 def finish(self): pass def test_null_movie_writer(): # Test running an animation with NullMovieWriter. fig = plt.figure() def init(): pass def animate(i): pass num_frames = 5 filename = "unused.null" dpi = 50 savefig_kwargs = dict(foo=0) anim = animation.FuncAnimation(fig, animate, init_func=init, frames=num_frames) writer = NullMovieWriter() anim.save(filename, dpi=dpi, writer=writer, savefig_kwargs=savefig_kwargs) assert writer.fig == fig assert writer.outfile == filename assert writer.dpi == dpi assert writer.args == () assert writer.savefig_kwargs == savefig_kwargs assert writer._count == num_frames def test_movie_writer_dpi_default(): # Test setting up movie writer with figure.dpi default. fig = plt.figure() filename = "unused.null" fps = 5 codec = "unused" bitrate = 1 extra_args = ["unused"] def run(): pass writer = animation.MovieWriter(fps, codec, bitrate, extra_args) writer._run = run writer.setup(fig, filename) assert writer.dpi == fig.dpi @animation.writers.register('null') class RegisteredNullMovieWriter(NullMovieWriter): # To be able to add NullMovieWriter to the 'writers' registry, # we must define an __init__ method with a specific signature, # and we must define the class method isAvailable(). # (These methods are not actually required to use an instance # of this class as the 'writer' argument of Animation.save().) def __init__(self, fps=None, codec=None, bitrate=None, extra_args=None, metadata=None): pass @classmethod def isAvailable(self): return True WRITER_OUTPUT = [ ('ffmpeg', 'mp4'), ('ffmpeg_file', 'mp4'), ('mencoder', 'mp4'), ('mencoder_file', 'mp4'), ('avconv', 'mp4'), ('avconv_file', 'mp4'), ('imagemagick', 'gif'), ('imagemagick_file', 'gif'), ('html', 'html'), ('null', 'null') ] # Smoke test for saving animations. In the future, we should probably # design more sophisticated tests which compare resulting frames a-la # matplotlib.testing.image_comparison @pytest.mark.parametrize('writer, extension', WRITER_OUTPUT) def test_save_animation_smoketest(tmpdir, writer, extension): try: # for ImageMagick the rcparams must be patched to account for # 'convert' being a built in MS tool, not the imagemagick # tool. writer._init_from_registry() except AttributeError: pass if not animation.writers.is_available(writer): pytest.skip("writer '%s' not available on this system" % writer) fig, ax = plt.subplots() line, = ax.plot([], []) ax.set_xlim(0, 10) ax.set_ylim(-1, 1) dpi = None codec = None if writer == 'ffmpeg': # Issue #8253 fig.set_size_inches((10.85, 9.21)) dpi = 100. 
codec = 'h264' def init(): line.set_data([], []) return line, def animate(i): x = np.linspace(0, 10, 100) y = np.sin(x + i) line.set_data(x, y) return line, # Use temporary directory for the file-based writers, which produce a file # per frame with known names. with tmpdir.as_cwd(): anim = animation.FuncAnimation(fig, animate, init_func=init, frames=5) try: anim.save('movie.' + extension, fps=30, writer=writer, bitrate=500, dpi=dpi, codec=codec) except UnicodeDecodeError: pytest.xfail("There can be errors in the numpy import stack, " "see issues #1891 and #2679") def test_no_length_frames(): fig, ax = plt.subplots() line, = ax.plot([], []) def init(): line.set_data([], []) return line, def animate(i): x = np.linspace(0, 10, 100) y = np.sin(x + i) line.set_data(x, y) return line, anim = animation.FuncAnimation(fig, animate, init_func=init, frames=iter(range(5))) writer = NullMovieWriter() anim.save('unused.null', writer=writer) def test_movie_writer_registry(): ffmpeg_path = mpl.rcParams['animation.ffmpeg_path'] # Not sure about the first state as there could be some writer # which set rcparams # assert not animation.writers._dirty assert len(animation.writers._registered) > 0 animation.writers.list() # resets dirty state assert not animation.writers._dirty mpl.rcParams['animation.ffmpeg_path'] = u"not_available_ever_xxxx" assert animation.writers._dirty animation.writers.list() # resets assert not animation.writers._dirty assert not animation.writers.is_available("ffmpeg") # something which is guaranteed to be available in path # and exits immediately bin = u"true" if sys.platform != 'win32' else u"where" mpl.rcParams['animation.ffmpeg_path'] = bin assert animation.writers._dirty animation.writers.list() # resets assert not animation.writers._dirty assert animation.writers.is_available("ffmpeg") mpl.rcParams['animation.ffmpeg_path'] = ffmpeg_path
jonyroda97/redbot-amigosprovaveis
lib/matplotlib/tests/test_animation.py
Python
gpl-3.0
6,327
# Copyright (C) 2013, Walter Bender # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import sys from gi.repository import Gtk from dbus.mainloop.glib import DBusGMainLoop DBusGMainLoop(set_as_default=True) from jarabe import config from jarabe.journal.journaltoolbox import DetailToolbox from jarabe.journal.journalwindow import JournalWindow from jarabe.webservice.account import Account ACCOUNT_NAME = 'mock' class JournalMock(JournalWindow): def get_mount_point(self): return '/' tests_dir = os.getcwd() extension_dir = os.path.join(tests_dir, 'extensions') os.environ["MOCK_ACCOUNT_STATE"] = str(Account.STATE_VALID) config.ext_path = extension_dir sys.path.append(config.ext_path) window = Gtk.Window() toolbox = DetailToolbox(JournalMock()) toolbox.show() window.add(toolbox) window.show() toolbox.set_metadata({'mountpoint': '/', 'uid': '', 'title': 'mock'}) toolbox._copy.palette.popup(immediate=True) Gtk.main()
quozl/sugar
tests/views/journal_detailstoolbox.py
Python
gpl-3.0
1,544
#!/usr/bin/python # -*- coding: utf-8 -*- # # CoMFoRT: a COntent Management system FOr Researchers and Teachers! # # Copyright (C) 2008 Projet2-L3IF ENS Lyon. # # Contributors: # * Jean-Alexandre Angles d'Auriac # * Gabriel Beaulieu # * Valentin Blot # * Pierre Boutillier # * Nicolas Brunie # * Aloïs Brunel # * Vincent Delaitre # * Antoine Frénoy # * Mathias Gaunard # * Guilhem Jaber # * Timo Jolivet # * Jonas Lefèvre # * Bastien Le Gloannec # * Anne-Laure Mouly # * Kevin Perrot # * Jonathan Protzenko # * Gabriel Renault # * Philippe Robert # * Pierre Roux # * Abdallah Saffidine # * David Salinas # * Félix Sipma # * Alexandra Sourisseau # * Samuel Vaiter # * Guillaume Vors # # Contact us with : comfort@listes.ens-lyon.fr # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>." # import xml.dom.minidom def cleanDoc(document,indent="",newl=""): node = document.documentElement cleanNode(node,indent,newl) def cleanNode(currentNode,indent,newl): filter = indent + newl if currentNode.hasChildNodes: for node in currentNode.childNodes: if node.nodeType == 3: node.nodeValue = node.nodeValue.lstrip(filter).strip(filter) if node.nodeValue == "": currentNode.removeChild(node) for node in currentNode.childNodes: cleanNode(node,indent,newl)
vdel/CoMFoRT
src/db/cleaner_xml.py
Python
gpl-3.0
1,998
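A short driver showing the effect of cleanDoc on a pretty-printed document: indentation-only text nodes are removed and surrounding whitespace is stripped from the rest. The import is hypothetical (it assumes the cleaner_xml module above is on sys.path) and the sample XML is made up for illustration:

import xml.dom.minidom

from cleaner_xml import cleanDoc  # hypothetical: assumes src/db on sys.path

raw = """<root>
    <item>  one  </item>
    <item>two</item>
</root>"""

doc = xml.dom.minidom.parseString(raw)
cleanDoc(doc, indent="    ", newl="\n")
print(doc.documentElement.toxml())
# -> <root><item>one</item><item>two</item></root>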
import pytest


@pytest.mark.xfail
def test_i_will_fail():
    assert False, 'I told you I was going to fail'


@pytest.mark.xfail(run=False)
def test_i_will_break_things_big_time():
    print('Use this to not run e.g. tests that segfault')


@pytest.mark.xfail(raises=IndexError)
def test_moronic_list_access():
    a = []
    assert a[1] == 5


@pytest.mark.xfail(raises=IndexError)
def test_moronic_dict_access():
    a = {}
    # dict access raises KeyError, not IndexError, so the exception does not
    # match raises=IndexError and pytest reports this as FAILED, not XFAIL
    assert a[1] == 5
mayankjohri/LetsExplorePython
Section 2 - Advance Python/Chapter S2.08 - Automated Testing/code/pytest - Copy/test_xfail/test_1.py
Python
gpl-3.0
495
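Two more xfail variants that the file above does not cover, per pytest's documented marker arguments (a boolean condition plus a required reason, and strict mode):

import sys

import pytest


@pytest.mark.xfail(sys.platform == 'win32',
                   reason='example: expected to fail only on Windows')
def test_conditional_xfail():
    # The marker only applies where the condition holds; elsewhere a pass
    # is counted as a normal pass.
    assert True


@pytest.mark.xfail(strict=True)
def test_strict_xfail():
    # With strict=True an unexpected pass (XPASS) fails the suite, so a
    # strict xfail cannot silently start passing.
    assert False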
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

import sys
import cProfile
import functools
import pstats
from io import StringIO
from contextlib import contextmanager

import bpy
from bpy.props import BoolProperty, EnumProperty

from sverchok.utils.logging import info, debug
from sverchok.utils.context_managers import sv_preferences

# Global cProfile.Profile singleton
_global_profile = None
# Nesting level for @profile decorator
_profile_nesting = 0
# Whether the profiling is enabled by "Start profiling" toggle
is_currently_enabled = False


def get_global_profile():
    """
    Get the cProfile.Profile singleton object.
    """
    global _global_profile
    if _global_profile is None:
        _global_profile = cProfile.Profile()
    return _global_profile


def is_profiling_enabled(section):
    """
    Check if profiling is enabled in general,
    and if it is enabled for the specified section.
    """
    global is_currently_enabled
    if not is_currently_enabled:
        return False
    with sv_preferences() as prefs:
        return prefs.profile_mode == section


def is_profiling_enabled_in_settings():
    """
    Check if profiling is not set to NONE in addon preferences.
    """
    with sv_preferences() as prefs:
        if prefs is None:
            return True
        return prefs.profile_mode != "NONE"


def profile(function=None, section="MANUAL"):
    """
    Decorator for profiling specific methods.
    It can be used in two ways:

    @profile
    def method(...):
        ...

    or

    @profile(section="SECTION")
    def method(...):
        ...

    The first form is equivalent to the second with section = "MANUAL".

    A profiling section is a named set of methods which should be profiled.
    Supported values of section are listed in
    SverchokPreferences.profiling_sections.

    The @profile(section) decorator profiles the method only if all of the
    following conditions are met:
    * profiling for the specified section is enabled in settings
      (profile_mode option),
    * profiling is currently active.
    """

    def profiling_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if is_profiling_enabled(section):
                global _profile_nesting
                profile = get_global_profile()

                _profile_nesting += 1
                if _profile_nesting == 1:
                    profile.enable()
                try:
                    return func(*args, **kwargs)
                finally:
                    # Decrement in a finally block so an exception in func
                    # cannot leave the profiler enabled and the nesting
                    # counter out of sync.
                    _profile_nesting -= 1
                    if _profile_nesting == 0:
                        profile.disable()
            else:
                return func(*args, **kwargs)

        return wrapper

    if callable(function):
        return profiling_decorator(function)
    else:
        return profiling_decorator


def dump_stats(sort="tottime", strip_dirs=False, file_path=None):
    """
    Dump profiling statistics to the log.
""" profile = get_global_profile() if not profile.getstats(): info("There are no profiling results yet") return if file_path is None: stream = StringIO() stats = pstats.Stats(profile, stream=stream) if strip_dirs: stats.strip_dirs() stats = stats.sort_stats(sort) stats.print_stats() info("Profiling results:\n" + stream.getvalue()) info("---------------------------") else: with open(file_path, 'w') as stream: stats = pstats.Stats(profile, stream=stream) if strip_dirs: stats.strip_dirs() stats = stats.sort_stats(sort) stats.print_stats() info("Profiling results are written to %s", file_path) def save_stats(path): """ Dump profiling statistics to file in cProfile's binary format. Such file can be parsed, for example, by gprof2dot utility. """ profile = get_global_profile() if not profile.getstats(): info("There are no profiling results yet") return stats = pstats.Stats(profile) stats.dump_stats(path) info("Profiling statistics saved to %s.", path) def have_gathered_stats(): global _global_profile if _global_profile is None: return False if _global_profile.getstats(): return True else: return False def reset_stats(): global _global_profile _global_profile = None @contextmanager def profiling_enabled(): if is_profiling_enabled_in_settings(): global _profile_nesting profile = None try: profile = get_global_profile() _profile_nesting += 1 if _profile_nesting == 1: profile.enable() yield profile finally: _profile_nesting -= 1 if _profile_nesting == 0 and profile is not None: profile.disable() else: yield None @contextmanager def profiling_startup(): if "--profile-sverchok-startup" in sys.argv: global _profile_nesting profile = None try: profile = get_global_profile() _profile_nesting += 1 if _profile_nesting == 1: profile.enable() yield profile finally: _profile_nesting -= 1 if _profile_nesting == 0 and profile is not None: profile.disable() dump_stats(file_path="sverchok_profile.txt") save_stats("sverchok_profile.prof") else: yield None ######################## # # GUI # ######################### class SvProfilingToggle(bpy.types.Operator): """Toggle profiling on/off""" bl_idname = "node.sverchok_profile_toggle" bl_label = "Toggle profiling" bl_options = {'INTERNAL'} def execute(self, context): global is_currently_enabled is_currently_enabled = not is_currently_enabled info("Profiling is set to %s", is_currently_enabled) return {'FINISHED'} class SvProfileDump(bpy.types.Operator): """Dump profiling statistics to log""" bl_idname = "node.sverchok_profile_dump" bl_label = "Dump profiling statistics to log" bl_options = {'INTERNAL'} sort_methods = [ ("tottime", "Internal time", "Internal time (excluding time made in calls to sub-functions)", 0), ("cumtime", "Cumulative time", "Cumulative time (including sub-functions)", 1), ("calls", "Calls count", "Count of calls of function", 2), ("nfl", "Name, file, line", "Function name, file name, line number", 3) ] sort: EnumProperty(name = "Sort by", description = "How to sort dumped statistics", items = sort_methods, default = "tottime") strip_dirs: BoolProperty(name = "Strip directories", description = "Strip directory path part of file name in the output", default = True) def execute(self, context): dump_stats(sort = self.sort, strip_dirs = self.strip_dirs) return {'FINISHED'} def invoke(self, context, event): wm = context.window_manager return wm.invoke_props_dialog(self) class SvProfileSave(bpy.types.Operator): """Dump profiling statistics to binary file""" bl_idname = "node.sverchok_profile_save" bl_label = "Dump profiling statistics to binary file" 
    bl_options = {'INTERNAL'}

    filepath: bpy.props.StringProperty(subtype="FILE_PATH")

    def execute(self, context):
        save_stats(self.filepath)
        return {'FINISHED'}

    def invoke(self, context, event):
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}


class SvProfileReset(bpy.types.Operator):
    """Reset profiling statistics"""
    bl_idname = "node.sverchok_profile_reset"
    bl_label = "Reset profiling statistics"
    bl_options = {'INTERNAL'}

    def execute(self, context):
        reset_stats()
        info("Profiling statistics data cleared.")
        return {'FINISHED'}


classes = [SvProfilingToggle, SvProfileDump, SvProfileSave, SvProfileReset]


def register():
    # Each entry in `classes` is a class object, not a name string.
    for cls in classes:
        bpy.utils.register_class(cls)


def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
DolphinDream/sverchok
utils/profile.py
Python
gpl-3.0
9,090
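The enable/disable bookkeeping in profile.py above depends on Blender and Sverchok preferences; this is a stripped-down, runnable sketch of the same nesting-counter pattern with plain cProfile, showing how nested decorated calls share one profiler run:

import cProfile
import functools
import io
import pstats

_profile = cProfile.Profile()
_nesting = 0


def profiled(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _nesting
        _nesting += 1
        if _nesting == 1:
            _profile.enable()  # only the outermost call starts profiling
        try:
            return func(*args, **kwargs)
        finally:
            _nesting -= 1
            if _nesting == 0:
                _profile.disable()  # and only it stops profiling
    return wrapper


@profiled
def inner():
    return sum(range(100000))


@profiled
def outer():
    # The nested calls see _nesting == 2 and leave the profiler alone.
    return inner() + inner()


outer()
stream = io.StringIO()
pstats.Stats(_profile, stream=stream).sort_stats('tottime').print_stats(5)
print(stream.getvalue())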
# -*- encoding: utf-8 -*-
from abjad import *


def test_pitchtools_NamedInterval_semitones_01():

    assert pitchtools.NamedInterval('perfect', 1).semitones == 0
    assert pitchtools.NamedInterval('minor', 2).semitones == 1
    assert pitchtools.NamedInterval('major', 2).semitones == 2
    assert pitchtools.NamedInterval('minor', 3).semitones == 3
    assert pitchtools.NamedInterval('major', 3).semitones == 4
    assert pitchtools.NamedInterval('perfect', 4).semitones == 5
    assert pitchtools.NamedInterval('augmented', 4).semitones == 6
    assert pitchtools.NamedInterval('diminished', 5).semitones == 6
    assert pitchtools.NamedInterval('perfect', 5).semitones == 7
    assert pitchtools.NamedInterval('minor', 6).semitones == 8
    assert pitchtools.NamedInterval('major', 6).semitones == 9
    assert pitchtools.NamedInterval('minor', 7).semitones == 10
    assert pitchtools.NamedInterval('major', 7).semitones == 11
    assert pitchtools.NamedInterval('perfect', 8).semitones == 12


def test_pitchtools_NamedInterval_semitones_02():

    assert pitchtools.NamedInterval('major', 23).semitones == 38
    assert pitchtools.NamedInterval('major', -23).semitones == -38
mscuthbert/abjad
abjad/tools/pitchtools/test/test_pitchtools_NamedInterval_semitones.py
Python
gpl-3.0
1,185
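The compound-interval expectations in the second test reduce to simple arithmetic: strip whole octaves from the interval number (seven diatonic steps each), look up the remaining simple interval, add 12 semitones per octave, and negate descending intervals; a major 23rd is three octaves plus a major 2nd, 36 + 2 = 38. A standalone sketch of that rule (not abjad API; the table only covers the qualities these tests exercise):

# Semitone sizes of the simple intervals asserted in the first test.
SIMPLE_SEMITONES = {
    ('perfect', 1): 0, ('minor', 2): 1, ('major', 2): 2,
    ('minor', 3): 3, ('major', 3): 4, ('perfect', 4): 5,
    ('augmented', 4): 6, ('diminished', 5): 6, ('perfect', 5): 7,
    ('minor', 6): 8, ('major', 6): 9, ('minor', 7): 10,
    ('major', 7): 11, ('perfect', 8): 12,
}


def semitones(quality, number):
    sign = -1 if number < 0 else 1
    # Seven diatonic steps per octave: a 23rd is three octaves above a 2nd.
    octaves, simple = divmod(abs(number) - 1, 7)
    return sign * (SIMPLE_SEMITONES[(quality, simple + 1)] + 12 * octaves)


assert semitones('major', 23) == 38
assert semitones('major', -23) == -38
assert semitones('perfect', 8) == 12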