repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ddurieux/alignak | test/alignak_modules.py | Python | agpl-3.0 | 10,473 | 0.004774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import copy
import time
import subprocess
import | shutil
import datetime # not used but "sub-"imported by livestatus test.. (to be corrected..)
import sys # not here used but "sub-"imported by livestatus test.. (to be corrected..)
#
from alignak.modulesctx import modulesctx
from alignak.objects.module import Module
from alignak.modulesmanager import ModulesManager
from align | ak.misc.datamanager import datamgr
from alignak.log import logger
#
from alignak_test import (
modules_dir,
alignakTest,
time_hacker, # not used here but "sub"-imported by lvestatus test (to be corrected)
)
modulesctx.set_modulesdir(modules_dir)
# Special Livestatus module opening since the module rename
#from alignak.modules.livestatus import module as livestatus_broker
livestatus_broker = modulesctx.get_module('livestatus')
LiveStatus_broker = livestatus_broker.LiveStatus_broker
LiveStatus = livestatus_broker.LiveStatus
LiveStatusRegenerator = livestatus_broker.LiveStatusRegenerator
LiveStatusQueryCache = livestatus_broker.LiveStatusQueryCache
LiveStatusClientThread = livestatus_broker.LiveStatusClientThread
Logline = livestatus_broker.Logline
LiveStatusLogStoreMongoDB = modulesctx.get_module('logstore-mongodb').LiveStatusLogStoreMongoDB
LiveStatusLogStoreSqlite = modulesctx.get_module('logstore-sqlite').LiveStatusLogStoreSqlite
livestatus_modconf = Module()
livestatus_modconf.module_name = "livestatus"
livestatus_modconf.module_type = livestatus_broker.properties['type']
livestatus_modconf.properties = livestatus_broker.properties.copy()
class AlignakModulesTest(AlignakTest):
def do_load_modules(self):
self.modules_manager.load_and_init()
self.log.log("I correctly loaded the modules: [%s]" % (','.join([inst.get_name() for inst in self.modules_manager.instances])))
def update_broker(self, dodeepcopy=False):
# The brok should be manage in the good order
ids = self.sched.brokers['Default-Broker']['broks'].keys()
ids.sort()
for brok_id in ids:
brok = self.sched.brokers['Default-Broker']['broks'][brok_id]
#print "Managing a brok type", brok.type, "of id", brok_id
#if brok.type == 'update_service_status':
# print "Problem?", brok.data['is_problem']
if dodeepcopy:
brok = copy.deepcopy(brok)
brok.prepare()
self.livestatus_broker.manage_brok(brok)
self.sched.brokers['Default-Broker']['broks'] = {}
def init_livestatus(self, modconf=None, dbmodconf=None, needcache=False):
self.livelogs = 'tmp/livelogs.db' + self.testid
if modconf is None:
modconf = Module({'module_name': 'LiveStatus',
'module_type': 'livestatus',
'port': str(50000 + os.getpid()),
'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
'host': '127.0.0.1',
'socket': 'live',
'name': 'test', #?
})
if dbmodconf is None:
dbmodconf = Module({'module_name': 'LogStore',
'module_type': 'logstore_sqlite',
'use_aggressive_sql': "0",
'database_file': self.livelogs,
'archive_path': os.path.join(os.path.dirname(self.livelogs), 'archives'),
})
modconf.modules = [dbmodconf]
self.livestatus_broker = LiveStatus_broker(modconf)
self.livestatus_broker.create_queues()
#--- livestatus_broker.main
self.livestatus_broker.log = logger
# this seems to damage the logger so that the scheduler can't use it
#self.livestatus_broker.log.load_obj(self.livestatus_broker)
self.livestatus_broker.debug_output = []
self.livestatus_broker.modules_manager = ModulesManager('livestatus', modules_dir, [])
self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules)
# We can now output some previouly silented debug ouput
self.livestatus_broker.do_load_modules()
for inst in self.livestatus_broker.modules_manager.instances:
if inst.properties["type"].startswith('logstore'):
f = getattr(inst, 'load', None)
if f and callable(f):
f(self.livestatus_broker) # !!! NOT self here !!!!
break
for s in self.livestatus_broker.debug_output:
print "errors during load", s
del self.livestatus_broker.debug_output
self.livestatus_broker.rg = LiveStatusRegenerator()
self.livestatus_broker.datamgr = datamgr
datamgr.load(self.livestatus_broker.rg)
self.livestatus_broker.query_cache = LiveStatusQueryCache()
if not needcache:
self.livestatus_broker.query_cache.disable()
self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
#--- livestatus_broker.main
self.livestatus_broker.init()
self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
#--- livestatus_broker.do_main
self.livestatus_broker.db.open()
if hasattr(self.livestatus_broker.db, 'prepare_log_db_table'):
self.livestatus_broker.db.prepare_log_db_table()
#--- livestatus_broker.do_main
class TestConfig(AlignakModulesTest):
def tearDown(self):
self.livestatus_broker.db.close()
if os.path.exists(self.livelogs):
os.remove(self.livelogs)
if os.path.exists(self.livelogs + "-journal"):
os.remove(self.livelogs + "-journal")
if os.path.exists(self.livestatus_broker.pnp_path):
shutil.rmtree(self.livestatus_broker.pnp_path)
if os.path.exists('var/alignak.log'):
os.remove('var/alignak.log')
if os.path.exists('var/retention.dat'):
os.remove('var/retention.dat')
if os.path.exists('var/status.dat'):
os.remove('var/status.dat')
self.livestatus_broker = None
def contains_line(self, text, pattern):
regex = re.compile(pattern)
for line in text.splitlines():
if re.search(regex, line):
return True
return False
def update_broker(self, dodeepcopy=False):
# The brok should be manage in the good order
ids = self.sched.brokers['Default-Broker']['broks'].keys()
ids.sort()
for brok_id in ids:
brok = self.sched.brokers['Default-Broker']['broks'][brok_id]
#print "Managing a brok type", brok.type, "of id", brok_id
#if brok.type == 'update_service_status':
# print "Problem?", brok.data['is_problem']
if dodeepcopy:
brok = copy.deepcopy(brok)
brok.prepare()
self.livestatus_broker.manage_brok(brok)
self.sched.brokers['Default-Broker']['broks'] = {}
def lines_equal(self, text1, text2):
# gets two multiline strings and compares the contents
# lifestatus output may not be in alphabetical order, so this
# function is used to compare unordere |
mjiang-27/django_learn | template_III/template_III/urls.py | Python | gpl-3.0 | 1,076 | 0.003717 | """template_III URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from app import views as app_views # new
urlpatterns = [
u | rl(r'^admin/', admin.site.urls),
url(r'^$', app_views.home, name='home'), # new
url(r'^add/(\d+)/(\d+)/$', app_views.add, name='add'), # new
# if method is replaced with method sum below, the app can still works fine
# url(r'^sum/(\d+)/(\d+)/$', app_views. | add, name='add'), # new
]
|
kustomzone/Rusthon | regtests/go/loop_arrays.py | Python | bsd-3-clause | 677 | 0.116691 | '''
array loop
'''
def main():
a = [1,2,3]
y = 0
for x in a:
y += x
TestError( y==6 )
z = ''
arr = ['a', 'b', 'c']
for v in arr:
z += v
TestError( z == 'abc' )
b = 0 |
for i in range(10):
b += 1
TestError( b == 10 )
b2 = 0
for i in range(5, 10):
b2 += 1
TestError( b2 == 5 )
c = ''
d = 0
for i,v in enumerate(arr):
c += v
d += i
TestError( c == 'abc' )
e = 0
for i in range( len(arr) ):
e += 1
TestError( e == 3 )
| s = a[:2]
#print('len of s:')
#print(len(s))
TestError( len(s)==2 )
s2 = a[2:]
#print('len of s2:')
#print(len(s2))
#print(s2[0])
TestError( len(s2)==1 )
#e = 0
#for i in s:
# e += i
#TestError( e == 3 )
|
globocom/vault | actionlogger/actionlogger.py | Python | apache-2.0 | 1,424 | 0 | # -*- coding: utf-8 -*-
import syslog
from .models import Audit
class ActionNotFound(Exception):
pass
class ActionLogger:
""" A wrapper to log actions """
def __init__(self):
self._actions = {
"create": "Criou",
"update": "Atualizou",
"delete": "Removeu",
"upload": "Realizou Upload",
"download": "Realizou Download",
"enable": "Habilitou",
"disable": "Desabilitou",
"restore": "Restaurou",
"update_trash": "Atualizou a lixeira do container",
"remove_cache": "Removeu do cache",
"update header": "Atualizou Header",
| "set_private": "Setou container como privado",
"set_public": "Setou container como publico",
}
def log(self, user, action, item):
if action not in self._actions.keys():
raise ActionNotFound("Invalid action: '{}'".format(action))
audit = Audit(user=user,
action=self._actions[action],
item=str(item))
audit.save()
msg = self._make_log_message(user, action, item)
| syslog.syslog(syslog.LOG_INFO, msg)
def _make_log_message(self, user, action, item):
return 'Usuario {} {} {}'.format(user,
self._actions[action],
str(item))
|
ciudadanointeligente/check-it | promises_web/tests/tags_extra_css_tests.py | Python | gpl-3.0 | 1,786 | 0.008399 | from django.test import TestCase
from django.utils.timezone import now
from promises.models import Promise, Category
from popolo.models import Person
from taggit.models import Tag
from ..models import TagExtraCss
nownow = now()
class TagsExtraCssTestCase(TestCase):
def setUp(self):
self.person = Person.obje | cts.create(name=u"A person")
self. | category = Category.objects.create(name="Education")
self.promise = Promise.objects.create(name="this is a promise",\
description="this is a description",\
date = nownow,\
person = self.person,
category = self.category
)
def test_a_tag_can_have_extra_css(self):
'''A tag can have an extra css to display extra things'''
self.promise.tags.add("test")
tag = self.promise.tags.first()
extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
self.assertTrue(extracss)
self.assertEquals(extracss.tag, tag)
self.assertEquals(extracss.classes, "extraclass")
def test_tag_css_unicode(self):
'''A tag css has a unicode'''
self.promise.tags.add("test")
tag = self.promise.tags.first()
extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
self.assertEquals(extracss.__unicode__(), u"extraclass for test")
def test_tag_related_name_(self):
'''A tag has extracsss'''
self.promise.tags.add("test")
tag = self.promise.tags.first()
extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
self.assertIn(extracss, tag.extracss.all())
|
opennewzealand/linz2osm | linz2osm/workslices/migrations/0004_auto__add_field_workslicefeature_layer_in_dataset.py | Python | gpl-3.0 | 7,883 | 0.00685 | # -*- coding: utf-8 -*-
# LINZ-2-OSM
# Copyright (C) 2010-2012 Koordinates Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'WorksliceFeature.layer_in_dataset'
db.add_column('workslices_workslicefeature', 'layer_in_dataset',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['data_dict.LayerInDataset']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'WorksliceFeature.layer_in_dataset'
db.delete_column('workslices_workslicefeature', 'layer_in_dataset_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'data_dict.dataset': {
'Meta': {'object_name': 'Dataset'},
'database_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.m | odels.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'primary_key': 'True'}),
'srid': ('django.db.models.fields.IntegerField', [], {})
},
'data_dict.layer': {
'Meta': {'object_name': 'Layer'},
'datasets': ('django.db.models.fi | elds.related.ManyToManyField', [], {'to': "orm['data_dict.Dataset']", 'through': "orm['data_dict.LayerInDataset']", 'symmetrical': 'False'}),
'entity': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'processors': ('linz2osm.utils.db_fields.JSONField', [], {'null': 'True', 'blank': 'True'})
},
'data_dict.layerindataset': {
'Meta': {'object_name': 'LayerInDataset'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_dict.Dataset']"}),
'extent': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'features_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_dict.Layer']"})
},
'workslices.workslice': {
'Meta': {'object_name': 'Workslice'},
'checked_out_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'checkout_extent': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'feature_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'followup_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_in_dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_dict.LayerInDataset']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'status_changed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'workslices.workslicefeature': {
'Meta': {'object_name': 'WorksliceFeature'},
'feature_id': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_in_dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data_dict.LayerInDataset']"}),
'workslice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workslices.Workslice']"})
}
}
complete_apps = ['workslices']
|
udacity/nose2-gae | tests/bad_app/bad_app.py | Python | lgpl-3.0 | 460 | 0.01087 | import socket
import wsgiref.handlers
class App:
def __call__( | self, environ, start_response):
# This won't work under GAE, since this is app code
here = socket.gethostbyname('localhost')
start_response('200 OK', [('Content-type', 'text/plain')])
return ['Hello %s' % here]
|
def application():
return App()
def main():
wsgiref.handlers.CGIHandler().run(application())
if __name__ == '__main__':
main()
|
bryan-lunt/execnet | testing/test_fixes.py | Python | mit | 1,009 | 0 | from execnet import Group
from execnet.gateway_bootstrap import fix_pid_for_jython_popen
def test_jython_bootstrap_not_on_remote():
group = Group()
try:
group.makegateway('popen//id=via')
group.makegateway('popen//via=via')
finally:
group.terminate(timeout=1.0)
def test_jython_bootstrap_fix():
group = Group()
gw = group.makegateway('popen')
popen = gw._io.popen
real_pid = popen.pid
try:
# n | othing happens when calling it on a normal seyup
fix_pid_for_jython_popen(gw)
assert popen.pid == real_pid
# if there is no pid for a popen gw, restore
popen.pid = None
fix_pid_for_jython_popen(gw)
assert popen.pid == real_pid
# if there is no pid for other gw, ignore - they are remote
gw.spec.popen = False
popen.pid = None
fix_pid_for_jython_popen(gw)
assert popen.pid is None
finally:
| popen.pid = real_pid
group.terminate(timeout=1)
|
jrbourbeau/composition | save_sim/save_sim.py | Python | mit | 4,424 | 0.000678 | #!/bin/sh /cvmfs/icecube.opensciencegrid.org/py2-v1/icetray-start
#METAPROJECT /data/user/jbourbeau/metaprojects/icerec/V05-00-00/build
import numpy as np
import time
import glob
import argparse
import os
from icecube import dataio, toprec, dataclasses, icetray, phys_services
from icecube.frame_object_diff.segments import uncompress
from I3Tray import *
from icecube.tableio import I3TableWriter
from icecube.hdfwriter import I3HDFTableService
from icecube.icetop_Level3_scripts.functions import count_stations
import composition.support_functions.simfunctions as simfunctions
import composition.support_functions.i3modules as i3modules
if __name__ == "__main__":
p = argparse.ArgumentParser(
description='Runs extra modules over a given fileList')
p.add_argument('-f', '--files', dest='files', nargs='*',
help='Files to run over')
p.add_argument('-s', '--sim', dest='sim',
help='Simulation dataset')
p.add_argument('-o', '--outfile', dest='outfile',
help='Output file')
args = p.parse_args()
# Starting parameters
IT_pulses, inice_pulses = simfunctions.reco_pulses()
# Keys to write to frame
keys = []
keys += ['I3EventHeader']
keys += ['ShowerPlane']
# keys += ['ShowerPlane', 'ShowerPlaneParams']
keys += ['ShowerCOG']
keys += ['MCPrimary']
keys += ['IceTopMaxSignal', 'IceTopMaxSignalString',
'IceTopMaxSignalInEdge', 'IceTopNeighbourMaxSignal',
'StationDensity', 'NStations']
keys += ['NChannels', 'InIce_charge', 'max_charge_frac']
keys += ['InIce_FractionContainment', 'IceTop_FractionContainment',
'LineFit_InIce_FractionContainment']
keys += ['Laputop', 'LaputopParams']
t0 = time.time()
# Construct list of non-truncated files to process
# icetray.set_log_level(icetray.I3LogLevel.LOG_DEBUG)
good_file_list = []
for test_file in args.files:
try:
test_tray = I3Tray()
test_tray.context['I3FileStager'] = dataio.get_stagers(
staging_directory=os.environ['_CONDOR_SCRATCH_DIR'])
test_tray.Add('I3Reader', FileName=test_file)
test_tray.Add(uncompress, 'uncompress')
test_tray.Execute()
test_tray.Finish()
good_file_list.append(test_file)
except:
print('file {} is truncated'.format(test_file))
pass
del test_tray
tray = I3Tray()
tray.context['I3FileStager'] = dataio.get_stagers(
staging_directory=os.environ['_CONDOR_SCRATCH_DIR'])
# icetray.logging.log_dedug('good_file_list = {}'.format(good_file_list))
tray.Add('I3Reader', FileNameList=good_file_list)
# Uncompress Level3 diff files
tray.Add(uncompress, 'uncompress')
hdf = I3HDFTableService(args.outfile)
# Filter out non-coincident P frames
tray.Add(lambda frame: frame['IceTopInIce_StandardFilter'].value)
def get_nstations(frame):
nstation = 0
if IT_pulses in frame:
nstation = count_stations(
dataclasses.I3RecoPulseSeriesMap.from_frame(frame, IT_pulses))
frame.Put('NStations', icetray.I3Int(nstation))
tray.Add(get_nstations)
# def get_inice_charge(frame):
# q_tot = 0.0
# n_channels = 0
# if inice_pulses in frame:
# VEMpulses = frame[inice_pulses]
# if VEMpulses | .__class__ == dataclasses.I3RecoPu | lseSeriesMapMask:
# VEMpulses = VEMpulses.apply(frame)
#
# for om, pulses in VEMpulses:
# n_channels += 1
# for pulse in pulses:
# q_tot += pulse.charge
#
# frame.Put('InIce_charge', dataclasses.I3Double(q_tot))
# frame.Put('NChannels', icetray.I3Int(n_channels))
# return
# tray.Add(get_inice_charge)
# Add total inice charge to frame
tray.Add(i3modules.AddInIceCharge, inice_pulses='SRTCoincPulses')
# Add containment to frame
tray.Add(i3modules.AddMCContainment)
tray.Add(i3modules.AddInIceRecoContainment)
#====================================================================
# Finish
tray.Add(I3TableWriter, tableservice=hdf, keys=keys,
SubEventStreams=['ice_top'])
tray.Execute()
tray.Finish()
print('Time taken: {}'.format(time.time() - t0))
|
klette/comics | comics/comics/applegeekslite.py | Python | agpl-3.0 | 688 | 0.002907 | from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.meta.base import MetaBase
class Meta(MetaBase):
n | ame = 'AppleGeeks Lite'
language = 'en'
url = 'http://www.applegeeks.com/'
start_date = '2006-04-18'
rights = 'Mohammad Haque & Ananth Panagariya'
class Crawler(CrawlerBase):
history_capable_days = 30
schedule = | None
time_zone = -5
def crawl(self, pub_date):
feed = self.parse_feed('http://www.applegeeks.com/rss/?cat=lite')
for entry in feed.for_date(pub_date):
url = entry.summary.src('img')
title = entry.title.replace('AG Lite - ', '')
return CrawlerImage(url, title)
|
modelbrouwers/modelbouwdag.nl | src/modelbouwdag/exhibitors/migrations/0003_auto_20160409_2153.py | Python | mit | 448 | 0 | # -*- | coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exhibitors', '0002_auto_20160409_2110'),
]
operations = [
migratio | ns.AlterField(
model_name='exhibitorlistplugin',
name='for_event',
field=models.ForeignKey(to='events.Event', verbose_name='event'),
),
]
|
gdementen/PyTables | bench/table-bench.py | Python | bsd-3-clause | 16,167 | 0.003835 | #!/usr/bin/env python
from __future__ import print_function
import numpy as NP
from tables import *
# This class is accessible only for the examples
class Small(IsDescription):
var1 = StringCol(itemsize=4, pos=2)
var2 = Int32Col(pos=1)
var3 = Float64Col(pos=0)
# Define a user record to characterize some kind of particles
class Medium(IsDescription):
name = StringCol(itemsize=16, pos=0) # 16-character String
float1 = Float64Col(shape=2, dflt=NP.arange(2), pos=1)
#float1 = Float64Col(dflt=2.3)
#float2 = Float64Col(dflt=2.3)
# zADCcount = Int16Col() # | signed short integer
ADCcount = Int32Col(pos=6) # signed short integer
grid_i = Int32Col(pos=7) # integer
grid_j = Int32Col(pos=8) # integer
pressure = Float32Col(pos=9) # float (single-precision)
energy = Float64Col(pos=2) # double (double-precision)
# unalig = Int8Col() # just to unalign data
# Define | a user record to characterize some kind of particles
class Big(IsDescription):
name = StringCol(itemsize=16) # 16-character String
float1 = Float64Col(shape=32, dflt=NP.arange(32))
float2 = Float64Col(shape=32, dflt=2.2)
TDCcount = Int8Col() # signed short integer
#ADCcount = Int32Col()
# ADCcount = Int16Col() # signed short integer
grid_i = Int32Col() # integer
grid_j = Int32Col() # integer
pressure = Float32Col() # float (single-precision)
energy = Float64Col() # double (double-precision)
def createFile(filename, totalrows, filters, recsize):
# Open a file in "w"rite mode
fileh = open_file(filename, mode="w", title="Table Benchmark",
filters=filters)
# Table title
title = "This is the table title"
# Create a Table instance
group = fileh.root
rowswritten = 0
for j in range(3):
# Create a table
if recsize == "big":
table = fileh.create_table(group, 'tuple' + str(j), Big, title,
None,
totalrows)
elif recsize == "medium":
table = fileh.create_table(group, 'tuple' + str(j), Medium, title,
None,
totalrows)
elif recsize == "small":
table = fileh.create_table(group, 'tuple' + str(j), Small, title,
None,
totalrows)
else:
raise RuntimeError("This should never happen")
table.attrs.test = 2
rowsize = table.rowsize
# Get the row object associated with the new table
d = table.row
# Fill the table
if recsize == "big":
for i in range(totalrows):
# d['name'] = 'Part: %6d' % (i)
d['TDCcount'] = i % 256
#d['float1'] = NP.array([i]*32, NP.float64)
#d['float2'] = NP.array([i**2]*32, NP.float64)
#d['float1'][0] = float(i)
#d['float2'][0] = float(i*2)
# Common part with medium
d['grid_i'] = i
d['grid_j'] = 10 - i
d['pressure'] = float(i * i)
# d['energy'] = float(d['pressure'] ** 4)
d['energy'] = d['pressure']
# d['idnumber'] = i * (2 ** 34)
d.append()
elif recsize == "medium":
for i in range(totalrows):
#d['name'] = 'Part: %6d' % (i)
#d['float1'] = NP.array([i]*2, NP.float64)
#d['float1'] = arr
#d['float1'] = i
#d['float2'] = float(i)
# Common part with big:
d['grid_i'] = i
d['grid_j'] = 10 - i
d['pressure'] = i * 2
# d['energy'] = float(d['pressure'] ** 4)
d['energy'] = d['pressure']
d.append()
else: # Small record
for i in range(totalrows):
#d['var1'] = str(random.randrange(1000000))
#d['var3'] = random.randrange(10000000)
d['var1'] = str(i)
#d['var2'] = random.randrange(totalrows)
d['var2'] = i
#d['var3'] = 12.1e10
d['var3'] = totalrows - i
d.append() # This is a 10% faster than table.append()
rowswritten += totalrows
if recsize == "small":
# Testing with indexing
pass
# table._createIndex("var3", Filters(1,"zlib",shuffle=1))
# table.flush()
group._v_attrs.test2 = "just a test"
# Create a new group
group2 = fileh.create_group(group, 'group' + str(j))
# Iterate over this new group (group2)
group = group2
table.flush()
# Close the file (eventually destroy the extended type)
fileh.close()
return (rowswritten, rowsize)
def readFile(filename, recsize, verbose):
# Open the HDF5 file in read-only mode
fileh = open_file(filename, mode="r")
rowsread = 0
for groupobj in fileh.walk_groups(fileh.root):
# print "Group pathname:", groupobj._v_pathname
row = 0
for table in fileh.list_nodes(groupobj, 'Table'):
rowsize = table.rowsize
print("reading", table)
if verbose:
print("Max rows in buf:", table.nrowsinbuf)
print("Rows in", table._v_pathname, ":", table.nrows)
print("Buffersize:", table.rowsize * table.nrowsinbuf)
print("MaxTuples:", table.nrowsinbuf)
if recsize == "big" or recsize == "medium":
# e = [ p.float1 for p in table.iterrows()
# if p.grid_i < 2 ]
#e = [ str(p) for p in table.iterrows() ]
# if p.grid_i < 2 ]
# e = [ p['grid_i'] for p in table.iterrows()
# if p['grid_j'] == 20 and p['grid_i'] < 20 ]
# e = [ p['grid_i'] for p in table
# if p['grid_i'] <= 2 ]
# e = [ p['grid_i'] for p in table.where("grid_i<=20")]
# e = [ p['grid_i'] for p in
# table.where('grid_i <= 20')]
e = [p['grid_i'] for p in
table.where('(grid_i <= 20) & (grid_j == 20)')]
# e = [ p['grid_i'] for p in table.iterrows()
# if p.nrow() == 20 ]
# e = [ table.delrow(p.nrow()) for p in table.iterrows()
# if p.nrow() == 20 ]
# The version with a for loop is only 1% better than
# comprenhension list
#e = []
# for p in table.iterrows():
# if p.grid_i < 20:
# e.append(p.grid_j)
else: # small record case
# e = [ p['var3'] for p in table.iterrows()
# if p['var2'] < 20 and p['var3'] < 20 ]
# e = [ p['var3'] for p in table.where("var3 <= 20")
# if p['var2'] < 20 ]
# e = [ p['var3'] for p in table.where("var3 <= 20")]
# Cuts 1) and 2) issues the same results but 2) is about 10 times faster
# Cut 1)
# e = [ p.nrow() for p in
# table.where(table.cols.var2 > 5)
# if p["var2"] < 10]
# Cut 2)
# e = [ p.nrow() for p in
# table.where(table.cols.var2 < 10)
# if p["var2"] > 5]
# e = [ (p._nrow,p["var3"]) for p in
# e = [ p["var3"] for p in
# table.where(table.cols.var3 < 10)]
# table.where(table.cols.var3 < 10)]
# table if p["var3"] <= 10]
# e = [ p['var3'] for p in table.where("var3 <= 20")]
# |
fin/froide | froide/publicbody/migrations/0024_auto_20181114_1516.py | Python | mit | 556 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-14 14:16
from __future__ import unicode_literals
from django.db import migrations
def move_region_to_regions(apps, schema_editor):
PublicBody = apps.get_model("publicbody", "PublicBody")
for pb in PublicBody.objects.filter(region__isnull=False | ):
| pb.regions.add(pb.region)
class Migration(migrations.Migration):
dependencies = [
("publicbody", "0023_auto_20181114_1515"),
]
operations = [
migrations.RunPython(move_region_to_regions),
]
|
michaelrosejr/pyaos6 | netmiko/checkpoint/checkpoint_gaia_ssh.py | Python | mit | 774 | 0 | from __future__ import unicode_literals
from netmiko.base_connection import BaseConnection
class CheckPointGaiaSSH(BaseConnection):
    """
    Implements methods for communicating with Check Point Gaia
    firewalls.
    """
    def session_preparation(self):
        """
        Prepare the session after the connection has been established.
        Set the base prompt for interaction ('>') and disable output
        paging so multi-page command output comes back in one read.
        """
        self._test_channel_read()
        self.set_base_prompt()
        self.disable_paging(command="set clienv rows 0\n")

    def config_mode(self, config_command=''):
        """No config mode for Check Point devices."""
        return ''

    def exit_config_mode(self, exit_config=''):
        """No config mode for Check Point devices."""
        return ''
|
belangeo/pyo | tests/test_Expr_object/expr_test_07_load.py | Python | lgpl-3.0 | 678 | 0.001475 | from pyo import *
s = Server().boot()
s.amp = 0.1

# Select which expression demo to run:
# 0 = PWM with an LFO-modulated duty cycle, 1 = notch-filtered square,
# 2 = peaking-filtered noise with a swept center frequency.
TEST = 2

if TEST == 0:
    t = """
(load utils.expr) // scalef
(load generators.expr) // osc, pwm
(pwm 110
    (scalef (osc 0.2) 0.05 0.95)
)
"""
    ex = Expr(Sig(0), t).out()
elif TEST == 1:
    t = """
(load generators.expr) // square
(load filters.expr) //notch
(notch (square 86) 5000 0.75)
"""
    ex = Expr(Sig(0), t).out()
elif TEST == 2:
    t = """
(load utils.expr) // scalef
(load generators.expr) // noise
(load filters.expr) // peak
(peak (noise 0.5) (scalef (osc 0.2) 1000 5000) 0.9)
"""
    ex = Expr(Sig(0), t).out()

# NOTE(review): any other TEST value leaves ``ex`` undefined and the lines
# below would raise NameError — acceptable while TEST is hard-coded above.
ex.editor()
sc = Scope(ex)
sp = Spectrum(ex)
s.gui(locals())
|
alvico/solaris | solaris/helpers.py | Python | bsd-2-clause | 1,048 | 0 | import fileinput
import itertools
import subprocess
def insert_line_file(file, new_line, anchor):
insert = False
for line in fileinput.input(file, inplace=1):
if line.startswith(anchor):
insert = True
else:
if insert:
print new_line
insert = Fa | lse
print line
def insert_line_after(content, new_line, anchor):
    """Return a copy of *content* (a list of lines) with *new_line*
    inserted after the first line that starts with *anchor*.

    Returns an empty list when no line matches (legacy behavior,
    preserved for existing callers). *content* is never mutated.
    """
    for i, line in enumerate(content, start=1):
        if line.startswith(anchor):
            # Equivalent to the original itertools.islice head/tail split.
            return content[:i] + [new_line] + content[i:]
    return []
def docker_exec(cmd, container, usr="root"):
    """Run *cmd* inside *container* via ``docker exec`` as user *usr*.

    Returns the ``(stdout, stderr)`` tuple from ``Popen.communicate()``
    (stderr is ``None`` since it is not captured).

    WARNING: the command line is built by string interpolation and run
    with ``shell=True`` — never pass untrusted input for any argument.
    """
    full_cmd = "docker exec --user {0} {1} {2}".format(usr, container, cmd)
    p = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, shell=True)
    return p.communicate()
|
zimeon/iiif | tests/test_auth_basic.py | Python | gpl-3.0 | 3,828 | 0.000522 | """Test code for iiif.auth_basic.
See http://flask.pocoo.org/docs/0.10/testing/ for Flask notes.
"""
from flask import Flask, request, make_response, redirect
from werkzeug.datastructures import Headers
import base64
import json
import re
import unittest
from | iiif.auth_basic import IIIFAuthBasic
dummy_app = Flask('dummy')
class TestAll(unittest.TestCase):
    """Tests for IIIFAuthBasic handlers."""

    def setUp(self):
        """Set up dummy app."""
        self.app = dummy_app.test_client()

    def tearDown(self):
        """No op."""
        pass

    def test01_init(self):
        """Test initialization."""
        auth = IIIFAuthBasic()
        self.assertTrue(re.match(r'\d+_', auth.cookie_prefix))
        auth = IIIFAuthBasic(cookie_prefix='abc')
        self.assertEqual(auth.cookie_prefix, 'abc')

    def test02_logout_service_description(self):
        """Test logout_service_description."""
        auth = IIIFAuthBasic()
        auth.logout_uri = 'xyz'
        lsd = auth.logout_service_description()
        self.assertEqual(lsd['profile'], 'http://iiif.io/api/auth/1/logout')
        self.assertEqual(lsd['@id'], 'xyz')
        self.assertEqual(lsd['label'], 'Logout from image server (basic auth)')

    def test03_info_authn(self):
        """Test info_authn."""
        with dummy_app.test_request_context('/a_request'):
            auth = IIIFAuthBasic()
            ia = auth.info_authn()
            self.assertEqual(ia, False)

    def test04_image_authn(self):
        """Test image_authn."""
        with dummy_app.test_request_context('/a_request'):
            auth = IIIFAuthBasic()
            ia = auth.image_authn()
            self.assertEqual(ia, False)

    def test05_login_handler(self):
        """Test login_handler with no, good and bad credentials."""
        with dummy_app.test_request_context('/a_request'):
            auth = IIIFAuthBasic()
            response = auth.login_handler()
            self.assertEqual(response.status_code, 401)
            self.assertEqual(response.headers['Content-type'], 'text/html')
            html = response.get_data().decode('utf-8')  # data is bytes in python3
            self.assertEqual(html, '')
        # add good login params and check OK, window close
        h = Headers()
        h.add('Authorization', b'Basic ' +
              base64.b64encode(b'userpass:userpass'))
        with dummy_app.test_request_context('/a_request', headers=h):
            response = auth.login_handler()
            self.assertEqual(response.status_code, 200)
            html = response.get_data().decode('utf-8')
            self.assertTrue(
                re.search(
                    r'<script>window.close\(\);</script>',
                    html))
            set_cookie = response.headers['Set-Cookie']
            self.assertTrue(
                re.search(
                    auth.account_cookie_name +
                    '=valid-http-basic-login',
                    set_cookie))
        # add bad login params and check fail
        h = Headers()
        h.add('Authorization', b'Basic ' +
              base64.b64encode(b'userpass:bad-pass'))
        with dummy_app.test_request_context('/a_request', headers=h):
            response = auth.login_handler()
            self.assertEqual(response.status_code, 401)

    def test06_logout_handler(self):
        """Test logout_handler."""
        with dummy_app.test_request_context('/a_request'):
            auth = IIIFAuthBasic()
            response = auth.logout_handler()
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.headers['Content-type'], 'text/html')
            html = response.get_data().decode('utf-8')  # get_data is bytes in python3
            self.assertTrue(
                re.search(
                    r'<script>window.close\(\);</script>',
                    html))
|
Sorsly/subtle | google-cloud-sdk/lib/surface/functions/describe.py | Python | mit | 2,021 | 0.002474 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions describe' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Describe(base.DescribeCommand):
    """Show description of a function."""

    @staticmethod
    def Args(parser):
        """Register flags for this command."""
        parser.add_argument(
            'name', help='The name of the function to describe.',
            type=util.ValidateFunctionNameOrRaise)

    @util.CatchHTTPErrorRaiseHTTPException
    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided
            to this command invocation.

        Returns:
          The specified function with its description and configured filter.
        """
        client = self.context['functions_client']
        messages = self.context['functions_messages']
        project = properties.VALUES.core.project.Get(required=True)
        registry = self.context['registry']
        function_ref = registry.Parse(
            args.name, params={'projectsId': project, 'locationsId': args.region},
            collection='cloudfunctions.projects.locations.functions')
        # TODO(user): Use resources.py here after b/21908671 is fixed.
        return client.projects_locations_functions.Get(
            messages.CloudfunctionsProjectsLocationsFunctionsGetRequest(
                name=function_ref.RelativeName()))
|
joeatwork/python-lzw | tests/__init__.py | Python | mit | 36 | 0 | # Put your tests in this direc | tory.
| |
wegamekinglc/alpha-mind | alphamind/tests/test_suite.py | Python | mit | 4,079 | 0.001716 | # -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import os
# When True, the tests that need a live data engine are skipped entirely.
SKIP_ENGINE_TESTS = True
if not SKIP_ENGINE_TESTS:
    try:
        DATA_ENGINE_URI = os.environ['DB_URI']
    except KeyError:
        # NOTE(review): hard-coded fallback credentials checked into source —
        # consider failing here instead when DB_URI is unset.
        DATA_ENGINE_URI = "mysql+mysqldb://reader:Reader#2020@121.37.138.1:13317/vision?charset=utf8"
else:
    # No database access is needed when the engine tests are skipped.
    DATA_ENGINE_URI = None
if __name__ == '__main__':
    from simpleutils import add_parent_path
    add_parent_path(__file__, 3)
    from simpleutils import TestRunner
    from alphamind.utilities import alpha_logger
    from alphamind.tests.data.test_neutralize import TestNeutralize
    from alphamind.tests.data.test_standardize import TestStandardize
    from alphamind.tests.data.test_winsorize import TestWinsorize
    from alphamind.tests.data.test_quantile import TestQuantile
    from alphamind.tests.data.engines.test_sql_engine import TestSqlEngine
    from alphamind.tests.data.engines.test_universe import TestUniverse
    from alphamind.tests.portfolio.test_constraints import TestConstraints
    from alphamind.tests.portfolio.test_evolver import TestEvolver
    from alphamind.tests.portfolio.test_longshortbuild import TestLongShortBuild
    from alphamind.tests.portfolio.test_rankbuild import TestRankBuild
    from alphamind.tests.portfolio.test_percentbuild import TestPercentBuild
    from alphamind.tests.portfolio.test_linearbuild import TestLinearBuild
    from alphamind.tests.portfolio.test_meanvariancebuild import TestMeanVarianceBuild
    from alphamind.tests.portfolio.test_riskmodel import TestRiskModel
    from alphamind.tests.settlement.test_simplesettle import TestSimpleSettle
    from alphamind.tests.analysis.test_riskanalysis import TestRiskAnalysis
    from alphamind.tests.analysis.test_perfanalysis import TestPerformanceAnalysis
    from alphamind.tests.analysis.test_factoranalysis import TestFactorAnalysis
    from alphamind.tests.analysis.test_quantilieanalysis import TestQuantileAnalysis
    from alphamind.tests.model.test_modelbase import TestModelBase
    from alphamind.tests.model.test_linearmodel import TestLinearModel
    from alphamind.tests.model.test_treemodel import TestTreeModel
    from alphamind.tests.model.test_loader import TestLoader
    from alphamind.tests.model.test_composer import TestComposer
    from alphamind.tests.execution.test_naiveexecutor import TestNaiveExecutor
    from alphamind.tests.execution.test_thresholdexecutor import TestThresholdExecutor
    from alphamind.tests.execution.test_targetvolexecutor import TestTargetVolExecutor
    from alphamind.tests.execution.test_pipeline import TestExecutionPipeline
    from alphamind.tests.portfolio.test_optimizers import TestOptimizers

    # Every test case to execute, in suite order.
    runner = TestRunner([TestNeutralize,
                         TestStandardize,
                         TestWinsorize,
                         TestQuantile,
                         TestSqlEngine,
                         TestUniverse,
                         TestConstraints,
                         TestEvolver,
                         TestLongShortBuild,
                         TestRankBuild,
                         TestPercentBuild,
                         TestLinearBuild,
                         TestMeanVarianceBuild,
                         TestRiskModel,
                         TestSimpleSettle,
                         TestRiskAnalysis,
                         TestPerformanceAnalysis,
                         TestFactorAnalysis,
                         TestQuantileAnalysis,
                         TestModelBase,
                         TestLinearModel,
                         TestTreeModel,
                         TestLoader,
                         TestComposer,
                         TestNaiveExecutor,
                         TestThresholdExecutor,
                         TestTargetVolExecutor,
                         TestExecutionPipeline,
                         TestOptimizers],
                        alpha_logger)
    runner.run()
|
nthitz/dreamlastnight | pgutils.py | Python | mit | 1,920 | 0.0125 | #collection of stuff to make postgres easier
import psycopg2
import os
pgConnectionString = "dbname=" + os.environ['PGNAME'] + " user=" + os.environ['PGUSER'] + " password=" + os.environ['PGPASS']
pg = psycopg2.connect(pgConnectionString)
pgCursor = pg.cursor()
def close():
    # Commit any pending work, then tear down; order matters: the cursor
    # must be closed before its owning connection.
    pg.commit()
    pgCursor.close()
    pg.close()
def getCursor():
    """Return the module-level shared cursor."""
    return pgCursor

# In-memory cache of fetched relations, keyed by relation name.
savedRelations = {}
def getRelationByKeys(relation, valueKey, cached=True):
    """Return {primary_key: row_dict} for *relation*, fetching and caching
    the table on first use (or always when *cached* is False)."""
    requestRelation(relation, valueKey, cached)
    return savedRelations[relation]['keys']
def getRelationByValues(relation, valueKey, cached=True):
    """Return {value: row_dict} for *relation*, fetching/caching as needed."""
    requestRelation(relation, valueKey, cached)
    relation_cache = savedRelations[relation]
    return relation_cache['values']
def getQueryDictionary(q, *arg):
    """Execute query *q* with parameters *arg* on the shared cursor and
    return the rows as a list of dicts keyed by column name."""
    pgCursor.execute(q, arg)
    results = []
    description = pgCursor.description
    for result in pgCursor:
        # Pair each cell with its column name from the cursor description
        # (avoids the index loop and the shadowed builtin ``tuple``).
        resultMap = {}
        for column_desc, cell in zip(description, result):
            resultMap[column_desc.name] = cell
        results.append(resultMap)
    return results
def requestRelation(relation, valueKey, cached=True):
    """Fetch *relation* into the savedRelations cache, indexed both by
    primary key (``<relation>_id``) and by the *valueKey* column.

    WARNING: *relation* is interpolated directly into the SQL text — only
    pass trusted identifiers, never user input.
    """
    global savedRelations
    # Serve from the cache when allowed and already populated.
    if cached and relation in savedRelations:
        return
    # (Re)fetch the whole table and rebuild both indexes.
    results = getQueryDictionary('SELECT * FROM ' + relation)
    relationByKeys = {}
    relationByValues = {}
    for result in results:
        row_id = result[relation + '_id']
        value = result[valueKey]
        relationByKeys[row_id] = result
        relationByValues[value] = result
    savedRelations[relation] = {
        'keys': relationByKeys,
        'values': relationByValues,
    }
# returns term id and whether or not term is expired
def selectOrInsertTerm(termValue, termType):
    """Select an existing term or insert a new one.

    Intended to return (term_id, is_expired); not yet implemented.
    """
    pass
domluna/ml_p5_capstone | filters.py | Python | mit | 1,135 | 0.003524 | """Filters"""
from skimage.color import rgb2gray
from skimage.transform import resize
import numpy as np
from | modular_rl import *
class ObFilterFF(object):
    """Observation filter for feed-forward nets: grayscale, resize to
    (new_height, new_width), then flatten to a 1-D vector."""

    def __init__(self, new_width, new_height):
        self.w = new_width
        self.h = new_height
        self.f = Flatten()

    def __call__(self, ob):
        # Grayscale first, then resize; Flatten turns the 2-D image into 1-D.
        out = resize(rgb2gray(ob), (self.h, self.w))
        return self.f(out)

    def output_shape(self, input_shape):
        return (self.h * self.w,)
class ObFilterCNN(object):
    """Observation filter for conv nets: grayscale + resize, keeping the
    2-D layout with a single trailing channel axis."""

    def __init__(self, new_width, new_height):
        self.w = new_width
        self.h = new_height

    def __call__(self, ob):
        gray = rgb2gray(ob)
        out = resize(gray, (self.h, self.w))
        return out.reshape(out.shape + (1,))

    def output_shape(self, input_shape):
        return (self.h, self.w, 1)
class ActFilter(object):
    """Maps a discrete agent action index to a one-hot environment action
    vector via *lookup*."""

    def __init__(self, lookup, num_actions=43):
        # lookup: sequence mapping agent action index -> environment action id.
        self.lookup = lookup
        self.n = len(self.lookup)
        # Doom exposes 43 actions; parameterized (backward-compatible
        # default) so the filter generalizes to other action spaces.
        self.num_actions = num_actions

    def __call__(self, act):
        action_list = np.zeros(self.num_actions)
        action_list[self.lookup[act]] = 1
        return action_list

    def output_shape(self):
        return self.n
|
Orav/kbengine | kbe/src/lib/python/Lib/tabnanny.py | Python | lgpl-3.0 | 11,731 | 0.001961 | #! /usr/bin/env python3
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
    """Write *args* to stderr, space-separated, with a trailing newline."""
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")
def main():
    """Parse -q/-v options from the command line, then check() each path."""
    global verbose, filename_only
    try:
        opts, args = getopt.getopt(sys.argv[1:], "qv")
    except getopt.error as msg:
        errprint(msg)
        return
    for opt, _ in opts:
        if opt == '-q':
            filename_only += 1
        elif opt == '-v':
            verbose += 1
    if not args:
        errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
        return
    for arg in args:
        check(arg)
class NannyNag(Exception):
    """Raised by tokeneater() when an ambiguous indent is detected.

    Captured and handled in check().
    """
    def __init__(self, lineno, msg, line):
        self.lineno = lineno
        self.msg = msg
        self.line = line

    def get_lineno(self):
        """Line number where the problem was found."""
        return self.lineno

    def get_msg(self):
        """Human-readable description of the problem."""
        return self.msg

    def get_line(self):
        """Text of the offending line."""
        return self.line
def check(file):
    """check(file_or_dir)
    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # Recurse into non-symlink subdirectories and check any *.py file.
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        # tokenize.open honors the file's PEP 263 encoding declaration.
        f = tokenize.open(file)
    except OSError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return

    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return

    except NannyNag as nag:
        # NannyNag carries the offending line number/text; report and stop.
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            # Quote filenames containing spaces so output stays parseable.
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return

    finally:
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
if verbose:
print("%r: Clean bill of health." % (file,))
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
    def __init__(self, ws):
        # Scan the leading whitespace of *ws*:
        #   n  - total number of leading whitespace characters
        #   nt - number of tabs among them
        #   b  - length of the space run currently being accumulated
        #   count[i] - occurrences of (i spaces immediately followed by a tab)
        self.raw = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                # Grow count so index b (current space-run length) exists.
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                # First non-blank character ends the whitespace prefix.
                break
        self.n = n
        self.nt = nt
        # Normal form: (count tuple, number of trailing spaces).
        self.norm = tuple(count), b
        # True iff raw[:n] is of the form (T*)(S*).
        self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i//tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i//ts + 1)*ts*count[i] =
# trailing + ts * sum (i//ts + 1)*count[i] =
# trailing + ts * sum i//ts*count[i] + count[i] =
# trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i//ts*count[i]) + num_tabs]
# and note that i//ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i//tabsize * count[i]
return trailing + tabsize * (il + self.nt)
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, | in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
| a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= othe |
hip-odoo/odoo | addons/website_portal/controllers/main.py | Python | agpl-3.0 | 4,979 | 0.002008 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http
from odoo.http import request
from odoo import tools
from odoo.tools.translate import _
from odoo.fields import Date
class website_account(http.Controller):
    """Website portal controller: /my home page and billing-details form."""

    MANDATORY_BILLING_FIELDS = ["name", "phone", "email", "street", "city", "country_id"]
    OPTIONAL_BILLING_FIELDS = ["zipcode", "state_id", "vat", "company_name"]

    _items_per_page = 20

    def _prepare_portal_layout_values(self):
        """ prepare the values to render portal layout """
        partner = request.env.user.partner_id
        # get customer sales rep
        if partner.user_id:
            sales_rep = partner.user_id
        else:
            sales_rep = False
        values = {
            'sales_rep': sales_rep,
            'company': request.website.company_id,
            'user': request.env.user
        }
        return values

    def _get_archive_groups(self, model, domain=None, fields=None, groupby="create_date", order="create_date desc"):
        """Return one {date_begin, date_end, name, item_count} dict per
        read_group bucket of *model*, for archive navigation."""
        if not model:
            return []
        if domain is None:
            domain = []
        if fields is None:
            fields = ['name', 'create_date']
        groups = []
        for group in request.env[model]._read_group_raw(domain, fields=fields, groupby=groupby, orderby=order):
            # _read_group_raw yields the range as "begin/end" plus a label.
            dates, label = group[groupby]
            date_begin, date_end = dates.split('/')
            groups.append({
                'date_begin': Date.to_string(Date.from_string(date_begin)),
                'date_end': Date.to_string(Date.from_string(date_end)),
                'name': label,
                'item_count': group[groupby + '_count']
            })
        return groups

    @http.route(['/my', '/my/home'], type='http', auth="user", website=True)
    def account(self, **kw):
        """Render the portal home page."""
        values = self._prepare_portal_layout_values()
        return request.render("website_portal.portal_my_home", values)

    @http.route(['/my/account'], type='http', auth='user', website=True)
    def details(self, redirect=None, **post):
        """Display and process the billing-details form."""
        partner = request.env.user.partner_id
        values = {
            'error': {},
            'error_message': []
        }
        if post:
            error, error_message = self.details_form_validate(post)
            values.update({'error': error, 'error_message': error_message})
            values.update(post)
            if not error:
                # Only write whitelisted fields; map 'zipcode' to the model's 'zip'.
                values = {key: post[key] for key in self.MANDATORY_BILLING_FIELDS}
                values.update({key: post[key] for key in self.OPTIONAL_BILLING_FIELDS if key in post})
                values.update({'zip': values.pop('zipcode', '')})
                partner.sudo().write(values)
                if redirect:
                    return request.redirect(redirect)
                return request.redirect('/my/home')
        countries = request.env['res.country'].sudo().search([])
        states = request.env['res.country.state'].sudo().search([])
        values.update({
            'partner': partner,
            'countries': countries,
            'states': states,
            'has_check_vat': hasattr(request.env['res.partner'], 'check_vat'),
            'redirect': redirect,
        })
        return request.render("website_portal.details", values)

    def details_form_validate(self, data):
        """Validate the posted billing form; return (error dict, messages)."""
        error = dict()
        error_message = []

        # Validation of required fields
        for field_name in self.MANDATORY_BILLING_FIELDS:
            if not data.get(field_name):
                error[field_name] = 'missing'

        # email validation
        if data.get('email') and not tools.single_email_re.match(data.get('email')):
            error["email"] = 'error'
            error_message.append(_('Invalid Email! Please enter a valid email address.'))

        # vat validation
        if data.get("vat") and hasattr(request.env["res.partner"], "check_vat"):
            if request.website.company_id.sudo().vat_check_vies:
                # force full VIES online check
                check_func = request.env["res.partner"].vies_vat_check
            else:
                # quick and partial off-line checksum validation
                check_func = request.env["res.partner"].simple_vat_check
            vat_country, vat_number = request.env["res.partner"]._split_vat(data.get("vat"))
            if not check_func(vat_country, vat_number):  # simple_vat_check
                error["vat"] = 'error'

        # error message for empty required fields
        if [err for err in error.values() if err == 'missing']:
            error_message.append(_('Some required fields are empty.'))

        unknown = [k for k in data.iterkeys() if k not in self.MANDATORY_BILLING_FIELDS + self.OPTIONAL_BILLING_FIELDS]
        if unknown:
            error['common'] = 'Unknown field'
            error_message.append("Unknown field '%s'" % ','.join(unknown))

        return error, error_message
|
telminov/knowledge-base | kb/data.py | Python | mit | 1,498 | 0.004633 | __author__ = 'g10k'
"""Потом убрать в миграции"""
# ('Регистраторы','Регистраторы'),
# ('Менеджеры','Менеджеры'),
# ('Проф департамент','Проф департамент'),
# ('Аналитика','Аналитика'),
# ('Контроль качества','Контроль качества'),
import kb.models
# Seed data consumed by fill_departaments(): for each department, the app
# code names it can access and its display colour.
# NOTE(review): the legacy comment block above lists 'Аналитика' while the
# key here is 'Аналитики' — confirm which spelling the rest of the code uses.
departaments = {
    'Регистраторы': {
        'apps': ['lmk', 'prof'],
        'color': 'blue',
    },
    'Менеджеры': {
        'apps': ['crm', 'out'],
        'color': 'green'
    },
    'Проф департамент': {
        'apps': ['prof', 'crm', 'out'],
        'color': 'purple',
    },
    'Аналитики': {
        'apps': ['analytic', 'crm', 'lmk', 'prof'],
        'color': 'red'
    },
    'Контроль качества': {
        'apps': ['qq'],
        'color': 'pink'
    }
}
def fill_departaments():
    """Create/refresh Departament rows (and their App links) from the
    ``departaments`` seed dict."""
    for department_name, info in departaments.items():
        apps = info.get('apps', [])
        # BUG FIX: the original iterated an always-empty ``set_apps`` set,
        # so the App rows referenced below were never created.
        for app in apps:
            kb.models.App.objects.get_or_create(name=app)
        color = info.get('color')
        departament, _created = kb.models.Departament.objects.get_or_create(name=department_name)
        app_objects = kb.models.App.objects.filter(name__in=apps)
        departament.apps.set(app_objects, clear=True)
        departament.color = color
        departament.save()
|
kennethd/moto | moto/ec2/responses/elastic_ip_addresses.py | Python | apache-2.0 | 6,068 | 0.003461 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import sequence_from_querystring
class ElasticIPAddresses(BaseResponse):
    """EC2 Elastic IP API responses (allocate/associate/describe/release)."""

    def allocate_address(self):
        if "Domain" in self.querystring:
            domain = self.querystring.get('Domain')[0]
        else:
            domain = "standard"
        address = self.ec2_backend.allocate_address(domain)
        template = self.response_template(ALLOCATE_ADDRESS_RESPONSE)
        return template.render(address=address)

    def associate_address(self):
        instance = eni = None
        if "InstanceId" in self.querystring:
            instance = self.ec2_backend.get_instance(self.querystring['InstanceId'][0])
        elif "NetworkInterfaceId" in self.querystring:
            eni = self.ec2_backend.get_network_interface(self.querystring['NetworkInterfaceId'][0])
        else:
            self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect InstanceId/NetworkId parameter.")

        reassociate = False
        if "AllowReassociation" in self.querystring:
            reassociate = self.querystring['AllowReassociation'][0] == "true"

        # NOTE(review): the error branches assume raise_error() raises;
        # if it ever returns, ``eip`` below would be unbound — confirm.
        if instance or eni:
            if "PublicIp" in self.querystring:
                eip = self.ec2_backend.associate_address(instance=instance, eni=eni, address=self.querystring['PublicIp'][0], reassociate=reassociate)
            elif "AllocationId" in self.querystring:
                eip = self.ec2_backend.associate_address(instance=instance, eni=eni, allocation_id=self.querystring['AllocationId'][0], reassociate=reassociate)
            else:
                self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.")
        else:
            self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect either instance or ENI.")

        template = self.response_template(ASSOCIATE_ADDRESS_RESPONSE)
        return template.render(address=eip)

    def describe_addresses(self):
        template = self.response_template(DESCRIBE_ADDRESS_RESPONSE)
        if "Filter.1.Name" in self.querystring:
            raise NotImplementedError("Filtering not supported in describe_address.")
        elif "PublicIp.1" in self.querystring:
            public_ips = sequence_from_querystring("PublicIp", self.querystring)
            addresses = self.ec2_backend.address_by_ip(public_ips)
        elif "AllocationId.1" in self.querystring:
            allocation_ids = sequence_from_querystring("AllocationId", self.querystring)
            addresses = self.ec2_backend.address_by_allocation(allocation_ids)
        else:
            addresses = self.ec2_backend.describe_addresses()
        return template.render(addresses=addresses)

    def disassociate_address(self):
        if "PublicIp" in self.querystring:
            self.ec2_backend.disassociate_address(address=self.querystring['PublicIp'][0])
        elif "AssociationId" in self.querystring:
            self.ec2_backend.disassociate_address(association_id=self.querystring['AssociationId'][0])
        else:
            self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AssociationId parameter.")
        return self.response_template(DISASSOCIATE_ADDRESS_RESPONSE).render()

    def release_address(self):
        if "PublicIp" in self.querystring:
            self.ec2_backend.release_address(address=self.querystring['PublicIp'][0])
        elif "AllocationId" in self.querystring:
            self.ec2_backend.release_address(allocation_id=self.querystring['AllocationId'][0])
        else:
            self.ec2_backend.raise_error("MissingParameter", "Invalid request, expect PublicIp/AllocationId parameter.")
        return self.response_template(RELEASE_ADDRESS_RESPONSE).render()
ALLOCATE_ADDRESS_RESPONSE = """<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
</AllocateAddressResponse>"""
ASSOCIATE_ADDRESS_RESPONSE = """<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</AssociateAddressResponse>"""
DESCRIBE_ADDRESS_RESPONSE = """<DescribeAddressesResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<addressesSet>
{% for address in addresses %}
<item>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.instance %}
<instanceId>{{ address.instance.id }}</instanceId>
{% else %}
<instanceId/>
{% endif %}
{% if address.eni %}
<networkInterfaceId>{{ address.eni.id }}</networkInterfaceId>
{% else %}
<networkInterfaceId/>
{% endif %}
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</item>
{% endfor %}
</addressesSet>
</DescribeAddressesResponse>"""
DISASSOCIATE_ADDRESS_RESPONSE = """<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateAddressResponse>"""
RELEASE_ADDRESS_RESPONSE = """<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReleaseAddressResponse>"""
|
class Backends(object):
    """Dotted paths of the Django database backends supported for sharding."""
    MYSQL = 'django.db.backends.mysql'
    POSTGRES = 'django.db.backends.postgresql_psycopg2'
    SQLITE = 'django.db.backends.sqlite3'
|
yqowen/odatapy-client | codegen_template.py | Python | mit | 34,379 | 0.002443 | # OData Python Client and Server Libraries ver. 1.0.0
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# class_name, edm_namespace, edm_name
EDM_INFO = r"""
_namespace = r"{1}"
_typename = r"{2}"
@staticmethod
def get_full_name():
return {0}._namespace + '.' + {0}._typename
@staticmethod
def get_type_name():
return {0}._typename
"""
GET_ROOT_URL = r"""
def get_root_url(self):
if self._service_context is not None:
return self._service_context.get_root_url()
else:
return ""
"""
# edm_namespace
GET_ENUM_TYPE_NAMESPACE = r"""
@staticmethod
def get_enum_type_namespace():
return "{0}"
"""
BEGIN_GET_ENUM_TYPE_FROM_STRING = r"""
@staticmethod
def get_enum_value_from_string(enum_string):"""
# class_name, edm_name, class_member_name
ON_GET_ENUM_TYPE_FROM_STRING = r"""
if enum_string == "{1}":
return {0}.{2}"""
END_GET_ENUM_TYPE_FROM_STRING = r"""
return 0
"""
BEGIN_GET_STRING_FROM_ENUM_TYPE = r"""
@staticmethod
def get_string_from_enum_value(enum_value):"""
# class_name, edm_name, class_member_name
ON_GET_STRING_FROM_ENUM_TYPE = r'''
if enum_value == {0}.{2}:
return "{1}"'''
END_GET_STRING_FROM_ENUM_TYPE = r"""
return ""
"""
# base_class_name
BEGIN_COMPLEX_CONSTRUCTOR = r"""
def __init__(self, service_context):
{0}.__init__(self, service_context)"""
# class_member_name, default_value
ON_PROPERTY_IN_COMPLEX_CONSTRUCTOR = r"""
self._{0} = {1}"""
# class_member_name, edm_name, primitive_resolver
PRIMITIVE_PROPERTY_IN_COMPLEX_MAPPING = r"""
def get_{0}(self):
return self._{0}
def set_{0}(self, property_value):
self._{0} = property_value
def _get_{0}_from_complex(self, complex):
property_value = odata_client_python.odata_value()
if not complex.get_property_value("{1}", property_value):
return
if odata_client_python.is_nullptr(property_value):
property_value = None
if property_value is None:
return
primitive_value = odata_client_python.to_primitive_value(property_value)
if primitive_value is not None:
try:
self._{0} = {2}
except:
self._{0} = primitive_value.to_string()
def _set_{0}_to_complex(self, complex):
if complex is None or self._{0} is None:
return
complex.set_value("{1}", self._{0})
"""
# class_member_name, edm_name, class_member_type
COMPLEX_PROPERTY_IN_COMPLEX_MAPPING = r"""
def get_{0}(self):
return self._{0}
def set_{0}(self, property_value):
self._{0} = property_value
def _get_{0}_from_complex(self, complex):
property_value = odata_client_python.odata_value()
if not complex.get_property_value("{1}", property_value):
return
if odata_client_python.is_nullptr(property_value):
property_value = None
if property_value is None:
return
if property_value.get_value_type().get_type_kind() == odata_client_python.Complex:
complex_value = odata_client_python.to_complex_value(property_value)
self._{0} = {2}.create_instance_from_complex(complex_value, self._service_context)
def _set_{0}_to_complex(self, complex):
if complex is None or self._{0} is None:
return
complex.set_value("{1}", self._{0}.to_value())
"""
# class_member_name, edm_name, class_member_type
ENUM_PROPERTY_IN_COMPLEX_MAPPING = r"""
def get_{0}(self):
return self._{0}
def set_{0}(self, property_value):
self._{0} = property_value
def _get_{0}_from_complex(self, complex):
if complex is None:
return
property_value = odata_client_python.odata_value()
if not complex.get_property_value("{1}", property_value):
return
if odata_client_python.is_nullptr(property_value):
property_value = None
if property_value is None:
return
enum_value = odata_client_python.to_enum_value(property_value)
if enum_value is not None:
self._{0} = {2}.get_enum_value_from_string(enum_value.to_string())
def _set_{0}_to_complex(self, complex):
if complex is None or self._{0} is None:
return
complex_type = odata_client_python.to_complex_type(complex.get_value_type())
if complex_type is None:
return
edm_property = complex_type.find_property("{1}")
if edm_property is None:
return
property_type = edm_property.get_property_type()
enum_value = odata_client_python.odata_enum_value(property_type, {2}.get_string_from_enum_value(self._{0}))
complex.set_value("{1}", enum_value)
"""
# class_member_name, edm_name, primitive_resolver
COLLECTION_PRIMITIVE_PROPERTY_IN_COMPLEX_MAPPING = r"""
def get_{0}(self):
return self._{0}
def set_{0}(self, property_values):
self._{0} = property_values
def add_to_{0}(self, property_value):
if self._{0} is None:
self._{0} = []
self._{0}.append(property_value)
def _get_{0}_from_complex(self, complex):
if complex is None:
return
property_value = odata_client_python.odata_value()
if not complex.get_property_value("{1}", property_value):
return
if odata_client_python.is_nullptr(property_value):
property_value = None
if property_value is None:
return
property_collection_value = odata_client_python.to_collection_value(property_value)
if property_collection_value is None:
return
self._{0} = []
for odata_value in property_collection_value.get_collection_values():
primitive_value = odata_client_python.to_primitive_value(odata_value)
if primitive_value is None:
continue
try:
value = {2}
except:
value = primitive_value.to_string()
self._{0}.append(value)
def _set_{0}_to_complex(self, complex):
| if complex i | s None or self._{0} is None:
return
complex_type = odata_client_python.to_complex_type(complex.get_value_type())
if complex_type is None:
return
edm_property = complex_type.find_property("{1}")
if edm_property is None:
return
property_type = edm_property.get_property_type()
collection_value_type = odata_client_python.to_collection_type(property_type)
if collection_value_type is None:
return
collection_value = odata_client_python.to_collection_value(collection_value_type)
for primitive in self._{0}:
collection_value.add_collection_value(odata_client_python.odata_primitive_value.make_primitive_value(primitive))
complex.set_value("{1}", collection_value)
"""
# class_member_name, edm_name |
dvarrazzo/Pyrseas | tests/augment/test_audit.py | Python | bsd-3-clause | 12,757 | 0 | # -*- coding: utf-8 -*-
"""Test audit columns"""
import pytest
from pyrseas.testutils import AugmentToMapTestCase
CREATE_STMT = "CREATE TABLE t1 (c1 integer, c2 text)"
FUNC_SRC1 = """
BEGIN
NEW.modified_by_user = SESSION_USER;
NEW.modified_timestamp = CURRENT_TIMESTAMP;
RETURN NEW;
END"""
FUNC_SRC2 = """
BEGIN
NEW.updated = CURRENT_TIMESTAMP;
RETURN NEW;
END"""
class AuditColumnsTestCase(AugmentToMapTestCase):
"""Test mapping of audit column augmentations"""
def test_predef_column(self):
"Add predefined audit column"
augmap = {'schema public': {'table t1': {
'audit_columns': 'created_date_only'}}}
dbmap = self.to_map([CREATE_STMT], augmap)
expmap = {'columns': [
{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'created_date': {'type': 'date', 'not_null': True,
'default': "('now'::text)::date"}}]}
assert expmap == dbmap['schema public']['table t1']
def test_unknown_table(self):
"Error on non-existent table"
augmap = {'schema public': {'table t2': {
'audit_columns': 'created_date_only'}}}
with pytest.raises(KeyError):
self.to_map([CREATE_STMT], augmap)
def test_bad_audit_spec(self):
"Error on bad audit column specification"
augmap = {'schema public': {'table t1': {
'audit_column': 'created_date_only'}}}
with pytest.raises(KeyError):
self.to_map([CREATE_STMT], augmap)
def test_unknown_audit_spec(self):
"Error on non-existent audit column specification"
augmap = {'schema public': {'table t1': {
'audit_columns': 'created_date'}}}
with pytest.raises(KeyError):
self.to_map([CREATE_STMT], augmap)
def test_new_column(self):
"Add new (non-predefined) audit column"
augmap = {'augmenter': {'columns': {
'modified_date': {'type': 'date', 'not_null': True,
'default': "('now'::text)::date"}},
'audit_columns': {'modified_date_only': {
'columns': ['modified_date']}}},
'schema public': {'table t1': {
'audit_columns': 'modified_date_only'}}}
dbmap = self.to_map([CREATE_STMT], augmap)
expmap = {'columns': [
{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'modified_date': {'type': 'date', 'not_null': True,
'default': "('now'::text)::date"}}]}
assert expmap == dbmap['schema public']['table t1']
def test_rename_column(self):
"Add predefined audit column but with new name"
augmap = {'augmenter': {'columns': {
'modified_timestamp': {'name': 'updated'}}},
'schema public': {'table t1': {
'audit_columns': 'modified_only'}}}
dbmap = self.to_map([CREATE_STMT], augmap)
colmap = {'columns': [
{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'updated': {'type': 'timestamp with time zone',
'not_null': True}}],
'triggers': {'t1_20_audit_modified_only': {
'events': ['insert', 'update'], 'level': 'row',
'procedure': 'audit_modified()', 'timing': 'before'}}}
funcmap = {'language': 'plpgsql', 'returns': 'trigger',
'security_definer': True, 'description':
'Provides modified_timestamp values for audit columns.',
'source': FUNC_SRC2}
assert dbmap['schema public']['table t1'] == colmap
assert dbmap['schema public']['function audit_modified()'] == funcmap
def test_change_column_type(self):
"Add predefined audit column but with changed datatype"
augmap = {'augmenter': {'columns': {'created_date': {'type': 'text'}}},
'schema public': {'table t1': {
'audit_columns': 'created_date_only'}}}
dbmap = self.to_map([CREATE_STMT], augmap)
expmap = {'columns': [
{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'created_date': {'type': 'text', 'not_null': True,
'default': "('now'::text)::date"}}]}
assert expmap == dbmap['schema public']['table t1']
def test_columns_with_trigger(self):
"Add predefined audit columns with trigger"
augmap = {'schema public': {'table t1': {'audit_columns': 'default'}}}
dbmap = self.to_map([CREATE_STMT], augmap)
expmap = {'columns': [
{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'modified_by_user': {'type': 'character varying(63)',
'not_null': True}},
{'modified_timestamp': {'type': 'timestamp with time zone',
'not_null': True}}],
'triggers': {'t1_20_audit_default': {
'events': ['update'], 'level': 'row',
'procedure': 'audit_default()', 'timing': 'before'}}}
assert expmap == dbmap['schema public']['table t1']
assert dbmap['schema public']['function audit_default()'][
'returns'] == 'trigger'
assert dbmap['schema public']['function audit_default()'][
'source'] == FUNC_SRC1
def test_nonpublic_schema_with_trigger(self):
"Add predefined audit columns with trigger in a non-public schema"
stmts = ["CREATE SCHEMA s1",
"CREATE TABLE s1.t1 (c1 integer, c2 text)"]
augmap = {'schema s1': {'table t1': {'audit_columns': 'default'}}}
dbmap = self.to_map(stmts, augmap)
expmap = {'columns': [
{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'modified_by_user': {'type': 'character varying(63)',
'not_null': True}},
{'modified_timestamp': {'type': 'timestamp with time zone',
'not_null': True}}],
'triggers': {'t1_20_audit_default': {
'events': ['update'], 'level': 'row',
'procedure': 's1.audit_default()', 'timing': 'before'}}}
assert expmap == dbmap['schema s1']['table t1']
assert dbmap['schema s1']['function audit_default()']['returns'] == \
'trigger'
assert dbmap['schema s1']['function audit_default()'][
'source'] == FUNC_SRC1
def test_skip_existing_columns(self):
"Do not add already existing audit columns"
stmts = [CREATE_STMT,
"ALTER TABLE t1 ADD modified_by_user varchar(63) NOT NULL",
"ALTER TABLE t1 ADD modified_timestamp "
"timestamp with time zone NOT NULL"]
augmap = {'schema public': {'table t1': {
'audit_columns': 'default'}}}
dbmap = self.to_map(stmts, augmap)
expmap = [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'modified_by_user': {'type': 'character varying(63)',
| 'not_null': True}},
{'modified_timestamp': {'type': 'timestamp with time zone',
'not_null': True}}]
assert expmap == dbmap['schema public']['table t1']['columns']
def test_change_existing_columns(self):
"Change already existing audit columns"
stmts = [CREATE_STMT, "ALTER TABLE t1 ADD modified_by_user text ",
"ALTER | TABLE t1 ADD modified_timestamp "
"timestamp with time zone NOT NULL"]
augmap = {'schema public': {'table t1': {'audit_columns': 'default'}}}
dbmap = self.to_map(stmts, augmap)
expmap = [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
{'modified_by_user': {'type': 'character varying(63)',
'not_null': True}},
{'modified_timestamp': {'type': 'timestamp with time zone',
'not_null': True}}]
assert expmap == dbmap['schema public']['table t1']['columns']
|
ikanor/typeschema | tests/typeschema_test.py | Python | mit | 237 | 0 | import doctest
import unittest
import typeschema
class TestCase(unittest.TestCase):
def test_doc(self): |
fails, tested | = doctest.testmod(typeschema.typeschema)
if fails > 0:
self.fail('Doctest failed!')
|
cincinnatusc123/jbrowse | bin/list_interval_tables.py | Python | lgpl-2.1 | 1,553 | 0.025113 | #!/usr/bin/python
import cgi
import simplejson as json
import os
import os.path
import shutil
import sys
sys.path.append(".. | /lib")
import GlobalConfig
import utils
import time
from subprocess import Popen, PIPE
err_filename = "%s/list_tables_error.txt" % (GlobalConfig.DEBUG_DIR)
sys.stderr = open( err_filename,'w')
out_filename = "%s/list_tables_output.txt" % (GlobalConfig.DEBUG_DIR)
sys.stdout = open( | out_filename,'w')
fields = cgi.FieldStorage()
project_name = fields.getvalue("project_name")
print "project_name", project_name
utils.printToServer( 'Content-type: text/json\n\n' )
#path = "%s/data/tracks/%s%s/interval_tables" % \
#(GlobalConfig.ROOT_DIR, \
#GlobalConfig.PROJECT_PREFIX, \
#project_name)
src_table_dir = "%s/src_tables/%s" % (os.environ["BIOSQL_HOME"], project_name)
interval_tables = []
if os.path.exists( src_table_dir ) :
for listing in os.listdir( src_table_dir ) :
print listing
if listing.find( project_name ) == 0 :
viewable = listing[len(project_name)+1:].rsplit('.',1)[0]
else :
viewable = listing.rsplit('.',1)[0]
#viewable = listing.split('_',1)[1].rsplit('.',1)[0]
#viewable = listing.strip("%s_" % project_name).strip('.it')
print viewable
interval_tables.append( viewable )
message = json.dumps(interval_tables)
status = "ok"
else :
status = "empty"
message = '"No tables loaded"'
print message
utils.printToServer( '{"status": "%s", "message": %s }' % (status,message) )
|
xu6148152/Binea_Python_Project | wikiSpider/wikiSpider/pipelines.py | Python | mit | 290 | 0 | # -*- coding: utf-8 -*-
# Define your item pipelines here
| #
# Don't forget to add your pipeline to the ITEM | _PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WikispiderPipeline(object):
def process_item(self, item, spider):
return item
|
v-iam/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2017_03_30/models/resource_sku_restrictions.py | Python | mit | 1,903 | 0.000525 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporatio | n. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceSkuRestrictions(Model):
"""Describes scaling information of a SKU.
| Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The type of restrictions. Possible values include: 'location'
:vartype type: str or :class:`ResourceSkuRestrictionsType
<azure.mgmt.compute.compute.v2017_03_30.models.ResourceSkuRestrictionsType>`
:ivar values: The value of restrictions. If the restriction type is set to
location. This would be different locations where the SKU is restricted.
:vartype values: list of str
:ivar reason_code: The reason for restriction. Possible values include:
'QuotaId', 'NotAvailableForSubscription'
:vartype reason_code: str or :class:`ResourceSkuRestrictionsReasonCode
<azure.mgmt.compute.compute.v2017_03_30.models.ResourceSkuRestrictionsReasonCode>`
"""
_validation = {
'type': {'readonly': True},
'values': {'readonly': True},
'reason_code': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'ResourceSkuRestrictionsType'},
'values': {'key': 'values', 'type': '[str]'},
'reason_code': {'key': 'reasonCode', 'type': 'ResourceSkuRestrictionsReasonCode'},
}
def __init__(self):
self.type = None
self.values = None
self.reason_code = None
|
xorpaul/shinken | test/test_antivirg.py | Python | agpl-3.0 | 3,398 | 0.013537 | #!/usr/bin/env python
# -*- coding: utf-8 -*
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
# load the configuration from file
self.setup_with_file('etc/nagios_antivirg.cfg')
def test_hostname_antivirg(self):
"""Check that it is allowed to have a host with the "__ANTI-VIRG__" substring in its hostname"""
# the global configuration must be valid
self.assert_(
True == self.conf.conf_is_correct
,("config is not correct")
)
# try to get the host
# if it is not possible to get the host, it is probably because
# "__ANTI-VIRG__" has been replaced by ";"
hst = self.conf.hosts.find_by_name('test__ANTI-VIRG___0')
self.assert_(
hst is not None
,("host 'test__ANTI-VIRG___0' not found")
)
# Check that the host has a valid configuration
self.assert_(
True == hst.is_correct()
,("config of host '%s' is not true"
% (hst.get_name()))
)
def test_parsing_comment(self):
"""Check that the semicolon is a comment delimiter"""
# the global configuration must be valid
self.assert_(
True == self.conf.conf_is_correct
,("config is not correct")
)
# try to get the host
hst = self.conf.hosts.find_by_name('test_host_1')
self.assert_(
hst is not None
,("host 'test_host_1' not found")
)
# Check that the host has a valid configuration
self.assert_(
True == hst.is_correct()
,("config of host '%s' is not true"
% (hst.get_name()))
)
def test_escaped_semicolon(self):
"""Check that it is possible to have a host with a semicolon in its hostname
The consequences of this aren't tested. We try just to send a command but
I think that others programs which send commands don't think to escape
the semicolon.
"""
# the global configuration must be valid
self.assert_(
True == self.conf.conf_is_correct
,("config is not correct")
)
# try to get the host
hst = self.conf.hosts.find_by_name('test_host_2;with_semicolon')
self.assert_(
hst is not None
,("host 'test_host_2;with_semicolon' not found")
)
# Check that the host has a valid configuration
self.assert_(
True == hst.is_correct()
,("config of host '%s' is not true"
% (hst.get_name()))
| )
# We can send a command by escaping the semicolon.
command = '[%lu] PROCESS_HOST_CHECK_RESULT;test_host_2\;with_semicolon;2;down' % (time.time())
self.sched.run_external_command(command)
# can need 2 run for get the consu | m (I don't know why)
self.scheduler_loop(1, [])
self.scheduler_loop(1, [])
if '__main__' == __name__:
unittest.main()
|
salamb/girder | girder/api/v1/file.py | Python | apache-2.0 | 17,532 | 0.000285 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import errno
import six
from ..describe import Description, describeRoute
from ..rest import Resource, RestException, filtermodel, loadmodel
from ...constants import AccessType, TokenScope
from girder.models.model_base import AccessException, GirderException
from girder.api import access
class File(Resource):
"""
API Endpoint for files. Includes utilities for uploading and downloading
them.
"""
def __init__(self):
super(File, self).__init__()
self.resourceName = 'file'
self.route('DELETE', (':id',), self.deleteFile)
self.route('DELETE', ('upload', ':id'), self.cancelUpload)
self.route('GET', ('offset',), self.requestO | ffset)
self.route('GET', (':id',), self.getFile)
self.route('GET', (':id', 'download'), self.download)
self.route('GET', (':id', 'download', ':name'), self.downloadWithName)
self.route('POST', (), self.initUpload)
self.route('POST', ('chunk',), | self.readChunk)
self.route('POST', ('completion',), self.finalizeUpload)
self.route('POST', (':id', 'copy'), self.copy)
self.route('PUT', (':id',), self.updateFile)
self.route('PUT', (':id', 'contents'), self.updateFileContents)
@access.public(scope=TokenScope.DATA_READ)
@loadmodel(model='file', level=AccessType.READ)
@filtermodel(model='file')
@describeRoute(
Description('Get a file\'s information.')
.param('id', 'The ID of the file.', paramType='path')
.errorResponse()
.errorResponse('Read access was denied on the file.', 403)
)
def getFile(self, file, params):
return file
@access.user(scope=TokenScope.DATA_WRITE)
@describeRoute(
Description('Start a new upload or create an empty or link file.')
.responseClass('Upload')
.param('parentType', 'Type being uploaded into (folder or item).')
.param('parentId', 'The ID of the parent.')
.param('name', 'Name of the file being created.')
.param('size', 'Size in bytes of the file.',
dataType='integer', required=False)
.param('mimeType', 'The MIME type of the file.', required=False)
.param('linkUrl', 'If this is a link file, pass its URL instead '
'of size and mimeType using this parameter.', required=False)
.param('reference', 'If included, this information is passed to the '
'data.process event when the upload is complete.',
required=False)
.errorResponse()
.errorResponse('Write access was denied on the parent folder.', 403)
.errorResponse('Failed to create upload.', 500)
)
def initUpload(self, params):
"""
Before any bytes of the actual file are sent, a request should be made
to initialize the upload. This creates the temporary record of the
forthcoming upload that will be passed in chunks to the readChunk
method. If you pass a "linkUrl" parameter, it will make a link file
in the designated parent.
"""
self.requireParams(('name', 'parentId', 'parentType'), params)
user = self.getCurrentUser()
mimeType = params.get('mimeType', 'application/octet-stream')
parentType = params['parentType'].lower()
if parentType not in ('folder', 'item'):
raise RestException('The parentType must be "folder" or "item".')
parent = self.model(parentType).load(id=params['parentId'], user=user,
level=AccessType.WRITE, exc=True)
if 'linkUrl' in params:
return self.model('file').filter(
self.model('file').createLinkFile(
url=params['linkUrl'], parent=parent, name=params['name'],
parentType=parentType, creator=user), user)
else:
self.requireParams('size', params)
try:
upload = self.model('upload').createUpload(
user=user, name=params['name'], parentType=parentType,
parent=parent, size=int(params['size']), mimeType=mimeType,
reference=params.get('reference'))
except OSError as exc:
if exc.errno == errno.EACCES:
raise GirderException(
'Failed to create upload.',
'girder.api.v1.file.create-upload-failed')
raise
if upload['size'] > 0:
return upload
else:
return self.model('file').filter(
self.model('upload').finalizeUpload(upload), user)
@access.user(scope=TokenScope.DATA_WRITE)
@describeRoute(
Description('Finalize an upload explicitly if necessary.')
.notes('This is only required in certain non-standard upload '
'behaviors. Clients should know which behavior models require '
'the finalize step to be called in their behavior handlers.')
.param('uploadId', 'The ID of the upload record.', paramType='form')
.errorResponse('ID was invalid.')
.errorResponse('The upload does not require finalization.')
.errorResponse('Not enough bytes have been uploaded.')
.errorResponse('You are not the user who initiated the upload.', 403)
)
def finalizeUpload(self, params):
self.requireParams('uploadId', params)
user = self.getCurrentUser()
upload = self.model('upload').load(params['uploadId'], exc=True)
if upload['userId'] != user['_id']:
raise AccessException('You did not initiate this upload.')
# If we don't have as much data as we were told would be uploaded and
# the upload hasn't specified it has an alternate behavior, refuse to
# complete the upload.
if upload['received'] != upload['size'] and 'behavior' not in upload:
raise RestException(
'Server has only received %s bytes, but the file should be %s '
'bytes.' % (upload['received'], upload['size']))
file = self.model('upload').finalizeUpload(upload)
extraKeys = file.get('additionalFinalizeKeys', ())
return self.model('file').filter(file, user, additionalKeys=extraKeys)
@access.user(scope=TokenScope.DATA_WRITE)
@describeRoute(
Description('Request required offset before resuming an upload.')
.param('uploadId', 'The ID of the upload record.')
.errorResponse("The ID was invalid, or the offset did not match the "
"server's record.")
)
def requestOffset(self, params):
"""
This should be called when resuming an interrupted upload. It will
report the offset into the upload that should be used to resume.
:param uploadId: The _id of the temp upload record being resumed.
:returns: The offset in bytes that the client should use.
"""
self.requireParams('uploadId', params)
upload = self.model('upload').load(params['uploadId'], exc=True)
offset = self.model('upload').requestOffset(upload)
if isinstance(offset, six.integer_types):
upload['received'] = offset
self.model('upload').save(upload)
return {'offset': offset}
e |
wdv4758h/flake8 | flake8/_pyflakes.py | Python | mit | 4,605 | 0.000217 | # -*- coding: utf-8 -*-
try:
# The 'demandimport' breaks pyflakes and flake8._pyflakes
from mercurial import demandimport
except ImportError:
pass
else:
demandimport.disable()
import os
import pep8
import pyflakes
import pyflakes.checker
def patch_pyflakes():
"""Add error codes to Pyflakes messages."""
codes = dict([line.split()[::-1] for line in (
'F401 UnusedImport',
'F402 ImportShadowedByLoopVar',
'F403 ImportStarUsed',
'F404 LateFutureImport',
'F810 Redefined', # XXX Obsolete?
'F811 RedefinedWhileUnused',
'F812 RedefinedInListComp',
'F821 UndefinedName',
'F822 UndefinedExport',
'F823 UndefinedLocal',
'F831 DuplicateArgument',
'F841 UnusedVariable',
)])
for name, obj in vars(pyflakes.messages).items():
if name[0].isupper() and obj.message:
obj.flake8_msg = '%s %s' % (codes.get(name, 'F999'), obj.message)
patch_pyflakes()
class FlakesChecker(pyflakes.checker.Checker):
"""Subclass the Pyflakes checker to conform with the flake8 API."""
name = 'pyflakes'
version = pyflakes.__version__
def __init__(self, tree, filename):
filename = pep8.normalize_paths(filename)[0]
withDoctest = self.withDoctest
included_by = [include for include in self.include_in_doctest
if include != '' and filename.startswith(include)]
if included_by:
withDoctest = True
for exclude in self.exclude_from_doctest:
if exclude != '' and filename.startswith(exclude):
withDoctest = False
overlaped_by = [include for include in included_by
if include.startswith(exclude)]
if overlaped_by:
withDoctest = True
super(FlakesChecker, self).__init__(tree, filename,
withDoctest=withDoctest)
@classmethod
def add_options(cls, parser):
parser.add_option('--builtins',
help="define more built-ins, comma separated")
parser.add_option('--doctests', default=False, action='store_true',
help="check syntax of the doctests")
parser.add_option('--include-in-doctest', default='',
dest='include_in_doctest',
help='Run doctests only on these files',
type='string')
parser.add_option('--exclude-from-doctest', default='',
dest='exclude_from_doctest',
help='Skip these files when running doctests',
type='string')
parser.config_options.extend(['builtins', 'doctests',
'include-in-doctest',
'exclude-from-doctest'])
@classmethod
def parse_options(cls, options): |
if options.builtins:
cls.builtIns = cls.builtIns.union(options.builtins.split(','))
cls.withDoctest = options.doctests
included_files = []
for included_file in options.include_in_doctest.split(','):
if included_file == '':
| continue
if not included_file.startswith((os.sep, './', '~/')):
included_files.append('./' + included_file)
else:
included_files.append(included_file)
cls.include_in_doctest = pep8.normalize_paths(','.join(included_files))
excluded_files = []
for excluded_file in options.exclude_from_doctest.split(','):
if excluded_file == '':
continue
if not excluded_file.startswith((os.sep, './', '~/')):
excluded_files.append('./' + excluded_file)
else:
excluded_files.append(excluded_file)
cls.exclude_from_doctest = pep8.normalize_paths(
','.join(excluded_files))
inc_exc = set(cls.include_in_doctest).intersection(
set(cls.exclude_from_doctest))
if inc_exc:
raise ValueError('"%s" was specified in both the '
'include-in-doctest and exclude-from-doctest '
'options. You are not allowed to specify it in '
'both for doctesting.' % inc_exc)
def run(self):
for m in self.messages:
col = getattr(m, 'col', 0)
yield m.lineno, col, (m.flake8_msg % m.message_args), m.__class__
|
digitalmacgyver/vedit | examples.py | Python | mit | 16,762 | 0.019687 | #!/usr/bin/env python
import glob
import os
import random
import vedit
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel( logging.DEBUG )
def example01():
'''Clip 2 seconds out of the middle of a video.'''
# Clipping 2 seconds out of source video from 1.5 seconds to 3.5 seconds.
source = vedit.Video( "./examples/testpattern.mp4" )
output_file = "./example_output/example01.mp4"
clip = vedit.Clip( video=source, start=1.5, end=3.5 )
window = vedit.Window( width=source.get_width(),
height=source.get_height(),
output_file=output_file )
window.clips = [ clip ]
window.render()
log.info( "Output file at %s" % ( output_file ) )
return
def example02():
'''Resize an existing video a few different ways.'''
# Turning a 1280x720 16:9 input video into a 640x480 4:3 video.
source = vedit.Video( "./examples/d005.mp4" )
clip = vedit.Clip( video=source )
#Since the input and output aspect ratios don't match, pad the input onto a blue background.
pad_output = "./example_output/example02-pad.mp4"
pad_display = vedit.Display( display_style=vedit.PAD, pad_bgcolor="Blue" )
window = vedit.Window( width=640, height=480,
display=pad_display,
output_file=pad_output )
window.clips = [ clip ]
window.render()
log.info( "Pad output file at: %s" % ( pad_output ) )
# Render a cropped version as well. Note the watermark is getting cropped out on the right.
crop_output = "./example_output/example02-crop.mp4"
crop_display = vedit.Display( display_style=vedit.CROP )
window = vedit.Window( width=640, height=480,
display=crop_display,
output_file=crop_output )
window.clips = [ clip ]
window.render()
log.info( "Crop output file at: %s" % ( crop_output ) )
# Render a version where we pan over the input image as it plays as well. Note the watermark moves from left to right.
pan_output = "./example_output/example02-pan.mp4"
pan_display = vedit.Display( display_style=vedit.PAN )
window = vedit.Window( width=640, height=480,
display=pan_display,
output_file=pan_output )
window.clips = [ clip ]
window.render()
log.info( "Pan output file at: %s" % ( pan_output ) )
return
def example03():
'''Put two videos next to each other.'''
# Lets set up some source videos, and some clips for use below.
video_1 = vedit.Video( "./examples/i030.mp4" )
# Put two clips from video 1 side by side, with audio from the
# left clip only, ending after 8 seconds (we could also use clips
# from different videos).
clip_1_0_5 = vedit.Clip( video=video_1, start=0, end=5 )
clip_1_10_20 = vedit.Clip( video=video_1, start=10, end=20,
display=vedit.Display( include_audio=False ) )
# Set up two windows, one for each clip, and one to hold the other two, and set the duration.
#
# Since clip 1 is 5 seconds long and we are making an 8 second
# video, there will be time when clip 1 is not playing - set the
# background color to green during this time.
output_file = "./example_output/example03.mp4"
base_window = vedit.Window( width=1280*2, height=720, duration=8, bgcolor='Green',
output_file=output_file )
# Set the x, y coordinates of this window inside its parent, as
# measure from the top right.
#
# Here we are putting the videos flush side by side, but they
# could be on top of each other, overlapping, centered in a much
# larger base_window, etc., etc..
clip_1_window = vedit.Window( width=1280, height=720, x=0, y=0, clips=[ clip_1_0_5 ] )
clip_2_window = vedit.Window( width=1280, height=720, x=1280, y=0, clips=[ clip_1_10_20 ] )
base_window.windows = [ clip_1_window, clip_2_window ]
base_window.render()
log.info( "Side by side output is at: %s" % ( output_file ) )
return
def example04():
'''Replace the audio track of a video.'''
source = vedit.Video( "./examples/i010.mp4" )
output_file = "./example_output/example04.mp4"
# Get a clip, but override any Window settings for its audio.
clip = vedit.Clip( video=source, display=vedit.Display( include_audio=False ) )
# Give this window it's own audio track, and set the duration to
# 10 seconds (otherwise it will go on as long as the | audio track).
#
# Note - if the window audio track is longer than the video
# content, it fades out starting 5 seconds from the end.
window = vedit.Window( audio_file="./examples/a2.mp4", duration=10,
output_file=output_file )
window.clips = [ clip ]
window.render()
log.info( "Replaced audio in output: %s" % ( output_file ) )
# Let's make a version where we attribute the audio with some text.
song_attribut | ion = '''This video features the song:
Chuckie Vs Hardwell Vs Sandro Silva Vs Cedric & Quintino
EPIC CLARITY JUMP- (NC MASHUP) LIVE
By: NICOLE CHEN
Available under under a Creative Commons License:
http://creativecommons.org/licenses/by/3.0/ license'''
output_file = "./example_output/example04-attributed.mp4"
window = vedit.Window( audio_file="./examples/a2.mp4",
audio_desc=song_attribution,
duration=10,
output_file=output_file )
window.clips = [ clip ]
window.render()
log.info( "Replaced audio in output: %s" % ( output_file ) )
return
def example05():
'''Ovarlay videos on top of other videos.'''
# Let's overlay two smaller windows on top of a base video.
base_video = vedit.Video( "./examples/i030.mp4" )
base_clip = vedit.Clip( video=base_video )
output_file = "./example_output/example05.mp4"
# Use the default width, height, and display parameters:
# 1280x1024, which happens to be the size of this input.
base_window = vedit.Window( clips = [ base_clip ],
output_file=output_file )
# We'll create two smaller windows, each 1/3 the size of the
# base_window, and position them towards the top left, and bottom
# right of the base window.
overlay_window1 = vedit.Window( width=base_window.width/3, height=base_window.height/3,
x=base_window.width/12, y=base_window.height/12 )
overlay_window2 = vedit.Window( width=base_window.width/3, height=base_window.height/3,
x=7*base_window.width/12, y=7*base_window.height/12 )
# Now let's put some clips in each of the overlay windows.
window_1_clips = [
vedit.Clip( video=vedit.Video( "./examples/d006.mp4" ) ),
vedit.Clip( video=vedit.Video( "./examples/d007.mp4" ) ),
]
window_2_clips = [
vedit.Clip( video=vedit.Video( "./examples/p006.mp4" ) ),
vedit.Clip( video=vedit.Video( "./examples/p007.mp4" ) ),
vedit.Clip( video=vedit.Video( "./examples/p008.mp4" ) ),
]
# Now let's embed the clips in the windows, and the overlay
# windows in our base_window and render.
overlay_window1.clips = window_1_clips
overlay_window2.clips = window_2_clips
base_window.windows = [ overlay_window1, overlay_window2 ]
base_window.render()
log.info( "Made multi-video composition at: %s" % ( output_file ) )
# Well - the last video looks OK, but it sounds terrible - the
# audio from all the videos are being mixed together.
#
# Let's try again but exclude audio from everything but the base
# video.
output_file = "./example_output/example05-single-audio.mp4"
no_audio_display_config = vedit.Display( include_audio=False )
no_audio_overlay_window1 = vedit.Window( width=base_window.width/3, height=base_window.height/3,
x=base_window.width/12, y=base_window.height/12,
display=no_audio_d |
sde1000/quicktill | quicktill/modifiers.py | Python | gpl-3.0 | 4,594 | 0.004354 | from . import ui, td, user, linekeys, keyboard
from .models import KeyboardBinding
from decimal import Decimal
import inspect, itertools
import logging
log = logging.getLogger(__name__)
class Incompatible(Exception):
def __init__(self, msg=None):
self.msg = msg
# Dictionary of all registered modifiers, allowing modifier instances
# to be looked up using the modifier name
all = {}
class BaseModifier:
"""The base modifier. Not compatible with anything."""
def __init__(self, name):
global all
self.name = name
all[name] = self
def mod_stockline(self, stockline, transline):
raise Incompatible("The '{}' modifier can't be used with stocklines."
.format(self.name))
def mod_plu(self, plu, transline):
raise Incompatible("The '{}' modifier can't be used with price lookups."
.format(self.name))
@property
def description(self):
return inspect.cleandoc(self.__doc__)
class BadModifier(BaseModifier):
"""This modifier exists in the database, but is not defined in the
configuration file. It can't be used with any stock line or price
lookup. You should either declare it in the configuration file,
or delete its keyboard bindings.
If you modify the configuration file, you must restart the till
software to pick up the changes.
"""
pass
class RegisterSimpleModifier(type):
"""Metaclass that automatically instantiates modifiers using their
class name.
"""
def __init__(cls, name, bases, attrs):
if name != "SimpleModifier":
cls(name=name)
class SimpleModifier(BaseModifier, metaclass=RegisterSimpleModifier):
"""Modifiers created as a subclass of this register themselves
automatically using the class name. They shouldn't have their own
__init__ methods. Their methods can access the modifier name as
self.name.
"""
pass
class modify(user.permission_checked, ui.listpopup):
permission_required = ('alter-modifier',
'Alter the key bindings for a modifier')
def __init__(self, name):
# If the modifier does not exist, create it so that its
# keyboard bindings can be deleted.
global all
if name not in all:
BadModifier(name=name)
mod = all[name]
self.name = name
bindings = td.s.query(KeyboardBinding)\
.filter(KeyboardBinding.stockline==None)\
.filter(KeyboardBinding.plu==None)\
.filter(KeyboardBinding.modifier==name)\
.all()
f = ui.tableformatter(' l c ')
kbl = linekeys.keyboard_bindings_table(bindings, f)
hl = [(ui.lrline(x), ui.emptyline())
for x in mod.description.split('\n\n')]
hl = list(itertools.chain.from_iterable(hl))
hl = hl \
+ [ui.line("To add a binding, press a line key."),
ui.line("To delete a binding, highlight it and press Cancel."),
ui.emptyline(),
f("Line key", "Menu key")]
super().__init__(kbl, header=hl, title=f"{name} modifier", w=58)
def keypress(self, k):
if hasattr(k, 'line'):
self.dismiss()
linekeys.addbinding(self, k, func=lambda: modify(self.name))
elif k == keyboard.K_CANCEL:
self.deletebinding()
else:
| super().keypress(k)
def deletebinding(se | lf):
log.debug("modifier deletebinding: cursor is %s", self.s.cursor)
if self.s.cursor is None:
return
line = self.s.dl.pop(self.s.cursor)
self.s.redraw()
td.s.add(line.userdata)
td.s.delete(line.userdata)
td.s.flush()
def defined_modifiers():
"""Return a list of all modifiers."""
return sorted(all.keys())
class modifiermenu(ui.menu):
def __init__(self):
super().__init__(
[(x, modify, (x,)) for x in defined_modifiers()],
blurb="Choose a modifier to alter from the list below, "
"or press a line key that is already bound to the "
"modifier.",
title="Modifiers")
def keypress(self, k):
if hasattr(k, 'line'):
linekeys.linemenu(k, self.mod_selected, allow_stocklines=False,
allow_plus=False, allow_mods=True)
else:
super().keypress(k)
def mod_selected(self, kb):
self.dismiss()
td.s.add(kb)
modify(kb.modifier)
|
mediachain/mediachain-client | multihash/__init__.py | Python | mit | 121 | 0 | from mult | ihash import SHA1, SHA2_256, SHA2_512, SHA3, BLAKE2B, BLAKE2S, \
decode, encode, i | s_valid_code, is_app_code
|
yajiedesign/mxnet | tests/python/unittest/test_operator.py | Python | apache-2.0 | 407,016 | 0.007034 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
| #
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import | print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i |
grevutiu-gabriel/sympy | sympy/assumptions/handlers/common.py | Python | bsd-3-clause | 3,168 | 0.000316 | from sympy.core.logic import _fuzzy_group
from sympy.logic.boolalg import conjuncts
from sympy.assumptions import Q, ask
class AskHandler(object):
"""Base class that all Ask Handlers must inherit"""
pass
class CommonHandler(AskHandler):
"""Defines some useful methods common to most Handlers """
@staticmethod
def AlwaysTrue(expr, assumptions):
return True
@staticmethod
def AlwaysFalse(expr, assumptions):
return False
NaN = AlwaysFalse
class AskCommutativeHandler(CommonHandler):
"""
Handler for key 'commutative'
"""
@staticmethod
def Symbol(expr, assumptions):
"""Objects are expected to be commutati | ve unless otherwise stated"""
assumps = conjuncts(assumptions)
if expr.is_commutative is not None:
return expr.is_commutative and not ~Q.commutative(expr) in assumps
if Q.commutative(expr) in assumps:
return True
elif ~Q.commutative(expr) in assumps:
return False
return True
@staticmethod
def Basic(expr, assumptions):
for arg in expr.args:
if not ask(Q.commuta | tive(arg), assumptions):
return False
return True
Number, NaN = [staticmethod(CommonHandler.AlwaysTrue)]*2
class TautologicalHandler(AskHandler):
"""Wrapper allowing to query the truth value of a boolean expression."""
@staticmethod
def bool(expr, assumptions):
return expr
BooleanTrue = staticmethod(CommonHandler.AlwaysTrue)
BooleanFalse = staticmethod(CommonHandler.AlwaysFalse)
@staticmethod
def AppliedPredicate(expr, assumptions):
return ask(expr, assumptions)
@staticmethod
def Not(expr, assumptions):
value = ask(expr.args[0], assumptions=assumptions)
if value in (True, False):
return not value
else:
return None
@staticmethod
def Or(expr, assumptions):
result = False
for arg in expr.args:
p = ask(arg, assumptions=assumptions)
if p is True:
return True
if p is None:
result = None
return result
@staticmethod
def And(expr, assumptions):
result = True
for arg in expr.args:
p = ask(arg, assumptions=assumptions)
if p is False:
return False
if p is None:
result = None
return result
@staticmethod
def Implies(expr, assumptions):
p, q = expr.args
return ask(~p | q, assumptions=assumptions)
@staticmethod
def Equivalent(expr, assumptions):
p, q = expr.args
pt = ask(p, assumptions=assumptions)
if pt is None:
return None
qt = ask(q, assumptions=assumptions)
if qt is None:
return None
return pt == qt
#### Helper methods
def test_closed_group(expr, assumptions, key):
"""
Test for membership in a group with respect
to the current operation
"""
return _fuzzy_group(
(ask(key(a), assumptions) for a in expr.args), quick_exit=True)
|
glaudsonml/kurgan-ai | tools/sqlmap/plugins/dbms/maxdb/__init__.py | Python | apache-2.0 | 1,033 | 0.001936 | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import MAXDB_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.maxdb.enumeration import Enumeration
from plugins.dbms.maxdb.filesystem import Filesystem
from plugins.dbms.maxdb.fingerprint import Fingerprint
from plugins.dbms.maxdb.syntax import Syntax
from plugins.dbms.maxdb.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class MaxDBMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
"""
This class defines SAP MaxDB methods
"""
def __init__(self):
self.excludeDbsList | = MAXDB_SYSTEM_DBS
Syntax.__init__(self)
Fingerprint.__init__(self)
Enumeration.__init__(self)
Filesystem.__init__(self)
Miscellaneous.__init__(self)
Takeover.__init__(self)
unescaper[DBMS.M | AXDB] = Syntax.escape
|
drzunny/hydrus | hydrus/__init__.py | Python | bsd-2-clause | 131 | 0 | # -*- coding:utf8 -*-
from . import server
__VERSION__ = server.__VERS | ION__
__author__ = 'drz' |
__email__ = 'drzunny@hotmail.com'
|
nagyistoce/devide | modules/vtk_basic/vtkImageAnisotropicDiffusion2D.py | Python | bsd-3-clause | 517 | 0.001934 | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
f | rom module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageAnisotropicDiffusion2D(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageAnisotropicDiffusion2D(), 'Processing.',
('vtkImageData',), | ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
oemof/examples | oemof_examples/oemof.solph/v0.4.x/emission_constraint/emission_constraint.py | Python | gpl-3.0 | 2,523 | 0 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that shows how to add an emission constraint in a model.
Installation requirements
-------------------------
This example requires the version v0.3.x of oemof. Install by:
pip install 'oemof.solph>=0.4,<0.5'
"""
__copyright__ = "oemof developer group"
__license_ | _ = "MIT"
import pandas as pd
from oemof import solph
from oemof.solph import constraints
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# create energy system
energysystem = solph.EnergySystem(
timeindex=pd.date_range("1/1/2012", periods=3, freq="H")
)
# create gas bus
bgas = solph.Bus(label="gas")
# create electricity bus
bel = solph.Bus(label="electricity")
# adding the buses to the energy system
energysystem.add(bel, bgas)
# create fixed | source object representing biomass plants
energysystem.add(
solph.Source(
label="biomass",
outputs={
bel: solph.Flow(
nominal_value=100,
variable_costs=10,
emission_factor=0.01,
fix=[0.1, 0.2, 0.3],
)
},
)
)
# create source object representing the gas commodity
energysystem.add(
solph.Source(
label="gas-source",
outputs={bgas: solph.Flow(variable_costs=10, emission_factor=0.2)},
)
)
energysystem.add(
solph.Sink(
label="demand",
inputs={
bel: solph.Flow(
nominal_value=200, variable_costs=10, fix=[0.1, 0.2, 0.3]
)
},
)
)
# create simple transformer object representing a gas power plant
energysystem.add(
solph.Transformer(
label="pp_gas",
inputs={bgas: solph.Flow()},
outputs={bel: solph.Flow(nominal_value=200)},
conversion_factors={bel: 0.58},
)
)
# initialise the operational model
model = solph.Model(energysystem)
# add the emission constraint
constraints.emission_limit(model, limit=100)
# print out the emission constraint
model.integral_limit_emission_factor_constraint.pprint()
model.integral_limit_emission_factor.pprint()
# solve the model
model.solve()
# print out the amount of emissions from the emission constraint
print(model.integral_limit_emission_factor())
results = solph.processing.results(model)
if plt is not None:
data = solph.views.node(results, "electricity")["sequences"]
ax = data.plot(kind="line", grid=True)
ax.set_xlabel("Time (h)")
ax.set_ylabel("P (MW)")
plt.show()
|
maciekswat/Twedit | Plugins/CC3DMLHelper/celltypedlg.py | Python | gpl-3.0 | 4,555 | 0.023491 | import re
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import PyQt4.QtCore as QtCore
import ui_celltypedlg
import sys
import string
MAC = "qt_mac_set_native_menubar" in dir()
class CellTypeDlg(QDialog,ui_celltypedlg.Ui_CellTypeDlg):
#signals
# gotolineSignal = QtCore.pyqtSignal( ('int',))
def __init__(self,_currentEditor=None,parent=None):
super(CellTypeDlg, self).__init__(parent)
self.editorWindow=parent
self.setupUi(self)
if not MAC:
self.cancelPB.setFocusPolicy(Qt.NoFocus)
self.updateUi()
def keyPressEvent(self, event):
cellType=str(self.cellTypeLE.text())
cellType=string.rstrip(cellType)
if event.key()==Qt.Key_Return :
if cellType!="":
self.on_cellTypeAddPB_clicked()
event.accept()
@pyqtSignature("") # signature of the signal emited by the button
def on_cellTypeAddPB_clicked(self):
cellType=str(self.cellTypeLE.text())
cellType=string.rstrip(cellType)
rows=self.cellTypeTable.rowCount()
if cellType =="":
return
# check if cell type with this name already exist
cellTypeAlreadyExists=False
for rowId in range(rows):
name=str(self.cellTypeTable.item(rowId,0).text())
name=string.rstrip(name)
print "CHECKING name=",name+"1"," type=",cellType+"1"
print "name==cellType ",name==cellType
if name==cellType:
cellTypeAlreadyExists=True
break
print "cellTypeAlreadyExists=",cellTypeAlreadyExists
if cellTypeAlreadyExists:
print "WARNING"
QMessageBox.warning(self,"Cell type name already exists","Cell type name already exist. Please choose different name",QMessageBox.Ok)
return
self.cellTypeTable.insertRow(rows)
cellTypeItem=QTableWidgetItem(cellType)
self.cellTypeTable.setItem (rows,0, cellTypeItem)
cellTypeFreezeItem=QTableWidgetItem()
cellTypeFreezeItem.data(Qt.CheckStateRole)
if self.freezeCHB.isChecked():
cellTypeFreezeItem.setCheckState(Qt.Checked)
else:
cellTypeFreezeItem.setCheckState(Qt.Unchecked)
self.cellTypeTable.setItem (rows,1, cellTypeFreezeItem)
# reset cell type entry line
self.cellTypeLE.setText("")
return
@pyqtSignature("") # signature of the signal emited by the button
def on_clearCellTypeTablePB_clicked(self):
rows=self.cellTypeTable.rowCount()
for i in range (rows-1,-1,-1):
self.cellTypeTable.removeRow(i)
#insert Medium
self.cellTypeTable.insertRow(0)
mediumItem=QTableWidgetItem("Medium")
self.cellTypeTable.setItem (0,0, mediumItem)
mediumFreezeItem=QTableWidgetItem()
mediumFreezeItem.data(Qt.CheckStateRole)
mediumFreezeItem.setCheckState(Qt.Unchecked)
self.cellTypeTable.setItem (0,1, mediumFreezeItem)
def extractInformation(self):
cellTypeDict={}
for row in range(self.cellTypeTable.rowCount()):
type=str(self.cellTypeTable.item(row,0).text())
freeze=False
if self.cellTypeTable.item(row,1).checkState()==Qt.Checked:
print "self.cellTypeTable.item(row,1).checkState()=",self.cellTypeTable.item(row,1).checkState()
freeze=True
cellTypeDict[row]=[type,freeze]
|
return cellTypeDict
def updateUi(self):
self.cellTypeTable.insertRow(0)
mediumItem=QTableWidgetItem("Medium")
self.cellTypeTable.setIte | m (0,0, mediumItem)
mediumFreezeItem=QTableWidgetItem()
mediumFreezeItem.data(Qt.CheckStateRole)
mediumFreezeItem.setCheckState(Qt.Unchecked)
self.cellTypeTable.setItem (0,1, mediumFreezeItem)
baseSize=self.cellTypeTable.baseSize()
self.cellTypeTable.setColumnWidth (0,baseSize.width()/2)
self.cellTypeTable.setColumnWidth (1,baseSize.width()/2)
self.cellTypeTable.horizontalHeader().setStretchLastSection(True)
|
BenSimonds/DHTSite | app/dhtapp/config.py | Python | mit | 166 | 0.006024 | #!/usr/bin/env python
import os
# | My Sonos
SONOS_IP = '192.168.0.34'
# API stuff,.
DHT_API_URL = os.environ['DHT_API_URL']
DB_BASE_TIMECODE | = '%Y-%m-%d %H:%M:%S.%f' |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/Demos/RegRestoreKey.py | Python | gpl-2.0 | 1,832 | 0.019651 | import win32api, win32security
import win32con, ntsecuritycon, winnt
import os
temp_dir=win32api.GetTempPath()
fname=win32api.GetTempFileName(temp_dir,'rsk')[0]
print fname
## file can't exist
os.remove(fname)
## enable backup and restore privs
required_privs = ((win32security.LookupPrivilegeValue('',ntsecuritycon.SE_BACKUP_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_RESTORE_NAME),win32con.SE_PRIVILEGE_ENABLED)
)
ph = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(ph, win32con.TOKEN_READ|win32con.TOKEN_ADJUST_PRIVILEGES)
adjusted_privs=win32security.AdjustTokenPrivileges(th,0,required_privs)
try:
sa=win32security.SECURITY_ATTRIBUTES()
my_sid = win32security.GetTokenInformati | on(th,ntsecuritycon.TokenUser)[0]
sa.SEC | URITY_DESCRIPTOR.SetSecurityDescriptorOwner(my_sid,0)
k, disp=win32api.RegCreateKeyEx(win32con.HKEY_CURRENT_USER, 'Python test key', SecurityAttributes=sa,
samDesired=win32con.KEY_ALL_ACCESS, Class='some class', Options=0)
win32api.RegSetValue(k, None, win32con.REG_SZ, 'Default value for python test key')
subk, disp=win32api.RegCreateKeyEx(k, 'python test subkey', SecurityAttributes=sa,
samDesired=win32con.KEY_ALL_ACCESS, Class='some other class', Options=0)
win32api.RegSetValue(subk, None, win32con.REG_SZ, 'Default value for subkey')
win32api.RegSaveKeyEx(k, fname, Flags=winnt.REG_STANDARD_FORMAT, SecurityAttributes=sa)
restored_key, disp=win32api.RegCreateKeyEx(win32con.HKEY_CURRENT_USER, 'Python test key(restored)', SecurityAttributes=sa,
samDesired=win32con.KEY_ALL_ACCESS, Class='restored class', Options=0)
win32api.RegRestoreKey(restored_key, fname)
finally:
win32security.AdjustTokenPrivileges(th, 0, adjusted_privs) |
jastarex/DeepLearningCourseCodes | 01_TF_basics_and_linear_regression/tensorflow_basic.py | Python | apache-2.0 | 8,932 | 0.004368 |
# coding: utf-8
# # TensorFlow基础
# In this tutorial, we are going to learn some basics in TensorFlow.
# ## Session
# Session is a class for running TensorFlow operations. A Session object encapsulates the environment in which Operation objects are executed, and Tensor objects are evaluated. In this tutorial, we will use a session to print out the value of tensor. Session can be used as follows:
# In[1]:
import tensorflow as tf
a = tf.constant(100)
with tf.Session() as sess:
print sess.run(a)
#syntactic sugar
print a.eval()
# or
sess = tf.Session()
print sess.run(a)
# print a.eval() # this will print out an error
# ## Interactive session
# Interactive session is a TensorFlow session for use in interactive contexts, such as a shell. The only difference with a regular Session is that an Interactive session installs itself as the default session on construction. The methods [Tensor.eval()](https://www.tensorflow.org/versions/r0.11/api_docs/python/framework.html#Tensor) and [Operation.run()](https://www.tensorflow.org/versions/r0.11/api_docs/python/framework.html#Operation) will use that session to run ops.This is convenient in interactive shells and IPython notebooks, as it avoids having to pass an explicit Session object to run ops.
# In[2]:
sess = tf.InteractiveSession()
print a.eval() # simple usage
# ## Constants
# We can use the `help` function to get an annotation about any function. Just type `help(tf.consant)` on the below cell and run it.
# It will print out `constant(value, dtype=None, shape=None, name='Const')` at the top. Value of tensor constant can be scalar, matrix or tensor (more than 2-dimensional matrix). Also, you can get a shape of tensor by running [tensor.get_shape()](https://www.tensorflow.org/versions/r0.11/api_docs/python/framework.html#Tensor)`.as_list()`.
#
# * tensor.get_shape()
# * tensor.get_shape().as_list()
# In[3]:
a = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32, name='a')
print a.eval()
print "shape: ", a.get_shape(), ",type: ", type(a.get_shape())
print "shape: ", a.get_shape().as_list(), ",type: ", type(a.get_shape().as_list()) # this is more useful
# ## Basic functions
# There are some basic functions we need to know. Those functions will be used in next tutorial **3. feed_forward_neural_network**.
# * tf.argmax
# * tf.reduce_sum
# * tf.equal
# * tf.random_normal
# #### tf.argmax
# `tf.argmax(input, dimension, name=None)` returns the index with the largest value across dimensions of a tensor.
#
# In[4]:
a = tf.constant([[1, 6, 5], [2, 3, 4]])
print a.eval()
print "argmax over axis 0"
print tf.argmax(a, 0).eval()
print "argmax over axis 1"
print tf.argmax(a, 1).eval()
# #### tf.reduce_sum
# `tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None)` computes the sum of elements across dimensions of a tensor. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in reduction_indices. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned
# In[5]:
a = tf.constant([[1, 1, 1], [2, 2, 2]])
print a.eval()
print "reduce_sum over entire matrix"
print tf.reduce_sum(a).eval()
print "reduce_sum over axis 0"
print tf.reduce_sum(a, 0).eval()
print "reduce_sum over axis 0 + keep dimensions"
print tf.reduce_sum(a, 0, keep_dims=True).eval()
print "reduce_sum over axis 1"
print tf.reduce_sum(a, 1).eval()
print "reduce_sum over axis 1 + keep dimensions"
print tf.reduce_sum(a, 1, keep_dims=True).eval()
# #### tf.equal
# `tf.equal(x, y, name=None)` returns the truth value of `(x == y)` element-wise. Note that `tf.equal` supports broadcasting. For more about broadcasting, please see [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
# In[6]:
a = tf.constant([[1, 0, 0], [0, 1, 1]])
print a.eval()
print "Equal to 1?"
print tf.equal(a, 1).eval()
print "Not equal to 1?"
print tf.not_equal(a, 1).eval()
# #### tf.random_normal
# `tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)` outputs random values from a normal distribution.
#
# In[7]:
normal = tf.random_normal([3], stddev=0.1)
print normal.eval()
# ## Variables
# When we train a model, we use variables to hold and update parameters. Variables are in-memory buffers containing tensors. They must be explicitly initialized and can be saved to disk during and after training. we can later restore saved values to exercise or analyze the model.
#
# * tf.Variable
# * tf.Tensor.name
# * tf.all_variables
#
# #### tf.Variable
# `tf.Variable(initial_value=None, trainable=True, name=None, variable_def=None, dtype=None)` creates a new variable with value `initial_value`.
# The new variable is added to the graph collections listed in collections, which defaults to `[GraphKeys.VARIABLES]`. If `trainable` is true, the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
# In[8]:
# variable will be initialized with normal distribution
var = tf.Variable(tf.random_normal([3], stddev=0.1), name='var')
print var.name
tf.initialize_all_variables().run()
print var.eval()
# #### tf.Tensor.name
# We can call `tf.Variable` and give the same name `my_var` more than once as seen below. Note that `var3.name` prints out `my_var_1:0` instead of `my_var:0`. This is because TensorFlow doesn't allow user to create variables with the same name. In this case, TensorFlow adds `'_1'` to the origi | nal name instead of printing out an error message. Note that you should be careful not to call `tf.Variable` giving same name more than once, because it will cause a fatal problem when you save and restore the variables.
# In[9]:
var2 = tf.Variable(tf.random_normal([2, 3], stddev=0.1), name='my_var')
var3 = tf.Variable(tf.random_norm | al([2, 3], stddev=0.1), name='my_var')
print var2.name
print var3.name
# #### tf.all_variables
# Using `tf.all_variables()`, we can get the names of all existing variables as follows:
# In[10]:
for var in tf.all_variables():
print var.name
# ## Sharing variables
# TensorFlow provides several classes and operations that you can use to create variables contingent on certain conditions.
# * tf.get_variable
# * tf.variable_scope
# * reuse_variables
# #### tf.get_variable
# `tf.get_variable(name, shape=None, dtype=None, initializer=None, trainable=True)` is used to get or create a variable instead of a direct call to `tf.Variable`. It uses an initializer instead of passing the value directly, as in `tf.Variable`. An initializer is a function that takes the shape and provides a tensor with that shape. Here are some initializers available in TensorFlow:
#
# * `tf.constant_initializer(value)` initializes everything to the provided value,
# * `tf.random_uniform_initializer(a, b)` initializes uniformly from [a, b],
# * `tf.random_normal_initializer(mean, stddev)` initializes from the normal distribution with the given mean and standard deviation.
# In[11]:
my_initializer = tf.random_normal_initializer(mean=0, stddev=0.1)
v = tf.get_variable('v', shape=[2, 3], initializer=my_initializer)
tf.initialize_all_variables().run()
print v.eval()
# #### tf.variable_scope
# `tf.variable_scope(scope_name)` manages namespaces for names passed to `tf.get_variable`.
# In[12]:
with tf.variable_scope('layer1'):
w = tf.get_variable('v', shape=[2, 3], initializer=my_initializer)
print w.name
with tf.variable_scope('layer2'):
w = tf.get_variable('v', shape=[2, 3], initializer=my_initializer)
print w.name
# #### reuse_variables
# Note that you should run the cell above only once. If you run the code above more than once, an error message will be printed out: `"ValueError: Variable layer1/v already exists, disallowed."`. This is because we used `tf.get_variable` above, and this function doesn't allow creating variables with the existing names. We can solve this problem by using `scope.reuse_variables()` to get preivously created variables instead of creating new ones.
# In[13]:
with tf.variable_scope('layer1', reuse=True):
|
wxue/xiakelite | libs/flaskext/wtf/recaptcha/widgets.py | Python | mit | 2,664 | 0.003754 | """
Custom widgets
"""
try:
import json
except ImportError:
import simplejson as json
from flask import current_app
from werkzeug import url_encode
# use flaskext.babel for translations, if available
try:
from flaskext.babel import gettext as _
except ImportError:
_ = lambda(s) : s
RECAPTCHA_API_SERVER = 'http://api.recaptcha.net/'
RECAPTCHA_SSL_API_SERVER = 'https://api-secure.recaptcha.net/'
RECAPTCHA_HTML = u'''
<script type="text/javascript">var RecaptchaOptions = %(options)s;</script>
<script type="text/javascript" src="%(script_url)s"></script>
<noscript>
<div><iframe src="%(frame_url)s" height="300" width="500"></iframe></div>
<div><textarea name="recaptcha_challenge_field" rows="3" cols | ="40"></textarea>
<input type="hidden" name="recaptcha_response_field" value="manual_challenge"></div>
</noscript>
'''
__all__ = ["RecaptchaWidget"]
class RecaptchaWidget(object):
def recaptcha_html(self, server, query, options):
return RECAPTCHA_HTML % dict(
script_url='%schallenge?%s' % (server, query),
f | rame_url='%snoscript?%s' % (server, query),
options=json.dumps(options)
)
def __call__(self, field, error=None, **kwargs):
"""Returns the recaptcha input HTML."""
if current_app.config.get('RECAPTCHA_USE_SSL', False):
server = RECAPTCHA_SSL_API_SERVER
else:
server = RECAPTCHA_API_SERVER
try:
public_key = current_app.config['RECAPTCHA_PUBLIC_KEY']
except KeyError:
raise RuntimeError, "RECAPTCHA_PUBLIC_KEY config not set"
query_options = dict(k=public_key)
if field.recaptcha_error is not None:
query_options['error'] = unicode(field.recaptcha_error)
query = url_encode(query_options)
options = {
'theme': 'clean',
'custom_translations': {
'visual_challenge': _('Get a visual challenge'),
'audio_challenge': _('Get an audio challenge'),
'refresh_btn': _('Get a new challenge'),
'instructions_visual': _('Type the two words:'),
'instructions_audio': _('Type what you hear:'),
'help_btn': _('Help'),
'play_again': _('Play sound again'),
'cant_hear_this': _('Download sound as MP3'),
'incorrect_try_again': _('Incorrect. Try again.'),
}
}
options.update(current_app.config.get('RECAPTCHA_OPTIONS', {}))
return self.recaptcha_html(server, query, options)
|
ptrgags/turtle-fractals | turtlelsystem/tests/test_TurtleSVGMachine.py | Python | gpl-2.0 | 665 | 0.018045 | from turtlelsystem.TurtleSVGMachine import TurtleSVGMachine
from nose.tools import assert_almost_equal
def test_forward():
turtle = TurtleSVGMachine(width = 20, height = 20)
turtle.do_command("FORWARD 10")
assert_almost_equal(turtle.x, 20.0)
def test_backward():
turtle = TurtleSVGMachine(width = 20 | , height = 20)
turtle.do_command("BACKWARD 10")
assert_almost_equal(turtle.x, 0.0)
def test_left():
turtle = TurtleSVGMachine()
turtle.do_command("LEFT 30")
assert_almost_equal(turtle.theta, 30.0)
def test_right():
turtle = TurtleSVGMachine()
turtl | e.do_command("RIGHT 30")
assert_almost_equal(turtle.theta, 330.0)
|
houshengbo/nova_vmware_compute_driver | nova/openstack/common/lockutils.py | Python | apache-2.0 | 8,446 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
help='Directory to use for lock files')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for lock | ing(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError, e:
if e.errno in (errn | o.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the bar method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
a method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
cleanup_dir = True
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
'for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to cleanup
# the |
mixman/djangodev | tests/regressiontests/model_inheritance_regress/tests.py | Python | bsd-3-clause | 15,371 | 0.001041 | """
Regression tests for Model inheritance behaviour.
"""
from __future__ import absolute_import
import datetime
from operator import attrgetter
from django.test import TestCase
from .models import (Place, Restaurant, ItalianRestaurant, ParkingLot,
ParkingLot2, ParkingLot3, Supplier, Wholesaler, Child, SelfRefParent,
SelfRefChild, ArticleWithAuthor, M2MChild, QualityControl, DerivedM,
Person, BirthdayParty, BachelorParty, MessyBachelorParty,
InternalCertificationAudit, BusStation, TrainStation)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behaviour is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name','serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': u"Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name','serves_hot_dogs','serves_gnocchi'))
self.assertEqual(dicts, [{
'name': u"Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name','capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': u'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name='Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name','serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': u"Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': u"Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name','capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': u'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name','serves_hot_dogs','serves_gnocchi'))
self.assertEqual(dicts, [{
'name': u"Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test | _issue_7105(self):
# Regression | s tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
obj = Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
dates = list(Child.objects.dates('created', 'month'))
self.assertEqual(dates, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
xx = Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restuarants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
self.assertRaises(
Place.DoesNotExist,
Place.objects.get,
pk=ident)
self.assertRaises(
ItalianRestaurant.DoesNotExist,
ItalianRestaurant.objects.get,
pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='Artic |
wdv4758h/ZipPy | lib-python/3/cgi.py | Python | bsd-3-clause | 34,511 | 0.001275 | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
import sys
import os
import urllib.parse
from email.parser import FeedParser
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatiblity.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations | of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise V | alueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength |
DBCDK/opensearch | tools/transformDir.py | Python | gpl-3.0 | 3,672 | 0.025606 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python -*-
# This file is part of opensearch.
# Copyright © 2009, Dansk Bibliotekscenter a/s,
# Tempovej 7-11, DK-2750 Ballerup, Denmark. CVR: 15149043
#
# opensearch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opensearch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with opensearch. If not, see <http://www.gnu.org/licenses/>.
import libxml2
import libxslt
import logging
import os
import os.path
import string
import datetime
import sys
def transform( xmlFile, destFile, xsltFile ):
"""
Transforming document
"""
print "parsing "+xmlFile
styledoc = libxml2.parseFile( xsltFile )
style = libxslt.parseStylesheetDoc( styledoc )
doc = libxml2.parseFile( xmlFile )
result = style.applyStylesheet( doc, None )
style.saveResultToFilename( destFile, result, 0 )
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
def main( src, dest, xslt, overwrite ):
print ""
print("-"*70)
n | ow = datetime.datetime.now()
print "Starting Transformation ", now.ctime() ;
print("-"*70)
if os.path.isfile( dest ):
os.remove( dest )
if not os.path.isdir( dest ):
os.mkdir( dest )
files_transformed = 0
for root, dirs, files in os.walk(src):
for f in files:
src_file = os.path.abspath( os | .path.join( root, f ) )
dest_file = os.path.join( dest, f )
if os.path.exists( dest_file ) and not overwrite:
print( "cannot write file='%s' already exists"%dest_file )
# log no go
else:
print( "\n--> Starting Transformation on file '%s' to '%s'"%( src_file, dest_file ) )
try:
transform( src_file, dest_file, xslt )
except libxml2.parserError:
print( "Cannot parse file '%s'"%src_file )
if __name__ == '__main__':
from optparse import OptionParser
import sys
parser = OptionParser( usage="%prog [options] -x xslt src dest" )
parser.add_option( "-x", "--xslt",type="string", action="store", dest="xslt",
help="the xslt sheet.")
parser.add_option( "-o", "--overwrite", action="store_true", dest="overwrite", default="False",
help="if destination file or folder exists it will be overwritten if this flag is set.")
(options, args) = parser.parse_args()
if not options.xslt:
sys.exit( "a xslt sheet must be specified." )
if not os.path.isfile( options.xslt ):
sys.exit( "file '%s' does not exist."%options.xslt )
if len( args ) > 2:
print "more than 2 arguments. ignoring the rest"
if len( args ) < 2:
sys.exit( "both sourcec and destination folder must be specified" )
if not os.path.isdir(args[0]):
sys.exit( "'%s' is not a directory."%options.xslt )
if os.path.exists(args[1]) and not options.overwrite:
sys.exit( "folder or file '%s' exists. use --overwrite if you mean it."%args[0] )
main( os.path.abspath( args[0] ), os.path.abspath( args[1] ), os.path.abspath( options.xslt ), options.overwrite )
|
bruecksen/impacts-world | impacts_world/core/models.py | Python | mit | 3,189 | 0.002195 | from django.db import models
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailcore.fields import RichTextField
from wagtail.contrib.settings.models import BaseSetting
from wagtail.contrib.settings.registry import register_setting
from wagtail.wagtailsnippets.models import register_snippet
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, PageChooserPanel, MultiFieldPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailcore.models import Orderable
@register_setting(icon='list-ul')
class HeaderSettings(ClusterableModel, BaseSetting):
banner_first_intro = RichTextField(null=True, blank=True)
banner_second_intro = RichTextField(null=True, blank=True)
banner_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
show_participate = models.BooleanField(default=False)
participate_target = models.ForeignKey('wagtailcore.Page', null=True, blank=True)
participate_alt_name = models.CharField(max_length=255, null=True, blank=True, verbose_name='Alt. name', help_text='If left empty, the target\'s title will be used.')
participate_name = property(lambda self: self.participate_alt_name or self.participate_target.title)
panels = [
MultiFieldPanel(
[
FieldPanel('ban | ner_first_intro'),
FieldPanel('banner_second_intro'),
ImageChooserPanel('banner_image'),
],
heading="Banner",
),
InlinePanel('header_links', label="Link", classname="collapsed"),
MultiFieldPanel(
[
FieldPanel('show_participate') | ,
FieldPanel('participate_alt_name'),
PageChooserPanel('participate_target'),
],
heading="Participate",
),
]
class HeaderLink(Orderable, models.Model):
header = ParentalKey(HeaderSettings, related_name='header_links')
target = models.ForeignKey('wagtailcore.Page')
_name = models.CharField(max_length=255, null=True, blank=True, verbose_name='Alt. name',
help_text='If left empty, the target\'s title will be used.')
name = property(lambda self: self._name or self.target.title)
panels = [
PageChooserPanel('target'),
FieldPanel('_name'),
]
@register_snippet
class TimelineSnippet(ClusterableModel):
title = models.CharField(max_length=500)
panels = [
FieldPanel('title'),
InlinePanel('timeline_items', label='Timeline item'),
]
def __str__(self):
return self.title
class TimelineItem(Orderable, models.Model):
timeline = ParentalKey(TimelineSnippet, related_name='timeline_items')
title = models.CharField(max_length=500)
date = models.CharField(max_length=500, null=True, blank=True)
is_active = models.BooleanField(default=False, help_text='Currently active item.')
panels = [
FieldPanel('title'),
FieldPanel('date'),
FieldPanel('is_active'),
]
|
nugget/home-assistant | homeassistant/components/zha/switch.py | Python | apache-2.0 | 3,817 | 0 | """
Switches on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/switch.zha/
"""
import logging
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core.const import (
DATA_ZHA, DATA_ZHA_DISPATCHERS, ZHA_DISCOVERY_NEW, ON_OFF_CHANNEL,
SIGNAL_ATTR_UPDATED
)
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zha']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Old way of setting up Zigbee Home Automation switches."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Zigbee Home Automation switch from config entry."""
async def async_discover(discovery_info):
await _async_setup_entities(hass, config_entry, async_add_entities,
[discovery_info])
unsub = async_dispatcher_connect(
hass, ZHA_DISCOVERY_NEW.format(DOMAIN), async_discover)
hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
switches = hass.data.get(DATA_ZHA, {}).get(DOMAIN)
if switches is not None:
await _async_setup_entities(hass, config_entry, async_add_entities,
switches.values())
del hass.data[DATA_ZHA][DOMAIN]
async def _async_setup_entities(hass, config_entry, async_add_entities,
discovery_infos):
"""Set up the ZHA switches."""
entities = []
for discovery_info in discovery_infos:
entities.append(Switch(**discovery_info))
async_add_entities(entities, update_before_add=True)
class Switch(ZhaEntity, SwitchDevice):
"""ZHA switch."""
_domain = DOMAIN
def __init__(self, **kwargs):
"""Initialize the ZHA switch."""
super().__init__(**kwargs)
self._on_off_channel = self.cluster_channels.get(ON_OFF_CHANNEL)
@property
def is_on(self) -> bool:
"""Return if the switch is on based on the statemachine."""
if self._state is None:
return False
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
success = await self._on_off_channel.on()
if not success:
return |
self._state = True
self.asy | nc_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
success = await self._on_off_channel.off()
if not success:
return
self._state = False
self.async_schedule_update_ha_state()
def async_set_state(self, state):
"""Handle state update from channel."""
self._state = bool(state)
self.async_schedule_update_ha_state()
@property
def device_state_attributes(self):
"""Return state attributes."""
return self.state_attributes
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
await super().async_added_to_hass()
await self.async_accept_signal(
self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_state)
@callback
def async_restore_last_state(self, last_state):
"""Restore previous state."""
self._state = last_state.state == STATE_ON
async def async_update(self):
"""Attempt to retrieve on off state from the switch."""
await super().async_update()
if self._on_off_channel:
self._state = await self._on_off_channel.get_attribute_value(
'on_off')
|
kivy/pyjnius | tests/test_bad_declaration.py | Python | mit | 2,212 | 0.001808 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from jnius import JavaException, JavaClass
from jnius.reflect import autoclass
class BadDeclarationTest(unittest.TestCase):
def test_class_not_found(self):
#self.assertRaises(JavaException, autoclass, 'org.unknow.class')
#self.assertRaises(JavaException, autoclass, 'java/lang/String')
pass
def test_invalid_attribute(self):
Stack = autoclass('java.util.Stack')
self.assertRaises(AttributeError, getattr, Stack, 'helloworld')
def test_invalid_static_call(self):
Stack = autoclass('java.util.Stack')
self.assertRaises(JavaException, Stack.push, 'hello')
def test_with_too_much_arguments(self):
Stack = autoclass('java.util.Stack')
stack = Stack()
self.assertRaises(JavaException, stack.push, 'hello', 'world', 123)
def test_java_exception_handling(self):
Stack = autoclass('java.util.Stack')
stack = Stack()
try:
stack.pop()
self.fail("Expected exception to be thrown")
except JavaE | xception as je:
# print "Got JavaException: " + str(je)
# print "Got Exception Class: " + je.classname
# print "Got stacktrace: \n" + '\n'.join(je.stacktrace)
self.assertEqual("java.util.EmptyStackException", je.classname)
def test_java_exception_chaining(self):
BasicsTest = autoclass('org.jnius.BasicsTest')
basics = BasicsTest()
try:
basics.methodExceptionChained()
self. | fail("Expected exception to be thrown")
except JavaException as je:
# print "Got JavaException: " + str(je)
# print "Got Exception Class: " + je.classname
# print "Got Exception Message: " + je.innermessage
# print "Got stacktrace: \n" + '\n'.join(je.stacktrace)
self.assertEqual("java.lang.IllegalArgumentException", je.classname)
self.assertEqual("helloworld2", je.innermessage)
self.assertIn("Caused by:", je.stacktrace)
self.assertEqual(11, len(je.stacktrace))
|
avmarchenko/exatomic | docs/source/conf.py | Python | apache-2.0 | 13,060 | 0.00513 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#
# exatomic documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 23 21:37:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
sys.path.insert(0, os.path.abspath(os.sep.join(("..", ".."))))
from exatomic._version import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath("."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
'IPython.sphinxext.ipython_console_highlighting', # see https://github.com/spatialaudio/nbsphinx/issues/24
"nbsphinx" # ipynb autodocs
]
# sphinx.ext.napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = True
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#source_parsers = {".md": CommonMarkParser}
#source_suffix = [".rst", ".md"]
source_suffix = [".rst", ".txt"]
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "exatomic"
copyrite = "Copyright (c) 2015-2018, Exa Analytics Development Team"
author = "Thomas J. Duignan and Alex Marchenko"
# The version info for the project you"re documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '.'.join(__version__.split('.')[0:3])
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ""
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "static/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used | for generating the HTML full-text search i | ndex.
# Sphinx supports the following languages:
# "da", "de", "en", "es", "fi", "fr", "h", "it", "ja"
# "nl", "no", "pt", "ro", "r", "sv", "tr"
#html_search_language = "en"
# A dictionary with options for the search language support, empty by default.
# Now only "ja" uses this config value
#html_search_options = {"type": "default"}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = "scorer.js"
# Output file base name for HTML help builder.
htmlhelp_basename = "exatomicdoc"
# -- |
LLNL/spack | var/spack/repos/builtin/packages/r-affxparser/package.py | Python | lgpl-2.1 | 1,484 | 0.000674 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-Licen | se-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffxparser(RP | ackage):
"""Affymetrix File Parsing SDK
Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR). It
provides methods for fast and memory efficient parsing of Affymetrix
files using the Affymetrix' Fusion SDK. Both ASCII- and binary-based
files are supported. Currently, there are methods for reading chip
definition file (CDF) and a cell intensity file (CEL). These files can
be read either in full or in part. For example, probe signals from a few
probesets can be extracted very quickly from a set of CEL files into a
convenient list structure."""
homepage = "https://bioconductor.org/packages/affxparser"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.62.0', commit='b3e988e5c136c3f1a064e1da13730b403c8704c0')
version('1.56.0', commit='20d27701ad2bdfacf34d857bb8ecb4f505b4d056')
version('1.54.0', commit='dce83d23599a964086a84ced4afd13fc43e7cd4f')
version('1.52.0', commit='8e0c4b89ee1cb4ff95f58a5dd947249dc718bc58')
version('1.50.0', commit='01ef641727eadc2cc17b5dbb0b1432364436e3d5')
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('r@2.14.0:', type=('build', 'run'))
|
pelson/conda-build | tests/test-recipes/metadata/_compile-test/f2_bad.py | Python | bsd-3-clause | 49 | 0 | I am a bad | file that should not pas | s compileall.
|
SolPi/Web-Django | webLH/context_processor.py | Python | gpl-2.0 | 352 | 0.002841 | # | coding:utf8
from datetime import date
from django.conf import settings
def SiteInfo(request):
return {
'site_title': settings.SITE_TITLE,
'site_motto': settings.SITE_MOTTO,
'site_since': "Desde " + settings.SITE_SINCE,
'more_than_five': date.today().year - int(settings.IS_OLD) > int(settings.SI | TE_SINCE)
}
|
USGSDenverPychron/pychron | pychron/igsn/definitions.py | Python | apache-2.0 | 3,973 | 0.000755 | # ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
# ============= local library imports ==========================
LAT_TT = (
"Latitude of the location where the sample was collected. Needs to be entered in decimal degrees. "
"Negative values for South latitudes. (Coordinate system: WGS84)"
)
LONG_TT = (
"Longitude of the location where the sample was collected. Needs to be entered in decimal degrees. "
"Negative values for West longitudes. (Coordinate system: WGS84)"
)
ELEVATION_TT = "Elevation at which a sample was collected (in meters). Use negative values for depth below sea level"
ROCK_TYPES = ("Igneous", "Metamorphic", "Ore", "Sedimentary", "Xenolithic")
SUB_ROCK_TYPES = {
"Igneous": ("Plutonic", "Volcanic"),
"Metamorphic": [],
"Ore": [],
"Sedimentary": [],
"Xenolithic": [],
}
ROCK_TYPE_DETAILS = {
"Igneous": ("Exotic", "Felsic", "Intermediate", "Mafic", "Ultramafic"),
"Metamorphic": (
"Calc-Silicate",
"Eclogite",
"Gneiss",
"Granofels",
"Granulite",
"MechanicallyBroken",
"Meta-Carbonate",
"Meta-Ultramafic",
"Metasedimentary",
"Metasomatic",
"Schist",
"Slate",
),
"Ore": ("Other", "Oxide", "Sulfide"),
"Sedimentary": (
"Carbonate",
"ConglomerateAndOrBreccia",
"Evaporite",
"GlacialAndOrPaleosol",
"Hybrid",
"Ironstone",
"MixedCarbAndOrSiliciclastic",
"MnNoduleAndOrCrust",
"SiliceousBiogenic",
"Siliciclastic",
"Volcaniclastic",
),
"Xenolithic": [],
}
SAMPLE_TYPES = (
"Bead",
"Chemical Fraction",
"Core",
"Core Half Round",
"Core Piece",
"Core Quarter Round",
| "Core Section",
"Core Section Half",
"Core Sub-Piece",
"Core Whole Round",
"CTD",
"Cube",
"Culture",
"Cuttings",
"Cylinder",
"Dredge",
"Gas",
"Grab",
"Hole",
"Individual Sample",
"Liqui | d",
"Mechanical Fraction",
"Oriented Core",
"Powder",
"Rock Powder",
"Site",
"Slab",
"Smear",
"Specimen",
"Squeeze Cake",
"Terrestrial Section",
"Thin Section",
"Toothpick",
"Trawl",
"U-Channel",
"Wedge",
"Other",
)
SAMPLE_ATTRS = (
("user_code", "", True),
("sample_type", "", True),
("name", "", True),
("material", "", True),
("description", "", False),
("age_min", "", False),
("age_max", "", False),
("age_unit", "", False),
("collection_method", "", False),
("latitude", "", False),
("longitude", "", False),
("elevation", "", False),
("primary_location_name", "", False),
("country", "", False),
("province", "", False),
("county", "", False),
("collector", "", False),
("collection_start_date", "", False),
("collection_date_precision", "", False),
("original_archive", "", False),
)
MATERIALS = (
"Rock",
"Sediment",
"Soil",
"Synthetic",
"NotApplicable",
"Other",
"Biology",
"Gas",
"Ice",
"LiquidAqueous",
"LiquidOrganic",
"Mineral",
"Particulate",
)
# ============= EOF =============================================
|
15Mpedia/15Mpedia-scripts | import-flickr-photos.py | Python | gpl-3.0 | 11,518 | 0.008684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 emijrp <emijrp@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import re
import sys
import time
import urllib.parse
import urllib.request
import pywikibot
from pywikibot.specialbots import UploadRobot
def getURL(url=''):
raw = ''
req = urllib.request.Request(url, headers={ 'User-Agent': 'Mozilla/5.0' })
try:
raw = urllib.request.urlopen(req).read().strip().decode('utf-8')
except:
try:
raw = urllib.request.urlopen(req).read().strip().decode('latin-1')
except:
sleep = 10 # seconds
maxsleep = 0
while sleep <= maxsleep:
print('Error while retrieving: %s' % (url))
print('Retry in %s seconds...' % (sleep))
time.sleep(sleep)
try:
raw = urllib.request.urlopen(req).read().strip().decode('utf-8')
except:
pass
sleep = sleep * 2
return raw
def unquote(s):
s = re.sub('"', '"', s)
return s
def generateInfobox(photoid, photometadata, cats, flickrseturl, flickrsetname, flickruser):
"""
<licenses>
<license id="0" name="All Rights Reserved" url="" />
<license id="1" name="Attribution-NonCommercial-ShareAlike License" url="https://creativecommons.org/licenses/by-nc-sa/2.0/" />
<license id="2" name="Attribution-NonCommercial License" url="https://creativecommons.org/licenses/by-nc/2.0/" />
<license id="3" name="Attribution-NonCommercial-NoDerivs License" url="https://creativecommons.org/licenses/by-nc-nd/2.0/" />
<license id="4" name="Attribution License" url="https://creativecommons.org/licenses/by/2.0/" />
<license id="5" name="Attribution-ShareAlike License" url="https://creativecommons.org/licenses/by-sa/2.0/" />
<license id="6" name="Attribution-NoDerivs License" url="https://creativecommons.org/licenses/by-nd/2.0/" />
<license id="7" name="No known copyright restrictions" url="https://www.flickr.com/commons/usage/" />
<license id="8" name="United States Government Work" url="http://www.usa.gov/copyright.shtml" />
<license id="9" name="Public Domain Dedication (CC0)" url="https://creativecommons.org/publicdomain/zero/1.0/" />
<license id="10" name="Public Domain Mark" url="https://creativecommons.org/publicdomain/mark/1.0/" />
</licenses>
"""
licenses = { "1": "cc-by-nc-sa-2.0", "2": "cc-by-nc-2.0", "3": "cc-by-nc-nd-2.0", "4": "cc-by-2.0", "5": "cc-by-sa-2.0", "6": "cc-by-nd-2.0", "9": "cc-zero-1.0", "10": "cc-pd-mark-1.0"}
desc = photometadata['title']
if photometadata['description']:
if desc:
desc = '%s. %s' % (desc, photometadata['description'])
else:
desc = photometadata['description']
source = '[%s %s] ([%s %s])' % (photometadata['photourl'], photometadata['title'], flickrseturl, flickrsetname)
date | = photometadata['date-taken']
author = '{{flickr|%s}}' % (flickruser)
license = '{ | {%s}}' % (licenses[photometadata['license']])
coordinates = ''
if photometadata['coordinates']:
coordinates = '\n| coordenadas = %s' % (', '.join(photometadata['coordinates']))
tags = ''
if photometadata['tags']:
tags = '\n| palabras clave = %s' % (', '.join(photometadata['tags']))
output = u"""{{Infobox Archivo\n| descripción = %s\n| fuente = %s\n| fecha de creación = %s\n| autor = %s\n| licencia = %s%s%s\n}}%s""" % (desc, source, date, author, license, coordinates and coordinates or '', tags and tags or '', cats)
return output
def getflickrapikey():
f = open('flickrapi.key', 'r')
flickrapikey = f.read().strip()
return flickrapikey
def main():
site = pywikibot.Site('15mpedia', '15mpedia')
flickrapilimit = 500
flickrapikey = getflickrapikey() #do not share key
flickrseturl = ""
categories = []
tags = []
#load parameters
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg.startswith('--flickrset:'): # --flickrset:http://www.flickr.com/photos/15mmalagacc/sets/72157629844179358/
flickrseturl = arg[12:]
elif arg.startswith('--categories:'): # --categories:"15M_en_Madrid;Ocupa_el_Congreso"
categories = [re.sub('_', ' ', category) for category in arg[13:].split(';')]
elif arg.startswith('--tags:'): # --tags:"15M;Acampada_Sol"
tags = [re.sub('_', ' ', tag) for tag in arg[7:].split(';')]
if not flickrseturl:
print('Provide --flickrset: parameter. Example: --flickrset:https://www.flickr.com/photos/15mmalagacc/sets/72157629844179358/')
sys.exit()
"""if not categories:
print('Provide --categories: parameter. Example: --categories:"15M_en_Madrid;Ocupa_el_Congreso"')
sys.exit()"""
flickrseturls = []
if '://' in flickrseturl:
if '/people/' in flickrseturl:
raw = getURL(url=flickrseturl)
flickruser = flickrseturl.split('/people/')[1].split('/')[0].strip('/')
flickruserid = re.findall(r'"nsid":"([^"]+)"', raw)[0]
apiquery = 'https://api.flickr.com/services/rest/?method=flickr.photosets.getList&api_key=%s&user_id=%s&format=json&nojsoncallback=1' % (flickrapikey, flickruserid)
jsonset3 = json.loads(getURL(url=apiquery))
if not "photosets" in jsonset3:
print("ERROR: API key caducada o invalida?")
sys.exit()
flickrseturls = ["https://www.flickr.com/photos/%s/albums/%s" % (flickruser, x["id"]) for x in jsonset3["photosets"]["photoset"]]
print('\n'.join(flickrseturls))
sys.exit()
else:
flickrseturls = [flickrseturl]
else:
f = open(flickrseturl, 'r')
flickrseturls = f.read().strip().splitlines()
f.close()
print('Loaded', len(flickrseturls), 'photosets')
for flickrseturl in flickrseturls:
flickrseturl = flickrseturl.replace('/sets/', '/albums/')
flickruser = flickrseturl.split('/photos/')[1].split('/albums/')[0].strip()
flickrsetid = flickrseturl.split('/albums/')[1].split('/')[0].strip('/').strip()
raw = getURL(url=flickrseturl)
m = re.findall(r'"albumId":"%s","nsid":"([^"]+?)"' % (flickrsetid), raw)
flickruserid = ''
if m:
flickruserid = m[0]
else:
print("No se encontro flickruserid")
sys.exit()
m = re.findall(r',"pathAlias":"([^"]+?)"', raw)
#load set metadata
apiquery = 'https://api.flickr.com/services/rest/?method=flickr.photosets.getPhotos&api_key=%s&photoset_id=%s&user_id=%s&per_page=%s&format=json&nojsoncallback=1' % (flickrapikey, flickrsetid, flickruserid, flickrapilimit)
jsonset = json.loads(getURL(url=apiquery))
if not "photoset" in jsonset:
print("ERROR: API key caducada o invalida?")
sys.exit()
#print(jsonset)
flickrsetname = jsonset["photoset"]["title"]
#flickruser = jsonset["photoset"]["ownername"] #hay usuarios con espacios, mejor extraer el usuario de la url del set
photoids = [photo["id"] for photo in jsonset["photoset"]["photo"]]
pages = int(jsonset["photoset"]["pages"])
if pages > 1:
for page in range(2, pages+1):
apiquery2 = apiquery + '&page=' + str(page)
jsonset2 = json.loads(getURL(url=apiquery2))
phot |
nive-cms/nive | nive/userdb/userview/view.py | Python | gpl-3.0 | 11,106 | 0.011255 | #----------------------------------------------------------------------
# Copyright 2012, 2013 Arndt Droullier, Nive GmbH. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
__doc__ = """
"""
from nive.i18n import _
from nive.definitions import FieldConf, ViewConf, ViewModuleConf, Conf
# view module definition ------------------------------------------------------------------
#@nive_module
configuration = ViewModuleConf(
id = "userview",
name = _(u"User signup"),
static = "nive.userdb.userview:static",
containment = "nive.userdb.app.UserDB",
context = "nive.userdb.root.root",
view = "nive.userdb.userview.view.UserView",
templates = "nive.userdb.userview:",
permission = "view"
)
t = configuration.templates
configuration.views = [
# User Views
ViewConf(name="login", attr="login", renderer=t+"loginpage.pt"),
ViewConf(name="signup", attr="create", renderer=t+"signup.pt", permission="signup"),
ViewConf(name="update", attr="update", renderer=t+"update.pt", permission="updateuser"),
ViewConf(name="resetpass",attr="resetpass",renderer=t+"resetpass.pt"),
ViewConf(name="logout", attr="logout"),
# disabled
#ViewConf(name="mailpass", attr="mailpass", renderer=t+"mailpass.pt"),
]
# view and form implementation ------------------------------------------------------------------
from nive.views import BaseView, Unauthorized, Mail
from nive.forms import ObjectForm
class UserForm(ObjectForm):
"""
Extended User form
"""
def __init__(self, view=None, loadFromType=None, context=None, request=None, app=None, **kw):
ObjectForm.__init__(self, view=view, loadFromType=loadFromType)
self.actions = [
Conf(id="default", method="StartForm", name=_(u"Initialize"), hidden=True),
Conf(id="defaultEdit",method="LoadUser", name=_(u"Initialize"), hidden=True),
Conf(id="create", method="AddUser", name=_(u"Signup"), hidden=False, options={"renderSuccess":False}),
Conf(id="edit", method="Update", name=_(u"Confirm"), hidden=False),
Conf(id="mailpass", method="MailPass", name=_(u"Mail password"), hidden=False),
Conf(id="resetpass", method="ResetPass", name=_(u"Reset password"), hidden=False),
Conf(id="login", method="Login", name=_(u"Login"), hidden=False),
]
self.subsets = {
"create": {"fields": ["name", "password", "email", "surname", "lastname"],
"actions": ["create"],
"defaultAction": "default"},
"create2":{"fields": ["name", "email"],
"actions": ["create"],
"defaultAction": "default"},
"edit": {"fields": ["email",
FieldConf(id="password", name=_("Password"), datatype="password", required=False, settings={"update": True}),
"surname", "lastname"],
"actions": ["defaultEdit", "edit"],
"defaultAction": "defaultEdit"},
| "login": {"fields": ["name", FieldConf(id="password", name=_("Password"), datatype="password", settings={"single": True})],
"actions": ["login"],
"defaultAction": "default"},
"mailpass":{"fields": ["email"],
"actions": ["mailpass"],
"defaultAction": "default"},
| "resetpass":{"fields": ["email"],
"actions": ["resetpass"],
"defaultAction": "default"},
}
self.activate = 1
self.generatePW = 0
self.notify = True
self.mail = None
self.mailpass = None
self.groups = ""
self.css_class = "smallform"
def AddUser(self, action, **kw):
"""
Form action: safely add a user
"""
msgs = []
result,data,errors = self.Validate(self.request)
if result:
result, msgs = self.context.AddUser(data,
activate=self.activate,
generatePW=self.generatePW,
mail=self.mail,
groups=self.groups,
notify=self.notify,
currentUser=self.view.User())
return self._FinishFormProcessing(result, data, msgs, errors, **kw)
def LoadUser(self, action, **kw):
"""
Initially load data from obj.
context = obj
"""
user = self.view.User()
if not user:
raise Unauthorized, "User not found."
data = self.LoadObjData(user)
try:
del data["password"]
except:
pass
return data!=None, self.Render(data)
def Update(self, action, **kw):
"""
Form action: safely update a user
"""
user = self.view.User()
if not user:
raise Unauthorized, "User not found."
msgs = []
result,data,errors = self.Validate(self.request)
if result:
uobj = self.context.LookupUser(id=user.id)
result = uobj.SecureUpdate(data, user)
if result:
msgs.append(_(u"OK"))
return self._FinishFormProcessing(result, data, msgs, errors, **kw)
def Login(self, action, **kw):
"""
Form action: user login
"""
redirectSuccess = kw.get("redirectSuccess")
data = self.GetFormValues(self.request)
user, msgs = self.context.Login(data.get("name"), data.get("password"), 0)
if user:
self.context.app.RememberLogin(self.request, user.data.get("name"))
if self.view and redirectSuccess:
self.view.Redirect(redirectSuccess)
return
errors=None
return user, self.Render(data, msgs=msgs, errors=errors)
def MailPass(self, action, **kw):
"""
"""
redirectSuccess = kw.get("redirectSuccess")
return self.ResetPass(action, createNewPasswd=False, **kw)
    def ResetPass(self, action, createNewPasswd=True, **kw):
        """
        Form action: mail a password to the address submitted in the form.

        When ``createNewPasswd`` is true a fresh password is generated
        before mailing (reset); otherwise the existing password is sent
        (see ``MailPass``).  ``self.mailpass`` is the mail template used.
        """
        #result, data, e = self.Validate(self.request)
        data = self.GetFormValues(self.request)
        result, msgs = self.context.MailUserPass(email=data.get("email"), mailtmpl=self.mailpass, createNewPasswd=createNewPasswd, currentUser=self.view.User())
        if result:
            # Clear the form on success so the email is not re-displayed.
            data = {}
        return self._FinishFormProcessing(result, data, msgs, None, **kw)
class UserView(BaseView):
    def __init__(self, context, request):
        BaseView.__init__(self, context, request)
        # The form loads/stores objects of type "user"; groups and the
        # public-signup flag are adjusted by the view callables below.
        self.form = UserForm(view=self, loadFromType="user")
        self.form.groups = ""
        self.publicSignup = False
    def create(self):
        # Signup variant: account active immediately, password taken from
        # the form (not generated).
        self.form.activate=1
        self.form.generatePW=0
        self.form.Setup(subset="create")
        return self._render()
    def createNotActive(self):
        # Signup variant: account created inactive and must be activated
        # later (e.g. by an administrator or confirmation step).
        self.form.activate=0
        self.form.generatePW=0
        self.form.Setup(subset="create")
        return self._render()
def createPassword(self):
self.form.activate |
yxs1112003/Django-read-python | polls/views.py | Python | bsd-2-clause | 1,707 | 0.000586 | from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import RequestContext, loader
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from .models import Question, Choice
def index(request):
    """Render the five most recently published questions."""
    recent_questions = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'polls/index.html',
                  {'latest_question_list': recent_questions})
def detail(request, question_id):
    """Show the voting form for one question; 404 when it does not exist."""
    return render(request, 'polls/detail.html',
                  {'question': get_object_or_404(Question, pk=question_id)})
def results(request, question_id):
    """Render the results page for a question; 404 when it does not exist.

    (Removed the stale commented-out Choice-lookup variant that was left
    behind from an earlier revision.)
    """
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/result.html', {'question': question})
def vote(request, question_id):
    """Register a vote for the POSTed choice of ``question_id``.

    Redisplays the detail form with an error message when no choice was
    selected; otherwise increments the vote counter and redirects to the
    results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        chosen = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Missing form field or unknown choice id: show the form again.
        context = {
            'question': question,
            'error_message': "You didn't select a choice.",
        }
        return render(request, 'polls/detail.html', context)
    chosen.votes += 1
    chosen.save()
    # Redirect after a successful POST so that reloading the page does not
    # submit the vote a second time.
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
|
kbidarkar/robottelo | tests/foreman/api/test_host.py | Python | gpl-3.0 | 53,992 | 0 | # -*- encoding: utf-8 -*-
"""Unit tests for the ``hosts`` paths.
An API reference can be found here:
http://theforeman.org/api/apidoc/v2/hosts.html
:Requirement: Host
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_integer, gen_ipaddr, gen_mac, gen_string
from nailgun import client, entities
from requests.exceptions import HTTPError
from six.moves import http_client
from robottelo.api.utils import publish_puppet_module
from robottelo.config import settings
from robottelo.constants import CUSTOM_PUPPET_REPO, ENVIRONMENT
from robottelo.datafactory import (
invalid_interfaces_list,
invalid_values_list,
valid_data_list,
valid_hosts_list,
valid_interfaces_list,
)
from robottelo.decorators import (
bz_bug_is_open,
run_only_on,
stubbed,
tier1,
tier2,
tier3,
upgrade
)
from robottelo.decorators.func_locker import lock_function
from robottelo.test import APITestCase
class HostTestCase(APITestCase):
"""Tests for ``entities.Host().path()``."""
    @classmethod
    @lock_function
    def setUpClass(cls):
        """Setup common entities.

        Runs once per class (under a cross-process lock): creates the org
        and location, publishes a puppet content view, resolves the
        environment/lifecycle environment created alongside it, collects
        the puppet classes from the repo, and provisions a libvirt compute
        resource with an image.
        """
        super(HostTestCase, cls).setUpClass()
        cls.org = entities.Organization().create()
        cls.loc = entities.Location(organization=[cls.org]).create()
        # Content View and repository related entities
        cls.cv = publish_puppet_module(
            [{'author': 'robottelo', 'name': 'generic_1'}],
            CUSTOM_PUPPET_REPO,
            organization_id=cls.org.id
        )
        # Publishing the CV creates the puppet environment implicitly;
        # look it up by the content view's name.
        cls.env = entities.Environment().search(
            query={'search': u'content_view="{0}"'.format(cls.cv.name)}
        )[0].read()
        cls.lce = entities.LifecycleEnvironment().search(query={
            'search': 'name={0} and organization_id={1}'.format(
                ENVIRONMENT, cls.org.id)
        })[0].read()
        cls.puppet_classes = entities.PuppetClass().search(query={
            'search': u'name ~ "{0}" and environment = "{1}"'.format(
                'generic_1', cls.env.name)
        })
        # Compute Resource related entities
        cls.compresource_libvirt = entities.LibvirtComputeResource(
            organization=[cls.org],
            location=[cls.loc],
        ).create()
        cls.image = entities.Image(
            compute_resource=cls.compresource_libvirt).create()
@run_only_on('sat')
@tier1
def test_positive_get_search(self):
"""GET ``api/v2/hosts`` and specify the ``search`` parameter.
:id: d63f87e5-66e6-4886-8b44-4129259493a6
:expectedresults: HTTP 200 is returned, along with ``search`` term.
:CaseImportance: Critical
"""
query = gen_string('utf8', gen_integer(1, 100))
response = client.get(
entities.Host().path(),
auth=settings.server.get_credentials(),
data={u'search': query},
verify=False,
)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.json()['search'], query)
@run_only_on('sat')
@tier1
def test_positive_get_per_page(self):
"""GET ``api/v2/hosts`` and specify the ``per_page`` parameter.
:id: 9086f41c-b3b9-4af2-b6c4-46b80b4d1cfd
:expectedresults: HTTP 200 is returned, along with per ``per_page``
value.
:CaseImportance: Critical
"""
per_page = gen_integer(1, 1000)
response = client.get(
entities.Host().path(),
auth=settings.server.get_credentials(),
data={u'per_page': per_page},
verify=False,
)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.json()['per_page'], per_page)
    @run_only_on('sat')
    @tier1
    def test_negative_create_with_owner_type(self):
        """Create a host and specify only ``owner_type``.

        :id: cdf9d16f-1c47-498a-be48-901355385dde

        :expectedresults: The host can't be created as ``owner`` is required.

        :CaseImportance: Critical
        """
        for owner_type in ('User', 'Usergroup'):
            with self.subTest(owner_type):
                if owner_type == 'Usergroup' and bz_bug_is_open(1203865):
                    continue  # instead of skip for compatibility with py.test
                with self.assertRaises(HTTPError) as context:
                    entities.Host(owner_type=owner_type).create()
                # 422 Unprocessable Entity with a message naming the owner.
                self.assertEqual(context.exception.response.status_code, 422)
                self.assertRegexpMatches(
                    context.exception.response.text, "owner must be specified")
    @run_only_on('sat')
    @tier1
    def test_positive_update_owner_type(self):
        """Update a host's ``owner_type``.

        :id: b72cd8ef-3a0b-4d2d-94f9-9b64908d699a

        :expectedresults: The host's ``owner_type`` attribute is updated as
            requested.

        :CaseImportance: Critical
        """
        owners = {
            'User': entities.User(
                organization=[self.org], location=[self.loc]).create(),
            'Usergroup': entities.UserGroup().create(),
        }
        host = entities.Host(
            organization=self.org, location=self.loc).create()
        for owner_type in owners:
            with self.subTest(owner_type):
                if owner_type == 'Usergroup' and bz_bug_is_open(1210001):
                    continue  # instead of skip for compatibility with py.test
                # owner_type and owner must be updated together.
                host.owner_type = owner_type
                host.owner = owners[owner_type]
                host = host.update(['owner_type', 'owner'])
                self.assertEqual(host.owner_type, owner_type)
                self.assertEqual(host.owner.read(), owners[owner_type])
    @run_only_on('sat')
    @tier1
    def test_positive_create_with_name(self):
        """Create a host with different names and minimal input parameters

        :id: a7c0e8ec-3816-4092-88b1-0324cb271752

        :expectedresults: A host is created with expected name

        :CaseImportance: Critical
        """
        for name in valid_hosts_list():
            with self.subTest(name):
                host = entities.Host(name=name).create()
                # The server stores the FQDN: short name + its domain.
                self.assertEqual(
                    host.name,
                    '{0}.{1}'.format(name, host.domain.read().name)
                )
@run_only_on('sat')
@tier1
def test_positive_create_with_ip(self):
"""Create a host with IP address specified
:id: | 3f266906-c509-42ce-9b20-def448bf8d86
:expectedresults: A host is created with expected IP address
:CaseImportance: Critical
"""
ip_addr = gen_ipaddr()
host = entities.Host(ip=ip_addr).create()
self.assertEqual(host.ip, ip_addr)
    @run_only_on('sat')
    @tier2
    def test_positive_create_with_hostgroup(self):
        """Create a host with hostgroup specified

        :id: 8f9601f9-afd8-4a88-8f28-a5cbc996e805

        :expectedresults: A host is created with expected hostgroup assigned

        :CaseLevel: Integration
        """
        # Use a dedicated org/loc so the hostgroup and host share taxonomy.
        org = entities.Organization().create()
        loc = entities.Location(organization=[org]).create()
        hostgroup = entities.HostGroup(
            location=[loc],
            organization=[org],
        ).create()
        host = entities.Host(
            hostgroup=hostgroup,
            location=loc,
            organization=org,
        ).create()
        self.assertEqual(host.hostgroup.read().name, hostgroup.name)
@run_only_on('sat')
@tier2
def test_positive_create_inherit_lce_cv(self):
"""Create a host with hostgroup specified. Make sure host inherited
hostgroup's lifecycle environment and content-view
:id: 229cbdbc-838b-456c-bc6f-4ac895badfbc
:expectedresults: Host's lifecycle environment and content view match
the ones specified in hostgroup
:CaseLevel: Integration
:BZ: 1391656
"""
hostgroup = entities.HostGroup(
content_view=self.cv,
|
jsalcedo09/webengine | webengine/views.py | Python | gpl-2.0 | 1,293 | 0.006187 | '''
Created on 7/01/2014
@author: Jorge Salcedo
'''
import settings
from importlib import import_module
from webengine import route, WSGIWebEngine
import logging
@route('/_ah/warmup')
def warmup(handler):
    """App Engine warmup hook: pre-import every configured app's modules.

    Importing views/models/apis/admin_views ahead of the first request makes
    sure route and model registration has happened on a fresh instance.
    Missing modules are logged and skipped -- not every app provides all
    four modules, so ImportError is expected here.
    """
    for app in settings.APPS:
        for name in ('views', 'models', 'apis', 'admin_views'):
            # Build the dotted path outside the try so only the import
            # itself is guarded.
            module = 'apps.%s.%s' % (app, name)
            try:
                import_module(module)
            except ImportError:
                # The original message ended with "error:" but never passed
                # one; include the traceback via exc_info instead.
                logging.info("Could not import [%s]", module, exc_info=True)
    return "warming up..."
def handle_401(request, response, exception):
    """Handler for uncaught 401 responses: log and render an auth message.

    The original wrote the 404 handler's "page was here" text -- a
    copy-paste mistake; a 401 means the client is not authorized.
    """
    logging.exception(exception)
    response.write('Oops! You are not authorized to view this page!')
    response.set_status(401)
|
def handle_404(request, response, exception):
    """Handler for uncaught 404 responses: log and render a friendly note."""
    logging.exception(exception)
    message = 'Oops! I could swear this page was here!'
    response.write(message)
    response.set_status(404)
def handle_500(request, response, exception):
    """Handler for uncaught server errors: log the traceback, report 500."""
    logging.exception(exception)
    error_text = 'A server error occurred!'
    response.write(error_text)
    response.set_status(500)
# Register the handlers on the framework's singleton WSGI application so
# they are invoked for uncaught 401/404/500 responses.
WSGIWebEngine._instance.error_handlers[401] = handle_401 # @UndefinedVariable
WSGIWebEngine._instance.error_handlers[404] = handle_404 # @UndefinedVariable
WSGIWebEngine._instance.error_handlers[500] = handle_500 # @UndefinedVariable
noba3/KoTos | addons/plugin.audio.radio7ulm/default.py | Python | gpl-2.0 | 1,255 | 0.023108 | # -*- coding: cp1254 -*-
# please visit http://www.iptvxtra.net
import xbmc,xbmcgui,xbmcplugin,sys
icondir = xbmc.translatePath("special://home/addons/plugin.audio.radio7ulm/icons/")
plugin_handle = int(sys.argv[1])
def add_video_item(url, infolabels, img=''):
    """Add one playable (non-folder) stream entry to the Kodi directory.

    url        -- stream URL handed to the player when the entry is chosen
    infolabels -- dict of ListItem info labels; 'title' is required
    img        -- icon/thumbnail image path for the entry
    """
    listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img, thumbnailImage=img)
    # NOTE(review): labels are set under the 'video' type even though this
    # is an audio plugin -- presumably intentional for Kodi compatibility.
    listitem.setInfo('video', infolabels)
    listitem.setProperty('IsPlayable', 'true')
    xbmcplugin.addDirectoryItem(plugin_handle, url, listitem, isFolder=False)
# Fixed list of the five Radio 7 streams; each entry plays directly.
add_video_item('http://srv01.radio7.fmstreams.de/stream1/livestream.mp3',{ 'title': 'Radio 7 - Webradio'},img=icondir + 'radio-7_web.png')
add_video_item('http://srv02.radio7.fmstreams.de/radio7_upa',{ 'title': 'Radio 7 - 80er'},img=icondir + 'radio-7_80er.png')
add_video_item('http://srv02.radio7.fmstreams.de/radio7_downa',{ 'title': 'Radio 7 - Herz'},img=icondir + 'radio-7_herz.png')
add_video_item('http://str0.creacast.com/radio7_acta',{ 'title': 'Radio 7 - OnTour'},img=icondir + 'radio-7_ontour.png')
add_video_item('http://srv01.radio7.fmstreams.de/stream5/livestream.mp3',{ 'title': 'Radio 7 - Live'},img=icondir + 'radio-7_live.png')
# Close the directory listing and switch the container to view mode 500.
xbmcplugin.endOfDirectory(plugin_handle)
xbmc.executebuiltin("Container.SetViewMode(500)")
tiangolo/fastapi | tests/test_tutorial/test_path_operation_configurations/test_tutorial005.py | Python | mit | 4,083 | 0.000735 | from fastapi.testclient import TestClient
from docs_src.path_operation_configuration.tutorial005 import app
client = TestClient(app)
# Expected OpenAPI document for the tutorial app; compared verbatim against
# the server's generated /openapi.json in the test below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/items/": {
            "post": {
                "responses": {
                    "200": {
                        "description": "The created item",
                        "content": {
                            "application/json": {
                                "schema": {"$ref": "#/components/schemas/Item"}
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Create an item",
                "description": "Create an item with all the information:\n\n- **name**: each item must have a name\n- **description**: a long description\n- **price**: required\n- **tax**: if the item doesn't have tax, you can omit this\n- **tags**: a set of unique tag strings for this item",
                "operationId": "create_item_items__post",
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {"$ref": "#/components/schemas/Item"}
                        }
                    },
                    "required": True,
                },
            }
        }
    },
    "components": {
        "schemas": {
            "Item": {
                "title": "Item",
                "required": ["name", "price"],
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "price": {"title": "Price", "type": "number"},
                    "description": {"title": "Description", "type": "string"},
                    "tax": {"title": "Tax", "type": "number"},
                    "tags": {
                        "title": "Tags",
                        "uniqueItems": True,
                        "type": "array",
                        "items": {"type": "string"},
                        "default": [],
                    },
                },
            },
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document must match the expected schema."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_query_params_str_validations():
    """POSTing a minimal item fills the optional fields with defaults."""
    payload = {"name": "Foo", "price": 42}
    resp = client.post("/items/", json=payload)
    assert resp.status_code == 200, resp.text
    expected = {
        "name": "Foo",
        "price": 42,
        "description": None,
        "tax": None,
        "tags": [],
    }
    assert resp.json() == expected
|
cuker/sorl-thumbnail-legacy | sorl/thumbnail/tests/__init__.py | Python | bsd-3-clause | 665 | 0 | # For these tests to run successfully, two conditions must be met:
# 1. MEDIA_URL and MEDIA_ROOT must be set in settings
# 2. The user running the tests must have read/write access to MEDIA_ROOT
# Unit tests:
from sorl.thumbnail.tests.classes import ThumbnailTest, DjangoThumbnailTest
from | sorl.thumbnail.tests.templatetags import ThumbnailTagTest
from sorl.thumbnail.tests.fields import FieldTest, \
ImageWithThumbnailsFieldTest, ThumbnailFieldTest
# Doc tests:
from sorl.t | humbnail.tests.utils import utils_tests
from sorl.thumbnail.tests.templatetags import filesize_tests
# Expose the imported doctest suites to Python's doctest collection so the
# test runner executes them alongside the unit tests above.
__test__ = {
    'utils_tests': utils_tests,
    'filesize_tests': filesize_tests,
}
|
nearlyfreeapps/python-googleadwords | tests/adspygoogle/adwords/v201206/error_handling_unittest.py | Python | apache-2.0 | 2,306 | 0.010408 | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover Error Handling examples."""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert( | 0, os.path.join('..', '..', '..', '..'))
import time
import unittest
from examples.adspygoogle.adwords.v201206.error_handling import handle_partial_failures
from examples.adspygoogle.adwords.v201206.error_handling import handle_two_factor_authorization_error
from tests.adspygoogle.adwords import client
from tests.adspygoogle.adwords import SERV | ER_V201206
from tests.adspygoogle.adwords import TEST_VERSION_V201206
from tests.adspygoogle.adwords import util
from tests.adspygoogle.adwords import VERSION_V201206
class ErrorHandling(unittest.TestCase):
  """Unittest suite for Error Handling code examples."""
  # NOTE: these class-level statements run at import time; client.debug is
  # switched off globally for the whole suite.
  SERVER = SERVER_V201206
  VERSION = VERSION_V201206
  client.debug = False
  # Guards one-time creation of the shared campaign/ad-group fixtures.
  loaded = False

  def setUp(self):
    """Prepare unittest."""
    # Throttle to avoid hammering the AdWords API between tests.
    time.sleep(1)
    client.use_mcc = False
    if not self.__class__.loaded:
      self.__class__.campaign_id = util.CreateTestCampaign(client)
      self.__class__.ad_group_id = util.CreateTestAdGroup(
          client, self.__class__.campaign_id)
      self.__class__.loaded = True

  def tearDown(self):
    """Reset partial failure."""
    client.partial_failure = False

  def testHandlePartialFailures(self):
    """Tests whether we can handle partial failures."""
    handle_partial_failures.main(client, self.__class__.ad_group_id)

  def testHandleTwoFactorAuthorizationError(self):
    """Test whether we can handle two factor authorization errors."""
    handle_two_factor_authorization_error.main()
# Only run the suite when this API version is enabled for testing.
if __name__ == '__main__':
  if TEST_VERSION_V201206:
    unittest.main()
|
drgarcia1986/drf-pdf | drf_pdf/exceptions.py | Python | mit | 186 | 0 | # encoding: utf-8
from rest_framework import status
from rest_framework.exceptions import APIEx | ception
class PDFFileNotFound(APIException):
    """Raised when a requested PDF file does not exist; rendered as HTTP 404."""
    status_code = status.HTTP_404_NOT_FOUND
    # Without an explicit detail, DRF falls back to APIException's generic
    # server-error text, which is misleading for a 404.
    default_detail = 'PDF file not found.'
|
NathanW2/qquery | btree.py | Python | gpl-2.0 | 19,500 | 0.000359 | import bisect
import itertools
import operator
class _BNode(object):
__slots__ = ["tree", "contents", "children"]
def __init__(self, tree, contents=None, children=None):
self.tree = tree
self.contents = contents or []
self.children = children or []
if self.children:
assert len(self.contents) + 1 == len(self.children), \
"one more child than data item required"
def __repr__(self):
name = getattr(self, "children", 0) and "Branch" or "Leaf"
return "<%s %s>" % (name, ", ".join(map(str, self.contents)))
def lateral(self, parent, parent_index, dest, dest_index):
if parent_index > dest_index:
dest.contents.append(parent.contents[dest_index])
parent.contents[dest_index] = self.contents.pop(0)
if self.children:
dest.children.append(self.children.pop(0))
else:
dest.contents.insert(0, p | arent.contents[parent_index])
parent.contents[parent_index] = self.contents.pop()
if self.children:
dest.children.insert(0, self.children.pop())
def shrink(self, ancestors):
parent = None
if ancesto | rs:
parent, parent_index = ancestors.pop()
# try to lend to the left neighboring sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, left_sib, parent_index - 1)
return
# try the right neighbor
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, right_sib, parent_index + 1)
return
center = len(self.contents) // 2
sibling, push = self.split()
if not parent:
parent, parent_index = self.tree.BRANCH(
self.tree, children=[self]), 0
self.tree._root = parent
# pass the median up to the parent
parent.contents.insert(parent_index, push)
parent.children.insert(parent_index + 1, sibling)
if len(parent.contents) > parent.tree.order:
parent.shrink(ancestors)
def grow(self, ancestors):
parent, parent_index = ancestors.pop()
minimum = self.tree.order // 2
left_sib = right_sib = None
# try to borrow from the right sibling
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# try to borrow from the left sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# consolidate with a sibling - try left first
if left_sib:
left_sib.contents.append(parent.contents[parent_index - 1])
left_sib.contents.extend(self.contents)
if self.children:
left_sib.children.extend(self.children)
parent.contents.pop(parent_index - 1)
parent.children.pop(parent_index)
else:
self.contents.append(parent.contents[parent_index])
self.contents.extend(right_sib.contents)
if self.children:
self.children.extend(right_sib.children)
parent.contents.pop(parent_index)
parent.children.pop(parent_index + 1)
if len(parent.contents) < minimum:
if ancestors:
# parent is not the root
parent.grow(ancestors)
elif not parent.contents:
# parent is root, and its now empty
self.tree._root = left_sib or self
def split(self):
center = len(self.contents) // 2
median = self.contents[center]
sibling = type(self)(
self.tree,
self.contents[center + 1:],
self.children[center + 1:])
self.contents = self.contents[:center]
self.children = self.children[:center + 1]
return sibling, median
def insert(self, index, item, ancestors):
self.contents.insert(index, item)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if self.children:
# try promoting from the right subtree first,
# but only if it won't have to resize
additional_ancestors = [(self, index + 1)]
descendent = self.children[index + 1]
while descendent.children:
additional_ancestors.append((descendent, 0))
descendent = descendent.children[0]
if len(descendent.contents) > minimum:
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[0]
descendent.remove(0, ancestors)
return
# fall back to the left child
additional_ancestors = [(self, index)]
descendent = self.children[index]
while descendent.children:
additional_ancestors.append(
(descendent, len(descendent.children) - 1))
descendent = descendent.children[-1]
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[-1]
descendent.remove(len(descendent.children) - 1, ancestors)
else:
self.contents.pop(index)
if len(self.contents) < minimum and ancestors:
self.grow(ancestors)
class _BPlusLeaf(_BNode):
__slots__ = ["tree", "contents", "data", "next"]
    def __init__(self, tree, contents=None, data=None, next=None):
        self.tree = tree
        self.contents = contents or []  # sorted keys
        self.data = data or []          # values, parallel to contents
        self.next = next                # right sibling leaf (leaf chain)
        assert len(self.contents) == len(self.data), "one data per key"
    def insert(self, index, key, data, ancestors):
        # Insert key/value at ``index``; overflow triggers the inherited
        # shrink machinery, which splits via this class's split().
        self.contents.insert(index, key)
        self.data.insert(index, data)
        if len(self.contents) > self.tree.order:
            self.shrink(ancestors)
    def lateral(self, parent, parent_index, dest, dest_index):
        """Move one key/value pair to sibling ``dest`` and refresh the
        parent separator (B+ leaves keep values; separators mirror the
        first key of the right-hand leaf)."""
        if parent_index > dest_index:
            # dest is the left sibling: give away our first key/value.
            dest.contents.append(self.contents.pop(0))
            dest.data.append(self.data.pop(0))
            parent.contents[dest_index] = self.contents[0]
        else:
            # dest is the right sibling: give away our last key/value.
            dest.contents.insert(0, self.contents.pop())
            dest.data.insert(0, self.data.pop())
            parent.contents[parent_index] = dest.contents[0]
def split(self):
center = len(self.contents) // 2
median = self.contents[center - 1]
sibling = type(self)(
self.tree,
self.contents[center:],
self.data[center:],
self.next)
self.contents = self.contents[:center]
self.data = self.data[:center]
self.next = sibling
return sibling, sibling.contents[0]
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if index >= len(self.contents):
self, index = self.next, 0
key = self.contents[index]
# if any leaf that could accept the key can do so
# without any rebalancing necessary, then go that route
current = self
while current is not None and current.contents[0] == ke |
stuliveshere/PySeis | examples/01.0_import_su.py | Python | mit | 787 | 0.030496 | imp | ort PySeis as ps
import numpy as np
import pylab
#import dataset
# NOTE: renamed from ``input`` -- that shadowed the Python builtin.
su_data = ps.io.su.SU("./data/sample.su")
su_data.read("./data/raw.npy")

# The remaining steps of the tutorial are kept as commented examples.

#initialise dataset
#~ data, params = toolbox.initialise("geometries.su")

#trim data
#~ params['ns'] = 1500
#~ data = toolbox.slice(data, None, **params)
#~ data.tofile("geom_short.su")

#initialise dataset
#data, params = toolbox.initialise("geom_short.su")

#agc
#~ toolbox.agc(data, None, **params)
#params['gamma'] = 1.5
#toolbox.tar(data, None, **params)

#kills = [270, 300, 374, 614] #fldr
#mask = toolbox.build_mask(data['fldr'], kills)
#data = data[mask]
#data.tofile("prepro.su")

#display
#~ params['primary'] = 'fldr'
#~ params['secondary'] = 'tracf'
#~ params['wiggle'] = True
#~ toolbox.display(data, **params)
#~ pylab.show()
|
exleym/IWBT | iwbt/data/readers.py | Python | apache-2.0 | 2,496 | 0.004808 | """ DataReader code lives here. All DataReader classes should extend DataReader
and implement the following methods:
- help()
- get_data()
"""
from datetime import datetime
import requests
class DataReader(object):
    """Base class for external data readers.

    Subclasses override the class-level constants (kept in code until they
    move into a database table):

    * ``TARGET_URL``    -- URL template queried by ``get_data``
    * ``DATA_TYPE``     -- response format requested from the service
    * ``RETURN_FORMAT`` -- human-readable description returned by ``help()``
    """
    TARGET_URL = None
    DATA_TYPE = None
    RETURN_FORMAT = None

    def __init__(self):
        # No shared state at the base level.
        pass

    def help(self):
        """Describe the structure of the data this reader returns."""
        return self.RETURN_FORMAT
class USGSDataReader(DataReader):
    """Reader for USGS instantaneous-values water data (flow and level)."""

    TARGET_URL = "http://waterservices.usgs.gov/nwis/iv/?format={}&sites={}&parameterCd={}"
    DATA_TYPE = "json"
    USGS_CODE_FLOW = '00060'   # discharge, cubic feet per second
    USGS_CODE_LEVEL = '00065'  # gauge height, feet

    def __init__(self):
        super(USGSDataReader, self).__init__()

    def get_data(self, uri):
        """ Hit USGS API and retrieve JSON object. Return gauge-data dict.
            :param uri: USGS id of the gauge being queried
            :returns: dict with gauge_id, timestamp, level and flow_cfs
        """
        # USGS site codes are zero-padded to 8 digits.
        usgs_code = '0' * (8 - len(str(uri))) + str(uri)
        params = self.USGS_CODE_LEVEL + ',' + self.USGS_CODE_FLOW
        usgs_url = self.TARGET_URL.format(self.DATA_TYPE, usgs_code, params)
        result = requests.get(usgs_url).json()
        data = result["value"]["timeSeries"]
        # Map parameter code -> latest reading ({'value': .., 'dateTime': ..}).
        output = {x["variable"]["variableCode"][0]["value"]: x["values"][0]["value"][0]
                  for x in data}
        flow, level = None, None
        if self.USGS_CODE_FLOW in output:
            flow = output[self.USGS_CODE_FLOW]["value"]
        if self.USGS_CODE_LEVEL in output:
            level = output[self.USGS_CODE_LEVEL]["value"]
        # BUG FIX: the timestamp used to be read unconditionally from the
        # level ("00065") series and raised KeyError whenever a gauge only
        # reports flow.  Use whichever series is actually present.
        reading = output.get(self.USGS_CODE_LEVEL) or output.get(self.USGS_CODE_FLOW)
        timestamp = None
        if reading is not None:
            # NOTE(review): [:18] truncates the last digit of the seconds,
            # as in the original -- kept for behavioral compatibility.
            timestamp = datetime.strptime(
                reading["dateTime"][:18],
                "%Y-%m-%dT%H:%M:%S").strftime("%Y-%m-%d %H:%M:%S")
        return {"gauge_id": uri,
                "timestamp": timestamp,
                "level": level,
                "flow_cfs": flow}
class WundergroundReader(DataReader):
TARGET_URL = "http://api.wunderground.com/api/{}/geolookup/conditions/q/{}/{}.json"
    def __init__(self):
        # No reader-specific state; just delegate to the base class.
        super(WundergroundReader, self).__init__()
        pass
def get_data(self, uri):
""" Hit Wunderground API and retrieve JSON object. Return GaugeData object.
:param uri : this is the gauge being queried
:returns: <GaugeData>: updated values in the form of a GaugeData object
"""
|
ovresko/erpnext | erpnext/config/desktop.py | Python | gpl-3.0 | 12,428 | 0.047324 | # coding=utf-8
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Item",
"_doctype": "Item",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "link",
"link": "List/Item"
},
{
"module_name": "Customer",
"_doctype": "Customer",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "link",
"link": "List/Customer"
},
{
"module_name": "Supplier",
"_doctype": "Supplier",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "link",
"link": "List/Supplier"
},
{
"_doctype": "Employee",
"module_name": "Employee",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"type": "link",
"link": "List/Employee"
},
{
"module_name": "Project",
"_doctype": "Project",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "link",
"link": "List/Project"
},
{
"module_name": "Issue",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"_doctype": "Issue",
"type": "link",
"link": "List/Issue"
},
{
"module_name": "Lead",
"icon": "octicon octicon-broadcast",
"_doctype": "Lead",
"type": "link",
"link": "List/Lead"
},
{
"module_name": "Profit and Loss Statement",
"_doctype": "Account",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "link",
"link": "query-report/Profit and Loss Statement"
},
# old
{
"module_name": "Accounts",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module",
"hidden": 1
},
{
"module_name": "Stock",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "module",
"hidden": 1
},
{
"module_name": "CRM",
"color": "#EF4DB6",
"icon": "octicon octicon-broadcast",
"type": "module",
"hidden": 1
},
{
"module_name": "Selling",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "module",
"hidden": 1
},
{
"module_name": "Buying",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "module",
"hidden": 1
},
{
"module_name": "HR",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"label": _("Human Resources"),
"type": "module",
"hidden": 1
},
{
"module_name": "Manufacturing",
"color": "#7f8c8d",
"icon": "octicon octicon-tools",
"type": "module",
"hidden": 1
},
{
"module_name": "POS",
"color": "#589494",
"icon": "octicon octicon-credit-card",
"type": "page",
"link": "pos",
"label": _("POS")
},
{
"module_name": "Leaderboard",
"color": "#589494",
"icon": "octicon octicon-graph",
"type": "page",
"link": "leaderboard",
"label": _("Leaderboard")
},
{
"module_name": "Projects",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "module",
"hidden": 1
},
{
"module_name": "Support",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"type": "module",
"hidden": 1
},
{
"module_name": "Learn",
"color": "#FF888B",
"icon": "octicon octicon-device-camera-video",
"type": "module",
"is_help": True,
"label": _("Learn"),
"hidden": 1
},
{
"module_name": "Maintenance",
"color": "#FF888B",
"icon": "octicon octicon-tools",
"type": "module",
"label": _("Maintenance"),
"hidden": 1
},
{
"module_name": "Student",
"color": "#c0392b",
"icon": "octicon octicon-person",
"label": _("Student"),
"link": "List/Student",
"_doctype": "Student",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Group",
"color": "#d59919",
"icon": "octicon octicon-organization",
"label": _("Student Group"),
"link": "List/Student Group",
"_doctype": "Student Group",
"type": "list",
"hidden": 1
},
{
"module_name": "Course Schedule",
"color": "#fd784f",
"icon": "octicon octicon-calendar",
"label": _("Course Schedule"),
"link": "List/Course Schedule/Calendar",
"_doctype": "Course Schedule",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Attendance Tool",
"color": "#C0392B",
"icon": "octicon octicon-checklist",
"label": _("Student Attendance Tool"),
"link": "List/Student Attendance Tool",
"_doctype": "Student Attendance Tool",
"type": "list",
"hidden": 1
},
{
"module_name": "Course",
"color": "#8e44ad",
"icon": "octicon octicon-book",
"label": _("Course"),
"link": "List/Course",
"_doctype": "Course",
"type": "list",
"hidden": 1
},
{
"module_name": "Program",
"color": "#9b59b6",
"icon": "octicon octicon-repo",
"label": _("Program"),
"link": "List/Program",
"_doctype": "Program",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Applicant",
"color": "#4d927f",
"icon": "octicon octicon-clippy",
"label": _("Student Applicant"),
"link": "List/Student Applicant",
"_doctype": "Student Applicant",
"type": "list",
"hidden": 1
},
{
"module_name": "Fees",
"color": "#83C21E",
"icon": "fa fa-money",
"label": _("Fees"),
"link": "List/Fees",
"_doctype": "Fees",
"type": "list",
"hidden": 1
},
{
"module_name": "Instructor",
"color": "#a99e4c",
"icon": "octicon octicon-broadcast",
"label": _("Instructor"),
"link": "List/Instructor",
"_doctype": "Instructor",
"type": "list",
"hidden": 1
},
{
"module_name": "Room",
"color": "#f22683",
"icon": "fa fa-map-marker",
"label": _("Room"),
"link": "List/Room",
"_doctype": "Room",
"type": "list",
"hidden": 1
},
{
"module_name": "Education",
"color": "#428B46",
"icon": "octicon octicon-mortar-board",
"type": "module",
"label": _("Education"),
"hidden": 1
},
{
"module_name": "Healthcare",
"color": "#FF888B",
"icon": "fa fa-heartbeat",
"type": "module",
"label": _("Healthcare"),
"hidden": 1
},
{
"module_name": "Patient",
"color": "#6BE273",
"icon": "fa fa-user",
"doctype": "Patient",
"type": "link",
"link": "List/Patient",
"label": _("Patient"),
"hidden": 1
},
{
"module_name": "Healthcare Practitioner",
"color": "#2ecc71",
"icon": "fa fa-user-md",
"doctype": "Healthcare Practitioner",
"type": "link",
"link": "List/Healthcare Practitioner",
"label": _("Healthcare Practitioner"),
"hidden": 1
},
{
"module_name": "Patient Appointment",
"color": "#934F92",
"icon": "fa fa-calendar-plus-o",
"doctype": "Patient Appointment",
"type": "link",
"link": "List/Patient Appointment",
"label": _("Patient Appointment"),
"hidden": 1
},
{
"module_name": "Patient Encounter",
"colo | r": "#2ecc71",
"icon": "fa fa-stethoscope",
"doctype": "Patient Encounter",
"type": "link",
"link": "List/Patient Encounter",
"label": _("Patient Encounter"),
"hidden": 1
},
{
"module_name": "Lab Test",
"color": "#7578f6",
"icon": "octicon octicon-beaker",
"doctype": "Lab Test",
"type": "list",
"link": "List/Lab Test",
"la | bel": _("Lab Test"),
"hidden": 1
},
{
"module_name": "Vital Signs",
"color": "#2ecc71",
"icon": "fa fa-thermometer-empty",
"doctype": "Vital Signs",
"type": "list",
"link": "List/Vital Signs",
"label": _("Vital Signs"),
"hidden": 1
},
{
"module_name": "Clinical Procedure",
"color": "#FF888B",
"icon": "fa fa-medkit",
"doctype": "Clinical Procedure",
"type": "list",
"link": "List/Clinical Procedure",
"label": _("Clinical Procedure"),
"hidden": 1
},
{
"module_name": "Inpatient Record",
"color": "#7578f6",
"icon": "fa fa-list-alt",
"doctype": "Inpatient Record",
"type": "list",
"link": "List/Inpatient Record",
"label": _("Inpatient Record"),
"hidden": 1
},
{
"module_name": "Hub",
"color": "#009248",
"icon": "/assets/erpnext/images/hub_logo.svg",
"type": "page",
"link": "Hub/Item",
"label": _("Hub")
},
{
"module_name |
Donkyhotay/MoonPy | twisted/words/im/ircsupport.py | Python | gpl-3.0 | 9,837 | 0.010674 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""IRC support for Instance Messenger."""
import string
from twisted.words.protocols import irc
from twisted.words.im.locals import ONLINE
from twisted.internet import defer, reactor, protocol
from twisted.internet.defer import succeed
from twisted.words.im import basesupport, interfaces, locals
from zope.interface import implements
class IRCPerson(basesupport.AbstractPerson):
    """A contact on an IRC network, adapted to the Instance Messenger API."""
    def imperson_whois(self):
        """Send a WHOIS query for this person; requires a live connection."""
        if self.account.client is None:
            raise locals.OfflineError
        self.account.client.sendLine("WHOIS %s" % self.name)
    ### interface impl
    def isOnline(self):
        # No presence tracking here: a known IRC person is always "online".
        return ONLINE
    def getStatus(self):
        return ONLINE
    def setStatus(self,status):
        """Record *status* and refresh this contact in the contacts list UI."""
        self.status=status
        self.chat.getContactsList().setContactStatus(self)
    def sendMessage(self, text, meta=None):
        """Send *text* to this person, one IRC message per line.

        A meta {"style": "emote"} entry sends each line as a CTCP ACTION.
        Raises locals.OfflineError when not connected.
        """
        if self.account.client is None:
            raise locals.OfflineError
        for line in string.split(text, '\n'):
            if meta and meta.get("style", None) == "emote":
                self.account.client.ctcpMakeQuery(self.name,[('ACTION', line)])
            else:
                self.account.client.msg(self.name, line)
        return succeed(text)
class IRCGroup(basesupport.AbstractGroup):
    """An IRC channel, adapted to the Instance Messenger group API."""
    implements(interfaces.IGroup)
    def imgroup_testAction(self):
        print 'action test!'
    def imtarget_kick(self, target):
        """Kick *target* from this channel; requires a live connection."""
        if self.account.client is None:
            raise locals.OfflineError
        reason = "for great justice!"
        self.account.client.sendLine("KICK #%s %s :%s" % (
            self.name, target.name, reason))
    ### Interface Implementation
    def setTopic(self, topic):
        """Set the channel topic; raises locals.OfflineError when offline."""
        if self.account.client is None:
            raise locals.OfflineError
        self.account.client.topic(self.name, topic)
    def sendGroupMessage(self, text, meta={}):
        """Send *text* to the channel, one IRC message per line.

        A meta {"style": "emote"} entry sends the whole text as an emote.
        """
        if self.account.client is None:
            raise locals.OfflineError
        if meta and meta.get("style", None) == "emote":
            self.account.client.me(self.name,text)
            return succeed(text)
        #standard shmandard, clients don't support plain escaped newlines!
        for line in string.split(text, '\n'):
            self.account.client.say(self.name, line)
        return succeed(text)
    def leave(self):
        """Leave the channel and hide its conversation window."""
        if self.account.client is None:
            raise locals.OfflineError
        self.account.client.leave(self.name)
        self.account.client.getGroupConversation(self.name,1)
class IRCProto(basesupport.AbstractClientMixin, irc.IRCClient):
    def __init__(self, account, chatui, logonDeferred=None):
        """Set up per-connection state: NAMES buffers, memberships, topics."""
        basesupport.AbstractClientMixin.__init__(self, account, chatui,
                                                 logonDeferred)
        self._namreplies={}
        self._ingroups={}
        self._groups={}
        self._topics={}
    def getGroupConversation(self, name, hide=0):
        # IRC channel names are case-insensitive: normalise before lookup.
        name=string.lower(name)
        return self.chat.getGroupConversation(self.chat.getGroup(name, self),
                                              stayHidden=hide)
    def getPerson(self,name):
        return self.chat.getPerson(name, self)
    def connectionMade(self):
        """Register with the IRC server and join the account's channels."""
        # XXX: Why do I duplicate code in IRCClient.register?
        try:
            print 'connection made on irc service!?', self
            if self.account.password:
                self.sendLine("PASS :%s" % self.account.password)
            self.setNick(self.account.username)
            self.sendLine("USER %s foo bar :Twisted-IM user" % (self.nickname,))
            for channel in self.account.channels:
                self.joinGroup(channel)
            self.account._isOnline=1
            print 'uh, registering irc acct'
            if self._logonDeferred is not None:
                self._logonDeferred.callback(self)
            self.chat.getContactsList()
        except:
            import traceback
            traceback.print_exc()
    def setNick(self,nick):
        """Adopt *nick* and refresh the account's display name."""
        self.name=nick
        self.accountName="%s (IRC)"%nick
        irc.IRCClient.setNick(self,nick)
    def kickedFrom(self, channel, kicker, message):
        """Called when I am kicked from a channel.
        """
        print 'ono i was kicked', channel, kicker, message
        # Hide the conversation window for the channel we were kicked from.
        return self.chat.getGroupConversation(
            self.chat.getGroup(channel[1:], self), 1)
    def userKicked(self, kickee, channel, kicker, message):
        print 'whew somebody else', kickee, channel, kicker, message
    def noticed(self, username, channel, message):
        # NOTICEs must never trigger automatic replies; flag them as such.
        self.privmsg(username, channel, message, {"dontAutoRespond": 1})
    def privmsg(self, username, channel, message, metadata=None):
        """Route an incoming PRIVMSG to the group or private conversation."""
        if metadata is None:
            metadata = {}
        # Keep only the nick from the full "nick!user@host" prefix.
        username=string.split(username,'!',1)[0]
        if username==self.name: return
        if channel[0]=='#':
            group=channel[1:]
            self.getGroupConversation(group).showGroupMessage(username, message, metadata)
            return
        self.chat.getConversation(self.getPerson(username)).showMessage(message, metadata)
    def action(self,username,channel,emote):
        """Show an incoming CTCP ACTION ("/me") in the right conversation."""
        username=string.split(username,'!',1)[0]
        if username==self.name: return
        meta={'style':'emote'}
        if channel[0]=='#':
            group=channel[1:]
            self.getGroupConversation(group).showGroupMessage(username, emote, meta)
            return
        self.chat.getConversation(self.getPerson(username)).showMessage(emote,meta)
    def irc_RPL_NAMREPLY(self,prefix,params):
        """
        RPL_NAMREPLY
        >> NAMES #bnl
        << :Arlington.VA.US.Undernet.Org 353 z3p = #bnl :pSwede Dan-- SkOyg AG
        """
        group=string.lower(params[2][1:])
        users=string.split(params[3])
        for ui in range(len(users)):
            while users[ui][0] in ["@","+"]: # channel modes
                users[ui]=users[ui][1:]
        # NAMES replies may span several messages; accumulate until ENDOFNAMES.
        if not self._namreplies.has_key(group):
            self._namreplies[group]=[]
        self._namreplies[group].extend(users)
        for nickname in users:
            try:
                self._ingroups[nickname].append(group)
            except:
                self._ingroups[nickname]=[group]
    def irc_RPL_ENDOFNAMES(self,prefix,params):
        # End of the NAMES list: flush the accumulated members to the UI.
        group=params[1][1:]
        self.getGroupConversation(group).setGroupMembers(self._namreplies[string.lower(group)])
        del self._namreplies[string.lower(group)]
    def irc_RPL_TOPIC(self,prefix,params):
        # Stash the topic text until irc_333 delivers who set it.
        self._topics[params[1][1:]]=params[2]
    def irc_333(self,prefix,params):
        # RPL_TOPICWHOTIME: combine with the topic stored by irc_RPL_TOPIC.
        group=params[1][1:]
        self.getGroupConversation(group).setTopic(self._topics[group],params[2])
        del self._topics[group]
    def irc_TOPIC(self,prefix,params):
        # A user changed the topic while we are in the channel.
        nickname = string.split(prefix,"!")[0]
        group = params[0][1:]
        topic = params[1]
        self.getGroupConversation(group).setTopic(topic,nickname)
    def irc_JOIN(self,prefix,params):
        nickname=string.split(prefix,"!")[0]
        group=string.lower(params[0][1:])
        if nickname!=self.nickname:
            # Somebody else joined: track membership and notify the UI.
            try:
                self._ingroups[nickname].append(group)
            except:
                self._ingroups[nickname]=[group]
            self.getGroupConversation(group).memberJoined(nickname)
    def irc_PART(self,prefix,params):
        nickname=string.split(prefix,"!")[0]
        group=string.lower(params[0][1:])
        if nickname!=self.nickname:
            if group in self._ingroups[nickname]:
                self._ingroups[nickname].remove(group)
                self.getGroupConversation(group).memberLeft(nickname)
            else:
                print "%s left %s, but wasn't in the room."%(nickname,group)
    def irc_QUIT(self,prefix,params):
        nickname=string.split(prefix,"!")[0]
        if self._ingroups.has_key(nickname):
            # Announce the quit in every channel we shared with the user.
            for group in self._ingroups[nickname]:
                self.getGroupConversation(group).memberLeft(nickname)
            self._ingroups[nickname]=[]
        else:
            print '*** WARNING: ingroups had no such key %s' % nickname
d |
noman798/dcny | lib/f42/setup.py | Python | mpl-2.0 | 368 | 0.029891 | #coding:utf-8
from setuptools import setup, find_packages
# Packaging metadata for the f42 helper library (no runtime dependencies).
setup(
    name='f42',
    version="0.1.3",
    description="Some common use functions",
    author="Lerry",
    author_email="lvdachao@gmail.com",
    packages = ['f42'],
    zip_safe=False,
    include_package_data=True,
    install_requires = [
    ],
    url = "https://bitbucket.org/vcwatch/f42",
)
|
sixty-north/bark-spider | setup.py | Python | agpl-3.0 | 1,191 | 0.00084 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
# 'brooks',
'aiohttp',
'pandas',
'pytest',
'stevedore',
]
setup(name='bark_spider',
version='0.0',
description="A web front end for Sixty North's Brooks' Law simulator",
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Elm",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Internet :: WWW/HTTP",
"Topic :: | Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Sixty North AS',
author_email='austin@sixty-north.com',
url='http://github.com/sixty-north/bark_spider',
keywords='web pyramid pylons',
packages=fin | d_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="bark_spider",
)
|
praekelt/seed-stage-based-messaging | contentstore/apps.py | Python | bsd-3-clause | 190 | 0 | from django.apps import App | Config
class ContentStoreAppConfig(AppConfig):
    """Django app configuration for the contentstore app."""

    name = "contentstore"

    def ready(self):
        """Hook called once the app registry is fully populated.

        The import below is performed solely for its side effect of
        connecting the app's signal handlers; the previous dangling
        ``contentstore.signals`` expression (a no-op attribute access used
        to silence "unused import" warnings) is replaced by the
        conventional ``noqa`` marker.
        """
        import contentstore.signals  # noqa: F401
|
openstack/networking-bagpipe | networking_bagpipe/bagpipe_bgp/vpn/evpn/linux_vxlan.py | Python | apache-2.0 | 15,445 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from pyroute2 import ndb as ndb_mod # pylint: disable=no-name-in-module
from networking_bagpipe.bagpipe_bgp.common import log_decorator
from networking_bagpipe.bagpipe_bgp import constants as consts
from networking_bagpipe.bagpipe_bgp.engine import exa
from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers
from networking_bagpipe.bagpipe_bgp.vpn import evpn
from networking_bagpipe.privileged import privileged_utils
BRIDGE_NAME_PREFIX = "evpn---"
VXLAN_INTERFACE_PREFIX = "vxlan--"
class LinuxVXLANEVIDataplane(evpn.VPNInstanceDataplane):
    def __init__(self, *args, **kwargs):
        """Create (or adopt) the per-EVI Linux bridge and plug a VXLAN port."""
        super(LinuxVXLANEVIDataplane, self).__init__(*args, **kwargs)
        # An externally-managed bridge may be passed via 'linuxbr'; otherwise
        # derive a bridge name from the external instance id.
        if 'linuxbr' in kwargs:
            self.bridge_name = kwargs.get('linuxbr')
        else:
            self.bridge_name = (
                BRIDGE_NAME_PREFIX +
                self.external_instance_id)[:consts.LINUX_DEV_LEN]
        if not self._interface_exists(self.bridge_name):
            self.log.debug("Starting bridge %s", self.bridge_name)
            # Create bridge
            self._run_command("brctl addbr %s" % self.bridge_name,
                              run_as_root=True)
            self._run_command("brctl setfd %s 0" % self.bridge_name,
                              run_as_root=True)
            self._run_command("brctl stp %s off" % self.bridge_name,
                              run_as_root=True)
            self._run_command("ip link set %s up" % self.bridge_name,
                              run_as_root=True)
            self.log.debug("Bridge %s created", self.bridge_name)
        self._create_and_plug_vxlan_if()
        self.log.debug("VXLAN interface %s plugged on bridge %s",
                       self.vxlan_if_name, self.bridge_name)
        self._cleaning_up = False
    @log_decorator.log_info
    def cleanup(self):
        """Tear down this EVI's VXLAN interface and, if we created it, bridge."""
        self.log.info("Cleaning EVI bridge and VXLAN interface %s",
                      self.bridge_name)
        self._cleaning_up = True
        # removing the vxlan interface removes our routes,
        # but if we don't remove the vxlan if (if it was reused) then
        # cleanup will not happen, which is why we use cleanup assist
        # (see needs cleanup assist below)
        self._cleanup_vxlan_if()
        # Delete only EVPN Bridge (Created by dataplane driver)
        if BRIDGE_NAME_PREFIX in self.bridge_name:
            self._run_command("ip link set %s down" % self.bridge_name,
                              run_as_root=True,
                              raise_on_error=False)
            self._run_command("brctl delbr %s" % self.bridge_name,
                              run_as_root=True,
                              raise_on_error=False)
    def needs_cleanup_assist(self):
        # If we reused a vxlan interface we won't cleanup fdb entries
        # in cleanup(), so we need to have remove_dataplane_for_x
        # be called for reach state via cleanup assist
        return VXLAN_INTERFACE_PREFIX not in self.vxlan_if_name
    def _create_and_plug_vxlan_if(self):
        """Create (or reuse) the VXLAN interface for our VNI and bridge it."""
        # if a VXLAN interface, with the VNI we want to use, is already plugged
        # in the bridge, we want to reuse it
        with ndb_mod.main.NDB() as ndb:
            # pylint: disable=no-member
            for port_id in ndb.interfaces[self.bridge_name].ports:
                port = ndb.interfaces[port_id] # pylint: disable=no-member
                if (port['kind'] == "vxlan" and
                        port['vxlan_id'] == self.instance_label):
                    self.log.info("reuse vxlan interface %s for VXLAN VNI %s",
                                  port['ifname'], self.instance_label)
                    self.vxlan_if_name = port['ifname']
                    return
        self.vxlan_if_name = (VXLAN_INTERFACE_PREFIX +
                              self.external_instance_id)[:consts.LINUX_DEV_LEN]
        self.log.debug("Creating and plugging VXLAN interface %s",
                       self.vxlan_if_name)
        # A stale interface with our name may remain from a previous run.
        if self._interface_exists(self.vxlan_if_name):
            self._remove_vxlan_if()
        dst_port_spec = ""
        if self.driver.config.vxlan_dst_port:
            dst_port_spec = ("dstport %d" %
                             self.driver.config.vxlan_dst_port)
        # Create VXLAN interface
        self._run_command(
            "ip link add %s type vxlan id %d local %s nolearning proxy %s" %
            (self.vxlan_if_name, self.instance_label,
             self.driver.get_local_address(), dst_port_spec),
            run_as_root=True
        )
        self._run_command("ip link set %s up" % self.vxlan_if_name,
                          run_as_root=True)
        # Plug VXLAN interface into bridge
        self._run_command("brctl addif %s %s" % (self.bridge_name,
                                                 self.vxlan_if_name),
                          run_as_root=True)
    def _cleanup_vxlan_if(self):
        """Unplug and delete the VXLAN interface unless it was merely reused."""
        if VXLAN_INTERFACE_PREFIX not in self.vxlan_if_name:
            self.log.debug("we reused the VXLAN interface, don't cleanup")
            return
        if self._is_vxlan_if_on_bridge():
            # Unplug VXLAN interface from Linux bridge
            self._unplug_from_bridge(self.vxlan_if_name)
        self._remove_vxlan_if()
def _remove_vxlan_if(self):
if not VXLAN_INTERFACE_PREFIX not in self.vxlan_if_name:
self.log.debug("we reused the VXLAN interface, don't remove")
return
# Remove VXLAN interface
self._run_command("ip link set %s down" % self.vxlan_if_name,
run_as_root=True)
self._run_command("ip link del %s" % self.vxlan_if_name,
run_as_root=True)
    def _is_if_on_bridge(self, ifname):
        """Return True when *ifname* is currently a port of our bridge."""
        with ndb_mod.main.NDB() as ndb:
            try:
                # pylint: disable=no-member
                for port_id in ndb.interfaces[self.bridge_name].ports:
                    port = ndb.interfaces[port_id] # pylint: disable=no-member
                    if port.ifname == ifname:
                        return True
            except KeyError:
                # bridge (or port) unknown to NDB: treat as "not on bridge"
                return False
        return False
    def _is_vxlan_if_on_bridge(self):
        return self._is_if_on_bridge(self.vxlan_if_name)
    def _interface_exists(self, interface):
        """Check if interface exists."""
        (_, exit_code) = self._run_command("ip link show dev %s" % interface,
                                           raise_on_error=False,
                                           acceptable_return_codes=[-1])
        return (exit_code == 0)
    def _unplug_from_bridge(self, interface):
        # Only attempt the unplug when the bridge itself still exists.
        if self._interface_exists(self.bridge_name):
            self._run_command("brctl delif %s %s" %
                              (self.bridge_name, interface),
                              run_as_root=True,
                              acceptable_return_codes=[0, 1])
    def set_gateway_port(self, linuxif, gw_ip):
        """Plug the gateway port *linuxif* into the EVI bridge."""
        self._run_command("brctl addif %s %s" %
                          (self.bridge_name, linuxif),
                          run_as_root=True,
                          raise_on_error=False)
        self._fdb_dump()
    def gateway_port_down(self, linuxif):
        """Unplug the gateway port *linuxif* from the EVI bridge."""
        self._run_command("brctl delif %s %s" %
                          (self.bridge_name, linuxif),
                          run_as_root=True,
                          raise_on_error=False)
# TODO(tmorin): n |
lopp2005/spatial_cluster_fs | tool_densfilter.py | Python | apache-2.0 | 5,412 | 0.013673 | # -*- coding: utf-8 -*-
"""
Density Filter Tool
Created on Thu May 11 11:03:05 2017
@author: cheny
"""
from arcpy import Parameter
import arcpy
from section_cpu import dens_filter_cpu
from multiprocessing import cpu_count
class DensFilterTool(object):
    """ArcGIS geoprocessing tool: density-based filtering of classified points.

    Parameter order in getParameterInfo() is a contract: updateParameters()
    and execute() address parameters by positional index.
    """
    def __init__(self):
        """Classify Tool"""
        self.label = "4 Density Filtering Tool"
        self.description = "Post Processing - Density Filter"
        self.canRunInBackground = True
    def getParameterInfo(self):
        """Define parameter definitions"""
        #1 classified input points
        paramclsinput = Parameter(
            displayName="Input Classified Points",
            name="in_cls_points",
            datatype="DEFeatureClass",
            parameterType="Required",
            direction="Input")
        paramclsinput.filter.list = ["Point"]
        #2 cluster-center input points
        paramcntrinput = Parameter(
            displayName="Input Centers Points",
            name="in_cntr_points",
            datatype="DEFeatureClass",
            parameterType="Required",
            direction="Input")
        paramcntrinput.filter.list = ["Point"]
        #3 integer identifier field on the classified points
        paramidfield = Parameter(
            displayName="Identifier Field",
            name="id_field",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        paramidfield.parameterDependencies = [paramclsinput.name]
        paramidfield.filter.list = ['Short','Long']
        #4 field linking each point to its cluster center
        paramcntridfield = Parameter(
            displayName="Center ID Field",
            name="cntr_id_field",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        paramcntridfield.parameterDependencies = [paramclsinput.name]
        paramcntridfield.filter.list = ['Short','Long']
        paramcntridfield.value='CNTR_ID'
        #5 numeric density field
        paramdens = Parameter(
            displayName="Density Field",
            name="density_field",
            datatype="Field",
            parameterType="Required",
            direction="Input")
        # Accept any numeric field type for the density values.
        paramdens.filter.list = ['Short','Long','Float','Single','Double']
        paramdens.parameterDependencies = [paramclsinput.name]
        paramdens.value='DENSITY'
        #6 output feature class
        paramclsoutput = Parameter(
            displayName="Output Classified Points",
            name="out_cls_points",
            datatype="DEFeatureClass",
            parameterType="Required",
            direction="Output")
        #7 distance threshold for density connection
        paramdistthrs = Parameter(
            displayName="Distance for Density Connection",
            name="distthrs",
            datatype="GPDouble",
            parameterType="Required",
            direction="Input"
            )
        paramdistthrs.value=100.0
        #8 density-ratio threshold for density connection
        paramdensthrs= Parameter(
            displayName="Density Threshold for Density Connection",
            name="densthrs",
            datatype="GPDouble",
            parameterType="Required",
            direction="Input"
            )
        paramdensthrs.value=1.2
        #9 computation device (only CPU is currently supported)
        paramdevice = Parameter(
            displayName="Device for Calculation",
            name="calc_device",
            datatype="GPString",
            parameterType="Required",
            direction="Input"
            )
        paramdevice.filter.list=['CPU']
        paramdevice.value='CPU'
        #10 number of CPU worker processes
        paramcpuc = Parameter(
            displayName="CPU Parallel Cores",
            name="cpu_cores",
            datatype="GPLong",
            parameterType="Required",
            direction="Input"
            )
        paramcpuc.value=cpu_count()
        params = [paramclsinput,paramcntrinput,paramidfield,
                  paramcntridfield,paramdens,paramclsoutput,
                  paramdistthrs,paramdensthrs,paramdevice,
                  paramcpuc]
        return params
    def updateParameters(self, parameters):
        """Derive a default output path from the input once the input is set."""
        # if parameters[0].altered and not parameters[2].altered:
        #     parameters[2].value=arcpy.Describe(parameters[0].valueAsText).OIDFieldName
        if parameters[0].altered and not parameters[5].altered:
            in_fe=parameters[0].valueAsText
            parameters[5].value=in_fe[:len(in_fe)-4]+'_filter'+in_fe[-4:] if in_fe[-3:]=='shp' else in_fe+'_filter'
        return
    def execute(self, parameters, messages):
        """Read parameter values by index and run the CPU density filter.

        parameters[8] (device) is intentionally unused: only 'CPU' exists.
        """
        cls_input=parameters[0].valueAsText
        cntr_input=parameters[1].valueAsText
        id_field=parameters[2].valueAsText
        cntr_id_field=parameters[3].valueAsText
        dens_field=parameters[4].valueAsText
        cls_output=parameters[5].valueAsText
        dist_thrs=parameters[6].value
        dens_thrs=parameters[7].value
        cpu_core=parameters[9].value
        dens_filter_cpu(cls_input,cntr_input,id_field,
                        cntr_id_field,dens_field,cls_output,
                        dist_thrs,dens_thrs,cpu_core)
        return
|
andrewbird/wader | core/oses/linux.py | Python | gpl-2.0 | 20,257 | 0.000444 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2008 Vodafone España, S.A.
# Copyright (C) 2008-2009 Warp Networks, S.L.
# Author: Pablo Martí
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) | any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Gen | eral Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Linux-based OS plugin"""
import dbus
from functools import partial
from os.path import join, exists
import re
import subprocess
from zope.interface import implements
from twisted.internet import defer, reactor, utils
from twisted.python import log, reflect
from twisted.python.procutils import which
from wader.common import consts
from wader.common.interfaces import IHardwareManager
from wader.common.utils import get_file_data, natsort
from core.hardware.base import identify_device, probe_ports
from core.plugin import PluginManager
from core.oses.unix import UnixPlugin
from core.startup import setup_and_export_device
from core.serialport import Ports
IDLE, BUSY = range(2)
ADD_THRESHOLD = 6
MODEL, VENDOR, DRIVER = "ID_MODEL_ID", "ID_VENDOR_ID", "ID_USB_DRIVER"
SUBSYSTEMS = ["tty", "usb", "net"]
REQUIRED_PROPS = [VENDOR, MODEL, DRIVER, "ID_BUS", "DEVNAME"]
BAD_DEVFILE = re.compile('^/dev/(tty\d*?|console|ptmx)$')
def get_ancestor(device):
    """
    Returns the greatest ancestor with matching vid / pid
    """
    def _vid_pid(dev):
        # Extract (vendor, model) as ints from the udev properties;
        # missing or malformed values stay None.
        vendor = model = None
        for key in dev.get_property_keys():
            if key == VENDOR:
                try:
                    vendor = int(dev.get_property(key), 16)
                except (ValueError, TypeError):
                    pass
            elif key == MODEL:
                try:
                    model = int(dev.get_property(key), 16)
                except (ValueError, TypeError):
                    pass
        return (vendor, model)

    target = _vid_pid(device)
    ancestor = node = device
    # Walk up the device hierarchy, remembering the last (highest) node
    # whose vid/pid still matches the original device's.
    while node is not None:
        if _vid_pid(node) == target:
            ancestor = node
        node = node.get_parent()
    return ancestor
def is_valid_device(device):
    """Checks whether ``device`` is valid"""
    devfile = device.get_device_file()
    if not devfile:
        return False
    # before checking all the properties, filter out all the /dev/tty%d
    if BAD_DEVFILE.match(devfile):
        return False
    # filter out /dev/usb/foo/bar/foo like too
    if len(devfile.split('/')) > 3:
        return False
    # it must expose every required property, otherwise we are not
    # interested in it
    props = device.get_property_keys()
    return all(prop in props for prop in REQUIRED_PROPS)
def get_devices(client):
    """Return the valid devices found under the tty, usb and net subsystems."""
    return [dev
            for subsystem in SUBSYSTEMS
            for dev in client.query_by_subsystem(subsystem)
            if is_valid_device(dev)]
class HardwareManager(object):
"""
I find and configure devices on Linux
I am resilient to ports assigned in unusual locations
and devices sharing ids.
"""
implements(IHardwareManager)
    def __init__(self):
        super(HardwareManager, self).__init__()
        #: dictionary with all my configured clients
        self.clients = {}
        #: reference to StartupController
        self.controller = None
        self._waiting_deferred = None
        # remember the total client count for opath generation
        self._client_count = -1
        gudev = reflect.namedAny("gudev")
        self.gudev_client = gudev.Client(SUBSYSTEMS)
        # temporary place to store hotplugged devices to process
        self._hotplugged_devices = []
        self._call = None
        self._connect_to_signals()
    def _connect_to_signals(self):
        # gudev emits "uevent" for add/remove events on the watched subsystems
        self.gudev_client.connect("uevent", self._on_uevent)
    def _on_uevent(self, client, action, device):
        """Handle a udev uevent: unregister on 'remove', queue on 'add'.

        Added devices are debounced: processing is delayed and the timer is
        reset on each new port, so a modem exposing several ports in a burst
        is handled once.
        """
        msg = "UEVENT device: %s action: %s"
        log.msg(msg % (device.get_sysfs_path(), action))
        if action == 'remove':
            # handle remove
            for opath, plugin in self.clients.items():
                if plugin.sysfs_path == device.get_sysfs_path():
                    self.controller.DeviceRemoved(plugin.opath)
                    self._unregister_client(plugin)
        elif action == 'add':
            # if valid, append it to the list of hotplugged devices
            # for later processing
            if is_valid_device(device):
                self._hotplugged_devices.append(device)
                if self._call is None:
                    # the first time we set a small delay and whenever a device
                    # is added we will reset the call 2 seconds
                    self._call = reactor.callLater(2,
                                            self._process_hotplugged_devices)
                elif self._call.active():
                    # XXX: this can be optimized by substracting x milliseconds
                    # for every device added to the reset call. However it
                    # introduces some more logic and perhaps should live outside.
                    self._call.reset(ADD_THRESHOLD)
    def register_controller(self, controller):
        """
        See `IHardwareManager.register_controller`
        """
        self.controller = controller
    def get_devices(self):
        """See :meth:`wader.common.interfaces.IHardwareManager.get_devices`"""
        # If clients is an empty dict we assume that this is the first
        # time get_devices is executed. If not, we just return the current
        # devices. If get_devices is executed in the middle of a hotplugging
        # event, the "just added" device won't be returned, but it will be
        # processed in a few seconds by _process_hotplugged_devices anyway.
        if self.clients:
            return defer.succeed(self.clients.values())
        return self._process_found_devices(get_devices(self.gudev_client))
    def _process_hotplugged_devices(self):
        # get DevicePlugin out of a list of gudev.Device
        self._process_found_devices(self._hotplugged_devices)
        # reset the debounce state for the next hotplug burst
        self._hotplugged_devices, self._call = [], None
    def _process_found_devices(self, devices=None, emit=True):
        """
        Processes gudev ``devices`` and returns ``DevicePlugin``s
        Find devices with a common parent and merge them, identify
        the ones that need it, register and emit a signal if ``emit``
        is True.
        """
        deferreds = []
        for device in self._setup_devices(devices):
            d = identify_device(device)
            d.addCallback(self._register_client, emit=emit)
            deferreds.append(d)
        return defer.gatherResults(deferreds)
def _setup_devices(self, devices):
"""Sets up ``devices``"""
found_devices = {}
for device in devices:
props = {}
for prop in REQUIRED_PROPS:
value = device.get_property(prop)
# values are either string or hex
try:
props[prop] = int(value, 16)
except ValueError:
props[prop] = value
if 'DEVNAME' in props:
abspath = device.get_device_file()
if props['DEVNAME'] != abspath:
# Sometimes the DEVNAME property obtained from 'gudev' is
# just a relative pathname. This seems to occur on boot
# with the device already inserted, |
essepuntato/opencitations | script/statistics.py | Python | isc | 3,561 | 0.001685 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
__author__ = 'essepuntato'
from conf_spacin import *
from datetime import datetime
from SPARQLWrapper import SPARQLWrapper, JSON
queries = [
# 0
"""
PREFIX fabio: <http://purl.org/spar/fabio/>
PREFIX cito: <http://purl.org/spar/cito/>
SELECT (count(?citing) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/br/> {
?citing a fabio:Expression .
FILTER EXISTS { ?citing cito:cites | ^cito:cites [] }
}
}""",
# 1
"""
PREFIX cito: <http://purl.org/spar/cito/>
SELECT (count(?citing) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/br/> {
?citing cito:cites ?cited
}
}""",
# 2
"""
PREFIX fabio: <http://purl.org/spar/fabio/>
PREFIX frbr: <http://purl.org/vocab/frbr/core#>
SELECT (count(DISTINCT ?container) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/br/> {
?container ^frbr:partOf ?something .
FILTER NOT EXISTS { ?container frbr:partOf ?other_container }
}
}""",
# 3
"""
PREFIX datacite: <http://purl.org/spar/datacite/>
PREFIX pro: <http://purl.org/spar/pro/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT (count(?id) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/id/> {
?id datacite:usesIdentifierScheme datacite:orcid
}
}""",
# 4
"""
PREFIX fabio: <http://purl.org/spar/fabio/>
PREFIX cito: <http://purl.org/spar/cito/>
SELECT (count(?citing) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/br/> {
?citing a fabio:Expression .
FILTER EXISTS { ?citing cito:cites [] }
}
}""",
# 5
"""
PREFIX fabio: <http://purl.org/spar/fabio/>
PREFIX cito: <http://purl.org/spar/cito/>
SELECT (count(?cited) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/br/> {
?cited a fabio:Expression .
FILTER EXISTS { ?cited ^cito:cites [] }
}
}""",
# 6
"""
PREFIX datacite: <http://purl.org/spar/datacite/>
PREFIX pro: <http://purl.org/spar/pro/>
PREFIX foaf: <http://xmlns.com/foaf | /0.1/>
SELECT (count(?auth) as ?tot) {
GRAPH <https://w3id.org/oc/corpus/ra/> {
?auth a foaf:Agent
}
}"""
]
# Run each count query against the triplestore and append one CSV row,
# prefixed with the current timestamp, to the statistics file.
tp = SPARQLWrapper(triplestore_url)
tp.setMethod('GET')
res = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
for idx, query in enumerate(queries):
    if idx in (0, 2, 3, 6):
        # These queries are skipped; "-" keeps the CSV columns aligned.
        res += ",-"
    else:
        tp.setQuery(query)
        tp.setReturnFormat(JSON)
        results = tp.query().convert()
        for result in results["results"]["bindings"]:
            res += "," + result["tot"]["value"]
with open(base_home + "statistics.csv", "a") as f:
    f.write(res + "\n")
|
Orav/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/commands/search.py | Python | lgpl-3.0 | 4,868 | 0.001438 | import sys
import textwrap
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
      %prog [options] <query>"""
    summary = 'Search PyPI for packages.'
    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='https://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')
        self.parser.insert_option_group(0, self.cmd_opts)
    def run(self, options, args):
        """Run the search and print results; return a pip status code."""
        if not args:
            raise CommandError('Missing required argument (search query).')
        query = args
        index_url = options.index
        pypi_hits = self.search(query, index_url)
        hits = transform_hits(pypi_hits)
        terminal_width = None
        if sys.stdout.isatty():
            # only wrap output when attached to a real terminal
            terminal_width = get_terminal_size()[0]
        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND
    def search(self, query, index_url):
        """Run an XML-RPC search (name OR summary) against *index_url*."""
        pypi = xmlrpclib.ServerProxy(index_url)
        hits = pypi.search({'name': query, 'summary': query}, 'or')
        return hits
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.

    Fixes the ``name not in packages.keys()`` anti-idiom and the repeated
    ``packages[name]`` lookups by fetching the record once per hit.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']
        if score is None:
            score = 0
        entry = packages.get(name)
        if entry is None:
            # first time we see this package: start its record
            packages[name] = {'name': name, 'summary': summary,
                              'versions': [version], 'score': score}
        else:
            entry['versions'].append(version)
            # if this is the highest version, replace summary and score
            if version == highest_version(entry['versions']):
                entry['summary'] = summary
                entry['score'] = score
    # each record has a unique name now; return them sorted by score
    return sorted(packages.values(), key=lambda x: x['score'], reverse=True)
def print_results(hits, name_column_width=25, terminal_width=None):
    """Pretty-print search hits, annotating locally installed packages.

    :param hits: package dicts as produced by ``transform_hits``
    :param name_column_width: columns reserved for the package name
    :param terminal_width: wrap summaries to this width when not None
    """
    # Use a set: membership is tested once per hit, and a list scan per
    # hit made this accidentally O(hits * installed).
    installed_packages = set(p.project_name for p in pkg_resources.working_set)
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
            summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.notify(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            # some terminals cannot render the hit; skip it rather than crash
            pass
def compare_versions(version1, version2):
    """cmp()-style comparison of two version strings.

    Tries StrictVersion first, then falls back to LooseVersion, then to a
    plain string comparison of the parsed version parts.
    """
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    # in case of abnormal version number, fall back to LooseVersion
    except ValueError:
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # certain LooseVersion comparions raise due to unorderable types,
        # fallback to string comparison
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
    """Return the highest of *versions*, as ordered by compare_versions()."""
    def pick_higher(left, right):
        # Keep the left candidate only when it compares strictly greater.
        return left if compare_versions(left, right) == 1 else right
    return reduce(pick_higher, versions)
|
# Re-export the dynamic widget classes as this package's public API.
# (Repaired: the module/import names were corrupted by stray separators.)
from diamondash.widgets.dynamic.dynamic import (
    DynamicWidgetConfig,
    DynamicWidget)

__all__ = [
    'DynamicWidgetConfig',
    'DynamicWidget']
|
benjyw/pants | src/python/pants/backend/shell/dependency_inference.py | Python | apache-2.0 | 8,271 | 0.003748 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import DefaultDict
from pants.backend.shell.lint.shellcheck.subsystem import Shellcheck
from pants.backend.shell.shell_setup import ShellSetup
from pants.backend.shell.target_types import ShellSources
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.engine.addresses import Address
from pants.engine.collection import DeduplicatedCollection
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
SourcesPaths,
SourcesPathsRequest,
Targets,
WrappedTarget,
)
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ShellMapping:
    """A mapping of Shell file names to their owning file address."""

    # file path -> the single, unambiguous owning target address
    mapping: FrozenDict[str, Address]
    # file path -> all claimant addresses, for files owned by more than one target
    ambiguous_modules: FrozenDict[str, tuple[Address, ...]]
@rule(desc="Creating map of Shell file names to Shell targets", level=LogLevel.DEBUG)
async def map_shell_files() -> ShellMapping:
all_expanded_targets = await Get(Targets, AddressSpecs([DescendantAddresses("")]))
shell_tgts = tuple(tgt for tgt in all_expanded_targets if tgt.has_field(ShellSources))
sources_per_target = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[ShellSources])) for tgt in shell_tgts
)
files_to_addresses: dict[str, Address] = {}
files_with_multiple_owners: DefaultDict[str, set[Address]] = defaultdict(set)
for tgt, sources in zip(shell_tgts, sources_per_target):
for f in sources.files:
if f in files_to_addresses:
files_with_multiple_owners[f].update({files_to_addresses[f], tgt.address})
else:
files_to_addresses[f] = tgt.address
# Remove files with ambiguous owners.
for ambiguous_f in files_with_multiple_owners:
files_to_addresses.pop(ambiguous_f)
return ShellMapping(
mapping=FrozenDict(sorted(files_to_addresses.items())),
ambiguous_modules=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(files_with_multiple_owners.items())
),
)
class ParsedShellImports(DeduplicatedCollection):
sort_input = True
@dataclass(frozen=True)
class ParseShellImportsRequest:
# NB: We parse per-file, rather than per-target. This is necessary so that we can have each
# file in complete isolation without its sibling files present so that Shellcheck errors when
# trying to source a sibling file, which then allows us to extract that path.
digest: Digest
fp: str
PATH_FROM_SHELLCHECK_ERROR = re.compile(r"Not following: (.+) was not specified as input")
@rule
async def parse_shell_imports(
    request: ParseShellImportsRequest, shellcheck: Shellcheck
) -> ParsedShellImports:
    """Extract the paths `source`d by a single Shell file.

    We use Shellcheck to parse for us by running it against each file in
    isolation, which means that all `source` statements will error (SC1091,
    "was not specified as input"). We then extract the problematic paths from
    Shellcheck's JSON output.
    """
    downloaded_shellcheck = await Get(
        DownloadedExternalTool, ExternalToolRequest, shellcheck.get_request(Platform.current)
    )
    input_digest = await Get(Digest, MergeDigests([request.digest, downloaded_shellcheck.digest]))
    process_result = await Get(
        FallibleProcessResult,
        Process(
            # NB: We do not load up `[shellcheck].{args,config}` because it would risk breaking
            # determinism of dependency inference in an unexpected way.
            [downloaded_shellcheck.exe, "--format=json", request.fp],
            input_digest=input_digest,
            description=f"Detect Shell imports for {request.fp}",
            level=LogLevel.DEBUG,
            # We expect this to always fail, but it should still be cached because the process is
            # deterministic.
            cache_scope=ProcessCacheScope.ALWAYS,
        ),
    )
    try:
        output = json.loads(process_result.stdout)
    except json.JSONDecodeError:
        logger.error(
            f"Parsing {request.fp} for dependency inference failed because Shellcheck's output "
            f"could not be loaded as JSON. Please open a GitHub issue at "
            f"https://github.com/pantsbuild/pants/issues/new with this error message attached.\n\n"
            f"\nshellcheck version: {shellcheck.version}\n"
            f"process_result.stdout: {process_result.stdout.decode()}"
        )
        return ParsedShellImports()
    paths = set()
    for error in output:
        # SC1091 is the "file was not specified as input" error that marks a
        # `source`d path; everything else is irrelevant here.
        if error.get("code") != 1091:
            continue
        msg = error.get("message", "")
        matches = PATH_FROM_SHELLCHECK_ERROR.match(msg)
        if matches:
            paths.add(matches.group(1))
        else:
            logger.error(
                f"Parsing {request.fp} for dependency inference failed because Shellcheck's error "
                f"message was not in the expected format. Please open a GitHub issue at "
                f"https://github.com/pantsbuild/pants/issues/new with this error message "
                f"attached.\n\n\nshellcheck version: {shellcheck.version}\n"
                f"error JSON entry: {error}"
            )
    return ParsedShellImports(paths)
class InferShellDependencies(InferDependenciesRequest):
infer_from = ShellSources
@rule(desc="Inferring Shell dependencies by analyzing imports")
async def infer_shell_dependencies(
request: InferShellDependencies, shell_mapping: ShellMapping, shell_setup: ShellSetup
) -> InferredDependencies:
if not shell_setup.dependency_inference:
return InferredDependencies([], sibling_dependencies_inferrable=False)
address = request.sources_field.address
wrapped_tgt = await Get(WrappedTarget, Address, address)
explicitly_provided_deps, hydrated_sources = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),
Get(HydratedSources, HydrateSourcesRequest(request.sources_field)),
)
per_file_digests = await MultiGet(
Get(Digest, DigestSubset(hydrated_sources.snapshot.digest, PathGlobs([f])))
for f in hydrated_sources.snapshot.files
)
all_detected_imports = await MultiGet(
Get(ParsedShellImports, ParseShellImportsRequest(digest, f))
for digest, f in zip(per_file_digests, hydrated_sources.snapshot.files)
)
result: OrderedSet[Address] = OrderedSet()
for detected_imports in all_detected_imports:
for import_path in detected_imports:
unambiguous = shell_mapping.mapping.get(import_path)
ambiguous = shell_mapping.ambiguous_modules.get(import_path)
if unambiguous:
result.add(unambiguous)
elif ambiguous:
explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
ambiguous,
address,
import_reference="file",
context=f"The target {address} sources `{import_path}`",
)
maybe_disambiguated = explicitly_provided_deps.disambiguated_via_ignores(ambiguous)
if maybe_disambiguated:
result.add(maybe_disambiguated)
return InferredDependencies(sorted(result), sibling_dependencies_inferrable=True)
def rules():
|
# -*- coding: utf-8 -*-
import repy

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name=repy.__pkgname__,
    version=repy.__version__,
    description="Python Regex cli tool",
    # BUG FIX: setup() has no `author_name` keyword — the metadata was being
    # silently dropped with an "Unknown distribution option" warning.
    author=repy.__author_name__,
    author_email=repy.__author_email__,
    packages=["repy"],
    scripts=['bin/repy-cli'],
    data_files=[],
    include_package_data=True,
    install_requires=['docopt>=0.6.1'],
    classifiers=(
        # As from https://pypi.python.org/pypi?%3Aaction=list_classifiers
        #'Development Status :: 1 - Planning',
        #'Development Status :: 2 - Pre-Alpha',
        'Development Status :: 3 - Alpha',
        #'Development Status :: 4 - Beta',
        #'Development Status :: 5 - Production/Stable',
        #'Development Status :: 6 - Mature',
        #'Development Status :: 7 - Inactive',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Environment :: Console',
        'Operating System :: POSIX',
        # 'Operating System :: Microsoft :: Windows',
        'Topic :: Software Development',
        'Topic :: cli',
        'Topic :: Regex')
)
|
#############################
# get_L_L_H3N2_predictions.py
#
# this script identifies the strain names we obtained from Luksza and Laessig
# with sequences in our multiple sequence alignment
#
################################
import sys
sys.path.append('../src')
import predict_flu as PF
from datetime import date
from Bio import Phylo, AlignIO, SeqIO
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
import glob, pickle, gzip, os, argparse

# load the annotations and the full alignment
# (repaired: the alignment filename literal was corrupted in this copy)
aln_fname = '../data/H3N2_HA1_all_years_filtered.fasta.gz'
with open('../data/annotations.pickle', 'r') as infile:
    annotation = pickle.load(infile)
with gzip.open(aln_fname) as infile:
    total_alignment = AlignIO.read(infile, 'fasta')

# index alignment records by lower-cased strain name (first '|' field of the id)
seq_look_up = {c.name.split('|')[0].lower(): c for c in total_alignment}

L_L = {}
with open('../data/H3N2_L_L_predicted_vaccine_strains.dat') as L_L_file:
    for line in L_L_file:
        entries = line.strip().split()
        year = int(entries[0][:-1])
        # normalize the strain name: drop the trailing three '_' fields,
        # lower-case, and unify '-' vs '_'
        strain_name = '_'.join(entries[1].split('_')[:-3]).lower().replace('-', '_')
        if strain_name in seq_look_up:
            L_L[year - 1] = seq_look_up[strain_name]
        else:
            # print() form works on both Python 2 and 3 (output unchanged)
            print("strain for year " + str(year - 1) + " not found")

with open('../data/H3N2_L_L_predictions.pickle', 'w') as outfile:
    pickle.dump(L_L, outfile)
|
penzance/canvas_python_sdk | canvas_sdk/methods/admins.py | Python | mit | 3,874 | 0.004904 | from canvas_sdk import client, utils
def make_account_admin(request_ctx, account_id, user_id, role=None, role_id=None, send_confirmation=None, **request_kwargs):
    """
    Flag an existing user as an admin within the account.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param account_id: (required) ID
    :type account_id: string
    :param user_id: (required) The id of the user to promote.
    :type user_id: integer
    :param role: (optional) (deprecated)
        The user's admin relationship with the account will be created with the
        given role. Defaults to 'AccountAdmin'.
    :type role: string or None
    :param role_id: (optional) The user's admin relationship with the account will be created with the
        given role. Defaults to the built-in role for 'AccountAdmin'.
    :type role_id: integer or None
    :param send_confirmation: (optional) Send a notification email to
        the new admin if true. Default is true.
    :type send_confirmation: boolean or None
    :return: Make an account admin
    :rtype: requests.Response (with Admin data)
    """
    path = '/v1/accounts/{account_id}/admins'
    payload = {
        'user_id': user_id,
        'role': role,
        'role_id': role_id,
        'send_confirmation': send_confirmation,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)
    return response
def remove_account_admin(request_ctx, account_id, user_id, role=None, role_id=None, **request_kwargs):
    """
    Remove the rights associated with an account admin role from a user.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param account_id: (required) ID
    :type account_id: string
    :param user_id: (required) ID
    :type user_id: string
    :param role: (optional) (Deprecated) Account role to remove from the user.
        Defaults to 'AccountAdmin'. Any other account role must be specified
        explicitly.
    :type role: string or None
    :param role_id: (optional) The user's admin relationship with the account
        will be created with the given role. Defaults to the built-in role for
        'AccountAdmin'.
    :type role_id: integer or None
    :return: Remove account admin
    :rtype: requests.Response (with Admin data)
    """
    url = request_ctx.base_api_url + '/v1/accounts/{account_id}/admins/{user_id}'.format(
        account_id=account_id, user_id=user_id)
    payload = {'role': role, 'role_id': role_id}
    return client.delete(request_ctx, url, payload=payload, **request_kwargs)
def list_account_admins(request_ctx, account_id, user_id=None, per_page=None, **request_kwargs):
    """
    List the admins in the account.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param account_id: (required) ID
    :type account_id: string
    :param user_id: (optional) Scope the results to those with user IDs equal
        to any of the IDs specified here.
    :type user_id: array or None
    :param per_page: (optional) Set how many results canvas should return,
        defaults to config.LIMIT_PER_PAGE
    :type per_page: integer or None
    :return: List account admins
    :rtype: requests.Response (with array data)
    """
    if per_page is None:
        per_page = request_ctx.per_page
    url = request_ctx.base_api_url + '/v1/accounts/{account_id}/admins'.format(account_id=account_id)
    payload = {'user_id': user_id, 'per_page': per_page}
    return client.get(request_ctx, url, payload=payload, **request_kwargs)
|
import numpy as np

# Element-wise greatest common divisor of two integer arrays.
arr_a = np.array([0, 2, 3, 6])
arr_b = np.array([3, 4, 5, 15])
print(np.gcd(arr_a, arr_b))
# [3 2 1 3]
print(type(np.gcd(arr_a, arr_b)))
# <class 'numpy.ndarray'>

# Plain Python lists are accepted too; the result is still an ndarray.
list_a = [0, 2, 3, 6]
list_b = [3, 4, 5, 14]
print(np.gcd(list_a, list_b))
# [3 2 1 2]
print(type(np.gcd(list_a, list_b)))
# <class 'numpy.ndarray'>

# Two Python scalars yield a NumPy scalar.
print(np.gcd(6, 15))
# 3
print(type(np.gcd(6, 15)))
# <class 'numpy.int64'>

# Broadcasting: a (2, 4) array against a (4,) array.
matrix = np.array([[0, 2, 3, 6], [0, 2, 3, 6]])
print(matrix)
# [[0 2 3 6]
#  [0 2 3 6]]
print(arr_b)
# [ 3 4 5 15]
print(matrix + arr_b)
# [[ 3 6 8 21]
#  [ 3 6 8 21]]
print(np.gcd(matrix, arr_b))
# [[3 2 1 3]
#  [3 2 1 3]]

# Shapes that cannot broadcast raise ValueError.
mismatched = np.array([0, 1, 2])
# print(np.gcd(mismatched, arr_b))
# ValueError: operands could not be broadcast together with shapes (3,) (4,)

# Array vs scalar broadcasts the scalar; argument order does not matter.
print(np.gcd(arr_a, 15))
# [15 1 3 3]
print(np.gcd(15, arr_a))
# [15 1 3 3]
|
hgqislub/hybird-orchard | code/cloudmanager/install/hws/hws_cascaded_configer.py | Python | apache-2.0 | 4,081 | 0.002695 | # -*- coding:utf-8 -*-
__author__ = 'q00222219@huawei'
import time
from heat.openstack.common import log as logging
import heat.engine.resources.cloudmanager.commonutils as commonutils
import heat.engine.resources.cloudmanager.constant as constant
import heat.engine.resources.cloudmanager.exception as exception
import pdb
LOG = logging.getLogger(__name__)
class CascadedConfiger(object):
    """Drives first-time configuration of a cascaded node.

    Waits until the node answers over the management tunnel, pushes the DNS
    configuration that lets the cascading and cascaded domains resolve each
    other, then polls the cascaded API and re-runs the configuration up to
    three times if it does not come up.
    """

    def __init__(self, public_ip_api, api_ip, domain, user, password,
                 cascading_domain, cascading_api_ip, cascaded_domain,
                 cascaded_api_ip, cascaded_api_subnet_gateway):
        self.public_ip_api = public_ip_api
        self.api_ip = api_ip
        self.domain = domain
        self.user = user
        self.password = password
        self.cascading_domain = cascading_domain
        self.cascading_api_ip = cascading_api_ip
        self.cascaded_domain = cascaded_domain
        self.cascaded_ip = cascaded_api_ip
        self.gateway = cascaded_api_subnet_gateway

    def do_config(self):
        """Run the full configuration sequence against the cascaded node."""
        start_time = time.time()
        LOG.info("start config cascaded, cascaded: %s" % self.domain)
        # wait until the cascaded tunnel can be reached
        commonutils.check_host_status(host=self.public_ip_api,
                                      user=self.user,
                                      password=self.password,
                                      retry_time=500, interval=1)
        # config cascaded host
        self._config_az_cascaded()
        cost_time = time.time() - start_time
        LOG.info("first config success, cascaded: %s, cost time: %d"
                 % (self.domain, cost_time))
        # check config result; retry the configuration up to three times
        for i in range(3):
            try:
                # check 90s
                # NOTE(review): this probe logs in with the hard-coded
                # VcloudConstant root credentials, not self.user — confirm
                # that is intentional.
                commonutils.check_host_status(
                    host=self.public_ip_api,
                    user=constant.VcloudConstant.ROOT,
                    password=constant.VcloudConstant.ROOT_PWD,
                    retry_time=15,
                    interval=1)
                LOG.info("cascaded api is ready..")
                break
            except exception.CheckHostStatusFailure:
                if i == 2:
                    LOG.error("check cascaded api failed ...")
                    break
                LOG.error("check cascaded api error, "
                          "retry config cascaded ...")
                self._config_az_cascaded()
        cost_time = time.time() - start_time
        LOG.info("config cascaded success, cascaded: %s, cost_time: %d"
                 % (self.domain, cost_time))

    def _config_az_cascaded(self):
        """Point the cascaded host's DNS at both the cascading and cascaded domains."""
        LOG.info("start config cascaded host, host: %s" % self.api_ip)
        # modify dns server address
        address = "/%(cascading_domain)s/%(cascading_ip)s,/%(cascaded_domain)s/%(cascaded_ip)s" \
                  % {"cascading_domain": self.cascading_domain,
                     "cascading_ip": self.cascading_api_ip,
                     "cascaded_domain": self.cascaded_domain,
                     "cascaded_ip": self.cascaded_ip}
        for i in range(30):
            try:
                commonutils.execute_cmd_without_stdout(
                    host=self.public_ip_api,
                    user=self.user,
                    password=self.password,
                    cmd='cd %(dir)s; source /root/adminrc; sh %(script)s replace %(address)s'
                        % {"dir": constant.PublicConstant.SCRIPTS_DIR,
                           "script": constant.PublicConstant.
                           MODIFY_DNS_SERVER_ADDRESS,
                           "address": address})
                break
            # BUG FIX: the exception name was corrupted ("SSHComman | dFailure");
            # restored so the retry loop actually catches SSH failures.
            except exception.SSHCommandFailure as e:
                LOG.error("modify cascaded dns address error, cascaded: "
                          "%s, error: %s"
                          % (self.domain, e.format_message()))
                time.sleep(1)
        LOG.info(
            "config cascaded dns address success, cascaded: %s"
            % self.public_ip_api)
        return True
| |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Custom middleware demo. Django middleware must implement the four hook
# methods below, and the class has to be registered in settings before use.
# (Repaired: the HttpResponse import was corrupted in this copy.)
from django.http.response import HttpResponse


class Day13Middleware(object):
    """Logs each phase of Django's request/response cycle to stdout."""

    def process_request(self, request):
        # NOTE(review): "process_quest" looks like a typo for
        # "process_request", kept because it is runtime output.
        print('1.process_quest')

    def process_view(self, request, callback, callback_args, callback_kwargs):
        print('2.process_view')

    def process_exception(self, request, exception):
        # Only executed when a view raises an exception.
        print('3.process_exception')

    def process_response(self, request, response):
        print('4.process_response')
        return response
|
bcostea/tomate2 | tomate2.py | Python | mpl-2.0 | 5,644 | 0.019135 | #!/usr/bin/env python2
from __future__ import division
import os
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GObject as gobject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Notify
from gi.repository import AppIndicator3 as appindicator
from time import time
from math import floor
Gdk.threads_init()
class TomateConfig(object):
#Parameters
MIN_WORK_TIME = 60 * 10 # min work time in seconds
DONE_WORK_TIME = 60 * 25 # finished work time in seconds
POLL_INTERVAL = 5 # polling interval in seconds
class States(object):
IDLE = 1
WORKING = 2
OK = 3
DONE = 4
STATE_MESSAGES = {
States.IDLE : 'Idle',
States.WORKING : 'Working',
States.OK : 'Ok',
States.DONE : 'Done'
}
STATE_ICONS = {
States.IDLE : 'idle',
States.WORKING : 'working',
States.OK : 'ok',
States.DONE : 'done'
}
class Pomodoro:
def __init__(self):
# we start with an idle state
self.state = States.IDLE
self.tick_interval = TomateConfig.POLL_INTERVAL
self.start_working_time = 0
def init_ui(self):
Notify.init("Tomate")
self.ind = self.build_indicator()
menu = self.build_menu()
self.ind.set_menu(menu)
def build_indicator(self):
ind = appindicator.Indicator.new(
"Tomate",
self.get_icon(self.state),
appindicator.IndicatorCategory.APPLICATION_STATUS)
ind.set_status(appindicator.IndicatorStatus.ACTIVE)
return ind
def build_menu(self):
menu = Gtk.Menu()
self.st_menu = Gtk.MenuItem("Start")
self.st_menu.connect('activate',self.icon_click)
menu.append(self.st_menu)
mi = Gtk.ImageMenuItem("Quit")
img = Gtk.Image.new_from_stock(Gtk.STOCK_QUIT, Gtk.IconSize.MENU)
mi.set_image(img)
mi.connect('activate',Gtk.main_quit)
menu.append(mi)
menu.show_all()
return menu
def get_icon(self, state):
return self.icon_directory() + "/img/" + STATE_ICONS[state] + ".png"
def format_time(self, seconds):
    """Render a duration given in seconds as human-readable English text."""
    if seconds < 60:
        return "%d seconds" % seconds
    minutes = floor(seconds / 60)
    hours = floor(minutes / 60)
    days = floor(hours / 24)
    if minutes < 60:
        # Under an hour: report minutes only.
        return "1 minute" if minutes <= 1 else "%d minutes" % minutes
    parts = []
    if days > 0:
        hours = hours - days * 24
        minutes = minutes - days * 24 * 60
        parts.append("1 day " if days == 1 else "%d days " % days)
    if hours > 0:
        minutes = minutes - hours * 60
        parts.append("1 hour " if hours == 1 else "%d hours " % hours)
    if minutes > 0:
        parts.append("and 1 minute" if minutes == 1 else "and %d minutes" % minutes)
    return "".join(parts)
def set_state(self, state, time):
    """Transition the indicator to *state* at epoch second *time*.

    Updates the menu label, tooltip, tray icon and fires a desktop
    notification. No-op when the state is unchanged.

    NOTE(review): the parameter name `time` shadows the module-level
    time() function imported at the top of the file.
    """
    old_state = self.state
    if self.state == state:
        return
    if state == States.IDLE:
        delta = time - self.start_working_time
        self.st_menu.set_label("Start")
        if old_state == States.OK:
            self.tooltip = "Good, you worked for " + self.format_time(delta) + "!"
        elif old_state == States.WORKING:
            self.tooltip = "Not good: worked for only " + self.format_time(delta)
        elif old_state == States.DONE:
            self.tooltip = ("Good, you worked for " + self.format_time(delta) +
                            "! Time for a break!")
    # BUG FIX: this branch's token was corrupted ("States.WORKIN | G").
    elif state == States.WORKING:
        self.start_working_time = time
        delta = time - self.start_working_time
        self.tooltip = "Working for " + self.format_time(delta) + "..."
        self.st_menu.set_label("Working for %s... stop" % self.format_time(delta))
    elif state == States.OK:
        delta = time - self.start_working_time
        self.tooltip = "Good, you worked for " + self.format_time(delta) + "!"
    elif state == States.DONE:
        self.tooltip = "Worked enough, take a break!"
    self.state = state
    self.ind.set_icon(self.get_icon(state))
    self.show_notification(self.state, self.tooltip)
def show_notification(self, state, notification):
try:
nw = Notify.Notification.new("Tomate state changed to " +
STATE_MESSAGES[state],
notification, self.get_icon(state))
nw.show()
except:
pass
def icon_directory(self):
return os.path.dirname(os.path.realpath(__file__)) + os.path.sep
def icon_click(self, dummy):
if self.state == States.IDLE:
self.set_state(States.WORKING, time())
else:
self.set_state(States.IDLE, time())
def update(self, time):
"""This method is called everytime a tick interval occurs"""
delta = time - self.start_working_time
if self.state == States.IDLE:
pass
else:
self.st_menu.set_label("Working for %s... stop" % self.format_time(delta))
if self.state == States.WORKING:
if delta > TomateConfig.MIN_WORK_TIME:
self.set_state(States.OK, time)
elif self.state == States.OK:
if delta > TomateConfig.DONE_WORK_TIME:
self.set_state(States.DONE, time)
def tick(self):
self.update(time())
source_id = gobject.timeout_add(self.tick_interval*1000, self.tick)
def main(self):
# All PyGTK applications must have a gtk.main(). Control ends here
# and waits for an event to occur (like a key press or mouse event).
source_id = gobject.timeout_add(self.tick_interval, self.tick)
self.init_ui()
Gtk.main()
# If the program is run directly or passed as an argument to the python
# interpreter then create a Pomodoro instance and show it
if __name__ == "__main__":
app = Pomodoro()
app.main()
|
dmdm/Pym-elFinder | pym_elfinder_tests/t002_functional/test_3080_cmd_rm.py | Python | bsd-3-clause | 1,935 | 0.006718 | import unittest
import os
from pprint import pprint
import pym_elfinder.exceptions as exc
from .. import lib
from .. import lib_localfilesystem as lfs
class TestCmdRm(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.finder = lib.create_finder()
cls.fixt = lib.CMD_FIXT['cmd_rm.txt']
cls.file_1 = os.path.join(lfs.DIR, 'file_1.txt')
cls.file_2 = os.path.join(lfs.DIR, 'file_2.txt')
cls.dir_1 = os.path.join(lfs.DIR, 'dir_1')
cls.file_1_1 = os.path.join(lfs.DIR, 'dir_1', 'file_1_1.txt')
def test_rm_files(self):
"""
Test removal of file_1 and file_2.
"""
lfs.mkfile(self.file_1)
lfs.mkfile(self.file_2)
req = self.fixt[0]['request']
r0 = self.fixt[0]['response'] # expected response
cmd, args = lib.prepare_request(req)
assert cmd == 'rm'
# This throws exception on error
self.finder.run(cmd, args, debug=True)
r = self.finder.response
#pprint(r); raise Exception("DIE")
#pprint(r0); raise Exception("DIE")
lib.prepare_response(r0, r)
self.assertEqual(r0.keys(), r.keys())
del r0['debug']
del r['debug']
self.maxDiff = None
self.assertEqual(r0, r)
def test_rm_non_empty_dir(self):
    """
    Removing a non-empty directory (dir_1) must fail with ERROR_RM.
    """
    os.mkdir(self.dir_1)
    # BUG FIX: this fixture line was corrupted ("| lfs.mkfile"); without it
    # dir_1 would be empty and the removal would not fail as intended.
    lfs.mkfile(self.file_1_1)
    req = self.fixt[1]['request']
    cmd, args = lib.prepare_request(req)
    assert cmd == 'rm'
    with self.assertRaisesRegexp(exc.FinderError, exc.ERROR_RM):
        self.finder.run(cmd, args, debug=True)
    r = self.finder.response
    self.assertTrue('error' in r)
    self.assertEqual(r['error'][0], exc.ERROR_RM)
    # Clean up so later tests start from a pristine directory.
    os.remove(self.file_1_1)
    os.rmdir(self.dir_1)
|
atomgomba/txtable | txtable/__main__.py | Python | unlicense | 3,048 | 0.001312 | import csv
import json
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from enum import Enum
from sys import exit, stdin
from .formatters import DefaultFormatter, HeadlessFormatter, MdFormatter, RstFormatter
from .table import TextTable
class Formatter(Enum):
    """CLI formatter names mapped to their formatter classes."""
    default = DefaultFormatter
    headless = HeadlessFormatter
    md = MdFormatter
    rst = RstFormatter

    @staticmethod
    def enum(value: str) -> 'Formatter':
        # argparse `type=` hook: argparse only recognises ValueError/TypeError
        # as "bad value", so the KeyError from name lookup is translated.
        try:
            return Formatter[value]
        except KeyError:
            raise ValueError()

    def __str__(self):
        # Shown by argparse in help text and the choices listing.
        return self.name
class DataFormat(Enum):
    """Input data formats understood by the CLI."""
    csv = "csv"
    json = "json"

    @staticmethod
    def enum(value: str) -> 'DataFormat':
        # argparse `type=` hook: unknown names must surface as ValueError.
        if value in DataFormat.__members__:
            return DataFormat[value]
        raise ValueError()

    def __str__(self):
        # Shown by argparse in help text and the choices listing.
        return self.name
def create_json_table(s: str) -> list:
    """Decode a JSON document into a header-plus-rows table.

    Accepts a list of objects, or a single object (treated as a one-row
    collection). Column order follows the keys of the first object; keys
    missing from later objects become None. Any other JSON top-level value
    aborts the program with exit status 1.
    """
    data = json.loads(s)
    if type(data) is not list:
        if type(data) is dict:
            data = [data]
        else:
            print(data)
            print("ERROR: JSON must contain a collection or an object")
            raise SystemExit(1)
    if not data:
        return []
    header = list(data[0].keys())
    rows = [[obj.get(column) for column in header] for obj in data]
    return [header] + rows
def create_csv_table(f) -> list:
    """Materialize an open text stream of CSV into a list of row lists."""
    reader = csv.reader(f)
    return [row for row in reader]
def format_stdin(args) -> int:
    """Read one document from stdin, print it as a table, return exit status."""
    # BUG FIX: args.type is a DataFormat member (argparse uses
    # type=DataFormat.enum), and a plain Enum member never compares equal to
    # the raw strings "json"/"csv" — so this function unconditionally hit the
    # "Unknown data format" branch. Compare against the members instead.
    if args.type is DataFormat.json:
        data = stdin.read()
        table = create_json_table(data)
    elif args.type is DataFormat.csv:
        table = create_csv_table(stdin)
    else:
        print("ERROR: Unknown data format")
        return 1
    print(TextTable(table, formatter=args.formatter.value()))
    return 0
def format_files(args) -> int:
for path in args.input:
with open(path) as f:
if path.endswith(".json"):
table = create_json_table(f.read())
elif path.endswith(".csv"):
table = create_csv_table(f)
else:
print("ERROR: Unknown file format")
return 1
print("# %s\n" % path)
print(TextTable(table, formatter=args.formatter.value()))
print("\n")
return 0
def main(args) -> int:
    """Dispatch to file mode when paths were given, otherwise to stdin mode."""
    if args.input:
        return format_files(args)
    return format_stdin(args)
if __name__ == "__main__":
parser = Argumen | tParser(prog="txtable", formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("input", default=[], nargs="*",
help="File paths or stdin when empty")
parser.add_argument("-f", "--formatter", type=Formatter.enum, default=Formatter.default, choices=list(Formatter),
help="Select table formatter")
parser.add_argument("-t", "--type", type=DataFormat.enum, default=DataFormat.json, choices=list(DataFormat),
| help="Specify input data format from stdin")
exit(main(parser.parse_args()))
|
lmazuel/azure-sdk-for-python | azure-mgmt-rdbms/azure/mgmt/rdbms/postgresql/models/storage_profile.py | Python | mit | 1,573 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageProfile(Model):
    """Storage Profile properties of a server.

    :param backup_retention_days: Backup retention days for the server.
    :type backup_retention_days: int
    :param geo_redundant_backup: Enable Geo-redundant or not for server
     backup. Possible values include: 'Enabled', 'Disabled'
    :type geo_redundant_backup: str or
     ~azure.mgmt.rdbms.postgresql.models.GeoRedundantBackup
    :param storage_mb: Max storage allowed for a server.
    :type storage_mb: int
    """

    # msrest serialization map: attribute name -> wire key and wire type.
    _attribute_map = {
        'backup_retention_days': {'key': 'backupRetentionDays', 'type': 'int'},
        'geo_redundant_backup': {'key': 'geoRedundantBackup', 'type': 'str'},
        'storage_mb': {'key': 'storageMB', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(StorageProfile, self).__init__(**kwargs)
        self.backup_retention_days = kwargs.get('backup_retention_days', None)
        self.geo_redundant_backup = kwargs.get('geo_redundant_backup', None)
        self.storage_mb = kwargs.get('storage_mb', None)
|
IECS/MansOS | tools/remoteaccess/tests/03-read.py | Python | mit | 439 | 0.006834 | #!/usr/bin/env python
# Get data
import os, sys, time
import urllib2
def main():
    """Fetch one read from the remote-access HTTP bridge and print the reply.

    Returns 0 on success and 1 on any error (previously the success path
    implicitly returned None; the def line itself was corrupted in this copy).
    """
    host = "http://localhost:30001/read?port=/dev/ttyUSB0"
    # host = "http://10.0.0.1:30001/read?port=/dev/ttyUSB0"
    try:
        req = urllib2.urlopen(host)
        print("Reply data:")
        print(req.read())
    except Exception as e:
        print("exception occurred:")
        print(e)
        return 1
    return 0
|
dionaea-honeypot/dionaea | modules/python/dionaea/smb/rpcservices.py | Python | gpl-2.0 | 133,752 | 0.005024 | # This file is part of the dionaea honeypot
#
# SPDX-FileCopyrightText: 2009 Paul Baecher & Markus Koetter & Mark Schloesser
# SPDX-FileCopyrightText: 2010 Markus Koetter & Tan Kean Siong
# SPDX-FileCopyrightText: 2015 Katarina Durechova
# SPDX-FileCopyrightText: 2017 Tan Kean Siong
# SPDX-FileCopyrightText: 2016-2017 PhiBo (DinoTools)
#
# SPDX-License-Identifier: GPL-2.0-or-later
import logging
import tempfile
from uuid import UUID
from time import time, localtime, altzone
from dionaea import ndrlib
from dionaea.core import g_dionaea, incident
from .include.smbfields import DCERPC_Header, DCERPC_Response
rpclog = logging.getLogger('rpcservices')
# Set the operating system of Dionaea by changing the value
# Default value is 2
# 1:"Windows XP Service Pack 0/1",
# 2:"Windows XP Service Pack 2",
# 3:"Windows XP Service Pack 3",
# 4:"Windows 7 Service Pack 1",
# 5:"Linux Samba 4.3.11"
OS_TYPE = 2
class DCERPCValueError(Exception):
    """Raised when a value passed to a DCERPC operation is invalid."""

    def __init__(self, varname, reason, value):
        self.varname = varname
        self.reason = reason
        self.value = value

    def __str__(self):
        return "{0} is {1} ({2})".format(self.varname, self.reason, self.value)
class RPCService(object):
    """Base class for emulated DCERPC services; subclasses declare `ops` and
    implement `handle_<opname>` methods."""
    # Hex string of the interface UUID this service is bound to.
    uuid = ''
    version_major = 0
    version_minor = 0
    # syntax = UUID('8a885d04-1ceb-11c9-9fe8-08002b104860').hex
    # opnum -> operation name; opnum X is served by method `handle_<name>`.
    ops = {}
    # opnum -> vulnerability label (e.g. "MS03-26"); used only to enrich logs.
    vulns = {}

    @classmethod
    def processrequest(cls, service, con, opnum, p):
        """Dispatch request packet `p` to the handler registered for `opnum`.

        Returns a DCERPC_Header/DCERPC_Response packet on success, or None
        when the opnum is unknown, the handler method is missing, or the
        handler rejects the request (DCERPCValueError) / runs out of stub
        data (EOFError).
        """
        if opnum in cls.ops:
            opname = cls.ops[opnum]
            method = getattr(cls, "handle_" + opname, None)
            if method != None:
                if opnum in cls.vulns:
                    vulnname = cls.vulns[opnum]
                    rpclog.info("Calling %s %s (%x) maybe %s exploit?" % (
                        service.__class__.__name__, opname, opnum, vulnname))
                else:
                    rpclog.info("Calling %s %s (%x)" %
                                (service.__class__.__name__, opname, opnum))
                r = DCERPC_Header() / DCERPC_Response()
                try:
                    data = method(con, p)
                except DCERPCValueError as e:
                    rpclog.debug("DCERPCValueError %s" % e)
                    return None
                except EOFError as e:
                    rpclog.warn("EOFError data %s" % format(p.StubData))
                    return None
                if data is None:
                    data = b''
                # for metasploit: OS type "Windows XP Service Pack 2/3"
                # answers NetNameCanonicalize with a fault packet (type 3)
                if OS_TYPE == 2 or OS_TYPE == 3:
                    if opname == "NetNameCanonicalize":
                        r.PacketType = 3
                r.StubData = data
                r.AllocHint = len(data)
                r.CallID = p.CallID
                # 24 bytes of DCERPC response header precede the stub data.
                r.FragLen = 24 + len(data)
                rpclog.debug(data)
                # print(r.show())
                return r
        else:
            rpclog.info("Unknown RPC Call to %s %i" %
                        (service.__class__.__name__, opnum))
class ATSVC(RPCService):
    """Task Scheduler (ATSVC) DCERPC interface."""
    uuid = UUID('1ff70682-0a51-30e8-076d-740be8cee98b').hex
    ops = {
        0x02: "NetrJobEnum",
    }
    class ATSVC_HANDLE(object):
        # 2.3.2 ATSVC_HANDLE
        # http://msdn.microsoft.com/en-us/library/cc248473%28PROT.13%29.aspx
        #
        # typedef [handle] const wchar_t* ATSVC_HANDLE;
        def __init__(self, p):
            self.__packer = p
            if isinstance(self.__packer, ndrlib.Unpacker):
                # Consume the referent pointer and the server-name string.
                self.Pointer = p.unpack_pointer()
                self.Handle = p.unpack_string()
            elif isinstance(self.__packer, ndrlib.Packer):
                pass
        def pack(self):
            # Packing an ATSVC_HANDLE is not implemented.
            if isinstance(self.__packer, ndrlib.Packer):
                pass
    # NOTE: this handler has not been tested yet.
    @classmethod
    def handle_NetrJobEnum(cls, con, p):
        # 3.2.5.2.3 NetrJobEnum (Opnum 2)
        # http://msdn.microsoft.com/en-us/library/cc248425%28PROT.10%29.aspx
        #
        # NET_API_STATUS NetrJobEnum(
        #   [in, string, unique] ATSVC_HANDLE ServerName,
        #   [in, out] LPAT_ENUM_CONTAINER pEnumContainer,
        #   [in] DWORD PreferedMaximumLength,
        #   [out] LPDWORD pTotalEntries,
        #   [in, out, unique] LPDWORD pResumeHandle
        # );
        request = ndrlib.Unpacker(p.StubData)
        server_name = ATSVC.ATSVC_HANDLE(request)
        pad = request.unpack_short()
        entries_read = request.unpack_long()     # pEnumContainer.EntriesRead
        p_entries = request.unpack_pointer()     # pEnumContainer buffer
        pref_max_len = request.unpack_long()     # PreferedMaximumLength
        resume_pointer = request.unpack_pointer()  # pResumeHandle referent
        resume_handle = request.unpack_long()

        reply = ndrlib.Packer()
        reply.pack_long(0)              # pEnumContainer.EntriesRead
        reply.pack_pointer(0)           # pEnumContainer entries: none
        reply.pack_long(0)              # pTotalEntries
        reply.pack_pointer(0x0016c918)  # pResumeHandle referent id
        reply.pack_long(0)              # *pResumeHandle
        reply.pack_long(0)              # NET_API_STATUS: success
        return reply.get_buffer()
class AudioSrv(RPCService):
    """AudioSrv RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('3faf4738-3a21-4307-b46c-fdda9bb8c0d5').hex
class browser(RPCService):
    """browser RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('6bffd098-a112-3610-9833-012892020162').hex
class davclntrpc(RPCService):
    """davclntrpc RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('c8cb7687-e6d3-11d2-a958-00c04f682e16').hex
class DCOM(RPCService):
    """DCOM remote-activation interface; RemoteActivation is the MS03-026 target."""
    uuid = UUID('4d9f4ab8-7d1c-11cf-861e-0020af6e7c57').hex
    # opnum -> operation name, dispatched by RPCService.processrequest
    ops = {
        0x00: "RemoteActivation",
    }
    # opnum -> known vulnerability name, used for logging only
    vulns = {
        0x00: "MS03-26",
    }
    @classmethod
    def handle_RemoteActivation(cls, con, p):
        # MS03-026
        # Unimplemented stub: returns None; processrequest substitutes an
        # empty stub in the response.
        pass
class DnsServer(RPCService):
    """DnsServer RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('50abc2a4-574d-40b3-9d66-ee4fd5fba076').hex
class DSSETUP(RPCService):
    """DSSETUP interface; DsRolerUpgradeDownlevelServer is the MS04-011 target."""
    uuid = UUID('3919286a-b10c-11d0-9ba8-00c04fd92ef5').hex
    # opnum -> operation name, dispatched by RPCService.processrequest
    ops = {
        0x09: "DsRolerUpgradeDownlevelServer"
    }
    # opnum -> known vulnerability name, used for logging only
    vulns = {
        0x09: "MS04-11",
    }
    @classmethod
    def handle_DsRolerUpgradeDownlevelServer(cls, con, p):
        # MS04-011
        # Unimplemented stub: returns None; processrequest substitutes an
        # empty stub in the response.
        pass
class epmp(RPCService):
    """epmp (endpoint mapper) interface — UUID registration only; no opnums handled."""
    uuid = UUID('e1af8308-5d1f-11c9-91a4-08002b14a0fa').hex
class eventlog(RPCService):
    """eventlog RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('82273fdc-e32a-18c3-3f78-827929dc23ea').hex
class GetUserToken(RPCService):
    """GetUserToken RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('a002b3a0-c9b7-11d1-ae88-0080c75e4ec1').hex
class ICertPassage(RPCService):
    """ICertPassage RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('91ae6020-9e3c-11cf-8d7c-00aa00c091be').hex
class ICertProtect(RPCService):
    """ICertProtect RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('0d72a7d4-6148-11d1-b4aa-00c04fb66ea0').hex
class InitShutdown(RPCService):
    """InitShutdown RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('894de0c0-0d55-11d3-a322-00c04fa321a1').hex
class IKeySvc(RPCService):
    """IKeySvc RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('8d0ffe72-d252-11d0-bf8f-00c04fd9126b').hex
class IPStoreProv(RPCService):
    """IPStoreProv RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('c9378ff1-16f7-11d0-a0b2-00aa0061426a').hex
class ISeclogon(RPCService):
    """ISeclogon RPC interface — UUID registration only; no opnums handled."""
    uuid = UUID('12b81e99-f207-4a4c-85d3-77b42f76fd14').hex
class ISystemActivator(RPCService):
    """ISystemActivator interface; RemoteCreateInstance is the MS04-012 target."""
    uuid = UUID('000001a0-0000-0000-c000-000000000046').hex
    # opnum -> operation name, dispatched by RPCService.processrequest
    ops = {
        0x4: "RemoteCreateInstance"
    }
    # opnum -> known vulnerability name, used for logging only
    vulns = {
        0x4: "MS04-12",
    }
    @classmethod
    def handle_RemoteCreateInstance(cls, con, p):
        # MS04-012
        # Unimplemented stub: returns None; processrequest substitutes an
        # empty stub in the response.
        pass
class RPC_C_AUTHN(object):
    """RPC authentication-service constants (RPC_C_AUTHN_*).

    http://msdn.microsoft.com/en-us/library/ms692656%28VS.85%29.aspx
    These values appear to be used globally across the services.
    """
    NONE = 0
    DCE_PRIVATE = 1
    DCE_PUBLIC = 2
    DEC_PUBLIC = 4
    GSS_NEGOTIATE = 9
    WINNT = 10
    GSS_SCHANNEL = 14
    GSS_KERBEROS = 16
    DEFAULT = 0xFFFFFFFF
class NCACN(object):
    """Protocol-sequence identifier constants for DCERPC protocol towers.

    http://www.opengroup.org/onlinepubs/9692999399/apdxi.htm#tagtcjh_51
    """
    UDP = 8
    IP = 9
class IOXIDResolver(RPCService):
"""[MS-DCOM]: Distributed Component Object Mod | el (DCOM) Remote Protocol Specification
http://msdn.microsoft.com/en-us/library/cc226801%28PROT.10%29.aspx"""
uuid = UUID('99fcfec4-5260-101b-bbcb-00aa0021347a').hex
ops = {
0x5: "ServerAlive | 2"
}
class COMVERSION(object):
# typedef struct tagCOMVERSION {
# unsigned short MajorVersion;
# unsigned short MinorVersion;
# } COMVERSION;
def __init__(self, p):
self.__packer = p
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.