| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
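Each row below is one fill-in-the-middle (FIM) sample: a single source file split into `prefix`, `middle`, and `suffix` spans, within the column widths given above. A minimal sketch of how a row recombines into the original file; the field names follow the header, while the `row` value itself is a hypothetical example:

```python
# Minimal sketch: recombine a FIM-style row into the original source text.
# Field names follow the table header above; this particular row is hypothetical.
def reassemble(row):
    """Concatenate the three spans back into the full file."""
    return row["prefix"] + row["middle"] + row["suffix"]

row = {"prefix": "def f(", "middle": "x", "suffix": "):\n    return x\n"}
assert reassemble(row) == "def f(x):\n    return x\n"
```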
| datalogics/scons | test/Perforce/P4COMSTR.py | Python | mit | 4,112 | 0.002432 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test setting the $P4COMSTR variable.
"""
import os.path
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('Perforce', ['Perforce', 'sub'], 'sub')
sub_Perforce = os.path.join('sub', 'Perforce')
sub_SConscript = os.path.join('sub', 'SConscript')
sub_all = os.path.join('sub', 'all')
sub_ddd_in = os.path.join('sub', 'ddd.in')
sub_ddd_out = os.path.join('sub', 'ddd.out')
sub_eee_in = os.path.join('sub', 'eee.in')
sub_eee_out = os.path.join('sub', 'eee.out')
sub_fff_in = os.path.join('sub', 'fff.in')
sub_fff_out = os.path.join('sub', 'fff.out')
test.write('my-p4.py', """
import shutil
import sys
for f in sys.argv[1:]:
shutil.copy('Perforce/'+f, f)
""")
test.write('SConstruct', """
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(TOOLS = ['default', 'Perforce'],
BUILDERS={'Cat':Builder(action=cat)},
P4COM='%(_python_)s my-p4.py $TARGET',
P4COMSTR='Checking out $TARGET from our fake Perforce')
env.Cat('aaa.out', 'aaa.in')
env.Cat('bbb.out', 'bbb.in')
env.Cat('ccc.out', 'ccc.in')
env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
env.SourceCode('.', env.Perforce())
SConscript('sub/SConscript', "env")
""" % locals())
test.write(['Perforce', 'sub', 'SConscript'], """\
Import("env")
env.Cat('ddd.out', 'ddd.in')
env.Cat('eee.out', 'eee.in')
env.Cat('fff.out', 'fff.in')
env.Cat('all', ['ddd.out', 'eee.out', 'fff.out'])
""")
test.write(['Perforce', 'aaa.in'], "Perforce/aaa.in\n")
test.write('bbb.in', "checked-out bbb.in\n")
test.write(['Perforce', 'ccc.in'], "Perforce/ccc.in\n")
test.write(['Perforce', 'sub', 'ddd.in'], "Perforce/sub/ddd.in\n")
test.write(['sub', 'eee.in'], "checked-out sub/eee.in\n")
test.write(['Perforce', 'sub', 'fff.in'], "Perforce/sub/fff.in\n")
test.run(arguments = '.',
stdout = test.wrap_stdout(read_str = """\
Checking out %(sub_SConscript)s from our fake Perforce
""" % locals(),
build_str = """\
Checking out aaa.in from our fake Perforce
cat(["aaa.out"], ["aaa.in"])
cat(["bbb.out"], ["bbb.in"])
Checking out ccc.in from our fake Perforce
cat(["ccc.out"], ["ccc.in"])
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
Checking out %(sub_ddd_in)s from our fake Perforce
cat(["%(sub_ddd_out)s"], ["%(sub_ddd_in)s"])
cat(["%(sub_eee_out)s"], ["%(sub_eee_in)s"])
Checking out %(sub_fff_in)s from our fake Perforce
cat(["%(sub_fff_out)s"], ["%(sub_fff_in)s"])
cat(["%(sub_all)s"], ["%(sub_ddd_out)s", "%(sub_eee_out)s", "%(sub_fff_out)s"])
""" % locals()))
test.must_match('all',
"Perforce/aaa.in\nchecked-out bbb.in\nPerforce/ccc.in\n")
test.must_match(['sub', 'all'],
"Perforce/sub/ddd.in\nchecked-out sub/eee.in\nPerforce/sub/fff.in\n")
#
test.pass_test()
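The test above exercises SCons's `*COMSTR` convention: when a `*COMSTR` construction variable is set, SCons echoes that string (after variable substitution) instead of the raw command line, which is exactly what `P4COMSTR` does for `P4COM`. A minimal SConstruct sketch of the same idea for a compile step; the variables are standard SCons, the message text is illustrative:

```python
# SConstruct sketch: a *COMSTR variable replaces the echoed command line.
# CCCOMSTR/LINKCOMSTR are standard SCons construction variables.
env = Environment()
env['CCCOMSTR'] = "Compiling $TARGET"   # printed instead of the full cc command
env['LINKCOMSTR'] = "Linking $TARGET"   # likewise for the link step
env.Program('hello', ['hello.c'])
```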
| knz/slcore | slc/tools/slc/input/parse.py | Python | gpl-3.0 | 6,199 | 0.042104 |
from ..msg import die
from ..ast import *
def unexpected(item):
die("unexpected construct '%s'" % item.get('type','unknown'), item)
def parse_varuse(varuse, item):
#print "parse varuse %x: item %x: %r" % (id(varuse), id(item), item)
varuse.loc = item['loc']
varuse.name = item['name'].strip()
if item.has_key('rhs'):
varuse.rhs = parse_block(item['rhs'])
return varuse
def parse_create(item):
c = Create(loc = item['loc'],
loc_end = item['loc_end'],
label = item['lbl'],
place = parse_block(item['place']),
start = parse_block(item['start']),
limit = parse_block(item['limit']),
step = parse_block(item['step']),
block = parse_block(item['block']),
extras = parse_extras(item['extras']),
sync_type = item['sync'],
fun = parse_block(item['fun']),
body = parse_block(item['body']))
for p in item['args']:
c.args.append(parse_argparm(CreateArg(), 'arg', p))
if 'fid' in item and item['fid']:
c.fid_lvalue = parse_block(item['fid'])
if 'result' in item and item['result']:
c.result_lvalue = parse_block(item['result'])
return c
def parse_indexdecl(item):
return IndexDecl(loc = item['loc'],
indexname = item['name'].strip())
def parse_spawndecl(item):
return SpawnDecl(loc = item['loc'],
spawnname = item['name'].strip())
def parse_spawnsync(item):
return SpawnSync(loc = item['loc'],
rhs = parse_block(item['rhs']))
def parse_scope(item):
s = Scope(loc = item['loc'],
loc_end = item['loc_end'])
s += parse_block(item['body'])
return s
def parse_attr(item):
n = item['name'].strip()
del item['type']
del item['name']
for k in item:
item[k] = item[k].strip()
return Attr(name = n,
payload = item)
def parse_extras(items):
if len(items) == 0:
return None
b = Extras()
for item in items:
if isinstance(item, dict):
t = item['type']
if t == 'attr': b += parse_attr(item)
else: unexpected(item)
else:
assert isinstance(item, str)
# ignore strings
if len(b) > 0:
return b
return None
def parse_block(items):
if len(items) == 0:
return None
b = Block()
#print "new block %x (len %d)" % (id(b), len(b))
for item in items:
#print "parse block %x (len %d): item %x: %r" % (id(b), len(b), id(item), item)
if isinstance(item, dict):
t = item['type']
if t == 'indexdecl': b += parse_indexdecl(item)
elif t == 'getp': b += parse_varuse(GetP(), item)
elif t == 'setp': b += parse_varuse(SetP(), item)
elif t == 'geta': b += parse_varuse(GetA(), item)
elif t == 'seta': b += parse_varuse(SetA(), item)
elif t == 'create': b += parse_create(item)
elif t == 'break': b += parse_break(item)
elif t == 'end_thread': b += parse_end_thread(item)
elif t == 'decl_funptr': b += parse_funptrdecl(item)
elif t == 'scope': b += parse_scope(item)
elif t == 'spawndecl': b += parse_spawndecl(item)
elif t == 'spawnsync': b += parse_spawnsync(item)
else: unexpected(item)
else:
assert isinstance(item, str)
csp = item.strip(' \t')
if len(csp) > 0:
b += Opaque(item)
#print "parse block %x: item %x -- END (len %d)" % (id(b), id(item), len(b))
if len(b) > 0:
return b
return None
def parse_argparm(p, cat, item):
#print "parse argparm %x: item %x: %r" % (id(p), id(item), item)
t = item['type'].replace('_mutable','')
if not t.endswith(cat):
unexpected(item)
p.loc = item['loc']
p.type = item['type']
p.ctype = CType(items = item['ctype'])
p.name = item['name'].strip()
if item.has_key('init'):
p.init = parse_block(item['init'])
return p
def parse_break(item):
return Break(loc = item['loc'])
def parse_end_thread(item):
return EndThread(loc = item['loc'])
def parse_funptrdecl(item):
d = FunDeclPtr(loc = item['loc'],
loc_end = item['loc_end'],
name = item['name'].strip(),
extras = parse_extras(item['extras']))
for p in item['params']:
d += parse_argparm(FunParm(), 'parm', p)
return d
def parse_fundecl(item):
d = FunDecl(loc = item['loc'],
loc_end = item['loc_end'],
name = item['name'].strip(),
extras = parse_extras(item['extras']))
for p in item['params']:
d += parse_argparm(FunParm(), 'parm', p)
return d
def parse_fundef(item):
d = FunDef(loc = item['loc'],
loc_end = item['loc_end'],
name = item['name'].strip(),
extras = parse_extras(item['extras']),
body = parse_block(item['body']))
for p in item['params']:
d += parse_argparm(FunParm(), 'parm', p)
return d
def parse_program(source):
source = eval(source)
p = Program()
for item in source:
if type(item) == type({}):
t = item['type']
if t == 'decl': p += parse_fundecl(item)
elif t == 'decl_funptr': p += parse_funptrdecl(item)
elif t == 'fundef': p += parse_fundef(item)
elif t == 'scope': p += parse_scope(item)
else: unexpected(item)
else: p += Opaque(item)
return p
__all__ = ['parse_program']
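Note that `parse_program` deserializes its input with `eval`, so `source` must be a Python-literal list of dicts and strings. A minimal sketch of the same round-trip using the standard library's `ast.literal_eval`, which accepts only literals and so cannot execute arbitrary code; this is a suggested safer alternative, not what the module above does:

```python
import ast

# Sketch: the parser expects a Python-literal list of dicts and strings.
# This example input is hypothetical; real items carry 'loc'/'loc_end' etc.
source = "[{'type': 'scope', 'loc': '1:1', 'loc_end': '2:1', 'body': []}, 'int x;']"

items = ast.literal_eval(source)   # parses literals only, unlike eval()
assert items[0]['type'] == 'scope' and items[1] == 'int x;'
```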
| WaveBlocks/WaveBlocksND | WaveBlocksND/IOM_plugin_overlaplcwp.py | Python | bsd-3-clause | 10,811 | 0.003515 |
"""The WaveBlocks Project
IOM plugin providing functions for handling various
overlap matrices of linear combinations of general
wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
import numpy as np
def add_overlaplcwp(self, parameters, timeslots=None, matrixsize=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Add storage for various overlap matrices. We can store one matrix type
per key.
========= ======
Key name Matrix
========= ======
``ov`` :math:`\langle\Upsilon | \Upsilon\rangle`
``ovkin`` :math:`\langle\Upsilon | T | \Upsilon\rangle`
``ovpot`` :math:`\langle\Upsilon | V(\underline{x}) | \Upsilon\rangle`
========= ======
Note that 'strange' errors occur if we later try to load or save
matrices for a key we did not initialise with this function.
:param parameters: A :py:class:`ParameterProvider` instance. It can
be empty and is not used at the moment.
:param timeslots: The number of time slots we need. Can be set to ``None``
to get automatically growing datasets.
:param matrixsize: The (maximal) size of each of the overlap matrices. If specified
this remains fixed for all timeslots. Can be set to ``None`` (default)
to get automatically growing datasets.
:type matrixsize: Pair of integers or ``None``.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
valid_keys = ("ov", "ovkin", "ovpot")
# Create the dataset with appropriate parameters
grp_ov = self._srf[self._prefixb + str(blockid)].create_group("overlaplcwp")
if timeslots is None:
T = 0
Ts = None
csTs = 128
else:
T = timeslots
Ts = timeslots
csTs = min(128, Ts)
if matrixsize is None:
Jr = 0
Jc = 0
Jrs = None
Jcs = None
csJrs = 128
csJcs = 128
else:
Jr, Jc = matrixsize
Jrs, Jcs = matrixsize
csJrs = min(128, Jrs)
csJcs = min(128, Jcs)
for k in key:
if k not in valid_keys:
raise ValueError("Unknown key value " + str(k))
name = k[2:]
daset_tg = grp_ov.create_dataset("timegrid" + name, (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1)
grp_ov.create_dataset("shape" + name, (T, 2), dtype=np.integer, chunks=(csTs, 2), maxshape=(Ts, 2))
grp_ov.create_dataset("overlap" + name, (T, Jr, Jc), dtype=np.complexfloating, chunks=(1, csJrs, csJcs), maxshape=(Ts, Jrs, Jcs))
daset_tg.attrs["pointer"] = 0
def delete_overlaplcwp(self, blockid=0):
r"""Remove the stored overlap matrices.
:param blockid: The ID of the data block to operate on.
"""
try:
del self._srf[self._prefixb + str(blockid) + "/overlaplcwp"]
except KeyError:
pass
def has_overlaplcwp(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Ask if the specified data block has the desired data tensor.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
r = True
r &= ("overlaplcwp" in self._srf[self._prefixb + str(blockid)].keys())
if r and "ov" in key:
r &= ("overlap" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovpot" in key:
r &= ("overlappot" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovkin" in key:
r &= ("overlapkin" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
return r
def save_overlaplcwp(self, data, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Save overlap matrices of linear combinations of general wavepackets.
In principle this function also supports non-square matrices.
:param data: The data matrices to save.
:type data: A list of :py:class:`ndarray` entries.
:param timestep: The timestep at which we save the data.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
for item, datum in zip(key, data):
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
timeslot = self._srf[pathtg].attrs["pointer"]
# Write the data
self.must_resize(pathd, timeslot)
data = np.atleast_2d(np.squeeze(data))
rows, cols = data.shape
self.must_resize(pathd, rows - 1, axis=1)
self.must_resize(pathd, cols - 1, axis=2)
self._srf[pathd][timeslot, :rows, :cols] = data
self.must_resize(pathsh, timeslot)
self._srf[pathsh][timeslot, :] = np.array([rows, cols])
# Write the timestep to which the stored values belong into the timegrid
self.must_resize(pathtg, timeslot)
self._srf[pathtg][timeslot] = timestep
# Update the pointer
self._srf[pathtg].attrs["pointer"] += 1
def load_overlaplcwp_timegrid(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the timegrid corresponding to the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to load. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having one column.
"""
tg = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
tg.append(self._srf[pathtg][:])
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
tg.append(self._srf[pathtg][:])
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
tg.append(self._srf[pathtg][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
print(tg)
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp_shape(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the shape of the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having two columns.
"""
tg = []
for item in key:
if item == "ov":
pathsh = "/" + se
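(The `load_overlaplcwp_shape` body is cut off at the sample's suffix limit.) The storage pattern used throughout this plugin is h5py's resizable-dataset idiom: create datasets with `chunks` and a `maxshape` whose growable axes are `None`, then call `resize` as new timeslots arrive. A minimal self-contained sketch of that idiom, independent of the WaveBlocks classes:

```python
import numpy as np
import h5py

# Sketch of the resizable-dataset idiom used above: maxshape=(None, ...) lets
# the time axis grow without bound; chunking is required for resizable datasets.
with h5py.File("overlap_demo.hdf5", "w") as f:
    dset = f.create_dataset("overlap", shape=(0, 4, 4), maxshape=(None, 4, 4),
                            dtype=np.complex128, chunks=(1, 4, 4))
    for t in range(3):
        dset.resize(t + 1, axis=0)            # grow by one timeslot
        dset[t, :, :] = (t + 1) * np.eye(4)   # store this timeslot's matrix
    assert dset.shape == (3, 4, 4)
```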
| ArthurVal/RIDDLE_naoqi_bridge | naoqi_sensors_py/src/naoqi_sensors/naoqi_microphone.py | Python | bsd-3-clause | 4,844 | 0.007019 |
#!/usr/bin/env python
# Copyright (C) 2014 Aldebaran Robotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import rospy
from naoqi_driver.naoqi_node import NaoqiNode
from dynamic_reconfigure.server import Server
from naoqi_sensors.cfg import NaoqiMicrophoneConfig
from naoqi_msgs.msg import AudioBuffer
from naoqi import ALModule, ALBroker, ALProxy
class NaoqiMic (ALModule, NaoqiNode):
def __init__(self, name):
NaoqiNode.__init__(self, name)
self.myBroker = ALBroker("pythonBroker", "0.0.0.0", 0, self.pip, self.pport)
ALModule.__init__(self, name)
self.isSubscribed = False
self.microVersion = 0
# Create a proxy to ALAudioDevice
try:
self.audioProxy = self.get_proxy("ALAudioDevice")
except Exception, e:
rospy.logerr("Error when creating proxy on ALAudioDevice:")
rospy.logerr(str(e))
exit(1)
try:
self.robotProxy = self.get_proxy("ALRobotModel")
self.microVersion = self.robotProxy._getMicrophoneConfig()
except Exception, e:
rospy.logwarn("Could not retrieve microphone version:")
rospy.logwarn(str(e))
rospy.logwarn("Microphone channel map might not be accurate.")
def returnNone():
return None
self.config = defaultdict(returnNone)
rospy.loginfo('channel = %s'%self.config['channel'])
# ROS publishers
self.pub_audio_ = rospy.Publisher('~audio_raw', AudioBuffer)
# initialize the parameter server
self.srv = Server(NaoqiMicrophoneConfig, self.reconfigure)
def reconfigure( self, new_config, level ):
"""
Reconfigure the microphones
"""
rospy.loginfo('reconfigure changed')
if self.pub_audio_.get_num_connections() == 0:
rospy.loginfo('Changes recorded but not applied as nobody is subscribed to the ROS topics.')
self.config.update(new_config)
return self.config
# check if we are already subscribed
if not self.isSubscribed:
rospy.loginfo('subscribed to audio proxy, since this is the first listener with channel = %s'%new_config['channel'])
self.audioProxy.setClientPreferences(self.getName(), new_config['frequency'], new_config['channel'], 0)
self.audioProxy.subscribe(self.getName())
self.isSubscribed = True
self.config.update(new_config)
return self.config
def run(self):
r=rospy.Rate(2)
while self.is_looping():
if self.pub_audio_.get_num_connections() == 0:
if self.isSubscribed:
rospy.loginfo('Unsubscribing from audio bridge as nobody listens to the topics.')
self.release()
continue
if not self.isSubscribed:
self.reconfigure(self.config, 0)
r.sleep()
if self.isSubscribed:
self.release()
self.myBroker.shutdown()
def release(self):
self.audioProxy.unsubscribe(self.name)
self.isSubscribed=False
def processRemote(self, nbOfInputChannels, fNbOfInputSamples, timeStamp, inputBuff):
audio_msg = AudioBuffer()
# Deal with the sound
# get data directly with the _getInputBuffer() function because inputBuff is corrupted in python
mictmp = []
for i in range (0,len(inputBuff)/2) :
mictmp.append(ord(inputBuff[2*i])+ord(inputBuff[2*i+1])*256)
# convert 16 bit samples to signed 16 bits samples
for i in range (0,len(mictmp)) :
if mictmp[i]>=32768 :
mictmp[i]=mictmp[i]-65536
if self.config['use_ros_time']:
audio_msg.header.stamp = rospy.Time.now()
else:
audio_msg.header.stamp = rospy.Time(timeStamp)
audio_msg.frequency = self.config['frequency']
if self.config['channel'] == 0:
if self.microVersion == 0:
channels = [0,2,1,4]
else:
channels = [3,5,0,2]
else:
channels = [self.config['channel']]
audio_msg.channelMap = channels
audio_msg.data = mictmp
self.pub_audio_.publish(audio_msg)
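`processRemote` rebuilds signed 16-bit samples by hand: it combines each little-endian byte pair as `lo + hi*256`, then shifts values at or above 32768 down by 65536. A minimal sketch of the same conversion in one step with NumPy, assuming (as the loop does) that the buffer is little-endian 16-bit PCM:

```python
import numpy as np

# Sketch: decode little-endian signed 16-bit PCM in one call; '<i2' is a
# little-endian 2-byte signed integer, matching the manual ord()/65536 loop.
input_buff = b"\x01\x00\xff\x7f\x00\x80"   # hypothetical 3-sample buffer
samples = np.frombuffer(input_buff, dtype="<i2")
assert list(samples) == [1, 32767, -32768]
```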
| jdekerautem/TurtleBot-Receptionist | pocketsphinx_files/notsotalkative.py | Python | mit | 6,969 | 0.033434 |
#!/usr/bin/env python
"""
voice_nav.py allows controlling a mobile base using simple speech commands.
Based on the voice_cmd_vel.py script by Michael Ferguson in the pocketsphinx ROS package.
"""
import roslib; #roslib.load_manifest('pi_speech_tutorial')
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from math import copysign
from sound_play.libsoundplay import SoundClient
class voice_cmd_vel:
def __init__(self):
self.rate = rospy.get_param("~rate", 5)
r = rospy.Rate(self.rate)
self.paused = False
self.voice = rospy.get_param("~voice", "voice_cmu_us_bdl_arctic_clunits")
self.wavepath = rospy.get_param("~wavepath", "")
# Create the sound client object
self.soundhandle = SoundClient()
rospy.sleep(1)
self.soundhandle.stopAll()
# Subscribe to the /recognizer/output topic to receive voice commands.
rospy.Subscriber('/recognizer/output', String, self.speechCb)
# A mapping from keywords to commands.
self.keywords_to_command = {'stop': ['stop', 'halt', 'abort', 'kill', 'panic', 'off', 'freeze', 'shut down', 'turn off', 'help', 'help me'],
'bye': ['bye', 'cheers', 'goodbye', 'see you', 'bye'],
'cafe' : ['cafe', 'campus', 'tea', 'coffee', 'eat'],
'hello': ['hi', 'hey', 'hello'],
'help' : ['help me', 'can help', 'help'],
'name' : ['your name', 'name'],
'wash' : ['washroom', 'toilet'],
'library' : ['library', 'book', 'borrow'],
'labs' : ['labs'],
'talk': ['talk to me?', 'really talk?', 'you talk', 'you really talk?', 'talk'],
'amazing' : ['amazing', 'wonderful'],
'psychology' : ['psychology'],
'teaching' : ['teaching', 'music'],
'engineering' : ['engineering'],
'biology' : ['biology', 'english', 'chemistry'],
'maths' : ['computing', 'mathematics'],
'geo' : ['geology', 'geography'],
'marine' : ['marine'],
'art' : ['art'],
'roland' : ['reception', 'architecture'],
'business' : ['business'],
'staff' : ['staff'],
'sports' : ['sports'],
'robots' : ['robotics', 'robots'],
'visit' : ['visit', 'to do'],
'supermarket' : ['shop', 'supermarket'],
'cashpoint' : ['cash points', 'ATM', 'cash machines'],
'day' : ['day', 'today'],
'weather' : ['weather'],
'pause': ['pause speech'],
'continue': ['continue speech']}
rospy.loginfo("Ready to receive voice commands")
# We have to keep publishing the cmd_vel message if we want the robot to keep moving.
while not rospy.is_shutdown():
r.sleep()
def get_command(self, data):
for (command, keywords) in self.keywords_to_command.iteritems():
for word in keywords:
if data.find(word) > -1:
return command
def speechCb(self, msg):
command = self.get_command(msg.data)
rospy.loginfo("Command: " + str(command))
if command == 'pause':
self.paused = True
elif command == 'continue':
self.paused = False
if self.paused:
return
if command == 'hello':
self.soundhandle.say("Greetings!.", self.voice)
if command == 'help':
self.soundhandle.say("Ask me questions", self.voice)
if command == 'talk':
self.soundhandle.say("yes, I can", self.voice)
if command == 'bye':
self.soundhandle.say("Bye Bye", self.voice)
if command == 'weather':
self.soundhandle.say("I Don't know.", self.voice)
if command == 'supermarket':
self.soundhandle.say("The nearest supermarket is the TESCO!. ", self.voice)
if command == 'day':
self.soundhandle.say("It's tuesday!.", self.voice)
if command == 'psychology':
self.soundhandle.say("It's in link building!", self.voice)
if command == 'teaching':
self.soundhandle.say("the rolle building!.", self.voice)
if command == 'engineering':
self.soundhandle.say("That's right here!.", self.voice)
if command == 'biology':
self.soundhandle.say("It's is in the Davy building!.!", self.voice)
if command == 'maths':
self.soundhandle.say("In the babbage building!.!", self.voice)
if command == 'geo':
self.soundhandle.say("It's in the Fitzroy building!.!", self.voice)
if command == 'marine':
self.soundhandle.say("In the reynolds And the marine building.! ", self.voice)
if command == 'art':
self.soundhandle.say(" in the scott building!.!", self.voice)
if command == 'roland':
self.soundhandle.say(" in the roland levinsky building!.!", self.voice)
if command == 'business':
self.soundhandle.say("should be cookworthy building!", self.voice)
if command == 'staff':
self.soundhandle.say("In the Portland Square building!", self.voice)
if command == 'sports':
self.soundhandle.say("It's the Nancy Astor building. ", self.voice)
if command == 'robots':
self.soundhandle.say("in Smeaton's building or in Portland Square. !", self.voice)
if command == 'cashpoint':
self.soundhandle.say("There are some on the eastern exit of this building.!!", self.voice)
if command == 'visit':
self.soundhandle.say("Well, you can walk along the seashore. May be.!", self.voice)
if command == 'name':
self.soundhandle.say("charlie.", self.voice)
if command == 'amazing':
self.soundhandle.say("thank you so much.", self.voice)
if command == 'cafe':
self.soundhandle.say(" at the S U shop.", self.voice)
if command == 'wash':
self.soundhandle.say("the second floor and the third floor.", self.voice)
if command == 'library':
self.soundhandle.say("It's next to the Smeaton's building.", self.voice)
if command == 'labs':
self.soundhandle.say(" on the third floor.", self.voice)
def cleanup(self):
# When shutting down be sure to stop the robot! Publish a Twist message consisting of all zeros.
rospy.loginfo("Shutting Down..")
if __name__=="__main__":
rospy.init_node('voice_nav')
try:
voice_cmd_vel()
except:
pass
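Since `get_command` matches with `data.find(word) > -1`, any keyword can also fire inside an unrelated word (for example `'hi'` inside `'historic'`). A minimal sketch of a word-boundary variant with the standard `re` module; the stricter matching is a suggested alternative, not what the script above does:

```python
import re

# Sketch: match keywords only at word boundaries instead of as raw substrings.
keywords_to_command = {'stop': ['stop', 'halt'], 'hello': ['hi', 'hey', 'hello']}

def get_command(data):
    for command, keywords in keywords_to_command.items():
        for word in keywords:
            if re.search(r'\b%s\b' % re.escape(word), data):
                return command

assert get_command("oh hi there") == 'hello'
assert get_command("historic") is None   # substring 'hi' no longer matches
```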
| Jc11235/Kekulean_Program | GUI_Version/Ubuntu_Version/DriverMethods.py | Python | gpl-2.0 | 39,406 | 0.044054 |
from PerfectMatchingData import *
from Face import *
from Vertex import *
from Graph import *
from VertexList import *
from Output import *
from KekuleanMethods import *
from Checkers import *
from RequiredEdgeMethods import *
from Tkinter import *
from AppInformation import *
from random import randint
import time
import os
import shutil
import multiprocessing as mp
import threading
Break = False
BreakLoop = False
#These methods are the main drivers of the program. Some of their helper methods are also present here.
settings = {}
#function that reads in the graph returns a 2D string list of the graph
def getInput(fileName):
faceGraph = []
inputFile = open(fileName, 'r')
row = inputFile.readline()
y = 0
while len(row) > 0:
row = row.replace('\n', '')
row = row.split(" ")
for i in range(len(row)):
x = row[i]
faceGraph.append((Face(int(x), y)))
row = inputFile.readline()
y += 1
inputFile.close()
return faceGraph
def getSettings():
fileName = "settings.txt"
inputFile = open(fileName, 'r')
lineNumber = 0
minW = 0
maxW = 0
minH = 0
maxH = 0
line = inputFile.readline()
while len(line) > 0:
line = line.replace('\n', '')
settings[lineNumber] = float(line)
line = inputFile.readline()
lineNumber += 1
inputFile.close()
def resetGraph(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeGraph(root,appInfo):
root.geometry("600x400")
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,choice,selection,choiceEntry,fileName = "graph.txt"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = fileName
faceGraph = getInput(fileName)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="That file does not exist, please try again.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distinct Kekule structures available. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayClarFries(number,structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetGraph(root,appInfo,submitGraph,graphNumberEntry,view)
#A user-entered number of graphs are generated and tested for Kekulean-ness and written to their proper text files
def randomIntoFiles():
kekuleanFile = open("Kekuleans.txt", "w")
notKekuleanFile = open("NotKekulean.txt", "w")
numK = 0
numNotK = 0
trials = int(raw_input("How many graphs would you like to create? "))
print "\n" #just to provide some visual space
t1 = time.time()
for i in range(trials):
faceGraph = createRandomConnectedGraph()
vGraph = makeVertexGraph(faceGraph)
randGraph = Graph(faceGraph, vGraph)
if isKekulean(randGraph) == True:
numK += 1
kekuleanFile.write("Graph #" + str(numK) + "\n")
kekuleanFile.write(randGraph.simpleToString() + '\n')
else:
numNotK += 1
notKekuleanFile.write("Graph #" + str(numNotK) + "\n")
notKekuleanFile.write(randGraph.simpleToString() + '\n')
#print randGraph
#print "\n"
t2 = time.time()
print "\n" + str(numK) + " Kekulean graph(s) were found.\n" + str(numNotK) + " non-Kekulean graph(s) were found."
print "Time elapsed (in seconds): " + str(t2 - t1) + "\n"
kekuleanFile.close()
notKekuleanFile.close()
#creates a random Kekulean graph, does stuff with it, and saves it to a PNG
def createRandomKekulean():
#creates a face graphs
randomFaces = createRandomGraph()
randomGraph = _createRandomKekulean()
print "There are", len(randomGraph.getVertexGraph()), "vertices"
graphs = assignMatching(randomGraph)
graphs.sort()
if len(graphs) > 0:
#save graphs as PNG file
savePNG(graphs, "graphs - Fries.png")
Graph.comparison = 'clars'
graphs.sort()
savePNG(graphs, "graphs - Clars.png")
while True:
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
while choice.lower() != 'fries' and choice.lower() != 'clars' and choice.lower() != 'quit':
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
if choice.lower() == 'clars':
Graph.comparison = 'clars'
elif choice.lower() == 'fries':
Graph.comparison = 'fries'
else:
break
graphs.sort()
graphs.reverse()
print "There are", len(graphs), "Kekulean structures"
displayGraphs(graphs)
else:
print "error - Graph is Kekulean but has no perfect matching - see error.txt for graph"
errorFile = open("error.txt", "w")
errorFile.write(randomGraph.simpleToString() + '\n')
#Creates a random planar graph, which may not be connected
def createRandomGraph():
height = randint(settings[2], settings[3])
randGraph = []
for i in range(height):
rowLength = randint(settings[0], settings[1])
row = getRow(rowLength, i)
while len(row) == 0:
row = getRow(rowLength, i)
randGraph.extend(row)
if checkAlignment(randGraph) == False:
randGraph = createRandomGraph()
return randGraph
def checkAlignment(graph):
for face in graph:
if face.getX() == 0:
break
else:
#there is no face on the y-axis
return False
for face in graph:
if face.getY() == 0:
break
else:
#there is no face on the x-axis
return False
#there is a face on the x-axis
return True
def createRandomConnectedGraph():
g = createRandomGraph()
while isConnected(faceGraphToInts(g)) == False:
g = createRandomGraph()
return g
#generates a row for the createRandomGraph method
def getRow(rl, rowNum):
r = []
for j in range(rl):
chance = randint(0, 100)
if chance > settings[4] * 100:
r.append(Face(j, rowNum))
return r
def _createRandomKekulean():
#creates a face graphs
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGr
| justinjfu/chaos_theory | scripts/run_ddpg.py | Python | gpl-3.0 | 526 | 0.001901 |
import logging
import gym
import numpy as np
from chaos_theory.algorithm import DDPG
from chaos_theory.run.run_algorithm import run_online_algorithm
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
np.random.seed(1)
if __name__ == "__main__":
env_name = 'HalfCheetah-v1'
env = gym.make(env_name)
algorithm = DDPG(env, track_tau=0.001, discount=0.95)
run_online_algorithm(env, algorithm, max_length=1000, samples_per_update=1, verbose_trial=5, log_name='ddpg_'+env_name)
| NuChwezi/nubrain | nubrain/urls.py | Python | mit | 1,184 | 0.000845 |
"""nubrain URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from nubrain.settings import BASE_URL, APP_NAME
from django.views.generic import RedirectView
from django.utils.translation import ugettext_lazy
urlpatterns = patterns('',
(r'^$', RedirectView.as_view(url='%s/admin/' % BASE_URL)),
url(r'^admin/', include(admin.site.urls)),
)
admin.site.site_title = ugettext_lazy(APP_NAME)
admin.site.site_header = ugettext_lazy('%s Admin' % APP_NAME)
admin.site.index_title = ugettext_lazy('%s Dashboard' % APP_NAME)
admin.autodiscover()
| invinst/ResponseBot | tests/unit_tests/models/test_user_model.py | Python | apache-2.0 | 824 | 0 |
from unittest.case import TestCase
from dateutil.parser import parse
from responsebot.models import User, Tweet
class UserModelTestCase(TestCase):
def test_create_from_raw_data(self):
created_at = 'Mon Apr 25 08:25:58 +0000 2016'
raw = {
'some_key': 'some value',
'created_at': created_at,
'status': {
'created_at': created_at
},
'following': True
}
expected_created_at = parse(created_at)
user = User(raw)
self.assertEqual(user.some_key, 'some value')
self.assertEqual(user.created_at, expected_created_at)
self.assertTrue(isinstance(user.tweet, Tweet))
self.assertEqual(user.tweet.created_at, expected_created_at)
self.assertEqual(user.following, True)
| simonolander/euler | euler-150-searching-a-triangular-array-for-a-sub-triangle-having-minimum-sum.py | Python | mit | 1,906 | 0.001574 |
def triangle_sum(tri, r, c, h, memo):
if (r, c, h) in memo:
return memo[(r, c, h)]
ans = tri[r][c]
if h > 0:
ans += triangle_sum(tri, r + 1, c, h - 1, memo)
ans += triangle_sum(tri, r + 1, c + 1, h - 1, memo)
if h > 1:
ans -= triangle_sum(tri, r + 2, c + 1, h - 2, memo)
memo[(r, c, h)] = ans
return ans
def min_triangle_sum(tri):
memo = {}
minimum = tri[0][0]
for r in range(len(tri)):
for c in range(r + 1):
print(r, c)
for h in range(len(tri) - r):
print(r, c, h)
s = triangle_sum(tri, r, c, h, memo)
if s < minimum:
minimum = s
print(r, c, h, ':', minimum)
return minimum
def min_triangle_sum_2(tri):
memo = {}
for r in range(len(tri)):
s = 0
memo[(r, 0)] = 0
for c in range(0, r + 1):
s += tri[r][c]
memo[(r, c+1)] = s
minimum = tri[0][0]
for r in range(len(tri)):
for c in range(r + 1):
minimum_2 = 0
for h in range(len(tri) - r):
minimum_2 += memo[(r + h, c + h + 1)] - memo[(r + h, c)]
if minimum_2 < minimum:
minimum = minimum_2
print(r, c, h, ':', minimum)
return minimum
def make_triangle(n=1000):
triangle = [[0] * k for k in range(1, n + 1)]
r = 0
c = 0
t = 0
for k in range(n * (n + 1) // 2):
t = (615949 * t + 797807) % 2**20
triangle[r][c] = t - 2**19
c += 1
if c == len(triangle[r]):
r += 1
c = 0
return triangle
triangle = [
[ 15],
[-14, - 7],
[ 20, -13, - 5],
[- 3, 8, 23, -26],
[ 1, - 4, - 5, -18, 5],
[-16, 31, 2, 9, 28, 3],
]
triangle = make_triangle()
print(min_triangle_sum_2(triangle))
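The speedup in `min_triangle_sum_2` comes from row prefix sums: `memo[(r, c)]` holds the sum of `tri[r][:c]`, so the segment `tri[r+h][c:c+h+1]` that row `r+h` contributes to a sub-triangle rooted at `(r, c)` costs O(1) as `memo[(r+h, c+h+1)] - memo[(r+h, c)]`. A minimal sketch of that identity in isolation:

```python
# Sketch: prefix sums give O(1) row-segment sums, the core of min_triangle_sum_2.
row = [15, -14, 20, -3, 1]
prefix = [0]
for v in row:
    prefix.append(prefix[-1] + v)   # prefix[c] == sum(row[:c])

# sum(row[c:c+h+1]) == prefix[c+h+1] - prefix[c]
assert sum(row[1:4]) == prefix[4] - prefix[1] == 3
```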
| cloudify-cosmo/cloudify-manager | rest-service/manager_rest/test/security_utils.py | Python | apache-2.0 | 3,245 | 0 |
#########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from flask_security.utils import hash_password
from cloudify.cluster_status import (
DB_STATUS_REPORTER,
BROKER_STATUS_REPORTER,
MANAGER_STATUS_REPORTER,
MANAGER_STATUS_REPORTER_ID,
BROKER_STATUS_REPORTER_ID,
DB_STATUS_REPORTER_ID
)
from manager_rest.storage.models import Tenant, UserTenantAssoc
from manager_rest.storage import user_datastore
from manager_rest.constants import (
DEFAULT_TENANT_ID,
DEFAULT_TENANT_ROLE,
)
ADMIN_ROLE = 'sys_admin'
USER_ROLE = 'default'
USER_IN_TENANT_ROLE = 'user'
def get_admin_user():
return {
'username': 'admin',
'password': 'admin',
'role': ADMIN_ROLE
}
def get_status_reporters():
return [
{
'username': MANAGER_STATUS_REPORTER,
'password': 'password',
'role': MANAGER_STATUS_REPORTER,
'id': MANAGER_STATUS_REPORTER_ID
},
{
'username': BROKER_STATUS_REPORTER,
'password': 'password',
'role': BROKER_STATUS_REPORTER,
'id': BROKER_STATUS_REPORTER_ID
},
{
'username': DB_STATUS_REPORTER,
'password': 'password',
'role': DB_STATUS_REPORTER,
'id': DB_STATUS_REPORTER_ID
},
]
def get_test_users():
test_users = [
{
'username': 'alice',
'password': 'alice_password',
'role': ADMIN_ROLE
},
{
'username': 'bob',
'password': 'bob_password',
'role': USER_ROLE
},
{
'username': 'clair',
'password': 'clair_password',
'role': USER_ROLE,
'active': False
},
{
'username': 'dave',
'password': 'dave_password',
'role': USER_ROLE
}
]
return test_users
def add_users_to_db(user_list):
default_tenant = Tenant.query.get(DEFAULT_TENANT_ID)
for user in user_list:
role = user_datastore.find_role(user['role'])
user_obj = user_datastore.create_user(
username=user['username'],
password=hash_password(user['password']),
roles=[role]
)
default_tenant_role = user_datastore.find_role(DEFAULT_TENANT_ROLE)
user_obj.active = user.get('active', True)
user_tenant_association = UserTenantAssoc(
user=user_obj,
tenant=default_tenant,
role=default_tenant_role,
)
user_obj.tenant_associations.append(user_tenant_association)
user_datastore.commit()
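A minimal usage sketch tying the helpers above together, as a test fixture might; the call order is inferred from the definitions, and an initialized `manager_rest` app and database context is assumed:

```python
# Sketch: seed the test database with the fixed user sets defined above.
# Assumes an initialized app/db context, as in the surrounding test suite.
add_users_to_db(get_test_users() + get_status_reporters())
```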
| VU-Cog-Sci/PRF_experiment | exp_tools/Staircase.py | Python | mit | 6,218 | 0.040849 |
#!/usr/bin/env python
# encoding: utf-8
"""
Staircase.py
Created by Tomas HJ Knapen on 2009-11-26.
Copyright (c) 2009 TK. All rights reserved.
"""
import os, sys, datetime
import subprocess, logging
import pickle, datetime, time
import scipy as sp
import numpy as np
# import matplotlib.pylab as pl
from math import *
class OneUpOneDownStaircase(object):
"""
OneUpOneDownStaircase object, for one-up-one-down staircase in its standard form.
"""
def __init__(self, initial_value, initial_stepsize, nr_reversals = 10, increment_value = None, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 40 ):
self.initial_value = initial_value
self.initial_stepsize = initial_stepsize
self.nr_reversals = nr_reversals
self.increment_value = increment_value
self.stepsize_multiplication_on_reversal = stepsize_multiplication_on_reversal
self.max_nr_trials = max_nr_trials
self.test_value = self.initial_value
self.present_increment_value = increment_value
# set up filler variables
self.past_answers = []
self.nr_trials = 0
self.present_nr_reversals = 0
def test_value(self):
return self.test_value
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
if correct: # answer was correct and so we lower the contrast/whatever value
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + self.present_increment_value
self.past_answers.append(correct)
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
class TwoUpOneDownStaircase(OneUpOneDownStaircase):
def __init__(self, initial_value, initial_stepsize, nr_reversals = 10, increment_value = None, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 40 ):
super(TwoUpOneDownStaircase, self).__init__(initial_value, initial_stepsize, nr_reversals = 10, increment_value = None, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 40)
self.past_answers = [0.5, 0.5, 0.5]
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
self.past_answers.append(correct)
nr_corrects_in_last_2_trials = np.array(self.past_answers, dtype = float)[-2:].sum()
if nr_corrects_in_last_2_trials == 2: # this subject is too good for this stimulus value
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + self.present_increment_value
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
class ThreeUpOneDownStaircase(TwoUpOneDownStaircase):
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
self.past_answers.append(correct)
nr_corrects_in_last_3_trials = np.array(self.past_answers, dtype = float)[-3:].sum()
if nr_corrects_in_last_3_trials == 3: # this subject is too good for this stimulus value
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + self.present_increment_value
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
class YesNoStaircase(object):
def __init__(self, initial_value, initial_stepsize, nr_reversals = 100, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 400 ):
self.initial_value = initial_value
self.initial_stepsize = initial_stepsize
self.nr_reversals = nr_reversals
self.stepsize_multiplication_on_reversal = stepsize_multiplication_on_reversal
self.max_nr_trials = max_nr_trials
self.test_value = self.initial_value
self.present_increment_value = initial_stepsize
# set up filler variables
self.past_answers = []
self.nr_trials = 0
self.present_nr_reversals = 0
def test_value(self):
return self.test_value
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
if correct: # answer was correct and so we lower the contrast/whatever value according to Kaernbach's method
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + 3.0 * self.present_increment_value
self.past_answers.append(correct)
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
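A minimal usage sketch of the staircase API above: construct it with a starting value and step size, read `test_value` each trial, report the answer, and stop when `answer()` returns `False`. The simulated observer with a fixed threshold is hypothetical:

```python
import random

# Sketch: drive a OneUpOneDownStaircase with a simulated observer whose
# detection threshold is 0.3; note increment_value must be given explicitly,
# since __init__ stores it directly as present_increment_value.
staircase = OneUpOneDownStaircase(initial_value=1.0, initial_stepsize=0.1,
                                  increment_value=0.1, max_nr_trials=40)
running = True
while running:
    correct = staircase.test_value > 0.3 and random.random() < 0.9
    running = staircase.answer(correct)
print(staircase.test_value)   # hovers near the simulated threshold
```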
| pklaus/brother_ql | brother_ql/__init__.py | Python | gpl-3.0 | 110 | 0.009091 |
from .exceptions import *
from .raster import BrotherQLRaster
from .brother_ql_create import create_label
| franziz/arcrawler | lib/factory/writer.py | Python | gpl-3.0 | 940 | 0.03617 |
from ..writer.crawler import CrawlerWriter
from ..writer.run import RunConfigWriter
from ..writer.sentry import SentryConfigWriter
from ..writer.route import RouteConfigWriter
from ..writer.monitor import MonitorConfigWriter
class WriterFactory:
CRAWLER = 0
RUN_CONFIG = 1
SENTRY_CONFIG = 2
ROUTE_CONFIG = 3
MONITOR_CONFIG = 4
def __init__(self):
pass
@classmethod
def get_writer(self, writer_name=None):
""" Exceptions:
- AssertionError
"""
assert writer_name is not None, "writer_name is not defined."
if writer_name == WriterFactory.CRAWLER:
return CrawlerWriter()
elif writer_name == WriterFactory.RUN_CONFIG:
return RunConfigWriter()
elif writer_name == WriterFactory.SENTRY_CONFIG:
return SentryConfigWriter()
elif writer_name == WriterFactory.ROUTE_CONFIG:
return RouteConfigWriter()
elif writer_name == WriterFactory.MONITOR_CONFIG:
return MonitorConfigWriter()
|
Lincoln-Cybernetics/Explore-
|
mapgen.py
|
Python
|
unlicense
| 7,161
| 0.012847
|
import pygame
import random
import item
import mob
import tile
class Mapgen(object):
def __init__(self, level):
self.xsiz = 10
self.ysiz = 10
self.biome = "random"
self.procedure = 0
self.zone = []
self.level = level
self.sizefactor = 2
#self.items = pygame.sprite.Group()
#self.mobs = pygame.sprite.Group()
#creates the base map
def generate(self,x,y,biome):
self.zone = []
self.xsiz = x
self.ysiz = y
self.biome = biome
self.sizefactor = (x/10)+(y/10)
landtype = 0
#for num in range(sizefactor*3):
# itemo = item.Item(self.level, self.level.items)
# itemo.set_type(random.randrange(6)+1)
#for umb in range(sizefactor*3):
# mobbo = mob.Mob(self.level, self.level.mobs)
# mobbo.set_type(random.randrange(7))
# mobbo.set_species(random.randrange(4)+1)
#main land generation
for a in range(x):
mapcol = []
for b in range(y):
#Purely Random
if (self.procedure == 0):
landtype = random.randrange(17)+1
#probability manipulation
if (self.procedure == 1):
if (biome == "grassland"):
common = [1,2,3,13]
uncommon = [4,5,6,7]
rare = [8,9,10]
vrare = [12,15]
self.level.passable = 1
if(biome == "forest"):
common = [3,4,5,9]
uncommon = [1,2,6]
rare = [7,13]
vrare = [10,11,12]
self.level.passable = 2
if(biome == "desert"):
common = [8,7]
uncommon = [16,17]
rare = [9,13]
vrare = [1,2]
self.level.passable = 7
landex = random.randrange(256)
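#tiered rarity: randrange(256) is always < 256, so a 'common' tile is the default; the stricter tests below (< 64, < 16, < 2) overwrite it with progressively rarer picks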
if landex < 256:
landtype = random.choice(common)
if landex < 64:
landtype = random.choice(uncommon)
if landex < 16:
landtype = random.choice(rare)
if landex < 2:
landtype = random.choice(vrare)
#generate the tiles
acre = tile.Land(self.level, self.level.terrain)
if a == 0 or b == 0 or a == x-1 or b == y-1:
acre.set_type(0)
self.level.space.add(acre)
for mobbo in self.level.mobs:
mobbo.unpassable.add(acre)
else:
acre.set_type(landtype)
acre.get_image()
acre.spawn(a, b)
self.level.background.add(acre)
mapcol.append(acre)
self.zone.append( mapcol )
for a in range(len(self.zone)):
for b in range(len(self.zone[0])):
place = self.zone[a][b]
if place in self.level.space:
pass
else:
for wa in range(3):
for ha in range(3):
if a+wa-1 >= len(self.zone) or b+ha-1 >= len(self.zone[0]):
pass
else:
place.neighbors.add(self.zone[a+wa-1][b+ha-1])
return self.zone
#causes deserts to expand
def desertify(self):
for place in self.level.terrain:
place.desert_check()
#causes forests to grow
def grow_forest(self):
for place in self.level.terrain:
place.forest_check()
#lowers sea level
def sea_lower(self):
for place in self.level.terrain:
if place.flavnum == 15:
if random.randrange(100) < 80:
place.set_type(14)
if place.flavnum == 14:
if random.randrange(100) < 70:
place.set_type(13)
if place.flavnum == 13:
if random.randrange(100) < 60:
place.set_type(1)
#raises sea level
def sea_fill(self):
for place in self.level.terrain:
excepts = [0,15,14,12,11,10]
if place.flavnum == 15:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(14)
if place.flavnum == 14:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
if place.flavnum == 13:
for location in place.neighbors:
if random.randrange(100) < 10:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
#populates the map with mobs
def populate(self, density):
for a in range(self.sizefactor*density):
mobbo = mob.Mob(self.level, self.level.mobs)
mobbo.set_type(random.randrange(7))
mobbo.set_species(random.randrange(4)+1)
mobbo.unpassable.add(self.level.space)
mobbo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if mobbo.mapx == self.level.player1.mapx and mobbo.mapy == self.level.player1.mapy:
mobbo.kill()
#adds items to the map
def litter(self, density):
for a in range(self.sizefactor*density):
itemo = item.Item(self.level, self.level.items)
itemo.set_type(random.randrange(8))
itemo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if itemo.mapx == self.level.player1.mapx and itemo.mapy == self.level.player1.mapy:
itemo.kill()
#adds landmarks
def monumentalize(self, number):
for a in range(number):
monument = tile.Landmark(self.level, self.level.background)
monument.set_type(random.randrange(4))
monument.spawn(random.randrange(len(self.zone)-3)+1,random.randrange(len(self.zone[0])-3)+1)
pygame.sprite.spritecollide(monument, self.level.landmarks, True)
self.level.landmarks.add(monument)
| david672orford/pykarta | pykarta/geometry/from_text.py | Python | gpl-2.0 | 2,761 | 0.030215 |
# encoding=utf-8
# pykarta/geometry/from_text.py
# Copyright 2013--2020, Trinity College
# Last modified: 9 February 2020
import re
from . import Point
# Create a Point() from a text string describing a latitude and longitude
#
# Example from Wikipedia article Whitehouse: 38° 53′ 51.61″ N, 77° 2′ 11.58″ W
# \u2032 -- prime (minutes sign)
# \u2033 -- double prime (seconds sign)
# \u2019 -- single closing quote
# \u201d -- double closing quote
def PointFromText(coords_text):
if not re.search(u'^[\(\-0-9\.°\'\u2019\u2032"\u201d\u2033NSEW, \)]+$', coords_text, flags=re.IGNORECASE):
return None
#print "Pasted coordinates:", coords_text
# Make the format more standard
coords_text = coords_text.upper() # nsew -> NSEW
coords_text = coords_text.replace(u"(", u"") # remove parenthesis
coords_text = coords_text.replace(u")", u"")
coords_text = coords_text.replace(u"'", u"\u2032") # ASCII single quote (apostroph) to prime
coords_text = coords_text.replace(u"\u2019", u"\u2032") # right single quote to prime
coords_text = coords_text.replace(u'"', u'\u2033') # ASCII double quote to double prime
coords_text = coords_text.replace(u'\u201d', u'\u2033') # right double quote to double prime
words = _split_coords_text(coords_text)
lat = _parse_degrees(words[0], "NS")
lon = _parse_degrees(words[1], "EW")
return Point(lat, lon)
def _split_coords_text(coords_text):
m = re.match('^([^,]+),([^,]+)$', coords_text)
if m:
return (m.group(1), m.group(2))
m = re.match('^(\S+)\s+(\S+)$', coords_text)
if m:
return (m.group(1), m.group(2))
m = re.match('^([NS].+)([EW].+)$', coords_text)
if m:
return (m.group(1), m.group(2))
m = re.match('^(.+[NS])(.+[EW])$', coords_text)
if m:
return (m.group(1), m.group(2))
raise Exception("Two coordinates required")
def _parse_degrees(degrees_string, directions):
degrees_string = degrees_string.replace(u" ", u"") # remove spaces
sign = 1.0
if directions[0] in degrees_string: # N or E
degrees_string = degrees_string.replace(directions[0], "")
elif directions[1] in degrees_string: # S or W
degrees_string = degrees_string.replace(directions[1], "")
sign = -1.0
# Decimal degrees signed
m = re.search(u'^([-\d\.]+)°?$', degrees_string)
if m:
return float(m.group(1)) * sign
# Degrees, minutes, seconds
m = re.search(u'^(\d+)°(\d+)\u2032([\d\.]+)\u2033$', degrees_string)
if m:
degrees = int(m.group(1))
degrees += int(m.group(2)) / 60.0
degrees += float(m.group(3)) / 3600.0
return degrees * sign
m = re.search(u'^(\d+)°([\d\.]+)\u2032?$', degrees_string)
if m:
degrees = int(m.group(1))
degrees += float(m.group(2)) / 60.0
return degrees * sign
raise Exception("Failed to parse coordinate: %s" % degrees_string)
| jacobajit/ion | intranet/apps/announcements/migrations/0009_announcement_expiration_date.py | Python | gpl-2.0 | 432 | 0.002315 |
# -*- coding: utf-8 -*-
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('announcements', '0008_auto_20150603_1401')]
operations = [migrations.AddField(model_name='announcement',
name='expiration_date',
field=models.DateTimeField(default=datetime.datetime(3000, 1, 1, 0, 0)),)]
| Linaro/lava-dispatcher | lava_dispatcher/job.py | Python | gpl-2.0 | 10,777 | 0.001299 |
# Copyright (C) 2014 Linaro Limited
#
# Author: Neil Williams <neil.williams@linaro.org>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
import atexit
import errno
import shutil
import tempfile
import datetime
import time
import pytz
import traceback
import os
from lava_dispatcher.action import (
LAVABug,
LAVAError,
JobError,
)
from lava_dispatcher.logical import PipelineContext
from lava_dispatcher.diagnostics import DiagnoseNetwork
from lava_dispatcher.protocols.multinode import MultinodeProtocol # pylint: disable=unused-import
from lava_dispatcher.utils.constants import DISPATCHER_DOWNLOAD_DIR
from lava_dispatcher.utils.filesystem import debian_package_version
class ZMQConfig(object):
"""
Namespace for the ZMQ logging configuration
"""
def __init__(self, logging_url, master_cert, slave_cert, ipv6):
self.logging_url = logging_url
self.master_cert = master_cert
self.slave_cert = slave_cert
self.ipv6 = ipv6
class Job(object): # pylint: disable=too-many-instance-attributes
"""
Populated by the parser, the Job contains all of the
Actions and their pipelines.
parameters provides the immutable data about this job:
action_timeout
job_name
priority
device_type (mapped to target by scheduler)
yaml_line
logging_level
job_timeout
Job also provides the primary access to the Device.
The NewDevice class only loads the specific configuration of the
device for this job - one job, one device.
"""
def __init__(self, job_id, parameters, logger): # pylint: disable=too-many-arguments
self.job_id = job_id
self.logger = logger
self.device = None
self.parameters = parameters
self.__context__ = PipelineContext()
self.pipeline = None
self.connection = None
        self.triggers = []  # actions can add trigger strings to run a diagnostic
self.diagnostics = [
DiagnoseNetwork,
]
self.timeout = None
self.protocols = []
self.compatibility = 2
# Was the job cleaned
self.cleaned = False
# Root directory for the job tempfiles
self.tmp_dir = None
# override in use
self.base_overrides = {}
self.started = False
@property
def context(self):
return self.__context__.pipeline_data
@context.setter
def context(self, data):
self.__context__.pipeline_data.update(data)
def diagnose(self, trigger):
"""
Looks up the class to execute to diagnose the problem described by the
specified trigger.
"""
trigger_tuples = [(cls.trigger(), cls) for cls in self.diagnostics]
for diagnostic in trigger_tuples:
if trigger is diagnostic[0]:
return diagnostic[1]()
return None
def describe(self):
return {'device': self.device,
'job': self.parameters,
'compatibility': self.compatibility,
'pipeline': self.pipeline.describe()}
def mkdtemp(self, action_name, override=None):
"""
Create a tmp directory in DISPATCHER_DOWNLOAD_DIR/{job_id}/ because
this directory will be removed when the job finished, making cleanup
easier.
"""
if override is None:
if self.tmp_dir is None:
create_base_dir = True
base_dir = DISPATCHER_DOWNLOAD_DIR
else:
create_base_dir = False
base_dir = self.tmp_dir
else:
if override in self.base_overrides:
create_base_dir = False
base_dir = self.base_overrides[override]
else:
create_base_dir = True
base_dir = override
if create_base_dir:
# Try to create the directory.
base_dir = os.path.join(base_dir, str(self.job_id))
try:
os.makedirs(base_dir, mode=0o755)
except OSError as exc:
if exc.errno != errno.EEXIST:
# When running unit tests
base_dir = tempfile.mkdtemp(prefix='pipeline-')
atexit.register(shutil.rmtree, base_dir, ignore_errors=True)
# Save the path for the next calls (only if that's not an override)
if override is None:
self.tmp_dir = base_dir
else:
self.base_overrides[override] = base_dir
# Create the sub-directory
tmp_dir = tempfile.mkdtemp(prefix=action_name + '-', dir=base_dir)
os.chmod(tmp_dir, 0o755)
return tmp_dir
def _validate(self):
"""
Validate the pipeline and raise an exception (that inherit from
LAVAError) if it fails.
"""
self.logger.info("Start time: %s (UTC)", pytz.utc.localize(datetime.datetime.utcnow()))
for protocol in self.protocols:
try:
protocol.configure(self.device, self)
except LAVAError:
self.logger.error("Configuration failed for protocol %s", protocol.name)
raise
except Exception as exc:
self.logger.error("Configuration failed for protocol %s", protocol.name)
self.logger.exception(traceback.format_exc())
raise LAVABug(exc)
if not protocol.valid:
msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
self.logger.exception(msg)
raise JobError(msg)
# Check that namespaces are used in all actions or none
namespaces = set()
for action in self.parameters["actions"]:
action_name = list(action.keys())[0]
namespaces.add(action[action_name]["namespace"])
# 'common' is a reserved namespace that should not be present with
# other namespaces.
if len(namespaces) > 1 and 'common' in namespaces:
msg = "'common' is a reserved namespace that should not be present with other namespaces"
self.logger.error(msg)
self.logger.debug("Namespaces: %s", ", ".join(namespaces))
raise JobError(msg)
# validate the pipeline
self.pipeline.validate_actions()
def validate(self):
"""
Public wrapper for the pipeline validation.
Send a "fail" results if needed.
"""
label = "lava-dispatcher, installed at version: %s" % debian_package_version(split=False)
self.logger.info(label)
self.logger.info("start: 0 validate")
start = time.time()
success = False
try:
self._validate()
except LAVAError as exc:
raise
except Exception as exc:
# provide useful info on command line, e.g. failed unit tests.
self.logger.exception(traceback.format_exc())
raise LAVABug(exc)
else:
success = True
finally:
if not success:
self.cleanup(connection=None)
self.logger.info("validate duration: %.02f", time.time() - start)
self.logger.results({"definition": "lava",
"case": "validate",
"result": "pass" if success else "fail"})
def _run(self):
"""
Run the pipeline under the run() wrapper that w
|
av8ramit/tensorflow
|
tensorflow/python/ops/linalg/linear_operator_full_matrix.py
|
Python
|
apache-2.0
| 6,505
| 0.001845
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorFullMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorFullMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorFullMatrix` has exactly the same performance as would be
achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
made based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
  In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorFullMatrix"):
r"""Initialize a `LinearOperatorFullMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
      TypeError: If `matrix.dtype` is not an allowed type.
"""
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
self._check_matrix(self._matrix)
super(LinearOperatorFullMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_matrix(self, matrix):
"""Static check of the `matrix` argument."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
raise ValueError(
"Argument matrix must have at least 2 dimensions. Found: %s"
% matrix)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return math_ops.matmul(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _to_dense(self):
return self._matrix
|
dhinakg/BitSTAR
|
plugins/srcutils.py
|
Python
|
apache-2.0
| 1,479
| 0.008114
|
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from api import command, message, plugin
def onInit(plugin_in):
gitsrc_command = command.Command(plugin_in, 'source', shortdesc='Get the git repo for the bot!')
docs_command = command.Command(plugin_in, 'docs', shortdesc='Get a link to the bot\'s documentation')
tests_command = command.Command(plugin_in, 'tests', shortdesc='Get a link to the bot\'s tests')
    return plugin.Plugin(plugin_in, 'srcutils', [gitsrc_command, docs_command, tests_command])
async def onCommand(message_in):
if message_in.command == 'source':
return message.Message(body="https://github.com/StarbotDiscord/Starbot")
if message_in.command == 'docs':
return message.Message(body="http://starbot.readthedocs.io/en/latest/")
if message_in.command == 'tests':
return message.Message(body="https://travis-ci.org/StarbotDiscord/Starbot")
|
UManPychron/pychron
|
pychron/pipeline/nodes/audit.py
|
Python
|
apache-2.0
| 1,164
| 0
|
# ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BAS
|
IS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.pipeline.editors.audit_editor import AuditEditor
from pychron.pipeline.nodes.base import BaseNode
class AuditNode(BaseNode):
auto_configure = False
name = 'Audit'
configurable = False
def run(self, state):
editor = AuditEditor()
editor.set_unks_refs(state.unknowns, state.references)
state.editors.append(editor)
# ============= EOF =============================================
|
comedate/VolumeRendering
|
test_mpr.py
|
Python
|
mit
| 580
| 0.003448
|
import vtk
from vtk.util.misc import vtkGetDataRoot
import render_mpr
reader = vtk.vtkMetaImageReader()
reader.SetFileName("C:\\Users\\fei.wang\\PycharmProjects\\Rendering\\data\\org.mha")
reader.Update()
render_mpr = render_mpr.RendererMPR()
render_mpr.set_volume(reader)
render_mpr.set_output_image_size(1024, 1024)
render_mpr.set_lut_property("")
render_mpr.render()
render_mpr.get_output_png_image("1.png")
# test Camera
cur_camera = render_mpr.get_camera()
print("cur_camera:" + str(cur_camera))
render_mpr.set_camera(cur_camera)
render_mpr.render()
|
asedunov/intellij-community
|
python/testData/formatter/noBlankLinesAfterLocalImports_after.py
|
Python
|
apache-2.0
| 182
| 0.005495
|
from pprint import pprint
VAR = 42
def foo():
import sys
import ast, tokenize
pass
class C:
from textwrap import dedent
pass
import codecs as C
pass
|
fxia22/ASM_xf
|
PythonD/site_python/OpenGL/GL/SUN/convolution_border_modes.py
|
Python
|
gpl-2.0
| 585
| 0.011966
|
import string
__version__ = string.split('$Revision: 1.6 $')[1]
__date__ = string.join(string.split('$Date: 2001/11/17 14:12:34 $')[1:3], ' ')
__author__ = 'Tarn Weisner Burton <twburton@users.sourceforge.net>'
__doc__ = 'http://oss.sgi.com/projects/ogl-sample/registry/SUN/convolution_border_modes.txt'
__api_version__ = 0x103
GL_WRAP_BORDER_SUN = 0x81D4
def glInitConvolutionBorderModesSUN():
from OpenGL.GL import __has_extension
return __has_extension("GL_SUN_convolution_border_modes")
def __info():
if glInitConvolutionBorderModesSUN():
return []
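# Usage sketch (assumes a current GL context already exists):
#   if glInitConvolutionBorderModesSUN():
#       # extension present; GL_WRAP_BORDER_SUN (0x81D4) may be passed as a
#       # convolution border mode where the imaging subset is available
#       ...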
|
luzfcb/django-autocomplete-light
|
test_project/select2_one_to_one/urls.py
|
Python
|
mit
| 547
| 0.001828
|
from dal import autocomplete
from django.conf.urls import url
from django.views import generic
from .forms import TestForm
from .models import TestModel
urlpatterns = [
url(
'test-autocomplete/$',
autocomplete.Select2QuerySetView.as_view(
model=TestModel,
            create_field='name',
),
name='select2_one_to_one_autocomplete',
),
url(
'test/(?P<pk>\d+)/$',
generic.UpdateView.as_view(
model=TestModel,
form_class=TestForm,
)
),
]
|
saudisproject/saudi-bots
|
bots/spa.py
|
Python
|
gpl-3.0
| 4,254
| 0.002367
|
from urllib.request import urlopen
from urllib.parse import urlparse, parse_qs
from socket import error as SocketError
import errno
from bs4 import BeautifulSoup
MAX_PAGES_TO_SEARCH = 3
def parse_news(item):
'''Parse news item
return is a tuple(id, title, url)
'''
url = 'http://www.spa.gov.sa' + item['href']
url_parsed = urlparse(url)
qs = parse_qs(url_parsed[4])
id = qs['newsid'][0]
title = item.h2.contents[0]
title = " ".join(title.split())
item_parsed = (id, title, url)
return item_parsed
def retrieve_news(person=0, royal=0, cabinet=0, last_id=-1):
'''Retrieve news for person or royal
person 1= king, 2= crown prince and 3= deputy crown prince
    if royal is 1, royal news will be retrieved
    if last_id is not defined it will return the max
return list of news tuples up to MAX_PAGES_TO_SEARCH (page = 10 news)
[(id, title, url)...]
'''
all_news = []
found = False
page = 1
while (page <= MAX_PAGES_TO_SEARCH and not found):
url = ("http://www.spa.gov.sa/ajax/listnews.php?sticky={}&cat=0&cabine"
"t={}&royal={}&lang=ar&pg={}".format(person, cabinet, royal, page))
try:
html = urlopen(url)
soup = BeautifulSoup(html, "html.parser")
news = soup.find_all("a", class_="aNewsTitle")
for item in news:
item_parsed = parse_news(item)
if item_parsed[0] <= str(last_id):
found = True
break
all_news.append(item_parsed)
except SocketError as e:
if e.errno != errno.ECONNRESET:
raise
pass
page = page + 1
return all_news
def retrieve_detail(item):
    '''Retrieve detail for a news item
return is tuple (id, title, url, text)
'''
url = item[2]
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
detail = soup.find(class_='divNewsDetailsText')
detail = detail.get_text()
_list = list(item)
_list.insert(3, detail)
item = tuple(_list)
return item
def royal_order(last_id=-1):
    '''Retrieve royal orders
    if last_id is not defined it will return the max
return list of royal orders tuples up to MAX_PAGES_TO_SEARCH (page=10)
[(id, title, url, text)...]
'''
orders = []
_news = retrieve_news(royal=1, last_id=last_id)
for item in _news:
_detail = retrieve_detail(item)
orders.append(_detail)
return orders
def cabinet_decision(last_id=-1):
    '''Retrieve cabinet decisions
    if last_id is not defined it will return the max
return list of cabinet decisions tuples up to MAX_PAGES_TO_SEARCH (page=10)
[(id, title, url, text)...]
'''
decisions = []
_news = retrieve_news(cabinet=1, last_id=last_id)
for item in _news:
_detail = retrieve_detail(item)
decisions.append(_detail)
return decisions
def arrival_news(person, last_id=-1):
    '''Retrieve only arrival news for person
    if last_id is not defined it will return the max
return list of arrival news tuples up to MAX_PAGES_TO_SEARCH (page = 10 news)
[(id, title, url, location)...]
'''
arrival_news = []
all_news = retrieve_news(person=person, last_id= last_id)
for item in all_news:
if 'يصل إلى' in item[1]:
_list = list(item)
_list.insert(3, (item[1].split('يصل إلى'))[1].split('قادماً من')[0])
item = tuple(_list)
arrival_news.append(item)
return arrival_news
def leave_news(person, last_id=-1):
    '''Retrieve only leave news for person
    if last_id is not defined it will return the max
return list of leave news tuples up to MAX_PAGES_TO_SEARCH (page = 10 news)
[(id, title, url, locationFromTo)...]
'''
leave_news = []
all_news = retrieve_news(person=person, last_id= last_id)
for item in all_news:
if 'يغادر' in item[1]:
_list = list(item)
            _list.insert(3, item[1].split('يغادر')[1])
item = tuple(_list)
leave_news.append(item)
return leave_news
if __name__ == "__main__":
    # just for testing
news = cabinet_decision()
print(news)
|
cjwfuller/python-challenge
|
level6.py
|
Python
|
mit
| 250
| 0
|
nothing = '90052'
while True:
f = open('channel/' + nothing + '.txt', 'r')
line = f.readline()
splits = line.split('Next nothing is ', 1)
if(len(splits) == 2):
nothing = splits[1]
print nothing
else:
break
|
cklb/pyinduct
|
pyinduct/core.py
|
Python
|
gpl-3.0
| 100,888
| 0.000545
|
"""
In the Core module you can find all basic classes and functions which form the backbone of the toolbox.
"""
import warnings
import numbers
import numpy as np
import numpy.ma as ma
import collections
from copy import copy, deepcopy
from numbers import Number
from scipy import integrate
from scipy.linalg import block_diag
from scipy.optimize import root
from scipy.interpolate import interp1d, interp2d, RectBivariateSpline, RegularGridInterpolator
from .registry import get_base
__all__ = ["Domain", "EvalData", "Parameters",
"Base", "BaseFraction", "StackedBase",
"Function", "ConstantFunction", "ComposedFunctionVector",
"find_roots", "sanitize_input", "real", "dot_product_l2",
"normalize_base", "project_on_base", "change_projection_base",
"back_project_from_base",
"calculate_scalar_product_matrix",
"calculate_base_transformation_matrix",
"calculate_expanded_base_transformation_matrix",
]
def sanitize_input(input_object, allowed_type):
"""
Sanitizes input data by testing if *input_object* is an array of type *allowed_type*.
Args:
input_object: Object which is to be checked.
allowed_type: desired type
Return:
input_object
"""
input_object = np.atleast_1d(input_object)
for obj in np.nditer(input_object, flags=["refs_ok"]):
if not isinstance(obj.item(), allowed_type):
raise TypeError("Only objects of type: {0} accepted.".format(allowed_type))
return input_object
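# Behavioural sketch (illustrative, not part of the original module):
#   sanitize_input(1.0, numbers.Number)       -> array([1.0])
#   sanitize_input([1, "a"], numbers.Number)  -> raises TypeError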
class BaseFraction:
"""
Abstract base class representing a basis that can be used to describe functions of several variables.
"""
def __init__(self, members):
self.members = members
def scalar_product_hint(self):
"""
Empty Hint that can return steps for scalar product calculation.
Note:
Overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`
"""
pass
def function_space_hint(self):
"""
Empty Hint that can return properties which uniquely define
the function space of the :py:class:`.BaseFraction`.
Note:
Overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`.
"""
pass
def derive(self, order):
"""
Basic implementation of derive function.
        Empty implementation, overwrite to use this functionality.
For an example implementation see :py:class:`.Function`
Args:
order (:class:`numbers.Number`): derivative order
Return:
:py:class:`.BaseFraction`: derived object
"""
if order == 0:
return self
else:
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def scale(self, factor):
"""
Factory method to obtain instances of this base fraction, scaled by the
given factor. Empty function, overwrite to implement custom
functionality. For an example implementation see :py:class:`.Function`.
Args:
factor: Factor to scale the vector.
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def raise_to(self, power):
"""
Raises this fraction to the given *power*.
Args:
power (:obj:`numbers.Number`): power to raise the fraction onto
Return:
raised fraction
"""
if power == 1:
return self
else:
raise NotImplementedError("Implement this functionality to make use of it.")
def get_member(self, idx):
"""
Getter function to access members.
Empty function, overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`
Note:
Empty function, overwrite to implement custom functionality.
Args:
idx: member index
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def __call__(self, *args, **kwargs):
"""
Spatial evaluation of the base fraction.
Args:
*args: Positional arguments.
**kwargs: Keyword arguments.
Returns:
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def add_neutral_element(self):
"""
Return the neutral element of addition for this object.
In other words: `self + ret_val == self`.
"""
raise NotImplementedError()
def mul_neutral_element(self):
"""
Return the neutral element of multiplication for this object.
In other words: `self * ret_val == self`.
"""
raise NotImplementedError()
def evaluation_hint(self, values):
"""
If evaluation can be accelerated by using special properties of a function, this function can be
        overwritten to perform that computation. It gets passed an array of places where the caller
wants to evaluate the function and should return an array of the same length, containing the results.
Note:
This implementation just calls the normal evaluation hook.
Args:
values: places to be evaluated at
Returns:
numpy.ndarray: Evaluation results.
"""
return self(values)
class Function(BaseFraction):
"""
Most common instance of a :py:class:`.BaseFraction`.
This class handles all tasks concerning derivation and evaluation of
functions. It is used broad across the toolbox and therefore incorporates
    some very specific attributes. For example, to ensure the accuracy of
    numerical handling, functions may only be evaluated in areas where they
    provide nonzero return values. Also, their domain has to be taken into account.
Therefore the attributes *domain* and *nonzero* are provided.
    To save implementation time, ready-to-go versions like
:py:class:`.LagrangeFirstOrder` are provided in the
:py:mod:`pyinduct.simulation` module.
For the implementation of new shape functions subclass this implementation
or directly provide a callable *eval_handle* and callable
*derivative_handles* if spatial derivatives are required for the
application.
Args:
eval_handle (callable): Callable object that can be evaluated.
domain((list of) tuples): Domain on which the eval_handle is defined.
nonzero(tuple): Region in which the eval_handle will return
nonzero output. Must be a subset of *domain*
derivative_handles (list): List of callable(s) that contain
derivatives of eval_handle
"""
# TODO: overload add and mul operators
def __init__(self, eval_handle, domain=(-np.inf, np.inf), nonzero=(-np.inf, np.inf), derivative_handles=None):
super().__init__(None)
self._vectorial = False
self._function_handle = None
self._derivative_handles = None
self.domain = set()
self.nonzero = set()
for kw, val in zip(["domain", "nonzero"], [domain, nonzero]):
if not isinstance(val, set):
if isinstance(val, tuple):
val = {val}
else:
raise TypeError("(Set of) or tuple(s) has to be provided "
"for {0}".format(kw))
setattr(self, kw, domain_simplification(val))
self.function_handle = eval_handle
self.derivative_handles = derivative_handles
@property
def derivative_handles(self):
return self._derivative_handles
@derivative
|
confluentinc/examples
|
clients/cloud/python/ccloud_lib.py
|
Python
|
apache-2.0
| 5,500
| 0.001273
|
#!/usr/bin/env python
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Helper module
#
# =============================================================================
import argparse, sys
from confluent_kafka import avro, KafkaError
from confluent_kafka.admin import AdminClient, NewTopic
from uuid import uuid4
#import certifi
name_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Name",
"type": "record",
"fields": [
{"name": "name", "type": "string"}
]
}
"""
class Name(object):
"""
Name stores the deserialized Avro record for the Kafka key.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["name", "id"]
def __init__(self, name=None):
self.name = name
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_name(obj, ctx):
return Name(obj['name'])
@staticmethod
def name_to_dict(name, ctx):
return Name.to_dict(name)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(name=self.name)
# Schema used for serializing Count class, passed in as the Kafka value
count_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Count",
"type": "record",
"fields": [
{"name": "count", "type": "int"}
]
}
"""
class Count(object):
"""
Count stores the deserialized Avro record for the Kafka value.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["count", "id"]
def __init__(self, count=None):
self.count = count
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_count(obj, ctx):
return Count(obj['count'])
@staticmethod
def count_to_dict(count, ctx):
return Count.to_dict(count)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(count=self.count)
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="Confluent Python Client example to produce messages \
to Confluent Cloud")
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-f',
dest="config_file",
help="path to Confluent Cloud configuration file",
required=True)
required.add_argument('-t',
dest="topic",
help="topic name",
required=True)
args = parser.parse_args()
return args
def read_ccloud_config(config_file):
"""Read Confluent Cloud configuration for librdkafka clients"""
conf = {}
with open(config_file) as fh:
for line in fh:
line = line.strip()
if len(line) != 0 and line[0] != "#":
parameter, value = line.strip().split('=', 1)
conf[parameter] = value.strip()
#conf['ssl.ca.location'] = certifi.where()
return conf
def pop_schema_registry_params_from_config(conf):
"""Remove potential Schema Registry related configurations from dictionary"""
conf.pop('schema.registry.url', None)
conf.pop('basic.auth.user.info', None)
conf.pop('basic.auth.credentials.source', None)
return conf
def create_topic(conf, topic):
"""
Create a topic if needed
Examples of additional admin API functionality:
https://github.com/confluentinc/confluent-kafka-python/blob/master/examples/adminapi.py
"""
admin_client_conf = pop_schema_registry_params_from_config(conf.copy())
a = AdminClient(admin_client_conf)
fs = a.create_topics([NewTopic(
topic,
num_partitions=1,
replication_factor=3
)])
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} created".format(topic))
except Exception as e:
# Continue if error code TOPIC_ALREADY_EXISTS, which may be true
# Otherwise fail fast
if e.args[0].code() != KafkaError.TOPIC_ALREADY_EXISTS:
print("Failed to create topic {}: {}".format(topic, e))
sys.exit(1)
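# Typical wiring of the helpers above (sketch; attribute names follow parse_args):
#   args = parse_args()
#   conf = read_ccloud_config(args.config_file)
#   create_topic(conf, args.topic)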
|
dangoldin/pyproducthunt
|
analyze.py
|
Python
|
mit
| 168
| 0.005952
|
from phapi import ProductHuntApi
import settings
import json
pha = ProductHuntApi(settings.DEVELOPER_TOKEN)
posts = pha.get_posts()
print json.dumps(posts, indent=2)
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Python
|
gpl-2.0
| 5,254
| 0.007423
|
import subprocess
from distutils import spawn
brctlexe = spawn.find_executable("brctl")
ipexe = spawn.find_executable("ip")
class BridgeException(Exception):
pass
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could n
|
ot set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
class BridgeController(object):
def addbr(self, name):
""" Create a bridge and set the device up. """
_runshell([brctlexe, 'addbr', name],
"Could not create bridge %s." % name)
_runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
"Could not set link up for %s." % name)
return Bridge(name)
def delbr(self, name):
""" Set the device down and delete the bridge. """
self.getbr(name) # Check if exists
_runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
"Could not set link down for %s." % name)
_runshell([brctlexe, 'delbr', name],
"Could not delete bridge %s." % name)
def showall(self):
""" Return a list of all available bridges. """
p = _runshell([brctlexe, 'show'],
"Could not show bridges.")
wlist = map(str.split, p.stdout.read().splitlines()[1:])
brwlist = filter(lambda x: len(x) != 1, wlist)
brlist = map(lambda x: x[0], brwlist)
return map(Bridge, brlist)
def getbr(self, name):
""" Return a bridge object."""
for br in self.showall():
if br.name == name:
return br
raise BridgeException("Bridge does not exist.")
def _runshell(cmd, exception):
""" Run a shell command. if fails, raise a proper exception. """
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait() != 0:
raise BridgeException(exception)
return p
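# Usage sketch (requires brctl/ip installed and root privileges; device names
# are illustrative):
#   ctl = BridgeController()
#   br = ctl.addbr("br0")
#   br.addif("eth0")
#   print(br.getifs())
#   ctl.delbr("br0")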
|
statsmodels/statsmodels
|
statsmodels/stats/rates.py
|
Python
|
bsd-3-clause
| 12,513
| 0
|
'''Test for ratio of Poisson intensities in two independent samples
Author: Josef Perktold
License: BSD-3
'''
import numpy as np
import warnings
from scipy import stats
from statsmodels.stats.base import HolderTuple
from statsmodels.stats.weightstats import _zstat_generic2
def test_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1,
method='score', alternative='two-sided',
etest_kwds=None):
'''test for ratio of two sample Poisson intensities
If the two Poisson rates are g1 and g2, then the Null hypothesis is
- H0: g1 / g2 = ratio_null
against one of the following alternatives
- H1_2-sided: g1 / g2 != ratio_null
- H1_larger: g1 / g2 > ratio_null
- H1_smaller: g1 / g2 < ratio_null
Parameters
----------
count1 : int
Number of events in first sample.
exposure1 : float
Total exposure (time * subjects) in first sample.
count2 : int
Number of events in second sample.
exposure2 : float
Total exposure (time * subjects) in second sample.
    ratio_null : float
ratio of the two Poisson rates under the Null hypothesis. Default is 1.
method : string
Method for the test statistic and the p-value. Defaults to `'score'`.
        Current methods are based on Gu et al. (2008).
Implemented are 'wald', 'score' and 'sqrt' based asymptotic normal
distribution, and the exact conditional test 'exact-cond', and its
mid-point version 'cond-midp'. method='etest' and method='etest-wald'
provide pvalues from `etest_poisson_2indep` using score or wald
statistic respectively.
see Notes.
alternative : string
The alternative hypothesis, H1, has to be one of the following
- 'two-sided': H1: ratio of rates is not equal to ratio_null (default)
- 'larger' : H1: ratio of rates is larger than ratio_null
- 'smaller' : H1: ratio of rates is smaller than ratio_null
etest_kwds: dictionary
Additional parameters to be passed to the etest_poisson_2indep
function, namely y_grid.
Returns
-------
results : instance of HolderTuple class
The two main attributes are test statistic `statistic` and p-value
`pvalue`.
Notes
-----
- 'wald': method W1A, wald test, variance based on separate estimates
- 'score': method W2A, score test, variance based on estimate under Null
- 'wald-log': W3A
    - 'score-log': W4A
- 'sqrt': W5A, based on variance stabilizing square root transformation
- 'exact-cond': exact conditional test based on binomial distribution
- 'cond-midp': midpoint-pvalue of exact conditional test
- 'etest': etest with score test statistic
- 'etest-wald': etest with wald test statistic
References
----------
Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,
Biometrical Journal 50 (2008) 2, 2008
See Also
--------
tost_poisson_2indep
etest_poisson_2indep
'''
# shortcut names
y1, n1, y2, n2 = count1, exposure1, count2, exposure2
d = n2 / n1
r = ratio_null
r_d = r / d
if method in ['score']:
stat = (y1 - y2 * r_d) / np.sqrt((y1 + y2) * r_d)
dist = 'normal'
elif method in ['wald']:
stat = (y1 - y2 * r_d) / np.sqrt(y1 + y2 * r_d**2)
dist = 'normal'
elif method in ['sqrt']:
stat = 2 * (np.sqrt(y1 + 3 / 8.) - np.sqrt((y2 + 3 / 8.) * r_d))
stat /= np.sqrt(1 + r_d)
dist = 'normal'
elif method in ['exact-cond', 'cond-midp']:
from statsmodels.stats import proportion
bp = r_d / (1 + r_d)
y_total = y1 + y2
stat = None
# TODO: why y2 in here and not y1, check definition of H1 "larger"
pvalue = proportion.binom_test(y1, y_total, prop=bp,
alternative=alternative)
if method in ['cond-midp']:
# not inplace in case we still want binom pvalue
pvalue = pvalue - 0.5 * stats.binom.pmf(y1, y_total, bp)
dist = 'binomial'
elif method.startswith('etest'):
if method.endswith('wald'):
method_etest = 'wald'
else:
method_etest = 'score'
if etest_kwds is None:
etest_kwds = {}
stat, pvalue = etest_poisson_2indep(
count1, exposure1, count2, exposure2, ratio_null=ratio_null,
method=method_etest, alternative=alternative, **etest_kwds)
dist = 'poisson'
else:
raise ValueError('method not recognized')
if dist == 'normal':
stat, pvalue = _zstat_generic2(stat, 1, alternative)
rates = (y1 / n1, y2 / n2)
ratio = rates[0] / rates[1]
res = HolderTuple(statistic=stat,
pvalue=pvalue,
distribution=dist,
method=method,
alternative=alternative,
rates=rates,
ratio=ratio,
ratio_null=ratio_null)
return res
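# Example (sketch, illustrative numbers): compare 60 events over 5100 units of
# exposure against 30 events over 4000 units:
#   res = test_poisson_2indep(60, 5100, 30, 4000, method='score')
#   res.statistic, res.pvalue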
def etest_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1,
method='score', alternative='2-sided', ygrid=None,
y_grid=None):
"""E-test for ratio of two sample Poisson rates
If the two Poisson rates are g1 and g2, then the Null hypothesis is
- H0: g1 / g2 = ratio_null
against one of the following alternatives
- H1_2-sided: g1 / g2 != ratio_null
- H1_larger: g1 / g2 > ratio_null
- H1_smaller: g1 / g2 < ratio_null
Parameters
----------
count1 : int
Number of events in first sample
exposure1 : float
Total exposure (time * subjects) in first sample
    count2 : int
        Number of events in second sample
    exposure2 : float
        Total exposure (time * subjects) in second sample
    ratio_null : float
ratio of the two Poisson rates under the Null hypothesis. Default is 1.
method : {"score", "wald"}
Method for the test statistic that defines the rejection region.
alternative : string
The alternative hypothesis, H1, has to be one of the following
'two-sided': H1: ratio of rates is not equal to ratio_null (default)
'larger' : H1: ratio of rates is larger than ratio_null
'smaller' : H1: ratio of rates is smaller than ratio_null
y_grid : None or 1-D ndarray
Grid values for counts of the Poisson distribution used for computing
the pvalue. By default truncation is based on an upper tail Poisson
quantiles.
ygrid : None or 1-D ndarray
Same as y_grid. Deprecated. If both y_grid and ygrid are provided,
ygrid will be ignored.
Returns
-------
stat_sample : float
test statistic for the sample
pvalue : float
References
----------
Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,
Biometrical Journal 50 (2008) 2, 2008
"""
y1, n1, y2, n2 = count1, exposure1, count2, exposure2
d = n2 / n1
r = ratio_null
r_d = r / d
eps = 1e-20 # avoid zero division in stat_func
if method in ['score']:
def stat_func(x1, x2):
return (x1 - x2 * r_d) / np.sqrt((x1 + x2) * r_d + eps)
# TODO: do I need these? return_results ?
# rate2_cmle = (y1 + y2) / n2 / (1 + r_d)
        # rate1_cmle = rate2_cmle * r
# rate1 = rate1_cmle
# rate2 = rate2_cmle
elif method in ['wald']:
def stat_func(x1, x2):
return (x1 - x2 * r_d) / np.sqrt(x1 + x2 * r_d**2 + eps)
# rate2_mle = y2 / n2
# rate1_mle = y1 / n1
# rate1 = rate1_mle
# rate2 = rate2_mle
else:
raise ValueError('method not recognized')
    # The sampling distribution needs to be based on the null hypothesis
# use constrained MLE from 'score' calculation
rate2_cmle = (y1 + y2) / n2 / (1 + r_d)
rate1_cmle = rate2_cmle * r
rate1 = rate1_cmle
rate2 = rate2_cmle
mean1 = n1 * rate1
mean2 = n2 * rate2
stat_sample = stat_func(y1, y2)
|
jamescarignan/Flask-User
|
example_apps/user_auth_app.py
|
Python
|
bsd-2-clause
| 6,986
| 0.006298
|
import os
from flask import Flask, render_template_string, request
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask_user import roles_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///single_file_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'email@example.com')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <noreply@example.com>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
mail = Mail(app) # Initialize Flask-Mail
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
# Define the User data model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
user_auth = db.relationship('UserAuth', uselist=False)
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define the UserAuth data model.
class UserAuth(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
# User authentication information
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
user = db.relationship('User', uselist=False)
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
    # Create all the database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserAuthClass=UserAuth)
user_manager = UserManager(db_adapter, app)
# Create 'user007' user with 'secret' and 'agent' roles
if not UserAuth.query.filter(UserAuth.username=='user007').first():
user1 = User(email='user007@example.com', first_name='James', last_name='Bond', active=True)
db.session.add(user1)
user_auth1 = UserAuth(user=user1, username='user007',
password=user_manager.hash_password('Password1')
)
db.session.add(user_auth1)
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.commit()
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def members_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
            <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
    # The Special page requires a user with 'secret' and 'sauce' roles or with 'secret' and 'agent' roles.
@app.route('/special')
    @roles_required('secret', ['sauce', 'agent'])    # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Special Page</h2>
<p>This page can only be accessed by user007.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
|
robin1885/algorithms-exercises-using-python
|
source-code-from-author-book/Listings-for-Second-Edition/listing_8_14.py
|
Python
|
mit
| 65
| 0
|
class SkipList:
    def __init__(self):
        self.head = None
|
lifeinoppo/littlefishlet-scode
|
SRC/Server/Components/input/python/keyInput.py
|
Python
|
gpl-2.0
| 175
| 0.034014
|
# Replace all key events in JS files and HTML pages
# with our standard key input event.
# For more details, see the DOC dir.
# Key events are replaced globally and handled uniformly.
|
tylercal/dragonfly
|
dragonfly/accessibility/__init__.py
|
Python
|
lgpl-3.0
| 949
| 0.004215
|
from contextlib import contextmanager
import sys
from . import controller
from .utils import (CursorPosition, TextQuery)
if sys.platform.startswith("win"):
from . import ia2
os_controller_class = ia2.Controller
else:
# TODO Support Linux.
pass
controller_instance = None
def get_accessibility_controller():
"""Get the OS-in
|
dependent accessibility controller which is the gateway to all
accessibility functionality."""
global controller_instance
if not controller_instance or controller_instance.stopped:
os_controller = os_controller_class()
controller_instance = controller.AccessibilityController(os_controller)
return controller_instance
@contextmanager
def get_stopping_accessibility_controller():
"""Same as :func:`get_accessibility_controller`, but automatically stops when
used in a `with` context."""
yield get_accessibility_controller()
controller_instance.stop()
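# Usage sketch (effectively Windows-only here, since only the IAccessible2
# backend is wired up above):
#   with get_stopping_accessibility_controller() as ctrl:
#       ...  # issue accessibility queries through ctrl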
|
oh6hay/refworks-bibtex-postprocess
|
textutil.py
|
Python
|
mit
| 943
| 0.004242
|
#!/usr/bin/env python
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
import re
import unicodedata
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
    except NameError:  # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
def text_to_id(text):
"""
Convert input text to id.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
text = strip_accents(text.lower())
text = re.sub('[ ]+', '_', text)
text = re.sub('[^0-9a-zA-Z_-]', '', text)
return text
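# Example (sketch):
#   text_to_id(u"Déjà Vu!")  ->  'deja_vu'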
|
salv-orlando/MyRepo
|
nova/scheduler/filters/json_filter.py
|
Python
|
apache-2.0
| 5,243
| 0.000572
|
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import operator
import nova.scheduler
from nova.scheduler.filters import abstract_filter
class JsonFilter(abstract_filter.AbstractHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = not args[0] in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms"""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def instance_type_to_filter(self, instance_type):
"""Convert instance_type into JSON filter object."""
required_ram = instance_type['memory_mb']
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
['>=', '$compute.disk_available', required_disk]]
return json.dumps(query)
def _parse_string(self, string, host, hostinfo):
"""Strings prefixed with $ are capability lookups in the
form '$service.capability[.subcap*]'.
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
services = dict(compute=hostinfo.compute, network=hostinfo.network,
volume=hostinfo.volume)
service = services.get(path[0], None)
if not service:
return None
for item in path[1:]:
service = service.get(item, None)
if not service:
return None
return service
def _process_filter(self, query, host, hostinfo):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host, hostinfo)
elif isinstance(arg, basestring):
arg = self._parse_string(arg, host, hostinfo)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def filter_hosts(self, host_list, query, options):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
expanded = json.loads(query)
filtered_hosts = []
for host, hostinfo in host_list:
if not hostinfo:
continue
if hostinfo.compute and not hostinfo.compute.get("enabled", True):
# Host is disabled
continue
result = self._process_filter(expanded, host, hostinfo)
            if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
                filtered_hosts.append((host, hostinfo))
return filtered_hosts
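# Example query (sketch) as accepted by filter_hosts after json.dumps():
#   ['and',
#    ['>=', '$compute.host_memory_free', 1024],
#    ['not', ['=', '$compute.disk_available', 0]]]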
|
oesteban/niworkflows
|
niworkflows/utils/tests/test_misc.py
|
Python
|
bsd-3-clause
| 2,459
| 0.001627
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Test misc module."""
import os
import shutil
from unittest import mock
import pytest
from ..misc import pass_dummy_scans, check_valid_fs_license
@pytest.mark.parametrize(
"algo_dummy_scans,dummy_scans,expected_out", [(2, 1, 1), (2, None, 2), (2, 0, 0)]
)
def test_pass_dummy_scans(algo_dummy_scans, dummy_scans, expected_out):
"""Check dummy scans passing."""
skip_vols = pass_dummy_scans(algo_dummy_scans, dummy_scans)
assert skip_vols == expected_out
@pytest.mark.parametrize(
"stdout,rc,valid",
[
(b"Successful command", 0, True),
(b"", 0, True),
(b"ERROR: FreeSurfer license file /made/up/license.txt not found", 1, False),
(b"Failed output", 1, False),
(b"ERROR: Systems running GNU glibc version greater than 2.15", 0, False),
],
)
def test_fs_license_check(stdout, rc, valid):
with mock.patch("subprocess.run") as mocked_run:
mocked_run.return_value.stdout = stdout
mocked_run.return_value.returncode = rc
assert check_valid_fs_license() is valid
@pytest.mark.skipif(not os.getenv("FS_LICENSE"), reason="No FS license found")
def test_fs_license_check2(monkeypatch):
"""Execute the canary itself."""
assert check_valid_fs_license() is True
@pytest.mark.skipif(shutil.which('mri_convert') is None, reason="FreeSurfer not installed")
def test_fs_license_check3(monkeypatch):
with monkeypatch.context() as m:
m.delenv("FS_LIC
|
ENSE", raising=False)
m.delenv("FREESURFER_HOME", raising=False)
assert check_valid_fs_license() is False
|
wfxiang08/ansible
|
lib/ansible/plugins/callback/__init__.py
|
Python
|
gpl-3.0
| 6,672
| 0.002998
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
from ansible import constants as C
__all__ = ["CallbackBase"]
class CallbackBase:
'''
    This is a base ansible callback class that does nothing. New callbacks should
    use this class as a base and override any callback methods on which they wish
    to execute custom actions.
'''
# FIXME: the list of functions here needs to be updated once we have
    # finalized the list of callback methods used in the default callback
def __init__(self, display):
self._display = display
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'with no defined name')
            ctype = getattr(self, 'CALLBACK_TYPE', 'unknown')
            version = getattr(self, 'CALLBACK_VERSION', 'unknown')
self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))
def _dump_results(self, result, indent=4, sort_keys=True):
return json.dumps(result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def _handle_warnings(self, res):
''' display warnings, if enabled and any exist in the result '''
if C.COMMAND_WARNINGS and 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
####### V2 METHODS, by default they call v1 counterparts if possible ######
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
host = result._host.get_name()
#FIXME, get item to pass through
item = None
self.runner_on_skipped(host, item)
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_no_hosts(self, task):
self.runner_on_no_hosts()
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
#FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_runner_on_file_diff(self, result, diff):
        pass  # no v1 correspondence
def v2_playbook_on_start(self):
self.playbook_on_start()
def v2_playbook_on_notify(self, result, handler):
host = result._host.get_name()
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
        pass  # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
        pass  # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)
def v2_playbook_on_setup(self):
self.playbook_on_setup()
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
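# Illustrative subclass (editor's sketch, not part of this module): a custom
# callback only overrides the hooks it cares about and inherits no-ops for
# the rest.
#
#   class LogResultsCallback(CallbackBase):
#       def runner_on_ok(self, host, res):
#           self._display.display('%s => %s' % (host, self._dump_results(res)))
#       def runner_on_failed(self, host, res, ignore_errors=False):
#           self._display.warning('%s failed' % host)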
|
tensorflow/tensorflow
|
tensorflow/core/platform/ram_file_system_test.py
|
Python
|
apache-2.0
| 5,699
| 0.006492
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ram_file_system.h."""
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
class RamFilesystemTest(test_util.TensorFlowTestCase):
def test_create_and_delete_directory(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.delete_recursively_v2('ram://testdirectory')
def test_create_and_delete_directory_tree_recursive(self):
file_io.create_dir_v2('ram://testdirectory')
    file_io.create_dir_v2('ram://testdirectory/subdir1')
file_io.create_dir_v2('ram://testdirectory/subdir2')
file_io.create_dir_v2('ram://testdirectory/subdir1/subdir3')
with gfile.GFile('ram://testdirectory/subdir1/subdir3/a.txt', 'w') as f:
f.write('Hello, world.')
file_io.delete_recursively_v2('ram://testdirectory')
self.assertEqual(gfile.Glob('ram://testdirectory/*'), [])
def test_write_file(self):
with gfile.GFile('ram://a.txt', 'w') as f:
f.write('Hello, world.')
f.write('Hello, world.')
with gfile.GFile('ram://a.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_append_file_with_seek(self):
with gfile.GFile('ram://c.txt', 'w') as f:
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'w+') as f:
f.seek(offset=0, whence=2)
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_list_dir(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['%d.txt' % i for i in range(10)]
self.assertEqual(gfile.ListDirectory('ram://a/b/'), matches)
def test_glob(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['ram://a/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://a/b/*'), matches)
matches = []
self.assertEqual(gfile.Glob('ram://b/b/*'), matches)
matches = ['ram://c/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://c/b/*'), matches)
def test_file_exists(self):
with gfile.GFile('ram://exists/a/b/c.txt', 'w') as f:
f.write('')
self.assertTrue(gfile.Exists('ram://exists/a'))
self.assertTrue(gfile.Exists('ram://exists/a/b'))
self.assertTrue(gfile.Exists('ram://exists/a/b/c.txt'))
self.assertFalse(gfile.Exists('ram://exists/b'))
self.assertFalse(gfile.Exists('ram://exists/a/c'))
self.assertFalse(gfile.Exists('ram://exists/a/b/k'))
def test_estimator(self):
def model_fn(features, labels, mode, params):
del params
x = core_layers.dense(features, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
y = core_layers.dense(x, 1)
loss = losses.mean_squared_error(labels, y)
opt = adam.AdamOptimizer(learning_rate=0.1)
train_op = opt.minimize(
loss, global_step=training_util.get_or_create_global_step())
return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def input_fn():
batch_size = 128
return (constant_op.constant(np.random.randn(batch_size, 100),
dtype=dtypes.float32),
constant_op.constant(np.random.randn(batch_size, 1),
dtype=dtypes.float32))
config = RunConfig(
model_dir='ram://estimator-0/', save_checkpoints_steps=1)
estimator = Estimator(config=config, model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
def test_savedmodel(self):
class MyModule(module.Module):
@def_function.function(input_signature=[])
def foo(self):
return constant_op.constant([1])
saved_model.save(MyModule(), 'ram://my_module')
loaded = saved_model.load('ram://my_module')
self.assertAllEqual(loaded.foo(), [1])
if __name__ == '__main__':
test.main()
|
cookbrite/ebs-deploy
|
ebs_deploy/__init__.py
|
Python
|
mit
| 26,434
| 0.002951
|
from boto.exception import S3ResponseError, BotoServerError
from boto.s3.connection import S3Connection
from boto.ec2.autoscale import AutoScaleConnection
from boto.beanstalk import connect_to_region
from boto.s3.key import Key
from datetime import datetime
from time import time, sleep
import zipfile
import os
import subprocess
import sys
import yaml
import re
import logging
logger = None
LOGGER_NAME = 'ebs_deploy'
MAX_RED_SAMPLES = 20
def utcnow_isoformat():
return datetime.utcnow().isoformat() + 'Z'
def out(message):
"""
print alias
"""
if logger:
logger.info("%s", message)
else:
sys.stdout.write(message + "\n")
sys.stdout.flush()
def init_logging(use_logging=False):
global logger
if use_logging:
logger = logging.getLogger(LOGGER_NAME)
def configure_logging(level, handlers):
l = logging.getLogger(LOGGER_NAME)
l.setLevel(level)
for h in l.handlers[:]:
l.removeHandler(h)
for h in handlers:
l.addHandler(h)
return l
def merge_dict(dict1, dict2):
ret = dict(dict2)
for key, val in dict1.items():
val2 = dict2.get(key)
if val2 is None:
ret[key] = val
elif isinstance(val, dict) and isinstance(val2, dict):
ret[key] = merge_dict(val, val2)
elif isinstance(val, (list,)) and isinstance(val2, (list,)):
ret[key] = val + val2
else:
ret[key] = val2
return ret
def get(vals, key, default_val=None):
"""
Returns a dictionary value
"""
val = vals
for part in key.split('.'):
if isinstance(val, dict):
val = val.get(part, None)
if val is None:
return default_val
else:
return default_val
return val
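# Illustrative behaviour of the two helpers above (editor's sketch):
#   merge_dict({'a': {'x': 1}}, {'a': {'y': 2}})  ->  {'a': {'x': 1, 'y': 2}}
#   get({'app': {'name': 'demo'}}, 'app.name')    ->  'demo'
#   get({'app': {}}, 'app.missing', 'fallback')   ->  'fallback'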
def parse_option_settings(option_settings):
"""
Parses option_settings as they are defined in the configuration file
"""
ret = []
for namespace, params in option_settings.items():
for key, value in params.items():
ret.append((namespace, key, value))
return ret
def override_scaling(option_settings, min_size, max_size):
""" takes the merged option_settings and injects custom min/max autoscaling sizes """
match_namespace = "aws:autoscaling:asg"
match_keys = {"MinSize": min_size, "MaxSize": max_size}
copied_option_settings = []
for (namespace, key, value) in option_settings:
new_option = (namespace, key, value)
if match_namespace == namespace and key in match_keys:
new_option = (namespace, key, match_keys[key])
copied_option_settings.append(new_option)
return copied_option_settings
def parse_env_config(config, env_name):
"""
Parses an environment config
"""
all_env = get(config, 'app.all_environments', {})
env = get(config, 'app.environments.' + str(env_name), {})
return merge_dict(all_env, env)
def upload_application_archive(helper, env_config, archive=None, directory=None, version_label=None):
if version_label is None:
version_label = datetime.now().strftime('%Y%m%d_%H%M%S')
else:
# don't attempt to create an application version which already exists
existing_version_labels = [version['VersionLabel'] for version in helper.get_versions()]
if version_label in existing_version_labels:
return version_label
archive_file_name = None
if archive:
archive_file_name = os.path.basename(archive)
archive_files = get(env_config, 'archive.files', [])
# generate the archive externally
if get(env_config, 'archive.generate'):
cmd = get(env_config, 'archive.generate.cmd')
output_file = get(env_config, 'archive.generate.output_file')
use_shell = get(env_config, 'archive.generate.use_shell', True)
exit_code = get(env_config, 'archive.generate.exit_code', 0)
if not cmd or not output_file:
raise Exception('Archive generation requires cmd and output_file at a minimum')
output_regex = None
try:
output_regex = re.compile(output_file)
        except re.error:
            pass
result = subprocess.call(cmd, shell=use_shell)
if result != exit_code:
            raise Exception('Generate command exited with code %s (expected %s)' % (result, exit_code))
if output_file and os.path.exists(output_file):
archive_file_name = os.path.basename(output_file)
else:
for root, dirs, files in os.walk(".", followlinks=True):
for f in files:
fullpath = os.path.join(root, f)
if fullpath.endswith(output_file):
archive = fullpath
archive_file_name = os.path.basename(fullpath)
break
elif output_regex and output_regex.match(fullpath):
archive = fullpath
archive_file_name = os.path.basename(fullpath)
break
if archive:
break
if not archive or not archive_file_name:
raise Exception('Unable to find expected output file matching: %s' % (output_file))
# create the archive
elif not archive:
if not directory:
directory = "."
includes = get(env_config, 'archive.includes', [])
excludes = get(env_config, 'archive.excludes', [])
def _predicate(f):
for exclude in excludes:
if re.match(exclude, f):
return False
if len(includes) > 0:
for include in includes:
if re.match(include, f):
return True
return False
return True
archive = create_archive(directory, str(version_label) + ".zip", config=archive_files, ignore_predicate=_predicate)
archive_file_name = str(version_label) + ".zip"
add_config_files_to_archive(directory, archive, config=archive_files)
helper.upload_archive(archive, archive_file_name)
helper.create_application_version(version_label, archive_file_name)
return version_label
def create_archive(directory, filename, config={}, ignore_predicate=None, ignored_files=['.git', '.svn']):
"""
Creates an archive from a directory and returns
the file that was created.
"""
with zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED) as zip_file:
root_len = len(os.path.abspath(directory))
# create it
out("Creating archive: " + str(filename))
for root, dirs, files in os.walk(directory, followlinks=True):
archive_root = os.path.abspath(root)[root_len + 1:]
for f in files:
                fullpath = os.path.join(root, f)
archive_name = os.path.join(archive_root, f)
# ignore the file we're creating
if filename in fullpath:
continue
# ignored files
if ignored_files is not None:
for name in ignored_files:
if fullpath.endswith(name):
out("Skipping: " + str(name))
continue
# do predicate
if ignore_predicate is not None:
if not ignore_predicate(archive_name):
out("Skipping: " + str(archive_name))
continue
out("Adding: " + str(archive_name))
zip_file.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
return filename
def add_config_files_to_archive(directory, filename, config={}):
"""
Adds configuration files to an existing archive
"""
with zipfile.ZipFile(filename, 'a') as zip_file:
for conf in config:
for conf, tree in conf.items():
                if 'yaml' in tree:
content = yaml.dump(tree['yaml'], default_flow_style=False)
else:
content = tree.get('content',
|
maybelinot/bellring
|
bellring/_version.py
|
Python
|
gpl-3.0
| 305
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Eduard Trott
# @Date: 2015-09-15 08:57:35
# @Email: etrott@redhat.com
# @Last modified by: etrott
# @Last Modified time: 2015-12-17 16:53:17
version_info = ('0', '0', '1')
__version__ = '.'.join(version_info[0:3]) # + '-' + version_info[3]
|
amwelch/a10sdk-python
|
a10sdk/core/cgnv6/cgnv6_ds_lite_port_reservation.py
|
Python
|
apache-2.0
| 2,890
| 0.010035
|
from a10sdk.common.A10BaseClass import A10BaseClass
class PortReservation(A10BaseClass):
"""Class Description::
DS-Lite Static Port Reservation.
Class port-reservation supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param nat_end_port: {"description": "NAT End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param inside: {"optional": false, "type": "string", "description": "Inside User Address and Port Range (DS-Lite Inside User's Tunnel Source IPv6 Address)", "format": "ipv6-address"}
:param tunnel_dest_address: {"optional": false, "type": "string", "description": "DS-Lite Inside User's Tunnel Destination IPv6 Address", "format": "ipv6-address"}
:param inside_start_port: {"description": "Inside Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat: {"optional": false, "type": "string", "description": "NAT Port Range (NAT IP address)", "format": "ipv4-address"}
:param inside_end_port: {"description": "Inside End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat_start_port: {"description": "NAT Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param inside_addr: {"optional": false, "type": "string", "description": "Inside User IP address", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "inside","tunnel_dest_address","inside_addr","inside_start_port","inside_end_port","nat","nat_start_port","nat_end_port"]
self.b_key = "port-reservation"
self.a10_url="/axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}"
self.DeviceProxy = ""
self.nat_end_port = ""
self.uuid = ""
self.inside = ""
self.tunnel_dest_address = ""
self.inside_start_port = ""
self.nat = ""
self.inside_end_port = ""
self.nat_start_port = ""
self.inside_addr = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
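# Illustrative usage (editor's sketch; all addresses and ports are
# hypothetical):
#   reservation = PortReservation(inside='2001:db8::1',
#                                 tunnel_dest_address='2001:db8::2',
#                                 inside_addr='10.0.0.1',
#                                 inside_start_port=2000, inside_end_port=2100,
#                                 nat='192.0.2.1',
#                                 nat_start_port=3000, nat_end_port=3100)
# The keyword arguments fill the placeholders of self.a10_url for the REST
# call made through the DeviceProxy.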
|
dhuang/incubator-airflow
|
airflow/providers/tableau/example_dags/example_tableau_refresh_workbook.py
|
Python
|
apache-2.0
| 2,507
| 0.002792
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example dag that performs two refresh operations on a Tableau Workbook aka Extract. The first one
waits until it succeeds. The second does not wait since this is an asynchronous operation and we don't know
when the operation actually finishes. That's why we have another task that checks only that.
"""
from datetime import timedelta
from airflow import DAG
from airflow.providers.tableau.operators.tableau_refresh_workbook import TableauRefreshWorkbookOperator
from airflow.providers.tableau.sensors.tableau_job_status import TableauJobStatusSensor
from airflow.utils.dates import days_ago
with DAG(
    dag_id='example_tableau_refresh_workbook',
dagrun_timeout=timedelta(hours=2),
schedule_interval=None,
start_date=days_ago(2),
tags=['example'],
) as dag:
# Refreshes a workbook and waits until it succeeds.
task_refresh_workbook_blocking = TableauRefreshWorkbookOperator(
site_id='my_site',
workbook_name='MyWorkbook',
blocking=True,
task_id='refresh_tableau_workbook_blocking',
)
# Refreshes a workbook and does not wait until it succeeds.
task_refresh_workbook_non_blocking = TableauRefreshWorkbookOperator(
site_id='my_site',
workbook_name='MyWorkbook',
blocking=False,
task_id='refresh_tableau_workbook_non_blocking',
)
# The following task queries the status of the workbook refresh job until it succeeds.
task_check_job_status = TableauJobStatusSensor(
site_id='my_site',
job_id="{{ ti.xcom_pull(task_ids='refresh_tableau_workbook_non_blocking') }}",
task_id='check_tableau_job_status',
)
task_refresh_workbook_non_blocking >> task_check_job_status
|
gochist/horizon
|
openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py
|
Python
|
apache-2.0
| 3,012
| 0.000332
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView # noqa
from django.views.generic import View  # noqa
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
    import forms as project_forms
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateKeypair
template_name = 'project/access_and_security/keypairs/create.html'
success_url = 'horizon:project:access_and_security:keypairs:download'
def get_success_url(self):
return reverse(self.success_url,
kwargs={"keypair_name": self.request.POST['name']})
class ImportView(forms.ModalFormView):
form_class = project_forms.ImportKeypair
template_name = 'project/access_and_security/keypairs/import.html'
success_url = reverse_lazy('horizon:project:access_and_security:index')
def get_object_id(self, keypair):
return keypair.name
class DownloadView(TemplateView):
def get_context_data(self, keypair_name=None):
return {'keypair_name': keypair_name}
template_name = 'project/access_and_security/keypairs/download.html'
class GenerateView(View):
def get(self, request, keypair_name=None):
try:
keypair = api.nova.keypair_create(request, keypair_name)
except Exception:
redirect = reverse('horizon:project:access_and_security:index')
exceptions.handle(self.request,
_('Unable to create key pair: %(exc)s'),
redirect=redirect)
response = http.HttpResponse(content_type='application/binary')
response['Content-Disposition'] = \
'attachment; filename=%s.pem' % slugify(keypair.name)
response.write(keypair.private_key)
response['Content-Length'] = str(len(response.content))
return response
|
robwarm/gpaw-symm
|
gpaw/test/test.py
|
Python
|
gpl-3.0
| 5,364
| 0.001119
|
import os
import gc
import platform
import sys
import time
import tempfile
import warnings
from optparse import OptionParser
import gpaw.mpi as mpi
from gpaw.hooks import hooks
from gpaw import debug
from gpaw.version import version
def run():
description = ('Run the GPAW test suite. The test suite can be run in '
'parallel with MPI through gpaw-python. The test suite '
'supports 1, 2, 4 or 8 CPUs although some tests are '
'skipped for some parallelizations. If no TESTs are '
'given, run all tests supporting the parallelization.')
parser = OptionParser(usage='%prog [OPTION...] [TEST...]',
description=description,
version='%%prog %s' % version)
parser.add_option('-x', '--exclude',
type='string', default=None,
help='Exclude tests (comma separated list of tests).',
metavar='test1.py,test2.py,...')
parser.add_option('-f', '--run-failed-tests-only',
action='store_true',
help='Run failed tests only.')
parser.add_option('--from', metavar='TESTFILE', dest='from_test',
help='Run remaining tests, starting from TESTFILE')
parser.add_option('--after', metavar='TESTFILE', dest='after_test',
help='Run remaining tests, starting after TESTFILE')
parser.add_option('--range',
type='string', default=None,
help='Run tests in range test_i.py to test_j.py '
'(inclusive)',
metavar='test_i.py,test_j.py')
parser.add_option('-j', '--jobs', type='int', default=1,
help='Run JOBS threads. Each test will be executed '
'in serial by one thread. This option cannot be used '
'for parallelization together with MPI.')
parser.add_option('--reverse', action='store_true',
help=('Run tests in reverse order (less overhead with '
'multiple jobs)'))
parser.add_option('-k', '--keep-temp-dir', action='store_true',
dest='keep_tmpdir', help='Do not delete temporary files.')
parser.add_option('-d', '--directory', help='Run test in this directory')
parser.add_option('-s', '--show-output', action='store_true',
help='Show standard output from tests.')
opt, tests = parser.parse_args()
if len(tests) == 0:
from gpaw.test import tests
if opt.reverse:
tests.reverse()
if opt.run_failed_tests_only:
tests = [line.strip() for line in open('failed-tests.txt')]
exclude = []
if opt.exclude is not None:
exclude += opt.exclude.split(',')
if opt.from_test:
fromindex = tests.index(opt.from_test)
tests = tests[fromindex:]
if opt.after_test:
index = tests.index(opt.after_test) + 1
tests = tests[index:]
if opt.range:
# default start(stop) index is first(last) test
indices = opt.range.split(',')
try:
start_index = tests.index(indices[0])
except ValueError:
start_index = 0
try:
stop_index = tests.index(indices[1]) + 1
except ValueError:
stop_index = len(tests)
tests = tests[start_index:stop_index]
if opt.jobs > 1:
exclude.append('maxrss.py')
for test in exclude:
if test in tests:
tests.remove(test)
from gpaw.test import TestRunner
if mpi.world.size > 8:
if mpi.rank == 0:
message = '!!!!!!!\n' \
'GPAW regression test suite was not designed to run on more\n' \
'than 8 MPI tasks. Re-run test suite using 1, 2, 4 or 8 MPI\n' \
'tasks instead.'
warnings.warn(message, RuntimeWarning)
old_hooks = hooks.copy()
hooks.clear()
if mpi.rank == 0:
if opt.directory is None:
tmpdir = tempfile.mkdtemp(prefix='gpaw-test-')
else:
tmpdir = opt.directory
if os.path.isdir(tmpdir):
opt.keep_tmpdir = True
else:
os.mkdir(tmpdir)
else:
tmpdir = None
tmpdir = mpi.broadcast_string(tmpdir)
cwd = os.getcwd()
os.chdir(tmpdir)
operating_system = platform.system() + ' ' + platform.machine()
operating_system += ' ' + ' '.join(platform.dist())
python = platform.python_version() + ' ' + platform.python_compiler()
    python += ' ' + ' '.join(platform.architecture())
if mpi.rank == 0:
print('python %s on %s' % (python, operating_system))
print('Running tests in %s' % tmpdir)
print('Jobs: %d, Cores: %d, debug-mode: %r' % (opt.jobs, mpi.size,
debug))
failed = TestRunner(tests, jobs=opt.jobs, show_output=opt.show_output).run()
os.chdir(cwd)
if mpi.rank == 0:
        if len(failed) > 0:
open('failed-tests.txt', 'w').write('\n'.join(failed) + '\n')
elif not opt.keep_tmpdir:
os.system('rm -rf ' + tmpdir)
hooks.update(old_hooks.items())
return len(failed)
if __name__ == '__main__':
run()
|
fedarko/CAIP
|
Code/LevelReader.py
|
Python
|
gpl-3.0
| 1,886
| 0.003181
|
"""
Copyright 2011 Marcus Fedarko
Contact Email: marcus.fedarko@gmail.com
This file is part of CAIP.
CAIP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CAIP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CAIP. If not, see <http://www.gnu.org/licenses/>.
====
LevelReader.py
-----
class LevelReader: reads through a level and
creates cells, which are added to a sprite group
in SpriteManager.
"""
import Cell
import Levels
from Config import *
import pygame
from pygame.locals import *
class LevelReader(object):
"""Reads through a given level list and sets up the cells
in the level."""
def __init__(self, levelnum, sprite_manager):
"""Sets some attributes of the LevelReader."""
self.levelnum = levelnum
self.sprite_manager = sprite_manager
# Dict relating topleft of cells to the cell.
# Used in CellManager.
self.topleftToCell = {}
def read(self):
"""Reads through Levels.level1 and creates cells."""
x = 0
y = 0
for string in Levels.level1:
for char in string:
if char == "#":
c = Cell.Cell((x, y), True)
else:
c = Cell.Cell((x, y), False)
self.sprite_manager.cells.add(c)
self.topleftToCell[(x, y)] = c
x += TILESIZE[0]
y += TILESIZE[1]
x = 0
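# Illustrative level data (editor's sketch; the real layout lives in
# Levels.level1):
#   level1 = ["#####",
#             "#...#",
#             "#####"]
# Each '#' becomes a solid Cell, every other character a passable one.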
|
zleap/python-qrcode
|
qrreadfromfile.py
|
Python
|
gpl-3.0
| 1,039
| 0.008662
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# qrgen1.py
#
# Copyright 2013 psutton <zleap@zleap.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from qrtools import QR
myCode = QR(filename=u"/home/psutton/Documents/Python/qrcodes/qrcode.png")
if myCode.decode():
print myCode.data
print myCode.data_type
print myCode.data_to_string()
|
JamesPavek/payroll
|
timesheet.py
|
Python
|
mit
| 2,615
| 0.047419
|
import xml.etree.ElementTree as ET
import datetime
import sys
import openpyxl
import re
import dateutil.parser
def main():
    global sheet  # shared with the write* helpers below
    print 'Number of arguments:', len(sys.argv), 'arguments.' #DEBUG
    print 'Argument List:', str(sys.argv) #DEBUG
    payRate = raw_input("Enter your pay rate: ") #DEBUG
    sNumber = raw_input("Enter 900#: ") #DEBUG
    xml = ET.parse("xml.xml") #DEBUG
    root = xml.getroot()
    root = root[3][0] #Go directly to worksheet/table
    sheet = openpyxl.load_workbook(sys.argv[1], data_only=True).active
    writeName(root)
    writeEmployeeNum(root)
    writeStudentNum(sNumber)
    writePayRate(payRate)
    #At this point all that is left are the times
    dates = []
    for x in root.findall(".//*"):
        if x.text is not None:
            dates.append(x.text)
    for x in char_range('G', 'Z'):
        writeTimes(x + '17', dates)
def writeTimes(position, dateList):
    match = next(x[0] for x in enumerate(dateList) if x[1] == sheet[position].value)
    jobCode = dateList[match + 4]
    if jobCode == 900:
        raise ValueError("Cannot start day with 900 break")
    else:
        # The original statement was left unfinished; writing the rounded
        # matched time back to the current cell is the most plausible intent.
        sheet[position] = roundTime(dateList[match])
def roundTime(time):
    date = dateutil.parser.parse(time)
    if date.minute <= 7:
        return date.replace(minute=0)
    elif date.minute >= 8 and date.minute <= 22:
        return date.replace(minute=15)
    elif date.minute >= 23 and date.minute <= 37:
        return date.replace(minute=30)
    elif date.minute >= 38 and date.minute <= 52:
        return date.replace(minute=45)
    elif date.minute >= 53:
        if date.hour == 23:
            raise ValueError("Worked overnight or did not clock out")
        else:
            date += datetime.timedelta(minutes=(60 - date.minute))
            #Rounds time to next hour by adding minutes until 60
            return date
    else:
        raise ValueError("Something went wrong in roundTime")
def writeName(tree):
name = tree[-1][4][0].text
sheet['I8'] = name
def writeEmployeeNum(tree):
    num = tree[2][0][0].text
    sheet['D4'] = re.match('.*?([0-9]+)$', num).group(1)
def writeStudentNum(num):
    sheet['S8'] = num
def writePayRate(num):
    sheet['K6'] = num
def char_range(c1, c2):
"""Generates the characters from `c1` to `c2
|
`, inclusive."""
"""Courtesy http://stackoverflow.com/questions/7001144/range-over-character-in-python"""
for c in xrange(ord(c1), ord(c2)+1):
yield chr(c)
main()
|
hasadna/OpenTrain
|
webserver/opentrain/timetable/migrations/0013_auto__add_field_tttrip_shape.py
|
Python
|
bsd-3-clause
| 2,893
| 0.005876
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TtTrip.shape'
db.add_column(u'timetable_tttrip', 'shape',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['timetable.TtShape'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TtTrip.shape'
db.delete_column(u'timetable_tttrip', 'shape_id')
models = {
u'timetable.ttshape': {
'Meta': {'object_name': 'TtShape'},
            'gtfs_shape_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.TextField', [], {})
},
u'timetable.ttstop': {
'Meta': {'object_name': 'TtStop'},
'gtfs_stop_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop_lat': ('django.db.models.fields.FloatField', [], {}),
'stop_lon': ('django.db.models.fields.FloatField', [], {}),
'stop_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stop_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'timetable.ttstoptime': {
'Meta': {'object_name': 'TtStopTime'},
'exp_arrival': ('django.db.models.fields.DateTimeField', [], {}),
'exp_departure': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtStop']"}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtTrip']"})
},
u'timetable.tttrip': {
'Meta': {'object_name': 'TtTrip'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gtfs_trip_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtShape']", 'null': 'True'})
}
}
complete_apps = ['timetable']
|
HKUST-SING/tensorflow
|
tensorflow/python/debug/wrappers/local_cli_wrapper.py
|
Python
|
apache-2.0
| 20,594
| 0.004079
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
_DUMP_ROOT_PREFIX = "tfdbg_"
class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self, sess, dump_root=None, log_usage=True, ui_type="curses"):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
        directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards.
log_usage: (`bool`) whether the usage of this class is to be logged.
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
        dump_root is a file.
"""
if log_usage:
pass # No logging for open-source.
framework.BaseDebugWrapperSession.__init__(self, sess)
if dump_root is None:
self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX)
else:
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
# a filter is in effect, this object will call run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._is_run_start = True
self._ui_type = ui_type
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Invoke stepper (cont, step, breakpoint, etc.)",
usage=argparse.SUPPRESS)
self._argparsers["invoke_stepper"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
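  # Illustrative registration (editor's sketch): the stock inf/nan filter
  # shipped with tfdbg can be attached like any user-defined callable,
  #   wrapped_sess.add_tensor_filter("has_inf_or_nan",
  #                                  debug_data.has_inf_or_nan)
  # after which `run -f has_inf_or_nan` keeps running until the filter trips.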
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Invoke the CLI to let user choose what action to take:
`run` / `invoke_stepper`.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
Raises:
RuntimeError: If user chooses to prematurely exit the debugger.
"""
self._is_run_start = True
self._update_run_calls_state(request.run_call_count, request.fetches,
request.feed_dict)
if self._active_tensor_filter:
# If we are running till a filter passes, we just need to keep running
# with the DEBUG_RUN option.
return framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls())
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls())
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli()
if self._run_through_times > 1:
self._run_through_times -= 1
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
return self._run_start_response
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cl
|
torbjoernk/pySDC
|
examples/spiraling_particle/HookClass.py
|
Python
|
bsd-2-clause
| 1,411
| 0.008505
|
from __future__ import division
from pySDC.Hooks import hooks
from pySDC.Stats import stats
import matplotlib.pyplot as plt
import numpy as np
class particles_output(hooks):
def __init__(self):
"""
Initialization of particles output
"""
super(particles_output,self).__init__()
# add figure object for further use
fig = plt.figure()
self.ax = fig.add_subplot(111)
self.ax.set_xlim([-1.5,1.5])
self.ax.set_ylim([-1.5,1.5])
plt.ion()
self.sframe = None
def dump_step(self,status):
"""
Overwrite standard dump per step
Args:
status: status object per step
"""
super(particles_output,self).dump_step(status)
# some abbreviations
L = self.level
        u = L.uend
R = np.linalg.norm(u.pos.values)
H = 1/2*np.dot(u.vel.values,u.vel.values)+0.02/R
stats.add_to_stats(step=status.step, time=status.time, type='energy', value=H)
oldcol = self.sframe
# self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1],L.uend.pos.values[2])
self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1])
# Remove old line collection before drawing
if oldcol is not None:
            self.ax.collections.remove(oldcol)
plt.pause(0.00001)
return None
|
flashingpumpkin/filerotate
|
filerotate/__version__.py
|
Python
|
mit
| 21
| 0.047619
|
__version__ = "1.0
|
.3"
|
|
flyapen/UgFlu
|
flumotion/test/test_admin_multi.py
|
Python
|
gpl-2.0
| 3,725
| 0
|
# -*- Mode: Python; test-case-name: flumotion.test.test_admin_multi -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from flumotion.common import testsuite
from twisted.internet import defer
from flumotion.admin import multi
from flumotion.common import connection
from flumotion.twisted import pb
class MultiAdminTest(testsuite.TestCaseWithManager):
def testConstructor(self):
model = multi.MultiAdminModel()
def testConnectSuccess(self):
def connected(_):
self.assertEqual(len(self.vishnu.adminHeaven.avatars),
1)
return m.removeManager(str(self.connectionInfo))
m = multi.MultiAdminModel()
d = m.addManager(self.connectionInfo, writeConnection=False)
d.addCallback(connected)
return d
def testConnectFailure(self):
def connected(_):
self.fail('should not have connected')
def failure(f):
# ok!
self.assertEqual(len(self.vishnu.adminHeaven.avatars), 0)
self.assertEqual(m.admins, {})
self.assertEqual(m._reconnectHandlerIds, {})
m = multi.MultiAdminModel()
i = connection.PBConnectionInfo(self.connectionInfo.host,
self.connectionInfo.port,
self.connectionInfo.use_ssl,
pb.Authenticator(username='user',
password='pest'))
d = m.addManager(i, writeConnection=False)
d.addCallbacks(connected, failure)
return d
def testReconnect(self):
class Listener:
def __init__(self):
self.disconnectDeferred = defer.Deferred()
self.reconnectDeferred = defer.Deferred()
def model_addPlanet(self, admin, planet):
self.reconnectDeferred.callback(admin)
self.reconnectDeferred = None
def model_removePlanet(self, admin, planet):
self.disconnectDeferred.callback(admin)
self.disconnectDeferred = None
        listener = Listener()
def connected(_):
self.assertEqual(len(self.vishnu.adminHeaven.avatars),
1)
            a = m.admins[str(self.connectionInfo)]
            m.addListener(listener)
            a.clientFactory.disconnect()
            return listener.disconnectDeferred
        def disconnected(_):
            return listener.reconnectDeferred
        def reconnected(_):
            m.removeListener(listener)
return m.removeManager(str(self.connectionInfo))
m = multi.MultiAdminModel()
d = m.addManager(self.connectionInfo, writeConnection=False)
d.addCallback(connected)
d.addCallback(disconnected)
d.addCallback(reconnected)
return d
|
jfmorcillo/mss
|
mss/agent/__init__.py
|
Python
|
gpl-3.0
| 844
| 0
|
# -*- coding: UTF-8 -*-
#
# (c) 2010 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2
|
of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from time import time
START_TIME = time()
|
codenote/chromium-test
|
tools/telemetry/telemetry/core/tab.py
|
Python
|
bsd-3-clause
| 3,230
| 0.005263
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import web_contents
DEFAULT_TAB_TIMEOUT = 60
class Tab(web_contents.WebContents):
"""Represents a tab in the browser
The important parts of the Tab object are in the runtime and page objects.
E.g.:
# Navigates the tab to a given url.
tab.Navigate('http://www.google.com/')
# Evaluates 1+1 in the tab's JavaScript context.
tab.Evaluate('1+1')
"""
def __init__(self, inspector_backend):
super(Tab, self).__init__(inspector_backend)
def __del__(self):
super(Tab, self).__del__()
@property
def dom_stats(self):
"""A dictionary populated with measured DOM statistics.
Currently this dictionary contains:
{
'document_count': integer,
'node_count': integer,
'event_listener_count': integer
}
"""
dom_counters = self._inspector_backend.GetDOMStats(
timeout=DEFAULT_TAB_TIMEOUT)
assert (len(dom_counters) == 3 and
all([x in dom_counters for x in ['document_count', 'node_count',
'event_listener_count']]))
return dom_counters
def Activate(self):
"""Brings this tab to the foreground asynchronously.
Not all browsers or browser versions support this method.
Be sure to check browser.supports_tab_control.
Please note: this is asynchronous. There is a delay between this call
and the page's documentVisibilityState becoming 'visible', and yet more
delay until the actual tab is visible to the user. None of these delays
are included in this call."""
self._inspector_backend.Activate()
@property
def screenshot_supported(self):
"""True if the browser instance is capable of capturing screenshots"""
return self._inspector_backend.screenshot_supported
def Screenshot(self, timeout=DEFAULT_TAB_TIMEOUT):
"""Capture a screenshot of the window for rendering validation"""
return self._inspector_backend.Screenshot(timeout)
def PerformActionAndWaitForNavigate(
self, action_function, timeout=DEFAULT_TAB_TIMEOUT):
"""Executes action_function, and waits for the navigation to complete.
action_function must be a Python function that results in a navigation.
This function returns when the navigation is complete or when
the timeout has been exceeded.
"""
self._inspector_backend.PerformActionAndWaitForNavigate(
action_function, timeout)
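  # Illustrative call (editor's sketch; assumes the ExecuteJavaScript helper
  # inherited from WebContents and a hypothetical page with a link to click):
  #   tab.PerformActionAndWaitForNavigate(
  #       lambda: tab.ExecuteJavaScript('document.querySelector("a").click()'))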
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout=DEFAULT_TAB_TIMEOUT):
"""Navigates to url.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
    self._inspector_backend.Navigate(url, script_to_evaluate_on_commit, timeout)
def GetCookieByName(self, name, timeout=DEFAULT_TAB_TIMEOUT):
"
|
""Returns the value of the cookie by the given |name|."""
return self._inspector_backend.GetCookieByName(name, timeout)
|
afaheem88/rally
|
tests/unit/deployment/engines/test_devstack.py
|
Python
|
apache-2.0
| 4,402
| 0
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
from rally.deployment.engines import devstack
from tests.unit import test
SAMPLE_CONFIG = {
"type": "DevstackEngine",
"provider": {
"name": "ExistingServers",
"credentials": [{"user": "root", "host": "example.com"}],
},
"localrc": {
"ADMIN_PASSWORD": "secret",
},
}
DEVSTACK_REPO = "https://git.openstack.org/openstack-dev/devstack"
class DevstackEngineTestCase(test.TestCase):
def setUp(self):
super(DevstackEngineTestCase, self).setUp()
self.deployment = {
"uuid": "de641026-dbe3-4abe-844a-ffef930a600a",
"config": SAMPLE_CONFIG,
}
self.engine = devstack.DevstackEngine(self.deployment)
def test_invalid_config(self):
self.deployment = SAMPLE_CONFIG.copy()
self.deployment["config"] = {"type": 42}
engine = devstack.DevstackEngine(self.deployment)
self.assertRaises(jsonschema.ValidationError,
engine.validate)
def test_construct(self):
self.assertEqual(self.engine.localrc["ADMIN_PASSWORD"], "secret")
@mock.patch("rally.deployment.engines.devstack.open", create=True)
def test_prepare_server(self, mock_open):
mock_open.return_value = "fake_file"
server = mock.Mock()
server.password = "secret"
self.engine.prepare_server(server)
calls = [
mock.call("/bin/sh -e", stdin="fake_file"),
mock.call("chpasswd", stdin="rally:secret"),
]
self.assertEqual(calls, server.ssh.run.mock_calls)
filename = mock_open.mock_calls[0][1][0]
self.assertTrue(filename.endswith("rally/deployment/engines/"
"devstack/install.sh"))
self.assertEqual([mock.call(filename, "rb")], mock_open.mock_calls)
@mock.patch("rally.deployment.engine.Engine.get_provider")
@mock.patch("rally.deployment.engines.devstack.get_updated_server")
@mock.patch("rally.deployment.engines.devstack.get_script")
@mock.patch("rally.deployment.serverprovider.provider.Server")
@mock.patch("rally.deployment.engines.devstack.objects.Endpoint")
def test_deploy(self, mock_endpoint, mock_server, mock_get_script,
mock_get_updated_server, mock_engine_get_provider):
mock_engine_get_provider.return_value = fake_provider = (
mock.Mock()
)
server = mock.Mock(host="host")
mock_endpoint.return_value = "fake_endpoint"
mock_get_updated_server.return_value = ds_server = mock.Mock()
mock_get_script.return_value = "fake_script"
server.get_credentials.return_value = "fake_credentials"
fake_provider.create_servers.return_value = [server]
with mock.patch.object(self.engine, "deployment") as mock_deployment:
endpoints = self.engine.deploy()
self.assertEqual({"admin": "fake_endpoint"}, endpoints)
mock_endpoint.assert_called_once_with(
"http://host:5000/v2.0/", "admin", "secret", "admin", "admin")
mock_deployment.add_resource.assert_called_once_with(
info="fake_credentials",
provider_name="DevstackEngine",
type="credentials")
repo = "https://git.openstack.org/openstack-dev/devstack"
cmd = "/bin/sh -e -s %s master" % repo
server.ssh.run.assert_called_once_with(cmd, stdin="fake_script")
ds_calls = [
mock.call.ssh.run("cat > ~/devstack/localrc", stdin=mock.ANY),
mock.call.ssh.run("~/devstack/stack.sh")
]
self.assertEqual(ds_calls, ds_server.mock_calls)
localrc = ds_server.mock_calls[0][2]["stdin"]
self.assertIn("ADMIN_PASSWORD=secret", localrc)
|
Klaminite1337/Paragon
|
inc/VOCAL/translate.py
|
Python
|
mit
| 12,795
| 0.005784
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model, saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 40000, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 40000, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False,
"Train using fp16 instead of fp32.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
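# Illustrative sketch (not part of the original file): given the bucketing
# rule used by read_data below, a hypothetical pair with 3 source tokens and
# 6 target tokens lands in the first bucket, since 3 < 5 and 6 < 10.
# source_ids, target_ids = [3, 7, 12], [5, 9, 4, 8, 2, 1]
# for bucket_id, (source_size, target_size) in enumerate(_buckets):
#     if len(source_ids) < source_size and len(target_ids) < target_size:
#         break  # bucket_id == 0 here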
def read_data(source_path, target_path, max_size=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
    max_size: maximum number of lines to read; all others will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids) < source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size,
      FLAGS.fr_vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.initialize_all_variables())
return model
def train():
"""Train a en->fr translation model using WMT data."""
# Prepare WMT data.
print("Preparing WMT data in %s" % FLAGS.data_dir)
en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_data(
FLAGS.data_dir, FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
with tf.Session() as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(en_dev, fr_dev)
train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
  # the size of the i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print
|
LudovicRousseau/pyscard
|
smartcard/CardType.py
|
Python
|
lgpl-2.1
| 3,695
| 0
|
"""Abstract CarType.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from smartcard.Exceptions import InvalidATRMaskLengthException
from smartcard.System import readers
from smartcard.util import toHexString
class CardType(object):
"""Abstract base class for CardTypes.
Known sub-classes: L{smartcard.CardType.AnyCardType}
L{smartcard.CardType.ATRCardType}."""
def __init__(self):
"""CardType constructor."""
pass
def matches(self, atr, reader=None):
"""Returns true if atr and card connected match the CardType.
        @param atr: the atr to check for matching
        @param reader: the reader (optional); default is None
        The reader can be used in some sub-classes to do advanced
        matching that requires connecting to the card."""
pass
class AnyCardType(CardType):
"""The AnyCardType matches any card."""
def __init__(self):
super().__init__()
def matches(self, atr, reader=None):
"""Always returns true, i.e. AnyCardType matches any card.
        @param atr: the atr to check for matching
@param reader: the reader (optional); default is None"""
return True
class ATRCardType(CardType):
"""The ATRCardType defines a card from an ATR and a mask."""
def __init__(self, atr, mask=None):
"""ATRCardType constructor.
@param atr: the ATR of the CardType
@param mask: an optional mask to be applied to the ATR for
L{CardType} matching default is None
"""
super().__init__()
self.atr = list(atr)
self.mask = mask
if mask is None:
self.maskedatr = self.atr
else:
if len(self.atr) != len(self.mask):
raise InvalidATRMaskLengthException(toHexString(mask))
self.maskedatr = list(map(lambda x, y: x & y, self.atr, self.mask))
def matches(self, atr, reader=None):
"""Returns true if the atr matches the masked CardType atr.
@param atr: the atr
|
to chek for matching
@param reader: the reader (optional); default is None
When atr is compared to the CardType ATR, matches returns true if
and only if CardType.atr & CardType.mask = atr & CardType.mask,
where & is the bitwise logical AND."""
if len(atr) != len(self.atr):
            return False
if self.mask is not None:
maskedatr = list(map(lambda x, y: x & y, list(atr), self.mask))
        else:
            maskedatr = atr
return self.maskedatr == maskedatr
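# Illustrative matching example (ATR bytes are hypothetical): with mask
# [0xFF, 0x00], ATRCardType([0x3B, 0x00], [0xFF, 0x00]) matches the ATR
# [0x3B, 0x94], since both mask to [0x3B, 0x00].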
if __name__ == '__main__':
"""Small sample illustrating the use of CardType.py."""
r = readers()
print(r)
connection = r[0].createConnection()
connection.connect()
atrct = ATRCardType([0x3B, 0x16, 0x94, 0x20, 0x02, 0x01, 0x00, 0x00, 0x0D])
print(atrct.matches(connection.getATR()))
|
Juniper/ceilometer
|
ceilometer/network/notifications.py
|
Python
|
apache-2.0
| 8,892
| 0
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for producing network counter messages from Neutron notification
events.
"""
import oslo.messaging
from oslo_config import cfg
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer.openstack.common import log
from ceilometer import sample
OPTS = [
cfg.StrOpt('neutron_control_exchange',
default='neutron',
help="Exchange name for Neutron notifications.",
deprecated_name='quantum_control_exchange'),
]
cfg.CONF.register_opts(OPTS)
LOG = log.getLogger(__name__)
class NetworkNotificationBase(plugin_base.NotificationBase):
resource_name = None
@property
def event_types(self):
return [
# NOTE(flwang): When the *.create.start notification sending,
# there is no resource id assigned by Neutron yet. So we ignore
# the *.create.start notification for now and only listen the
# *.create.end to make sure the resource id is existed.
'%s.create.end' % self.resource_name,
'%s.update.*' % self.resource_name,
'%s.exists' % self.resource_name,
# FIXME(dhellmann): Neutron delete notifications do
# not include the same metadata as the other messages,
# so we ignore them for now. This isn't ideal, since
# it may mean we miss charging for some amount of time,
# but it is better than throwing away the existing
# metadata for a resource when it is deleted.
# '%s.delete.start' % (self.resource_name),
]
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target
This sequence is defining the exchange and topics to be connected for
this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.neutron_control_exchange)
for topic in conf.notification_topics]
def process_notification(self, message):
LOG.info(_('network notification %r') % message)
counter_name = getattr(self, 'counter_name', self.resource_name)
unit_value = getattr(self, 'unit', self.resource_name)
resource = message['payload'].get(self.resource_name)
if resource:
# NOTE(liusheng): In %s.update.start notifications, the id is in
# message['payload'] instead of resource itself.
if message['event_type'].endswith('update.start'):
resource['id'] = message['payload']['id']
resources = [resource]
else:
resources = message['payload'].get(self.resource_name + 's')
resource_message = message.copy()
for resource in resources:
resource_message['payload'] = resource
yield sample.Sample.from_notification(
name=counter_name,
type=sample.TYPE_GAUGE,
unit=unit_value,
volume=1,
user_id=resource_message['_context_user_id'],
project_id=resource_message['_context_tenant_id'],
resource_id=resource['id'],
message=resource_message)
event_type_split = resource_message['event_type'].split('.')
if len(event_type_split) > 2:
yield sample.Sample.from_notification(
name=counter_name
+ "." + event_type_split[1],
type=sample.TYPE_DELTA,
unit=unit_value,
volume=1,
user_id=resource_message['_context_user_id'],
project_id=resource_message['_context_tenant_id'],
resource_id=resource['id'],
message=resource_message)
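# Illustrative sketch (field values are hypothetical, not from the original
# module): the minimal message shape process_notification above expects.
# msg = {
#     'event_type': 'network.create.end',
#     '_context_user_id': 'fake-user',
#     '_context_tenant_id': 'fake-tenant',
#     'payload': {'network': {'id': 'fake-net-id'}},
# }
# Iterating the generator would yield one gauge sample named 'network' and,
# because the event type has three parts, one delta sample 'network.create'.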
class Network(NetworkNotificationBase):
"""Listen for Neutron network notifications.
Handle network.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'network'
class Subnet(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle subnet.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'subnet'
class Port(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle port.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'port'
class Router(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle router.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'router'
class FloatingIP(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle floatingip.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'floatingip'
counter_name = 'ip.floating'
unit = 'ip'
class Bandwidth(NetworkNotificationBase):
"""Listen for Neutron notifications.
Listen in order to mediate with the metering framework.
"""
event_types = ['l3.meter']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='bandwidth',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes'],
user_id=None,
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['label_id'],
message=message)
class Pool(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle pool.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'pool'
counter_name = 'network.services.lb.pool'
class Vip(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle vip.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'vip'
counter_name = 'network.services.lb.vip'
class Member(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle member.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'member'
counter_name = 'network.services.lb.member'
class HealthMonitor(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle health_monitor.{create.end|update.*|exists} notifications
from neutron.
"""
resource_name = 'health_monitor'
counter_name = 'network.services.lb.health_monitor'
class Firewall(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle firewall.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'firewall'
counter_name = 'network.services.firewall'
class FirewallPolicy(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle firewall_policy.{create.end|update.*|exists} notifications
from neutron.
"""
resource_name = 'firewall_policy'
counter_name = 'network.services.firewall.policy'
class FirewallRule(NetworkNotificationBase):
"""Listen for Neutron notifications.
    Handle firewall_rule.{create.end|update.*|exists} notifications
from neutron.
"""
resource_name = 'firewall_rule'
counter_name = 'network.services.firewall.rule'
class VPNService(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle vpnservice.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'vpnservice'
counter_name = 'network.services.vpn'
class IPSecPolicy(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handl
|
flake123p/ProjectH
|
Python/_Basics2_/A02_for_while/for_while.py
|
Python
|
gpl-3.0
| 238
| 0.016807
|
scores = [60, 73, 81, 95, 34]
n = 0
total = 0
for x in scores:
n += 1
total += x
avg = total/n
print("for loop print")
print(total)
print(avg)
i = 1
x = 0
while i <= 50:
x += 1
i += 1
print("while loop print")
print(x)
print(i)
|
starnes/Python
|
guessnameclass.py
|
Python
|
mit
| 1,452
| 0.006887
|
# A program that has a list of six colors and chooses one at random. The user then has three chances to guess the right color. After the third attempt the program outputs "Nope. The color I was thinking of was..."
import random
# this is the function that will execute the program
def program():
# These are the constants declaring what the colors are.
RED = 'red'
BLUE = 'blue'
    GREEN = 'green'
ORANGE = 'orange'
PURPLE = 'purple'
PINK = 'pink'
class Color:
pass
c1 = Color()
c2 = Color()
c3 = Color()
guesses_made = 0
    # This input lets the program address you by your name.
c1.name = input('Hello! What is your name?\n')
c2.color = [BLUE, GREEN, RED, ORANGE, PURPLE, PINK]
# This randomizes what color is chosen
c2.color = random.choice(c2.color)
print ('Well, {0}, I am thinking of a color between blue, green, red, orange, purple and pink.'.format(c1.name))
while guesses_made < 3:
c3.guess = input('Take a guess: ')
guesses_made += 1
if c3.guess != c2.color:
print ('Your guess is wrong.')
if c3.guess == c2.color:
break
if c3.guess == c2.color:
print ('Good job, {0}! You guessed my color in {1} guesses!'.format(c1.name, guesses_made))
else:
print ('Nope. The color I was thinking of was {0}'.format(c2.color))
if __name__ == "__main__":
program()
|
erikgrinaker/BOUT-dev
|
tools/pylib/boututils/watch.py
|
Python
|
gpl-3.0
| 2,197
| 0.004096
|
"""
Routines for watching files for changes
"""
from __future__ import print_function
from builtins import zip
import time
import os
def watch(files, timeout=None, poll=2):
"""
Watch a given file or collection of files
until one changes. Uses polling.
Inputs
======
files - Name of one or more files to watch
timeout - Optional timeout in seconds
(Default is no timeout)
poll - Optional polling interval in seconds
(Default is 2 seconds)
Returns
=======
The name of the first changed file,
or None if timed out before any changes
Examples
========
To watch one file, timing out after 60 seconds:
>>> watch('file1', timeout=60)
To watch 2 files, never timing out:
>>> watch(['file1', 'file2'])
Author: Ben Dudson <benjamin.dudson@york.ac.uk>
"""
    # Get modification time of file(s)
try:
if hasattr(files, '__iter__'):
# Iterable
lastmod = [ os.stat(f).st_mtime for f in files ]
iterable = True
else:
# Not iterable -> just one file
lastmod = os.stat(files).st_mtime
iterable = False
except:
print("Can't test modified time. Wrong file name?")
raise
start_time = time.time()
running = True
    while running:
        sleepfor = poll
if timeout:
# Check if timeout will be reached before next poll
if time.time() - start_time + sleepfor > timeout:
# Adjust time so that finish at timeout
sleepfor = timeout - (time.time() - start_time)
running = False # Stop after next test
time.sleep(sleepfor)
if iterable:
for last_t, f in zip(lastmod, files):
# Get the new modification time
t = os.stat(f).st_mtime
if t > last_t + 1.0: # +1 to reduce risk of false alarms
# File has been modified
return f
else:
t = os.stat(files).st_mtime
if t > lastmod + 1.0:
return files
return None
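# Minimal usage sketch (file name is hypothetical): block for up to 30
# seconds waiting for 'out.log' to change.
# changed = watch(['out.log'], timeout=30, poll=1)
# if changed is not None:
#     print(changed, "was modified")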
|
mhbu50/erpnext
|
erpnext/education/doctype/quiz_result/test_quiz_result.py
|
Python
|
gpl-3.0
| 153
| 0.006536
|
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
class TestQuizResult(unittest.TestCase):
	pass
|
shinglyu/servo
|
tests/wpt/web-platform-tests/mathml/tools/limits.py
|
Python
|
mpl-2.0
| 2,284
| 0.000438
|
#!/usr/bin/python
from utils import mathfont
import fontforge
nArySumCodePoint = 0x2211 # largeop operator
v = 3 * mathfont.em
f = mathfont.create("limits-lowerlimitbaselinedropmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = v
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 11 * mathfont.em
f = mathfont.create("limits-lowerlimitgapmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = v
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 5 * mathfont.em
f = mathfont.create("limits-upperlimitbaselinerisemin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = v
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 7 * mathfont.em
f = mathfont.create("limits-upperlimitgapmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = v
mathfont.save(f)
|
gnott/elife-bot
|
activity/activity_VersionDateLookup.py
|
Python
|
mit
| 3,787
| 0.004489
|
import json
from zipfile import ZipFile
import uuid
import activity
import re
import os
from os.path import isfile, join
from os import listdir, makedirs
from os import path
import datetime
from S3utility.s3_notification_info import S3NotificationInfo
from provider.execution_context import Session
import requests
from provider.storage_provider import StorageContext
from provider.article_structure import ArticleInfo
import provider.lax_provider as lax_provider
class activity_VersionDateLookup(activity.activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
activity.activity.__init__(self, settings, logger, conn, token, activity_task)
self.name = "VersionDateLookup"
self.pretty_name = "Version Date Lookup"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = "Looks up version date on Lax endpoints and stores version date in session " \
"(Currently used in Silent corrections only)"
self.logger = logger
def do_activity(self, data=None):
try:
session = Session(self.settings)
version = session.get_value(data['run'], 'version')
filename = session.get_value(data['run'], 'filename_last_element')
article_structure = ArticleInfo(filename)
version_date, error = self.get_version(self.settings, article_structure, article_structure.article_id, version)
if error is not None:
self.logger.error(error)
self.emit_monitor_event(self.settings, article_structure.article_id, version, data['run'],
self.pretty_name, "error",
" ".join(("Error Looking up version article", article_structure.article_id,
"message:", error)))
return activity.activity.ACTIVITY_PERMANENT_FAILURE
self.emit_monitor_event(self.settings, article_structure.article_id, version, data['run'],
self.pretty_name, "end",
" ".join(("Finished Version Lookup for article", article_structure.article_id,
"version:", version)))
session.store_value(data['run'], 'update_date', version_date)
return activity.activity.ACTIVITY_SUCCESS
        except Exception as e:
            self.logger.exception("Exception when trying to Lookup next version")
self.emit_monitor_event(self.settings, article_structure.article_id, version, data['run'], self.pretty_name,
"error", " ".join(("Error looking up version for article",
article_structure.article_id,
"message:", str(e))))
return activity.activity.ACTIVITY_PERMANENT_FAILURE
def get_version(self, settings, article_structure, article_id, version):
        version_date = None
        try:
version_date = article_structure.get_update_date_from_zip_filename()
if version_date:
return version_date, None
version_date = lax_provider.article_version_date_by_version(article_id, version, settings)
return version_date, None
except Exception as e:
error_message = "Exception when looking up version Date. Message: " + str(e)
return version_date, error_message
def execute_function(self, the_function, arg1, arg2):
return the_function(arg1, arg2)
|
openhumanoids/exotica
|
exotations/dynamics_solvers/exotica_cartpole_dynamics_solver/scripts/gen_second_order_dynamics.py
|
Python
|
bsd-3-clause
| 2,089
| 0.004787
|
from sympy import Symbol, sin, cos, diff
from pprint import pprint
theta = Symbol('theta')
tdot = Symbol('tdot')
xdot = Symbol('xdot')
u = Symbol('u')
m_p_ = Symbol('m_p_')
m_c_ = Symbol('m_c_')
g_ = Symbol('g_')
l_ = Symbol('l_')
xddot = (u + m_p_ * sin(theta) * (l_ * (tdot * tdot) + g_ * cos(theta))) / (m_c_ + m_p_ * (sin(theta) * sin(theta)))
tddot = - (l_ * m_p_ * cos(theta) * sin(theta) * (tdot * tdot) + u * cos(theta) + (m_c_ + m_p_) * g_ * \
sin(theta))/ (l_ * m_c_ + l_ * m_p_ * (sin(theta)*sin(theta)))
f = [
xdot, tdot, xddot, tddot
]
fx = [
# d/dx, d/dtheta, d/dxdot, d/dtdot
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, diff(xddot, theta), 0, diff(xddot, tdot)],
[0, diff(tddot, theta), 0, diff(tddot, tdot)]
]
fu = [
0, 0, diff(xddot, u), diff(tddot, u)
]
fuu = [
0, 0, diff(diff(xddot, u), u), diff(diff(tddot, u), u)
]
fxx = [
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
], # fx_x
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, theta), theta), 0, diff(diff(xddot, tdot), theta)],
        [0, diff(diff(tddot, theta), theta), 0, diff(diff(tddot, tdot), theta)]
], # fx_theta
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
], # fx_xdot
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, theta), tdot), 0, diff(diff(xddot, tdot), tdot)],
[0, diff(diff(tddot, theta), tdot), 0, diff(diff(tddot, tdot), tdot)]
], # fx_tdot
]
fu = [
0, 0, diff(xddot, u), diff(tddot, u)
]
# fu = 0, 0, diff(xddot, u), diff(tddot, u)
fux = [
# d/dx, d/dtheta, d/dxdot, d/dtdot
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, u), theta), 0, diff(diff(xddot, u), tdot)],
[0, diff(diff(tddot, u), theta), 0, diff(diff(tddot, u), tdot)]
]
fxu = [
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, theta), u), 0, diff(diff(xddot, tdot), u)],
[0, diff(diff(tddot, theta), u), 0, diff(diff(tddot, tdot), u)]
]
pprint(fxx)
pprint(fux)
pprint(fuu)
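# Optional numeric check (a sketch, not part of the original script): the
# symbolic entries above can be turned into plain functions with
# sympy.lambdify and evaluated at a hypothetical state.
# from sympy import lambdify
# dxddot_dtheta = lambdify((theta, tdot, u, m_p_, m_c_, g_, l_), fx[2][1])
# print(dxddot_dtheta(0.1, 0.0, 0.0, 0.2, 1.0, 9.81, 0.5))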
|
opensemanticsearch/open-semantic-etl
|
src/opensemanticetl/enhance_xmp.py
|
Python
|
gpl-3.0
| 4,568
| 0.001751
|
import xml.etree.ElementTree as ElementTree
import os.path
import sys
#
# is there a xmp sidecar file?
#
def get_xmp_filename(filename):
xmpfilename = False
# some xmp sidecar filenames are based on the original filename without extensions like .jpg or .jpeg
    filenamewithoutextension = '.'.join(filename.split('.')[:-1])
# check if a xmp sidecar file exists
if os.path.isfile(filename + ".xmp"):
xmpfilename = filename + ".xmp"
elif os.path.isfile(filename + ".XMP"):
xmpfilename = filename + ".XMP"
elif os.path.isfile(filenamewithoutextension + ".xmp"):
xmpfilename = filenamewithoutextension + ".xmp"
elif os.path.isfile(filenamewithoutextension + ".XMP"):
xmpfilename = filenamewithoutextension + ".XMP"
return xmpfilename
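# Illustrative check (hypothetical path, not part of the original module):
# sidecar = get_xmp_filename('/photos/a.jpg')
# tries '/photos/a.jpg.xmp', '/photos/a.jpg.XMP', '/photos/a.xmp',
# '/photos/a.XMP' in that order; sidecar is False when none exists.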
# Build path facets from filename
class enhance_xmp(object):
def process(self, parameters=None, data=None):
if parameters is None:
parameters = {}
if data is None:
data = {}
verbose = False
if 'verbose' in parameters:
if parameters['verbose']:
verbose = True
filename = parameters['filename']
#
# is there a xmp sidecar file?
#
xmpfilename = get_xmp_filename(filename)
if not xmpfilename:
if verbose:
print("No xmp sidecar file")
#
        # read meta data of the xmp sidecar file (= xml + rdf)
#
if xmpfilename:
        creator = False
        headline = False
location = False
tags = []
if verbose:
print("Reading xmp sidecar file {}".format(xmpfilename))
try:
# Parse the xmp file with utf 8 encoding
parser = ElementTree.XMLParser(encoding="utf-8")
et = ElementTree.parse(xmpfilename, parser)
            root = et.getroot()
# get author
try:
creator = root.findtext(
".//{http://purl.org/dc/elements/1.1/}creator")
if creator:
data['author_ss'] = creator
except BaseException as e:
sys.stderr.write("Exception while parsing creator from xmp {} {}".format(
xmpfilename, e.args[0]))
# get headline
try:
headline = root.findtext(
".//{http://ns.adobe.com/photoshop/1.0/}Headline")
if headline:
data['title_txt'] = headline
except BaseException as e:
sys.stderr.write("Exception while parsing headline from xmp {} {}".format(
xmpfilename, e.args[0]))
# get location
try:
location = root.findtext(
".//{http://iptc.org/std/Iptc4xmpCore/1.0/xmlns/}Location")
if location:
if 'locations_ss' in data:
data['locations_ss'].append(location)
else:
data['locations_ss'] = [location]
except BaseException as e:
sys.stderr.write("Exception while parsing location from xmp {} {}".format(
xmpfilename, e.args[0]))
# get tags (named "subject")
try:
for tag in root.findall(".//{http://purl.org/dc/elements/1.1/}subject/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Bag/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}li"):
try:
if 'tag_ss' in data:
data['tag_ss'].append(tag.text)
else:
data['tag_ss'] = [tag.text]
except BaseException as e:
sys.stderr.write("Exception while parsing a tag from xmp {} {}".format(
xmpfilename, e.args[0]))
except BaseException as e:
sys.stderr.write("Exception while parsing tags from xmp {} {}".format(
xmpfilename, e.args[0]))
except BaseException as e:
sys.stderr.write("Exception while parsing xmp {} {}".format(
xmpfilename, e.args[0]))
return parameters, data
|
emsi/hackoort
|
python/oorthap/bulb.py
|
Python
|
gpl-3.0
| 4,950
| 0.000606
|
import colorsys
import logging
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_LIGHTBULB, CATEGORY_FAN
from hackoort.bulb import Bulb
def hls2rgb(h, l, s):
"""Convert h, l, s in 0-1 range to rgb in 0-255
:param h: hue
:param l: luminance
:param s: saturation
:return: red, green, blue in 0-255 range
"""
rgb = colorsys.hls_to_rgb(h, l, s)
r, g, b = (int(color * 255) for color in rgb)
    return r, g, b
def rgb2hls(r, g, b):
"""Convert r,g,b in 0-255 range to hls in 0.1
:param r: red
:param g: green
:param b: blue
:return: hue, luminance, saturation
"""
return colorsys.rgb_to_hls(r/255.0, g/255.0, b/255.0)
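# Quick round-trip sanity sketch (illustrative, not part of the original
# module): pure red survives conversion through both helpers.
# assert hls2rgb(*rgb2hls(255, 0, 0)) == (255, 0, 0)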
class OortColorBulb(Accessory):
category = CATEGORY_LIGHTBULB
def __init__(self, driver, name, bulb: Bulb):
"""
:param driver: pyhap driver
:param name: descriptive name
        :param bulb: it has to be a connected oort bulb
"""
super().__init__(driver, name)
self.status = bulb.status
self.hue, _, self.saturation = rgb2hls(
self.status.red, self.status.green, self.status.blue)
serv_light = self.add_preload_service(
'Lightbulb', chars=["On", "Brightness", "Hue", "Saturation"]
)
self.char_on = serv_light.configure_char(
'On', setter_callback=self.set_on, value=self.status.on,
getter_callback=self.get_on
)
self.char_brightness = serv_light.configure_char(
"Brightness", setter_callback=self.set_brightness,
value=self.status.brightness, getter_callback=self.get_brightness
)
self.char_brightness = serv_light.configure_char(
"Hue", setter_callback=self.set_hue,
)
self.char_brightness = serv_light.configure_char(
"Saturation", setter_callback=self.set_saturation,
)
self.bulb = bulb
def get_on(self):
return self.status.on
def set_on(self, value):
# logging.info("Setting bulb: %s", value)
self.bulb.onoff(value)
# if value and self.bulb.status.rgbon:
# self.bulb.set_rgb_onoff(0)
def get_brightness(self):
return self.status.brightness
def set_brightness(self, value):
"""
The corresponding value is an integer representing a percentage
of the maximum brightness.
:param value:
:return:
"""
# logging.info("Setting brightness value: %s", value)
self.bulb.set_brightness_pct(value)
def set_hue(self, value):
"""
The corresponding value is a floating point number in units
of arc degrees. Values range from 0 to 360, representing the color
spectrum starting from red, through yellow, green, cyan, blue,
and finally magenta, before wrapping back to red.
"""
self.hue = value/360.0
self.bulb.set_rgb(*hls2rgb(self.hue, 0.5, self.saturation))
logging.info("Hue: %s", value)
def set_saturation(self, value):
"""
The corresponding value is a percentage of maximum saturation.
:param value:
:return:
"""
self.saturation = value / 100.0
logging.info("Saturation: %s", value)
def stop(self):
self.bulb.disconnect()
class OortColorBulbSwitch(Accessory):
category = CATEGORY_FAN
def __init__(self, driver, name, bulb: Bulb):
"""
:param driver: pyhap driver
:param name: descriptive name
        :param bulb: it has to be a connected oort bulb
"""
super().__init__(driver, name)
self.status = bulb.status
self.hue, _, self.saturation = rgb2hls(
self.status.red, self.status.green, self.status.blue)
serv_light = self.add_preload_service(
'Fan', chars=["On", "RotationDirection", "RotationSpeed"]
)
self.char_on = serv_light.configure_char(
'On', setter_callback=self.set_fake_on,
value=1, getter_callback=self.get_fake_on
)
self.char_color_on = serv_light.configure_char(
'RotationDirection', setter_callback=self.set_color_on,
value=self.status.on,
getter_callback=self.get_color_on
)
self.char_temperature = serv_light.configure_char(
"RotationSpeed", setter_callback=self.set_temperature,
value=self.status.brightness, getter_callback=self.get_temperature
)
self.bulb = bulb
    def get_fake_on(self):
return 1
def set_fake_on(self, value):
pass
def get_color_on(self):
return self.status.rgbon
def set_color_on(self, value):
self.bulb.set_rgb_onoff(value)
def get_temperature(self):
return self.status.temperature
def set_temperature(self, value):
self.bulb.set_temperature_pct(value)
|
glenn-edgar/local_controller_3
|
redis_graph_py3/redis_graph_functions.py
|
Python
|
mit
| 10,886
| 0.054106
|
import redis
import copy
import json
def basic_init(self):
self.sep = "["
self.rel_sep = ":"
self.label_sep = "]"
self.namespace = []
class Build_Configuration(object):
def __init__( self, redis_handle):
self.redis_handle = redis_handle
self.delete_all()
self.keys = set()
basic_init(self)
def build_namespace( self,name ):
return_value = copy.deepcopy(self.namespace)
return_value.append(name)
return return_value
def pop_namespace( self ):
del self.namespace[-1]
def add_header_node( self, relation,label=None, properties = {}, json_flag= True ):
if label== None:
label = relation
properties["name"] = label
self.construct_node( True, relation, label, properties, json_flag )
def end_header_node( self, assert_namespace ):
assert (assert_namespace == self.namespace[-1][0]) ,"miss match namespace got "+assert_namespace+" expected "+self.namespace[-1][0]
del self.namespace[-1]
def check_namespace( self ):
assert len(self.namespace) == 0, "unbalanced name space, current namespace: "+ json.dumps(self.namespace)
#print ("name space is in balance")
def add_info_node( self, relation,label, properties = {}, json_flag= True ):
self.construct_node( False, relation, label, properties, json_flag )
    # Concept of a namespace: the name is a string which ensures a unique key;
    # the name is essentially the directory structure of the tree.
def construct_node(self, push_namespace,relationship, label, properties, json_flag = True ):
redis_key, new_name_space = self.construct_basic_node( self.namespace, relationship,label )
if redis_key in self.keys:
raise ValueError("Duplicate Key")
self.keys.add(redis_key)
for i in properties.keys():
temp = json.dumps(properties[i] )
self.redis_handle.hset(redis_key, i, temp )
if push_namespace == True:
self.namespace = new_name_space
def _convert_namespace( self, namespace):
temp_value = []
for i in namespace:
temp_value.append(self.make_string_key( i[0],i[1] ))
key_string = self.sep+self.sep.join(temp_value)
return key_string
def construct_basic_node( self, namespace, relationship,label ): #tested
new_name_space = copy.copy(namespace)
new_name_space.append( [ relationship,label ] )
redis_string = self._convert_namespace(new_name_space)
self.redis_handle.hset(redis_string,"namespace",json.dumps(redis_string))
self.redis_handle.hset(redis_string,"name",json.dumps(label))
self.update_terminals( relationship, label, redis_string)
self.update_relationship( new_name_space, redis_string )
return redis_string, new_name_space
def make_string_key( self, relationship,label):
return relationship+self.rel_sep+label+self.label_sep
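    # Illustrative key layout (names are hypothetical): a node built under
    # namespace [["site", "plant1"], ["sensor", "temp"]] is stored at the
    # hash key "[site:plant1][sensor:temp]", and indexed in the sets
    # "%site", "#site:plant1", "&sensor" and "$sensor:temp".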
def update_relationship( self, new_name_space, redis_string ):
for relationship,label in new_name_space:
#print( relationship,label,redis_string)
self.redis_handle.sadd("@RELATIONSHIPS",relationship)
self.redis_handle.sadd("%"+relationship,redis_string)
self.redis_handle.sadd("#"+relationship+self.rel_sep+label,redis_string)
def update_terminals( self, relationship,label, redis_string ):
self.redis_handle.sadd("@TERMINALS",relationship)
self.redis_handle.sadd("&"+relationship,redis_string)
self.redis_handle.sadd("$"+relationship+self.rel_sep+label,redis_string)
def store_keys( self ):
for i in self.keys:
self.redis_handle.sadd("@GRAPH_KEYS", i )
def delete_all(self): #tested
self.redis_handle.flushdb()
class Query_Configuration(object):
def __init__( self, redis_handle):
self.redis_handle = redis_handle
basic_init(self)
def to_dictionary( self, list, key, json_flag = False ):
return_value = {}
for i in list:
if json_flag == True:
i = json.loads(i)
return_value[i[key]] = i
return return_value
def match_terminal_relationship( self, relationship, label= None , starting_set = None,property_values = None, data_flag = True ):
return_value = None
#print("initial starting set",starting_set)
if starting_set == None:
starting_set = self.redis_handle.smembers("@GRAPH_KEYS")
#print("starting set",starting_set)#
if label == None:
#print("made it here")
if self.redis_handle.sismember( "@TERMINALS", relationship) == True:
#print("made it here #2")
return_value = set(self.redis_handle.smembers("&"+relationship))
#print("return_value 1",return_value)
#print( starting_set)
return_value = return_value.intersection(starting_set)
#print("return_value",return_value)
else:
if self.redis_handle.sismember( "@TERMINALS", relationship) == True:
if self.redis_handle.exists("$"+relationship+self.rel_sep+label) == True:
return_value = self.redis_handle.smembers("$"+relationship+self.rel_sep+label)
return_value = return_value.intersection(starting_set)
if (property_values != None) and (return_value != None):
return_value = self.match_properties( return_value , property_values )
if data_flag == True:
return_value = self.return_data( return_value)
return return_value
def match_relationship( self, relationship, label= None , starting_set = None ):
return_value = None
        if starting_set == None:
starting_set = self.redis_handle.smembers("@GRAPH_KEYS")
#print("starting set",starting_set)#
if label == None:
#print("made it here")
if self.redis_handle.sismember( "@RELATIONSHIPS", relationship) == True:
#print("made it here #2")
return_value = set(self.redis_handle.smembers("%"+relationship))
#print("return_value 1",return_value)
#print( starting_set)
return_value = return_value.intersection(starting_set)
else:
if self.redis_handle.sismember( "@RELATIONSHIPS", relationship) == True:
if self.redis_handle.exists("#"+relationship+self.rel_sep+label) == True:
return_value = self.redis_handle.smembers("#"+relationship+self.rel_sep+label)
return_value = return_value.intersection(starting_set)
return return_value
def match_properties( self, starting_set , property_values ):
return_value = []
for i in list(starting_set):
flag = True
for j , value in property_values.items():
data = self.redis_handle.hget(i,j)
if data == None:
flag = False
break
if json.loads(data) != value:
flag = False
break
if flag == True:
return_value.append( i)
return return_value
def match_relationship_list ( self, relationship_list, starting_set = None, property_values = None, fetch_values = True ):
for relationship ,label in relationship_list:
starting_set = self.match_relationship( relationship, label, starting_set )
if property_values != None:
starting_set = self.match_properties( starting_set, property_values )
if fetch_values == True:
return_value = self.return_data( starting_set)
else:
return_value = starting_set
return return_value
def return_data( self, key_set ):
return_value = []
for i in key_set:
data = self.redis_handle.hgetall(i)
temp = {}
for j in data.keys():
try:
                temp[j] = json.loads(data[j])
except:
|
mmdg-oxford/papers
|
Schlipf-PRL-2018/model/step.py
|
Python
|
gpl-3.0
| 830
| 0.012048
|
from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_K, htr_to_meV, htr_to_eV
import argparser
import norm_k
import numpy as np
import scf
import system
args = argparser.read_argument(
'Evaluate step-like feature in electron-phonon coupling')
thres = args.thres / htr_to_meV
beta = htr_to_K / args.temp
Sigma = system.make_data(args.dft, args.vb)
Sigma.bose_einstein = bose_einstein(Sigma.freq, beta)
for energy_meV in np.arange(0.0, args.energy, 0.5):
energy = energy_meV / htr_to_meV
kk = norm_k.eval(Sigma.eff_mass, energy)
Sigma_in = 1e-3j / htr_to_meV
Sigma_out, it = scf.self_energy(args.method,
thres, Sigma, kk, Sigma_in)
if args.vb: real_energy = -energy
else: real_energy = energy
print(real_energy * htr_to_meV, -Sigma_out.imag * htr_to_meV, it)
|
fake-name/ReadableWebProxy
|
Misc/install_vmprof.py
|
Python
|
bsd-3-clause
| 616
| 0.021104
|
# From https://gist.github.com/destan/5540702#file-text2png-py
# coding=utf8
import multiprocessing
import threading
import time
import atexit
import os
import vmprof
def install_vmprof(name="thread"):
cpid = multiprocessing.current_process().name
ctid = threading.current_thread().name
fname = "vmprof-{}-{}-{}-{}.dat".format(name, cpid, ctid, time.time())
flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
outfd = os.open(fname, flags)
vmprof.enable(outfd, period=0.01)
# atexit.register(close_profile_file)
def close_profile_file():
print("Closing VMProf!")
vmprof.disable()
print("VMProf closed!")
|
roypur/python-bitcoin-accounting
|
new.py
|
Python
|
gpl-3.0
| 797
| 0.015056
|
#!/bin/python
from urllib import request
from pymongo import Connection
import argparse
import json
import pymongo
req = request.urlopen('https://blockchain.info/no/api/receive?method=create&address=19J9J4QHDun5YgUTfEU1qb3fSHTbCwcjGj')
encoding = req.headers.get_content_charset()
obj = json.loads(req.read().decode(encoding))
print(obj['input_address'])
parser = argparse.ArgumentParser()
parser.add_argument("--price")
parser.add_argument("--name")
parser.add_argument("--description")
args = parser.parse_args()
price = float(args.price) * 100000000
connection=Connection()
database=connection['bitcoin']
mycollection=database.entries
post={"Address":(obj['input_address']), "Price":price, "Name":args.name, "Description":args.description, "Confirmed":"No"}
mycollection.insert(post)
|
mosen/commandment
|
commandment/mdm/response_schema.py
|
Python
|
mit
| 10,132
| 0.00227
|
from marshmallow import Schema, fields, post_load
from marshmallow_enum import EnumField
from enum import IntFlag
from .. import models
from commandment.inventory import models as inventory_models
class ErrorChainItem(Schema):
LocalizedDescription = fields.String()
USEnglishDescription = fields.String()
ErrorDomain = fields.String()
ErrorCode = fields.Number()
class CommandResponse(Schema):
Status = fields.String()
UDID = fields.UUID()
CommandUUID = fields.UUID()
ErrorChain = fields.Nested(ErrorChainItem, many=True)
class OrganizationInfo(Schema):
pass
class AutoSetupAdminAccount(Schema):
GUID = fields.UUID()
shortName = fields.String()
class OSUpdateSettings(Schema):
CatalogURL = fields.String()
IsDefaultCatalog = fields.Boolean()
PreviousScanDate = fields.Date()
PreviousScanResult = fields.String()
PerformPeriodicCheck = fields.Boolean()
AutomaticCheckEnabled = fields.Boolean()
BackgroundDownloadEnabled = fields.Boolean()
AutomaticAppInstallationEnabled = fields.Boolean()
AutomaticOSInstallationEnabled = fields.Boolean()
AutomaticSecurityUpdatesEnabled = fields.Boolean()
class DeviceInformation(Schema):
# Table 5
UDID = fields.String(attribute='udid')
# Languages
DeviceID = fields.String(attribute='device_id')
OrganizationInfo = fields.Nested(OrganizationInfo)
LastCloudBackupDate = fields.Date(attribute='last_cloud_backup_date')
AwaitingConfiguration = fields.Boolean(attribute='awaiting_configuration')
AutoSetupAdminAccounts = fields.Nested(AutoSetupAdminAccount, many=True)
# Table 6
iTunesStoreAccountIsActive = fields.Boolean(attribute='itunes_store_account_is_active')
iTunesStoreAccountHash = fields.String(attribute='itunes_store_account_hash')
# Table 7
DeviceName = fields.String(attribute='device_name')
OSVersion = fields.String(attribute='os_version')
BuildVersion = fields.String(attribute='build_version')
ModelName = fields.String(attribute='model_name')
Model = fields.String(attribute='model')
ProductName = fields.String(attribute='product_name')
SerialNumber = fields.String(attribute='serial_number')
DeviceCapacity = fields.Float(attribute='device_capacity')
AvailableDeviceCapacity = fields.Float(attribute='available_device_capacity')
BatteryLevel = fields.Float(attribute='battery_level')
CellularTechnology = fields.Integer(attribute='cellular_technology')
IMEI = fields.String(attribute='imei')
MEID = fields.String(attribute='meid')
ModemFirmwareVersion = fields.String(attribute='modem_firmware_version')
IsSupervised = fields.Boolean(attribute='is_supervised')
IsDeviceLocatorServiceEnabled = fields.Boolean(attribute='is_device_locator_service_enabled')
IsActivationLockEnabled = fields.Boolean(attribute='is_activation_lock_enabled')
IsDoNotDisturbInEffect = fields.Boolean(attribute='is_do_not_disturb_in_effect')
EASDeviceIdentifier = fields.String(attribute='eas_device_identifier')
IsCloudBackupEnabled = fields.Boolean(attribute='is_cloud_backup_enabled')
OSUpdateSettings = fields.Nested(OSUpdateSettings, attribute='os_update_settings') # T8
LocalHostName = fields.String(attribute='local_hostname')
HostName = fields.String(attribute='hostname')
SystemIntegrityProtectionEnabled = fields.Boolean(attribute='sip_enabled')
# Array of str
#ActiveManagedUsers = fields.Nested(ActiveManagedUser)
IsMDMLostModeEnabled = fields.Boolean(attribute='is_mdm_lost_mode_enabled')
MaximumResidentUsers = fields.Integer(attribute='maximum_resident_users')
# Table 9
ICCID = fields.String(attribute='iccid')
BluetoothMAC = fields.String(attribute='bluetooth_mac')
WiFiMAC = fields.String(attribute='wifi_mac')
EthernetMACs = fields.String(attribute='ethernet_macs', many=True)
    CurrentCarrierNetwork = fields.String(attribute='current_carrier_network')
SIMCarrierNetwork = fields.String(attribute='sim_carrier_network')
SubscriberCarrierNetwork = fields.String(attribute='subscriber_carrier_network')
    CarrierSettingsVersion = fields.String(attribute='carrier_settings_version')
PhoneNumber = fields.String(attribute='phone_number')
VoiceRoamingEnabled = fields.Boolean(attribute='voice_roaming_enabled')
DataRoamingEnabled = fields.Boolean(attribute='data_roaming_enabled')
IsRoaming = fields.Boolean(attribute='is_roaming')
PersonalHotspotEnabled = fields.Boolean(attribute='personal_hotspot_enabled')
SubscriberMCC = fields.String(attribute='subscriber_mcc')
SubscriberMNC = fields.String(attribute='subscriber_mnc')
CurrentMCC = fields.String(attribute='current_mcc')
CurrentMNC = fields.String(attribute='current_mnc')
# @post_load
# def make_device(self, data):
# return models.Device(**data)
class DeviceInformationResponse(CommandResponse):
QueryResponses = fields.Nested(DeviceInformation)
class HardwareEncryptionCaps(IntFlag):
Nothing = 0
BlockLevelEncryption = 1
FileLevelEncryption = 2
All = BlockLevelEncryption | FileLevelEncryption
class FirewallApplicationItem(Schema):
BundleID = fields.String()
Allowed = fields.Boolean()
Name = fields.String()
class FirewallSettings(Schema):
FirewallEnabled = fields.Boolean()
BlockAllIncoming = fields.Boolean()
StealthMode = fields.Boolean()
Applications = fields.Nested(FirewallApplicationItem, many=True)
class SecurityInfoResponse(CommandResponse):
HardwareEncryptionCaps = EnumField(HardwareEncryptionCaps)
PasscodePresent = fields.Boolean()
PasscodeCompliant = fields.Boolean()
PasscodeCompliantWithProfiles = fields.Boolean()
PasscodeLockGracePeriodEnforced = fields.Integer()
FDE_Enabled = fields.Boolean()
FDE_HasPersonalRecoveryKey = fields.Boolean()
FDE_HasInstitutionalRecoveryKey = fields.Boolean()
FirewallSettings = fields.Nested(FirewallSettings)
SystemIntegrityProtectionEnabled = fields.Boolean()
class InstalledApplication(Schema):
Identifier = fields.String(attribute='bundle_identifier')
Version = fields.String(attribute='version')
ShortVersion = fields.String(attribute='short_version')
Name = fields.String(attribute='name')
BundleSize = fields.Integer(attribute='bundle_size')
DynamicSize = fields.Integer(attribute='dynamic_size')
IsValidated = fields.Boolean(attribute='is_validated')
ExternalVersionIdentifier = fields.String(attribute='external_version_identifier') # iOS 11
@post_load
def make_installed_application(self, data: dict) -> inventory_models.InstalledApplication:
return inventory_models.InstalledApplication(**data)
class InstalledApplicationListResponse(CommandResponse):
InstalledApplicationList = fields.Nested(InstalledApplication, many=True)
class CertificateListItem(Schema):
CommonName = fields.String()
IsIdentity = fields.Boolean()
Data = fields.String()
@post_load
def make_installed_certificate(self, data: dict) -> inventory_models.InstalledCertificate:
return inventory_models.InstalledCertificate(**data)
class CertificateListResponse(CommandResponse):
CertificateList = fields.Nested(CertificateListItem, many=True)
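# Usage sketch (response dict is hypothetical; marshmallow 3 returns a plain
# dict from load(), marshmallow 2 an UnmarshalResult):
# raw = {'Status': 'Acknowledged',
#        'CommandUUID': '00000000-0000-0000-0000-000000000000'}
# result = CommandResponse().load(raw)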
class AvailableOSUpdate(Schema):
AllowsInstallLater = fields.Boolean(attribute='allows_install_later')
AppIdentifiersToClose = fields.List(fields.String, attribute='app_identifiers_to_close', many=True)
HumanReadableName = fields.String(attribute='human_readable_name')
HumanReadableNameLocale = fields.String(attribute='human_readable_name_locale')
IsConfigDataUpdate = fields.Boolean(attribute='is_config_data_update')
IsCritical = fields.Boolean(attribute='is_critical')
IsFirmwareUpdate = fields.Boolean(attribute='is_firmware_update')
MetadataURL = fields.String(attribute='metadata_url')
ProductKey = fields.String(attribute='product_key')
RestartRequired = fields.Boolean(attribute='restart_required')
Version = fields.String(attribute='version')
@post_load
def make_available_os_update(self, data:
|
andrius-preimantas/purchase-workflow
|
purchase_request_to_requisition/tests/test_purchase_request_to_requisition.py
|
Python
|
agpl-3.0
| 3,251
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests import common
from openerp.tools import SUPERUSER_ID
class TestPurchaseRequestToRequisition(common.TransactionCase):
def setUp(self):
super(TestPurchaseRequestToRequisition, self).setUp()
self.purchase_request = self.env['purchase.request']
self.purchase_request_line = self.env['purchase.request.line']
self.wiz =\
self.env['purchase.request.line.make.purchase.requisition']
self.purchase_requisition_partner_model =\
self.env['purchase.requisition.partner']
self.purchase_order = self.env['purchase.order']
def test_purchase_request_to_purchase_requisition(self):
vals = {
'picking_type_id': self.env.ref('stock.picking_type_in').id,
'requested_by': SUPERUSER_ID,
}
purchase_request = self.purchase_request.create(vals)
vals = {
'request_id': purchase_request.id,
'product_id':
self.env.ref('product.product_product_13').id,
'product_uom_id': self.env.ref('product.product_uom_unit').id,
'product_qty': 5.0,
}
purchase_request_line = self.purchase_request_line.create(vals)
wiz_id = self.wiz.with_context(
active_model="purchase.request.line",
active_ids=[purchase_request_line.id],
active_id=purchase_request_line.id,).create({})
wiz_id.make_purchase_requisition()
self.assertTrue(
len(purchase_request_line.requisition_lines.ids) == 1,
'Should have one purchase requisition line created')
requisition_id = purchase_request_line.requisition_lines.requisition_id
self.assertEquals(
len(purchase_request.line_ids),
len(requisition_id.line_ids), 'Should have the same lines')
requisition_line = requisition_id.line_ids
self.assertEquals(
requisition_line.product_id.id,
purchase_request_line.product_id.id,
'Should have the same products')
self.assertEquals(
purchase_request.state,
requisition_id.state,
'Should have the same state')
requisition_id.tender_in_progress()
requisition_id.tender_open()
vals = {
'partner_id': self.env.ref('base.res_partner_12').id,
}
requisition_partner_id =\
self.purchase_requisition_partner_model.with_context(
active_model='purchase.requisition',
active_ids=[requisition_id.id],
active_id=requisition_id.id,).create(vals)
requisition_partner_id.create_order()
domain = [
('requisition_id', '=', requisition_id.id),
]
purchase_id = self.purchase_order.search(domain)
self.assertTrue(purchase_id, 'Should find purchase order')
purchase_id.signal_workflow('purchase_confirm')
self.assertEquals(
len(
purchase_id.order_line.purchase_request_lines
), 1, 'Should have a link between order lines and request lines')
|
saturnast/python-learning
|
tempCodeRunnerFile.py
|
Python
|
mit
| 441
| 0.006803
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# File Name: fact.py
# Author: Lipsum
# Mail: niuleipeng@gmail.com
# Created Time: 2016-05-11 17:27:38
# def fact(n):
# if n == 1:
# return 1
# return fact(n-1) * n
def fact(n):
return fact_iter(n, 1)
def fact_iter(num, product):
if num == 1:
return product
return fact_iter(num - 1, num * product)
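# Illustrative trace (not part of the original file): the accumulator makes
# the recursion tail-style, e.g.
#   fact(4) -> fact_iter(4, 1) -> fact_iter(3, 4) -> fact_iter(2, 12)
#           -> fact_iter(1, 24) -> 24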
num = int(input('input a number plz:'))
print(fact(num))
|
laurentb/weboob
|
modules/caissedepargne/cenet/browser.py
|
Python
|
lgpl-3.0
| 12,250
| 0.002857
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
from weboob.browser import LoginBrowser, need_login, StatesMixin
from weboob.browser.url import URL
from weboob.browser.exceptions import ClientError
from weboob.exceptions import BrowserIncorrectPassword, BrowserUnavailable
from weboob.capabilities.base import find_object
from weboob.capabilities.bank import Account
from weboob.tools.capabilities.bank.transactions import sorted_transactions, FrenchTransaction
from .pages import (
ErrorPage,
LoginPage, CenetLoginPage, CenetHomePage,
CenetAccountsPage, CenetAccountHistoryPage, CenetCardsPage,
CenetCardSummaryPage, SubscriptionPage, DownloadDocumentPage,
CenetLoanPage,
)
from ..pages import CaissedepargneKeyboard
__all__ = ['CenetBrowser']
class CenetBrowser(LoginBrowser, StatesMixin):
BASEURL = "htt
|
ps://www.cenet.caisse-epargne.fr"
STATE_DURATION = 5
login = URL(
r'https://(?P<domain>[^/]+)/authentification/manage\?step=identification&identifiant=(?P<login>.*)',
r'https://.*/authentification/manage\?step=identification&identifiant=.*',
r'https://.*/login.aspx',
LoginPage,
)
    account_login = URL(r'https://(?P<domain>[^/]+)/authentification/manage\?step=account&identifiant=(?P<login>.*)&account=(?P<accountType>.*)', LoginPage)
cenet_vk = URL(r'https://www.cenet.caisse-epargne.fr/Web/Api/ApiAuthentification.asmx/ChargerClavierVirtuel')
cenet_home = URL(r'/Default.aspx$', CenetHomePage)
cenet_accounts = URL(r'/Web/Api/ApiComptes.asmx/ChargerSyntheseComptes', CenetAccountsPage)
cenet_loans = URL(r'/Web/Api/ApiFinancements.asmx/ChargerListeFinancementsMLT', CenetLoanPage)
cenet_account_history = URL(r'/Web/Api/ApiComptes.asmx/ChargerHistoriqueCompte', CenetAccountHistoryPage)
cenet_account_coming = URL(r'/Web/Api/ApiCartesBanquaires.asmx/ChargerEnCoursCarte', CenetAccountHistoryPage)
cenet_tr_detail = URL(r'/Web/Api/ApiComptes.asmx/ChargerDetailOperation', CenetCardSummaryPage)
cenet_cards = URL(r'/Web/Api/ApiCartesBanquaires.asmx/ChargerCartes', CenetCardsPage)
error = URL(
r'https://.*/login.aspx',
r'https://.*/Pages/logout.aspx.*',
r'https://.*/particuliers/Page_erreur_technique.aspx.*',
ErrorPage,
)
cenet_login = URL(
r'https://.*/$',
r'https://.*/default.aspx',
CenetLoginPage,
)
subscription = URL(r'/Web/Api/ApiReleves.asmx/ChargerListeEtablissements', SubscriptionPage)
documents = URL(r'/Web/Api/ApiReleves.asmx/ChargerListeReleves', SubscriptionPage)
download = URL(r'/Default.aspx\?dashboard=ComptesReleves&lien=SuiviReleves', DownloadDocumentPage)
__states__ = ('BASEURL',)
def __init__(self, nuser, *args, **kwargs):
# The URL to log in and to navigate are different
self.login_domain = kwargs.pop('domain', self.BASEURL)
if not self.BASEURL.startswith('https://'):
self.BASEURL = 'https://%s' % self.BASEURL
self.accounts = None
self.nuser = nuser
super(CenetBrowser, self).__init__(*args, **kwargs)
def do_login(self):
data = self.login.go(login=self.username, domain=self.login_domain).get_response()
if len(data['account']) > 1:
# additional request where there is more than one
# connection type (called typeAccount)
# TODO: test all connection type values if needed
account_type = data['account'][0]
self.account_login.go(login=self.username, accountType=account_type, domain=self.login_domain)
data = self.page.get_response()
if data is None:
raise BrowserIncorrectPassword()
elif not self.nuser:
raise BrowserIncorrectPassword("Erreur: Numéro d'utilisateur requis.")
if "authMode" in data and data['authMode'] != 'redirect':
raise BrowserIncorrectPassword()
payload = {'contexte': '', 'dataEntree': None, 'donneesEntree': "{}", 'filtreEntree': "\"false\""}
res = self.cenet_vk.open(data=json.dumps(payload), headers={'Content-Type': "application/json"})
content = json.loads(res.text)
d = json.loads(content['d'])
end = json.loads(d['DonneesSortie'])
_id = end['Identifiant']
vk = CaissedepargneKeyboard(end['Image'], end['NumerosEncodes'])
code = vk.get_string_code(self.password)
post_data = {
'CodeEtablissement': data['codeCaisse'],
'NumeroBad': self.username,
'NumeroUtilisateur': self.nuser
}
self.location(data['url'], data=post_data, headers={'Referer': 'https://www.cenet.caisse-epargne.fr/'})
return self.page.login(self.username, self.password, self.nuser, data['codeCaisse'], _id, code)
@need_login
def get_accounts_list(self):
if self.accounts is None:
data = {
'contexte': '',
'dateEntree': None,
'donneesEntree': 'null',
'filtreEntree': None
}
try:
self.accounts = [account for account in self.cenet_accounts.go(json=data).get_accounts()]
except ClientError:
# Unauthorized due to wrongpass
raise BrowserIncorrectPassword()
self.cenet_loans.go(json=data)
for account in self.page.get_accounts():
self.accounts.append(account)
for account in self.accounts:
try:
account._cards = []
self.cenet_cards.go(json=data)
for card in self.page.get_cards():
if card['Compte']['Numero'] == account.id:
account._cards.append(card)
except BrowserUnavailable:
                # for some accounts, the site can keep throwing errors for weeks
self.logger.warning('ignoring cards because site is unavailable...')
account._cards = []
return iter(self.accounts)
def get_loans_list(self):
return []
@need_login
def get_history(self, account):
if account.type == Account.TYPE_LOAN:
return []
headers = {
'Content-Type': 'application/json; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01'
}
data = {
'contexte': '',
'dateEntree': None,
'filtreEntree': None,
'donneesEntree': json.dumps(account._formated),
}
items = []
self.cenet_account_history.go(data=json.dumps(data), headers=headers)
# there might be some duplicate transactions regarding the card type ones
# because some requests lead to the same transaction list
# even with different parameters/data in the request
card_tr_list = []
while True:
data_out = self.page.doc['DonneesSortie']
for tr in self.page.get_history():
items.append(tr)
if tr.type is FrenchTransaction.TYPE_CARD_SUMMARY:
if find_object(card_tr_list, label=tr.label, amount=tr.amount, raw=tr.raw, date=tr.date, rdate=tr.rdate):
self.logger.warning('Duplicated transaction: %s', tr)
item
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/smetric.py
|
Python
|
gpl-3.0
| 1,194
| 0.000838
|
import networkx as nx
#from networkx.generators.smax import li_smax_graph
def s_metric(G, normalized=True):
"""Return the s-metric of graph.
    The s-metric is defined as the sum of the products deg(u)*deg(v)
    for every edge (u,v) in G. If normalized is true, construct the
    s-max graph, compute its s-metric, and return the normalized
    s value.
Parameters
----------
G : graph
The graph used to compute the s-metric.
normalized : bool (optional)
Normalize the value.
Returns
-------
s : float
The s-metric of the graph.
References
----------
.. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger,
Towards a Theory of Scale-Free Graphs:
Definition, Properties, and Implications (Extended Version), 2005.
       https://arxiv.org/abs/cond-mat/0501169
"""
if normalized:
raise nx.NetworkXError("Normalization not implemented")
    # Gmax = li_smax_graph(list(G.degree().values()))
# return s_metric(G,normalized=False)/s_metric(Gmax,normalized=False)
# else:
return float(sum([G.degree(u) * G.degree(v) for (u, v) in G.edges()]))
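# Illustrative check (not part of the original file): for the 3-node path
# graph, edges (0, 1) and (1, 2) give degree products 1*2 + 2*1, so
#   s_metric(nx.path_graph(3), normalized=False) == 4.0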
|
semanticbits/survey_stats
|
src/survey_stats/survey.py
|
Python
|
bsd-2-clause
| 5,498
| 0.002001
|
import pandas as pd
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
from rpy2.robjects import Formula
from rpy2 import robjects as ro
from survey_stats.helpr import svyciprop_xlogit, svybyci_xlogit, factor_summary
from survey_stats.helpr import filter_survey_var, rm_nan_survey_var, svyby_nodrop
from survey_stats.helpr import fix_lonely_psus
from survey_stats import pdutil as u
from survey_stats.const import DECIMALS
from survey_stats import log
import gc
rbase = importr('base')
rstats = importr('stats')
rsvy = importr('survey')
rfeather = importr('feather', on_conflict='warn')
logger = log.getLogger()
def dim_design(d):
return pandas2ri.ri2py(rbase.dim(d[d.names.index('variables')]))
def subset_survey(des, filt, qn=None):
# filt is a dict with vars as keys and list of acceptable values as levels
# example from R:
    # subset(dclus1, sch.wide=="Yes" & comp.imp=="Yes")
if not len(filt.keys()) > 0:
# empty filter, return original design object
return des
filtered = rbase.Reduce(
"&",
[filter_survey_var(des, k, v) for k, v in filt.items()] +
([rm_nan_survey_var(des, qn)] if qn else [])
)
return rsvy.subset_survey_design(des, filtered)
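# Illustrative call (not part of the original file): keep records where
# sch.wide == "Yes" and drop rows with NA in qn8:
#   subset_survey(des, {'sch.wide': ['Yes']}, qn='qn8')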
def fetch_stats_by(des, qn_f, r, vs):
lvl_f = '~%s' % '+'.join(vs)
ct_f = '%s + %s' % (lvl_f, qn_f[1:])
logger.info('gen stats for interaction level', lvl_f=lvl_f, qn_f=qn_f, ct_f=ct_f, r=r)
cols = vs + ['mean', 'se', 'ci_l', 'ci_u']
df = svybyci_xlogit(Formula(qn_f), Formula(lvl_f), des, svyciprop_xlogit, vartype=['se', 'ci'])
df = pandas2ri.ri2py(df)
df.columns = cols
df = df.set_index(vs)
cts = svyby_nodrop(Formula(lvl_f), Formula(ct_f), des, rsvy.unwtd_count, keep_var=True)
cts = pandas2ri.ri2py(cts).fillna(0.0)
cts.columns = vs + ['eql', 'ct', 'se_ignore']
cts = cts.set_index(vs)
cts['eql'] = cts.eql.apply(lambda x: x == 'TRUE' if type(x) == str else x > 0)
counts = cts.ct[cts.eql == True].tolist()
ssizes = cts.groupby(vs).sum()['ct']
df = df.assign(count=counts, sample_size=ssizes)
if df.shape[0] > 0:
df['response'] = r
df['level'] = len(vs)
rdf = u.fill_none(df.round(DECIMALS)).reset_index()
logger.info('create svyby df', df=rdf, vars=vs, eq=cts)
return rdf
def fetch_stats_totals(des, qn_f, r):
total_ci = svyciprop_xlogit(Formula(qn_f), des, multicore=False)
# extract stats
logger.info('fetching stats totals', r=r, q=qn_f)
cts = rsvy.svyby(Formula(qn_f), Formula(qn_f), des,
rsvy.unwtd_count, na_rm=True,
na_rm_by=True, na_rm_all=True, multicore=False)
cts = pandas2ri.ri2py(cts)
cols = ['eql', 'ct', 'se_ignore']
cts.columns = cols
ct = cts.ct[cts.eql == 1].sum()
ss = cts.ct.sum()
res = {'level': 0,
'response': r,
'mean': u.guard_nan(
rbase.as_numeric(total_ci)[0]) if total_ci else None,
'se': u.guard_nan(
rsvy.SE(total_ci)[0]) if total_ci else None,
'ci_l': u.guard_nan(
rbase.attr(total_ci, 'ci')[0]) if total_ci else None,
'ci_u': u.guard_nan(
rbase.attr(total_ci, 'ci')[1]) if total_ci else None,
'count': ct,
'sample_size': ss
}
# round as appropriate
logger.info('finished computation lvl1', res=res,
total_ci=total_ci, ct=ct, ss=ss)
res = pd.DataFrame([res]).round(DECIMALS)
return u.fill_none(res)
def fetch_stats(des, qn, r, vs=[], filt={}):
# ex: ~qn8
rbase.gc()
gc.collect()
qn_f = '~I(%s=="%s")' % (qn, r)
logger.info('subsetting des with filter', filt=filt)
des = subset_survey(des, filt)
logger.info('done subsetting')
dfs = [fetch_stats_totals(des, qn_f, r)]
levels = [vs[:k+1] for k in range(len(vs))]
sts = map(lambda lvl: fetch_stats_by(des, qn_f, r, lvl), levels)
dfz = pd.concat(dfs + sts, ignore_index=True)
# get stats_by_fnats for each level of interactions in vars
# using svyby to compute across combinations of loadings
logger.info('finished computations, appending dfs', dfs=dfz)
return u.fill_none(dfz) # .round(DECIMALS)
def subset(d, filter):
return d._replace(des=subset_survey(d, filter))
def des_from_feather(fthr_file, denovo=False, fpc=False, design='cluster'):
rbase.gc()
gc.collect()
if fpc and design=='cluster':
fix_lonely_psus()
rdf = rfeather.read_feather(fthr_file)
logger.info('creating survey design from data and annotations',
cols=list(rbase.colnames(rdf)))
strata = '~strata'
if denovo:
strata = '~year+strata'
res = rsvy.svydesign(
id=(Formula('~psu') if design == 'cluster' else Formula('~1')),
weight=Formula('~weight'),
strata=Formula(strata), data=rdf, nest=True,
fpc=(Formula('~fpc') if fpc else ro.NULL))
rbase.gc()
gc.collect()
return res
def des_from_survey_db(tbl, db, host, port, denovo=False, fpc=False,design='cluster'):
strata = '~strata'
if denovo:
strata = '~yr+sitecode'
return rsvy.svydesign(id=Formula('~psu'), weight=Formula('~weight'),
strata=Formula(strata), nest=True,
fpc=(Formula('~fpc') if fpc else ro.NULL),
data=tbl, dbname=db, host=host, port=port,
dbtype='MonetDB.R')
|
nachoplus/cronoStamper
|
tools/cameraSimulator.py
|
Python
|
gpl-2.0
| 1,376
| 0.021076
|
#!/usr/bin/python
'''
Cronostamper test suit:
Simple trigger simulator. Open a socket and execute
/oneShot when someone get connected and exit.
"oneshot" activate the GPIO 7 just one time.
Nacho Mas Junary-2017
'''
import socket
import commands
import sys
import time
import datetime
from thread import *
HOST = '' # Symbolic name meaning all available interfaces
PORT = 7777 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Starting CronoStamper Sockets Server.'
print 'Socket created'
#Bind socket to local host and port
try:
s.bind((HOST, PORT))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
#Start listening on socket
s.listen(10)
print 'Socket now listening'
def clientthread(conn):
    rst=commands.getoutput('./oneShot')
d = str(datetime.datetime.fromtimestamp(float(rst)))
conn.sendall(d+'\r\n')
print d
conn.close()
#now keep talking with the client
while 1:
#wait to accept a connection - blocking call
conn, addr = s.accept()
print 'Connected with ' + addr[0] + ':' + str(addr[1])
#start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.
start_new_thread(clientthread ,(conn,))
s.close()
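# Illustrative client sketch (not part of the original file): connect to the
# simulator and read the trigger timestamp it sends back.
#   import socket
#   c = socket.create_connection(('localhost', 7777))
#   print c.recv(64)
#   c.close()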
|
dropbox/changes-lxc-wrapper
|
tests/cli/test_wrapper.py
|
Python
|
apache-2.0
| 3,544
| 0.000564
|
import threading
from mock import patch
from uuid import uuid4
from changes_lxc_wrapper.cli.wrapper import WrapperCommand
def generate_jobstep_data():
    # this must generate a *valid* dataset that should result in a full
# run
return {
'status': {'id': 'queued'},
'data': {},
'expectedSnapshot': None,
'snapshot': {
'id': 'a1028849e8cf4ff0a7d7fdfe3c4fe925',
},
}
def setup_function(function):
assert threading.activeCount() == 1
def teardown_function(function):
assert threading.activeCount() == 1
@patch.object(WrapperCommand, 'run_build_script')
def test_local_run(mock_run):
command = WrapperCommand([
'--', 'echo 1',
])
command.run()
mock_run.assert_called_once_with(
release='precise',
post_launch=None,
snapshot=None,
save_snapshot=False,
s3_bucket=None,
pre_launch=None,
validate=True,
user='ubuntu',
cmd=['echo 1'],
script=None,
flush_cache=False,
clean=False,
keep=False,
)
@patch('changes_lxc_wrapper.cli.wrapper.ChangesApi')
@patch.object(WrapperCommand, 'run_build_script')
def test_remote_run(mock_run, mock_api_cls):
jobstep_id = uuid4()
jobstep_data = generate_jobstep_data()
mock_api = mock_api_cls.return_value
mock_api.get_jobstep.return_value = jobstep_data
command = WrapperCommand([
'--jobstep-id', jobstep_id.hex,
'--api-url', 'http://changes.example.com',
])
command.run()
mock_run.assert_called_once_with(
release='precise',
post_launch=None,
snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925',
save_snapshot=False,
s3_bucket=None,
pre_launch=None,
validate=True,
user='ubuntu',
cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex],
flush_cache=False,
clean=False,
keep=False,
)
@patch('changes_lxc_wrapper.cli.wrapper.ChangesApi')
@patch.object(WrapperCommand, 'run_build_script')
def test_already_finished_job(mock_run, mock_api_cls):
jobstep_id = uuid4()
    jobstep_data = generate_jobstep_data()
jobstep_data['status']['id'] = 'finished'
mock_api = mock_api_cls.return_value
mock_api.get_jobstep.return_value = jobstep_data
command = WrapperCommand([
'--jobstep-id', jobstep_id.hex,
'--api-url', 'http://changes.example.com',
])
command.run()
assert not mock_run.called
@patch('changes_lxc_wrapper.cli.wrapper.ChangesApi')
@patch.object(WrapperCommand, 'run_build_script')
def test_non_default_release(mock_run, mock_api_cls):
jobstep_id = uuid4()
jobstep_data = generate_jobstep_data()
jobstep_data['data']['release'] = 'fakerelease'
mock_api = mock_api_cls.return_value
mock_api.get_jobstep.return_value = jobstep_data
command = WrapperCommand([
'--jobstep-id', jobstep_id.hex,
'--api-url', 'http://changes.example.com',
])
command.run()
mock_run.assert_called_once_with(
release='fakerelease',
post_launch=None,
snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925',
save_snapshot=False,
s3_bucket=None,
pre_launch=None,
validate=True,
user='ubuntu',
cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex],
flush_cache=False,
clean=False,
keep=False,
)
|
googlefonts/statmake
|
statmake/lib.py
|
Python
|
mit
| 7,470
| 0.002142
|
import collections
import copy
from typing import Dict, Mapping, Optional, Set
import fontTools.misc.py23
import fontTools.ttLib
import fontTools.ttLib.tables.otTables as otTables
import statmake.classes
def apply_stylespace_to_variable_font(
stylespace: statmake.classes.Stylespace,
varfont: fontTools.ttLib.TTFont,
additional_locations: Mapping[str, float],
):
"""Generate and apply a STAT table to a variable font.
additional_locations: used in subset Designspaces to express where on which other
axes not defined by an <axis> element the varfont stands. The primary use-case is
defining a complete STAT table for variable fonts that do not include all axes of a
family (either because they intentionally contain just a subset of axes or because
the designs are incompatible).
"""
    name_table, stat_table = generate_name_and_STAT_variable(
stylespace, varfont, additional_locations
)
varfont["name"] = name_table
varfont["STAT"] = stat_table
def generate_name_and_STAT_variable(
stylespace: statmake.classes.Stylespace,
varfont: fontTools.ttLib.TTFont,
    additional_locations: Mapping[str, float],
):
"""Generate a new name and STAT table ready for insertion."""
if "fvar" not in varfont:
raise ValueError(
"Need a variable font with the fvar table to determine which instances "
"are present."
)
stylespace_name_to_axis = {a.name.default: a for a in stylespace.axes}
fvar_name_to_axis = {}
name_to_tag: Dict[str, str] = {}
name_to_index: Dict[str, int] = {}
index = 0
for index, fvar_axis in enumerate(varfont["fvar"].axes):
fvar_axis_name = _default_name_string(varfont, fvar_axis.axisNameID)
try:
stylespace_axis = stylespace_name_to_axis[fvar_axis_name]
except KeyError:
raise ValueError(
f"No stylespace entry found for axis name '{fvar_axis_name}'."
)
if fvar_axis.axisTag != stylespace_axis.tag:
raise ValueError(
f"fvar axis '{fvar_axis_name}' tag is '{fvar_axis.axisTag}', but "
f"Stylespace tag is '{stylespace_axis.tag}'."
)
fvar_name_to_axis[fvar_axis_name] = fvar_axis
name_to_tag[fvar_axis_name] = fvar_axis.axisTag
name_to_index[fvar_axis_name] = index
for axis_name in additional_locations:
try:
stylespace_axis = stylespace_name_to_axis[axis_name]
except KeyError:
raise ValueError(f"No stylespace entry found for axis name '{axis_name}'.")
name_to_tag[stylespace_axis.name.default] = stylespace_axis.tag
index += 1
name_to_index[stylespace_axis.name.default] = index
# First, determine which stops are used on which axes. The STAT table must contain
# a name for each stop that is used on each axis, so each stop must have an entry
# in the Stylespace. Also include locations in additional_locations that can refer
# to axes not present in the current varfont.
stylespace_stops: Dict[str, Set[float]] = {}
for axis in stylespace.axes:
stylespace_stops[axis.tag] = {l.value for l in axis.locations}
for named_location in stylespace.locations:
for name, value in named_location.axis_values.items():
stylespace_stops[name_to_tag[name]].add(value)
axis_stops: Mapping[str, Set[float]] = collections.defaultdict(set) # tag to stops
for instance in varfont["fvar"].instances:
for k, v in instance.coordinates.items():
if v not in stylespace_stops[k]:
raise ValueError(
f"There is no Stylespace entry for stop {v} on axis {k}."
)
axis_stops[k].add(v)
for k, v in additional_locations.items():
axis_tag = name_to_tag[k]
if v not in stylespace_stops[axis_tag]:
raise ValueError(
f"There is no Stylespace entry for stop {v} on axis {k} (from "
"additional locations)."
)
axis_stops[axis_tag].add(v)
# Construct temporary name and STAT tables for returning at the end.
name_table = copy.deepcopy(varfont["name"])
stat_table = _new_empty_STAT_table()
# Generate axis records. Reuse an axis' name ID if it exists, else make a new one.
for axis_name, axis_tag in name_to_tag.items():
stylespace_axis = stylespace_name_to_axis[axis_name]
if axis_name in fvar_name_to_axis:
axis_name_id = fvar_name_to_axis[axis_name].axisNameID
else:
axis_name_id = name_table.addMultilingualName(
stylespace_axis.name.mapping, mac=False
)
axis_record = _new_axis_record(
tag=axis_tag, name_id=axis_name_id, ordering=stylespace_axis.ordering
)
stat_table.table.DesignAxisRecord.Axis.append(axis_record)
# Generate formats 1, 2 and 3.
for axis in stylespace.axes:
for location in axis.locations:
if location.value not in axis_stops[axis.tag]:
continue
axis_value = otTables.AxisValue()
name_id = name_table.addMultilingualName(location.name.mapping, mac=False)
location.fill_in_AxisValue(
axis_value, axis_index=name_to_index[axis.name.default], name_id=name_id
)
stat_table.table.AxisValueArray.AxisValue.append(axis_value)
# Generate format 4.
for named_location in stylespace.locations:
if all(
name_to_tag[k] in axis_stops and v in axis_stops[name_to_tag[k]]
for k, v in named_location.axis_values.items()
):
stat_table.table.Version = 0x00010002
axis_value = otTables.AxisValue()
name_id = name_table.addMultilingualName(
named_location.name.mapping, mac=False
)
named_location.fill_in_AxisValue(
axis_value,
axis_name_to_index=name_to_index,
name_id=name_id,
axis_value_record_type=otTables.AxisValueRecord,
)
stat_table.table.AxisValueArray.AxisValue.append(axis_value)
stat_table.table.ElidedFallbackNameID = stylespace.elided_fallback_name_id
return name_table, stat_table
def _default_name_string(otfont: fontTools.ttLib.TTFont, name_id: int) -> str:
"""Return first name table match for name_id for language 'en'."""
name = otfont["name"].getName(name_id, 3, 1, 0x0409).toUnicode()
if name is not None:
return name
raise ValueError(f"No default Windows record for id {name_id}.")
def _new_empty_STAT_table():
stat_table = fontTools.ttLib.newTable("STAT")
stat_table.table = otTables.STAT()
stat_table.table.Version = 0x00010001
stat_table.table.DesignAxisRecord = otTables.AxisRecordArray()
stat_table.table.DesignAxisRecord.Axis = []
stat_table.table.AxisValueArray = otTables.AxisValueArray()
stat_table.table.AxisValueArray.AxisValue = []
return stat_table
def _new_axis_record(tag: str, name_id: int, ordering: Optional[int]):
if ordering is None:
raise ValueError("ordering must be an integer.")
axis_record = otTables.AxisRecord()
axis_record.AxisTag = fontTools.misc.py23.Tag(tag)
axis_record.AxisNameID = name_id
axis_record.AxisOrdering = ordering
return axis_record
|
rec/BiblioPixel
|
bibliopixel/layout/geometry/segment.py
|
Python
|
mit
| 1,539
| 0.00065
|
from . import strip
class Segment(strip.Strip):
"""Represents an offset, length segment within a strip."""
def __init__(self, strip, length, offset=0):
if offset < 0 or length < 0:
raise ValueError('Segment indices are non-negative.')
if offset + length > len(strip):
raise ValueError('Segment too long.')
self.strip = strip
self.offset = offset
self.length = length
def __getitem__(self, index):
return self.strip[self._fix_index(index)]
def __setitem__(self, index, value):
        self.strip[self._fix_index(index)] = value
def __len__(self):
        return self.length
def next(self, length):
"""Return a new segment starting right after self in the same buffer."""
return Segment(self.strip, length, self.offset + self.length)
def _fix_index(self, index):
if isinstance(index, slice):
raise ValueError('Slicing segments not implemented.')
if index < 0:
index += self.length
if index >= 0 and index < self.length:
return self.offset + index
raise IndexError('Index out of range')
def make_segments(strip, length):
"""Return a list of Segments that evenly split the strip."""
if len(strip) % length:
raise ValueError('The length of strip must be a multiple of length')
s = []
try:
while True:
s.append(s[-1].next(length) if s else Segment(strip, length))
except ValueError:
return s
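# Usage sketch (illustrative, not part of the original file): for a strip of
# length 6, make_segments(strip, 2) yields three Segments covering offsets
# 0-1, 2-3 and 4-5, so segments[1][0] maps to strip[2].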
|
slack-sqlbot/slack-sqlbot
|
slack_sqlbot/urls.py
|
Python
|
mit
| 164
| 0
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^sql/$', 'sqlparser.views.parse_sql'),
)
|
Aerolyzer/Aerolyzer
|
aerolyzer/location.py
|
Python
|
apache-2.0
| 3,932
| 0.007121
|
import urllib2
import json
import sys
import os
import wunderData
def get_coord(exifdict):
'''
Purpose: The purpose of this script is to extract the Latitude and Longitude from the EXIF data
Inputs: exifdict: structure storing the image's EXIF data.
Outputs: coords: A tuple of the Latitude and Longitude in Decimal form
Returns: (lat,lon)
Assumptions: The EXIF data is valid.
'''
values = exifdict['gps gpslatitude'][1:-1].split(", ")
s = values[2]
df = float(values[0])
mf = float(values[1])
smath = s.split("/")
sf = float(smath[0])/float(smath[1])
lat = df + mf/60 + sf/3600
if exifdict['gps gpslatituderef'] == 'S':
lat = lat*(-1)
values = exifdict['gps gpslongitude'][1:-1].split(", ")
s = values[2]
df = float(values[0])
mf = float(values[1])
smath = s.split("/")
sf = float(smath[0])/float(smath[1])
lon = df + mf/60 + sf/3600
if exifdict['gps gpslongituderef'] == 'W':
lon = lon*(-1)
return (lat,lon)
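# Worked example (illustrative, not part of the original file): EXIF GPS of
# 37 deg, 46 min, 30 sec converts to 37 + 46/60 + 30/3600 = 37.775 decimal
# degrees; an 'S' (or 'W') reference then flips the sign.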
def coord_to_zip(coord,googlegeokey):
'''
Purpose: The purpose of this script is to convert Latitude and Longitude to a ZIP Code
    Inputs: coord: tuple holding latitude and longitude, googlegeokey: The Google geocoding API key
Outputs: string of 5 digit long ZIP code.
Returns: zipcode
Assumptions: The EXIF data is valid.
'''
try:
url = "https://maps.googleapis.com/maps/api/geocode/json?latlng="+str(coord[0])+","+str(coord[1])+"&key="+googlegeokey
c = urllib2.urlopen(url)
response = c.read()
parsedResults = json.loads(response)
zipcode = parsedResults['results'][0]['address_components'][-1]['long_name']
except Exception:
print "Unable to retrieve data: ", sys.exc_info()[0]
zipcode = "99999"
finally:
return zipcode
def zip_to_coord(zipcode,googlegeokey):
'''
    Purpose: The purpose of this script is to convert ZIP Code to a Latitude and Longitude
Inputs: zipcode: 5 digit long ZIP code.
Outputs: coord: tuple holding latitude and longitude
Returns: (lat,lon)
Assumptions: The EXIF data is valid.
'''
try:
        url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+zipcode+'&key='+googlegeokey
c = urllib2.urlopen(url)
results = c.read()
parsedResults = json.loads(results)
lat = float(parsedResults['results'][0]['geometry']['location']['lat'])
lon = float(parsedResults['results'][0]['geometry']['location']['lng'])
except Exception:
print "Unable to retrieve data: ", sys.exc_info()[0]
(lat,lon) = (0.0,0.0)
finally:
return (lat,lon)
def sun_position(exifdict):
'''
Purpose: Identify whether an image was taken during sunrise or sunset.
Inputs: exifdict: structure storing the image's EXIF data.
Outputs: string
Returns: sunrise,sunset,night,day
Assumptions: N/A
'''
coord = get_coord(exifdict)
wData = wunderData.get_data(str(coord[0])+","+str(coord[1]))
sunriseTime = wData['sunrise'].split(':')
sunsetTime = wData['sunset'].split(':')
sunriseTarget = (int(sunriseTime[0])*60)+int(sunriseTime[1])
sunsetTarget = (int(sunsetTime[0])*60)+int(sunsetTime[1])
hoursTime = (str(exifdict['exif datetimeoriginal']).split(' '))[1].split(':')
pictureTime = (int(hoursTime[0])*60)+int(hoursTime[1])+int(float(hoursTime[2])/60)
if ((pictureTime >= (sunriseTarget - 15)) & (pictureTime <= (sunriseTarget + 30))):
return 'sunrise'
elif ((pictureTime >= (sunsetTarget - 15)) & (pictureTime <= (sunsetTarget + 30))):
return 'sunset'
elif ((pictureTime > (sunsetTarget + 15))|(pictureTime < (sunriseTarget - 15))):
return 'night'
else:
return 'day'
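# Worked example (illustrative, not part of the original file): with sunrise
# at 06:30 (390 minutes), the 'sunrise' window is minute 375 to 420, so a
# photo taken at 06:45 (405 minutes) is classified as 'sunrise'.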
|
analogue/bravado
|
bravado/swagger_model.py
|
Python
|
bsd-3-clause
| 5,223
| 0.000191
|
# -*- coding: utf-8 -*-
import contextlib
import logging
import os
import os.path
import yaml
from bravado_core.spec import is_yaml
from six.moves import urllib
from six.moves.urllib import parse as urlparse
from bravado.compat import json
from bravado.requests_client import RequestsClient
log = logging.getLogger(__name__)
def is_file_scheme_uri(url):
return urlparse.urlparse(url).scheme == u'file'
class FileEventual(object):
"""Adaptor which supports the :class:`crochet.EventualResult`
interface for retrieving api docs from a local file.
"""
class FileResponse(object):
def __init__(self, data):
self.text = data
self.headers = {}
def json(self):
return json.loads(self.text)
    def __init__(self, path):
self.path = path
self.is_yaml = is_yaml(path)
def get_path(self):
if not self.path.endswith('.json') and not self.is_yaml:
return self.path + '.json'
return self.path
def wait(self, timeout=None):
with contextlib.closing(urllib.request.urlopen(self.get_path())) as fp:
            content = fp.read()
return self.FileResponse(content)
def result(self, *args, **kwargs):
return self.wait(*args, **kwargs)
def cancel(self):
pass
def request(http_client, url, headers):
"""Download and parse JSON from a URL.
:param http_client: a :class:`bravado.http_client.HttpClient`
:param url: url for api docs
:return: an object with a :func`wait` method which returns the api docs
"""
if is_file_scheme_uri(url):
return FileEventual(url)
request_params = {
'method': 'GET',
'url': url,
'headers': headers,
}
return http_client.request(request_params)
class Loader(object):
"""Abstraction for loading Swagger API's.
:param http_client: HTTP client interface.
:type http_client: http_client.HttpClient
:param request_headers: dict of request headers
"""
def __init__(self, http_client, request_headers=None):
self.http_client = http_client
self.request_headers = request_headers or {}
def load_spec(self, spec_url, base_url=None):
"""Load a Swagger Spec from the given URL
:param spec_url: URL to swagger.json
:param base_url: TODO: need this?
:returns: json spec in dict form
"""
response = request(
self.http_client,
spec_url,
self.request_headers,
).result()
content_type = response.headers.get('content-type', '').lower()
if is_yaml(spec_url, content_type):
return self.load_yaml(response.text)
else:
return response.json()
def load_yaml(self, text):
"""Load a YAML Swagger spec from the given string, transforming
integer response status codes to strings. This is to keep
compatibility with the existing YAML spec examples in
https://github.com/OAI/OpenAPI-Specification/tree/master/examples/v2.0/yaml
:param text: String from which to parse the YAML.
:type text: basestring
:return: Python dictionary representing the spec.
:raise: yaml.parser.ParserError: If the text is not valid YAML.
"""
data = yaml.safe_load(text)
for path, methods in iter(data.get('paths', {}).items()):
for method, operation in iter(methods.items()):
if 'responses' in operation:
operation['responses'] = dict(
(str(code), response)
for code, response in iter(
operation['responses'].items()
)
)
return data
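# Illustrative behavior of load_yaml (not part of the original file): a
# response code parsed from YAML as the integer 200, e.g.
# {'responses': {200: {}}}, is returned as {'responses': {'200': {}}}.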
# TODO: Adding the file scheme here just adds complexity to request()
# Is there a better way to handle this?
def load_file(spec_file, http_client=None):
"""Loads a spec file
:param spec_file: Path to swagger.json.
:param http_client: HTTP client interface.
:return: validated json spec in dict form
:raise: IOError: On error reading swagger.json.
"""
file_path = os.path.abspath(spec_file)
url = urlparse.urljoin(u'file:', urllib.request.pathname2url(file_path))
# When loading from files, everything is relative to the spec file
dir_path = os.path.dirname(file_path)
base_url = urlparse.urljoin(u'file:', urllib.request.pathname2url(dir_path))
return load_url(url, http_client=http_client, base_url=base_url)
def load_url(spec_url, http_client=None, base_url=None):
"""Loads a Swagger spec.
:param spec_url: URL for swagger.json.
:param http_client: HTTP client interface.
:param base_url: Optional URL to be the base URL for finding API
declarations. If not specified, 'basePath' from the
resource listing is used.
:return: validated spec in dict form
:raise: IOError, URLError: On error reading api-docs.
"""
if http_client is None:
http_client = RequestsClient()
loader = Loader(http_client=http_client)
return loader.load_spec(spec_url, base_url=base_url)
|
Tigge/trello-to-web
|
zip.py
|
Python
|
mit
| 659
| 0.007587
|
#! /usr/bin/env python3
import os
import zipfile
import sys
import settings
__author__ = 'tigge'
def main():
zipfilename = os.path.join(settings.get("folder"), settings.get("basename") + ".zip")
zip = zipfile.ZipFile(zipfilename, mode="w", )
for filename in os.listdir(settings.get("folder")):
        print(filename, os.path.basename(zipfilename), filename == os.path.basename(zipfilename))
if not filename.startswith(".t2w-temp-") and filename != os.path.basename(zipfilename):
zip.write(os.path.join(settings.get("folder"), filename), arcname=filename)
zip.close()
if __name__ == "__main__":
sys.exit(main())
|
yanchen036/tensorflow
|
tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
|
Python
|
apache-2.0
| 121,921
| 0.006389
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
  - 1st TPU execution: iterations_per_loop = 4
  - 2nd TPU execution: iterations_per_loop = 4
  - 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
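# Illustrative sketch (not part of the original file) of the docstring's
# steps=10, iterations_per_loop=4 example; _split_steps is a hypothetical
# helper, not part of TPUEstimator:
#   def _split_steps(steps, per_loop):
#       while steps > 0:
#           yield min(steps, per_loop)
#           steps -= per_loop
#   list(_split_steps(10, 4))  # -> [4, 4, 2]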
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increases 1 by default. So, we increase the difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
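# Illustrative arithmetic (not part of the original file): with
# iterations_per_loop=4, Estimator's default +1 plus this op's +3 advances
# the eval step by 4 per Session.run, matching the TPU-side work.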
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for nex
|
sivel/ansible-modules-core
|
network/dellos9/dellos9_command.py
|
Python
|
gpl-3.0
| 6,997
| 0.001143
|
#!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: dellos9_command
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Run commands on remote devices running Dell OS9
description:
- Sends arbitrary commands to a Dell OS9 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos9_config) to configure Dell OS9 devices.
extends_documentation_fragment: dellos9
options:
commands:
description:
- List of commands to send to the remote dellos9 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
  - This module requires the ssh connection rate limit to be increased.
    Use the command I(ip ssh connection-rate-limit 60) to configure it.
    This can be done via the M(dnos_config) module as well.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos9_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS9
dellos9_command:
commands: show version
wait_for: result[0] contains OS9
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos9_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos9_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains OS9
- result[1] contains Loopback
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
  sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
from ansible.module_utils.network import NetworkModule, NetworkError
import ansible.module_utils.dellos9
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def main():
spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = NetworkModule(argument_spec=spec,
connect_on_load=False,
supports_check_mode=True)
commands = module.params['commands']
conditionals = module.params['wait_for'] or list()
warnings = list()
runner = CommandRunner(module)
for cmd in commands:
if module.check_mode and not cmd.startswith('show'):
warnings.append('only show commands are supported when using '
'check mode, not executing `%s`' % cmd)
else:
if cmd.startswith('conf'):
module.fail_json(msg='dellos9_command does not support running '
'config mode commands. Please use '
'dellos9_config instead')
runner.add_command(cmd)
for item in conditionals:
runner.add_conditional(item)
runner.retries = module.params['retries']
runner.interval = module.params['interval']
try:
runner.run()
except FailedConditionsError:
exc = get_exception()
module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc))
result = dict(changed=False)
result['stdout'] = list()
for cmd in commands:
try:
output = runner.get_command(cmd)
except ValueError:
output = 'command not executed due to check_mode, see warnings'
result['stdout'].append(output)
result['warnings'] = warnings
result['stdout_lines'] = list(to_lines(result['stdout']))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
Goodly/TextThresher
|
thresher/migrations/0002_auto_20170607_1544.py
|
Python
|
apache-2.0
| 473
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-07 15:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
('thresher', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='project',
name='pybossa_url',
field=models.CharField(blank=True, default=b'', max_length=200),
),
]
|
ubiquitypress/rua
|
src/core/util.py
|
Python
|
gpl-2.0
| 1,313
| 0
|
from bs4 import BeautifulSoup
from urllib.parse import quote
from core import models
def get_setting(setting_name, setting_group_name, default=None):
try:
        setting = models.Setting.objects.get(
name=setting_name,
group__name=setting_group_name,
)
        return setting.value
except models.Setting.DoesNotExist:
if default:
return default
return ''
def strip_html_tags(raw_html):
return BeautifulSoup(raw_html, "html.parser").get_text()
def add_content_disposition_header(
response,
filename,
disposition='attachment'
):
"""
Add an RFC5987 / RFC6266 compliant Content-Disposition header to an
HttpResponse to tell the browser to save the HTTP response to a file.
Args:
response (django.http.response.HttpResponseBase): the response object.
filename (str): the name that the file should be served under.
disposition (str): the disposition: 'inline' or 'attachment' (default)
"""
try:
filename.encode('ascii')
file_expr = 'filename="{}"'.format(filename)
except UnicodeEncodeError:
file_expr = "filename*=utf-8''{}".format(quote(filename))
response['Content-Disposition'] = f'{disposition}; {file_expr}'
return response
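# Usage sketch (illustrative, not part of the original file): a non-ASCII
# filename falls back to the RFC 5987 encoded form, e.g.
#   add_content_disposition_header(response, 'résumé.pdf')
#   # -> Content-Disposition: attachment; filename*=utf-8''r%C3%A9sum%C3%A9.pdf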
|
smarr/mxtool
|
mx.mx/suite.py
|
Python
|
gpl-2.0
| 4,144
| 0.021477
|
suite = {
"name" : "mx",
"libraries" : {
# ------------- Libraries -------------
"JACOCOAGENT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoagent-0.7.1-1.jar"],
"sha1" : "2f73a645b02e39290e577ce555f00b02004650b0",
},
"JACOCOREPORT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoreport-0.7.1-2.jar"],
"sha1" : "a630436391832d697a12c8f7daef8655d
|
7a1efd2",
},
"FINDBUGS_DIST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/findbugs-3.0
|
.0.zip",
"http://sourceforge.net/projects/findbugs/files/findbugs/3.0.0/findbugs-3.0.0.zip/download",
],
"sha1" : "6e56d67f238dbcd60acb88a81655749aa6419c5b",
},
"SIGTEST" : {
"urls" : [
"http://hg.netbeans.org/binaries/A7674A6D78B7FEA58AF76B357DAE6EA5E3FDFBE9-apitest.jar",
],
"sha1" : "a7674a6d78b7fea58af76b357dae6ea5e3fdfbe9",
},
"CODESNIPPET-DOCLET" : {
"urls" : [
"http://repo1.maven.org/maven2/org/apidesign/javadoc/codesnippet-doclet/0.5/codesnippet-doclet-0.5.jar",
],
"sha1" : "e9f37916a0ee0f2f6dc0c1d4ae0ce6e7c7a6e874",
},
"JUNIT" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11.jar",
],
"sha1" : "4e031bb61df09069aeb2bffb4019e7a5034a4ee0",
"eclipse.container" : "org.eclipse.jdt.junit.JUNIT_CONTAINER/4",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11-sources.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11-sources.jar",
],
"sourceSha1" : "28e0ad201304e4a4abf999ca0570b7cffc352c3c",
"dependencies" : ["HAMCREST"],
"licence" : "CPL",
"maven" : {
"groupId" : "junit",
"artifactId" : "junit",
"version" : "4.11",
}
},
"CHECKSTYLE" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/checkstyle-6.0-all.jar",
"jar:http://sourceforge.net/projects/checkstyle/files/checkstyle/6.0/checkstyle-6.0-bin.zip/download!/checkstyle-6.0/checkstyle-6.0-all.jar",
],
"sha1" : "2bedc7feded58b5fd65595323bfaf7b9bb6a3c7a",
"licence" : "LGPLv21",
"maven" : {
"groupId" : "com.puppycrawl.tools",
"artifactId" : "checkstyle",
"version" : "6.0",
}
},
"HAMCREST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
"sha1" : "42a25dc3219429f0e5d060061f71acb49bf010a0",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3-sources.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3-sources.jar",
],
"sourceSha1" : "1dc37250fbc78e23a65a67fbbaf71d2e9cbc3c0b",
"licence" : "BSD-new",
"maven" : {
"groupId" : "org.hamcrest",
"artifactId" : "hamcrest-core",
"version" : "1.3",
}
},
},
"licenses" : {
"GPLv2-CPE" : {
"name" : "GNU General Public License, version 2, with the Classpath Exception",
"url" : "http://openjdk.java.net/legal/gplv2+ce.html"
},
"BSD-new" : {
"name" : "New BSD License (3-clause BSD license)",
"url" : "http://opensource.org/licenses/BSD-3-Clause"
},
"CPL" : {
"name" : "Common Public License Version 1.0",
"url" : "http://opensource.org/licenses/cpl1.0.txt"
},
"LGPLv21" : {
"name" : "GNU Lesser General Public License, version 2.1",
"url" : "http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html"
},
},
"projects" : {
"com.oracle.mxtool.junit" : {
"subDir" : "java",
"sourceDirs" : ["src"],
"dependencies" : [
"JUNIT",
],
"javaCompliance" : "1.8",
},
},
}
|
karban/agros2d
|
data/scripts/dc_motor_dynamic.py
|
Python
|
gpl-2.0
| 3,460
| 0.010116
|
import numpy as np
import pylab as pl
from scipy.integrate import odeint
from scipy.interpolate import interp1d
phi = [0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 28.0, 32.0, 36.0, 40.0, 44.0, 48.0, 52.0, 56.0, 60.0, 64.0, 68.0, 72.0, 76.0, 80.0, 84.0, 88.0, 92.0, 96.0, 100.0, 104.0, 108.0, 112.0, 116.0, 120.0, 124.0, 128.0, 132.0, 136.0, 140.0, 144.0, 148.0, 152.0, 156.0, 160.0, 164.0, 168.0, 172.0, 176.0, 180.0]
T_magnetic_torque = [-4.2099750000000006e-07, -0.0006306440000000001, -0.0012431405000000002, -0.0027673415, -0.0037627800000000007, -0.004668125, -0.005372255, -0.005509490000000001, -0.006019965000000001, -0.006202910000000001, -0.007951475000000001, -0.009308495000000002, -0.00903189, -0.009031260000000001, -0.011202345, -0.01173942, -0.013634670000000002, -0.013729415, -0.013753075000000002, -0.014419475000000001, -0.0097538, -0.008428175000000001, -0.0028582085000000003, 0.001922431, 0.00836486, 0.010786545, 0.013908825000000001, 0.013557495000000001, 0.013209665, 0.013566455000000002, 0.011872665000000001, 0.011166470000000001, 0.009009595, 0.009028250000000002, 0.009307900000000001, 0.007950670000000002, 0.006194965, 0.0060320750000000005, 0.00558495, 0.0053764550000000005, 0.0046711700000000005, 0.003763025, 0.0026294870000000007, 0.001254253, 0.000597345, -4.944730000000001e-07]
T_torque = [7.600635000000001e-08, -0.00017802715, -0.00043366050000000005, -0.0013786395, -0.002051854, -0.0025863285000000003, -0.0029615285000000003, -0.0029484280000000003, -0.008016085000000001, -0.008393595, -0.01086897, -0.012900475000000002, -0.012870795, -0.01335537, -0.016747500000000002, -0.018461975000000002, -0.022139145000000002, -0.024000515000000004, -0.025957925, -0.030677990000000002, -0.029933050000000006, -0.037302300000000004, -0.03650815, -0.0453334, -0.02398515, -0.012230330000000003, -0.005922595000000001, -0.0013065115, 0.0007364700000000001, 0.0028762475000000003, 0.0035826000000000005, 0.0041284600000000005, 0.0029878625, 0.0038398150000000003, 0.004532675000000001, 0.0039266150000000005, 0.00301847, 0.0031519530000000003, 0.0030171505000000003, 0.0029608460000000005, 0.0025858875000000004, 0.002052134, 0.001297366, 0.0004423615, 0.00016526405, 1.6689750000000002e-08]
# static characteristic
pl.close()
pl.figure(figsize = [8, 5])
pl.plot(phi, T_magnetic_torque, 'b', label="$\mathrm{0.0~A}$")
pl.plot(phi, T_torque, 'r', label="$\mathrm{0.8~A}$")
pl.plot([0, 180], [0, 0], '--k')
pl.xlabel("$\\phi~\mathrm{(deg.)}$")
pl.ylabel("$T~
|
\mathrm{(Nm)}$")
pl.legend(loc="lower right")
fn_chart_static = pythonlab.tempname("png")
pl.savefig(fn_chart_static, dpi=60)
pl.close()
# show in console
pythonlab.image(fn_chart_static)
J = 7.5e-5;
k = 2e-4
T_f = interp1d(phi, T_torque, kind = "linear")
def func(x, t):
dx = [0., 0.]
dx[0] = x[1]
dx[1] = (- T_f((x[0]*180/np.pi) % 180) - k*x[1]) * 1/J
return dx
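# Note (illustrative, not part of the original file): func encodes the
# rigid-body equation of motion J*phi'' = -T(phi) - k*phi' as the
# first-order system x = [phi, phi'] expected by odeint.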
x0 = [np.pi/6, 0]
time = np.linspace(0, 2, 1000)
y = odeint(func, x0, time)
# dynamic characteristic
pl.close()
pl.figure(figsize = [8, 5])
pl.subplot(2,1,1)
pl.plot(time, y[:,0]/np.pi*180 % 180, 'r-')
# pl.xlabel("$t~\mathrm{(s)}$")
pl.ylabel("$\\phi~\mathrm{(deg.)}$")
pl.xlim(0, 1.41)
pl.subplot(2,1,2)
pl.plot(time, y[:,1]/2/np.pi*60, 'r-')
pl.xlabel("$t~\mathrm{(s)}$")
pl.ylabel("$n~\mathrm{(rev./min.)}$")
pl.xlim(0, 1.41)
fn_chart_dynamic = pythonlab.tempname("png")
pl.savefig(fn_chart_dynamic, dpi=60)
pl.close()
# show in console
pythonlab.image(fn_chart_dynamic)
|
eklitzke/icfp08
|
src/constants.py
|
Python
|
isc
| 491
| 0.004073
|
# The docs say the processing time is less than 20 milliseconds
#PROCESSING_TIME = 0.015
PROCESSING_TIME = 0.010
INTERVAL_SCALE = 0.95
# Number of degrees for a small angle... if the angle is smaller than this then
# the rover won't try to turn, to help keep the path straight
SMALL_ANGLE = 7.0
# Ensure that the rover isn't in a hard turn for this kind of angle
SOFT_ANGLE = 15.0
FORCE_TURN_DIST = 40.0
FORCE_TURN_SQ = FORCE_TURN_DIST ** 2
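# (presumably squared distances are compared against FORCE_TURN_SQ so the
# per-check sqrt can be skipped)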
BLOAT = 1.3 # make things 30 percent bigger
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/food/shared_drink_charde.py
|
Python
|
mit
| 446
| 0.047085
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/food/shared_drink_charde.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
|
return result
|
iulian787/spack
|
var/spack/repos/builtin/packages/perl-io-socket-ssl/package.py
|
Python
|
lgpl-2.1
| 1,217
| 0.003287
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import inspect
class PerlIoSocketSsl(PerlPackage):
"""SSL sockets with IO::Socket interface"""
homepage = "http://search.cpan.org/~sullr/IO-Socket-SSL-2.052/lib/IO/Socket/SSL.pod"
url = "http://search.cpan.org/CPAN/authors/id/S/SU/SULLR/IO-Socket-SSL-2.052.tar.gz"
version('2.052', sha256='e4897a9b17cb18a3c44aa683980d52cef534cdfcb8063d6877c879bfa2f26673')
depends_on('perl-net-ssleay', type=('build', 'run'))
def configure(self, spec, prefix):
self.build_method = 'Makefile.PL'
self.build_executable = inspect.getmodule(self).make
# Should I do external tests?
config_answers = ['n\n']
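        # (the single 'n' answer is written to a file and piped to Makefile.PL
        # via stdin below, so the prompt is answered non-interactively)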
        config_answers_filename = 'spack-config.in'
        with open(config_answers_filename, 'w') as f:
f.writelines(config_answers)
with open(config_answers_filename, 'r') as f:
inspect.getmodule(self).perl('Makefile.PL', 'INSTALL_BASE={0}'.
format(prefix), input=f)
|
MrCreosote/kb_read_library_to_file
|
lib/kb_read_library_to_file/kb_read_library_to_fileServer.py
|
Python
|
mit
| 23,263
| 0.00129
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_read_library_to_file'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_read_library_to_file.kb_read_library_to_fileImpl import kb_read_library_to_file
impl_kb_read_library_to_file = kb_read_library_to_file(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
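# Hypothetical usage sketch: json.dumps({'ids': {1, 2}}, cls=JSONObjectEncoder)
# serializes the set as a JSON list, e.g. '{"ids": [1, 2]}'.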
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
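        # NOTE: the arity checks below subtract 1 because ctx is passed as an
        # implicit first argument on top of the JSON-RPC params.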
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
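                # integer levels 1-3 map onto log levels 7-9 (DEBUG..DEBUG3)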
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
|
divio/django-cms
|
cms/utils/compat/__init__.py
|
Python
|
bsd-3-clause
| 545
| 0.00367
|
from platform import python_version
from django import get_version
from distutils.version import LooseVersion
DJANGO_VERSION = get_version()
PYTHON_VERSION = python_version()
# These mean "less than or equal to DJANGO_FOO_BAR"
DJANGO_2_2 = LooseVersion(DJANGO_VERSION) < LooseVersion('3.0')
DJANGO_3_0 = LooseVersion(DJANGO_VERSION) < LooseVersion('3.1')
DJANGO_3_1 = LooseVersion(DJANGO_VERSION) < LooseVersion('3.2')
DJANGO_3_2 = LooseVersion('3.2') <= LooseVersion(DJANGO_VERSION) and LooseVersion(DJANGO_VERSION) < LooseVersion('3.3')
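# For example, with Django 3.1.x installed: DJANGO_2_2 and DJANGO_3_0 are False,
# DJANGO_3_1 is True, and DJANGO_3_2 is False.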
|
blakedewey/nipype
|
nipype/interfaces/fsl/tests/test_auto_ErodeImage.py
|
Python
|
bsd-3-clause
| 1,597
| 0.028178
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import ErodeImage
def test_ErodeImage_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
kernel_file=dict(argstr='%s',
position=5,
xor=['kernel_size'],
),
kernel_shape=dict(argstr='-kernel %s',
position=4,
),
kernel_size=dict(argstr='%.4f',
position=5,
xor=['kernel_file'],
),
minimum_filter=dict(argstr='%s',
position=6,
usedefault=True,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
    output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = ErodeImage.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ErodeImage_outputs():
output_map = dict(out_file=dict(),
)
outputs = ErodeImage.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
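# Note: these generator-style tests follow the nose convention, where each
# yielded (assert_equal, actual, expected) tuple is run as a separate test case.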
|