import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
with open('LINKS.md', 'r') as file_:
config = yaml.load(file_, Loader=Loader)
for link in config['links']:
print("* [{}]({}) - {}".format(link.get('title', 'LINK'), link.get('url', ''), link.get('description', '')))
|
"""
This batch script executes an experiment according to a config file.
"""
if __name__ == "__main__":
pass
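# A minimal sketch of what the body might look like (assumptions: the config
# path is passed as the first command-line argument, it is a YAML file, and a
# hypothetical run_experiment() does the actual work):
#
#   import sys
#   import yaml
#
#   if __name__ == "__main__":
#       with open(sys.argv[1]) as f:
#           config = yaml.safe_load(f)
#       run_experiment(config)  # hypothetical entry point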
|
# sometimes the standard random is overwritten by numpy.random
# so random is renamed as std_random here
import numpy as np
from warp import *
import random as std_random
from scipy.constants import k, m_e
class ParticleReflector:
"""Class that handles particle reflection.
To use:
To define a ParticleReflector instance based on the installed scraper named "scraper"
and the conductor named "collector" with the reflected species being "rc_electrons":
>>> collector_reflector = ParticleReflector(scraper=scraper, conductor=collector,
spref=rc_electrons)
The defined reflector must be installed, otherwise it will NOT be used:
>>> installparticlereflector(collector_reflector)
To define the specular reflection probability to be 0.2 (default 0),
and the diffuse reflection probability to be 0.3 (default 0):
>>> collector_reflector = ParticleReflector(scraper=scraper, conductor=collector,
spref=rc_electrons,
srefprob=0.2, drefprob=0.3)
To only allow species "e1" and "e2" to be reflected
(by default, all defined species can be reflected):
>>> collector_reflector = ParticleReflector(scraper=scraper, conductor=collector,
spref=rc_electrons,
spinc=["e1", "e2"],
srefprob=0.2, drefprob=0.3)
"""
def __init__(self, scraper=None, conductor=None, spref=None, spinc=None,
srefprob=0., drefprob=0., refscheme="rand_angle", top=top):
"""Initialize a particle reflector.
Args:
scraper: Warp's scraper instance (must be specified).
conductor: Warp's conductor instance to be treated as reflector (must be specified).
spref: Reflected species (must be specified).
spinc: A list of incident species that are allowed to be reflected.
If not specified, all defined species are allowed to be reflected.
srefprob: specular reflection probability (default 0)
drefprob: diffuse reflection probability (default 0)
refscheme: Scheme of assigning velocity components for reflected particles.
Available options are "rand_angle" (default) and "uniform".
"rand_angle" randomizes the angles in the spherical coordinate system
for the reflected particles.
"uniform" first assigns a random fraction (uniformly distributed between 0 and 1)
of the particle's total kinetic energy to the normal component, and then
randomly distributes the rest of the kinetic energy to the two tangent components.
top: Warp's top module
"""
if not isinstance(scraper, ParticleScraper):
raise RuntimeError("scraper must be defined")
else:
if not scraper.lcollectlpdata:
raise RuntimeError("lcollectlpdata flag must be True in scraper")
if not scraper.lsaveintercept:
raise RuntimeError("lsaveintercept flag must be True in scraper")
if not conductor:
raise RuntimeError("conductor must be defined")
else:
if not conductor in scraper.conductors:
raise RuntimeError("conductor is not registered in scraper")
if not spref:
raise RuntimeError("reflected species (spref) must be defined")
if srefprob < 0.:
raise RuntimeError("srefprob cannot be negative")
if drefprob < 0.:
raise RuntimeError("drefprob cannot be negative")
if srefprob + drefprob > 1.:
raise RuntimeError("sum of srefprob and drefprob cannot exceed 1")
self._top = top
self._conductor = conductor
self._spref = spref
self._jsref = spref.js
self._srefprob = srefprob
self._drefprob = drefprob
self._totrefprob = srefprob + drefprob
self._srefprobratio = self._srefprob/(self._totrefprob+1e-14)
if spinc is None: # consider all sp as incident sp if spinc not defined
self._jsinc = range(top.ns)
else:
try:
self._jsinc = [s.js for s in spinc]
except (TypeError, AttributeError):  # spinc is a single species, not a list
self._jsinc = [spinc.js]
if refscheme == "rand_angle":
self._refscheme = 0
elif refscheme == "uniform":
self._refscheme = 1
else:
raise RuntimeError("Illegal refscheme value [{}]".format(refscheme))
self._nsinc = len(self._jsinc)
self._npslost_cumulative = np.zeros((self._nsinc,), dtype=np.uint64)
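# Fractional reflected-particle counts carried over between steps so that
# the requested reflection probabilities are honoured on average.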
self._nps_sref_res = np.zeros((self._nsinc,), dtype=np.float64)
self._nps_dref_res = np.zeros((self._nsinc,), dtype=np.float64)
def inject_particles(self):
"""Inject reflected particles.
This function is passed to Warp through the interface "installparticlereflector",
which wraps Warp's "installuserinjection" function.
"""
for i, js in enumerate(self._jsinc):
# skip if no particle lost in this step
npslost = int(self._top.npslost[js] - self._npslost_cumulative[i]) # num of lost js particles for this step
if npslost == 0: continue
istart = int(self._npslost_cumulative[i] + self._top.inslost[js]-1)
iend = istart + npslost
plostindx = np.arange(istart, iend)
# get a local reference
# initial coordinates of particles at this step
xpold = self._top.pidlost[plostindx, 0]
ypold = self._top.pidlost[plostindx, 1]
zpold = self._top.pidlost[plostindx, 2]
# initial velocities of particles at this step
uxpold = self._top.uxplost[plostindx]
uypold = self._top.uyplost[plostindx]
uzpold = self._top.uzplost[plostindx]
# time step
dt = self._top.dt
# new position (inside conductor)
xpinsi = uxpold*dt + xpold
ypinsi = uypold*dt + ypold
zpinsi = uzpold*dt + zpold
# particles scraped by this conductor
cinside = nint(self._conductor.isinside(xpinsi, ypinsi, zpinsi).isinside)
cplostindx = compress(cinside, plostindx)
cnpslost = len(cplostindx)
# floating total num of particles to be reflected
nps_totref = cnpslost*self._totrefprob
# floating num of particles to be specularly and diffusively reflected
nps_sref = nps_totref*self._srefprobratio
nps_dref = nps_totref-nps_sref
nps_sref_inj = int(nps_sref + self._nps_sref_res[i] + np.random.rand())
nps_dref_inj = int(nps_dref + self._nps_dref_res[i] + np.random.rand())
# if total num of particles to be injected exceeds total num of lost particles this step
# then reduce num of particles to be injected
while nps_sref_inj + nps_dref_inj > cnpslost:
if np.random.rand() < self._srefprobratio:
nps_sref_inj -= 1
else:
nps_dref_inj -= 1
# accumulate the residual number of particles
# to correct the number of particles to be injected in the future
self._nps_sref_res[i] += (nps_sref - nps_sref_inj)
self._nps_dref_res[i] += (nps_dref - nps_dref_inj)
if nps_sref_inj+nps_dref_inj > 0:
# The current strategy is the fastest (at least for array of range(istart, iend))
# alternatives purely based on numpy could be:
# rand_indx = np.random.choice(np.arange(istart, iend), nps_sref_inj+nps_dref_inj, replace=False)
# or
# rand_indx = np.random.permutation(np.arange(istart, iend))[:nps_sref_inj+nps_dref_inj]
# the two numpy strategies are essentially the same, but slower than the following implementation
rand_indx = np.array(std_random.sample(range(cnpslost), nps_sref_inj+nps_dref_inj))
if nps_sref_inj > 0: # specular reflection
self._specular_reflection(cplostindx[rand_indx[:nps_sref_inj]])
if nps_dref_inj > 0: # diffuse reflection
self._diffuse_reflection(cplostindx[rand_indx[nps_sref_inj:]])
# self._conductor.emitparticles_data.append(array([top.time,
# totemit,
# top.dt,
# self.inter[js]['emitted_species'][ics][ie].jslist[0],
# totabsorb]))
self._conductor.emitparticles_data.append(array([self._top.time,
self._top.dt,
js,
nps_sref_inj*self._top.pgroup.sq[self._jsref]*self._top.pgroup.sw[self._jsref],
nps_dref_inj*self._top.pgroup.sq[self._jsref]*self._top.pgroup.sw[self._jsref]]))
self._npslost_cumulative[i] = self._top.npslost[js]
return
def _specular_reflection(self, selected_lpid):
"""Perform specular reflection.
Args:
selected_lpid: A list of particle id selected to be reflected specularly.
"""
pos_intsec = self._get_intersection(selected_lpid)
nvec = self._get_surface_normal(selected_lpid)
vel_inc = self._get_incident_velocity(selected_lpid)
# velocities of reflected particles
vel_ref = self._vel_specular_reflection(nvec, vel_inc)
t_left = self._get_time_left(selected_lpid)
# coordinates of reflected particles
pos_ref = self._pos_reflection(t_left, pos_intsec, vel_ref)
# add the new reflected particles to "reflected species"
self._spref.addparticles(x=pos_ref[:,0], y=pos_ref[:,1], z=pos_ref[:,2],
vx=vel_ref[:,0], vy=vel_ref[:,1], vz=vel_ref[:,2])
return
def _diffuse_reflection(self, selected_lpid):
"""Perform diffuse reflection.
Args:
selected_lpid: A list of particle id selected to be reflected diffusively.
"""
pos_intsec = self._get_intersection(selected_lpid)
nvec = self._get_surface_normal(selected_lpid)
vel_inc = self._get_incident_velocity(selected_lpid)
# velocities of reflected particles
vel_ref = self._vel_diffuse_reflection(nvec, vel_inc)
t_left = self._get_time_left(selected_lpid)
# coordinates of reflected particles
pos_ref = self._pos_reflection(t_left, pos_intsec, vel_ref)
# add the new reflected particles to "reflected species"
self._spref.addparticles(x=pos_ref[:,0], y=pos_ref[:,1], z=pos_ref[:,2],
vx=vel_ref[:,0], vy=vel_ref[:,1], vz=vel_ref[:,2])
return
def _pos_reflection(self, t_left, pos_intsec, v_re):
"""Calculate reflected particles' new positions
Args:
t_left: A list of time left for particles after they hit the reflector.
pos_intsec: A list of position coordinates where particles hit the reflector.
v_re: A list of reflected particles' new velocities.
Return:
pos_re: Reflected particles' new positions.
"""
pos_re = pos_intsec + v_re*t_left
return pos_re
def _vel_specular_reflection(self, nvec, v_in):
"""Calculate new velocities for specularly reflected particles.
Args:
nvec: Normal vectors for the reflector surfaces at which particles hit.
v_in: Velocities of incident particles.
Return:
v_re: New velocities of reflected particles.
"""
size = v_in.shape[0]
v_in_n = np.zeros_like(v_in)
for i in range(size):
dotprod = np.dot(v_in[i,:], nvec[i,:])
v_in_n[i] = dotprod*nvec[i,:]
v_in_t = v_in - v_in_n
v_re_n = -v_in_n
v_re_t = v_in_t
v_re = v_re_n + v_re_t
return v_re
def _vel_diffuse_reflection(self, nvec, v_in):
"""Calculate new velocities for diffusively reflected particles.
Args:
nvec: Normal vectors for the reflector surfaces at which particles hit.
v_in: Velocities of incident particles.
Return:
v_re: New velocities of reflected particles.
"""
size = v_in.shape[0]
v_re = np.zeros_like(v_in)
###############################################
# redistribute post-kinetic energy #
###############################################
if self._refscheme == 0:
# S0: randomize velocity spherical coordinate
v_mag = np.sqrt(v_in[:,0]**2 + v_in[:,1]**2 + v_in[:,2]**2)
# spherical coordinates theta [0~pi/2], phi [0~2*pi]
theta = np.random.rand(size)*0.5*np.pi
phi = np.random.rand(size)*2.0*np.pi
# velocity aligned with the conductor coordinate system
vn = v_mag*np.cos(theta) # normal component
vt1 = v_mag*np.sin(theta)
vt2 = vt1*np.sin(phi) # 2nd tangent component
vt1[:] *= np.cos(phi) # 1st tangent component
elif self._refscheme == 1:
# S1: randomize En with a uniform distribution
vsq = v_in[:,0]**2 + v_in[:,1]**2 + v_in[:,2]**2
vn = vsq*np.random.rand(size)
vt1 = vsq - vn
vt2 = vt1*np.random.rand(size)
vt1 = vt1 - vt2
vn = np.sqrt(vn)
vt1 = np.sqrt(vt1)
vt2 = np.sqrt(vt2)
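# Both schemes conserve the particle's kinetic energy:
# vn**2 + vt1**2 + vt2**2 equals the incident speed squared;
# only the direction of the velocity is randomized.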
###############################################
# convert to Cartesian coordinate system #
###############################################
t1, t2 = self._get_tangent_from_normal(nvec)
v_re[:, 0] = vn*nvec[:, 0] + vt1*t1[:, 0] + vt2*t2[:, 0]
v_re[:, 1] = vn*nvec[:, 1] + vt1*t1[:, 1] + vt2*t2[:, 1]
v_re[:, 2] = vn*nvec[:, 2] + vt1*t1[:, 2] + vt2*t2[:, 2]
return v_re
def _get_intersection(self, selected_lpid):
# particles' intersections with the conductor
xx = self._top.xplost[selected_lpid]
yy = self._top.yplost[selected_lpid]
zz = self._top.zplost[selected_lpid]
return np.array([xx, yy, zz]).transpose()
def _get_surface_normal(self, selected_lpid):
theta = self._top.pidlost[selected_lpid, -3]
phi = self._top.pidlost[selected_lpid, -2]
# convert spherical coordinate angles to normal vector
return self._normal_spherical_to_cartesian(theta, phi)
def _get_incident_velocity(self, selected_lpid):
vx = self._top.uxplost[selected_lpid]
vy = self._top.uyplost[selected_lpid]
vz = self._top.uzplost[selected_lpid]
return np.array([vx, vy, vz]).transpose()
def _get_time_left(self, selected_lpid):
t_left = self._top.time - self._top.pidlost[selected_lpid, -4]
return t_left.reshape(len(t_left), 1)
def _normal_spherical_to_cartesian(self, theta, phi):
"""Convert the normal vector in spherical coordinate system to Cartesian.
The surface normal obtained from Warp is in spherical coordinate system.
This is converted to Cartesian coordinate system in order to easily
compute the reflected particles' new velocities and positions.
"""
nvec = np.zeros((len(theta), 3))
nvec[:, 0] = np.sin(theta)*np.cos(phi)
nvec[:, 1] = np.sin(theta)*np.sin(phi)
nvec[:, 2] = np.cos(theta)
return nvec
def _get_tangent_from_normal(self, nvec):
"""Compute two tangent vectors based on the surface normal.
This is needed when converting the velocties and positions
of particles in the surface coordinate system back to
the lab coordinate system.
"""
a = np.array([0., 0., 1.])
b = np.array([0., 1., 0.])
t1v = np.zeros_like(nvec)
t2v = np.zeros_like(nvec)
for i in range(nvec.shape[0]):
n = nvec[i,:]
c1 = np.cross(n, a)
c2 = np.cross(n, b)
norm_c1 = np.linalg.norm(c1)
norm_c2 = np.linalg.norm(c2)
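# Pick the cross product with the larger norm so the tangent stays well
# defined when the normal is nearly parallel to one of the reference axes.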
if (norm_c1 > norm_c2):
t1 = c1/norm_c1
else:
t1 = c2/norm_c2
t2 = np.cross(n, t1) # second tangent
t1v[i,:] = t1
t2v[i,:] = t2
return t1v, t2v
# end of class ParticleReflector
def installparticlereflector(pr):
"""Install particle reflector.
Args:
pr: Particle reflector instance already defined.
"""
if not isinstance(pr, ParticleReflector):
raise RuntimeError("Illegal ParticleReflector instance")
installuserinjection(pr.inject_particles)
def analyze_collected_charge(top, solver):
"""Analyze charges collected by all conductors.
Args:
top: Warp's top module.
solver: Warp's field solver.
Return:
collected_charge: A dict of collected particle info for all conductors and all species,
indexed as collected_charge[conductor_id][species_id].
"""
nspecies = top.ns
cond_ids = []
cond_objs = []
collected_charge = {}
for cond in solver.conductordatalist:
cond_objs.append(cond[0])
cond_ids.append(cond[0].condid)
for i, ids in enumerate(cond_ids):
collected_charge[ids] = [[] for _ in range(nspecies)]
for js in range(nspecies):
jsid = top.pgroup.sid[js]
indx = np.ndarray.astype(cond_objs[i].lostparticles_data[:, 3] + 1e-6, 'int') == jsid
collected_charge[ids][js] = np.copy(cond_objs[i].lostparticles_data[indx, 0:4])
return collected_charge
def analyze_reflected_charge(top, reflectors, comm_world=None):
"""Analyze charges due to reflection.
Args:
top: Warp's top module.
reflectors: A list of particle reflectors.
comm_world: MPI communicator, must be passed in if running in parallel.
Return:
reflected_charge: A dict of reflected-particle charge info
for the corresponding reflector and species,
indexed as reflected_charge[reflector's_cond_id].
"""
cond_ids = []
cond_objs = []
reflected_charge = {}
for reflector in reflectors:
cond_objs.append(reflector._conductor)
cond_ids.append(reflector._conductor.condid)
for i, ids in enumerate(cond_ids):
reflected_charge[ids] = np.copy(cond_objs[i].emitparticles_data[:, 0:5])
if comm_world:
all_reflected_charge = {}
for ids in cond_ids:
all_reflected_charge[ids] = comm_world.gather(reflected_charge[ids], root=0)
if comm_world.rank == 0:
all_reflected_charge[ids] = np.vstack(all_reflected_charge[ids])
return all_reflected_charge
else:
return reflected_charge
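# Hypothetical usage sketch (assumes a Warp run with a ParticleScraper "scraper"
# that has lcollectlpdata and lsaveintercept enabled, a conductor "collector",
# and a reflected species "rc_electrons" already defined):
#
#   collector_reflector = ParticleReflector(scraper=scraper, conductor=collector,
#                                           spref=rc_electrons,
#                                           srefprob=0.2, drefprob=0.3)
#   installparticlereflector(collector_reflector)
#   step(1000)  # reflection is applied through the user-injection hook each step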
|
#
# Copyright (c) 2017 Stefan Seefeld
# All rights reserved.
#
# This file is part of Faber. It is made available under the
# Boost Software License, Version 1.0.
# (Consult LICENSE or http://www.boost.org/LICENSE_1_0.txt)
from ..artefact import source
import yaml
class node(object):
"""Convert a (nested) dictionary into an ordinary object."""
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [node(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, node(b) if isinstance(b, dict) else b)
def load_info(filename):
"""Load a package metadata file."""
with open(filename) as f:
data = yaml.safe_load(f)
return node(data)
class info(source):
def __init__(self, name):
source.__init__(self, name)
self.doc = load_info(self._filename)
|
from main import *
from transaction import *
from bci import *
from deterministic import *
# Takes privkey, address, value (satoshis), fee (satoshis)
def send(frm, to, value, fee=1000):
u = unspent(privtoaddr(frm))
u2 = select(u, value+fee)
argz = u2 + [to+':'+str(value), privtoaddr(frm), fee]
tx = mksend(*argz)
tx2 = signall(tx, frm)
pushtx(tx2)
def bip32_hdm_script(*args):
if len(args) == 3:
keys, req, path = args
else:
i, keys, path = 0, [], []
while len(args[i]) > 40:
keys.append(args[i])
i += 1
req = int(args[i])
path = list(map(int, args[i+1:]))
pubs = sorted(map(lambda x: bip32_descend(x, path), keys))
return mk_multisig_script(pubs, req)
def bip32_hdm_addr(*args):
return scriptaddr(bip32_hdm_script(*args))
def setup_coinvault_tx(tx, script):
txobj = deserialize(tx)
N = deserialize_script(script)[-2]
for inp in txobj["ins"]:
inp["script"] = serialize_script([None] * (N+1) + [script])
return serialize(txobj)
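# Hypothetical usage sketch (xpub1..xpub3 are BIP32 extended public keys):
#   script = bip32_hdm_script([xpub1, xpub2, xpub3], 2, [0, 5])
#   addr = bip32_hdm_addr([xpub1, xpub2, xpub3], 2, [0, 5])
# builds a 2-of-3 multisig script/P2SH address from the keys descended
# along the derivation path [0, 5].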
|
import paho.mqtt.client as mqtt
import cv2
import logging
from von.singleton import Singleton
from von.terminal_font import TerminalFont
class MQTT_ConnectionConfig:
broker = 'voicevon.vicp.io'
port = 1883
uid = ''
password = ''
client_id = ''
class MqttConfigableItem():
topic = ''
type = ''
value = ''
def __init__(self,topic,value,type='string'):
self.topic = topic
self.type = type
self.value = value
class MqttHelper(metaclass=Singleton):
# class MqttHelper(mqtt.Client, metaclass=Singleton):
def __init__(self):
# super(MqttHelper, self).__init__()
self.__is_connected = False
self.client = None
# self.client = mqtt
# self.client = mqtt.Client(client_id) # create new instance
self.__YELLOW = TerminalFont.Color.Fore.yellow
self.__GREEN = TerminalFont.Color.Fore.green
self.__RED = TerminalFont.Color.Fore.red
self.__RESET = TerminalFont.Color.Control.reset
# self.mqtt_system_turn_on = True
self.__on_message_callbacks = []
self.__configable_vars = []
self.__counter = 0
def on_connect(self, client, userdata, flags,rc):
'''
rc == return code.
'''
if rc==0:
self.connected_flag=True #set flag
logging.info(self.__GREEN + "MQTT connected OK. Starting subscribe. Returned code=%s" + self.__RESET, rc)
#client.subscribe(topic)
self.auto_subscribe()
else:
print("Bad connection Returned code= ",rc)
# def connect_to_broker(self, client_id, broker, port, uid, psw):
def connect_to_broker(self, config:MQTT_ConnectionConfig):
self.client = mqtt.Client(config.client_id) # create new instance
self.client.on_connect = self.on_connect # binding call back function
self.client.username_pw_set(username=config.uid, password=config.password)
self.client.connect(config.broker, port=config.port)
if self.client.is_connected():
print(self.__GREEN + '[Info]: MQTT has connected to: %s' % config.broker + self.__RESET)
else:
print(self.__RED + '[Warn]: MQTT has NOT connected to: %s yet; auto-reconnect keeps trying in the background.' % config.broker + self.__RESET)
self.client.on_message = self.__on_message
self.__do_debug_print_out = False
#self.client.loop_forever()
self.client.loop_start()
# self.client.loop_stop()
return self.client
def append_on_message_callback(self, callback, do_debug_print_out=False):
'''
Will be called back on any received message,
regardless of its topic.
'''
self.__on_message_callbacks.append(callback)
self.__do_debug_print_out = do_debug_print_out
def append_configable_var(self, var):
self.__configable_vars.append(var)
def subscribe(self, topic, qos=0):
self.client.subscribe(topic, qos)
def subscribe_with_var(self, var, qos=1, space_len=0):
'''
Recursively search all members of var and its children.
If a member (or child, grandchild) is of type 'MqttConfigableItem',
subscribe to it using that member's 'topic'.
'''
# TODO: skip built-in methods
if space_len / 8 >= 3:
return
#self.__counter += 1
#print('oooo', self.__counter,var)
target_type_name = 'MqttConfigableItem'
for this_item in dir(var):
if this_item[:1] != '_':
attr = getattr(var,this_item)
type_name =type(attr).__name__
# space = ' ' * space_len
if type_name == target_type_name:
# For better understanding, we rename attr.
configable_item = attr
# print ('aaaa', space + configable_item, type_name)
for type_value_topic in dir(configable_item):
# print('bbbb',type_value_topic)
if type_value_topic == 'topic':
topic_string = getattr(configable_item,type_value_topic)
# print('cccc', type_value_topic,topic_string)
self.client.subscribe(topic_string,qos)
print('MQTT subscribed: Topic= ', topic_string)
else:
self.subscribe_with_var(attr, qos, space_len + 4)
def auto_subscribe(self, qos=1, space_len=0):
'''
call append_configable_var() in advance.
'''
# target_type_name = 'MqttConfigableItem'
for var in self.__configable_vars:
self.subscribe_with_var(var)
def update_leaf_by_topic(self, root_var, topic, payload, space_len=0):
'''
Recursively search all members of root_var and its children.
If a member (or child, grandchild) is of type 'MqttConfigableItem'
and its 'topic' matches the given topic,
update that member's value to the payload.
'''
if space_len / 8 >= 3:
return
#self.__counter += 1
#print(self.__counter, root_var)
target_type_name = 'MqttConfigableItem'
for this_item in dir(root_var):
if this_item[:1] != '_':
attr = getattr(root_var,this_item)
type_name =type(attr).__name__
# space = ' ' * space_len
if type_name == target_type_name:
# For better understanding, we rename attr.
configable_item = attr
# print ('aaaa', space + configable_item, type_name)
for type_value_topic in dir(configable_item):
# print('bbbb',type_value_topic)
if type_value_topic == 'topic':
topic_string = getattr(configable_item,type_value_topic)
# print('cccc', type_value_topic,topic_string)
if topic_string == topic:
# print('ffff',type_value_topic,topic_string)
# TODO: type checking here.
setattr(configable_item,'value',payload)
print('Configable item is updated from MQTT server, topic=%s, value=%s' % (topic, payload))
else:
self.update_leaf_by_topic(attr, topic, payload, space_len + 8)
def update_from_topic(self, topic, payload, space_len=3):
'''
call append_configable_var() in advance.
'''
self.__counter =0
for var in self.__configable_vars:
self.update_leaf_by_topic(var, topic, payload)
print('======================================== update_from_topic() is Done!')
def __on_message(self, client, userdata, message):
#self.__do_debug_print_out = True
if self.__do_debug_print_out:
#print("MQTT message received ", str(message.payload.decode("utf-8")))
print("MQTT message topic=", message.topic)
print('MQTT message payload=', message.payload)
print("MQTT message qos=", message.qos)
print("MQTT message retain flag=", message.retain)
payload = str(message.payload.decode("utf-8"))
#print('ppppppppppppppppppppppppp',topic, payload)
#Solution A:
for invoking in self.__on_message_callbacks:
invoking(message.topic, payload)
#Solution B:
#logging.info('payload %s', payload)
self.update_from_topic(message.topic, payload)
def publish_init(self):
# traverse Json file, publish all elements to broker with default values
pass
def publish_cv_image(self, topic, cv_image, retain=True):
# encode the OpenCV image as JPEG and publish the raw bytes as the MQTT payload
is_success, img_encode = cv2.imencode(".jpg", cv_image)
if is_success:
img_pub = img_encode.tobytes()
self.client.publish(topic, img_pub, retain=retain)
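# Hypothetical usage with the module-level helper defined below:
#   ok, frame = cv2.VideoCapture(0).read()
#   if ok:
#       g_mqtt.publish_cv_image('camera/frame', frame)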
def publish_file_image(self, topic, file_name, retain=True):
with open(file_name, 'rb') as f:
byte_im = f.read()
self.client.publish(topic, byte_im, retain=retain)
def publish(self, topic, value):
self.client.publish(topic, value, qos=2, retain =True)
g_mqtt = MqttHelper()
if __name__ == "__main__":
class mqtt_config_test:
right = MqttConfigableItem('gobot/test/right',1)
left = MqttConfigableItem('gobot/test/left',2)
hello = MqttConfigableItem('gobot/test/hello','Hello World')
# put this line to your system_setup()
config = MQTT_ConnectionConfig()
config.uid = ''
config.password = ''
g_mqtt.connect_to_broker(config)
test_id =2
if test_id ==1:
# put this line to anywhere.
g_mqtt.publish('test/test1/test2', 1)
if test_id ==2:
g_mqtt.append_configable_var(mqtt_config_test)
g_mqtt.update_from_topic('gobot/test/hello', 'aaaabbbb')
print (mqtt_config_test.hello.value)
|
#!/usr/bin/env python
"""
Example usage of 'print_container', a tool to print
any layout in a non-interactive way.
"""
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea
print_container(
Frame(
TextArea(text="Hello world!\n"),
title="Stage: parse",
)
)
|
from abc import ABC
from httpx import Response
from starlette.status import HTTP_200_OK
from app.db.repositories import RefreshSessionsRepository
from app.schemas.authentication import AuthenticationResult
from app.services.authentication.cookie import REFRESH_TOKEN_COOKIE_KEY
from ...base import BaseTestRoute
__all__ = ['BaseTestAuthRoute']
class BaseTestAuthRoute(BaseTestRoute, ABC):
def test_response(self, response: Response): # noqa Method may be 'static'
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert 'user' in response_json
assert 'tokens' in response_json
def test_setting_refresh_token_cookie(self, response: Response): # noqa Method may be 'static'
refresh_token_in_cookie = response.cookies[REFRESH_TOKEN_COOKIE_KEY]
refresh_token_in_response = AuthenticationResult(**response.json()).refresh_token
assert refresh_token_in_cookie == refresh_token_in_response
async def test_creating_refresh_session_in_db( # noqa Method may be 'static'
self,
response: Response,
test_refresh_sessions_repository: RefreshSessionsRepository
):
refresh_token = response.cookies[REFRESH_TOKEN_COOKIE_KEY]
assert await test_refresh_sessions_repository.fetch_by_refresh_token(refresh_token)
|
from typing import Union
def get_first_line(o, default_val: str) -> Union[str, None]:
"""
Get first line for a pydoc string
:param o: object which is documented (class or function)
:param default_val: value to return if there is no documentation
:return: the first line which is not whitespace
"""
doc: Union[str, None] = o.__doc__
if doc is None:
return default_val
lines = doc.split("\n")
for line in lines:
if line == "" or line.isspace():
continue
return line.strip()
return default_val
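# Example: applied to itself, the function returns the first non-blank
# docstring line:
#   get_first_line(get_first_line, "n/a")  # -> "Get first line for a pydoc string"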
|
# --------------------------------------------------------------------------
# This extension generates a customizable string of page navigation links
# for index pages.
#
# The links can be accessed in templates as:
#
# {{ paging }}
#
# Default settings can be overridden by including a 'paging' dictionary in
# the site's config.py file containing one or more of the following options:
#
# paging = {
# 'first': 'First', # text for link to first page
# 'last': 'Last', # text for link to last page
# 'prev': 'Prev', # text for link to previous page
# 'next': 'Next', # text for link to next page
# 'delta': 2, # number of neighbouring pages to include
# 'multiples': 2, # number of larger/smaller multiples
# 'multiple': 10, # link to page numbers in multiples of...
# }
#
# Author: Darren Mulholland <darren@mulholland.xyz>
# License: Public Domain
# --------------------------------------------------------------------------
from ark import hooks, site
# Register a callback on the 'render_page' event hook to generate our
# string of page navigation links and add it to the page object.
@hooks.register('render_page')
def add_paging_links(page):
if page['is_paged']:
page['paging'] = generate_paging_links(
page['slugs'][:-1],
page['page'],
page['total']
)
# Generates a string of page navigation links.
def generate_paging_links(slugs, page_number, total_pages):
# Default settings can be overridden in the site's configuration file.
data = {
'first': 'First',
'last': 'Last',
'prev': 'Prev',
'next': 'Next',
'delta': 2,
'multiples': 2,
'multiple': 10,
}
data.update(site.config('paging', {}))
# Start and end points for the sequence of numbered links.
start = page_number - data['delta']
end = page_number + data['delta']
if start < 1:
start = 1
end = 1 + 2 * data['delta']
if end > total_pages:
start = total_pages - 2 * data['delta']
end = total_pages
if start < 1:
start = 1
out = []
# First page link.
if start > 1:
out.append("<a class='first' href='%s'>%s</a>" % (
site.paged_url(slugs, 1, total_pages),
data['first']
))
# Previous page link.
if page_number > 1:
out.append("<a class='prev' href='%s'>%s</a>" % (
site.paged_url(slugs, page_number - 1, total_pages),
data['prev']
))
# Smaller multiple links.
if data['multiples']:
multiples = list(range(data['multiple'], start, data['multiple']))
for multiple in multiples[-data['multiples']:]:
out.append("<a class='pagenum multiple' href='%s'>%s</a>" % (
site.paged_url(slugs, multiple, total_pages), multiple
))
# Sequence of numbered page links.
for i in range(start, end + 1):
if i == page_number:
out.append("<span class='pagenum current'>%s</span>" % i)
else:
out.append("<a class='pagenum' href='%s'>%s</a>" % (
site.paged_url(slugs, i, total_pages), i
))
# Larger multiple links.
if data['multiples']:
starting_multiple = (int(end / data['multiple']) + 1) * data['multiple']
multiples = list(range(starting_multiple, total_pages, data['multiple']))
for multiple in multiples[:data['multiples']]:
out.append("<a class='pagenum multiple' href='%s'>%s</a>" % (
site.paged_url(slugs, multiple, total_pages), multiple
))
# Next page link.
if page_number < total_pages:
out.append("<a class='next' href='%s'>%s</a>" % (
site.paged_url(slugs, page_number + 1, total_pages),
data['next']
))
# Last page link.
if end < total_pages:
out.append("<a class='last' href='%s'>%s</a>" % (
site.paged_url(slugs, total_pages, total_pages),
data['last']
))
return ''.join(out)
|
from client import exceptions as ex
from client.sources.common import core
from client.sources.common import interpreter
from client.sources.common import pyconsole
from client.sources.ok_test import models
from client.utils import format
import logging
log = logging.getLogger(__name__)
class DoctestSuite(models.Suite):
setup = core.String(default='')
teardown = core.String(default='')
console_type = pyconsole.PythonConsole
def __init__(self, test, verbose, interactive, timeout=None, **fields):
super().__init__(test, verbose, interactive, timeout, **fields)
self.skip_locked_cases = True
self.console = self.console_type(verbose, interactive, timeout)
def post_instantiation(self):
for i, case in enumerate(self.cases):
if not isinstance(case, dict):
raise ex.SerializeException('Test cases must be dictionaries')
self.cases[i] = interpreter.CodeCase(self.console, self.setup,
self.teardown, **case)
def run(self, test_name, suite_number, env=None):
"""Runs test for the doctest suite.
PARAMETERS:
test_name -- str; the name of the parent test. Used for printing
purposes.
suite_number -- int; the suite number in relation to the parent test.
Used for printing purposes.
env -- dict; environment in which to run tests. If None, an
empty dictionary is used instead.
RETURNS:
dict; results of the following form:
{
'passed': int,
'failed': int,
'locked': int,
}
"""
results = {
'passed': 0,
'failed': 0,
'locked': 0,
}
if env is not None:
# env should be None in the command-line scenario. env should only
# be non-None in the programmatic API case.
self.console.load_env(env)
for i, case in self.enumerate_cases():
log.info('Running case {}'.format(i))
if (case.locked == True or results['locked'] > 0) and self.skip_locked_cases:
# If a test case is locked, refuse to run any of the subsequent
# test cases
log.info('Case {} is locked'.format(i))
results['locked'] += 1
continue
success = self._run_case(test_name, suite_number,
case, i + 1)
if not success and self.interactive:
self.console.interact()
if success:
results['passed'] += 1
else:
results['failed'] += 1
if not success and not self.verbose:
# Stop at the first failed test
break
return results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : hydra_demo
# @Time : 2021/1/26 1:28 下午
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description : https://github.com/facebookresearch/hydra/tree/1.0_branch/examples/tutorials/basic/your_first_hydra_app
from omegaconf import DictConfig, OmegaConf
import hydra
import logging
logger = logging.getLogger(__name__)
@hydra.main(config_name='conf')
def my_app(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg))
logger.info("Info level message")
if __name__ == "__main__":
my_app()
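# With config_name='conf' and no config_path, Hydra 1.0 looks for conf.yaml
# next to this script. A minimal (hypothetical) example:
#
#   db:
#     driver: mysql
#     user: someone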
|
#!/usr/bin/env python3
import argparse
# command line arguments parser
from db import Database, DatabaseError
from dbhelper import DatabaseHelper
parser = argparse.ArgumentParser(description="Database helper for FSG exam bank.")
parser.add_argument(
"--db", action="store", type=str, dest="db_path", default=".",
help="Path to the database, default is current path")
subparsers = parser.add_subparsers(dest="command")
def add_exam_fields_options(sp: argparse.ArgumentParser) -> None:
sp.add_argument(
"-y", "--year", type=int, action="store", dest="year", default=None,
help="Exam year")
sp.add_argument(
"-s", "--semester", type=str, action="store", dest="semester",
help="Exam semester (accepted values: W, S, F, A, E, H)")
sp.add_argument(
"-c", "--course", type=str, action="store", dest="course", default=None,
help="Exam course code (DDD-NNNN format, where D is the department and N the class number)")
sp.add_argument(
"--course-name", type=str, action="store", dest="course_name", default=None,
help="Full name of the course. Required if this is the first exam for the course.")
sp.add_argument(
"-a", "--author", type=str, action="store", dest="author", default=None,
help="Exam author name")
sp.add_argument(
"-t", "--title", type=str, action="store", dest="title", default=None,
help="Exam title or name")
def add_no_confirm_option(sp: argparse.ArgumentParser) -> None:
sp.add_argument(
"--no-confirm", action="store_false", dest="confirm", default=True,
help="Set to skip confirmation for command")
# add command
add_parser = subparsers.add_parser("add", help="Add exams to the database")
add_parser.add_argument(
action="store", type=str, nargs="*", dest="files",
help="PDF files to add to database. At least one is required.")
add_exam_fields_options(add_parser)
add_parser.add_argument(
"--batch", type=str, action="store", dest="batch",
help="Raw JSON data to add exams in batch mode. The expected JSON structure is a list of "
"objects with the all of the following fields: 'course', 'author', 'year', 'semester', "
"'title', 'files'. The format is the same as otherwise expected by parameters."
"Files is a list of PDF filenames for each exam.")
add_parser.add_argument("--batch-regex", type=str, action="store", dest="batch_regex",
help="Alternative method to add exams in batch mode. "
"The given regex will match all PDF files in the input directory. "
"The regex must have named groups for 'course', 'year' and 'semester',"
" and may also have named groups for 'author' and 'title'. "
"If more several files have the same fields, they are considered part"
"of the same exam. Here's an example regex: "
"'(?P<course>\w{3}-\d{4})-(?P<author>.*)-(?P<title>.*?)"
"-(?P<year>\d{4})(?P<semester>[AHE]).*', "
"which will match the following files: "
"'gel-1000-John Doe-Exam 1-2020H.pdf', "
"'gel-1000-John Doe-Exam 1-2020H-sol.pdf', "
"'gel-1001-John Smith-Exam 2-2001A.pdf', etc."
"A file must be a full match (except the extension) to be added.")
add_parser.add_argument(
"-f", "--force", action="store_true", dest="force", default=False,
help="Disable checks to skip operation if a similar exam already exists")
add_no_confirm_option(add_parser)
# edit command
edit_parser = subparsers.add_parser("edit", help="Edit individual exam in the database")
edit_parser.add_argument(
type=int, action="store", dest="id", default=None,
help="Exam ID")
add_exam_fields_options(edit_parser)
edit_parser.add_argument(
"-H", "--hashes", type=str, nargs="+", action="store", dest="hashes", default=None,
help="Exam file hashes (will match partial hash start)")
add_no_confirm_option(edit_parser)
# remove command
remove_parser = subparsers.add_parser("remove", help="Remove exams from the database")
remove_parser.add_argument(
type=int, nargs="+", action="store", dest="exam_ids",
help="Exam IDs to remove")
add_no_confirm_option(remove_parser)
# hash command
hash_parser = subparsers.add_parser("hash", help="Hash PDF files and add them to database")
hash_parser.add_argument(
type=str, nargs="*", action="store", dest="files",
help="PDF files to hash")
hash_parser.add_argument(
"--gc", action="store_true", dest="gc", default=False,
help="Set to garbage collect unused hashes")
# list command
list_parser = subparsers.add_parser("list", help="List exams")
list_parser.add_argument(
"-c", "--course", type=str, action="store", dest="course", default=None,
help="Exam course code (DDD-NNNN format, where D is the department and N the class number)")
list_parser.add_argument(
"-a", "--author", type=str, action="store", dest="author", default=None,
help="Exam author name")
list_parser.add_argument(
"-y", "--year", type=int, action="store", dest="year", default=None,
help="Exam year")
list_parser.add_argument(
"-s", "--semester", type=str, action="store", dest="semester", default=None,
help="Exam semester (accepted values: W, S, F, A, E, H)")
list_parser.add_argument(
"-H", "--show-hashes", action="store_true", dest="show_hashes", default=False,
help="Set to show file hashes for each exam")
# rewrite command
rewrite_parser = subparsers.add_parser("rewrite", help="Read and rewrite database JSON files")
def main():
args = parser.parse_args()
# load database
db = Database(args.db_path)
try:
db.load()
except DatabaseError as e:
print(f"Database error: {e.args[0]}")
exit(1)
# perform action
helper = DatabaseHelper(db)
try:
if args.command == "add":
if args.batch:
helper.batch_add_exam(args.batch, force=args.force)
elif args.batch_regex:
helper.regex_batch_add_exam(args.files[0], args.batch_regex,
force=args.force, confirm=args.confirm)
else:
helper.add_exam(args.course, args.author, args.year, args.semester,
args.title, args.files, args.course_name,
force=args.force, confirm=args.confirm)
elif args.command == "edit":
helper.edit_exam(args.id, args.course, args.author, args.year, args.semester,
args.title, args.course_name, args.hashes, confirm=args.confirm)
elif args.command == "remove":
helper.remove_exams(args.exam_ids, args.confirm)
elif args.command == "hash":
helper.hash_files(args.files)
if args.gc:
helper.garbarge_collect()
elif args.command == "list":
helper.list_exams(args.course, args.author, args.year, args.semester, args.show_hashes)
elif args.command == "rewrite":
# no-op, load then save database
pass
except DatabaseError as e:
print(f"Database error: {e.args[0]}")
exit(1)
else:
db.save()
if __name__ == '__main__':
main()
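# Hypothetical invocations (the script name "dbcli.py" is assumed):
#   ./dbcli.py --db ./bank add -c gel-1000 -a "John Doe" -y 2020 -s H exam1.pdf
#   ./dbcli.py --db ./bank list -c gel-1000 -y 2020 --show-hashes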
|
#!/usr/bin/env python
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
x = [21,22,23,4,5,6,77,8,9,10,31,32,33,34,35,36,37,18,49,50,100]
num_bins = 5
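# plt.hist returns the per-bin counts (n), the bin edges (bins) and the
# drawn bar artists (patches)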
n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
plt.show()
|
ANYBLOCK_MINUTE_AGG_DATA = {
'minute_bucket': {'buckets': [
{'key_as_string': '2021-10-22 08:21', 'key': 1634890860000, 'doc_count': 129,
'avgGasMin': {'value': 68135881075.89922}},
{'key_as_string': '2021-10-22 08:22', 'key': 1634890920000, 'doc_count': 1431,
'avgGasMin': {'value': 53474229167.313065}},
{'key_as_string': '2021-10-22 08:23', 'key': 1634890980000, 'doc_count': 1154,
'avgGasMin': {'value': 58339753373.4558}},
{'key_as_string': '2021-10-22 08:24', 'key': 1634891040000, 'doc_count': 2034,
'avgGasMin': {'value': 50850212010.623405}},
{'key_as_string': '2021-10-22 08:25', 'key': 1634891100000, 'doc_count': 685,
'avgGasMin': {'value': 56162807162.2511}},
{'key_as_string': '2021-10-22 08:26', 'key': 1634891160000, 'doc_count': 1409,
'avgGasMin': {'value': 50257151345.425125}},
{'key_as_string': '2021-10-22 08:27', 'key': 1634891220000, 'doc_count': 1353,
'avgGasMin': {'value': 56984724851.223946}},
{'key_as_string': '2021-10-22 08:28', 'key': 1634891280000, 'doc_count': 1115,
'avgGasMin': {'value': 60951365588.26457}},
{'key_as_string': '2021-10-22 08:29', 'key': 1634891340000, 'doc_count': 448,
'avgGasMin': {'value': 73265356340.99107}},
{'key_as_string': '2021-10-22 08:30', 'key': 1634891400000, 'doc_count': 1169,
'avgGasMin': {'value': 69580203498.75534}},
{'key_as_string': '2021-10-22 08:31', 'key': 1634891460000, 'doc_count': 880,
'avgGasMin': {'value': 65513321690.40114}},
{'key_as_string': '2021-10-22 08:32', 'key': 1634891520000, 'doc_count': 879,
'avgGasMin': {'value': 66162217315.27759}},
{'key_as_string': '2021-10-22 08:33', 'key': 1634891580000, 'doc_count': 1238,
'avgGasMin': {'value': 58550840044.47334}},
{'key_as_string': '2021-10-22 08:34', 'key': 1634891640000, 'doc_count': 754,
'avgGasMin': {'value': 59942899486.002655}},
{'key_as_string': '2021-10-22 08:35', 'key': 1634891700000, 'doc_count': 703,
'avgGasMin': {'value': 61471394121.12376}},
{'key_as_string': '2021-10-22 08:36', 'key': 1634891760000, 'doc_count': 931,
'avgGasMin': {'value': 58100635475.92589}},
{'key_as_string': '2021-10-22 08:37', 'key': 1634891820000, 'doc_count': 1667,
'avgGasMin': {'value': 60605478821.61068}},
{'key_as_string': '2021-10-22 08:38', 'key': 1634891880000, 'doc_count': 1331,
'avgGasMin': {'value': 58973958574.04433}},
{'key_as_string': '2021-10-22 08:39', 'key': 1634891940000, 'doc_count': 567,
'avgGasMin': {'value': 58446003675.82363}},
{'key_as_string': '2021-10-22 08:40', 'key': 1634892000000, 'doc_count': 853,
'avgGasMin': {'value': 59449708098.34115}},
{'key_as_string': '2021-10-22 08:41', 'key': 1634892060000, 'doc_count': 1090,
'avgGasMin': {'value': 64930326844.1789}},
{'key_as_string': '2021-10-22 08:42', 'key': 1634892120000, 'doc_count': 535,
'avgGasMin': {'value': 67590469518.00561}},
{'key_as_string': '2021-10-22 08:43', 'key': 1634892180000, 'doc_count': 891,
'avgGasMin': {'value': 64634446410.420876}},
{'key_as_string': '2021-10-22 08:44', 'key': 1634892240000, 'doc_count': 674,
'avgGasMin': {'value': 71032697589.07863}},
{'key_as_string': '2021-10-22 08:45', 'key': 1634892300000, 'doc_count': 960,
'avgGasMin': {'value': 68639741361.99167}},
{'key_as_string': '2021-10-22 08:46', 'key': 1634892360000, 'doc_count': 1093,
'avgGasMin': {'value': 65490351297.72644}},
{'key_as_string': '2021-10-22 08:47', 'key': 1634892420000, 'doc_count': 885,
'avgGasMin': {'value': 68430186896.47118}},
{'key_as_string': '2021-10-22 08:48', 'key': 1634892480000, 'doc_count': 959,
'avgGasMin': {'value': 72626010357.08238}},
{'key_as_string': '2021-10-22 08:49', 'key': 1634892540000, 'doc_count': 658,
'avgGasMin': {'value': 74761446538.49088}},
{'key_as_string': '2021-10-22 08:50', 'key': 1634892600000, 'doc_count': 804,
'avgGasMin': {'value': 74816062909.62563}},
{'key_as_string': '2021-10-22 08:51', 'key': 1634892660000, 'doc_count': 869,
'avgGasMin': {'value': 68546417774.591484}},
{'key_as_string': '2021-10-22 08:52', 'key': 1634892720000, 'doc_count': 764,
'avgGasMin': {'value': 76314294216.25131}},
{'key_as_string': '2021-10-22 08:53', 'key': 1634892780000, 'doc_count': 704,
'avgGasMin': {'value': 77738963700.23154}},
{'key_as_string': '2021-10-22 08:54', 'key': 1634892840000, 'doc_count': 925,
'avgGasMin': {'value': 81939003269.3081}},
{'key_as_string': '2021-10-22 08:55', 'key': 1634892900000, 'doc_count': 415,
'avgGasMin': {'value': 92267225327.56868}},
{'key_as_string': '2021-10-22 08:56', 'key': 1634892960000, 'doc_count': 1395,
'avgGasMin': {'value': 73272395530.44803}},
{'key_as_string': '2021-10-22 08:57', 'key': 1634893020000, 'doc_count': 1337,
'avgGasMin': {'value': 59864221440.795815}},
{'key_as_string': '2021-10-22 08:58', 'key': 1634893080000, 'doc_count': 625,
'avgGasMin': {'value': 70126002063.1872}},
{'key_as_string': '2021-10-22 08:59', 'key': 1634893140000, 'doc_count': 1152,
'avgGasMin': {'value': 63373701814.328125}},
{'key_as_string': '2021-10-22 09:00', 'key': 1634893200000, 'doc_count': 713,
'avgGasMin': {'value': 69898977722.02664}},
{'key_as_string': '2021-10-22 09:01', 'key': 1634893260000, 'doc_count': 1350,
'avgGasMin': {'value': 55824486614.157776}},
{'key_as_string': '2021-10-22 09:02', 'key': 1634893320000, 'doc_count': 1032,
'avgGasMin': {'value': 65995156476.00291}},
{'key_as_string': '2021-10-22 09:03', 'key': 1634893380000, 'doc_count': 0,
'avgGasMin': {'value': None}},
{'key_as_string': '2021-10-22 09:04', 'key': 1634893440000, 'doc_count': 853,
'avgGasMin': {'value': 69733239130.09496}},
{'key_as_string': '2021-10-22 09:05', 'key': 1634893500000, 'doc_count': 480,
'avgGasMin': {'value': 75507886086.31459}},
{'key_as_string': '2021-10-22 09:06', 'key': 1634893560000, 'doc_count': 928,
'avgGasMin': {'value': 71178325609.48167}},
{'key_as_string': '2021-10-22 09:07', 'key': 1634893620000, 'doc_count': 1284,
'avgGasMin': {'value': 63354990033.1176}},
{'key_as_string': '2021-10-22 09:08', 'key': 1634893680000, 'doc_count': 767,
'avgGasMin': {'value': 68934012129.87483}},
{'key_as_string': '2021-10-22 09:09', 'key': 1634893740000, 'doc_count': 924,
'avgGasMin': {'value': 60570865579.67966}},
{'key_as_string': '2021-10-22 09:10', 'key': 1634893800000, 'doc_count': 808,
'avgGasMin': {'value': 54730003709.49505}},
{'key_as_string': '2021-10-22 09:11', 'key': 1634893860000, 'doc_count': 793,
'avgGasMin': {'value': 61156687147.75662}},
{'key_as_string': '2021-10-22 09:12', 'key': 1634893920000, 'doc_count': 930,
'avgGasMin': {'value': 71012849621.8043}},
{'key_as_string': '2021-10-22 09:13', 'key': 1634893980000, 'doc_count': 898,
'avgGasMin': {'value': 61332431775.412025}},
{'key_as_string': '2021-10-22 09:14', 'key': 1634894040000, 'doc_count': 494,
'avgGasMin': {'value': 65544940406.18219}},
{'key_as_string': '2021-10-22 09:15', 'key': 1634894100000, 'doc_count': 1067,
'avgGasMin': {'value': 62775822838.15558}},
{'key_as_string': '2021-10-22 09:16', 'key': 1634894160000, 'doc_count': 1132,
'avgGasMin': {'value': 56120401780.516785}},
{'key_as_string': '2021-10-22 09:17', 'key': 1634894220000, 'doc_count': 1464,
'avgGasMin': {'value': 50130807544.32923}},
{'key_as_string': '2021-10-22 09:18', 'key': 1634894280000, 'doc_count': 1113,
'avgGasMin': {'value': 58455164505.05481}},
{'key_as_string': '2021-10-22 09:19', 'key': 1634894340000, 'doc_count': 419,
'avgGasMin': {'value': 73177058329.70645}},
{'key_as_string': '2021-10-22 09:20', 'key': 1634894400000, 'doc_count': 912,
'avgGasMin': {'value': 66653118380.26864}},
{'key_as_string': '2021-10-22 09:21', 'key': 1634894460000, 'doc_count': 132,
'avgGasMin': {'value': 62583250801.257576}}]}
}
ANYBLOCK_HOUR_AGG_DATA = {'hour_bucket': {'buckets': [
{'key_as_string': '2021-10-19 09:00:00', 'key': 1634677200000, 'doc_count': 21312,
'avgGasHour': {'value': 128609148597.81198}},
{'key_as_string': '2021-10-19 10:00:00', 'key': 1634680800000, 'doc_count': 51046,
'avgGasHour': {'value': 106046536123.98033}},
{'key_as_string': '2021-10-19 11:00:00', 'key': 1634684400000, 'doc_count': 51390,
'avgGasHour': {'value': 96298327219.7446}},
{'key_as_string': '2021-10-20 12:00:00', 'key': 1634688000000, 'doc_count': 47705,
'avgGasHour': {'value': 82949475547.78291}},
{'key_as_string': '2021-10-20 01:00:00', 'key': 1634691600000, 'doc_count': 52216,
'avgGasHour': {'value': 71954552231.77655}},
{'key_as_string': '2021-10-20 02:00:00', 'key': 1634695200000, 'doc_count': 49461,
'avgGasHour': {'value': 73729363299.80168}},
{'key_as_string': '2021-10-20 03:00:00', 'key': 1634698800000, 'doc_count': 49777,
'avgGasHour': {'value': 70843359178.92155}},
{'key_as_string': '2021-10-20 04:00:00', 'key': 1634702400000, 'doc_count': 62096,
'avgGasHour': {'value': 68633037229.229065}},
{'key_as_string': '2021-10-20 05:00:00', 'key': 1634706000000, 'doc_count': 53930,
'avgGasHour': {'value': 57285349713.77369}},
{'key_as_string': '2021-10-20 06:00:00', 'key': 1634709600000, 'doc_count': 47129,
'avgGasHour': {'value': 60660921007.97214}},
{'key_as_string': '2021-10-20 07:00:00', 'key': 1634713200000, 'doc_count': 52334,
'avgGasHour': {'value': 57231938121.359344}},
{'key_as_string': '2021-10-20 08:00:00', 'key': 1634716800000, 'doc_count': 55418,
'avgGasHour': {'value': 57852333057.37024}},
{'key_as_string': '2021-10-20 09:00:00', 'key': 1634720400000, 'doc_count': 48818,
'avgGasHour': {'value': 57692391242.17033}},
{'key_as_string': '2021-10-20 10:00:00', 'key': 1634724000000, 'doc_count': 48752,
'avgGasHour': {'value': 55517683905.113}},
{'key_as_string': '2021-10-20 11:00:00', 'key': 1634727600000, 'doc_count': 46157,
'avgGasHour': {'value': 62063161856.74955}},
{'key_as_string': '2021-10-20 12:00:00', 'key': 1634731200000, 'doc_count': 52856,
'avgGasHour': {'value': 65053463155.662155}},
{'key_as_string': '2021-10-20 01:00:00', 'key': 1634734800000, 'doc_count': 55183,
'avgGasHour': {'value': 78284792502.60052}},
{'key_as_string': '2021-10-20 02:00:00', 'key': 1634738400000, 'doc_count': 54349,
'avgGasHour': {'value': 145158063644.64172}},
{'key_as_string': '2021-10-20 03:00:00', 'key': 1634742000000, 'doc_count': 57141,
'avgGasHour': {'value': 144049130859.45502}},
{'key_as_string': '2021-10-20 04:00:00', 'key': 1634745600000, 'doc_count': 56502,
'avgGasHour': {'value': 145216649270.29474}},
{'key_as_string': '2021-10-20 05:00:00', 'key': 1634749200000, 'doc_count': 55978,
'avgGasHour': {'value': 118768161158.30257}},
{'key_as_string': '2021-10-20 06:00:00', 'key': 1634752800000, 'doc_count': 59864,
'avgGasHour': {'value': 91634074112.87233}},
{'key_as_string': '2021-10-20 07:00:00', 'key': 1634756400000, 'doc_count': 49747,
'avgGasHour': {'value': 116518113577.62349}},
{'key_as_string': '2021-10-20 08:00:00', 'key': 1634760000000, 'doc_count': 48167,
'avgGasHour': {'value': 122562894010.7537}},
{'key_as_string': '2021-10-20 09:00:00', 'key': 1634763600000, 'doc_count': 55740,
'avgGasHour': {'value': 87340609528.22296}},
{'key_as_string': '2021-10-20 10:00:00', 'key': 1634767200000, 'doc_count': 53604,
'avgGasHour': {'value': 92236205568.716}},
{'key_as_string': '2021-10-20 11:00:00', 'key': 1634770800000, 'doc_count': 46958,
'avgGasHour': {'value': 95961859983.37193}},
{'key_as_string': '2021-10-21 12:00:00', 'key': 1634774400000, 'doc_count': 45975,
'avgGasHour': {'value': 175510950626.2469}},
{'key_as_string': '2021-10-21 01:00:00', 'key': 1634778000000, 'doc_count': 53404,
'avgGasHour': {'value': 98646447171.46677}},
{'key_as_string': '2021-10-21 02:00:00', 'key': 1634781600000, 'doc_count': 57307,
'avgGasHour': {'value': 82982317140.85931}},
{'key_as_string': '2021-10-21 03:00:00', 'key': 1634785200000, 'doc_count': 58239,
'avgGasHour': {'value': 87932574597.74727}},
{'key_as_string': '2021-10-21 04:00:00', 'key': 1634788800000, 'doc_count': 56245,
'avgGasHour': {'value': 76149225896.83566}},
{'key_as_string': '2021-10-21 05:00:00', 'key': 1634792400000, 'doc_count': 52593,
'avgGasHour': {'value': 66794596644.02105}},
{'key_as_string': '2021-10-21 06:00:00', 'key': 1634796000000, 'doc_count': 50254,
'avgGasHour': {'value': 68584199168.770485}},
{'key_as_string': '2021-10-21 07:00:00', 'key': 1634799600000, 'doc_count': 52662,
'avgGasHour': {'value': 74093194393.5105}},
{'key_as_string': '2021-10-21 08:00:00', 'key': 1634803200000, 'doc_count': 56055,
'avgGasHour': {'value': 70875795767.52893}},
{'key_as_string': '2021-10-21 09:00:00', 'key': 1634806800000, 'doc_count': 48973,
'avgGasHour': {'value': 78291452674.4224}},
{'key_as_string': '2021-10-21 10:00:00', 'key': 1634810400000, 'doc_count': 54487,
'avgGasHour': {'value': 74190310367.98138}},
{'key_as_string': '2021-10-21 11:00:00', 'key': 1634814000000, 'doc_count': 56060,
'avgGasHour': {'value': 85341007596.41272}},
{'key_as_string': '2021-10-21 12:00:00', 'key': 1634817600000, 'doc_count': 58082,
'avgGasHour': {'value': 78661573194.15747}},
{'key_as_string': '2021-10-21 01:00:00', 'key': 1634821200000, 'doc_count': 55588,
'avgGasHour': {'value': 93190974309.3877}},
{'key_as_string': '2021-10-21 02:00:00', 'key': 1634824800000, 'doc_count': 57909,
'avgGasHour': {'value': 98395441364.02623}},
{'key_as_string': '2021-10-21 03:00:00', 'key': 1634828400000, 'doc_count': 54955,
'avgGasHour': {'value': 122117944923.87564}},
{'key_as_string': '2021-10-21 04:00:00', 'key': 1634832000000, 'doc_count': 51953,
'avgGasHour': {'value': 297610331514.54034}},
{'key_as_string': '2021-10-21 05:00:00', 'key': 1634835600000, 'doc_count': 53669,
'avgGasHour': {'value': 125452941952.27438}},
{'key_as_string': '2021-10-21 06:00:00', 'key': 1634839200000, 'doc_count': 47957,
'avgGasHour': {'value': 110303017723.39642}},
{'key_as_string': '2021-10-21 07:00:00', 'key': 1634842800000, 'doc_count': 51274,
'avgGasHour': {'value': 107972688875.16872}},
{'key_as_string': '2021-10-21 08:00:00', 'key': 1634846400000, 'doc_count': 49324,
'avgGasHour': {'value': 99975925578.08195}},
{'key_as_string': '2021-10-21 09:00:00', 'key': 1634850000000, 'doc_count': 48910,
'avgGasHour': {'value': 99688685379.98743}},
{'key_as_string': '2021-10-21 10:00:00', 'key': 1634853600000, 'doc_count': 52831,
'avgGasHour': {'value': 80309199059.89757}},
{'key_as_string': '2021-10-21 11:00:00', 'key': 1634857200000, 'doc_count': 52476,
'avgGasHour': {'value': 73593371533.68233}},
{'key_as_string': '2021-10-22 12:00:00', 'key': 1634860800000, 'doc_count': 47731,
'avgGasHour': {'value': 186850208605.2955}},
{'key_as_string': '2021-10-22 01:00:00', 'key': 1634864400000, 'doc_count': 48950,
'avgGasHour': {'value': 89257706752.80869}},
{'key_as_string': '2021-10-22 02:00:00', 'key': 1634868000000, 'doc_count': 53172,
'avgGasHour': {'value': 75746856454.40625}},
{'key_as_string': '2021-10-22 03:00:00', 'key': 1634871600000, 'doc_count': 49497,
'avgGasHour': {'value': 74944458968.99258}},
{'key_as_string': '2021-10-22 04:00:00', 'key': 1634875200000, 'doc_count': 61439,
'avgGasHour': {'value': 77660922014.95834}},
{'key_as_string': '2021-10-22 05:00:00', 'key': 1634878800000, 'doc_count': 44808,
'avgGasHour': {'value': 79378527233.8227}},
{'key_as_string': '2021-10-22 06:00:00', 'key': 1634882400000, 'doc_count': 43334,
'avgGasHour': {'value': 74253499759.39934}},
{'key_as_string': '2021-10-22 07:00:00', 'key': 1634886000000, 'doc_count': 57603,
'avgGasHour': {'value': 68793691171.71289}},
{'key_as_string': '2021-10-22 08:00:00', 'key': 1634889600000, 'doc_count': 60075,
'avgGasHour': {'value': 65178481428.598465}},
{'key_as_string': '2021-10-22 09:00:00', 'key': 1634893200000, 'doc_count': 30141,
'avgGasHour': {'value': 63167919678.08699}}]}}
|
# -*- coding: utf-8 -*-
import logging
import os
from astropy.io import fits
from astropy.io.registry import IORegistryError
from astropy.table import Column, Table
from .database import get_filters
logging.basicConfig(level=logging.getLevelName(
os.getenv('LOG_LEVEL', 'WARNING')))
LOGGER = logging.getLogger(__name__)
def get_filter_meta_table():
"""Generate a table with meta information about HELP filters
This function generates an astropy.table.Table containing the information
about the filters used in HELP, except their transmission profile.
Returns
-------
astropy.table.Table
"""
# List of filter attributes to put in the table
attributes = ['filter_id', 'description', 'band_name', 'facility',
'instrument', 'mean_wavelength', 'min_wavelength',
'max_wavelength', 'att_ebv', 'notes']
all_filters = get_filters()
table_columns = []
for attribute in attributes:
table_columns.append(Column(
name=attribute,
data=[getattr(_, attribute) for _ in all_filters]
))
return Table(table_columns)
def export_to_cigale(directory):
"""Export the HELP filter database to CIGALE compatible files.
This function exports the filter transmission profiles to files (one per
filter) that can be imported into CIGALE and used in SED fitting.
Parameters
----------
directory: str
Directory in which to save the filter files.
"""
for filt in get_filters():
with open("{}/{}.dat".format(directory, filt.filter_id), 'w') as out:
out.write("# {}\n".format(filt.filter_id))
out.write("# energy\n")
out.write("# {}".format(filt.description))
Table(filt.response.T).write(out, format="ascii.no_header")
def export_to_eazy(analysed_table):
"""Export the HELP filter database to be used with EAZY.
This function export the filter transmission profiles to be used by EAZY
for photometric redshift computation. As EAZY needs to associate to each
column in the catalogue the index of the filter in the database, we are
exporting it for a given table that will be analysed. This table must be
in the format used within HELP.
It creates three files: <analysed_table>.res containing the responses of the
filters in analysed_table, <analysed_table>.translate associating the
aperture flux and error columns to their corresponding filter, and
<analysed_table>.info containing additional information about the filters.
Parameters
----------
analysed_table: str
The table that will be processed with EAZY. The aperture fluxes must be
labelled f_ap_<filter> and the associated errors ferr_ap_<filter>.
"""
response_filename = "{}.res".format(os.path.splitext(analysed_table)[0])
translate_filename = "{}.translate".format(
os.path.splitext(analysed_table)[0])
info_filename = "{}.info".format(os.path.splitext(analysed_table)[0])
if os.path.exists(response_filename):
raise IOError("{} file exists.".format(response_filename))
if os.path.exists(translate_filename):
raise IOError("{} file exists.".format(translate_filename))
if os.path.exists(info_filename):
raise IOError("{} file exists.".format(info_filename))
# If the table is a FITS file, we use astropy.io.fits so that we can deal
# with huge files without loading them in memory.
# TODO: Find a way to deal with not readable ascii files
if analysed_table.endswith(".fits") or analysed_table.endswith(".fit"):
with fits.open(analysed_table) as hdulist:
column_names = hdulist[1].columns.names
else:
try:
catalogue = Table.read(analysed_table)
except IORegistryError:
catalogue = Table.read(analysed_table, format='ascii')
column_names = catalogue.colnames
# EAZY uses aperture fluxes.
catalogue_bands = [col[5:] for col in column_names
if col.startswith('f_ap_')]
with open(response_filename, 'w') as filter_responses, \
open(translate_filename, 'w') as translate_table, \
open(info_filename, 'w') as info_file:
for idx_band, band in enumerate(catalogue_bands):
filt = get_filters(band)
if filt is None:
# The band is not in HELP filter database
LOGGER.error("Filter %s is not in the database.", band)
else:
filt_nb_points = len(filt.response[0])
filter_responses.write(
"{:>8d} {}, {}, mean_lambda={}, att/ebv={}\n".format(
filt_nb_points,
band,
filt.description,
filt.mean_wavelength,
filt.att_ebv))
for idx, row in enumerate(filt.response.T):
filter_responses.write(
"{:>8d} {:>10.2f} {:>12.8g}\n".format(
idx+1, row[0], row[1])
)
translate_table.write("f_ap_{} F{}\n".format(
band, idx_band + 1))
translate_table.write("ferr_ap_{} E{}\n".format(
band, idx_band + 1))
info_file.write(
"{:>3d} {}, {}, mean_lambda={}, att/ebv={}\n".format(
idx_band + 1,
band,
filt.description,
filt.mean_wavelength,
filt.att_ebv))
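# For reference, the .translate file written above pairs each catalogue column
# with the 1-based index of its filter in the .res file, e.g. (band names are
# illustrative):
# f_ap_u F1
# ferr_ap_u E1
# f_ap_g F2
# ferr_ap_g E2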
def correct_galactic_extinction(catalogue, inplace=False):
"""Correct photometric catalogue for galactic extinction.
This function takes a photometric catalogue in the HELP format and correct
the fluxes and magnitudes for the galactic extinction given the E(B-V)
value associated with each source. The catalogue must have an ebv column
and the fluxes and magnitudes columns are identified using the HELP column
format (f_<filter>, ferr_filter, f_ap_<filter>, ...).
Column associated with filters that are not in the HELP database will not
be corrected and an error message will be logged with the name of the
missing filters.
Parameters
----------
catalogue: astropy.table.Table
The catalogue to be corrected.
inplace: boolean
If inplace is set to True, the function will not make a copy of the
input catalogue. This will save some memory space at the expense of
modifying the input catalogue.
Returns
-------
astropy.table.Table
The corrected catalogue.
"""
if not inplace:
catalogue = catalogue.copy()
try:
ebv = catalogue['ebv']
except KeyError:
raise KeyError("The catalogue is missing the ebv column.")
    # Instead of logging an error message for each missing band and problematic
    # column, let's just log a summary.
missing_band, prob_columns = set(), set()
for column in catalogue.colnames:
band = None
if column.startswith('f_ap_') or column.startswith('m_ap_'):
band = column[5:]
elif column.startswith('ferr_ap_') or column.startswith('merr_ap_'):
band = column[8:]
elif column.startswith('f_') or column.startswith('m_'):
band = column[2:]
elif column.startswith('ferr_') or column.startswith('merr_'):
band = column[5:]
if band is None:
LOGGER.debug("Column %s is not associated with any band",
column)
else:
filt = get_filters(band)
if filt is None:
missing_band.add(band)
prob_columns.add(column)
else:
att = filt.att_ebv * ebv
if column.startswith("f"):
catalogue[column] *= 10**(att/2.5)
elif column.startswith("merr"):
# Error in magnitude is not affected by extinction.
pass
else:
catalogue[column] -= att
if len(missing_band) > 0:
LOGGER.error("The filters are missing in the database: %s.\n"
"The are present in these columns: %s.",
", ".join(sorted(missing_band)),
", ".join(sorted(prob_columns)))
return catalogue
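# Worked example of the correction applied above (values are illustrative): for
# a filter with att_ebv = 2.75 and a source with ebv = 0.05, the attenuation is
# att = 2.75 * 0.05 = 0.1375 mag, so fluxes are multiplied by
# 10**(0.1375 / 2.5) ~= 1.135 and magnitudes are decreased by 0.1375.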
|
from spikeforest import SFMdaRecordingExtractor, SFMdaSortingExtractor, mdaio
from spikeforest_analysis import bandpass_filter
import mlprocessors as mlpr
from mountaintools import client as mt
import numpy as np
import json
class FilterTimeseries(mlpr.Processor):
NAME = 'FilterTimeseries'
VERSION = '0.1.0'
CONTAINER = None
recording_directory = mlpr.Input(description='Recording directory', optional=False, directory=True)
timeseries_out = mlpr.Output(description='Filtered timeseries file (.mda)')
def run(self):
rx = SFMdaRecordingExtractor(dataset_directory=self.recording_directory, download=True)
rx2 = bandpass_filter(recording=rx, freq_min=300, freq_max=6000, freq_wid=1000)
if not mdaio.writemda32(rx2.get_traces(), self.timeseries_out):
raise Exception('Unable to write output file.')
class ComputeUnitDetail(mlpr.Processor):
NAME = 'ComputeUnitDetail'
VERSION = '0.1.0'
CONTAINER = None
recording_dir = mlpr.Input(description='Recording directory', optional=False, directory=True)
firings = mlpr.Input(description='Input firings.mda file')
unit_id = mlpr.IntegerParameter(description='Unit ID')
json_out = mlpr.Output(description='Output .json file')
def run(self):
        # The input declared above is named recording_dir, so use that attribute here.
        recording = SFMdaRecordingExtractor(dataset_directory=self.recording_dir, download=True)
sorting = SFMdaSortingExtractor(firings_file=self.firings)
waveforms0 = _get_random_spike_waveforms(recording=recording, sorting=sorting, unit=self.unit_id)
channel_ids = recording.get_channel_ids()
avg_waveform = np.median(waveforms0, axis=2)
ret = dict(
channel_ids = channel_ids,
average_waveform = avg_waveform.tolist()
)
with open(self.json_out, 'w') as f:
json.dump(ret, f)
def _get_random_spike_waveforms(*, recording, sorting, unit, max_num=50, channels=None, snippet_len=100):
st = sorting.get_unit_spike_train(unit_id=unit)
num_events = len(st)
if num_events > max_num:
event_indices = np.random.choice(
range(num_events), size=max_num, replace=False)
else:
event_indices = range(num_events)
spikes = recording.get_snippets(reference_frames=st[event_indices].astype(int), snippet_len=snippet_len,
channel_ids=channels)
if len(spikes) > 0:
spikes = np.dstack(tuple(spikes))
else:
spikes = np.zeros((recording.get_num_channels(), snippet_len, 0))
return spikes
|
import scrapy
from baidutie.items import BaidutieItem
class DutieSpider(scrapy.Spider):
name = 'dutie'
    # 2. Restrict the allowed domains
allowed_domains = ['baidu.com']
    # 1. Set the start URL
start_urls = ['https://tieba.baidu.com/f?kw=%E6%96%B9%E8%88%9F%E7%94%9F%E5%AD%98%E8%BF%9B%E5%8C%96']
def parse(self, response):
        # 3. Write the parsing method
        # Get all thread nodes
node_list = response.xpath('//*[@id="thread_list"]/li')
        # Iterate over the node list and extract the name and link
for node in node_list:
            # Instantiate an item
item = BaidutieItem()
item['name'] = node.xpath('./div/div[2]/div[1]/div[1]/a/text()').extract_first()
item['link'] = response.urljoin(node.xpath('./div/div[2]/div[1]/div[1]/a/@href').extract_first())
yield item
        # Handle pagination
        # Get the URL of the next page
part_url = response.xpath('//*[@id="frs_list_pager"]/a[@class="next pagination-item "]/@href | //*[@id="frs_list_pager"]/a[@class="next pagination-item"]/@href').extract_first()
print(part_url)
        # Stop when there is no next page
        if part_url is not None:
next_url = response.urljoin(part_url)
            # Build a request object and yield it back to the engine
yield scrapy.Request(
url=next_url,
callback=self.parse
)
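# To try the spider, run it from the Scrapy project root, for example:
#   scrapy crawl dutie -o tieba_items.json
# (the output file name is illustrative; any Scrapy feed export target works)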
|
import asyncio
import datetime as dt
import random
import typing as t
from enum import Enum
from twitchio.ext import commands
from carberretta import Config
HACK_COST = 100
class GameState(Enum):
__slots__ = ("STOPPED", "WAITING", "RUNNING")
STOPPED = 0
WAITING = 1
RUNNING = 2
class HackGame:
__slots__ = ("state", "bot", "participants")
def __init__(self, bot: commands.Bot) -> None:
self.state = GameState.STOPPED
self.bot = bot
self.participants: t.List[str] = []
async def start(self, ctx: commands.bot.Context) -> None:
self.state = GameState.WAITING
await ctx.send(
f"A hacking squad is being set up; use {Config.PREFIX}hack to join!"
)
self.bot.scheduler.add_job(
self.run,
next_run_time=dt.datetime.utcnow() + dt.timedelta(seconds=60),
args=[ctx],
)
async def run(self, ctx: commands.bot.Context) -> None:
if len(self.participants) < 3:
await self.bot.db.executemany(
"UPDATE economy SET Credits = Credits + ? WHERE User = ?",
((HACK_COST, p) for p in self.participants),
)
await ctx.send(
"The hack was cancelled as not enough users joined the squad. "
"Those users that did have been refunded."
)
return await self.reset()
self.state = GameState.RUNNING
pool = await self.bot.db.field(
"SELECT Credits FROM economy WHERE User = 'bank'"
)
await ctx.send(
f"The hack has started! The total up for grabs is {pool:,} credits. "
"Who will show their elite hacking skills and get the goods?"
)
await asyncio.sleep(random.randint(10, 30))
winners: t.List[str] = []
odds = min(len(self.participants) * 10, 75)
for user in self.participants:
if random.randint(1, 100) <= odds:
winners.append(user)
await self.bot.db.execute(
"UPDATE economy SET Credits = Credits - ? WHERE User = 'bank'",
pool,
)
if not winners:
await ctx.send(
"The hack is complete; nobody managed to get anything!"
)
return await self.reset()
winnings = pool // len(self.participants)
await self.bot.db.executemany(
"UPDATE economy SET Credits = Credits + ? WHERE User = ?",
((winnings, w) for w in winners),
)
await ctx.send(
f"The hack is complete! The following users got their {winnings:,} "
f"credit share: {', '.join(winners)}"
)
await self.reset()
async def reset(self) -> None:
self.state = GameState.STOPPED
self.participants = []
async def try_add_participant(self, ctx: commands.bot.Context) -> None:
if ctx.author.name in self.participants:
return await ctx.send(
f"{ctx.author.name}, you are already good to go."
)
bal: int = await self.bot.db.field(
"SELECT Credits FROM economy WHERE User = ?", ctx.author.name
)
if bal < 100:
return await ctx.send(
f"{ctx.author.name}, you need at least {HACK_COST:,} "
f"credits to hack; you have {bal:,}."
)
await self.bot.db.execute(
"UPDATE economy SET Credits = Credits - ? WHERE User = ?",
HACK_COST,
ctx.author.name,
)
self.participants.append(ctx.author.name)
await ctx.send(
f"Welcome to the squad {ctx.author.name}! The {HACK_COST:,} "
"credits needed for the hacking equipment has been taken from "
"your balance."
)
class Fun:
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.hackgame = HackGame(bot)
@commands.command(name="dice", aliases=["roll"])
async def dice_command(
self, ctx: commands.bot.Context, sides: int = 6
) -> None:
if not 0 < sides < 101:
return await ctx.send(
f"{ctx.author.name}, you can only roll dice of between "
"1 and 100 sides inclusive."
)
await ctx.send(
f"{ctx.author.name}, you rolled a {random.randint(1, sides)}."
)
@commands.command(name="hug", aliases=["cuddle"])
async def hug_command(
self, ctx: commands.bot.Context, recipient: str
) -> None:
users = await self.bot.db.column("SELECT User FROM economy")
if recipient.strip("@").lower() not in users:
return await ctx.send(
f"{ctx.author.name}, I don't know who you're trying to hug."
)
await ctx.send(f"{ctx.author.name} hugged {recipient.strip('@')}!")
@commands.command(name="coinflip", aliases=["coin", "flip"])
async def coinflip_command(
self, ctx: commands.bot.Context, bet: int, guess: str
) -> None:
bal: int = await self.bot.db.field(
"SELECT Credits FROM economy WHERE User = ?", ctx.author.name
)
if bet < 1:
return await ctx.send(
f"{ctx.author.name}, you must bet at least 1 credit."
)
if bet > bal:
return await ctx.send(
f"{ctx.author.name}, you do not have enough credits to make "
f"that bet; you only have {bal:,}."
)
if (guess := guess.lower()) not in ("heads", "tails", "h", "t"):
return await ctx.send(
f"{ctx.author.name}, you need to guess either 'heads' or 'tails'."
)
if guess[0] != random.choice("ht"):
await ctx.send(
f"Too bad {ctx.author.name}, you were wrong! {bet:,} credits "
"have been added to the bank."
)
await self.bot.db.execute(
"UPDATE economy SET Credits = Credits - ? WHERE User = ?",
bet,
ctx.author.name,
)
return await self.bot.db.execute(
"UPDATE economy SET Credits = Credits + ? WHERE User = 'bank'",
bet,
)
await ctx.send(
f"Congratulations {ctx.author.name}, you were right! "
f"You've won {bet*2:,} credits!"
)
await self.bot.db.execute(
"UPDATE economy SET Credits = Credits + ? WHERE User = ?",
bet,
ctx.author.name,
)
@commands.command(name="hack")
async def hack_command(self, ctx: commands.bot.Context) -> None:
if self.hackgame.state == GameState.RUNNING:
return await ctx.send(
f"{ctx.author.name}, a hack is already in progress!"
)
if self.hackgame.state == GameState.WAITING:
return await self.hackgame.try_add_participant(ctx)
if self.hackgame.state == GameState.STOPPED:
if (
await self.bot.db.field(
"SELECT Credits FROM economy WHERE User = 'bank'"
)
< 500
):
return await ctx.send(
"The bank does not have enough credits to be worth hacking."
)
await self.hackgame.start(ctx)
await self.hackgame.try_add_participant(ctx)
def prepare(bot: commands.Bot) -> None:
bot.add_cog(Fun(bot))
|
num = [[], []]
for c in range(0, 7):
    valor = int(input(f'Enter value #{c+1}: '))
if valor % 2 == 0:
num[0].append(valor)
if valor % 2 != 0:
num[1].append(valor)
num[0].sort()
num[1].sort()
print(f'The EVEN numbers were: {num[0]}')
print(f'The ODD numbers were: {num[1]}')
|
#!/usr/bin/env python
# coding: utf-8
# file="text_euc2.txt"
# dat = open(file, "rb").read()
import sys
#
filename=sys.argv[1]
dat = open(filename).read().encode('euc-jp')
dat=bytes(dat)
l=len(dat)
i=0
put_index=0
imgs=[]
print("const int TEXT_LENGTH = %d;" % (int(l/2)+1))
print("byte[] text_data = {")
while True:
dh=dat[i]
i+=1
if dh<0x80: # ascii
print("0x00,0x%02x," % dh, end="")
put_index+=1
if dh>=0xA0:
dl=dat[i]
i+=1
ku=dh-0xa0
ten=dl-0xa0
print("0x%02x,0x%02x," % (ku,ten),end="")
put_index+=1
if put_index%8==0:
print()
if i>l-1:
break
print("};")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Masterprojekt\03_Production\Maya\scripts\assetIO\assetIOWidget.ui'
#
# Created: Wed May 30 10:05:54 2018
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_AssetIO(object):
def setupUi(self, AssetIO):
AssetIO.setObjectName("AssetIO")
AssetIO.resize(574, 474)
self.centralwidget = QtWidgets.QWidget(AssetIO)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.assetIO_tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.assetIO_tabWidget.setEnabled(True)
self.assetIO_tabWidget.setStyleSheet("QTabBar::tab { height: 25px; width: 100%; }")
self.assetIO_tabWidget.setObjectName("assetIO_tabWidget")
self.exportTab = QtWidgets.QWidget()
self.exportTab.setObjectName("exportTab")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.exportTab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.export_assetDetails_grpBox = QtWidgets.QGroupBox(self.exportTab)
self.export_assetDetails_grpBox.setTitle("")
self.export_assetDetails_grpBox.setObjectName("export_assetDetails_grpBox")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.export_assetDetails_grpBox)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.exportAssetName_layout = QtWidgets.QHBoxLayout()
self.exportAssetName_layout.setObjectName("exportAssetName_layout")
self.eportAssetName_label = QtWidgets.QLabel(self.export_assetDetails_grpBox)
self.eportAssetName_label.setMargin(0)
self.eportAssetName_label.setObjectName("eportAssetName_label")
self.exportAssetName_layout.addWidget(self.eportAssetName_label)
self.exportAssetName_input = QtWidgets.QLineEdit(self.export_assetDetails_grpBox)
self.exportAssetName_input.setMinimumSize(QtCore.QSize(250, 0))
self.exportAssetName_input.setObjectName("exportAssetName_input")
self.exportAssetName_layout.addWidget(self.exportAssetName_input)
self.exportAssetName_browseButton = QtWidgets.QPushButton(self.export_assetDetails_grpBox)
self.exportAssetName_browseButton.setObjectName("exportAssetName_browseButton")
self.exportAssetName_layout.addWidget(self.exportAssetName_browseButton)
self.verticalLayout_10.addLayout(self.exportAssetName_layout)
self.exportCategory_layout = QtWidgets.QHBoxLayout()
self.exportCategory_layout.setObjectName("exportCategory_layout")
self.exportCategory_label = QtWidgets.QLabel(self.export_assetDetails_grpBox)
self.exportCategory_label.setObjectName("exportCategory_label")
self.exportCategory_layout.addWidget(self.exportCategory_label)
self.exportCategory_CB = QtWidgets.QComboBox(self.export_assetDetails_grpBox)
self.exportCategory_CB.setObjectName("exportCategory_CB")
self.exportCategory_layout.addWidget(self.exportCategory_CB)
self.exportCategoryAdd_button = QtWidgets.QPushButton(self.export_assetDetails_grpBox)
self.exportCategoryAdd_button.setObjectName("exportCategoryAdd_button")
self.exportCategory_layout.addWidget(self.exportCategoryAdd_button)
self.verticalLayout_10.addLayout(self.exportCategory_layout)
self.exportFrameSel_layout = QtWidgets.QHBoxLayout()
self.exportFrameSel_layout.setObjectName("exportFrameSel_layout")
self.exportFrameSel_checkbox = QtWidgets.QCheckBox(self.export_assetDetails_grpBox)
self.exportFrameSel_checkbox.setChecked(True)
self.exportFrameSel_checkbox.setObjectName("exportFrameSel_checkbox")
self.exportFrameSel_layout.addWidget(self.exportFrameSel_checkbox)
self.export_breakRef_chkbox = QtWidgets.QCheckBox(self.export_assetDetails_grpBox)
self.export_breakRef_chkbox.setObjectName("export_breakRef_chkbox")
self.exportFrameSel_layout.addWidget(self.export_breakRef_chkbox)
self.verticalLayout_10.addLayout(self.exportFrameSel_layout)
self.verticalLayout_3.addWidget(self.export_assetDetails_grpBox)
self.exportDescription_grpBox = QtWidgets.QGroupBox(self.exportTab)
self.exportDescription_grpBox.setObjectName("exportDescription_grpBox")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.exportDescription_grpBox)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.exportDescription_textEdit = QtWidgets.QTextEdit(self.exportDescription_grpBox)
self.exportDescription_textEdit.setObjectName("exportDescription_textEdit")
self.verticalLayout_7.addWidget(self.exportDescription_textEdit)
self.verticalLayout_3.addWidget(self.exportDescription_grpBox)
self.exportButtons_grpBox = QtWidgets.QGroupBox(self.exportTab)
self.exportButtons_grpBox.setTitle("")
self.exportButtons_grpBox.setObjectName("exportButtons_grpBox")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.exportButtons_grpBox)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.exportAll_button = QtWidgets.QPushButton(self.exportButtons_grpBox)
self.exportAll_button.setObjectName("exportAll_button")
self.horizontalLayout_5.addWidget(self.exportAll_button)
self.exportSelected_button = QtWidgets.QPushButton(self.exportButtons_grpBox)
self.exportSelected_button.setObjectName("exportSelected_button")
self.horizontalLayout_5.addWidget(self.exportSelected_button)
self.verticalLayout_3.addWidget(self.exportButtons_grpBox)
self.assetIO_tabWidget.addTab(self.exportTab, "")
self.importTab = QtWidgets.QWidget()
self.importTab.setObjectName("importTab")
self.verticalLayout = QtWidgets.QVBoxLayout(self.importTab)
self.verticalLayout.setObjectName("verticalLayout")
self.importSearch_layout = QtWidgets.QHBoxLayout()
self.importSearch_layout.setObjectName("importSearch_layout")
self.importCategory_label = QtWidgets.QLabel(self.importTab)
self.importCategory_label.setObjectName("importCategory_label")
self.importSearch_layout.addWidget(self.importCategory_label)
self.importCategory_cBox = QtWidgets.QComboBox(self.importTab)
self.importCategory_cBox.setObjectName("importCategory_cBox")
self.importSearch_layout.addWidget(self.importCategory_cBox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.importSearch_layout.addItem(spacerItem)
self.importSearch_label = QtWidgets.QLabel(self.importTab)
self.importSearch_label.setObjectName("importSearch_label")
self.importSearch_layout.addWidget(self.importSearch_label)
self.importSearch_lineEdit = QtWidgets.QLineEdit(self.importTab)
self.importSearch_lineEdit.setObjectName("importSearch_lineEdit")
self.importSearch_layout.addWidget(self.importSearch_lineEdit)
self.verticalLayout.addLayout(self.importSearch_layout)
self.importBrowser_layout = QtWidgets.QHBoxLayout()
self.importBrowser_layout.setObjectName("importBrowser_layout")
self.importList = QtWidgets.QListWidget(self.importTab)
self.importList.setObjectName("importList")
self.importBrowser_layout.addWidget(self.importList)
self.importProperties_grpBox = QtWidgets.QGroupBox(self.importTab)
self.importProperties_grpBox.setMinimumSize(QtCore.QSize(150, 0))
self.importProperties_grpBox.setObjectName("importProperties_grpBox")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.importProperties_grpBox)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.importProperties_icon = QtWidgets.QLabel(self.importProperties_grpBox)
self.importProperties_icon.setText("")
self.importProperties_icon.setPixmap(QtGui.QPixmap("assetDefaultIcon.png"))
self.importProperties_icon.setAlignment(QtCore.Qt.AlignCenter)
self.importProperties_icon.setObjectName("importProperties_icon")
self.verticalLayout_4.addWidget(self.importProperties_icon)
self.importProperties_layout = QtWidgets.QFormLayout()
self.importProperties_layout.setObjectName("importProperties_layout")
self.importPropertiesName_label = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesName_label.setObjectName("importPropertiesName_label")
self.importProperties_layout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.importPropertiesName_label)
self.importPropertiesName_display = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesName_display.setObjectName("importPropertiesName_display")
self.importProperties_layout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.importPropertiesName_display)
self.importPropertiesCategory_label = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesCategory_label.setObjectName("importPropertiesCategory_label")
self.importProperties_layout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.importPropertiesCategory_label)
self.importPropertiesCategory_display = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesCategory_display.setObjectName("importPropertiesCategory_display")
self.importProperties_layout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.importPropertiesCategory_display)
self.importPropertiesDescription_label = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesDescription_label.setObjectName("importPropertiesDescription_label")
self.importProperties_layout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.importPropertiesDescription_label)
self.importPropertiesDescription_textEdit = QtWidgets.QTextEdit(self.importProperties_grpBox)
self.importPropertiesDescription_textEdit.setReadOnly(True)
self.importPropertiesDescription_textEdit.setObjectName("importPropertiesDescription_textEdit")
self.importProperties_layout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.importPropertiesDescription_textEdit)
self.importPropertiesMod_label = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesMod_label.setObjectName("importPropertiesMod_label")
self.importProperties_layout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.importPropertiesMod_label)
self.importPropertiesMod_display = QtWidgets.QLabel(self.importProperties_grpBox)
self.importPropertiesMod_display.setObjectName("importPropertiesMod_display")
self.importProperties_layout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.importPropertiesMod_display)
self.verticalLayout_4.addLayout(self.importProperties_layout)
self.importProperties_chkbox_layout = QtWidgets.QHBoxLayout()
self.importProperties_chkbox_layout.setObjectName("importProperties_chkbox_layout")
self.importProperties_locator_checkbox = QtWidgets.QCheckBox(self.importProperties_grpBox)
self.importProperties_locator_checkbox.setObjectName("importProperties_locator_checkbox")
self.importProperties_chkbox_layout.addWidget(self.importProperties_locator_checkbox)
self.importProperties_selectable_checkbox = QtWidgets.QCheckBox(self.importProperties_grpBox)
self.importProperties_selectable_checkbox.setEnabled(False)
self.importProperties_selectable_checkbox.setObjectName("importProperties_selectable_checkbox")
self.importProperties_chkbox_layout.addWidget(self.importProperties_selectable_checkbox)
self.verticalLayout_4.addLayout(self.importProperties_chkbox_layout)
self.importBrowser_layout.addWidget(self.importProperties_grpBox)
self.verticalLayout.addLayout(self.importBrowser_layout)
self.importButtons_layout = QtWidgets.QHBoxLayout()
self.importButtons_layout.setObjectName("importButtons_layout")
self.importImport_button = QtWidgets.QPushButton(self.importTab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.importImport_button.sizePolicy().hasHeightForWidth())
self.importImport_button.setSizePolicy(sizePolicy)
self.importImport_button.setObjectName("importImport_button")
self.importButtons_layout.addWidget(self.importImport_button)
self.importReference_button = QtWidgets.QPushButton(self.importTab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.importReference_button.sizePolicy().hasHeightForWidth())
self.importReference_button.setSizePolicy(sizePolicy)
self.importReference_button.setObjectName("importReference_button")
self.importButtons_layout.addWidget(self.importReference_button)
self.verticalLayout.addLayout(self.importButtons_layout)
self.assetIO_tabWidget.addTab(self.importTab, "")
self.ManageTab = QtWidgets.QWidget()
self.ManageTab.setObjectName("ManageTab")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.ManageTab)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.manage_dupliRemove_layout = QtWidgets.QHBoxLayout()
self.manage_dupliRemove_layout.setObjectName("manage_dupliRemove_layout")
self.manage_duplicateSel_btn = QtWidgets.QPushButton(self.ManageTab)
self.manage_duplicateSel_btn.setObjectName("manage_duplicateSel_btn")
self.manage_dupliRemove_layout.addWidget(self.manage_duplicateSel_btn)
self.manage_deleteSel_btn = QtWidgets.QPushButton(self.ManageTab)
self.manage_deleteSel_btn.setObjectName("manage_deleteSel_btn")
self.manage_dupliRemove_layout.addWidget(self.manage_deleteSel_btn)
self.verticalLayout_6.addLayout(self.manage_dupliRemove_layout)
self.manage_referenceObjects_list = QtWidgets.QListWidget(self.ManageTab)
self.manage_referenceObjects_list.setObjectName("manage_referenceObjects_list")
self.verticalLayout_6.addWidget(self.manage_referenceObjects_list)
self.assetIO_tabWidget.addTab(self.ManageTab, "")
self.verticalLayout_2.addWidget(self.assetIO_tabWidget)
AssetIO.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(AssetIO)
self.menubar.setGeometry(QtCore.QRect(0, 0, 574, 21))
self.menubar.setObjectName("menubar")
AssetIO.setMenuBar(self.menubar)
self.retranslateUi(AssetIO)
self.assetIO_tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(AssetIO)
def retranslateUi(self, AssetIO):
AssetIO.setWindowTitle(QtWidgets.QApplication.translate("AssetIO", "Asset IO", None, -1))
self.eportAssetName_label.setText(QtWidgets.QApplication.translate("AssetIO", "Asset Name", None, -1))
self.exportAssetName_browseButton.setText(QtWidgets.QApplication.translate("AssetIO", "Browse", None, -1))
self.exportCategory_label.setText(QtWidgets.QApplication.translate("AssetIO", "Category", None, -1))
self.exportCategoryAdd_button.setText(QtWidgets.QApplication.translate("AssetIO", "Add", None, -1))
self.exportFrameSel_checkbox.setText(QtWidgets.QApplication.translate("AssetIO", "Auto Frame Selected", None, -1))
self.export_breakRef_chkbox.setText(QtWidgets.QApplication.translate("AssetIO", "Break References", None, -1))
self.exportDescription_grpBox.setTitle(QtWidgets.QApplication.translate("AssetIO", "Description", None, -1))
self.exportAll_button.setText(QtWidgets.QApplication.translate("AssetIO", "Export All", None, -1))
self.exportSelected_button.setText(QtWidgets.QApplication.translate("AssetIO", "Export Selected", None, -1))
self.assetIO_tabWidget.setTabText(self.assetIO_tabWidget.indexOf(self.exportTab), QtWidgets.QApplication.translate("AssetIO", "Export", None, -1))
self.importCategory_label.setText(QtWidgets.QApplication.translate("AssetIO", "Category Filter", None, -1))
self.importSearch_label.setText(QtWidgets.QApplication.translate("AssetIO", "Search", None, -1))
self.importProperties_grpBox.setTitle(QtWidgets.QApplication.translate("AssetIO", "Properties", None, -1))
self.importPropertiesName_label.setText(QtWidgets.QApplication.translate("AssetIO", "Name", None, -1))
self.importPropertiesName_display.setText(QtWidgets.QApplication.translate("AssetIO", "-", None, -1))
self.importPropertiesCategory_label.setText(QtWidgets.QApplication.translate("AssetIO", "Category", None, -1))
self.importPropertiesCategory_display.setText(QtWidgets.QApplication.translate("AssetIO", "-", None, -1))
self.importPropertiesDescription_label.setText(QtWidgets.QApplication.translate("AssetIO", "Description", None, -1))
self.importPropertiesMod_label.setText(QtWidgets.QApplication.translate("AssetIO", "Last Modified", None, -1))
self.importPropertiesMod_display.setText(QtWidgets.QApplication.translate("AssetIO", "-", None, -1))
self.importProperties_locator_checkbox.setText(QtWidgets.QApplication.translate("AssetIO", "Group to Locator", None, -1))
self.importProperties_selectable_checkbox.setText(QtWidgets.QApplication.translate("AssetIO", "Selectable", None, -1))
self.importImport_button.setText(QtWidgets.QApplication.translate("AssetIO", "Import", None, -1))
self.importReference_button.setText(QtWidgets.QApplication.translate("AssetIO", "Reference", None, -1))
self.assetIO_tabWidget.setTabText(self.assetIO_tabWidget.indexOf(self.importTab), QtWidgets.QApplication.translate("AssetIO", "Import", None, -1))
self.manage_duplicateSel_btn.setText(QtWidgets.QApplication.translate("AssetIO", "Duplicate Selected", None, -1))
self.manage_deleteSel_btn.setText(QtWidgets.QApplication.translate("AssetIO", "Delete Selected", None, -1))
self.assetIO_tabWidget.setTabText(self.assetIO_tabWidget.indexOf(self.ManageTab), QtWidgets.QApplication.translate("AssetIO", "Manage", None, -1))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by Bastian Schnell <bastian.schnell@idiap.ch>
#
import torch.nn as nn
class Pooling(nn.Module):
def __init__(self, batch_first):
super().__init__()
self.batch_first = batch_first
def extra_repr(self):
return "batch_first={}".format(self.batch_first)
def get_output_length(self, seq_lengths_input):
return seq_lengths_input.fill_(1)
def select_inputs(self, input_, **kwargs):
return input_, kwargs.pop("seq_lengths_input", None)
class SelectLastPooling(Pooling):
def __init__(self, batch_first):
super(SelectLastPooling, self).__init__(batch_first)
def forward(self, input_):
input_, lengths = input_
batch_dim = 0 if self.batch_first else 1
batch_size = input_.shape[batch_dim]
if lengths is None:
time_dim = 1 if self.batch_first else 0
seq_len_indices = [input_.shape[time_dim] - 1] * batch_size
else:
seq_len_indices = [length - 1 for length in lengths]
batch_indices = [i for i in range(batch_size)]
if self.batch_first:
return input_[batch_indices, seq_len_indices].unsqueeze(dim=1)
else:
return input_[seq_len_indices, batch_indices].unsqueeze(dim=0)
class MeanPooling(Pooling):
def __init__(self, batch_first):
super().__init__(batch_first)
self.time_dim = 1 if batch_first else 0
def forward(self, input_):
input_, lengths = input_
input_sum = input_.sum(self.time_dim, keepdim=True)
        # lengths must be provided here: one valid-length entry per sequence.
        batch_size = len(lengths)
        missing_dims = [1] * max(0, input_sum.ndim - 2)
        if self.batch_first:
            lengths = lengths.view(batch_size, 1, *missing_dims).float()
        else:
            lengths = lengths.view(1, batch_size, *missing_dims).float()
input_mean = input_sum / lengths
return input_mean
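if __name__ == "__main__":
    # Minimal sanity-check sketch (shapes and values are illustrative): mean-pool
    # a padded batch in which the second sequence has only 3 valid time steps.
    import torch
    pool = MeanPooling(batch_first=True)
    x = torch.randn(2, 5, 3)           # (batch, time, features)
    lengths = torch.tensor([5, 3])     # valid length of each sequence
    print(pool((x, lengths)).shape)    # expected: torch.Size([2, 1, 3])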
|
from discord.ext import commands
import discord
import json
from discord.utils import get
from discord import Embed
import os.path
from os import path
##
# This is a helper function to playlist.py to manage the JSON file I/O
##
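# The per-user JSON file managed below maps playlist names to lists of songs,
# for example (playlist names and song titles are illustrative):
# {
#     "chill": ["song A", "song B"],
#     "workout": ["song C"]
# }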
def logUpdate(ctx, songName): # automatically logs every song a user plays in a text file named after their user ID
user_file = os.path.join("SongLog",str(ctx.author.id))
user_file = str(user_file) + ".txt"
user_write = open(user_file,"a")
user_write.write(str(songName) + "\n")
user_write.close()
def playlist_read(listname,ctx):
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
i = 1
try:
with open(userpath,"r") as fileRead:
data = json.load(fileRead)
specific = data[listname]
final = ""
for item in specific:
final += str(i) + ": " + item + "\n"
i = i + 1
return final
    except Exception as error:
        print(error)
        return "Failed-Process"
def list_playlist(ctx): #function to list all of the users playlists.
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
i = 1
final = ""
try:
with open(userpath,"r") as file_read:
data = json.load(file_read)
for key in data:
final+= str(i) + ": " + key + "\n"
i = i + 1
return final
except Exception as error:
return "Failed-Process"
def new_playlist(ctx,playlist_name,now_playing): #function to create a new playlist in the JSON file or make a JSON file if none exists for the user
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
if path.exists(userpath):
with open(userpath,"r") as read_file:
data = json.load(read_file)
temp = [now_playing]
data[playlist_name] = temp
dataFinal = json.dumps(data, indent = 1)
help_newplaylist(ctx,dataFinal)
else:
dataStart = {playlist_name:[now_playing]}
with open(userpath,"w") as write_file:
json.dump(dataStart,write_file)
def help_newplaylist(ctx,data): # helper that writes the playlist JSON directly; it performs no safety checks, so validation must happen beforehand
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
file = open(userpath,"w")
file.write(data)
file.close()
#with open(userpath,"w") as write_file: Alternative way to open the file.
#json.dump(data,write_file) This will not work as intended.
def delete_playlist(ctx,playlist_name):
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
if path.exists(userpath):
with open(userpath,"r") as read_file:
data = json.load(read_file)
try:
data.pop(playlist_name)
dataFinal = json.dumps(data, indent = 1)
help_newplaylist(ctx,dataFinal)
return "Done"
except Exception as error:
return "Not-Found"
else:
return "No-Playlists"
def delete_from_playlist(ctx, playlist_name,selection):
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
if path.exists(userpath):
with open(userpath,"r") as read_file:
try:
data = json.load(read_file)
data[playlist_name].pop(selection - 1)
dataFinal = json.dumps(data, indent = 1)
help_newplaylist(ctx,dataFinal)
return "Done"
except Exception as error:
return "Not-Found"
else:
return "No-Playlists"
def add_to_playlist(ctx,playlist_name,now_playing) -> bool: # Reads json, finds playlists and add song then uses help_newplaylist to write back.
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
if path.exists(userpath):
try:
with open(userpath,"r") as read_file:
data = json.load(read_file)
temp = [now_playing]
data[playlist_name] += temp
dataFinal = json.dumps(data, indent = 1)
help_newplaylist(ctx,dataFinal)
return True
except Exception as error:
            return False
    return False  # the user has no playlist file yet, so nothing can be added
def play_playlist(ctx,playlist_name): # loads songs from a playlist to be parsed by the calling function
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
if path.exists(userpath):
with open(userpath,"r") as read_file: # using with auto closes the file after.
data = json.load(read_file)
if playlist_name in data:
songlist = data[playlist_name]
return songlist
else:
                return False  # the playlist doesn't exist; music.py catches this and reports it
else:
return False #same as above comment
def rename_playlist(ctx,raw_input) -> bool:
userpath = os.path.join("Playlist",str(ctx.author.id))
userpath = str(userpath) + ".json"
splitNames = raw_input.split(',')
try:
if splitNames[0] is not None and splitNames[1] is not None:
data = ""
specific = ""
try:
with open(userpath,"r") as fileRead:
data = json.load(fileRead)
specific = data[splitNames[0]]
with open(userpath,"w") as fileRead:
data.pop(splitNames[0]) #pop off old playlist
data[splitNames[1]] = specific #store the same data as a new list.
dataFinal = json.dumps(data, indent = 1)
help_newplaylist(ctx,dataFinal)
return "Success"
except Exception as error:
print(error)
return "No-List"
except Exception as error:
return "Invalid-Input"
|
"""
GRIB - Contouring with Gradient Shading
"""
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import metview as mv
# get data
use_mars = False
if use_mars:
# retrieve data from MARS
t = mv.retrieve(
class_="era5",
stream="moda",
levtype="sfc",
param="2t",
date=20200101,
time=0,
grid=[1, 1],
)
else:
# read data from GRIB file
filename = "era5_t2_jan.grib"
if mv.exist(filename):
t = mv.read(filename)
else:
t = mv.gallery.load_dataset(filename)
# define coastlines
coast = mv.mcoast(map_grid="off", map_label="off")
# define view
view = mv.geoview(
map_projection="robinson",
subpage_y_position=14,
subpage_y_length=86,
coastlines=coast,
page_frame="off",
subpage_frame="off",
)
# define contouring
cont = mv.mcont(
legend="on",
contour="off",
contour_level_selection_type="level_list",
contour_level_list=[-45, -20, 0, 20, 45],
contour_label="off",
contour_shade="on",
contour_shade_colour_method="gradients",
contour_shade_method="area_fill",
contour_gradients_colour_list=[
"RGB(0.1532,0.1187,0.5323)",
"RGB(0.5067,0.7512,0.8188)",
"RGB(0.9312,0.9313,0.9275)",
"RGB(0.9523,0.7811,0.3104)",
"RGB(0.594,0.104,0.104)",
],
contour_gradients_step_list=20,
)
# define legend
legend = mv.mlegend(
legend_box_mode="positional",
legend_text_font_size=0.4,
legend_box_y_position=1,
legend_box_y_length=1.5,
legend_entry_border="off",
legend_label_frequency=10,
)
# define title
title = mv.mtext(
text_line_1="ERA5 T2 Monthly Mean 2020 January 0UTC", text_font_size=0.6
)
# define the output plot file
mv.setoutput(mv.pdf_output(output_name="gradient_shading"))
# generate plot
mv.plot(view, t, cont, title, legend)
|
import sys
from faster_solution import run_faster_version
def run():
args = sys.argv
run_faster_version(args)
if __name__ == '__main__':
run()
|
#! /usr/bin/env python
# A tiny framework that makes it easy to write Test Data Builders in Python
# Port of Java make-it-easy by Nat Pryce
# Copyright (C) 2013 Dori Reuveni
# E-mail: dorireuv AT gmail DOT com
from setuptools import setup
import os
import re
__version__ = __author__ = __license__ = ''
fh = open(os.path.join(os.path.dirname(__file__), 'make_it_easy', '__init__.py'))
try:
for line in fh:
if re.match('^__[a-z_]+__.*', line):
exec(line)
finally:
if fh:
fh.close()
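# The loop above exec()s the dunder assignments found in make_it_easy/__init__.py;
# it expects lines of the form (values shown are illustrative):
# __version__ = '1.0.0'
# __author__ = 'Dori Reuveni'
# __license__ = 'Apache License 2.0'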
params = dict(
name='make-it-easy',
version=__version__,
packages=['make_it_easy'],
author=__author__,
author_email='dorireuv@gmail.com',
description='A tiny framework that makes it easy to write Test Data Builders in Python',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
keywords=['testing', 'test', 'tdd', 'unittest', 'builder', 'goos'],
url='https://www.github.com/dorireuv/make-it-easy',
license=__license__,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: Apache Software License',
],
tests_require=['PyHamcrest'],
test_suite='tests',
)
setup(**params)
|
''' We parse each document of each file from our data set '''
''' This results in text files with ID name and text content '''
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import nltk
import re
from numpy import zeros
import numpy as np
import math
import sys
import os
# Turns xml parsed file into separate documents
def docsToFiles(foldername, language):
backup = {}
folder_path = './DOCUMENTS/'
input_path = './' + foldername
for filename in os.listdir(input_path):
with open(input_path + '/' + filename) as in_file:
lines = [[line.rstrip('\n')] for line in in_file]
for line in lines:
if '<doc id=' in line[0]:
elements = line[0].split(' ')
docID = elements[1][4:-1]
url = elements[2][5:-1]
flt = []
backup[docID] = url
# print(elements)
elif '</doc>' in line[0]:
with open(folder_path + docID + '.txt', 'w') as f:
flt = re.sub(r'[^\w\s]', '', str(flt))
f.write(flt)
docID = None
else:
if docID and line[0] != '':
sl = []
sl.extend(line)
s = re.sub(r'[^\w\s]', '', sl[0])
tokens = nltk.word_tokenize(s.lower())
new = [w for w in tokens if not w in stopwords.words(language)]
stemmer = PorterStemmer()
final = [stemmer.stem(word) for word in new]
flt.extend(final)
return backup
# Creation of the posting list (index_dic), document length memory (Ndic),
# and word positions in the vectors (posDic).
# The index file is saved without per-file word counts.
def indexing(backup):
index_dic = {}
Ndic = {}
posDic = {}
position = 0
folder_path = './DOCUMENTS/'
for k in backup.keys():
docID = k
with open(folder_path + docID + '.txt', 'r') as inputs:
line = inputs.read().split(' ')
Ndic[docID] = len(line)
for token in line:
if token not in index_dic.keys():
index_dic[token] = {docID: 1}
posDic[token] = position
position += 1
else:
if docID in index_dic[token].keys():
index_dic[token][docID] += 1
else:
index_dic[token][docID] = 1
with open('index.txt', 'w') as index_output, open('positions.txt', 'w') as position_output:
for k, v in index_dic.items():
word, IDs = k, str([id for id in v.keys()])
IDs = re.sub(r'[^\w\s]', '', IDs)
index_output.write(word + ' ' + IDs + '\n')
for k, v in posDic.items():
word, pos = k, str(v)
position_output.write(word + ' ' + pos + '\n')
return Ndic, index_dic, posDic
# Creation of a vector space (matrix) containing all documents vectors
def doc_vector(ind, back, pos, docLen):
mapping = {}
matrix = zeros(shape=(len(back.keys()), len(ind.keys())))
pointer = 0
for i in back.keys():
file_words = set()
for k, v in ind.items():
if i in v.keys():
file_words.add(k)
for j in file_words:
tf = ind[j][i] / docLen[i]
idf = math.log(len(back.keys())/(len(ind[j])))
matrix[pointer][pos[j]] = tf * idf
mapping[i] = pointer
pointer += 1
with open('vectorSpace.txt', 'w') as vec, open('mapping.txt', 'w') as mapp:
np.savetxt(vec, matrix, fmt='%.6f')
for key, val in mapping.items():
docID, row = str(key), str(val)
mapp.write(docID + ' ' + row + '\n')
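# Worked tf-idf example for the weighting used above (numbers are illustrative):
# a term appearing 3 times in a 100-token document has tf = 3 / 100 = 0.03; if it
# occurs in 10 of 1000 documents, idf = ln(1000 / 10) ~= 4.605, so the stored
# matrix entry is 0.03 * 4.605 ~= 0.138.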
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Wrong number of arguments. You must run the program with the following command:\n \
python generator.py <parsed xml folder>')
sys.exit(0)
backup = docsToFiles(sys.argv[1], 'english')
    docLengths, index, positions = indexing(backup)
    doc_vector(index, backup, positions, docLengths)
|
# Generated by Django 2.2.5 on 2020-07-11 13:19
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0003_auto_20200711_1617'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='rights',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get(isamAppliance, directory_name, check_mode=False, force=False):
"""
Retrieving the list of suffixes for a particular federated directory
"""
return isamAppliance.invoke_get("Retrieving the list of suffixes for a particular federated directory",
"/isam/runtime_components/federated_directories/{0}/suffix/v1".format(
directory_name))
def add(isamAppliance, directory_name, suffix, use_ssl=False, client_cert_label=None,
check_mode=False,
force=False):
"""
Create a new suffix in a particular federated directory
"""
if force is True or _check(isamAppliance, directory_name, suffix) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Create a new suffix in a particular federated directory",
"/isam/runtime_components/federated_directories/{0}/suffix/v1".format(directory_name),
{
'suffix': suffix
})
return isamAppliance.create_return_object()
def delete(isamAppliance, directory_name, suffix_name, check_mode=False, force=False):
"""
Remove an existing suffix from a federated directory
"""
if force is True or _check(isamAppliance, directory_name, suffix_name) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Remove an existing suffix from a federated directory",
"/isam/runtime_components/federated_directories/{0}/suffix/{1}/v1".format(directory_name, suffix_name))
return isamAppliance.create_return_object()
def _check(isamAppliance, directory_name, suffix):
"""
Check if federated directory suffix exists - will return true if any match is found
:param isamAppliance:
:param directory_name:
:param suffix:
:return:
"""
ret_obj = get(isamAppliance, directory_name)
for suffix_obj in ret_obj['data']:
if isinstance(suffix, list): # Add passes a list
for new_suffix in suffix:
if new_suffix['id'] == suffix_obj['id']:
return True
else: # Update passes just suffix_name
if suffix_obj['id'] == suffix:
return True
return False
def compare(isamAppliance1, isamAppliance2, directory_name):
"""
    Compare federated directory suffixes between two appliances
"""
ret_obj1 = get(isamAppliance1, directory_name)
ret_obj2 = get(isamAppliance2, directory_name)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
|
from django import template
from ...product.templatetags.product_prices import BasePriceNode, parse_price_tag
register = template.Library()
class CartItemPriceNode(BasePriceNode):
def get_currency_for_item(self, item):
return item.cart.currency
def get_price(self, cartitem, currency, **kwargs):
return cartitem.get_price(currency=currency, **kwargs)
class CartItemUnitPriceNode(BasePriceNode):
def get_currency_for_item(self, item):
return item.cart.currency
def get_price(self, cartitem, currency, **kwargs):
return cartitem.get_unit_price(currency=currency, **kwargs)
@register.tag
def cartitem_price(parser, token):
try:
return CartItemPriceNode(*parse_price_tag(parser, token))
except (ImportError, NotImplementedError):
pass
return ''
@register.tag
def cartitem_unit_price(parser, token):
try:
return CartItemUnitPriceNode(*parse_price_tag(parser, token))
except (ImportError, NotImplementedError):
pass
return ''
|
import numpy as np
import numba as nb
import warnings
from numba.core.errors import NumbaDeprecationWarning, NumbaPerformanceWarning, NumbaWarning
warnings.simplefilter('ignore', category=NumbaDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
warnings.simplefilter('ignore', category=NumbaWarning)
@nb.jit(parallel=True)
def FoBaGreedy(A, y, tau, args = [100,5,1,True]):
"""
Forward-Backward greedy algorithm for sparse regression.
For more details see:
Zhang, Tong. 'Adaptive Forward-Backward Greedy Algorithm for Sparse Learning with Linear Models', NIPS, 2008
For relearn option, see github code for:
Thaler et al. 'Sparse identification of truncation errors,' JCP, 2019
Inputs:
A,y : from linear system Ax=y
tau : sparsity parameter
args : method specific arguments including:
maxit_f : max forward iterations
maxit_b : max backward iterations per backward call
backwards_freq : frequency of backwards method calls
relearn : see lines 48-59
Returns:
x : Sparse approximation to A^{-1}y
delta_tau : minimal change in tau to affect result
"""
maxit_f, maxit_b, backwards_freq, relearn = args
n,d = A.shape
F = {}
F[0] = set()
x = {}
x[0] = np.zeros((d,1))
k = 0
delta = {}
# We initially assume delta_tau is infinite and lower as needed
delta_tau = np.inf
for forward_iter in range(maxit_f):
k = k+1
# forward step
zero_coeffs = np.where(x[k-1] == 0)[0]
err_after_addition = []
residual = y - A.dot(x[k-1])
for i in zero_coeffs:
if relearn:
F_trial = F[k-1].union({i})
x_added = np.zeros((d,1))
x_added[list(F_trial)] = np.linalg.lstsq(A[:, list(F_trial)], y, rcond=None)[0]
else:
# Per figure 3 line 8 in Zhang, do not retrain old variables.
# Only look for optimal alpha, which is solving for new x if
# and only if columns of $A$ are orthogonal
alpha = A[:,i].T.dot(residual)/np.linalg.norm(A[:,i])**2
x_added = np.copy(x[k-1])
x_added[i] = alpha
err_after_addition.append(np.linalg.norm(A.dot(x_added)-y))
i = zero_coeffs[np.argmin(err_after_addition)]
F[k] = F[k-1].union({i})
x[k] = np.zeros((d,1))
x[k][list(F[k])] = np.linalg.lstsq(A[:, list(F[k])], y, rcond=None)[0]
# If improvement is sufficiently small, return last estimate
delta[k] = np.linalg.norm(A.dot(x[k-1]) - y)**2 - np.linalg.norm(A.dot(x[k]) - y)**2
if delta[k] <= tau:
return x[k-1], delta_tau
# Otherwise, how much larger would tolerance need to be to stop?
delta_tau = np.min([delta_tau, delta[k]-tau])
        # backward step, performed once every backwards_freq forward steps
if forward_iter % backwards_freq == 0 and forward_iter > 0:
dk = delta[k]
for backward_iter in range(maxit_b):
non_zeros = np.where(x[k] != 0)[0]
err_after_simplification = []
for j in non_zeros:
if relearn:
F_trial = F[k].difference({j})
x_simple = np.zeros((d,1))
x_simple[list(F_trial)] = np.linalg.lstsq(A[:, list(F_trial)], y, rcond=None)[0]
else:
x_simple = np.copy(x[k])
x_simple[j] = 0
err_after_simplification.append(np.linalg.norm(A.dot(x_simple) - y)**2)
j = np.argmin(err_after_simplification)
# check for break condition on backward step
# how much does error increase when subtracting a term?
delta_p = err_after_simplification[j] - np.linalg.norm(A.dot(x[k]) - y)**2
# Original cutoff from paper is based on improvement of kth term
if delta_p > 0.5*delta[k]: break
# Optionally, we can use the improvement from the last term added
# if delta_p > 0.5*dk: break
k = k-1;
F[k] = F[k+1].difference({j})
x[k] = np.zeros((d,1))
x[k][list(F[k])] = np.linalg.lstsq(A[:, list(F[k])], y, rcond=None)[0]
if k == 0: break
if np.count_nonzero(x[k]) == x[k].size: break
return x[k], delta_tau
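# Minimal usage sketch for FoBaGreedy (problem sizes and the tolerance are
# illustrative; a well-conditioned A is assumed):
# >>> A = np.random.randn(100, 20)
# >>> x_true = np.zeros((20, 1)); x_true[[2, 7]] = 1.0
# >>> y = A.dot(x_true)
# >>> x_hat, delta_tau = FoBaGreedy(A, y, tau=1e-8)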
@nb.jit(nopython=True)
def STRidge(A, y, tau, args=1e-5):
"""
    Sequential Threshold Ridge Regression algorithm for finding a sparse approximation to x = A^{-1}y.
Inputs:
A,y : from linear system Ax=y
tau : sparsity parameter
args : method specific arguments including:
lam : ridge penalty
Returns:
x : Sparse approximation to A^{-1}y
delta_tau : minimal change in tau to affect result
"""
m,n = A.shape
lam = args
# Solve least squares problem
x = np.linalg.solve(A.T @ A + lam*np.eye(n), A.T @ y)
# Threshold
G = np.where(np.abs(x) > tau)[0] # Set of active terms
Gc = np.where(np.abs(x) <= tau)[0] # Complimentary set (to be removed)
if len(G) != 0: delta_tau = np.min(np.abs(x[G])) - tau
else: delta_tau = np.inf
if len(Gc) == 0:
# No terms have been removed
return x, delta_tau
else:
# Terms were removed
xG, delta_tau_tilde = STRidge(A[:,G], y, tau, args)
for j in range(len(G)): x[G[j]] = xG[j]
for j in Gc: x[j] = 0
delta_tau = np.min(np.array([delta_tau, delta_tau_tilde]))
return x, delta_tau
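# STRidge can be called in the same way as FoBaGreedy above, with args holding
# the ridge penalty (the threshold and penalty values are illustrative):
# >>> x_hat, delta_tau = STRidge(A, y, tau=0.1, args=1e-5)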
|
from time import time
class PID:
def __init__(self, Kp, Ki, Kd, max_integral, min_interval = 0.001, set_point = 0.0, last_time = None):
self._Kp = Kp
self._Ki = Ki
self._Kd = Kd
self._min_interval = min_interval
self._max_integral = max_integral
self._set_point = set_point
self._last_time = last_time if last_time is not None else time()
self._p_value = 0.0
self._i_value = 0.0
self._d_value = 0.0
self._d_time = 0.0
self._d_error = 0.0
self._last_error = 0.0
self._output = 0.0
def update(self, cur_value, cur_time = None):
if cur_time is None:
cur_time = time()
error = self._set_point - cur_value
d_time = cur_time - self._last_time
d_error = error - self._last_error
if d_time >= self._min_interval:
self._p_value = error
            # Accumulate the integral of the error and clamp it to limit wind-up.
            self._i_value = min(max(self._i_value + error * d_time, -self._max_integral), self._max_integral)
self._d_value = d_error / d_time if d_time > 0 else 0.0
self._output = self._p_value * self._Kp + self._i_value * self._Ki + self._d_value * self._Kd
self._d_time = d_time
self._d_error = d_error
self._last_time = cur_time
self._last_error = error
return self._output
def reset(self, last_time = None, set_point = 0.0):
self._set_point = set_point
self._last_time = last_time if last_time is not None else time()
self._p_value = 0.0
self._i_value = 0.0
self._d_value = 0.0
self._d_time = 0.0
self._d_error = 0.0
self._last_error = 0.0
self._output = 0.0
def assign_set_point(self, set_point):
self._set_point = set_point
def get_set_point(self):
return self._set_point
def update_pid_factor(self, Kp, Ki, Kd):
#print 'pid factor updated: Kp=%f, Ki=%f, Kd=%f' % (Kp, Ki, Kd)
self._Kp = Kp
self._Ki = Ki
self._Kd = Kd
# def get_p_value(self):
# return self._p_value
# def get_i_value(self):
# return self._i_value
# def get_d_value(self):
# return self._d_value
# def get_delta_time(self):
# return self._d_time
# def get_delta_error(self):
# return self._d_error
# def get_last_error(self):
# return self._last_error
# def get_last_time(self):
# return self._last_time
# def get_output(self):
# return self._output
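if __name__ == "__main__":
    # Minimal usage sketch with illustrative gains; real gains must be tuned for
    # the process being controlled.
    pid = PID(Kp=1.0, Ki=0.1, Kd=0.01, max_integral=10.0)
    pid.assign_set_point(100.0)
    # Pretend a measurement of 90.0 arrives 0.1 s after the controller started.
    print(pid.update(90.0, cur_time=time() + 0.1))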
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetJobTemplateResult',
'AwaitableGetJobTemplateResult',
'get_job_template',
'get_job_template_output',
]
@pulumi.output_type
class GetJobTemplateResult:
def __init__(__self__, arn=None, job_executions_retry_config=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if job_executions_retry_config and not isinstance(job_executions_retry_config, dict):
raise TypeError("Expected argument 'job_executions_retry_config' to be a dict")
pulumi.set(__self__, "job_executions_retry_config", job_executions_retry_config)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="jobExecutionsRetryConfig")
def job_executions_retry_config(self) -> Optional['outputs.JobExecutionsRetryConfigProperties']:
return pulumi.get(self, "job_executions_retry_config")
class AwaitableGetJobTemplateResult(GetJobTemplateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobTemplateResult(
arn=self.arn,
job_executions_retry_config=self.job_executions_retry_config)
def get_job_template(job_template_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobTemplateResult:
"""
Job templates enable you to preconfigure jobs so that you can deploy them to multiple sets of target devices.
"""
__args__ = dict()
__args__['jobTemplateId'] = job_template_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:iot:getJobTemplate', __args__, opts=opts, typ=GetJobTemplateResult).value
return AwaitableGetJobTemplateResult(
arn=__ret__.arn,
job_executions_retry_config=__ret__.job_executions_retry_config)
@_utilities.lift_output_func(get_job_template)
def get_job_template_output(job_template_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobTemplateResult]:
"""
Job templates enable you to preconfigure jobs so that you can deploy them to multiple sets of target devices.
"""
...
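# A minimal usage sketch (not part of the generated SDK): inside a Pulumi program
# the two invoke variants above can be used roughly as follows; "my-job-template"
# and `some_resource` are hypothetical placeholders.
#
#     result = get_job_template(job_template_id="my-job-template")
#     pulumi.export("jobTemplateArn", result.arn)
#
#     # lifted variant, for when the id is itself a pulumi.Output:
#     result_output = get_job_template_output(job_template_id=some_resource.id)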
|
import logging
from util import unique
class LineupMapList(list):
def __init__(self, *args, **kwargs):
super(LineupMapList, self).__init__(*args, **kwargs)
def unique_channels(self, channel_filter=None):
return unique((channel
for lineup_map in self
for channel in lineup_map.channels
if channel_filter is None or channel_filter.pass_channel(lineup_map.lineup, channel)), lambda c: c.get_unique_id())
def unique_stations(self, channel_filter=None):
return unique((channel.station
for channel in self.unique_channels(channel_filter)), lambda s: s.station_id)
|
# Generated by Django 3.1.5 on 2021-05-01 13:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20210501_1930'),
]
operations = [
migrations.RemoveField(
model_name='blog',
name='catagories',
),
migrations.AddField(
model_name='blog',
name='catagories',
field=models.ManyToManyField(null=True, to='blog.Catagory'),
),
]
|
"""
Composition-based decision tree for anomaly detection
-------------------------------
CDT detector.
:authors: Ines Ben Kraiem & Geoffrey Roman-Jimenez
:copyright:
Copyright 2020 SIG Research Group, IRIT, Toulouse-France.
"""
import numpy as np
import uuid
import itertools
import copy
from helper import convert_label_RCIS
def gini_impurity(classes, nclasses):
# calculation of probabilities (or fractions of observations)
prob = [0.0 for _ in range(nclasses)]
N = len(classes)
for obs in classes:
prob[obs] += 1/N
return sum([ p* (1-p) for p in prob])
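# Worked example of the impurity above: for classes [0, 0, 1, 1] with nclasses=2
# the class fractions are [0.5, 0.5], so the Gini impurity is
# 0.5*0.5 + 0.5*0.5 = 0.5 (maximally mixed), while a pure node such as
# [0, 0, 0, 0] gives 1*0 + 0*1 = 0.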
class node_tree():
def __init__(self, observations, classes, gini, parent, split_rule):
""" Definition of node.
Parameters
----------
observations : set
The set of observations considered in this node
classes : list
The list of classes corresponding to observations.
gini : float
gini index of the current node
parent : object
node-parent of the current node
        split_rule : dict or None
            the split test {"composition": list, "condition": bool} that led from
            the parent to this node (None for the root node)
"""
self.observations = observations
self.classes = classes
self.gini = gini
self.parent = parent
self.split_rule = split_rule
self.id = uuid.uuid1()
def dict(self):
d = {"observations": self.observations, "classes": self.classes,
"gini": self.gini, "id":self.id, "parent": self.parent,
"split_rule": self.split_rule}
return d
def list_of_all_possible_composition(observations):
# Calculate all the compositions of the observations that have a class 'anomaly'.
listofcomposition = []
for o in observations:
for i, j in itertools.combinations(range(len(o) + 1), 2):
            if len(o[i:j]) > 1 and o[i:j] not in listofcomposition:
listofcomposition.append(o[i:j])
return sorted(listofcomposition, key=len)
def islistinlist(s, l):
if len(s) > len(l):
return False
else:
return True in [ s == sl for sl in [ l[index:index+len(s)] for index in range(len(l)-len(s)+1) ] ]
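# e.g. islistinlist([2, 3], [1, 2, 3, 4]) is True (contiguous sub-sequence),
# whereas islistinlist([3, 2], [1, 2, 3, 4]) is False (order matters).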
class composition_tree():
def __init__(self, nclasses=2, iteration_max=10000, epsilon = 1e-6, inter_type=0):
self.nclasses = nclasses
self.queue = []
self.tree = []
self.root = None
self.epsilon = epsilon
self.iteration_max = iteration_max
self.inter_type = inter_type
self.window_size = None
self.nblabels = None
def split(self, node):
# Split the node in [node true, node false] by maximizing the gain of Gini.
observations = node.observations
classes = node.classes
parent = node.id
gini_origin = node.gini
gini_true = 0
gini_false = 0
best_composition = None
observations_true, observations_false = [], []
classes_true, classes_false = [], []
observations_with_anomaly = [o for o, c in zip(observations, classes) if c!=0]
classes_with_anomaly = [c for c in classes if c!=0]
gain_gini_max = 0
for composition in list_of_all_possible_composition(observations_with_anomaly):
# split the nodes according to the presence or not of the composition
_classes_true = [ c for o, c in zip(observations, classes)
if islistinlist(composition, o) ]
_gini_true = gini_impurity(_classes_true, self.nclasses)
_classes_false = [ c for o, c in zip(observations, classes)
if not islistinlist(composition, o) ]
_gini_false = gini_impurity(_classes_false, self.nclasses)
N, N_true, N_false = len(classes), len(_classes_true), len(_classes_false)
gain_gini = gini_origin-(((N_true/N)*_gini_true)+((N_false/N)*_gini_false))
if gain_gini > gain_gini_max:
gain_gini_max = gain_gini
gini_true = _gini_true
gini_false = _gini_false
best_composition = composition
observations_true = [ o for o in observations
if islistinlist(composition, o) ]
observations_false = [ o for o in observations
if not islistinlist(composition, o) ]
classes_true = _classes_true
classes_false = _classes_false
gain_gini = gain_gini_max
split_rule_true = {"composition": best_composition, "condition": True}
split_rule_false = {"composition": best_composition, "condition": False }
node_true = node_tree(observations_true, classes_true,
gini_true, parent, split_rule_true)
node_false = node_tree(observations_false, classes_false,
gini_false, parent, split_rule_false)
return [node_true, node_false], gain_gini
def fit(self, observations, classes):
""" Fit CDT to the time series data.
Parameters
----------
        observations : list
            list containing the windowed labeled time series data (one label sequence per window).
        classes : list
            list containing the class corresponding to each observation (window).
"""
gini = gini_impurity(classes, self.nclasses)
self.root = node_tree(observations, classes, gini, 0, None)
self.window_size = min([len(o) for o in observations])
self.nblabels = len(set([ l for o in observations for l in o]))
self.queue = [self.root]
self.tree = [self.root]
# Tree construction
n=0
while not len(self.queue) == 0 and n < self.iteration_max:
node = self.queue.pop(0)
splitted_nodes, gain_gini = self.split(node)
for _node in splitted_nodes:
if len(_node.classes) > 0 and _node.gini > self.epsilon:
self.queue.append(_node)
if len(_node.classes) > 0:
self.tree.append(_node)
n+=1
def rules_per_class(self):
leaves = self.get_leaves()
branches = [(l.classes, self.get_branch(l)) for l in leaves]
rules_per_class = [[] for _ in range(self.nclasses)]
for i, (classes, branch) in enumerate(branches):
if not len(classes) == 0:
setclasses =[x for i, x in enumerate(classes) if i == classes.index(x)]
c = max(setclasses, key = classes.count)
listofrule = [n for n in branch if n.split_rule]
rules_per_class[c].append(listofrule)
return rules_per_class
def get_parent(self, node):
for _node in self.tree:
if _node.id == node.parent:
return _node
def get_childrens(self, node):
childrens = []
for _node in self.tree:
if _node != self.root and _node.parent == node.id:
childrens.append(_node)
return childrens
def get_leaves(self):
leaves = []
for node in self.tree:
if node.gini <= self.epsilon:
leaves.append(node)
else:
childrens = self.get_childrens(node)
if len(childrens) == 0:
leaves.append(node)
return leaves
def get_branch(self, leaf):
branch = []
node = leaf
while node != self.root:
branch.append(node)
node = self.get_parent(node)
return branch
    def class_of_node(self, node):  # majority vote in an impure leaf
setclasses =[x for i, x in enumerate(node.classes) if i == node.classes.index(x)]
c = max( setclasses, key = node.classes.count)
return c
def which_leaf(self, observation):
""" Classify a new observation.
Parameters
----------
        observation : list
            a windowed observation from a test dataset.
        Returns
        -------
        leaf : node_tree
            the leaf node reached by following the split rules
class_of_leaf: 0 (normal) or 1 (anomaly)
"""
_leaf = self.root
childrens = self.get_childrens(_leaf)
while not len(childrens) == 0:
for children in childrens:
rule = children.split_rule
                checkrule = rule["condition"] == islistinlist(rule["composition"], observation)
if checkrule:
_leaf = children
childrens = self.get_childrens(_leaf)
class_of_leaf = self.class_of_node(_leaf)
leaf = _leaf
return leaf, class_of_leaf
    def anomaly_rules(self):
        """ Extract and simplify rules from CDT.
Returns
-------
rpc : dictionary
rules per class.
"""
rpc = self.rules_per_class()
rpc = [[[{"split_rule": {
"composition": [convert_label_RCIS(c) for c in r.split_rule["composition"]],
"condition": r.split_rule["condition"] },
"classes":[r.classes.count(0), r.classes.count(1)]
}
for r in rb] for rb in rules] for rules in rpc]
return rpc
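# A minimal, self-contained usage sketch on synthetic data (not from the original
# experiments): class 1 marks windows that contain the composition [2, 3].
if __name__ == "__main__":
    demo_observations = [[1, 2, 3, 1], [1, 1, 2, 3], [1, 2, 1, 2], [3, 2, 1, 1]]
    demo_classes = [1, 1, 0, 0]
    cdt = composition_tree(nclasses=2)
    cdt.fit(demo_observations, demo_classes)
    leaf, predicted = cdt.which_leaf([2, 3, 1, 1])
    print("Predicted class:", predicted)  # expected 1, since [2, 3] occurs in the window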
|
from gennav.planners.base import Planner # noqa: F401
from gennav.planners.potential_field import PotentialField # noqa: F401
from gennav.planners.prm import PRM, PRMStar # noqa: F401
from gennav.planners.rrt import RRG, RRT, InformedRRTstar, RRTConnect # noqa: F401
|
from django.urls import path
from . import admin
urlpatterns = [
path("generic_inline_admin/admin/", admin.site.urls),
]
|
#!/usr/local/bin/python3
import sys
import sqlite3
import configparser
# read dbpath from config file
config = configparser.ConfigParser()
config.read('sstt.config')
dbpath = config.get('database','dbpath',fallback='sstt.db')
conn = sqlite3.connect(dbpath)
conn.row_factory = sqlite3.Row
conn.execute("""
create table if not exists tbl_times
(id integer primary key not null,
status text default 'A',
project text not null,
start datetime default current_timestamp,
end datetime default null);
""")
# ok so we need 2 arguments:
# project_name & command (start|stop)
if len(sys.argv) < 2:
print("Missing arguments")
sys.exit(-1)
cmd = sys.argv[1].upper()
try:
project = sys.argv[2]
except IndexError:
project = "-"
if cmd == "START":
    # do we have an open project called that already? if so ignore start command
    res = conn.execute("""select id from tbl_times where project=? and end is null order by id desc limit 1""",[project])
    if res.fetchone() is not None:
        print("Already started: ", project)
    else:
        print("starting: ", project)
        conn.execute("""insert into tbl_times (project) values (?)""",[project])
        conn.commit()
elif cmd == "STOP":
# do we have an open project called that? if so close it
res = conn.execute("""select id from tbl_times where project=? and end is null order by id desc limit 1""",[project])
row = res.fetchone()
    if row is not None:
print("Stopping: ", project)
conn.execute("""update tbl_times set end=current_timestamp where id=?""",[row[0]])
conn.commit()
elif cmd == "REPORT":
# do a report on all projects grouped by project
for row in conn.execute("""
select project,
strftime('%Y-%m',start) yr_mon,
round(sum(julianday(end) - julianday(start))*24*60,0) as mins
from tbl_times
group by yr_mon,project
"""):
t = divmod(row['mins'],60)
print("%s: %s - %d:%d hrs" % (row['yr_mon'],row['project'],t[0],t[1]))
elif cmd == "LIST":
# show listing of all items in given project
print()
for row in conn.execute("""select id,status,project,start,end from tbl_times where project=? order by id""",[project]):
print(" > %d | %s | %s | %s | %s" % (row['id'],row['status'],row['start'],row['end'],row['project']))
print()
elif cmd == "PROJECTS":
# show listing of all project names
print()
for row in conn.execute("""select distinct project from tbl_times order by project"""):
print(" > %s" % (row['project']))
print()
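# Example invocations (the file name sstt.py is an assumption; commands are
# case-insensitive because of the .upper() above):
#   ./sstt.py start myproject    -> open a time entry for "myproject"
#   ./sstt.py stop myproject     -> close the most recent open entry for it
#   ./sstt.py report             -> minutes per project, grouped by month
#   ./sstt.py list myproject     -> raw rows recorded for one project
#   ./sstt.py projects           -> list the distinct project names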
|
src = Split('''
yts_main.c
''')
component = aos_component('testcase', src)
component.add_global_includes('include')
component.add_comp_deps('test/yunit')
if aos_global_config.compiler == 'gcc':
component.add_cflags( "-Wall" )
component.add_cflags( "-Werror")
|
import struct
import textwrap
import secrets
import logging
from shellerate import encoder
from binascii import unhexlify, hexlify
from shellerate import strings
# First version with clear text decoder stub: https://www.virustotal.com/#/file/7b25b33a1527d2285ebdefd327bc72b6d932c140489e8bfb7424bef115aa2ecd/detection
class Xority(encoder.Encoder):
def __init__(self, shellcode):
super(Xority, self).__init__(shellcode)
self.debug=True;
# val is the hex representation of the register without 0x
# calc_xor_key("d2b00bcd") -> 62bbc61f
def calc_xor_key(self, val):
key = ""
a=strings.split(val)
b=[]
b.append(self.xor_str(a[0], secrets.token_hex(1)))
b.append(self.xor_str(a[1], secrets.token_hex(1)))
b.append(self.xor_str(a[2], secrets.token_hex(1)))
b.append(self.xor_str(a[3], secrets.token_hex(1)))
return ''.join(b)
def encode(self, output_format="c"):
padded_shellcode = strings.pad(self.shellcode())
output = ""
for c in textwrap.wrap(padded_shellcode, 16):
val=strings.from_char_to_hexcode(c)
key=self.calc_xor_key(val)
encoded=self.xor_str(val, key)
logging.debug("V: %s" %val)
logging.debug("K: %s" %key.zfill(8))
logging.debug("E: %s" %encoded.zfill(8))
output+=key.zfill(8)
output+=encoded.zfill(8)
mark=secrets.token_hex(4)
output+=mark
output+=mark
a=strings.split(output)
if output_format == "c":
return ''.join('\\x'+x.zfill(2) for x in a)
if output_format == "asm":
o = ''.join('0x'+x.zfill(2)+', ' for x in a)
if output_format == "raw":
return ''.join(x.zfill(2) for x in a)
return o[:-2]
def payload(self, output_format="c"):
stub_raw = "eb225e8d3e31c031db31c931d28b1c0604048b140631d339cb740e891f83c7040404ebe9e8d9ffffff"
if output_format == "raw":
return stub_raw + self.encode("raw")
a=strings.split(stub_raw)
if output_format == "c":
stub=''.join('\\x'+x.zfill(2) for x in a)
if output_format == "asm":
stub = ''.join('0x'+x.zfill(2)+', ' for x in a)
return stub +self.encode(output_format)
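# A minimal usage sketch (hedged: it assumes the base shellerate Encoder simply
# stores the shellcode string handed to the constructor, as the super().__init__
# call above suggests; the byte string below is a placeholder, not a working payload):
#
#     raw = "\x31\xc0\x50\x68"        # placeholder shellcode bytes
#     xority = Xority(raw)
#     print(xority.payload("c"))      # decoder stub + XOR-encoded payload as \x.. escapes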
|
import caffe
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
plt.rcParams['font.size'] = 20
# plt.rcParams['xtick.labelsize'] = 18
def make_2d(data):
return np.reshape(data, (data.shape[0], -1))
caffe.set_mode_gpu()
caffe.set_device(0)
caffe_root = '/home/moritz/Repositories/caffe_lp/'
model_root = 'examples/low_precision/imagenet/models/'
snapshot_root = '/media/moritz/Data/ILSVRC2015/Snapshots/'
# prototxt_file = caffe_root + model_root + 'VGG16_deploy_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_0_7_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_1_6_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_2_5_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_0_15_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_1_14_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_2_13_vis.prototxt'
# prototxt_file = caffe_root + model_root + 'LP_VGG16_3_12_vis.prototxt'
prototxt_file = caffe_root + model_root + 'LP_VGG16_5_10_vis.prototxt'
# weights_file = '/home/moritz/Downloads/VGG16_tmp/' + 'LP_VGG16.caffemodel.h5'
# weights_file = '/home/moritz/Downloads/VGG16_tmp/' + 'HP_VGG16.caffemodel'
weights_file = '/home/moritz/Downloads/VGG16_tmp/' + 'converted_hp_weights.caffemodel'
net = caffe.Net(prototxt_file, weights_file, caffe.TEST)
print('Doing forward pass...')
net.forward()
print('Done.')
# %matplotlib inline
print("All params in the net: {}".format(net.params.keys()))
print("All blobs in the net: {}".format(net.blobs.keys()))
# Extract the data
data = net.blobs['data'].data
labels = net.blobs['label'].data
print('Input data shape: {}'.format(data.shape))
# Build a translation dictionary for the labels that converts label to text
trans_dict = {0.: 'Left', 1.: 'Center', 2.: 'Right', 3.: 'Not Visible'}
# Pick out four images to process
show_data = [0, 2, 3, 1]
# plt.figure(1, figsize=(8, 8))
# for iter_num, d_idx in enumerate(show_data):
# plt.subplot(2, 2, iter_num + 1)
# plt.imshow(data[d_idx, 0, :, :], interpolation='nearest', cmap='gray')
# # plt.title(trans_dict[labels[d_idx]])
# plt.colorbar()
# plt.draw()
print('Start plotting the weights')
binwidth = 0.001
bd = 5
ad = 10
ymax = 100
x_min = -0.5
x_max = 0.5
nb_bins = 100
if 'LP' in prototxt_file:
hp = False
l_idx1 = 'conv_lp_1'
l_idx2 = 'conv_lp_3'
l_idx3 = 'conv_lp_6'
l_idx4 = 'conv_lp_8'
l_idx5 = 'conv_lp_11'
l_idx6 = 'conv_lp_13'
l_idx7 = 'conv_lp_15'
l_idx8 = 'conv_lp_18'
l_idx9 = 'conv_lp_20'
l_idx10 = 'conv_lp_22'
l_idx11 = 'conv_lp_25'
l_idx12 = 'conv_lp_27'
l_idx13 = 'conv_lp_29'
l_idx14 = 'fc_lp_32'
l_idx15 = 'fc_lp_34'
l_idx16 = 'fc_lp_36'
else:
hp = True
l_idx1 = 'conv1_1'
l_idx2 = 'conv1_2'
l_idx3 = 'conv2_1'
l_idx4 = 'conv2_2'
l_idx5 = 'conv3_1'
l_idx6 = 'conv3_2'
l_idx7 = 'conv3_3'
l_idx8 = 'conv4_1'
l_idx9 = 'conv4_2'
l_idx10 = 'conv4_3'
l_idx11 = 'conv5_1'
l_idx12 = 'conv5_2'
l_idx13 = 'conv5_3'
l_idx14 = 'fc6'
l_idx15 = 'fc7'
l_idx16 = 'fc8'
print(hp)
# hp = True
if hp:
# plt.figure(2, figsize=(16, 8))
# plt.subplot(2, 4, 1)
# plt.title('High precision Weight Matrix {}'.format(l_idx1))
# plt.imshow(make_2d(net.params[l_idx1][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 2)
# plt.title('High precision Weight Matrix {}'.format(l_idx2))
# plt.imshow(make_2d(net.params[l_idx2][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 3)
# plt.title('High precision Weight Matrix {}'.format(l_idx3))
# plt.imshow(make_2d(net.params[l_idx3][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 4)
# plt.title('High precision Weight Matrix {}'.format(l_idx4))
# plt.imshow(make_2d(net.params[l_idx4][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 5)
# plt.title('High precision Weight Matrix {}'.format(l_idx1))
# show_data = net.params[l_idx1][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 6)
# plt.title('High precision Weight Matrix {}'.format(l_idx2))
# show_data = net.params[l_idx2][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 7)
# plt.title('High precision Weight Matrix {}'.format(l_idx3))
# show_data = net.params[l_idx3][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 8)
# plt.title('High precision Weight Matrix {}'.format(l_idx4))
# show_data = net.params[l_idx4][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.show()
# plt.figure(3, figsize=(16, 8))
# plt.subplot(2, 4, 1)
# plt.title('High precision Weight Matrix {}'.format(l_idx5))
# plt.imshow(make_2d(net.params[l_idx5][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 2)
# plt.title('High precision Weight Matrix {}'.format(l_idx6))
# plt.imshow(make_2d(net.params[l_idx6][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 3)
# plt.title('High precision Weight Matrix {}'.format(l_idx7))
# plt.imshow(make_2d(net.params[l_idx7][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 4)
# plt.title('High precision Weight Matrix {}'.format(l_idx8))
# plt.imshow(make_2d(net.params[l_idx8][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 5)
# plt.title('High precision Weight Matrix {}'.format(l_idx5))
# show_data = net.params[l_idx5][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 6)
# plt.title('High precision Weight Matrix {}'.format(l_idx6))
# show_data = net.params[l_idx6][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 7)
# plt.title('High precision Weight Matrix {}'.format(l_idx7))
# show_data = net.params[l_idx7][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 8)
# plt.title('High precision Weight Matrix {}'.format(l_idx8))
# show_data = net.params[l_idx8][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.tight_layout()
# plt.show()
# plt.figure(4, figsize=(16, 8))
# plt.subplot(2, 4, 1)
# plt.title('High precision Weight Matrix {}'.format(l_idx9))
# plt.imshow(make_2d(net.params[l_idx9][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 2)
# plt.title('High precision Weight Matrix {}'.format(l_idx10))
# plt.imshow(make_2d(net.params[l_idx10][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 3)
# plt.title('High precision Weight Matrix {}'.format(l_idx11))
# plt.imshow(make_2d(net.params[l_idx11][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 4)
# plt.title('High precision Weight Matrix {}'.format(l_idx12))
# plt.imshow(make_2d(net.params[l_idx12][0].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 5)
# plt.title('High precision Weight Matrix {}'.format(l_idx9))
# show_data = net.params[l_idx9][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 6)
# plt.title('High precision Weight Matrix {}'.format(l_idx10))
# show_data = net.params[l_idx10][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 7)
# plt.title('High precision Weight Matrix {}'.format(l_idx11))
# show_data = net.params[l_idx11][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 8)
# plt.title('High precision Weight Matrix {}'.format(l_idx12))
# show_data = net.params[l_idx12][0].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.show()
plt.figure(5, figsize=(16, 8))
plt.subplot(2, 4, 1)
plt.title('{}'.format(l_idx13))
plt.imshow(make_2d(net.params[l_idx13][0].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 2)
plt.title('{}'.format(l_idx14))
plt.imshow(make_2d(net.params[l_idx14][0].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 3)
plt.title('{}'.format(l_idx15))
plt.imshow(make_2d(net.params[l_idx15][0].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 4)
plt.title('{}'.format(l_idx16))
plt.imshow(make_2d(net.params[l_idx16][0].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 5)
plt.title('{}'.format(l_idx13))
show_data = net.params[l_idx13][0].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.subplot(2, 4, 6)
plt.title('{}'.format(l_idx14))
show_data = net.params[l_idx14][0].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.subplot(2, 4, 7)
plt.title('{}'.format(l_idx15))
show_data = net.params[l_idx15][0].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.subplot(2, 4, 8)
plt.title('{}'.format(l_idx16))
show_data = net.params[l_idx16][0].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.tight_layout()
plt.show()
else:
# 'conv_lp_1', 'conv_lp_3', 'conv_lp_6', 'conv_lp_8', 'conv_lp_11', 'conv_lp_13', 'conv_lp_15',
# 'conv_lp_18', 'conv_lp_20', 'conv_lp_22', 'conv_lp_25', 'conv_lp_27', 'conv_lp_29', 'fc_lp_32',
# 'fc_lp_34', 'fc_lp_36']
# All blobs in the net: ['data', 'label', 'label_data_1_split_0', 'label_data_1_split_1',
# 'conv_lp_1', 'act_lp_2', 'conv_lp_3', 'act_lp_4', 'pool_5', 'conv_lp_6', 'act_lp_7',
# 'conv_lp_8', 'act_lp_9', 'pool_10', 'conv_lp_11', 'act_lp_12', 'conv_lp_13', 'act_lp_14',
# 'conv_lp_15', 'act_lp_16', 'pool_17', 'conv_lp_18', 'act_lp_19', 'conv_lp_20', 'act_lp_21',
# 'conv_lp_22', 'act_lp_23', 'pool_24', 'conv_lp_25', 'act_lp_26', 'conv_lp_27', 'act_lp_28',
# 'conv_lp_29', 'act_lp_30', 'pool_31', 'fc_lp_32', 'act_lp_33', 'fc_lp_34', 'act_lp_35',
# 'fc_lp_36', 'fc_lp_36_fc_lp_36_0_split_0', 'fc_lp_36_fc_lp_36_0_split_1', 'accuracy', 'loss'
# plt.figure(2, figsize=(16, 8))
# plt.subplot(2, 4, 1)
# plt.title('Rounded Weight Matrix {}'.format(l_idx1))
# plt.imshow(make_2d(net.params[l_idx1][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 2)
# plt.title('Rounded Weight Matrix {}'.format(l_idx2))
# plt.imshow(make_2d(net.params[l_idx2][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 3)
# plt.title('Rounded Weight Matrix {}'.format(l_idx3))
# plt.imshow(make_2d(net.params[l_idx3][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 4)
# plt.title('Rounded Weight Matrix {}'.format(l_idx4))
# plt.imshow(make_2d(net.params[l_idx4][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 5)
# plt.title('rounded weight matrix {}'.format(l_idx1))
# show_data = net.params[l_idx1][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 6)
# plt.title('rounded weight matrix {}'.format(l_idx2))
# show_data = net.params[l_idx2][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 7)
# plt.title('rounded weight matrix {}'.format(l_idx3))
# show_data = net.params[l_idx3][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 8)
# plt.title('rounded weight matrix {}'.format(l_idx4))
# show_data = net.params[l_idx4][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.show()
# plt.figure(3, figsize=(16, 8))
# plt.subplot(2, 4, 1)
# plt.title('Rounded Weight Matrix {}'.format(l_idx5))
# plt.imshow(make_2d(net.params[l_idx5][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 2)
# plt.title('Rounded Weight Matrix {}'.format(l_idx6))
# plt.imshow(make_2d(net.params[l_idx6][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 3)
# plt.title('Rounded Weight Matrix {}'.format(l_idx7))
# plt.imshow(make_2d(net.params[l_idx7][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 4)
# plt.title('Rounded Weight Matrix {}'.format(l_idx8))
# plt.imshow(make_2d(net.params[l_idx8][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 5)
# plt.title('rounded weight matrix {}'.format(l_idx5))
# show_data = net.params[l_idx5][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 6)
# plt.title('rounded weight matrix {}'.format(l_idx6))
# show_data = net.params[l_idx6][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 7)
# plt.title('rounded weight matrix {}'.format(l_idx7))
# show_data = net.params[l_idx7][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 8)
# plt.title('rounded weight matrix {}'.format(l_idx8))
# show_data = net.params[l_idx8][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.show()
# plt.figure(4, figsize=(16, 8))
# plt.subplot(2, 4, 1)
# plt.title('Rounded Weight Matrix {}'.format(l_idx9))
# plt.imshow(make_2d(net.params[l_idx9][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 2)
# plt.title('Rounded Weight Matrix {}'.format(l_idx10))
# plt.imshow(make_2d(net.params[l_idx10][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 3)
# plt.title('Rounded Weight Matrix {}'.format(l_idx11))
# plt.imshow(make_2d(net.params[l_idx11][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 4)
# plt.title('Rounded Weight Matrix {}'.format(l_idx12))
# plt.imshow(make_2d(net.params[l_idx12][1].data), interpolation='nearest', aspect='auto')
# plt.colorbar()
# plt.draw()
# plt.subplot(2, 4, 5)
# plt.title('rounded weight matrix {}'.format(l_idx9))
# show_data = net.params[l_idx9][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 6)
# plt.title('rounded weight matrix {}'.format(l_idx10))
# show_data = net.params[l_idx10][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 7)
# plt.title('rounded weight matrix {}'.format(l_idx11))
# show_data = net.params[l_idx11][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.subplot(2, 4, 8)
# plt.title('rounded weight matrix {}'.format(l_idx12))
# show_data = net.params[l_idx12][1].data.flatten()
# # plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
# plt.hist(show_data, nb_bins)
# # plt.ylim([0, ymax])
# plt.draw()
# plt.show()
plt.figure(5, figsize=(16, 8))
plt.subplot(2, 4, 1)
plt.title('{}'.format(l_idx13))
plt.imshow(make_2d(net.params[l_idx13][1].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 2)
plt.title('{}'.format(l_idx14))
plt.imshow(make_2d(net.params[l_idx14][1].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 3)
plt.title('{}'.format(l_idx15))
plt.imshow(make_2d(net.params[l_idx15][1].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 4)
plt.title('{}'.format(l_idx16))
plt.imshow(make_2d(net.params[l_idx16][1].data), interpolation='nearest', aspect='auto')
plt.colorbar()
plt.draw()
plt.subplot(2, 4, 5)
plt.title('{}'.format(l_idx13))
show_data = net.params[l_idx13][1].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.subplot(2, 4, 6)
plt.title('{}'.format(l_idx14))
show_data = net.params[l_idx14][1].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.subplot(2, 4, 7)
plt.title('{}'.format(l_idx15))
show_data = net.params[l_idx15][1].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.subplot(2, 4, 8)
plt.title('{}'.format(l_idx16))
show_data = net.params[l_idx16][1].data.flatten()
# plt.hist(show_data, bins=np.arange(x_min, x_max, binwidth))
plt.hist(show_data, nb_bins)
# plt.ylim([0, ymax])
plt.draw()
plt.tight_layout()
plt.show()
# plt.figure(8)
# plt.hist(show_data, 20)
# plt.title('Rounded weight distribution 1000 Class Classifier')
# plt.draw()
# plt.show()
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.forms.models import model_to_dict
from networkapi import settings
from networkapi.admin_permission import AdminPermission
from networkapi.ambiente.models import Ambiente
from networkapi.ambiente.models import AmbienteError
from networkapi.ambiente.models import AmbienteNotFoundError
from networkapi.auth import has_perm
from networkapi.distributedlock import distributedlock
from networkapi.distributedlock import LOCK_ENVIRONMENT
from networkapi.equipamento.models import Equipamento
from networkapi.exception import InvalidValueError
from networkapi.filterequiptype.models import FilterEquipType
from networkapi.grupo.models import GrupoError
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.infrastructure.xml_utils import loads
from networkapi.infrastructure.xml_utils import XMLError
from networkapi.rest import RestResource
from networkapi.util import is_valid_int_greater_zero_param
from networkapi.util import is_valid_string_maxsize
from networkapi.util import is_valid_string_minsize
from networkapi.vlan.models import Vlan
from networkapi.vlan.models import VlanError
from networkapi.vlan.models import VlanNameDuplicatedError
from networkapi.vlan.models import VlanNumberNotAvailableError
class VlanAllocateResource(RestResource):
log = logging.getLogger('VlanAllocateResource')
    def handle_post(self, request, user, *args, **kwargs):
        """Handles POST requests to create a new VLAN without adding a NetworkIPv4.
URLs: /vlan/no-network/
"""
self.log.info('Create new VLAN without add NetworkIPv4')
try:
# Commons Validations
# User permission
if not has_perm(user, AdminPermission.VLAN_MANAGEMENT, AdminPermission.WRITE_OPERATION):
self.log.error(
u'User does not have permission to perform the operation.')
return self.not_authorized()
# Business Validations
# Load XML data
xml_map, attrs_map = loads(request.raw_post_data)
# XML data format
networkapi_map = xml_map.get('networkapi')
if networkapi_map is None:
msg = u'There is no value to the networkapi tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
vlan_map = networkapi_map.get('vlan')
if vlan_map is None:
msg = u'There is no value to the vlan tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
# Get XML data
environment = vlan_map.get('environment_id')
name = vlan_map.get('name')
description = vlan_map.get('description')
vrf = vlan_map.get('vrf')
# Name must NOT be none and 50 is the maxsize
if not is_valid_string_minsize(name, 3) or not is_valid_string_maxsize(name, 50):
self.log.error(u'Parameter name is invalid. Value: %s.', name)
raise InvalidValueError(None, 'name', name)
# Description can NOT be greater than 200
if not is_valid_string_minsize(description, 3, False) or not is_valid_string_maxsize(description, 200, False):
self.log.error(
u'Parameter description is invalid. Value: %s.', description)
raise InvalidValueError(None, 'description', description)
# vrf can NOT be greater than 100
if not is_valid_string_maxsize(vrf, 100, False):
self.log.error(
u'Parameter vrf is invalid. Value: %s.', vrf)
raise InvalidValueError(None, 'vrf', vrf)
# Environment
try:
# Valid environment ID
if not is_valid_int_greater_zero_param(environment):
self.log.error(
u'Parameter environment_id is invalid. Value: %s.', environment)
raise InvalidValueError(
None, 'environment_id', environment)
                # Find environment by ID to check if it exists
env = Ambiente.get_by_pk(environment)
except AmbienteNotFoundError, e:
self.log.error(u'The environment parameter does not exist.')
return self.response_error(112)
# Business Rules
# New Vlan
vlan = Vlan()
vlan.nome = name
vlan.descricao = description
vlan.ambiente = env
# Check if environment has min/max num_vlan value or use the value
            # that was configured in settings
if (vlan.ambiente.min_num_vlan_1 and vlan.ambiente.max_num_vlan_1) or (vlan.ambiente.min_num_vlan_2 and vlan.ambiente.max_num_vlan_2):
min_num_01 = vlan.ambiente.min_num_vlan_1 if vlan.ambiente.min_num_vlan_1 and vlan.ambiente.max_num_vlan_1 else vlan.ambiente.min_num_vlan_2
max_num_01 = vlan.ambiente.max_num_vlan_1 if vlan.ambiente.min_num_vlan_1 and vlan.ambiente.max_num_vlan_1 else vlan.ambiente.max_num_vlan_2
min_num_02 = vlan.ambiente.min_num_vlan_2 if vlan.ambiente.min_num_vlan_2 and vlan.ambiente.max_num_vlan_2 else vlan.ambiente.min_num_vlan_1
max_num_02 = vlan.ambiente.max_num_vlan_2 if vlan.ambiente.min_num_vlan_2 and vlan.ambiente.max_num_vlan_2 else vlan.ambiente.max_num_vlan_1
else:
min_num_01 = settings.MIN_VLAN_NUMBER_01
max_num_01 = settings.MAX_VLAN_NUMBER_01
min_num_02 = settings.MIN_VLAN_NUMBER_02
max_num_02 = settings.MAX_VLAN_NUMBER_02
# To avoid allocation same vlan number twice for different environments in same equipments
# Lock all environments related to this environment when allocating vlan number
# select all equipments from this environment that are not part of a filter
            # and then selects all environments from all these equipments and
# lock them out
filtered_equipment_type_ids = list()
env_filter = None
try:
env_filter = env.filter.id
except:
pass
for fet in FilterEquipType.objects.filter(filter=env_filter):
filtered_equipment_type_ids.append(fet.equiptype.id)
filtered_environment_equips = Equipamento.objects.filter(equipamentoambiente__ambiente=env).exclude(
tipo_equipamento__in=filtered_equipment_type_ids)
# select all environments from the equips that were not filtered
locks_list = list()
environments_list = Ambiente.objects.filter(
equipamentoambiente__equipamento__in=filtered_environment_equips).distinct().order_by('id')
for environment in environments_list:
lock = distributedlock(LOCK_ENVIRONMENT % environment.id)
lock.__enter__()
locks_list.append(lock)
# Persist
try:
vlan.create_new(user,
min_num_01,
max_num_01,
min_num_02,
max_num_02
)
except Exception, e:
# release all the locks if failed
for lock in locks_list:
lock.__exit__('', '', '')
raise e
for lock in locks_list:
lock.__exit__('', '', '')
vlan_map = dict()
vlan_map['vlan'] = model_to_dict(vlan)
# Return XML
return self.response(dumps_networkapi(vlan_map))
except InvalidValueError, e:
return self.response_error(269, e.param, e.value)
except XMLError, x:
self.log.error(u'Error reading the XML request.')
return self.response_error(3, x)
except GrupoError:
return self.response_error(1)
except AmbienteNotFoundError:
return self.response_error(112)
except VlanNameDuplicatedError:
return self.response_error(108)
except VlanNumberNotAvailableError:
return self.response_error(109, min_num_01, max_num_01, min_num_02, max_num_02)
except (VlanError, AmbienteError):
return self.response_error(1)
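# A hedged example of the XML body this handler parses (tag and field names are
# taken from the parsing code above; the values are placeholders only):
#
#     <networkapi>
#         <vlan>
#             <environment_id>1</environment_id>
#             <name>EXAMPLE_VLAN</name>
#             <description>example vlan</description>
#             <vrf>default</vrf>
#         </vlan>
#     </networkapi>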
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm, ReadOnlyPasswordHashField, UserChangeForm, UserCreationForm
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.utils.translation import ugettext, ugettext_lazy as _
from .models import User
class UserLoginForm(AuthenticationForm):
username = forms.CharField(label=_('Username:'), required=True)
password = forms.CharField(label=_('Password:'), widget=forms.PasswordInput)
def clean_password(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
valid = True
try:
user = User.objects.get(username=username)
if not user.check_password(password):
valid = False
except ObjectDoesNotExist:
valid = False
if not valid: raise ValidationError(_('Wrong password. Try again...'))
return password
def confirm_login_allowed(self, user):
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
class CreateUserForm(forms.ModelForm):
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class Meta(UserCreationForm.Meta):
model = User
fields = ('username','password','email','active','staff','admin')
def clean_username(self):
username = self.cleaned_data['username']
try:
User.objects.get(username=username)
except ObjectDoesNotExist:
return username
raise ValidationError(_('Username already exists'))
def save(self, commit=True):
user = super(CreateUserForm, self).save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class ChangeUserForm(forms.ModelForm):
password = ReadOnlyPasswordHashField()
class Meta(UserChangeForm.Meta):
model = User
fields = ('username','email', 'password', 'active','staff','admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
|
import sys, os, inspect
from JumpScale import j
home = os.curdir # Default
if 'JSBASE' in os.environ:
home = os.environ['JSBASE']
elif 'JSJAIL' in os.environ:
home = os.environ['JSJAIL']
elif os.name == 'posix':
# home = os.path.expanduser("~/")
home="/opt/jumpscale"
elif os.name == 'nt': # Contributed by Jeff Bauer
if 'HOMEPATH' in os.environ:
if 'HOMEDRIVE' in os.environ:
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
else:
home = os.environ['HOMEPATH']
# if not 'JSBASE' in os.environ:
# print "WARNING: did not find JSBASE env environment, please set and point to your sandbox"
def pathToUnicode(path):
"""
Convert path to unicode. Use the local filesystem encoding. Will return
path unmodified if path already is unicode.
@param path: path to convert to unicode
@type path: basestring
@return: unicode path
@rtype: unicode
"""
if isinstance(path, unicode):
return path
return path.decode(sys.getfilesystemencoding())
class Dirs(object):
"""Utility class to configure and store all relevant directory paths"""
def __init__(self):
'''jumpscale sandbox base folder'''
self.__initialized = False ##bool
import sys
if os.path.exists("library.zip"):
self.frozen=True
else:
self.frozen=False
iswindows=os.name=="nt"
self.baseDir=home
self.baseDir=self.baseDir.replace("\\","/")
'''Application installation base folder (basedir/apps)'''
self.appDir = os.path.abspath(".")
'''Configuration file folder (appdir/cfg)'''
if 'JSBASE' in os.environ:
self.cfgDir=os.path.join(os.path.realpath("%s/../"%self.baseDir),"%s_data"%os.path.basename(self.baseDir.rstrip("/")),"cfg")
elif 'JSJAIL' in os.environ:
self.cfgDir=os.path.join(os.path.realpath("%s/../"%self.baseDir),"%s"%os.path.basename(self.baseDir.rstrip("/")),"cfg")
else:
self.cfgDir = os.path.join(self.baseDir,"cfg")
self._createDir(self.cfgDir)
tpath = os.path.join(self.cfgDir,"debug")
self._createDir(tpath)
tpath = os.path.join(self.cfgDir,"debug","protecteddirs")
self._createDir(tpath)
tpath = os.path.join(self.cfgDir,"grid")
self._createDir(tpath)
tpath = os.path.join(self.cfgDir,"hrd")
self._createDir(tpath)
'''Var folder (basedir/var)'''
if self.frozen:
self.varDir = "/var/jumpscale"
elif 'JSBASE' in os.environ:
self.varDir=os.path.join(os.path.realpath("%s/../"%self.baseDir),"%s_data"%os.path.basename(self.baseDir),"var")
else:
self.varDir = os.path.join(self.baseDir,"var")
self._createDir(self.varDir)
'''Temporary file folder (appdir/tmp)'''
if iswindows or self.frozen:
self.tmpDir = os.path.join(self.varDir,"tmp")
else:
self.tmpDir = "/tmp/jumpscale"
self._createDir(self.tmpDir)
if iswindows or self.frozen:
self.libDir = os.path.join(self.baseDir,"library.zip")
else:
self.libDir = os.path.join(self.baseDir,"lib")
self._createDir(self.libDir)
self.libExtDir = os.path.join(self.baseDir,"libext")
self._createDir(os.path.join(self.baseDir,"libext"))
if self.libDir in sys.path:
sys.path.pop(sys.path.index(self.libDir))
sys.path.insert(0,self.libDir)
pythonzip = os.path.join(self.libDir, 'python.zip')
if os.path.exists(pythonzip):
if pythonzip in sys.path:
sys.path.pop(sys.path.index(pythonzip))
sys.path.insert(0,pythonzip)
if self.libExtDir in sys.path:
sys.path.pop(sys.path.index(self.libExtDir))
sys.path.insert(2,self.libExtDir)
self.logDir = os.path.join(self.varDir,"log")
self._createDir(self.logDir)
self.packageDir = os.path.join(self.varDir,"jpackages")
self._createDir(self.packageDir)
# self.homeDir = pathToUnicode(os.path.join(home, ".jsbase"))
self.pidDir = os.path.join(self.varDir,"log")
if 'JSBASE' in os.environ:
self.binDir = os.path.join(self.baseDir, 'bin')
else:
self.binDir = "/usr/local/bin"
if self.frozen:
self.codeDir=os.path.join(self.varDir,"code")
else:
self.codeDir="/opt/code"
self._createDir(self.codeDir)
self.hrdDir = os.path.join(self.cfgDir,"hrd")
self._createDir(self.hrdDir)
self.configsDir = os.path.join(self.cfgDir,"jsconfig")
self._createDir(self.configsDir)
self.jsLibDir = self._getLibPath()
if self.jsLibDir not in sys.path:
sys.path.append(self.jsLibDir)
def replaceTxtDirVars(self,txt,additionalArgs={}):
"""
replace $base,$vardir,$cfgdir,$bindir,$codedir,$tmpdir,$logdir,$appdir with props of this class
"""
txt=txt.replace("$base",self.baseDir)
txt=txt.replace("$appdir",self.appDir)
txt=txt.replace("$codedir",self.codeDir)
txt=txt.replace("$vardir",self.varDir)
txt=txt.replace("$cfgdir",self.cfgDir)
txt=txt.replace("$bindir",self.binDir)
txt=txt.replace("$logdir",self.logDir)
txt=txt.replace("$tmpdir",self.tmpDir)
txt=txt.replace("$libdir",self.libDir)
txt=txt.replace("$jslibdir",self.jsLibDir)
txt=txt.replace("$jslibextdir",self.libExtDir)
txt=txt.replace("$jsbindir",self.binDir)
txt=txt.replace("$nodeid",str(j.application.whoAmI.nid))
for key,value in additionalArgs.iteritems():
txt=txt.replace("$%s"%key,str(value))
return txt
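    # Small illustration of the substitution above (hedged: `dirs` stands for an
    # initialized Dirs instance and the expanded value depends on the local layout):
    #     dirs.replaceTxtDirVars("$logdir/$appname.log", additionalArgs={"appname": "demo"})
    #     # -> e.g. "/opt/jumpscale/var/log/demo.log"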
def replaceFilesDirVars(self,path,recursive=True, filter=None,additionalArgs={}):
if j.system.fs.isFile(path):
paths=[path]
else:
paths=j.system.fs.listFilesInDir(path,recursive,filter)
for path in paths:
content=j.system.fs.fileGetContents(path)
content2=self.replaceTxtDirVars(content,additionalArgs)
            if content2 != content:
j.system.fs.writeFile(filename=path,contents=content2)
def _createDir(self,path):
if not os.path.exists(path):
os.makedirs(path)
def init(self,reinit=False):
"""Initializes all the configured directories if needed
If a folder attribute is None, set its value to the corresponding
default path.
@returns: Initialization success
@rtype: bool
"""
if reinit==False and self.__initialized == True:
return True
if j.system.platformtype.isWindows() :
self.codeDir=os.path.join(self.baseDir, 'code')
self.loadProtectedDirs()
self.deployDefaultFilesInSandbox()
self.__initialized = True
return True
def _getParent(self, path):
"""
Returns the parent of the path:
/dir1/dir2/file_or_dir -> /dir1/dir2/
/dir1/dir2/ -> /dir1/
@todo why do we have 2 implementations which are almost the same see getParentDirName()
"""
parts = path.split(os.sep)
if parts[-1] == '':
parts=parts[:-1]
parts=parts[:-1]
if parts==['']:
return os.sep
return os.sep.join(parts)
def _getLibPath(self):
parent = self._getParent
libDir=parent(parent(__file__))
libDir=os.path.abspath(libDir).rstrip("/")
return libDir
def getPathOfRunningFunction(self,function):
return inspect.getfile(function)
def loadProtectedDirs(self):
protectedDirsDir = os.path.join(self.cfgDir, 'debug', 'protecteddirs')
if not os.path.exists(protectedDirsDir):
self._createDir(protectedDirsDir)
_listOfCfgFiles = j.system.fs.listFilesInDir(protectedDirsDir, filter='*.cfg')
_protectedDirsList = []
for _cfgFile in _listOfCfgFiles:
_cfg = open(_cfgFile, 'r')
_dirs = _cfg.readlines()
for _dir in _dirs:
_dir = _dir.replace('\n', '').strip()
if j.system.fs.isDir(_dir):
# npath=j.system.fs.pathNormalize(_dir)
if _dir not in _protectedDirsList:
_protectedDirsList.append(_dir)
self.protectedDirs = _protectedDirsList
def addProtectedDir(self,path,name="main"):
if j.system.fs.isDir(path):
path=j.system.fs.pathNormalize(path)
configfile=os.path.join(self.cfgDir, 'debug', 'protecteddirs',"%s.cfg"%name)
if not j.system.fs.exists(configfile):
j.system.fs.writeFile(configfile,"")
content=j.system.fs.fileGetContents(configfile)
if path not in content.split("\n"):
content+="%s\n"%path
j.system.fs.writeFile(configfile,content)
self.loadProtectedDirs()
def removeProtectedDir(self,path):
path=j.system.fs.pathNormalize(path)
protectedDirsDir = os.path.join(self.cfgDir, 'debug', 'protecteddirs')
_listOfCfgFiles = j.system.fs.listFilesInDir(protectedDirsDir, filter='*.cfg')
for _cfgFile in _listOfCfgFiles:
_cfg = open(_cfgFile, 'r')
_dirs = _cfg.readlines()
out=""
found=False
for _dir in _dirs:
_dir = _dir.replace('\n', '').strip()
if _dir==path:
#found, need to remove
found=True
else:
out+="%s\n"%_dir
if found:
j.system.fs.writeFile(_cfgFile,out)
self.loadProtectedDirs()
def checkInProtectedDir(self,path):
path=j.system.fs.pathNormalize(path)
for item in self.protectedDirs :
if path.find(item)!=-1:
return True
return False
def deployDefaultFilesInSandbox(self):
iswindows=os.name=="nt"
if self.frozen or iswindows:
return
if 'JSJAIL' in os.environ:
return
#@todo P3 let it work for windows as well
bindest=j.system.fs.joinPaths(self.baseDir,"bin")
utilsdest=j.system.fs.joinPaths(self.baseDir,"utils")
cfgdest=self.cfgDir
if not os.path.exists(bindest) or not os.path.exists(utilsdest) or not os.path.exists(cfgdest):
cfgsource=j.system.fs.joinPaths(self.jsLibDir,"core","_defaultcontent","cfg")
binsource=j.system.fs.joinPaths(self.jsLibDir,"core","_defaultcontent","linux","bin")
utilssource=j.system.fs.joinPaths(self.jsLibDir,"core","_defaultcontent","linux","utils")
j.system.fs.copyDirTree(binsource,bindest)
j.system.fs.copyDirTree(utilssource,utilsdest)
j.system.fs.copyDirTree(cfgsource,cfgdest,overwriteFiles=False)
ipythondir = j.system.fs.joinPaths(os.environ['HOME'], '.ipython')
j.system.fs.removeDirTree(ipythondir)
j.packages.reloadconfig()
def __str__(self):
        return str(self.__dict__)  # @todo P3 implement (this is not working)
__repr__=__str__
|
from ....Classes.Arc1 import Arc1
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
"""Return the full winding surface
Parameters
----------
self : SlotW22
A SlotW22 object
alpha : float
float number for rotation (Default value = 0) [rad]
delta : complex
complex number for translation (Default value = 0)
Returns
-------
surf_wind: Surface
Surface corresponding to the Winding Area
"""
# get the name of the lamination
st = self.get_name_lam()
# Create curve list
curve_list = self.build_geometry()[1:-1]
curve_list.append(
Arc1(
begin=curve_list[-1].get_end(),
end=curve_list[0].get_begin(),
radius=-abs(curve_list[-1].get_end()),
is_trigo_direction=False,
)
)
# Create surface
if self.is_outwards():
Zmid = self.get_Rbo() + self.H0 + self.H2 / 2
else:
Zmid = self.get_Rbo() - self.H0 - self.H2 / 2
surface = SurfLine(
line_list=curve_list, label="Wind_" + st + "_R0_T0_S0", point_ref=Zmid
)
# Apply transformation
surface.rotate(alpha)
surface.translate(delta)
return surface
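# A minimal usage sketch (hedged: `slot` is assumed to be an already-parameterised
# SlotW22 attached to a lamination, since this method relies on get_Rbo(), H0 and
# H2; `pi` would come from the math module):
#
#     surf = slot.get_surface_active(alpha=pi / 6, delta=0)
#     # surf is a SurfLine labelled "Wind_<lam>_R0_T0_S0", rotated by pi/6 rad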
|
def cadena():
    while True:
        try:
            text = input("Enter the grades separated by commas: ")
            text = text.split(sep=",")
            for i, _ in enumerate(text):
                text[i] = int(text[i].strip())
            break
        except ValueError:
            print("Enter numbers only")
    return text
notas = cadena()
print(f"grades: {notas}")
|
import numpy as np
import pandas as pd
import copy
import os
import sklearn
# from scipy.stats import pearsonr
# from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import RobustScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
# from sklearn.metrics import confusion_matrix
import featuretools as ft
# from sklearn.ensemble import RandomForestRegressor
import utils_original as utils
from numpy import mean
# def run(dates, labels, auto_human_20feature_matrix):
# pipeline_preprocessing = [("imputer",
# SimpleImputer()),
# ("scaler", RobustScaler(with_centering=True))] # StandardScaler
# splitter = utils.TimeSeriesSplitByDate(dates=dates, earliest_date=pd.Timestamp('1/1/2008')) # predict 2004 2008 2012
# all_X = auto_human_20feature_matrix.values
# rf_regressor = RandomForestRegressor(
# n_estimators=200,
# random_state=50
# )
# pipeline_reg = Pipeline(pipeline_preprocessing + [('rf_reg', rf_regressor)])
# # regression_score, a, b = utils.fit_and_score(np.array(X), labels, splitter, pipeline_reg)
# regression_score, a, b = utils.fit_and_score(all_X, labels, splitter, pipeline_reg)
# print(regression_score, mean(regression_score))
# return regression_score
def loadData(es, DATA_DIR):
label_country_Olympics = os.path.join(DATA_DIR, "num_medals_by_country_labels2016.csv")
label_df1 = pd.read_csv(label_country_Olympics,
parse_dates=['Olympics Date'],
usecols=['Number of Medals', 'Olympics Date', 'Country', 'Olympic Games ID'],
encoding='utf-8')
label_country = os.path.join(DATA_DIR, "cleaned_new_dictionary.csv")
label_df2 = pd.read_csv(label_country,
encoding='utf-8')
label_summer = os.path.join(DATA_DIR, "summer2016.csv")
label_df3 = pd.read_csv(label_summer,
usecols=['Year', 'City', 'Sport', 'Athlete', 'Country', 'Gender', 'Medal', 'Age', 'Height', 'Weight'],
encoding='gbk')
# label_df1 # label_df2 # label_df3
# label_df.sort_values(['Olympics Date', 'Country'], inplace=True)
cutoff_times = label_df1[['Country', 'Olympics Date']].rename(columns={'Country': 'Code', 'Olympics Date': 'time'})
dates = label_df1['Olympics Date']
labels = label_df1['Number of Medals']
return label_df1, label_df2, label_df3, cutoff_times, dates, labels
def auto_human_features():
auto_feature_matrix_encoded = auto_10features()
auto_feature_matrix_encoded.drop(auto_feature_matrix_encoded.columns[[0]], axis = 1, inplace = True)
human_10feature_names, human_10features = human_10eatures()
auto_human_20feature_matrix = copy.deepcopy(auto_feature_matrix_encoded)
for i in range(len(human_10feature_names)):
auto_human_20feature_matrix.insert(auto_human_20feature_matrix.shape[1], human_10feature_names[i], human_10features[i])
return auto_human_20feature_matrix
def autoFeatures(es, cutoff_times):
agg_primitives = ['Sum', 'Max', 'Min', 'Mean',
'Count', 'Num_Unique',
'Mode', 'Trend', 'Skew']
trans_primitives = ['Absolute', 'Percentile']
feature_matrix, features = ft.dfs(
entityset = es,
target_entity = "countries", # parameter 1
trans_primitives = trans_primitives,
agg_primitives = agg_primitives, # parameter 2
max_depth = 2, # parameter 3
cutoff_time = cutoff_times, # parameter 4
verbose = True
)
print("{} features generated automatically".format(len(features)))
feature_matrix_encoded, features_encoded = ft.encode_features(feature_matrix, features)
return feature_matrix_encoded, features_encoded
def human_features():
    # Each human-engineered feature lives in its own CSV under features/human/; the repeated
    # read-and-collect pattern is factored into a small helper without changing behavior.
    def read_feature(filename, column):
        df = pd.read_csv("features/human/{}.csv".format(filename), encoding='utf-8')
        return [f for f in df[column]]
    mean_medal_num = read_feature("human_feature_past_medal_avg_num", "past_medal_avg_num")
    athlete_num = read_feature("human_feature_athlete_num", "athlete_num")
    women_athlete_num = read_feature("human_feature_women_athlete_num", "women_athlete_num")
    men_athlete_num = read_feature("human_feature_men_athlete_num", "men_athlete_num")
    historic_gold_modal_num = read_feature("human_feature_historic_gold_modal_num", "historic_gold_modal_num")
    historic_silver_modal_num = read_feature("human_feature_historic_silver_modal_num", "historic_silver_modal_num")
    historic_bronze_modal_num = read_feature("human_feature_historic_bronze_modal_num", "historic_bronze_modal_num")
    historic_Aquatics_modal_num = read_feature("human_feature_historic_Aquatics_modal_num", "historic_Aquatics_modal_num")
    historic_Athletics_modal_num = read_feature("human_feature_historic_Athletics_modal_num", "historic_Athletics_modal_num")
    historic_Gymnastics_modal_num = read_feature("human_feature_historic_Gymnastics_modal_num", "historic_Gymnastics_modal_num")
    man_woman_ratio = read_feature("human_feature_man_woman_ratio", "man_woman_ratio_total")
    man_ratio = read_feature("human_feature_man_ratio", "man_ratio_total")
    woman_ratio = read_feature("human_feature_woman_ratio", "woman_ratio_total")
    athlete_age_min = read_feature("human_feature_athlete_age_min", "athlete_age_min")
    athlete_age_max = read_feature("human_feature_athlete_age_max", "athlete_age_max")
    athlete_age_mean = read_feature("human_feature_athlete_age_mean", "athlete_age_mean")
all_human_feature_names =['human_<MEAN(COUNT(medals_won.Medal))>','human_<SUM(medaling_athletes.NUM_UNIQUE(athletes.Athelete))>','human_<COUNT(medaling_athletes WHERE athlete.Gender = Women)>', 'human_<COUNT(medaling_athletes WHERE athlete.Gender = Men)>', 'human_<COUNT(countries_at_plympicgames WHERE medals_won.Medal = Gold)>', 'human_<COUNT(countries_at_plympicgames WHERE medals_won.Medal = Silver)>', 'human_<COUNT(countries_at_plympicgames WHERE medals_won.Medal = Bronze)>', 'human_<COUNT(medals_won.Medal WHERE sports.Sport = Aquatics)>', 'human_<COUNT(medals_won.Medal WHERE sports.Sport = Athletics)>', 'human_<COUNT(medals_won.Medal WHERE sports.Sport = Gymnastics)>', 'human_<RATIO(COUNT(athletes WHERE Gender = Men), COUNT(athletes WHERE Gender = Women))>', 'human_<RATIO(COUNT(athletes WHERE Gender = Men), COUNT(athletes.Athelete))>', 'human_<RATIO(COUNT(athletes WHERE Gender = Women), COUNT(athletes.Athelete))>','human_<MIN(atheletes.Age)>', 'human_<MAX(atheletes.Age)>', 'human_<MEAN(atheletes.Age)>']
all_human_features = [mean_medal_num, athlete_num, women_athlete_num, men_athlete_num, historic_gold_modal_num, historic_silver_modal_num,
historic_bronze_modal_num, historic_Aquatics_modal_num, historic_Athletics_modal_num, historic_Gymnastics_modal_num, man_woman_ratio, man_ratio, woman_ratio, athlete_age_min, athlete_age_max, athlete_age_mean]
select_human_feature_names =['COUNT(medals_won.Medal WHERE sports.Sport = Gymnastics)',
'MIN(atheletes.Age)', 'MAX(atheletes.Age)', 'MEAN(atheletes.Age)',
'COUNT(medaling_athletes WHERE athlete.Gender = Women)',
'RATIO(COUNT(athletes WHERE Gender = Men), COUNT(athletes WHERE Gender = Women))',
'RATIO(COUNT(athletes WHERE Gender = Women), COUNT(athletes.Athelete))',
'SUM(medaling_athletes.NUM_UNIQUE(athletes.Athelete))',
'COUNT(medaling_athletes WHERE athlete.Gender = Men)'
]
select_human_features = [historic_Gymnastics_modal_num,
athlete_age_min, athlete_age_max, athlete_age_mean,
women_athlete_num, man_woman_ratio, woman_ratio,
athlete_num, men_athlete_num]
return select_human_feature_names, select_human_features
|
#! /usr/bin/env python
# by caozj
# Jun 5, 2019
# 3:42:21 PM
import os
import time
import random
import argparse
import numpy as np
import tensorflow as tf
import Cell_BLAST as cb
import scscope as DeepImpute
import utils
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", type=str, required=True)
parser.add_argument("-g", "--genes", dest="genes", type=str, required=True)
parser.add_argument("-o", "--output", dest="output", type=str, required=True)
parser.add_argument("--n-latent", dest="n_latent", type=int, default=10)
parser.add_argument("--n-epochs", dest="n_epochs", type=int, default=1000)
parser.add_argument("-s", "--seed", dest="seed", type=int, default=None)
parser.add_argument("-d", "--device", dest="device", type=str, default=None)
parser.add_argument("--clean", dest="clean", type=str, default=None)
cmd_args = parser.parse_args()
output_path = os.path.dirname(cmd_args.output)
if not os.path.exists(output_path):
os.makedirs(output_path)
os.environ["CUDA_VISIBLE_DEVICES"] = utils.pick_gpu_lowest_memory() \
if cmd_args.device is None else cmd_args.device
return cmd_args
def main(cmd_args):
dataset = cb.data.ExprDataSet.read_dataset(
cmd_args.input, sparsify=True
).normalize()
if cmd_args.clean is not None:
dataset = utils.clean_dataset(dataset, cmd_args.clean)
if cmd_args.genes is not None:
dataset = dataset[:, dataset.uns[cmd_args.genes]]
dataset = dataset.exprs.log1p().toarray()
start_time = time.time()
model = DeepImpute.train(
dataset, cmd_args.n_latent,
max_epoch=cmd_args.n_epochs, random_seed=cmd_args.seed
)
latent, _imputed_val, _batch_effect = DeepImpute.predict(dataset, model)
cb.data.write_hybrid_path(
time.time() - start_time,
"//".join([cmd_args.output, "time"])
)
cb.data.write_hybrid_path(
latent,
"//".join([cmd_args.output, "latent"])
)
if __name__ == "__main__":
main(parse_args())
print("Done!")
|
# Use this command if numpy import fails: sudo apt-get install python-dev libatlas-base-dev
# If this doesn't work, uninstall both numpy and scipy. Thonny will keep an older default version of numpy.
# Install an older version of scipy that corresponds to the correct version of numpy.
from guizero import App, PushButton, Slider, Text, ButtonGroup, Picture, Box, CheckBox
import sys
import time
import subprocess
import os
DEBUG_MODE = False
#CONT_REALTIME_MONITORING = False
def gui_open_rr_hr():
app.destroy()
#os.system("cmd /c py final.py -u")
process = subprocess.run('python3 scripts/run_rr_hr.py -u', shell=True)
def gui_open_hrv_hr():
app.destroy()
process = subprocess.run('python3 scripts/run_hrv_hr.py -u', shell=True)
def gui_go_to_connect():
print("Connecting...")
start_menu_box.hide()
connect_menu_box.show()
start_footer_box.hide()
other_footer_box.show()
connect_menu_text2.hide()
# Connection function
connect_menu_text.after(1000, gui_check_connection)
def gui_go_to_manual():
start_menu_box.hide()
manual_menu_box.show()
start_footer_box.hide()
other_footer_box.show()
def gui_check_connection():
connect_menu_text.value = "Connected!"
connect_menu_text2.show()
def gui_go_back_to_menu():
connect_menu_box.hide()
manual_menu_box.hide()
if connect_menu_text.value == "Connected!":
connect_menu_text.value = "Connecting to MyVitals..."
start_menu_box.show()
other_footer_box.hide()
start_footer_box.show()
app = App(title="BioRadar (Prototype)", width=480, height=320, bg="#141414")
if not DEBUG_MODE:
app.full_screen = True
start_menu_box = Box(app, width="fill")
pad_1 = Box(start_menu_box, width="fill", height=20)
box_1 = Box(start_menu_box, width="fill")
pad_1_2 = Box(box_1, width=140, height=1, align="left")
picture = Picture(box_1, image="images/brlogo.png", width=51, height=40, align="left") # W:H = 1.277
pad_1_2 = Box(box_1, width=10, height=1, align="left")
message = Text(box_1, text="BioRadar", color="#FFFFFF", size=20, align="left")
pad_2 = Box(start_menu_box, width="fill", height=40)
message = Text(start_menu_box, text="Select how you want to monitor your vitals.", color="#FFFFFF", size=15)
pad_3 = Box(start_menu_box, width="fill", height=18)
button1 = PushButton(start_menu_box, text="Online mode", command=gui_go_to_connect)
button1.bg = "#6ED3A9"
pad_4 = Box(start_menu_box, width="fill", height=10)
button2 = PushButton(start_menu_box, text="Manual mode", command=gui_go_to_manual)
button2.bg = "#6ED3A9"
start_menu_box.hide()
connect_menu_box = Box(app, width="fill")
pad_1 = Box(connect_menu_box, width="fill", height=100)
connect_menu_text = Text(connect_menu_box, text="Connecting to MyVitals...", color="#FFFFFF", size=20)
pad_2 = Box(connect_menu_box, width="fill", height=30)
connect_menu_text2 = Text(connect_menu_box, text="Waiting for online commands...", color="#FFFFFF", size=16)
connect_menu_box.hide()
# Manual mode
manual_menu_box = Box(app, width="fill")
pad = Box(manual_menu_box, width="fill", height=20)
manual_menu_text = Text(manual_menu_box, text="Manual Mode", color="#FFFFFF", size=20)
pad = Box(manual_menu_box, width="fill", height=50)
button_box = Box(manual_menu_box, width=460, height=90)
button1 = PushButton(button_box, text="Respiration Rate\nHeart Rate", command=gui_open_rr_hr, align="left")
pad = Box(button_box, width=10, height=90, align="left")
button2 = PushButton(button_box, text="Heart Rate Variability\nHeart Rate*", command=gui_open_hrv_hr, align="right")
button1.text_size = 16
button2.text_size = 16
button1.bg = "#6ED3A9"
button2.bg = "#6ED3A9"
pad = Box(manual_menu_box, width="fill", height=30)
pad = Box(manual_menu_box, width="fill", height=6)
txt = Text(manual_menu_box, text="* You will need to hold your breath for 10 seconds for\nheart rate variability measurements.", color="#C8C8C8", size=11)
# Footers
start_footer_box = Box(app, width="fill", align="bottom")
fyp_text = Text(start_footer_box, text=" © 2021 Final-Year Project, SEECS, NUST", color="#C8C8C8", size=11, align="left")
exit_button = PushButton(start_footer_box, text="Exit", align="right", command=exit)
exit_button.bg = "#6ED3A9"
start_footer_box.hide()
other_footer_box = Box(app, width="fill", align="bottom")
exit_button = PushButton(other_footer_box, text="Exit", align="right", command=exit)
exit_button.bg = "#6ED3A9"
back_button = PushButton(other_footer_box, text="Back", align="right", command=gui_go_back_to_menu)
back_button.bg = "#6ED3A9"
app.display()
|
import roslib; roslib.load_manifest('hrl_fabric_based_tactile_sensor')
import rospy
from hrl_msgs.msg import FloatArray
import hrl_lib.util as ut
import hrl_fabric_based_tactile_sensor.adc_publisher_node as apn
from m3skin_ros.msg import RawTaxelArray
from geometry_msgs.msg import Transform
from m3skin_ros.srv import None_TransformArray, None_TransformArrayResponse
from m3skin_ros.srv import None_String
class Fabric_Skin_Patch():
def __init__(self):
self.n_taxels_x = 5
self.n_taxels_y = 3
self.taxel_dim_x = 0.05
self.taxel_dim_y = 0.05
self.link_name = '/fabric_skin_link'
self.tar = None_TransformArrayResponse()
for i in range(self.n_taxels_x):
for j in range(self.n_taxels_y):
t = Transform()
t.translation.x = i*self.taxel_dim_x
t.translation.y = j*self.taxel_dim_y
t.translation.z = 0.
t.rotation.x = 0
t.rotation.y = 0
t.rotation.z = 0
t.rotation.w = 1
self.tar.data.append(t)
def local_coord_frames_cb(self, req):
return self.tar
def link_name_cb(self, req):
return self.link_name
if __name__ == '__main__':
dev_name = '/dev/ttyACM0'
baudrate = 115200
serial_dev = apn.setup_serial(dev_name, baudrate)
raw_data_pub = rospy.Publisher('/fabric_skin/taxels/raw_data',
RawTaxelArray)
fsp = Fabric_Skin_Patch()
d = ut.load_pickle('taxel_registration_dict.pkl')
fsp.link_name = d['tf_name']
fsp.tar = d['transform_array_response']
rospy.Service('/fabric_skin/taxels/srv/local_coord_frames',
None_TransformArray, fsp.local_coord_frames_cb)
rospy.Service('/fabric_skin/taxels/srv/link_name', None_String,
fsp.link_name_cb)
rospy.init_node('fabric_skin_driver_node')
for i in range(10):
ln = serial_dev.readline()
rospy.loginfo('Started publishing data')
rta = RawTaxelArray()
while not rospy.is_shutdown():
rta.val_z = apn.get_adc_data(serial_dev)[0:15]
raw_data_pub.publish(rta)
serial_dev.close()
|
import ply.lex as lex
tokens = (
'ID',
'NUM',
'HEXNUM',
'JNS',
'LOAD',
'STORE',
'ADD',
'SUBT',
'SUBTI',
'INPUT',
'OUTPUT',
'HALT',
'SKIPCOND',
'JUMP',
'CLEAR',
'ADDI',
'JUMPI',
'LOADI',
'STOREI',
'PUSH',
'POP',
'INCR',
'DECR',
'SHIFTL',
'SHIFTR',
'STOREX',
'LOADX',
'STOREY',
'LOADY',
'NEWLINE',
'DEC',
'HEX',
'LABEL',
)
reserved = {
'JnS': 'JNS',
'Load': 'LOAD',
'LoadI': 'LOADI',
'Store': 'STORE',
'StoreI': 'STOREI',
'Add': 'ADD',
'AddI': 'ADDI',
'Subt': 'SUBT',
'SubtI': 'SUBTI',
'Input': 'INPUT',
'Output': 'OUTPUT',
'Skipcond': 'SKIPCOND',
'Jump': 'JUMP',
'JumpI': 'JUMPI',
'Clear': 'CLEAR',
'Halt': 'HALT',
'DEC': 'DEC',
'HEX': 'HEX',
'StoreX': 'STOREX',
'StoreY': 'STOREY',
'LoadX': 'LOADX',
'LoadY': 'LOADY',
'Push': 'PUSH',
'Pop': 'POP',
'ShiftL': 'SHIFTL',
'ShiftR': 'SHIFTR',
'Incr': 'INCR',
'Decr': 'DECR',
}
t_JNS = r'JnS'
t_LOAD = r'Load'
t_LOADI = r'LoadI'
t_STORE = r'Store'
t_STOREI = r'StoreI'
t_ADD = r'Add'
t_ADDI = r'AddI'
t_SUBT = r'Subt'
t_INPUT = r'Input'
t_OUTPUT = r'Output'
t_SKIPCOND = r'Skipcond'
t_JUMP = r'Jump'
t_JUMPI = r'JumpI'
t_CLEAR = r'Clear'
t_HALT = r'Halt'
t_PUSH = r'Push'
t_POP = r'Pop'
t_SHIFTL = r'ShiftL'
t_SHIFTR = r'ShiftR'
t_STOREX = r'StoreX'
t_STOREY = r'StoreY'
t_DEC = r'DEC'
t_HEX = r'HEX'
t_ignore = ' \t'
def t_NEWLINE(t):
r'\n\s*'
t.lexer.lineno += 1 # lineno represents memory address
if not hasattr(t.lexer, 'line_number'):
t.lexer.line_number = 2
else:
t.lexer.line_number += t.value.count('\n')
return t
def t_LABEL(t):
r'[A-Za-z_]+,'
return t
def t_ID(t):
r'[A-Za-z_]+'
if t.value in reserved:
t.type = reserved[t.value]
return t
def t_HEXNUM(t):
r'0x[0-9A-F]+'
t.value = int(t.value, 16)
return t
def t_NUM(t):
r'[0-9]+'
t.value = int(t.value)
return t
def t_error(t):
    # skip characters the lexer does not recognize instead of crashing
    print("Illegal character '%s' on line %d" % (t.value[0], t.lexer.lineno))
    t.lexer.skip(1)
lexer = lex.lex()
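# Hypothetical quick check (not part of the original module): run a small MARIE fragment
# through the lexer and print the recognized tokens.
if __name__ == '__main__':
    lexer.input("Load X\nAdd Y\nHalt\n")
    for tok in lexer:
        print("%s %s" % (tok.type, tok.value))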
|
/anaconda3/lib/python3.7/abc.py
|
def shape(A):
    num_rows = len(A)
    num_cols = len(A[0]) if A else 0
    return num_rows, num_cols
A = [
    [1, 2, 3],
    [3, 4, 5],
    [4, 5, 6],
    [6, 7, 8]
]
print(shape(A))
|
class AnsibleHostModel:
def __init__(self, name: str, ip: str):
self.name: str = name
self.ip: str = ip
def __eq__(self, other) -> bool:
return (self.name == other.name and
self.ip == other.ip)
def __lt__(self, other) -> bool:
pass
class AnsibleOrderedHostModel(AnsibleHostModel):
"""
Sortable variant of AnsibleHostModel
"""
def __lt__(self, other) -> bool:
return self.name < other.name
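# Hypothetical usage sketch (not part of the original module): the ordered variant can be
# sorted directly by host name thanks to __lt__, e.g.
#
#   hosts = [AnsibleOrderedHostModel("web02", "10.0.0.2"),
#            AnsibleOrderedHostModel("web01", "10.0.0.1")]
#   hosts.sort()  # -> web01 first, then web02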
|
import os
import unittest
import http.client
import warnings
from google.cloud import firestore
from retirable_resources import RetirableResourceManager
firestore_project = "foo"
firestore_host = "localhost"
firestore_port = 8080
class FirestoreEmulatorTest:
def setUp(self):
self.__set_environ()
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed")
def __set_environ(self):
self._original_environ = {}
self._original_environ["FIRESTORE_EMULATOR_HOST"] = os.environ.get(
"FIRESTORE_EMULATOR_HOST"
)
self._original_environ["GCLOUD_PROJECT"] = os.environ.get("GCLOUD_PROJECT")
os.environ["FIRESTORE_EMULATOR_HOST"] = f"{firestore_host}:{firestore_port}"
os.environ["GCLOUD_PROJECT"] = firestore_project
    def __restore_environ(self):
        for k, v in self._original_environ.items():
            if v is not None:
                os.environ[k] = v
            else:
                # the variable was not set before the test, so remove it again
                os.environ.pop(k, None)
def __clear_firebase(self):
conn = http.client.HTTPConnection(firestore_host, firestore_port)
conn.request(
"DELETE",
f"/emulator/v1/projects/{firestore_project}/databases/(default)/documents",
)
response = conn.getresponse()
response.read()
if response.status != 200:
raise Exception(
f"unable to delete database: {response.status} {response.reason}"
)
response.close()
conn.close()
def tearDown(self):
self.__clear_firebase()
self.__restore_environ()
class RetirableResourceManagerTest(FirestoreEmulatorTest, unittest.TestCase):
def setUp(self):
super().setUp()
self.client = firestore.Client()
self.r = RetirableResourceManager('foo/bar', client = self.client)
def tearDown(self):
self.client.close()
super().tearDown()
|
from catsleep.cat import config as cfg
if __name__ == '__main__':
conf = cfg.Config()
try:
print('user configurations:')
print(conf.get_user_config())
except Exception as e:
print('default configurations:')
print(conf.get_default_config())
|
'''
Given a string S, you are allowed to convert it to a palindrome by adding characters in front of it. Find and return the shortest palindrome you can find by performing this transformation.
For example:
Given "aacecaaa", return "aaacecaaa".
Given "abcd", return "dcbabcd".
Credits:
Special thanks to @ifanchu for adding this problem and creating all test cases. Thanks to @Freezen for additional test cases.
'''
class Solution:
# @param {string} s
# @return {string}
def shortestPalindrome(self, s):
if not s: return s
new_s = s + '#' + s[::-1]
n = len(new_s)
arr = [0 for i in xrange(n)]
for i in xrange(1, n):
index = arr[i-1]
while index > 0 and new_s[index] != new_s[i]:
index = arr[index-1]
arr[i] = index + (1 if new_s[index] == new_s[i] else 0)
return s[arr[n-1]:][::-1] + s
# Reference: https://leetcode.com/discuss/36807/c-8-ms-kmp-based-o-n-time-%26-o-n-memory-solution
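# Hypothetical sanity check (not part of the original file): exercises the two examples from
# the problem statement above.
if __name__ == '__main__':
    sol = Solution()
    assert sol.shortestPalindrome("aacecaaa") == "aaacecaaa"
    assert sol.shortestPalindrome("abcd") == "dcbabcd"
    print("shortestPalindrome examples passed")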
|
from django.core.urlresolvers import reverse
from rest_framework import serializers
from django_gravatar.templatetags import gravatar
from profiles.models import Profile
class UserProfileSerializer(serializers.ModelSerializer):
absolute_url = serializers.SerializerMethodField()
avatar = serializers.SerializerMethodField()
class Meta:
model = Profile
fields = ('id', 'username', 'absolute_url', 'avatar',)
def get_absolute_url(self, obj):
return reverse("api-profile-detail", args=[obj.username])
def get_avatar(self, obj):
return gravatar.get_gravatar_url(obj.email)
|
# log.py
#
#
""" log module to set standard format of logging. """
from meds.utils.file import cdir
from meds.utils.join import j
import logging.handlers
import logging
import os
import socket
LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'warn': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
#:
ERASE_LINE = '\033[2K'
BOLD='\033[1m'
GRAY='\033[99m'
RED = '\033[91m'
YELLOW = '\033[93m'
GREEN = '\033[92m'
BLUE = '\033[94m'
BLA = '\033[95m'
ENDC = '\033[0m'
#:
homedir = os.path.expanduser("~")
curdir = os.getcwd()
try: hostname = socket.getfqdn()
except: hostname = "localhost"
logdir = homedir + os.sep + "meds.logs" + os.sep
#:
datefmt = '%H:%M:%S'
format_large = "%(asctime)-8s %(message)-8s %(module)s.%(lineno)s %(threadName)-10s"
format = "%(message)-8s"
class Formatter(logging.Formatter):
def format(self, record):
target = str(record.msg)
if not target: target = " "
if target[0] in [">", ]: target = "%s%s%s%s" % (BLUE, target[0], ENDC, target[1:])
elif target[0] in ["<", ]: target = "%s%s%s%s" % (GREEN, target[0], ENDC, target[1:])
elif target[0] in ["!", ]: target = "%s%s%s%s" % (BLUE, target[0], ENDC, target[1:])
elif target[0] in ["#", ]: target = "%s%s%s%s" % (BLA, target[0], ENDC, target[1:])
elif target[0] in ["^", ]: target = "%s%s%s%s" % (YELLOW, target[0], ENDC, target[1:])
elif target[0] in ["-", ]: target = "%s%s%s%s" % (BOLD, target[0], ENDC, target[1:])
elif target[0] in ["&", ]: target = "%s%s%s%s" % (RED, target[0], ENDC, target[1:])
record.msg = target
return logging.Formatter.format(self, record)
class FormatterClean(logging.Formatter):
def format(self, record):
target = str(record.msg)
if not target: target = " "
if target[0] in [">", "<", "!", "#", "^", "-", "&"]: target = target[2:]
record.msg = target
return logging.Formatter.format(self, record)
def log(level, error):
l = LEVELS.get(str(level).lower(), logging.NOTSET)
logging.log(l, error)
def loglevel(loglevel="error", colors=True):
logger = logging.getLogger("")
if colors: formatter = Formatter(format, datefmt=datefmt)
else: formatter = FormatterClean(format, datefmt=datefmt)
level = LEVELS.get(str(loglevel).lower(), logging.NOTSET)
filehandler = None
logger.setLevel(level)
if logger.handlers:
for handler in logger.handlers: logger.removeHandler(handler)
if not os.path.exists(logdir): cdir(logdir)
try: filehandler = logging.handlers.TimedRotatingFileHandler(j(logdir, "meds.log"), 'midnight')
except Exception as ex: logging.error(ex)
ch = logging.StreamHandler()
ch.setLevel(level)
    ch.setFormatter(formatter)
logger.addHandler(ch)
if filehandler:
ch.setFormatter(formatter)
filehandler.setLevel(level)
logger.addHandler(filehandler)
global enabled
enabled = True
return logger
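# Hypothetical usage sketch (not part of the original module):
#
#   logger = loglevel("info", colors=True)
#   log("info", "> starting meds")   # the leading ">" is colorized by Formatter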
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
from ansible.plugins.action import ActionBase
from ansible.errors import AnsibleActionFail
from ansible.utils.vars import isidentifier
from ansible.plugins.filter.core import combine
from ansible.plugins.loader import lookup_loader
from ansible_collections.arista.avd.plugins.module_utils.strip_empties import strip_null_from_data
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = {}
result = super().run(tmp, task_vars)
del tmp # tmp no longer has any effect
root_key = ""
if self._task.args:
if "root_key" in self._task.args:
n = self._task.args.get("root_key")
n = self._templar.template(n)
if not isidentifier(n):
raise AnsibleActionFail(f"The argument 'root_key' value of '{n}' is not valid. Keys must start with a letter or underscore character, "
"and contain only letters, numbers and underscores.")
root_key = n
if "templates" in self._task.args:
t = self._task.args.get("templates")
if isinstance(t, list):
template_list = t
else:
raise AnsibleActionFail("The argument 'templates' is not a list")
else:
raise AnsibleActionFail("The argument 'templates' must be set")
else:
raise AnsibleActionFail("The argument 'templates' must be set")
output = {}
template_lookup_module = lookup_loader.get('ansible.builtin.template', loader=self._loader, templar=self._templar)
template_vars = task_vars
for template_item in template_list:
template = template_item.get('template')
if not template:
raise AnsibleActionFail("Invalid template data")
template_options = template_item.get('options', {})
list_merge = template_options.get('list_merge', 'append')
strip_empty_keys = template_options.get('strip_empty_keys', True)
if root_key:
template_vars[root_key] = output
else:
template_vars = combine(task_vars, output, recursive=True)
template_output = template_lookup_module.run([template], template_vars)
template_output_data = yaml.safe_load(template_output[0])
if strip_empty_keys:
template_output_data = strip_null_from_data(template_output_data)
if template_output_data:
output = combine(output, template_output_data, recursive=True, list_merge=list_merge)
if root_key:
result['ansible_facts'] = {root_key: output}
else:
result['ansible_facts'] = output
return result
|
"""
One Away
There are three types of edits that can be performed on strings: insert a character, remove a character, or replace a character.
Given two strings, write a function to check if they are one edit (or zero edits) away.
Example:
pale, ple -> true
pales, pale -> true
pale, bale -> true
pale, bake -> false
"""
# Time: O(n)
# Space: O(1)
# If the string lengths are equal, count the positions where the characters differ (replacements).
# If the lengths differ by one, walk both strings with two pointers and allow a single skipped
# character in the longer string (one insert/remove). If the lengths differ by more than one,
# the strings cannot be one edit away.
class Solution:
    def is_one_away(self, string1: str, string2: str) -> bool:
        length_diff = abs(len(string1) - len(string2))
        if length_diff > 1:
            return False
        if length_diff == 1:
            # Make string1 the longer string so at most one character of it may be skipped.
            if len(string1) < len(string2):
                string1, string2 = string2, string1
            i = j = 0
            skipped = False
            while i < len(string1) and j < len(string2):
                if string1[i] != string2[j]:
                    if skipped:
                        return False
                    skipped = True
                    i += 1  # skip the extra character in the longer string
                else:
                    i += 1
                    j += 1
            return True
        # Equal lengths: at most one replacement is allowed.
        count_diff = 0
        for i in range(len(string1)):
            if string1[i] != string2[i]:
                count_diff += 1
                if count_diff > 1:
                    return False
        return True
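# Hypothetical quick check (not part of the original file): runs the examples listed in the
# problem statement above.
if __name__ == "__main__":
    solution = Solution()
    for a, b, expected in [("pale", "ple", True), ("pales", "pale", True),
                           ("pale", "bale", True), ("pale", "bake", False)]:
        assert solution.is_one_away(a, b) == expected, (a, b)
    print("all one-away examples passed")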
|
from unittest import mock
from ... import *
from .common import known_langs, mock_which
from bfg9000 import options as opts
from bfg9000.file_types import (HeaderDirectory, HeaderFile,
MsvcPrecompiledHeader, ObjectFile, SourceFile)
from bfg9000.iterutils import merge_dicts
from bfg9000.path import Path, Root
from bfg9000.tools.msvc import MsvcBuilder
class TestMsvcCompiler(CrossPlatformTestCase):
def __init__(self, *args, **kwargs):
super().__init__(clear_variables=True, *args, **kwargs)
def setUp(self):
with mock.patch('bfg9000.shell.which', mock_which):
self.compiler = MsvcBuilder(self.env, known_langs['c++'], ['cl'],
'version').compiler
def test_call(self):
extra = self.compiler._always_flags
self.assertEqual(self.compiler('in', 'out'),
[self.compiler] + extra + ['/c', 'in', '/Foout'])
self.assertEqual(
self.compiler('in', 'out', flags=['flags']),
[self.compiler] + extra + ['flags', '/c', 'in', '/Foout']
)
self.assertEqual(
self.compiler('in', 'out', 'out.d'),
[self.compiler] + extra + ['/showIncludes', '/c', 'in', '/Foout']
)
self.assertEqual(
self.compiler('in', 'out', 'out.d', ['flags']),
[self.compiler] + extra + ['flags', '/showIncludes', '/c', 'in',
'/Foout']
)
def test_default_name(self):
src = SourceFile(Path('file.cpp', Root.srcdir), 'c++')
self.assertEqual(self.compiler.default_name(src, None), 'file')
def test_output_file(self):
fmt = self.env.target_platform.object_format
self.assertEqual(self.compiler.output_file('file', None),
ObjectFile(Path('file.obj'), fmt, 'c++'))
def test_flags_empty(self):
self.assertEqual(self.compiler.flags(opts.option_list()), ['/MD'])
def test_flags_include_dir(self):
p = self.Path('/path/to/include')
self.assertEqual(self.compiler.flags(opts.option_list(
opts.include_dir(HeaderDirectory(p))
)), ['/I' + p, '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.include_dir(HeaderDirectory(p))
), mode='pkg-config'), ['-I' + p])
def test_flags_define(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME')
)), ['/DNAME', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME')
), mode='pkg-config'), ['-DNAME'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME', 'value')
)), ['/DNAME=value', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME', 'value')
), mode='pkg-config'), ['-DNAME=value'])
def test_flags_std(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.std('c++14')
)), ['/std:c++14', '/MD'])
def test_flags_static(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.static()
)), ['/MT'])
self.assertEqual(self.compiler.flags(
opts.option_list(),
opts.option_list(opts.static()),
), ['/MT'])
def test_flags_debug(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.debug()
)), ['/Zi', '/MDd'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.debug(), opts.static()
)), ['/Zi', '/MTd'])
self.assertEqual(self.compiler.flags(
opts.option_list(),
opts.option_list(opts.debug()),
), ['/MDd'])
self.assertEqual(self.compiler.flags(
opts.option_list(opts.static()),
opts.option_list(opts.debug()),
), ['/MTd'])
def test_flags_warning(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('disable')
)), ['/w', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('all')
)), ['/W3', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('extra')
)), ['/W4', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('error')
)), ['/WX', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('all', 'extra', 'error')
)), ['/W3', '/W4', '/WX', '/MD'])
with self.assertRaises(ValueError):
self.compiler.flags(opts.option_list(opts.warning('unknown')))
def test_flags_optimize(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.optimize('disable')
)), ['/Od', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.optimize('size')
)), ['/O1', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.optimize('speed')
)), ['/O2', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.optimize('linktime')
)), ['/GL', '/MD'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.optimize('speed', 'linktime')
)), ['/O2', '/GL', '/MD'])
def test_flags_include_pch(self):
p = self.Path('/path/to/header.hpp')
self.assertEqual(self.compiler.flags(opts.option_list(opts.pch(
MsvcPrecompiledHeader(p, p, 'header', 'native', 'c++')
))), ['/Yuheader', '/MD'])
def test_flags_sanitize(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.sanitize()
)), ['/RTC1', '/MD'])
def test_flags_string(self):
self.assertEqual(self.compiler.flags(opts.option_list('-v')),
['-v', '/MD'])
def test_flags_invalid(self):
with self.assertRaises(TypeError):
self.compiler.flags(opts.option_list(123))
def test_parse_flags(self):
default = {
'debug': None,
'defines': [],
'extra': [],
'includes': [],
'nologo': None,
'pch': {'create': None, 'use': None},
'runtime': None,
'warnings': {'as_error': None, 'level': None}
}
def assertFlags(flags, extra={}):
self.assertEqual(self.compiler.parse_flags(flags),
merge_dicts(default, extra))
assertFlags([])
assertFlags(['/un', 'known'], {'extra': ['/un', 'known']})
assertFlags(['/nologo'], {'nologo': True})
assertFlags(['/Dfoo'], {'defines': ['foo']})
assertFlags(['/Idir'], {'includes': ['dir']})
assertFlags(['/Z7'], {'debug': 'old'})
assertFlags(['/Zi'], {'debug': 'pdb'})
assertFlags(['/ZI'], {'debug': 'edit'})
assertFlags(['/W0'], {'warnings': {'level': '0'}})
assertFlags(['/Wall'], {'warnings': {'level': 'all'}})
assertFlags(['/WX'], {'warnings': {'as_error': True}})
assertFlags(['/WX-'], {'warnings': {'as_error': False}})
assertFlags(['/w'], {'warnings': {'level': '0'}})
assertFlags(['/Yufoo'], {'pch': {'use': 'foo'}})
assertFlags(['/Ycfoo'], {'pch': {'create': 'foo'}})
class TestMsvcPchCompiler(TestMsvcCompiler):
def setUp(self):
with mock.patch('bfg9000.shell.which', mock_which):
self.compiler = MsvcBuilder(self.env, known_langs['c++'], ['cl'],
'version').pch_compiler
def test_call(self):
extra = self.compiler._always_flags
self.assertEqual(self.compiler('in', ['out_pch', 'out']),
[self.compiler] + extra + ['/c', 'in', '/Foout',
'/Fpout_pch'])
self.assertEqual(
self.compiler('in', ['out_pch', 'out'], flags=['flags']),
[self.compiler] + extra + ['flags', '/c', 'in', '/Foout',
'/Fpout_pch']
)
self.assertEqual(
self.compiler('in', ['out_pch', 'out'], 'out.d'),
[self.compiler] + extra + ['/showIncludes', '/c', 'in', '/Foout',
'/Fpout_pch']
)
self.assertEqual(
self.compiler('in', ['out_pch', 'out'], 'out.d', ['flags']),
[self.compiler] + extra + ['flags', '/showIncludes', '/c', 'in',
'/Foout', '/Fpout_pch']
)
def test_default_name(self):
hdr = HeaderFile(Path('file.hpp', Root.srcdir), 'c++')
self.assertEqual(self.compiler.default_name(hdr, None), 'file.hpp')
def test_output_file(self):
fmt = self.env.target_platform.object_format
out = MsvcPrecompiledHeader(
Path('hdr.pch'), Path('src.obj'), 'hdr.h', fmt, 'c++'
)
self.assertEqual(self.compiler.output_file('hdr.h', AttrDict(
pch_source=SourceFile(Path('src.c'), 'c')
)), [out, out.object_file])
|
#!/usr/bin/python
"""
__version__ = "$Revision: 1.27 $"
__date__ = "$Date: 2004/10/03 18:16:55 $"
"""
from PythonCard import dialog, model
import os, sys
import wx
import minimalDialog
class Dialogs(model.Background):
def on_initialize(self, event):
self.fitToComponents(None, 5)
def on_listDialogs_select(self, event):
name = event.stringSelection
handlers = {
'alert':self.on_buttonAlert_mouseClick,
'color':self.on_buttonColor_mouseClick,
'directory':self.on_buttonDir_mouseClick,
'file':self.on_buttonFile_mouseClick,
'find':self.on_buttonFind_mouseClick,
'font':self.on_buttonFont_mouseClick,
'message':self.on_buttonMessage_mouseClick,
'multiple choice':self.on_buttonMultipleChoice_mouseClick,
'scrolled message':self.on_buttonScrolledMessage_mouseClick,
'single choice':self.on_buttonSingleChoice_mouseClick,
'open file':self.on_buttonOpenFile_mouseClick,
'save file':self.on_buttonSaveFile_mouseClick,
'text entry':self.on_buttonTextEntry_mouseClick,
'minimal':self.on_buttonMinimalDialog_mouseClick,
}
# call the appropriate handler
handlers[name](None)
def on_buttonMultipleChoice_mouseClick(self, event):
result = dialog.multipleChoiceDialog(self, "message", "title", ['one', 'two', 'three'])
self.components.fldResults.text = "multipleChoiceDialog result:\naccepted: %s\nSelection: %s" % (result.accepted, result.selection)
def on_buttonSingleChoice_mouseClick(self, event):
result = dialog.singleChoiceDialog(self, "message", "title", ['one', 'two', 'three'])
self.components.fldResults.text = "singleChoiceDialog result:\naccepted: %s\nSelection: %s" % (result.accepted, result.selection)
def on_buttonFind_mouseClick(self, event):
result = dialog.findDialog(self)
self.components.fldResults.text = "findDialog result:\naccepted: %s\nText: %s\nWhole word only: %s\nCase sensitive: %s" % (result.accepted,
result.searchText,
result.wholeWordsOnly,
result.caseSensitive)
def on_buttonColor_mouseClick(self, event):
result = dialog.colorDialog(self)
self.components.fldResults.text = "colorDialog result:\naccepted: %s\nColor: %s" % (result.accepted, result.color)
def on_buttonFont_mouseClick(self, event):
result = dialog.fontDialog(self)
self.components.fldResults.text = "fontDialog result:\naccepted: %s\nColor: %s\nFont: %s" % (result.accepted, result.color, result.font)
def on_buttonFile_mouseClick(self, event):
wildcard = "JPG files (*.jpg;*.jpeg)|*.jpeg;*.JPG;*.JPEG;*.jpg|GIF files (*.gif)|*.GIF;*.gif|All Files (*.*)|*.*"
# wildcard = '*.py'
result = dialog.fileDialog(self, 'Open', '', '', wildcard )
self.components.fldResults.text = "fileDialog result:\naccepted: %s\npaths: %s" % (result.accepted, result.paths)
def on_buttonOpenFile_mouseClick(self, event):
wildcard = "JPG files (*.jpg;*.jpeg)|*.jpeg;*.JPG;*.JPEG;*.jpg|GIF files (*.gif)|*.GIF;*.gif|All Files (*.*)|*.*"
# wildcard = '*.py'
result = dialog.openFileDialog(wildcard=wildcard)
self.components.fldResults.text = "openFileDialog result:\naccepted: %s\npaths: %s" % (result.accepted, result.paths)
def on_buttonSaveFile_mouseClick(self, event):
wildcard = "JPG files (*.jpg;*.jpeg)|*.jpeg;*.JPG;*.JPEG;*.jpg|GIF files (*.gif)|*.GIF;*.gif|All Files (*.*)|*.*"
# wildcard = '*.py'
result = dialog.saveFileDialog(wildcard=wildcard)
self.components.fldResults.text = "saveFileDialog result:\naccepted: %s\npaths: %s" % (result.accepted, result.paths)
def on_buttonDir_mouseClick(self, event):
result = dialog.directoryDialog(self, 'Choose a directory', 'a')
self.components.fldResults.text = "directoryDialog result:\naccepted: %s\npath: %s" % (result.accepted, result.path)
"""
You can pass in a specific icon (default is wx.ICON_INFORMATION)
as well as the buttons (default is wx.OK | wx.CANCEL)
wx.ICON_EXCLAMATION # Shows an exclamation mark icon.
wx.ICON_HAND # Shows an error icon.
wx.ICON_ERROR # Shows an error icon - the same as wxICON_HAND.
wx.ICON_QUESTION # Shows a question mark icon.
wx.ICON_INFORMATION # Shows an information (i) icon.
wx.OK # Show an OK button.
wx.CANCEL # Show a Cancel button.
wx.YES_NO # Show Yes and No buttons.
wx.YES_DEFAULT # Used with wx.YES_NO, makes Yes button the default - which is the default behaviour.
wx.NO_DEFAULT # Used with wx.YES_NO, makes No button the default.
"""
def on_buttonMessage_mouseClick(self, event):
"""
result = dialog.messageDialog(self, 'a message', 'a title',
wx.ICON_ERROR | wx.YES_NO)
"""
result = dialog.messageDialog(self, 'a message', 'a title',
wx.ICON_INFORMATION | wx.YES_NO | wx.NO_DEFAULT | wx.CANCEL)
#result = dialog.messageDialog(self, 'a message', 'a title')
self.components.fldResults.text = "messageDialog result:\naccepted: %s\nreturnedString: %s" % (result.accepted, result.returnedString)
# you can pass in an additional aStyle parameter
# of wx.TE_PASSWORD or wx.TE_MULTILINE
def on_buttonTextEntry_mouseClick(self, event):
result = dialog.textEntryDialog(self,
'What is your favorite language?',
'A window title',
'Python')
"""
result = dialog.textEntryDialog(self,
'What is your favorite language?',
'A window title',
'Python', wx.TE_MULTILINE)
"""
self.components.fldResults.text = "textEntryDialog result:\naccepted: %s\nreturnedString: %s\ntext: %s" % (result.accepted, result.returnedString, result.text)
def on_buttonScrolledMessage_mouseClick(self, event):
base, ext = os.path.splitext(os.path.split(sys.argv[0])[-1])
filename = base + ".py"
if os.path.exists(filename):
f = open(filename, "r")
msg = f.read()
else:
msg = "Can't find the file dialogs.py"
result = dialog.scrolledMessageDialog(self, msg, filename)
self.components.fldResults.text = "scrolledMessageDialog result:\naccepted: %s" % (result.accepted)
def on_buttonAlert_mouseClick(self, event):
result = dialog.alertDialog(self, 'a message', 'a title')
self.components.fldResults.text = "alertDialog result:\naccepted: %s\nreturnedString: %s" % (result.accepted, result.returnedString)
def on_buttonMinimalDialog_mouseClick(self, event):
result = minimalDialog.minimalDialog(self, 'hello minimal')
self.components.fldResults.text = "minimalDialog result:\naccepted: %s\ntext: %s" % (result.accepted, result.text)
if __name__ == '__main__':
app = model.Application(Dialogs)
app.MainLoop()
|
__author__ = 'demi'
# Question 9: Deep Reverse
# Define a procedure, deep_reverse, that takes as input a list,
# and returns a new list that is the deep reverse of the input list.
# This means it reverses all the elements in the list, and if any
# of those elements are lists themselves, reverses all the elements
# in the inner list, all the way down.
# Note: The procedure must not change the input list.
# The procedure is_list below is from Homework 6. It returns True if
# p is a list and False if it is not.
def is_list(p):
return isinstance(p, list)
def deep_reverse(l):
if is_list(l):
length = len(l)
newlist = [i for i in l]
i = 0
half = int(length/2)
while (i < half):
first = l[i]
last = l[length - i - 1]
first = deep_reverse(first)
last = deep_reverse(last)
newlist[i] = last
newlist[length - i - 1] = first
i += 1
if length % 2 != 0 and is_list(newlist[half]):
newlist[half] = deep_reverse(newlist[half])
return newlist
return l
#For example,
p = [1, [2, 3], [4, 5]]
print(deep_reverse(p))
#>>> [[5, 4], [3, 2], 1]
# print(p)
# p = [1, [2, 3, [4, [5, 6]]]]
# print(deep_reverse(p))
# #>>> [[[[6, 5], 4], 3, 2], 1]
# print(p)
# #>>> [1, [2, 3, [4, [5, 6]]]]
#
# q = [1, [2,3], 4, [5,6]]
# print(deep_reverse(q))
# #>>> [ [6,5], 4, [3, 2], 1]
# print(q)
#>>> [1, [2,3], 4, [5,6]]
|
test_data = [3,4,3,1,2]
data = [3,4,1,2,1,2,5,1,2,1,5,4,3,2,5,1,5,1,2,2,2,3,4,5,2,5,1,3,3,1,3,4,1,5,3,2,2,1,3,2,5,1,1,4,1,4,5,1,3,1,1,5,3,1,1,4,2,2,5,1,5,5,1,5,4,1,5,3,5,1,1,4,1,2,2,1,1,1,4,2,1,3,1,1,4,5,1,1,1,1,1,5,1,1,4,1,1,1,1,2,1,4,2,1,2,4,1,3,1,2,3,2,4,1,1,5,1,1,1,2,5,5,1,1,4,1,2,2,3,5,1,4,5,4,1,3,1,4,1,4,3,2,4,3,2,4,5,1,4,5,2,1,1,1,1,1,3,1,5,1,3,1,1,2,1,4,1,3,1,5,2,4,2,1,1,1,2,1,1,4,1,1,1,1,1,5,4,1,3,3,5,3,2,5,5,2,1,5,2,4,4,1,5,2,3,1,5,3,4,1,5,1,5,3,1,1,1,4,4,5,1,1,1,3,1,4,5,1,2,3,1,3,2,3,1,3,5,4,3,1,3,4,3,1,2,1,1,3,1,1,3,1,1,4,1,2,1,2,5,1,1,3,5,3,3,3,1,1,1,1,1,5,3,3,1,1,3,4,1,1,4,1,1,2,4,4,1,1,3,1,3,2,2,1,2,5,3,3,1,1]
max_days = 80
#data = test_data
class Fish():
def __init__(self, starting_count = 8) -> None:
self.counter = starting_count
self.first_time = True
def decrement_and_spawn(self):
self.counter -= 1
if self.counter < 0:
self.counter = 6
return True
return False
fishes = [Fish(timer) for timer in data]
new_fish = []
for day_num in range(max_days):
for fish in fishes:
if fish.decrement_and_spawn():
new_fish.append(Fish())
fishes.extend(new_fish)
new_fish.clear()
print(len(fishes))
|
import asyncio
from typing import Union, List
from abc import ABCMeta, abstractmethod
__all__ = ["AbstractApp"]
class AbstractApp(metaclass=ABCMeta):
def __init__(
self,
host: Union[str, None] = None,
port: Union[str, int, None] = None,
loop: "asyncio.AbstractEventLoop" = None,
limit: int = 10,
):
self._host = host or "localhost"
self._port = port or 8000
self._loop = loop or asyncio.get_event_loop()
self._limit = limit
@abstractmethod
async def run_server(self):
pass
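# Hypothetical subclass sketch (not part of the original module), showing how AbstractApp is
# meant to be extended; the echo handler and asyncio.start_server usage are illustrative only:
#
#   class EchoApp(AbstractApp):
#       async def run_server(self):
#           async def handle(reader, writer):
#               writer.write(await reader.read(self._limit))
#               await writer.drain()
#               writer.close()
#           server = await asyncio.start_server(handle, self._host, self._port)
#           async with server:
#               await server.serve_forever()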
|
#!/usr/bin/python
def rest_server(dummy,state):
from bottle import route, run, get, post, request, static_file, abort
from subprocess import call
from datetime import datetime
import set_time
import config as conf
import os
basedir = os.path.dirname(__file__)
wwwdir = basedir+'/www/'
@route('/')
def docroot():
return static_file('index.html',wwwdir)
@route('/<filepath:path>')
def servfile(filepath):
return static_file(filepath,wwwdir)
@route('/curtemp')
def curtemp():
return str(state['temp'])
@get('/settemp')
def settemp():
return str(state['settemp'])
@get('/setsteamtemp')
def setsteamtemp():
return str(state['setsteamtemp'])
@post('/settemp')
def post_settemp():
try:
settemp = float(request.forms.get('settemp'))
if settemp >= 10 and settemp <= 110 :
state['settemp'] = settemp
state['settemp_orig'] = settemp
return str(settemp)
else:
pass
# abort(400,'Set temp out of range 10-110.')
except:
pass
# abort(400,'Invalid number for set temp.')
@post('/setsteamtemp')
def post_setsteamtemp():
try:
setsteamtemp = float(request.forms.get('setsteamtemp'))
if setsteamtemp >= 110 and setsteamtemp <= 150 :
state['setsteamtemp'] = setsteamtemp
return str(setsteamtemp)
else:
pass
# abort(400,'Set temp out of range 110-150.')
except:
pass
# abort(400,'Invalid number for set temp.')
@post('/getclienttime')
def post_clienttime():
try:
clienttime = float(request.forms.get('getclienttime'))/1000
ctv = datetime.fromtimestamp(clienttime)
set_time.set_time((ctv.year, ctv.month, ctv.day, ctv.hour, ctv.minute, ctv.second, 0))
except:
pass
# abort(400,'Could not synchronize time')
    # The fourteen weekday timer endpoints are identical apart from their name, so they are
    # registered in a loop instead of one copy-pasted handler per route (behavior unchanged).
    def make_timer_route(name):
        @post('/' + name)
        def post_timer():
            value = request.forms.get(name)
            try:
                datetime.strptime(value, '%H:%M')
            except:
                pass
                # abort(400,'Invalid time format.')
            state[name] = value
            return str(value)
        return post_timer
    for day in ('Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su'):
        make_timer_route('TimerOn' + day)
        make_timer_route('TimerOff' + day)
@get('/allstats')
def allstats():
return dict(state)
@route('/restart')
def restart():
call(["reboot"])
return '';
@route('/shutdown')
def shutdown():
call(["shutdown","-h","now"])
return '';
@get('/healthcheck')
def healthcheck():
return 'OK'
run(host='0.0.0.0',port=conf.port,server='auto')
|
# Copyright 2018-2021 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import re
import typing
from redbot import VersionInfo
from strictyaml import Regex
from strictyaml.exceptions import YAMLSerializationError, YAMLValidationError
from strictyaml.utils import is_string
from strictyaml.yamllocation import YAMLChunk
__all__ = ("PythonVersion", "RedVersion")
if typing.TYPE_CHECKING:
# TODO: stub strictyaml
    # this is an awful workaround (along with the "ignore missing imports" setting in mypy.ini)
ScalarValidator = object
else:
from strictyaml import ScalarValidator
class PythonVersion(ScalarValidator):
REGEX = re.compile(r"(\d+)\.(\d+)\.(\d+)")
def __init__(self) -> None:
self._matching_message = "when expecting Python version (MAJOR.MINOR.MICRO)"
def validate_scalar(self, chunk: YAMLChunk) -> typing.List[int]:
match = self.REGEX.fullmatch(chunk.contents)
if match is None:
raise YAMLValidationError(
self._matching_message, "found non-matching string", chunk
)
return [int(group) for group in match.group(1, 2, 3)]
def to_yaml(self, data: typing.Any) -> str:
if isinstance(data, collections.abc.Sequence):
if len(data) != 3:
raise YAMLSerializationError(
f"expected a sequence of 3 elements, got {len(data)} elements"
)
for item in data:
if not isinstance(item, int):
raise YAMLSerializationError(
f"expected int, got '{item}' of type '{type(item).__name__}'"
)
if item < 0:
raise YAMLSerializationError(
f"expected non-negative int, got {item}"
)
return ".".join(str(segment) for segment in data)
if is_string(data):
# we just validated that it's a string
version_string = typing.cast(str, data)
if self.REGEX.fullmatch(version_string) is None:
raise YAMLSerializationError(
"expected Python version (MAJOR.MINOR.MICRO),"
f" got '{version_string}'"
)
return version_string
raise YAMLSerializationError(
"expected string or sequence,"
f" got '{data}' of type '{type(data).__name__}'"
)
def RedVersion() -> Regex:
return Regex(VersionInfo._VERSION_STR_PATTERN.pattern)
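# Hypothetical usage sketch (not part of the original module): the validator plugs into a
# strictyaml schema like any other scalar validator, e.g.
#
#   from strictyaml import Map, load
#   document = load("python: 3.8.1", Map({"python": PythonVersion()}))
#   document.data["python"]            # -> [3, 8, 1]
#   PythonVersion().to_yaml([3, 8, 1]) # -> "3.8.1"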
|
import hashlib
from Crypto.Hash import MD2, MD4
from hashlib import md5
class CryptographerMD2:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
cipher = MD2.new(plain_text)
cipher_text = cipher.hexdigest()
return cipher_text
class CryptographerMD4:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
cipher = MD4.new(plain_text)
cipher_text = cipher.hexdigest()
return cipher_text
class CryptographerMD5:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
cipher = md5(plain_text)
cipher_text = cipher.hexdigest()
return cipher_text
class CryptographerRIPEMD:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
cipher = hashlib.new('ripemd160')
cipher.update(plain_text)
cipher_text = cipher.hexdigest()
return cipher_text
class CryptographerWHIRLPOOL:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
cipher = hashlib.new('whirlpool')
cipher.update(plain_text)
cipher_text = cipher.hexdigest()
return cipher_text
class CryptographerSHA1:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
msg = hashlib.sha1(plain_text)
return msg.hexdigest()
class CryptographerSHA224:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
msg = hashlib.sha224(plain_text)
return msg.hexdigest()
class CryptographerSHA256:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
msg = hashlib.sha256(plain_text)
return msg.hexdigest()
class CryptographerSHA384:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
msg = hashlib.sha384(plain_text)
return msg.hexdigest()
class CryptographerSHA512:
def __init__(self):
pass
def encrypt(self, plain_text):
if type(plain_text) == str:
plain_text = plain_text.encode('utf-8')
msg = hashlib.sha512(plain_text)
return msg.hexdigest()
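# Hypothetical usage sketch (not part of the original module): every class exposes the same
# encrypt() interface and returns a hex digest string, e.g.
#
#   digest = CryptographerSHA256().encrypt("hello")
#   digest = CryptographerMD5().encrypt(b"raw bytes work too")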
|
import torch
import data as Data
import model as Model
import argparse
import logging
import core.logger as Logger
import core.metrics as Metrics
from tensorboardX import SummaryWriter
import os
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='config/sr_sr3_16_128.json',
help='JSON file for configuration')
parser.add_argument('-p', '--phase', type=str, choices=['train', 'val'],
help='Run either train(training) or val(generation)', default='train')
parser.add_argument('-gpu', '--gpu_ids', type=str, default=None)
parser.add_argument('-debug', '-d', action='store_true')
# parse configs
args = parser.parse_args()
opt = Logger.parse(args)
# Convert to NoneDict, which return None for missing key.
opt = Logger.dict_to_nonedict(opt)
# logging
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
Logger.setup_logger(None, opt['path']['log'],
'train', level=logging.INFO, screen=True)
Logger.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)
logger = logging.getLogger('base')
logger.info(Logger.dict2str(opt))
tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger'])
# dataset
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train' and args.phase != 'val':
train_set = Data.create_dataset(dataset_opt, phase)
train_loader = Data.create_dataloader(
train_set, dataset_opt, phase)
elif phase == 'val':
val_set = Data.create_dataset(dataset_opt, phase)
val_loader = Data.create_dataloader(
val_set, dataset_opt, phase)
logger.info('Initial Dataset Finished')
# model
diffusion = Model.create_model(opt)
logger.info('Initial Model Finished')
# Train
current_step = diffusion.begin_step
current_epoch = diffusion.begin_epoch
n_iter = opt['train']['n_iter']
if opt['path']['resume_state']:
logger.info('Resuming training from epoch: {}, iter: {}.'.format(
current_epoch, current_step))
diffusion.set_new_noise_schedule(
opt['model']['beta_schedule'][opt['phase']], schedule_phase=opt['phase'])
if opt['phase'] == 'train':
while current_step < n_iter:
current_epoch += 1
for _, train_data in enumerate(train_loader):
current_step += 1
if current_step > n_iter:
break
diffusion.feed_data(train_data)
diffusion.optimize_parameters()
# log
if current_step % opt['train']['print_freq'] == 0:
logs = diffusion.get_current_log()
message = '<epoch:{:3d}, iter:{:8,d}> '.format(
current_epoch, current_step)
for k, v in logs.items():
message += '{:s}: {:.4e} '.format(k, v)
tb_logger.add_scalar(k, v, current_step)
logger.info(message)
# validation
if current_step % opt['train']['val_freq'] == 0:
avg_psnr = 0.0
idx = 0
result_path = '{}/{}'.format(opt['path']
['results'], current_epoch)
os.makedirs(result_path, exist_ok=True)
diffusion.set_new_noise_schedule(
opt['model']['beta_schedule']['val'], schedule_phase='val')
for _, val_data in enumerate(val_loader):
idx += 1
diffusion.feed_data(val_data)
diffusion.test(continous=False)
visuals = diffusion.get_current_visuals()
sr_img = Metrics.tensor2img(visuals['SR']) # uint8
hr_img = Metrics.tensor2img(visuals['HR']) # uint8
lr_img = Metrics.tensor2img(visuals['LR']) # uint8
fake_img = Metrics.tensor2img(visuals['INF']) # uint8
# generation
Metrics.save_img(
hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))
Metrics.save_img(
sr_img, '{}/{}_{}_sr.png'.format(result_path, current_step, idx))
Metrics.save_img(
lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))
Metrics.save_img(
fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))
tb_logger.add_image(
'Iter_{}'.format(current_step),
np.transpose(np.concatenate(
(fake_img, sr_img, hr_img), axis=1), [2, 0, 1]),
idx)
avg_psnr += Metrics.calculate_psnr(
sr_img, hr_img)
avg_psnr = avg_psnr / idx
diffusion.set_new_noise_schedule(
opt['model']['beta_schedule']['train'], schedule_phase='train')
# log
logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))
logger_val = logging.getLogger('val') # validation logger
logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}'.format(
current_epoch, current_step, avg_psnr))
# tensorboard logger
tb_logger.add_scalar('psnr', avg_psnr, current_step)
if current_step % opt['train']['save_checkpoint_freq'] == 0:
logger.info('Saving models and training states.')
diffusion.save_network(current_epoch, current_step)
# save model
logger.info('End of training.')
else:
logger.info('Begin Model Evaluation.')
avg_psnr = 0.0
avg_ssim = 0.0
idx = 0
result_path = '{}'.format(opt['path']['results'])
os.makedirs(result_path, exist_ok=True)
for _, val_data in enumerate(val_loader):
idx += 1
diffusion.feed_data(val_data)
diffusion.test(continous=True)
visuals = diffusion.get_current_visuals()
hr_img = Metrics.tensor2img(visuals['HR']) # uint8
lr_img = Metrics.tensor2img(visuals['LR']) # uint8
fake_img = Metrics.tensor2img(visuals['INF']) # uint8
sr_img_mode = 'grid'
if sr_img_mode == 'single':
# single img series
sr_img = visuals['SR'] # uint8
sample_num = sr_img.shape[0]
for iter in range(0, sample_num):
Metrics.save_img(
Metrics.tensor2img(sr_img[iter]), '{}/{}_{}_sr_{}.png'.format(result_path, current_step, idx, iter))
else:
# grid img
sr_img = Metrics.tensor2img(visuals['SR']) # uint8
Metrics.save_img(
sr_img, '{}/{}_{}_sr_process.png'.format(result_path, current_step, idx))
Metrics.save_img(
Metrics.tensor2img(visuals['SR'][-1]), '{}/{}_{}_sr.png'.format(result_path, current_step, idx))
Metrics.save_img(
hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))
Metrics.save_img(
lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))
Metrics.save_img(
fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))
# generation
avg_psnr += Metrics.calculate_psnr(
Metrics.tensor2img(visuals['SR'][-1]), hr_img)
avg_ssim += Metrics.calculate_ssim(
Metrics.tensor2img(visuals['SR'][-1]), hr_img)
avg_psnr = avg_psnr / idx
avg_ssim = avg_ssim / idx
# log
logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))
logger.info('# Validation # SSIM: {:.4e}'.format(avg_ssim))
logger_val = logging.getLogger('val') # validation logger
logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}, ssim:{:.4e}'.format(
current_epoch, current_step, avg_psnr, avg_ssim))
|
__author__ = "Lukas McClelland <lumcclel@cisco.com>"
from unicon.plugins.iosxe.statemachine import IosXESingleRpStateMachine
class IosXECat8kSingleRpStateMachine(IosXESingleRpStateMachine):
def create(self):
super().create()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
# Local imports
from .c_4_grad import c_4_grad
from .gradient import gradient
from .avg_4sc import avg_4sc
from .ts_vec_xyz import ts_vec_xyz
__author__ = "Louis Richard"
__email__ = "louisr@irfu.se"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def st_diff(r_mms, b_mms, lmn):
r"""Computes velocity of the structure using spatio-temporal
derivative method [13]_ [14]_ as
.. math::
\mathbf{V}_{str}^{\mathbf{LMN}} = -\textrm{d}_t
\mathbf{B}^{\mathbf{LMN}}\left[\nabla^{\mathbf{LMN}}
\mathbf{B}^{\mathbf{LMN}}\right]^T\left[
\mathbf{S}^{\mathbf{LMN}}\right]^{-1}
where :math:`\mathbf{B}^{\mathbf{LMN}}`,
:math:`\nabla^{\mathbf{LMN}}\mathbf{B}^{\mathbf{LMN}}`,
:math:`\mathbf{S}^{\mathbf{LMN}}` and :math:`\mathbf{V}_{str}^{\mathbf{LMN}}`
are, respectively, the magnetic field, its gradient, its rotation rate
tensor and the velocity of the structure in the LMN coordinate system.
Parameters
----------
r_mms : list of xarray.DataArray
Spacecraft positions.
b_mms : list of xarray.DataArray
Background magnetic field.
lmn : ndarray
Structure coordinates system.
Returns
-------
v_str : xarray.DataArray
Velocity of the structure in its coordinates system.
References
----------
.. [13] Shi, Q. Q., Shen, C., Pu, Z. Y., Dunlop, M. W., Zong, Q. G.,
Zhang, H., et al. (2005), Dimensional analysis of observed
structures using multipoint magnetic field measurements:
Application to Cluster. Geophysical Research Letters, 32,
L12105. doi : https://doi.org/10.1029/2005GL022454.
.. [14] Shi, Q. Q., Shen, C., Dunlop, M. W., Pu, Z. Y., Zong, Q. G.,
Liu, Z. X., et al. (2006), Motion of observed structures
calculated from multi‐point magnetic field measurements:
Application to Cluster. Geophysical Research Letters, 33,
L08109. doi : https://doi.org/10.1029/2005GL025073.
"""
# Compute magnetic field at the center of mass of the tetrahedron
b_xyz = avg_4sc(b_mms)
# Gradient of the magnetic field
grad_b = c_4_grad(r_mms, b_mms)
# Time derivative of the magnetic field at the center of mass of the
# tetrahedron
db_dt = gradient(b_xyz)
# Transform gradient to LMN frame
l_grad_b = np.matmul(grad_b.data, lmn[:, 0])
m_grad_b = np.matmul(grad_b.data, lmn[:, 1])
n_grad_b = np.matmul(grad_b.data, lmn[:, 2])
# Compute velocity of the structure using the spatio-temporal derivative (STD) method
v_str = np.zeros(db_dt.shape)
v_str[:, 0] = np.sum(db_dt * l_grad_b, axis=1)
v_str[:, 0] /= np.linalg.norm(l_grad_b, axis=1) ** 2
v_str[:, 1] = np.sum(db_dt * m_grad_b, axis=1)
v_str[:, 1] /= np.linalg.norm(m_grad_b, axis=1) ** 2
v_str[:, 2] = np.sum(db_dt * n_grad_b, axis=1)
v_str[:, 2] /= np.linalg.norm(n_grad_b, axis=1) ** 2
# To time series
v_str_xyz = ts_vec_xyz(b_xyz.time.data, -v_str)
return v_str_xyz
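# The sketch below is a hypothetical usage example (not part of the original
# module). It assumes the caller has already loaded the four-spacecraft
# position and magnetic-field time series elsewhere, and it uses an identity
# matrix as a placeholder for the LMN rotation matrix.
def _st_diff_usage_example(r_mms, b_mms):
    """Illustrative only: compute the structure velocity in a placeholder
    LMN frame (identity matrix, i.e. the input coordinate axes)."""
    lmn = np.eye(3)  # assumption: replace with a real LMN rotation matrix
    return st_diff(r_mms, b_mms, lmn)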
|
import tensorflow as tf
from tensorflow import keras
from modeling import building_model
def training(model, train_data, train_labels, epochs, early_stop=False, patience=10):
if early_stop:
# use a distinct name for the callback so the boolean flag is not shadowed
early_stop_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
history = model.fit(
train_data, train_labels, epochs=epochs, validation_split=0.2, verbose=1, callbacks=[early_stop_cb]
)
else:
history = model.fit(
train_data, train_labels, epochs=epochs, validation_split=0.2, verbose=1, callbacks=[]
)
return history
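# Hypothetical usage sketch (not from the original project): build a tiny
# Keras model on random data and run `training` with early stopping enabled.
# The architecture, data shapes and epoch count are illustrative assumptions.
def _training_usage_example():
    import numpy as np
    model = keras.Sequential([
        keras.layers.Dense(16, activation='relu', input_shape=(8,)),
        keras.layers.Dense(1)
    ])
    model.compile(optimizer='adam', loss='mse')
    x = np.random.rand(128, 8).astype('float32')  # dummy features
    y = np.random.rand(128, 1).astype('float32')  # dummy targets
    return training(model, x, y, epochs=5, early_stop=True, patience=2)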
|
from pytensor.ops.array_ops import *
from pytensor.ops.embedding_ops import *
from pytensor.ops.loss_ops import *
from pytensor.ops.lstm_ops import *
from pytensor.ops.math_ops import *
from pytensor.ops.rnn_ops import *
from pytensor.ops.rnn_util_ops import *
|
class EPL_Team:
def __init__(self,name,slogan="No Slogan",title=0):
self.name = name
self.slogan = slogan
self.title = title
def increaseTitle(self):
self.title += 1
def changeSong(self,song):
self.slogan = song
def showClubInfo(self):
final = ""
final += "Name: "+self.name +"\n"
final += "Song: "+self.slogan+"\n"
final += "Total No of title: "+str(self.title)
return final
manu = EPL_Team('Manchester United', 'Glory Glory Man United')
chelsea = EPL_Team('Chelsea')
print('===================')
print(manu.showClubInfo())
print('##################')
manu.increaseTitle()
print(manu.showClubInfo())
print('===================')
print(chelsea.showClubInfo())
chelsea.changeSong('Keep the blue flag flying high')
print(chelsea.showClubInfo())
|
# coding: utf-8
import datetime
import numpy as np
import pytest
from ..tests import test_platform_base
from ...types.state import State
from ...platform import MovingPlatform, FixedPlatform
from ...models.transition.linear import ConstantVelocity, CombinedLinearGaussianTransitionModel
from ...sensor.radar.radar import RadarBearingRange
from ...types.array import StateVector, CovarianceMatrix
def get_3d_expected(i):
if i == 0:
# static platform or X velocity
return np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, 0, 0],
[0, -1, 0], [0, 0, 1], [0, 0, -1]])
elif i == 1:
# y-axis motion
return np.array([[0, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0],
[1, 0, 0], [0, 0, 1], [0, 0, -1]])
elif i == 2:
# negative x-axis motion
return np.array([[0, 0, 0], [-1, 0, 0], [0, -1, 0], [1, 0, 0],
[0, 1, 0], [0, 0, 1], [0, 0, -1]])
elif i == 3:
# negative y-axis motion
return np.array([[0, 0, 0], [0, -1, 0], [1, 0, 0], [0, 1, 0],
[-1, 0, 0], [0, 0, 1], [0, 0, -1]])
elif i == 4:
# x-y motion
return np.array([[0, 0, 0], [1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(2), -1/np.sqrt(2), 0], [0, 0, 1],
[0, 0, -1]])
elif i == 5:
# neg x- neg y motion
return np.array([[0, 0, 0], [-1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), 1/np.sqrt(2), 0], [0, 0, 1],
[0, 0, -1]])
elif i == 6:
# pos x- neg y motion
return np.array([[0, 0, 0], [1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), -1/np.sqrt(2), 0], [0, 0, 1],
[0, 0, -1]])
elif i == 7:
# neg x- pos y motion
return np.array([[0, 0, 0], [-1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(2), 1/np.sqrt(2), 0], [0, 0, 1],
[0, 0, -1]])
elif i == 8:
# "z vel"
return np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, -1],
[0, -1, 0], [-1, 0, 0], [1, 0, 0]])
elif i == 9:
# "-z vel"
return np.array([[0, 0, 0], [0, 0, -1], [0, 1, 0], [0, 0, 1],
[0, -1, 0], [1, 0, 0], [-1, 0, 0]])
elif i == 10:
# "y.z vel"
return np.array([[0, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],
[-1, 0, 0], [0, -1/np.sqrt(2), -1/np.sqrt(2)],
[1, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],
[0, 1/np.sqrt(2), -1/np.sqrt(2)]])
elif i == 11:
# "y.-z vel"
return np.array([[0, 0, 0], [0, 1/np.sqrt(2), -1/np.sqrt(2)],
[-1, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],
[1, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],
[0, -1/np.sqrt(2), -1/np.sqrt(2)]])
elif i == 12:
# "-y.z vel"
return np.array([[0, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],
[1, 0, 0], [0, 1/np.sqrt(2), -1/np.sqrt(2)],
[-1, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],
[0, -1/np.sqrt(2), -1/np.sqrt(2)]])
elif i == 13:
# "-y.-z vel"
return np.array([[0, 0, 0], [0, -1/np.sqrt(2), -1/np.sqrt(2)],
[1, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],
[-1, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],
[0, 1/np.sqrt(2), -1/np.sqrt(2)]])
elif i == 14:
# x.z vel
return np.array([[0, 0, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],
[0, 1, 0], [-1/np.sqrt(2), 0, -1/np.sqrt(2)],
[0, -1, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],
[1/np.sqrt(2), 0, -1/np.sqrt(2)]])
elif i == 15:
# -x.z vel
return np.array([[0, 0, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],
[0, -1, 0], [1/np.sqrt(2), 0, -1/np.sqrt(2)],
[0, 1, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],
[-1/np.sqrt(2), 0, -1/np.sqrt(2)]])
elif i == 16:
# x.-z vel
return np.array([[0, 0, 0], [1/np.sqrt(2), 0, -1/np.sqrt(2)],
[0, 1, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],
[0, -1, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],
[-1/np.sqrt(2), 0, -1/np.sqrt(2)]])
elif i == 17:
# -x,-z vel
return np.array([[0, 0, 0], [-1/np.sqrt(2), 0, -1/np.sqrt(2)],
[0, -1, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],
[0, 1, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],
[1/np.sqrt(2), 0, -1/np.sqrt(2)]])
elif i == 18:
# x.y.z vel
a = np.cos(np.arctan2(1, np.sqrt(2)) * -1)
b = np.sin(np.arctan2(1, np.sqrt(2)) * -1) / np.sqrt(2)
return np.array([[0, 0, 0], [1/np.sqrt(3), 1/np.sqrt(3), 1/np.sqrt(3)],
[-1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(3), -1/np.sqrt(3), -1/np.sqrt(3)],
[1/np.sqrt(2), -1/np.sqrt(2), 0],
[b, b, a], [-b, -b, -a]])
elif i == 19:
# -x.-y.-z vel
a = np.cos(np.arctan2(-1, np.sqrt(2)) * -1)
b = np.sin(np.arctan2(-1, np.sqrt(2)) * -1) / np.sqrt(2)
return np.array([[0, 0, 0],
[-1/np.sqrt(3), -1/np.sqrt(3), -1/np.sqrt(3)],
[1/np.sqrt(2), -1/np.sqrt(2), 0],
[1/np.sqrt(3), 1/np.sqrt(3), 1/np.sqrt(3)],
[-1/np.sqrt(2), 1/np.sqrt(2), 0],
[-b, -b, a], [b, b, -a]])
@pytest.fixture
def radars_2d():
# Generate 5 radar models for testing purposes
noise_covar = CovarianceMatrix(np.array([[0.015, 0],
[0, 0.1]]))
measurement_mapping = np.array([0, 2])
# Create 5 simple radar sensor objects
radar1 = RadarBearingRange(
ndim_state=4,
position_mapping=measurement_mapping,
noise_covar=noise_covar,
)
radar2 = RadarBearingRange(
ndim_state=4,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar3 = RadarBearingRange(
ndim_state=4,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar4 = RadarBearingRange(
ndim_state=4,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar5 = RadarBearingRange(
ndim_state=4,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
return [radar1, radar2, radar3, radar4, radar5]
@pytest.fixture
def radars_3d():
# Generate 7 radar models for testing purposes
noise_covar = CovarianceMatrix(np.array([[0.015, 0],
[0, 0.1]]))
measurement_mapping = np.array([0, 2, 4])
# Create 7 simple radar sensor objects
radar1 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar2 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar3 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar4 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar5 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar6 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
radar7 = RadarBearingRange(
ndim_state=6,
position_mapping=measurement_mapping,
noise_covar=noise_covar
)
return [radar1, radar2, radar3, radar4, radar5, radar6, radar7]
@pytest.fixture(scope='session')
def mounting_offsets_2d():
# Generate sensor mounting offsets for testing purposes
offsets = [[0, 0],
[1, 0],
[0, 1],
[-1, 0],
[0, -1]]
return [StateVector(offset) for offset in offsets]
@pytest.fixture(scope='session')
def mounting_offsets_3d():
# Generate sensor mounting offsets for testing purposes
offsets = [[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[-1, 0, 0],
[0, -1, 0],
[0, 0, 1],
[0, 0, -1]]
return [StateVector(offset) for offset in offsets]
@pytest.fixture(params=[MovingPlatform, FixedPlatform],
ids=['MovingPlatform', 'FixedPlatform'])
def platform_type(request):
return request.param
@pytest.fixture(params=[True, False], ids=["Moving", "Static"])
def move(request):
return request.param
@pytest.fixture(params=[True, False], ids=["Add", "Initialise"])
def add_sensor(request):
return request.param
testdata_2d = [
StateVector([0, 0, 0, 0]),
StateVector([10, 0, 0, 0]),
StateVector([0, 1, 0, 0]),
StateVector([0, 0, 0, 1]),
StateVector([0, -1, 0, 0]),
StateVector([0, 0, 0, -1]),
StateVector([0, 1, 0, 1]),
StateVector([0, -1, 0, -1]),
StateVector([0, 1, 0, -1]),
StateVector([0, -1, 0, 1])
]
expected_2d = [
# static platform or X velocity
np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]),
# static platform or X velocity
np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]),
# static platform or X velocity
np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]),
# y-axis motion
np.array([[0, 0], [0, 1], [-1, 0], [0, -1], [1, 0]]),
# negative x-axis motion
np.array([[0, 0], [-1, 0], [0, -1], [1, 0], [0, 1]]),
# negative y-axis motion
np.array([[0, 0], [0, -1], [1, 0], [0, 1], [-1, 0]]),
# x-y motion
np.array([[0, 0], [1/np.sqrt(2), 1/np.sqrt(2)],
[-1/np.sqrt(2), 1/np.sqrt(2)], [-1/np.sqrt(2), -1/np.sqrt(2)],
[1/np.sqrt(2), -1/np.sqrt(2)]]),
# neg x- neg y motion
np.array([[0, 0], [-1/np.sqrt(2), -1/np.sqrt(2)],
[1/np.sqrt(2), -1/np.sqrt(2)], [1/np.sqrt(2), 1/np.sqrt(2)],
[-1/np.sqrt(2), 1/np.sqrt(2)]]),
# pos x- neg y motion
np.array([[0, 0], [1/np.sqrt(2), -1/np.sqrt(2)],
[1/np.sqrt(2), 1/np.sqrt(2)], [-1/np.sqrt(2), 1/np.sqrt(2)],
[-1/np.sqrt(2), -1/np.sqrt(2)]]),
# neg x- pos y motion
np.array([[0, 0], [-1/np.sqrt(2), 1/np.sqrt(2)],
[-1/np.sqrt(2), -1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)],
[1/np.sqrt(2), 1/np.sqrt(2)]])
]
@pytest.mark.parametrize(
'state, expected', zip(testdata_2d, expected_2d),
ids=["Static", "pos offset", "x vel", "y vel", "-x vel", "-y vel",
"x,y vel", "-x,-y vel", "x,-y vel", "-x,y vel"])
def test_2d_platform(state, expected, move, radars_2d,
mounting_offsets_2d, add_sensor):
# Define time related variables
timestamp = datetime.datetime.now()
# Define transition model and position for platform
model_1d = ConstantVelocity(0.0) # zero noise so pure movement
trans_model = CombinedLinearGaussianTransitionModel(
[model_1d] * (radars_2d[0].ndim_state // 2))
platform_state = State(state, timestamp)
# This defines the position_mapping to the platform's state vector (i.e. x and y)
mounting_mapping = np.array([0, 2])
# create a platform with the simple radar mounted
for sensor, offset in zip(radars_2d, mounting_offsets_2d):
sensor.mounting_offset = offset
if add_sensor:
platform = MovingPlatform(
states=platform_state,
transition_model=trans_model,
sensors=[],
position_mapping=mounting_mapping
)
for sensor in radars_2d:
platform.add_sensor(sensor)
else:
platform = MovingPlatform(
states=platform_state,
transition_model=trans_model,
sensors=radars_2d,
position_mapping=mounting_mapping
)
if move:
# Move the platform
platform.move(timestamp + datetime.timedelta(seconds=2))
sensor_positions_test(expected, platform)
testdata_3d = [
(StateVector([0, 0, 0, 0, 0, 0]), get_3d_expected(0)),
(StateVector([10, 0, 0, 0, 0, 0]), get_3d_expected(0)),
(StateVector([0, 1, 0, 0, 0, 0]), get_3d_expected(0)),
(StateVector([0, 0, 0, 1, 0, 0]), get_3d_expected(1)),
(StateVector([0, -1, 0, 0, 0, 0]), get_3d_expected(2)),
(StateVector([0, 0, 0, -1, 0, 0]), get_3d_expected(3)),
(StateVector([0, 1, 0, 1, 0, 0]), get_3d_expected(4)),
(StateVector([0, -1, 0, -1, 0, 0]), get_3d_expected(5)),
(StateVector([0, 1, 0, -1, 0, 0]), get_3d_expected(6)),
(StateVector([0, -1, 0, 1, 0, 0]), get_3d_expected(7)),
(StateVector([0, 0, 0, 0, 0, 1]), get_3d_expected(8)),
(StateVector([0, 0, 0, 0, 0, -1]), get_3d_expected(9)),
(StateVector([0, 0, 0, 1, 0, 1]), get_3d_expected(10)),
(StateVector([0, 0, 0, 1, 0, -1]), get_3d_expected(11)),
(StateVector([0, 0, 0, -1, 0, 1]), get_3d_expected(12)),
(StateVector([0, 0, 0, -1, 0, -1]), get_3d_expected(13)),
(StateVector([0, 1, 0, 0, 0, 1]), get_3d_expected(14)),
(StateVector([0, -1, 0, 0, 0, 1]), get_3d_expected(15)),
(StateVector([0, 1, 0, 0, 0, -1]), get_3d_expected(16)),
(StateVector([0, -1, 0, 0, 0, -1]), get_3d_expected(17)),
(StateVector([0, 1, 0, 1, 0, 1]), get_3d_expected(18)),
(StateVector([0, -1, 0, -1, 0, -1]), get_3d_expected(19))
]
@pytest.mark.parametrize('state, expected', testdata_3d, ids=[
"Static", "pos offset", "x vel", "y vel", "-x vel", "-y vel", "x,y vel",
"-x,-y vel", "x,-y vel", "-x,y vel", "z vel", "-z vel", "y.z vel",
"y.-z vel", "-y.z vel", "-y.-z vel", "x.z vel", "-x.z vel", "x.-z vel",
"-x,-z vel", "x,y,z vel", "-x,-y,-z vel"
])
def test_3d_platform(state, expected, move, radars_3d, mounting_offsets_3d,
add_sensor):
# Define time related variables
timestamp = datetime.datetime.now()
# Define transition model and position for platform
model_1d = ConstantVelocity(0.0) # zero noise so pure movement
trans_model = CombinedLinearGaussianTransitionModel(
[model_1d] * (radars_3d[0].ndim_state // 2))
platform_state = State(state, timestamp)
# This defines the position_mapping to the platform's state vector (i.e. x, y and z)
mounting_mapping = np.array([0, 2, 4])
# create a platform with the simple radar mounted
for sensor, offset in zip(radars_3d, mounting_offsets_3d):
sensor.mounting_offset = offset
if add_sensor:
platform = MovingPlatform(
states=platform_state,
transition_model=trans_model,
sensors=[],
position_mapping=mounting_mapping
)
for sensor in radars_3d:
platform.add_sensor(sensor)
else:
platform = MovingPlatform(
states=platform_state,
transition_model=trans_model,
sensors=radars_3d,
position_mapping=mounting_mapping
)
if move:
# Move the platform
platform.move(timestamp + datetime.timedelta(seconds=2))
sensor_positions_test(expected, platform)
@pytest.fixture(scope='session')
def rotation_offsets_2d():
# Generate sensor rotation offsets for testing purposes
offsets = [[0, 0, 0],
[0, 0, np.pi / 4],
[0, 0, -np.pi / 4],
[0, 0, np.pi / 2],
[0, 0, -np.pi / 2]]
return [StateVector(offset) for offset in offsets]
@pytest.fixture(scope='session')
def rotation_offsets_3d():
# Generate sensor rotation offsets for testing purposes
offsets = [[0, 0, 0],
[np.pi / 4, 0, 0],
[0, np.pi / 4, 0],
[-np.pi / 4, 0, 0],
[0, -np.pi / 4, 0],
[0, 0, np.pi / 4],
[0, 0, -np.pi / 4]]
return [StateVector(offset) for offset in offsets]
def expected_orientations_3d():
pi = np.pi
offset_3d_movement = np.arctan(1 / np.sqrt(2))
return [np.array([[0., 0., 0.], [pi/4, 0., 0.], [0., pi/4, 0.], [-pi/4, 0., 0.],
[0., -pi/4, 0.], [0., 0., pi/4], [0., 0., -pi/4]]),
np.array([[0., pi/2, 0.], [pi/4, pi/2, 0.], [0., 3*pi/4, 0.], [-pi/4, pi/2, 0.],
[0., pi/4, 0.], [0., pi/2, pi/4], [0., pi/2, -pi/4]]),
np.array([[0., 0., pi/2], [pi/4, 0., pi/2], [0., pi/4, pi/2], [-pi/4, 0., pi/2],
[0., -pi/4, pi/2], [0., 0., 3*pi/4], [0., 0., pi/4]]),
np.array([[0., 0., 0.], [pi/4, 0., 0.], [0., pi/4, 0.], [-pi/4, 0., 0.],
[0., -pi/4, 0.], [0., 0., pi/4], [0., 0., -pi/4]]),
np.array([[0., pi/2, 0.], [pi/4, pi/2, 0.], [0., 3*pi/4, 0.], [-pi/4, pi/2, 0.],
[0., pi/4, 0.], [0., pi/2, pi/4], [0., pi/2, -pi/4]]),
np.array([[0., 0., pi/2], [pi/4, 0., pi/2], [0., pi/4, pi/2], [-pi/4, 0., pi/2],
[0., -pi/4, pi/2], [0., 0., 3*pi/4], [0., 0., pi/4]]),
np.array([[0., pi/4, 0.], [pi/4, pi/4, 0.], [0., pi/2, 0.], [-pi/4, pi/4, 0.],
[0., 0., 0.], [0., pi/4, pi/4], [0., pi/4, -pi/4]]),
np.array([[0., pi/4, pi/2], [pi/4, pi/4, pi/2], [0., pi/2, pi/2],
[-pi/4, pi/4, pi/2], [0., 0., pi/2], [0., pi/4, 3*pi/4], [0., pi/4, pi/4]]),
np.array([[0., offset_3d_movement, pi/4], [pi/4, offset_3d_movement, pi/4],
[0., pi/4+offset_3d_movement, pi/4], [-pi/4, offset_3d_movement, pi/4],
[0., -pi/4+offset_3d_movement, pi/4], [0., offset_3d_movement, pi/2],
[0., offset_3d_movement, 0.]]),
np.array([[0., 0., pi], [pi/4, 0., pi], [0., pi/4, pi], [-pi/4, 0., pi],
[0., -pi/4, pi], [0., 0., 5*pi/4], [0., 0., 3*pi/4]]),
np.array([[0., 0., -pi/2], [pi/4, 0., -pi/2], [0., pi/4, -pi/2], [-pi/4, 0., -pi/2],
[0., -pi/4, -pi/2], [0., 0., -pi/4], [0., 0., -3*pi/4]]),
np.array([[0., -pi/2, 0.], [pi/4, -pi/2, 0.], [0., -pi/4, 0.], [-pi/4, -pi/2, 0.],
[0., -3*pi/4, 0.], [0., -pi/2, pi/4], [0., -pi/2, -pi/4]]),
np.array([[0., 0., pi], [pi/4, 0., pi], [0., pi/4, pi], [-pi/4, 0., pi],
[0., -pi/4, pi], [0., 0., 5*pi/4], [0., 0., 3*pi/4]]),
np.array([[0., 0., -pi/2], [pi/4, 0., -pi/2], [0., pi/4, -pi/2], [-pi/4, 0., -pi/2],
[0., -pi/4, -pi/2], [0., 0., -pi/4], [0., 0., -3*pi/4]]),
np.array([[0., -pi/2, 0.], [pi/4, -pi/2, 0.], [0., -pi/4, 0.], [-pi/4, -pi/2, 0.],
[0., -3*pi/4, 0.], [0., -pi/2, pi/4], [0., -pi/2, -pi/4]]),
np.array([[0., -pi/4, pi], [pi/4, -pi/4, pi], [0., 0., pi], [-pi/4, -pi/4, pi],
[0., -pi/2, pi], [0., -pi/4, 5*pi/4], [0., -pi/4, 3*pi/4]]),
np.array([[0., -pi/4, -pi/2], [pi/4, -pi/4, -pi/2], [0., 0., -pi/2],
[-pi/4, -pi/4, -pi/2], [0., -pi/2, -pi/2], [0., -pi/4, -pi/4],
[0., -pi/4, -3*pi/4]])]
def expected_orientations_2d():
pi = np.pi
return [
np.array([[0., 0., 0.], [0., 0., pi/4], [0., 0., -pi/4], [0., 0., pi/2],
[0., 0., -pi/2]]),
np.array([[0., 0., pi/2], [0., 0., 3 * pi/4], [0., 0., pi/4], [0., 0., pi],
[0., 0., 0.]]),
np.array([[0., 0., 0.], [0., 0., pi/4], [0., 0., -pi/4], [0., 0., pi/2],
[0., 0., -pi/2]]),
np.array([[0., 0., pi/2], [0., 0., 3 * pi/4], [0., 0., pi/4], [0., 0., pi],
[0., 0., 0.]]),
np.array([[0., 0., pi/4], [0., 0., pi/2], [0., 0., 0.], [0., 0., 3 * pi/4],
[0., 0., -pi/4]]),
np.array([[0., 0., pi], [0., 0., 5*pi/4], [0., 0., 3 * pi/4], [0., 0., 3 * pi/2],
[0., 0., pi/2]]),
np.array([[0., 0., -pi/2], [0., 0., -pi/4], [0., 0., -3 * pi/4], [0., 0., 0.],
[0., 0., -pi]]),
np.array([[0., 0., pi], [0., 0., 5 * pi/4], [0., 0., 3 * pi/4], [0., 0., 3 * pi/2],
[0., 0., pi/2]]),
np.array([[0., 0., -pi/2], [0., 0., -pi/4], [0., 0., -3 * pi/4], [0., 0., 0.],
[0., 0., -pi]]),
np.array([[0., 0., -3 * pi/4], [0., 0., -pi/2], [0., 0., -pi], [0., 0., -pi/4],
[0., 0., -5 * pi/4]])
]
@pytest.mark.parametrize('state, expected_platform_orientation, expected_sensor_orientations',
zip(*zip(*test_platform_base.orientation_tests_2d),
expected_orientations_2d()))
def test_rotation_offsets_2d(state, expected_platform_orientation, expected_sensor_orientations,
move, radars_2d, rotation_offsets_2d):
# Define time related variables
timestamp = datetime.datetime.now()
# Define transition model and position for platform
model_1d = ConstantVelocity(0.0) # zero noise so pure movement
trans_model = CombinedLinearGaussianTransitionModel(
[model_1d] * (radars_2d[0].ndim_state // 2))
platform_state = State(state, timestamp)
# This defines the position_mapping to the platform's state vector (i.e. x and y)
mounting_mapping = np.array([0, 2])
# create a platform with the simple radar mounted
for sensor, offset in zip(radars_2d, rotation_offsets_2d):
sensor.rotation_offset = offset
platform = MovingPlatform(
states=platform_state,
transition_model=trans_model,
sensors=radars_2d,
position_mapping=mounting_mapping
)
if move:
# Move the platform
platform.move(timestamp + datetime.timedelta(seconds=2))
assert np.allclose(platform.orientation, expected_platform_orientation)
assert np.allclose(all_sensor_orientations(platform), expected_sensor_orientations)
@pytest.mark.parametrize('state, expected_platform_orientation, expected_sensor_orientations',
zip(*zip(*test_platform_base.orientation_tests_3d),
expected_orientations_3d()))
def test_rotation_offsets_3d(state, expected_platform_orientation, expected_sensor_orientations,
move, radars_3d, rotation_offsets_3d):
# Define time related variables
timestamp = datetime.datetime.now()
# Define transition model and position for platform
model_1d = ConstantVelocity(0.0) # zero noise so pure movement
trans_model = CombinedLinearGaussianTransitionModel(
[model_1d] * (radars_3d[0].ndim_state // 2))
platform_state = State(state, timestamp)
# This defines the position_mapping to the platform's state vector (i.e. x, y and z)
mounting_mapping = np.array([0, 2, 4])
# create a platform with the simple radar mounted
for sensor, offset in zip(radars_3d, rotation_offsets_3d):
sensor.rotation_offset = offset
platform = MovingPlatform(
states=platform_state,
transition_model=trans_model,
sensors=radars_3d,
position_mapping=mounting_mapping
)
if move:
# Move the platform
platform.move(timestamp + datetime.timedelta(seconds=2))
assert np.allclose(platform.orientation, expected_platform_orientation)
assert np.allclose(all_sensor_orientations(platform), expected_sensor_orientations)
def all_sensor_orientations(platform):
sensor_orientations = np.concatenate([sensor.orientation for sensor in platform.sensors],
axis=1)
return sensor_orientations.T
def all_sensor_positions(platform):
sensor_positions = np.concatenate([sensor.position for sensor in platform.sensors], axis=1)
return sensor_positions.T
def test_defaults(radars_3d, platform_type, add_sensor):
platform_state = State(state_vector=StateVector([0, 1, 2, 1, 4, 1]),
timestamp=datetime.datetime.now())
platform_args = {}
if platform_type is MovingPlatform:
platform_args['transition_model'] = None
if add_sensor:
platform = platform_type(states=platform_state, sensors=[], position_mapping=[0, 2, 4],
**platform_args)
for sensor in radars_3d:
platform.add_sensor(sensor)
else:
platform = platform_type(states=platform_state, sensors=radars_3d,
position_mapping=[0, 2, 4], **platform_args)
for sensor in radars_3d:
assert np.array_equal(sensor.mounting_offset, StateVector([0, 0, 0]))
assert np.array_equal(sensor.rotation_offset, StateVector([0, 0, 0]))
assert np.array_equal(sensor.position, platform.position)
assert np.array_equal(sensor.orientation, platform.orientation)
def sensor_positions_test(expected_offset, platform):
"""
This function asserts that the sensor positions on the platform have been
correctly updated after the platform has been moved or sensors have been
mounted on it.
:param expected_offset: nD array of expected sensor position post rotation
:param platform: platform object
:return:
"""
radar_position = all_sensor_positions(platform)
platform_position = platform.position
expected_radar_position = expected_offset + platform_position.T
assert np.allclose(expected_radar_position, radar_position)
|
#!/usr/bin/env python3
from __future__ import print_function
import argparse
import io
import os
import subprocess
import sys
import tempfile
import time
from contextlib import ExitStack
from functools import partial
from threading import Thread
import pysam
class VariantCallingError (RuntimeError):
"""Exception class for issues with samtools and varscan subprocesses."""
def __init__(self, message=None, call='', error=''):
self.message = message
self.call = call.strip()
self.error = error.strip()
def __str__(self):
if self.message is None:
return ''
if self.error:
msg_header = '"{0}" failed with:\n{1}\n\n'.format(
self.call, self.error
)
else:
msg_header = (
'{0} failed.\n'
'No further information about this error is available.\n\n'
).format(self.call)
return msg_header + self.message
class VarScanCaller (object):
def __init__(self, ref_genome, bam_input_files,
max_depth=None,
min_mapqual=None, min_basequal=None,
threads=1, verbose=False, quiet=True
):
self.ref_genome = ref_genome
self.bam_input_files = bam_input_files
self.max_depth = max_depth
self.min_mapqual = min_mapqual
self.min_basequal = min_basequal
self.threads = threads
self.verbose = verbose
self.quiet = quiet
with pysam.FastaFile(ref_genome) as ref_fa:
self.ref_contigs = ref_fa.references
self.ref_lengths = ref_fa.lengths
self.pileup_engine = ['samtools', 'mpileup']
self.varcall_engine = ['varscan', 'somatic']
self.requires_stdout_redirect = False
self.TemporaryContigVCF = partial(
tempfile.NamedTemporaryFile,
mode='wb', suffix='', delete=False, dir=os.getcwd()
)
self.tmpfiles = []
def _get_pysam_pileup_args(self):
param_dict = {}
if self.max_depth is not None:
param_dict['max_depth'] = self.max_depth
if self.min_mapqual is not None:
param_dict['min_mapping_quality'] = self.min_mapqual
if self.min_basequal is not None:
param_dict['min_base_quality'] = self.min_basequal
param_dict['compute_baq'] = False
param_dict['stepper'] = 'samtools'
return param_dict
def varcall_parallel(self, normal_purity=None, tumor_purity=None,
min_coverage=None,
min_var_count=None,
min_var_freq=None, min_hom_freq=None,
p_value=None, somatic_p_value=None,
threads=None, verbose=None, quiet=None
):
if not threads:
threads = self.threads
if verbose is None:
verbose = self.verbose
if quiet is None:
quiet = self.quiet
# mapping of method parameters to varcall engine command line options
varcall_engine_option_mapping = [
('--normal-purity', normal_purity),
('--tumor-purity', tumor_purity),
('--min-coverage', min_coverage),
('--min-reads2', min_var_count),
('--min-var-freq', min_var_freq),
('--min-freq-for-hom', min_hom_freq),
('--p-value', p_value),
('--somatic-p-value', somatic_p_value),
('--min-avg-qual', self.min_basequal)
]
varcall_engine_options = []
for option, value in varcall_engine_option_mapping:
if value is not None:
varcall_engine_options += [option, str(value)]
pileup_engine_options = ['-B']
if self.max_depth is not None:
pileup_engine_options += ['-d', str(self.max_depth)]
if self.min_mapqual is not None:
pileup_engine_options += ['-q', str(self.min_mapqual)]
if self.min_basequal is not None:
pileup_engine_options += ['-Q', str(self.min_basequal)]
# Create a tuple of calls to samtools mpileup and varscan for
# each contig. The contig name is stored as the third element of
# that tuple.
# The calls are stored in the reverse order of the contig list so
# that they can be popped off later in the original order
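# For illustration only (paths and contig names are hypothetical), a single
# entry looks roughly like:
# (['samtools', 'mpileup', '-B', '-r', 'chr1:', '-f', 'ref.fa', 'normal.bam', 'tumor.bam'],
#  ['varscan', 'somatic', '-', '{out}', '--output-vcf', '1', '--mpileup', '1'],
#  'chr1')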
calls = [(
self.pileup_engine + pileup_engine_options + [
'-r', contig + ':',
'-f', self.ref_genome
] + self.bam_input_files,
self.varcall_engine + [
'-', '{out}', '--output-vcf', '1', '--mpileup', '1'
] + varcall_engine_options,
contig
) for contig in self.ref_contigs[::-1]]
if verbose:
print('Starting variant calling ..')
# launch subprocesses and monitor their status
subprocesses = []
error_table = {}
tmp_io_started = []
tmp_io_finished = []
self.tmpfiles = []
def enqueue_stderr_output(out, stderr_buffer):
for line in iter(out.readline, b''):
# Eventually we are going to print the contents of
# the stderr_buffer to sys.stderr so we can
# decode things here using its encoding.
# We do a 'backslashreplace' just to be on the safe side.
stderr_buffer.write(line.decode(sys.stderr.encoding,
'backslashreplace'))
out.close()
try:
while subprocesses or calls:
while calls and len(subprocesses) < threads:
# There are still calls waiting for execution and we
# have unoccupied threads so we launch a new combined
# call to samtools mpileup and the variant caller.
# pop the call arguments from our call stack
call = calls.pop()
# get the name of the contig that this call is going
# to work on
contig = call[2]
# Based on the contig name, generate a readable and
# file system-compatible prefix and use it to create
# a named temporary file, to which the call output
# will be redirected.
# At the moment we create the output file we add it to
# the list of all temporary output files so that we can
# remove it eventually during cleanup.
call_out = self.TemporaryContigVCF(
prefix=''.join(
c if c.isalnum() else '_' for c in contig
) + '_',
)
# maintain a list of variant call outputs
# in the order the subprocesses got launched
tmp_io_started.append(call_out.name)
if self.requires_stdout_redirect:
# redirect stdout to the temporary file just created
stdout_p2 = call_out
stderr_p2 = subprocess.PIPE
else:
# variant caller wants to write output to file directly
stdout_p2 = subprocess.PIPE
stderr_p2 = subprocess.STDOUT
call[1][call[1].index('{out}')] = call_out.name
call_out.close()
# for reporting purposes, join the arguments for the
# samtools and the variant caller calls into readable
# strings
c_str = (' '.join(call[0]), ' '.join(call[1]))
error_table[c_str] = [io.StringIO(), io.StringIO()]
# start the subprocesses
p1 = subprocess.Popen(
call[0],
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p2 = subprocess.Popen(
call[1],
stdin=p1.stdout,
stdout=stdout_p2,
stderr=stderr_p2
)
# subprocess bookkeeping
subprocesses.append((c_str, p1, p2, call_out, contig))
# make sure our newly launched call does not block
# because its buffers fill up
p1.stdout.close()
t1 = Thread(
target=enqueue_stderr_output,
args=(p1.stderr, error_table[c_str][0])
)
t2 = Thread(
target=enqueue_stderr_output,
args=(
p2.stderr
if self.requires_stdout_redirect else
p2.stdout,
error_table[c_str][1]
)
)
t1.daemon = t2.daemon = True
t1.start()
t2.start()
if verbose:
print(
'Calling variants for contig: {0}'
.format(call[2])
)
# monitor all running calls to see if any of them are done
for call, p1, p2, ofo, contig in subprocesses:
p1_stat = p1.poll()
p2_stat = p2.poll()
if p1_stat is not None or p2_stat is not None:
# There is an outcome for this process!
# Lets see what it is
if p1_stat or p2_stat:
print()
print(
error_table[call][0].getvalue(),
error_table[call][1].getvalue(),
file=sys.stderr
)
raise VariantCallingError(
'Variant Calling for contig {0} failed.'
.format(contig),
call='{0} | {1}'.format(call[0], call[1])
)
if p1_stat == 0 and p2_stat is None:
# VarScan is not handling the no output from
# samtools mpileup situation correctly so maybe
# that's the issue here
last_words = error_table[call][1].getvalue(
).splitlines()[-4:]
if len(last_words) < 4 or any(
not msg.startswith('Input stream not ready')
for msg in last_words
):
break
# lets give this process a bit more time
# VarScan is waiting for input it will never
# get, stop it.
p2.terminate()
subprocesses.remove((call, p1, p2, ofo, contig))
ofo.close()
break
if p2_stat == 0:
# Things went fine.
# maintain a list of variant call outputs
# that finished successfully (in the order
# they finished)
tmp_io_finished.append(ofo.name)
if verbose:
print()
print('Contig {0} finished.'.format(contig))
if not quiet:
print()
print(
'stderr output from samtools mpileup/'
'varscan somatic:'.upper(),
file=sys.stderr
)
print(
error_table[call][0].getvalue(),
error_table[call][1].getvalue(),
file=sys.stderr
)
# Discard the collected stderr output from
# the call, remove the call from the list of
# running calls and close its output file.
del error_table[call]
subprocesses.remove((call, p1, p2, ofo, contig))
# Closing the output file is important or we
# may hit the file system limit for open files
# if there are lots of contigs.
ofo.close()
break
# wait a bit in between monitoring cycles
time.sleep(2)
finally:
for call, p1, p2, ofo, contig in subprocesses:
# make sure we do not leave running subprocesses behind
for proc in (p1, p2):
try:
proc.terminate()
except Exception:
pass
# close currently open files
ofo.close()
# store the files with finished content in the order that
# the corresponding jobs were launched
self.tmpfiles = [f for f in tmp_io_started if f in tmp_io_finished]
# Make sure remaining buffered stderr output of
# subprocesses does not get lost.
# Currently, we don't screen this output for real errors,
# but simply rewrite everything.
if not quiet and error_table:
print()
print(
'stderr output from samtools mpileup/varscan somatic:'.upper(),
file=sys.stderr
)
for call, errors in error_table.items():
print(' | '.join(call), ':', file=sys.stderr)
print('-' * 20, file=sys.stderr)
print('samtools mpileup output:', file=sys.stderr)
print(errors[0].getvalue(), file=sys.stderr)
print('varscan somatic output:', file=sys.stderr)
print(errors[1].getvalue(), file=sys.stderr)
def _add_ref_contigs_to_header(self, header):
for chrom, length in zip(self.ref_contigs, self.ref_lengths):
header.add_meta(
'contig',
items=[('ID', chrom), ('length', length)]
)
def _add_filters_to_header(self, header):
varscan_fpfilters = {
'VarCount': 'Fewer than {min_var_count2} variant-supporting reads',
'VarFreq': 'Variant allele frequency below {min_var_freq2}',
'VarAvgRL':
'Average clipped length of variant-supporting reads < '
'{min_var_len}',
'VarReadPos': 'Relative average read position < {min_var_readpos}',
'VarDist3':
'Average distance to effective 3\' end < {min_var_dist3}',
'VarMMQS':
'Average mismatch quality sum for variant reads > '
'{max_var_mmqs}',
'VarMapQual':
'Average mapping quality of variant reads < {min_var_mapqual}',
'VarBaseQual':
'Average base quality of variant reads < {min_var_basequal}',
'Strand':
'Strand representation of variant reads < {min_strandedness}',
'RefAvgRL':
'Average clipped length of ref-supporting reads < '
'{min_ref_len}',
'RefReadPos':
'Relative average read position < {min_ref_readpos}',
'RefDist3':
'Average distance to effective 3\' end < {min_ref_dist3}',
'RefMapQual':
'Average mapping quality of reference reads < '
'{min_ref_mapqual}',
'RefBaseQual':
'Average base quality of ref-supporting reads < '
'{min_ref_basequal}',
'RefMMQS':
'Average mismatch quality sum for ref-supporting reads > '
'{max_ref_mmqs}',
'MMQSdiff':
'Mismatch quality sum difference (var - ref) > '
'{max_mmqs_diff}',
'MinMMQSdiff':
'Mismatch quality sum difference (var - ref) < '
'{min_mmqs_diff}',
'MapQualDiff':
'Mapping quality difference (ref - var) > {max_mapqual_diff}',
'MaxBAQdiff':
'Average base quality difference (ref - var) > '
'{max_basequal_diff}',
'ReadLenDiff':
'Average supporting read length difference (ref - var) > '
'{max_relative_len_diff}',
}
for filter_id, description in varscan_fpfilters.items():
header.filters.add(filter_id, None, None, description)
def _add_indel_info_flag_to_header(self, header):
header.info.add(
'INDEL', 0, 'Flag', 'Indicates that the variant is an INDEL'
)
def _compile_common_header(self, varcall_template, no_filters=False):
# fix the header generated by VarScan
# by adding reference and contig information
common_header = pysam.VariantHeader()
common_header.add_meta('reference', value=self.ref_genome)
self._add_ref_contigs_to_header(common_header)
if not no_filters:
# add filter info
self._add_filters_to_header(common_header)
# change the source information
common_header.add_meta('source', value='varscan.py')
# declare an INDEL flag for record INFO fields
self._add_indel_info_flag_to_header(common_header)
# take the remaining metadata from the template header produced by
# VarScan
with pysam.VariantFile(varcall_template, 'r') as original_data:
varscan_header = original_data.header
for sample in varscan_header.samples:
common_header.samples.add(sample)
common_header.merge(varscan_header)
return common_header
def pileup_masker(self, mask):
def apply_mask_on_pileup(piled_items):
for item, status in zip(piled_items, mask):
if status:
yield item
return apply_mask_on_pileup
def get_allele_specific_pileup_column_stats(
self, allele, pile_column, ref_fetch
):
# number of reads supporting the given allele on
# forward and reverse strand, and in total
var_reads_plus = var_reads_minus = 0
var_supp_read_mask = []
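# The loop below relies on pysam reporting bases from reverse-strand reads
# in lowercase (as in samtools mpileup output): an exact match with the
# uppercase allele indicates a forward-strand read, a case-insensitive
# match a reverse-strand read.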
for base in pile_column.get_query_sequences():
if base == allele:
# allele supporting read on + strand
var_reads_plus += 1
var_supp_read_mask.append(True)
elif base.upper() == allele:
# allele supporting read on - strand
var_reads_minus += 1
var_supp_read_mask.append(True)
else:
var_supp_read_mask.append(False)
var_reads_total = var_reads_plus + var_reads_minus
if var_reads_total == 0:
# No stats without reads!
return None
var_supp_only = self.pileup_masker(var_supp_read_mask)
# average mapping quality of the reads supporting the
# given allele
avg_mapping_quality = sum(
mq for mq in var_supp_only(
pile_column.get_mapping_qualities()
)
) / var_reads_total
# for the remaining stats we need access to complete
# read information
piled_reads = [
p for p in var_supp_only(pile_column.pileups)
]
assert len(piled_reads) == var_reads_total
sum_avg_base_qualities = 0
sum_dist_from_center = 0
sum_dist_from_3prime = 0
sum_clipped_length = 0
sum_unclipped_length = 0
sum_num_mismatches_as_fraction = 0
sum_mismatch_qualities = 0
for p in piled_reads:
sum_avg_base_qualities += sum(
p.alignment.query_qualities
) / p.alignment.infer_query_length()
sum_clipped_length += p.alignment.query_alignment_length
unclipped_length = p.alignment.infer_read_length()
sum_unclipped_length += unclipped_length
read_center = p.alignment.query_alignment_length / 2
sum_dist_from_center += 1 - abs(
p.query_position - read_center
) / read_center
if p.alignment.is_reverse:
sum_dist_from_3prime += p.query_position / unclipped_length
else:
sum_dist_from_3prime += 1 - p.query_position / unclipped_length
sum_num_mismatches = 0
for qpos, rpos in p.alignment.get_aligned_pairs():
if qpos is not None and rpos is not None:
if p.alignment.query_sequence[qpos] != ref_fetch(
rpos, rpos + 1
).upper(): # ref bases can be lowercase!
sum_num_mismatches += 1
sum_mismatch_qualities += p.alignment.query_qualities[
qpos
]
sum_num_mismatches_as_fraction += (
sum_num_mismatches / p.alignment.query_alignment_length
)
avg_basequality = sum_avg_base_qualities / var_reads_total
avg_pos_as_fraction = sum_dist_from_center / var_reads_total
avg_num_mismatches_as_fraction = (
sum_num_mismatches_as_fraction / var_reads_total
)
avg_sum_mismatch_qualities = sum_mismatch_qualities / var_reads_total
avg_clipped_length = sum_clipped_length / var_reads_total
avg_distance_to_effective_3p_end = (
sum_dist_from_3prime / var_reads_total
)
return (
avg_mapping_quality,
avg_basequality,
var_reads_plus,
var_reads_minus,
avg_pos_as_fraction,
avg_num_mismatches_as_fraction,
avg_sum_mismatch_qualities,
avg_clipped_length,
avg_distance_to_effective_3p_end
)
def _postprocess_variant_records(self, invcf,
min_var_count2, min_var_count2_lc,
min_var_freq2, max_somatic_p,
max_somatic_p_depth,
min_ref_readpos, min_var_readpos,
min_ref_dist3, min_var_dist3,
min_ref_len, min_var_len,
max_relative_len_diff,
min_strandedness, min_strand_reads,
min_ref_basequal, min_var_basequal,
max_basequal_diff,
min_ref_mapqual, min_var_mapqual,
max_mapqual_diff,
max_ref_mmqs, max_var_mmqs,
min_mmqs_diff, max_mmqs_diff,
**args):
# set FILTER field according to Varscan criteria
# multiple FILTER entries must be separated by semicolons
# No filters applied should be indicated with MISSING
# since posterior filters are always applied to just one sample,
# a better place to store the info is in the FT genotype field:
# can be PASS, '.' to indicate that filters have not been applied,
# or a semicolon-separated list of filters that failed
# unfortunately, gemini does not support this field
with ExitStack() as io_stack:
normal_reads, tumor_reads = (
io_stack.enter_context(
pysam.Samfile(fn, 'rb')) for fn in self.bam_input_files
)
refseq = io_stack.enter_context(pysam.FastaFile(self.ref_genome))
pileup_args = self._get_pysam_pileup_args()
for record in invcf:
if any(len(allele) > 1 for allele in record.alleles):
# skip indel postprocessing for the moment
yield record
continue
# get pileup for genomic region affected by this variant
if record.info['SS'] == '2':
# a somatic variant => generate pileup from tumor data
pile = tumor_reads.pileup(
record.chrom, record.start, record.stop,
**pileup_args
)
sample_of_interest = 'TUMOR'
elif record.info['SS'] in ['1', '3']:
# a germline or LOH variant => pileup from normal data
pile = normal_reads.pileup(
record.chrom, record.start, record.stop,
**pileup_args
)
sample_of_interest = 'NORMAL'
else:
# TO DO: figure out if there is anything interesting to do
# for SS status codes 0 (reference) and 5 (unknown)
yield record
continue
# apply false-positive filtering a la varscan fpfilter
# find the variant site in the pileup columns
for pile_column in pile:
if pile_column.reference_pos == record.start:
break
# extract required information
# overall read depth at the site
read_depth = pile_column.get_num_aligned()
assert read_depth > 0
# no multiallelic sites in varscan
assert len(record.alleles) == 2
if record.samples[sample_of_interest]['RD'] > 0:
ref_stats, alt_stats = [
self.get_allele_specific_pileup_column_stats(
allele,
pile_column,
partial(
pysam.FastaFile.fetch, refseq, record.chrom
)
)
for allele in record.alleles
]
else:
ref_stats = None
alt_stats = self.get_allele_specific_pileup_column_stats(
record.alleles[1],
pile_column,
partial(pysam.FastaFile.fetch, refseq, record.chrom)
)
ref_count = 0
if ref_stats:
ref_count = ref_stats[2] + ref_stats[3]
if ref_stats[1] < min_ref_basequal:
record.filter.add('RefBaseQual')
if ref_count >= 2:
if ref_stats[0] < min_ref_mapqual:
record.filter.add('RefMapQual')
if ref_stats[4] < min_ref_readpos:
record.filter.add('RefReadPos')
# ref_stats[5] (avg_num_mismatches_as_fraction
# is not a filter criterion in VarScan fpfilter
if ref_stats[6] > max_ref_mmqs:
record.filter.add('RefMMQS')
if ref_stats[7] < min_ref_len:
# VarScan fpfilter does not apply this filter
# for indels, but there is no reason
# not to do it.
record.filter.add('RefAvgRL')
if ref_stats[8] < min_ref_dist3:
record.filter.add('RefDist3')
if alt_stats:
alt_count = alt_stats[2] + alt_stats[3]
if (
alt_count < min_var_count2_lc
) or (
read_depth >= max_somatic_p_depth and
alt_count < min_var_count2
):
record.filter.add('VarCount')
if alt_count / read_depth < min_var_freq2:
record.filter.add('VarFreq')
if alt_stats[1] < min_var_basequal:
record.filter.add('VarBaseQual')
if alt_count > min_strand_reads:
if (
alt_stats[2] / alt_count < min_strandedness
) or (
alt_stats[3] / alt_count < min_strandedness
):
record.filter.add('Strand')
if alt_stats[2] + alt_stats[3] >= 2:
if alt_stats[0] < min_var_mapqual:
record.filter.add('VarMapQual')
if alt_stats[4] < min_var_readpos:
record.filter.add('VarReadPos')
# alt_stats[5] (avg_num_mismatches_as_fraction
# is not a filter criterion in VarScan fpfilter
if alt_stats[6] > max_var_mmqs:
record.filter.add('VarMMQS')
if alt_stats[7] < min_var_len:
# VarScan fpfilter does not apply this filter
# for indels, but there is no reason
# not to do it.
record.filter.add('VarAvgRL')
if alt_stats[8] < min_var_dist3:
record.filter.add('VarDist3')
if ref_count >= 2 and alt_count >= 2:
if (ref_stats[0] - alt_stats[0]) > max_mapqual_diff:
record.filter.add('MapQualDiff')
if (ref_stats[1] - alt_stats[1]) > max_basequal_diff:
record.filter.add('MaxBAQdiff')
mmqs_diff = alt_stats[6] - ref_stats[6]
if mmqs_diff < min_mmqs_diff:
record.filter.add('MinMMQSdiff')
if mmqs_diff > max_mmqs_diff:
record.filter.add('MMQSdiff')
if (
1 - alt_stats[7] / ref_stats[7]
) > max_relative_len_diff:
record.filter.add('ReadLenDiff')
else:
# No variant-supporting reads for this record!
# This can happen in rare cases because of
# samtools mpileup issues, but indicates a
# rather unreliable variant call.
record.filter.add('VarCount')
record.filter.add('VarFreq')
yield record
def _indel_flagged_records(self, vcf):
for record in vcf:
record.info['INDEL'] = True
yield record
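# The following generator merges two position-sorted VCF record iterators
# (SNP records and indel-flagged records) into one sorted stream, a classic
# two-way merge: whichever iterator is exhausted first, the other is drained.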
def _merge_generator(self, vcf1, vcf2):
try:
record1 = next(vcf1)
except StopIteration:
for record2 in vcf2:
yield record2
return
try:
record2 = next(vcf2)
except StopIteration:
yield record1
for record1 in vcf1:
yield record1
return
while True:
if (record1.start, record1.stop) < (record2.start, record2.stop):
yield record1
try:
record1 = next(vcf1)
except StopIteration:
yield record2
for record2 in vcf2:
yield record2
return
else:
yield record2
try:
record2 = next(vcf2)
except StopIteration:
yield record1
for record1 in vcf1:
yield record1
return
def merge_and_postprocess(self, snps_out, indels_out=None,
no_filters=False, **filter_args):
temporary_data = self.tmpfiles
self.tmpfiles = []
temporary_snp_files = [f + '.snp.vcf' for f in temporary_data]
temporary_indel_files = [f + '.indel.vcf' for f in temporary_data]
for f in temporary_data:
try:
os.remove(f)
except Exception:
pass
def noop_gen(data, **kwargs):
for d in data:
yield d
if no_filters:
apply_filters = noop_gen
else:
apply_filters = self._postprocess_variant_records
output_header = self._compile_common_header(
temporary_snp_files[0],
no_filters
)
if indels_out is None:
with open(snps_out, 'w') as o:
o.write(str(output_header).format(**filter_args))
for snp_f, indel_f in zip(
temporary_snp_files, temporary_indel_files
):
with pysam.VariantFile(snp_f, 'r') as snp_invcf:
# fix the input header on the fly
# to avoid Warnings from htslib about missing contig
# info
self._add_ref_contigs_to_header(snp_invcf.header)
self._add_filters_to_header(snp_invcf.header)
self._add_indel_info_flag_to_header(snp_invcf.header)
with pysam.VariantFile(indel_f, 'r') as indel_invcf:
# fix the input header on the fly
# to avoid Warnings from htslib about missing
# contig info
self._add_ref_contigs_to_header(indel_invcf.header)
self._add_filters_to_header(indel_invcf.header)
self._add_indel_info_flag_to_header(
indel_invcf.header
)
for record in apply_filters(
self._merge_generator(
snp_invcf,
self._indel_flagged_records(indel_invcf)
),
**filter_args
):
o.write(str(record))
try:
os.remove(snp_f)
except Exception:
pass
try:
os.remove(indel_f)
except Exception:
pass
else:
with open(snps_out, 'w') as o:
o.write(str(output_header).format(**filter_args))
for f in temporary_snp_files:
with pysam.VariantFile(f, 'r') as invcf:
# fix the input header on the fly
# to avoid Warnings from htslib about missing
# contig info and errors because of undeclared
# filters
self._add_ref_contigs_to_header(invcf.header)
self._add_filters_to_header(invcf.header)
for record in apply_filters(
invcf, **filter_args
):
o.write(str(record))
try:
os.remove(f)
except Exception:
pass
with open(indels_out, 'w') as o:
o.write(str(output_header))
for f in temporary_indel_files:
with pysam.VariantFile(f, 'r') as invcf:
# fix the input header on the fly
# to avoid Warnings from htslib about missing
# contig info and errors because of undeclared
# filters
self._add_ref_contigs_to_header(invcf.header)
self._add_filters_to_header(invcf.header)
self._add_indel_info_flag_to_header(invcf.header)
for record in apply_filters(
self._indel_flagged_records(invcf), **filter_args
):
o.write(str(record))
try:
os.remove(f)
except Exception:
pass
def varscan_call(ref_genome, normal, tumor, output_path, **args):
"""Preparse arguments and orchestrate calling and postprocessing."""
if args.pop('split_output'):
if '%T' in output_path:
out = (
output_path.replace('%T', 'snp'),
output_path.replace('%T', 'indel')
)
else:
out = (
output_path + '.snp',
output_path + '.indel'
)
else:
out = (output_path, None)
instance_args = {
k: args.pop(k) for k in [
'max_depth',
'min_mapqual',
'min_basequal',
'threads',
'verbose',
'quiet'
]
}
varscan_somatic_args = {
k: args.pop(k) for k in [
'normal_purity',
'tumor_purity',
'min_coverage',
'min_var_count',
'min_var_freq',
'min_hom_freq',
'somatic_p_value',
'p_value'
]
}
v = VarScanCaller(ref_genome, [normal, tumor], **instance_args)
v.varcall_parallel(**varscan_somatic_args)
v.merge_and_postprocess(*out, **args)
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument(
'ref_genome',
metavar='reference_genome',
help='the reference genome (in fasta format)'
)
p.add_argument(
'--normal',
metavar='BAM_file', required=True,
help='the BAM input file of aligned reads from the normal sample'
)
p.add_argument(
'--tumor',
metavar='BAM_file', required=True,
help='the BAM input file of aligned reads from the tumor sample'
)
p.add_argument(
'-o', '--ofile', required=True,
metavar='OFILE', dest='output_path',
help='Name of the variant output file. With --split-output, the name '
'may use the %%T replacement token or will be used as the '
'basename for the two output files to be generated (see '
'-s|--split-output below).'
)
p.add_argument(
'-s', '--split-output',
dest='split_output', action='store_true', default=False,
help='indicate that separate output files for SNPs and indels '
'should be generated (original VarScan behavior). If specified, '
'%%T in the --ofile file name will be replaced with "snp" and '
'"indel" to generate the names of the SNP and indel output '
'files, respectively. If %%T is not found in the file name, it '
'will get interpreted as a basename to which ".snp"/".indel" '
'will be appended.'
)
p.add_argument(
'-t', '--threads',
type=int, default=1,
help='level of parallelism'
)
p.add_argument(
'-v', '--verbose',
action='store_true',
help='be verbose about progress'
)
p.add_argument(
'-q', '--quiet',
action='store_true',
help='suppress output from wrapped tools'
)
call_group = p.add_argument_group('Variant calling parameters')
call_group.add_argument(
'--normal-purity',
dest='normal_purity', type=float,
default=1.0,
help='Estimated purity of the normal sample (default: 1.0)'
)
call_group.add_argument(
'--tumor-purity',
dest='tumor_purity', type=float,
default=1.0,
help='Estimated purity of the tumor sample (default: 1.0)'
)
call_group.add_argument(
'--max-pileup-depth',
dest='max_depth', type=int, default=8000,
help='Maximum depth of generated pileups (samtools mpileup -d option; '
'default: 8000)'
)
call_group.add_argument(
'--min-basequal',
dest='min_basequal', type=int,
default=13,
help='Minimum base quality at the variant position to use a read '
'(default: 13)'
)
call_group.add_argument(
'--min-mapqual',
dest='min_mapqual', type=int,
default=0,
help='Minimum mapping quality required to use a read '
'(default: 0)'
)
call_group.add_argument(
'--min-coverage',
dest='min_coverage', type=int,
default=8,
help='Minimum site coverage required in the normal and in the tumor '
'sample to call a variant (default: 8)'
)
call_group.add_argument(
'--min-var-count',
dest='min_var_count', type=int,
default=2,
help='Minimum number of variant-supporting reads required to call a '
'variant (default: 2)'
)
call_group.add_argument(
'--min-var-freq',
dest='min_var_freq', type=float,
default=0.1,
help='Minimum variant allele frequency for calling (default: 0.1)'
)
call_group.add_argument(
'--min-hom-freq',
dest='min_hom_freq', type=float,
default=0.75,
help='Minimum variant allele frequency for homozygous call '
'(default: 0.75)'
)
call_group.add_argument(
'--p-value',
dest='p_value', type=float,
default=0.99,
help='P-value threshold for heterozygous call (default: 0.99)'
)
call_group.add_argument(
'--somatic-p-value',
dest='somatic_p_value', type=float,
default=0.05,
help='P-value threshold for somatic call (default: 0.05)'
)
filter_group = p.add_argument_group('Posterior variant filter parameters')
filter_group.add_argument(
'--no-filters',
dest='no_filters', action='store_true',
help='Disable all posterior variant filters. '
'If specified, all following options will be ignored'
)
filter_group.add_argument(
'--min-var-count2',
dest='min_var_count2', type=int,
default=4,
help='Minimum number of variant-supporting reads (default: 4)'
)
filter_group.add_argument(
'--min-var-count2-lc',
dest='min_var_count2_lc', type=int,
default=2,
help='Minimum number of variant-supporting reads when depth below '
'--somatic-p-depth (default: 2)'
)
filter_group.add_argument(
'--min-var-freq2',
dest='min_var_freq2', type=float,
default=0.05,
help='Minimum variant allele frequency (default: 0.05)'
)
filter_group.add_argument(
'--max-somatic-p',
dest='max_somatic_p', type=float,
default=0.05,
help='Maximum somatic p-value (default: 0.05)'
)
filter_group.add_argument(
'--max-somatic-p-depth',
dest='max_somatic_p_depth', type=int,
default=10,
help='Depth required to run --max-somatic-p filter (default: 10)'
)
filter_group.add_argument(
'--min-ref-readpos',
dest='min_ref_readpos', type=float,
default=0.1,
help='Minimum average relative distance of site from the ends of '
'ref-supporting reads (default: 0.1)'
)
filter_group.add_argument(
'--min-var-readpos',
dest='min_var_readpos', type=float,
default=0.1,
help='Minimum average relative distance of site from the ends of '
'variant-supporting reads (default: 0.1)'
)
filter_group.add_argument(
'--min-ref-dist3',
dest='min_ref_dist3', type=float,
default=0.1,
help='Minimum average relative distance of site from the effective '
'3\'end of ref-supporting reads (default: 0.1)'
)
filter_group.add_argument(
'--min-var-dist3',
dest='min_var_dist3', type=float,
default=0.1,
help='Minimum average relative distance of site from the effective '
'3\'end of variant-supporting reads (default: 0.1)'
)
filter_group.add_argument(
'--min-ref-len',
dest='min_ref_len', type=int,
default=90,
help='Minimum average trimmed length of reads supporting the ref '
'allele (default: 90)'
)
filter_group.add_argument(
'--min-var-len',
dest='min_var_len', type=int,
default=90,
help='Minimum average trimmed length of reads supporting the variant '
'allele (default: 90)'
)
filter_group.add_argument(
'--max-len-diff',
dest='max_relative_len_diff', type=float,
default=0.25,
help='Maximum average relative read length difference (ref - var; '
'default: 0.25)'
)
filter_group.add_argument(
'--min-strandedness',
dest='min_strandedness', type=float,
default=0.01,
help='Minimum fraction of variant reads from each strand '
'(default: 0.01)'
)
filter_group.add_argument(
'--min-strand-reads',
dest='min_strand_reads', type=int,
default=5,
help='Minimum allele depth required to run --min-strandedness filter '
'(default: 5)'
)
filter_group.add_argument(
'--min-ref-basequal',
dest='min_ref_basequal', type=int,
default=15,
help='Minimum average base quality for the ref allele (default: 15)'
)
filter_group.add_argument(
'--min-var-basequal',
dest='min_var_basequal', type=int,
default=15,
help='Minimum average base quality for the variant allele '
'(default: 15)'
)
filter_group.add_argument(
'--max-basequal-diff',
dest='max_basequal_diff', type=int,
default=50,
help='Maximum average base quality diff (ref - var; default: 50)'
)
filter_group.add_argument(
'--min-ref-mapqual',
dest='min_ref_mapqual', type=int,
default=15,
help='Minimum average mapping quality of reads supporting the ref '
'allele (default: 15)'
)
filter_group.add_argument(
'--min-var-mapqual',
dest='min_var_mapqual', type=int,
default=15,
help='Minimum average mapping quality of reads supporting the variant '
'allele (default: 15)'
)
filter_group.add_argument(
'--max-mapqual-diff',
dest='max_mapqual_diff', type=int,
default=50,
help='Maximum average mapping quality difference (ref - var; '
'default: 50)'
)
filter_group.add_argument(
'--max-ref-mmqs',
dest='max_ref_mmqs', type=int,
default=100,
help='Maximum mismatch quality sum of reads supporting the ref '
'allele (default: 100)'
)
filter_group.add_argument(
'--max-var-mmqs',
dest='max_var_mmqs', type=int,
default=100,
help='Maximum mismatch quality sum of reads supporting the variant '
'allele (default: 100)'
)
filter_group.add_argument(
'--min-mmqs-diff',
dest='min_mmqs_diff', type=int,
default=0,
help='Minimum mismatch quality sum difference (var - ref; default: 0)'
)
filter_group.add_argument(
'--max-mmqs-diff',
dest='max_mmqs_diff', type=int,
default=50,
help='Maximum mismatch quality sum difference (var - ref; default: 50)'
)
args = vars(p.parse_args())
varscan_call(**args)
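# Hypothetical invocation sketch (the wrapper's actual entry point is defined
# earlier in this file; the placeholder script name and values below are purely
# illustrative, not part of the original tool):
#
#   python varscan_wrapper.py ... --min-var-freq2 0.05 --max-somatic-p 0.05 \
#       --max-somatic-p-depth 10 --min-strandedness 0.01 --min-strand-reads 5
#
# With these values, sites at depth >= 10 must pass the somatic p-value filter,
# and the strandedness filter only applies once at least 5 variant reads are seen.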
|
'''
Created on 2021-04-21
see https://stackoverflow.com/a/66110795/1497139
'''
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import sys
class Watcher:
'''
watch the given path with the given callback
'''
def __init__(self, path,patterns=["*.pdf","*.jpg"],debug=False):
'''
construct me for the given path
Args:
path(str): the directory to observe
patterns(list): a list of wildcard patterns
debug(bool): True if debugging should be switched on
'''
self.observer = Observer()
self.path = path
self.patterns=patterns
self.debug=debug
def run(self,callback,sleepTime=1,limit=sys.maxsize):
'''
run me
Args:
callback(func): the function to trigger when a file appears
sleepTime(float): how often to check for incoming files (default: 1.0 secs)
limit(float): the maximum time in seconds to run the watcher (default: unlimited)
'''
event_handler = Handler(callback,patterns=self.patterns,debug=self.debug)
self.observer.schedule(event_handler, self.path, recursive=True)
self.observer.start()
runTime=0
try:
while runTime<limit:
time.sleep(sleepTime)
runTime+=sleepTime
except Exception as ex:
if self.debug:
print("Error %s " % str(ex))
finally:
# always stop the observer thread - on error and when the time limit is reached
self.observer.stop()
# don't call self.observer.join() here -
# we won't terminate if we do
class Handler(PatternMatchingEventHandler):
'''
handle changes for a given wildcard pattern
'''
def __init__(self,callback,patterns,debug=False):
'''
construct me
Args:
callback: the function to call
patterns: the patterns to trigger on
debug(bool): if True print debug output
'''
self.callback=callback
self.debug=debug
# Set the patterns for PatternMatchingEventHandler
PatternMatchingEventHandler.__init__(
self,
patterns=patterns,
ignore_directories=True,
case_sensitive=False,
)
def on_any_event(self, event):
if self.debug:
print(
"[{}] noticed: [{}] on: [{}] ".format(
time.asctime(), event.event_type, event.src_path
)
)
if "modified" == event.event_type:
self.callback(event.src_path)
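# Minimal usage sketch (kept as a comment so importing this module has no side
# effects; the directory path and callback below are illustrative assumptions):
#
# def on_new_file(path):
#     print("new or changed file:", path)
#
# if __name__ == "__main__":
#     watcher = Watcher("/tmp/incoming", patterns=["*.pdf"], debug=True)
#     watcher.run(on_new_file, sleepTime=0.5, limit=60)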
|
import matplotlib.pyplot as plt
import numpy as np
from bandit import Bandit
if __name__ == '__main__':
k = 5
avg_reward1 = []
estimated_q = []
actual_q = []
action_values = np.random.uniform(low=-10, high=0, size=(k,))
for epsilon in ([0.01, .1, 1, 10]):
bdt = Bandit(k, epsilon, action_values)
bdt.play(1000)
avg_best_reward = bdt.best_avg_reward
avg_reward1 = bdt.avg_reward
estimated_q = bdt.Q
actual_q = action_values
print("Actual average value-action"f'{actual_q}')
print("Estimated average value-action"f'{estimated_q}')
plt.plot(avg_reward1, label=f"epsilon='{epsilon}'")
plt.xlabel("Steps")
plt.ylabel("Average Reward")
plt.title("5-armed Bandit Testbed")
plt.plot(avg_best_reward, linestyle='-.', label="best reward")
plt.legend()
plt.show()
|
import click
from ocrd.cli.ocrd_tool import ocrd_tool_cli
from ocrd.cli.workspace import workspace_cli
from ocrd.cli.generate_swagger import generate_swagger_cli
from ocrd.cli.process import process_cli
# TODO server CLI disabled
# from ocrd.cli.server import server_cli
from ocrd.cli.bashlib import bashlib_cli
@click.group()
@click.version_option()
def cli():
"""
CLI to OCR-D
"""
cli.add_command(ocrd_tool_cli)
cli.add_command(workspace_cli)
cli.add_command(generate_swagger_cli)
cli.add_command(process_cli)
# TODO server CLI disabled
# cli.add_command(server_cli)
cli.add_command(bashlib_cli)
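# Typical invocations once this group is installed as the "ocrd" console script
# (a sketch derived only from the sub-commands registered above; exact command
# names follow the imported click groups):
#   ocrd --version
#   ocrd workspace --help
#   ocrd process --help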
|
# Copyright 2018 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add module docstring."""
from datetime import datetime, timedelta
from dateutil.tz import tzlocal
import responses
from sampledata import forward_zone
from vinyldns.record import RecordType, AData
from vinyldns.serdes import to_json_string
from vinyldns.batch_change import AddRecordChange, DeleteRecordSetChange, BatchChange, BatchChangeRequest, \
DeleteRecordSet, AddRecord, BatchChangeSummary, ListBatchChangeSummaries, \
ValidationError
def check_validation_errors_are_same(a, b):
assert a.error_type == b.error_type
assert a.message == b.message
def check_single_changes_are_same(a, b):
assert a.zone_id == b.zone_id
assert a.zone_name == b.zone_name
assert a.record_name == b.record_name
assert a.input_name == b.input_name
assert a.type == b.type
assert a.status == b.status
assert a.id == b.id
assert a.system_message == b.system_message
assert a.record_change_id == b.record_change_id
assert a.record_set_id == b.record_set_id
if a.type == 'Add':
assert a.ttl == b.ttl
assert a.record_data == b.record_data
if a.validation_errors:
for l, r in zip(a.validation_errors, b.validation_errors):
check_validation_errors_are_same(l, r)
def check_batch_changes_are_same(a, b):
assert a.user_id == b.user_id
assert a.user_name == b.user_name
assert a.comments == b.comments
assert a.created_timestamp == b.created_timestamp
assert a.status == b.status
assert a.id == b.id
assert a.owner_group_id == b.owner_group_id
assert a.owner_group_name == b.owner_group_name
assert a.approval_status == b.approval_status
assert a.reviewer_id == b.reviewer_id
assert a.reviewer_user_name == b.reviewer_user_name
assert a.review_comment == b.review_comment
assert a.review_timestamp == b.review_timestamp
assert a.scheduled_time == b.scheduled_time
for l, r in zip(a.changes, b.changes):
check_single_changes_are_same(l, r)
def test_create_batch_change(mocked_responses, vinyldns_client):
ar = AddRecord('foo.baa.com', RecordType.A, 100, AData('1.2.3.4'))
drs = DeleteRecordSet('baz.bar.com', RecordType.A)
arc = AddRecordChange(forward_zone.id, forward_zone.name, 'foo', 'foo.bar.com', RecordType.A, 200,
AData('1.2.3.4'), 'Complete', 'id1', [], 'system-message', 'rchangeid1', 'rsid1')
drc = DeleteRecordSetChange(forward_zone.id, forward_zone.name, 'baz', 'baz.bar.com', RecordType.A, 'Complete',
'id2', [], 'system-message', 'rchangeid2', 'rsid2')
# Python 2/3 compatibility
try:
tomorrow = datetime.now().astimezone() + timedelta(1)
except TypeError:
tomorrow = datetime.now(tzlocal()).astimezone(tzlocal()) + timedelta(1)
bc = BatchChange('user-id', 'user-name', datetime.utcnow(), [arc, drc],
'bcid', 'Scheduled', 'PendingReview',
comments='batch change test', owner_group_id='owner-group-id',
scheduled_time=tomorrow)
mocked_responses.add(
responses.POST, 'http://test.com/zones/batchrecordchanges',
body=to_json_string(bc), status=200
)
r = vinyldns_client.create_batch_change(
BatchChangeRequest(
changes=[ar, drs],
comments='batch change test',
owner_group_id='owner-group-id',
scheduled_time=tomorrow
))
check_batch_changes_are_same(r, bc)
def test_get_batch_change(mocked_responses, vinyldns_client):
arc = AddRecordChange(forward_zone.id, forward_zone.name, 'foo', 'foo.bar.com',
RecordType.A, 200, AData('1.2.3.4'), 'Complete', 'id1',
[], 'system-message', 'rchangeid1', 'rsid1')
drc = DeleteRecordSetChange(forward_zone.id, forward_zone.name, 'baz',
'baz.bar.com', RecordType.A, 'Complete',
'id2', [], 'system-message', 'rchangeid2', 'rsid2')
bc = BatchChange('user-id', 'user-name', datetime.utcnow(), [arc, drc],
'bcid', 'Complete', 'AutoApproved',
comments='batch change test', owner_group_id='owner-group-id')
mocked_responses.add(
responses.GET, 'http://test.com/zones/batchrecordchanges/bcid',
body=to_json_string(bc), status=200
)
r = vinyldns_client.get_batch_change('bcid')
check_batch_changes_are_same(r, bc)
def test_approve_batch_change(mocked_responses, vinyldns_client):
arc = AddRecordChange(forward_zone.id, forward_zone.name, 'foo', 'foo.bar.com',
RecordType.A, 200, AData('1.2.3.4'), 'PendingReview',
'id1', [], 'system-message', 'rchangeid1', 'rsid1')
drc = DeleteRecordSetChange(forward_zone.id, forward_zone.name, 'baz',
'baz.bar.com', RecordType.A, 'PendingReview',
'id2', [], 'system-message', 'rchangeid2', 'rsid2')
bc = BatchChange('user-id', 'user-name', datetime.utcnow(), [arc, drc],
'bcid', 'Complete', 'ManuallyApproved',
comments='batch change test', owner_group_id='owner-group-id',
reviewer_id='admin-id', reviewer_user_name='admin',
review_comment='looks good', review_timestamp=datetime.utcnow())
mocked_responses.add(
responses.POST, 'http://test.com/zones/batchrecordchanges/bcid/approve',
body=to_json_string(bc), status=200
)
r = vinyldns_client.approve_batch_change('bcid', 'looks good')
check_batch_changes_are_same(r, bc)
def test_reject_batch_change(mocked_responses, vinyldns_client):
error_message = "Zone Discovery Failed: zone for \"foo.bar.com\" does not exist in VinylDNS. \
If zone exists, then it must be connected to in VinylDNS."
error = ValidationError('ZoneDiscoveryError', error_message)
arc = AddRecordChange(forward_zone.id, forward_zone.name, 'reject',
'reject.bar.com', RecordType.A, 200, AData('1.2.3.4'),
'PendingReview', 'id1', [error], 'system-message', 'rchangeid1', 'rsid1')
drc = DeleteRecordSetChange(forward_zone.id, forward_zone.name, 'reject2',
'reject2.bar.com', RecordType.A, 'Complete',
'id2', [], 'system-message', 'rchangeid2', 'rsid2')
bc = BatchChange('user-id', 'user-name', datetime.utcnow(), [arc, drc],
'bcid', 'Rejected', 'Rejected',
comments='batch change test', owner_group_id='owner-group-id',
reviewer_id='admin-id', reviewer_user_name='admin',
review_comment='not good', review_timestamp=datetime.utcnow())
mocked_responses.add(
responses.POST, 'http://test.com/zones/batchrecordchanges/bcid/reject',
body=to_json_string(bc), status=200
)
r = vinyldns_client.reject_batch_change('bcid', 'not good')
check_batch_changes_are_same(r, bc)
def test_list_batch_change_summaries(mocked_responses, vinyldns_client):
bcs1 = BatchChangeSummary('user-id', 'user-name', datetime.utcnow(), 10, 'id1',
'Complete', 'AutoApproved', comments='comments',
owner_group_id='owner-group-id')
bcs2 = BatchChangeSummary('user-id2', 'user-name2', datetime.utcnow(), 20,
'id2', 'Complete', 'AutoApproved', comments='comments2')
lbcs = ListBatchChangeSummaries([bcs1, bcs2], 'start', 'next', 50)
mocked_responses.add(
responses.GET, 'http://test.com/zones/batchrecordchanges?startFrom=start&maxItems=50',
body=to_json_string(lbcs), status=200
)
r = vinyldns_client.list_batch_change_summaries('start', 50)
assert r.start_from == lbcs.start_from
assert r.next_id == lbcs.next_id
assert r.max_items == lbcs.max_items
assert r.ignore_access == lbcs.ignore_access
assert r.approval_status == lbcs.approval_status
# avoid rebinding r (the client response) inside the loop
for left, right in zip(r.batch_changes, lbcs.batch_changes):
assert left.user_id == right.user_id
assert left.user_name == right.user_name
assert left.comments == right.comments
assert left.created_timestamp == right.created_timestamp
assert left.total_changes == right.total_changes
assert left.status == right.status
assert left.id == right.id
assert left.owner_group_id == right.owner_group_id
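# Sketch of the pytest fixtures these tests rely on (the real definitions live in
# a conftest.py outside this file; the client constructor arguments are assumptions):
#
# import pytest
# import responses
# from vinyldns.client import VinylDNSClient
#
# @pytest.fixture
# def mocked_responses():
#     with responses.RequestsMock() as rsps:
#         yield rsps
#
# @pytest.fixture
# def vinyldns_client():
#     return VinylDNSClient('http://test.com', 'access-key', 'secret-key')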
|
"""
Authors: Pratik Bhatu.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import onnx
from onnx import helper
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from onnx import TensorProto
import pytest
# Athos DIR
import sys, os
import optparse
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
from tests.utils import (
ONNXConfig,
Compiler,
assert_almost_equal,
make_onnx_graph,
run_onnx,
Frontend,
)
@pytest.mark.parametrize(
"a_shape",
[
((4, 4, 4, 4)), # Normal[[2, 2], []]
pytest.param(
(2, 2),
marks=pytest.mark.skip(reason="non 4/5D input not handled"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.parametrize(
"Op",
[
pytest.param("Neg", marks=pytest.mark.skip(reason="Neg not implemented")),
pytest.param("Floor", marks=pytest.mark.skip(reason="Floor not implemented")),
pytest.param(
"Identity", marks=pytest.mark.skip(reason="Identity not implemented")
),
],
)
def test_uop(test_dir, backend, Op, a_shape, dtype):
a = dtype(np.random.randn(*a_shape))
if Op == "Neg":
out = np.negative(a)
elif Op == "Floor":
out = np.floor(a)
elif Op == "Identity":
out = np.negative(a)
node = helper.make_node(
Op,
inputs=["a"],
outputs=["out"],
)
graph = make_onnx_graph(
node,
inputs=[a],
outputs=[out],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [a])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([a])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, axes, keepdims",
[
pytest.param(
(3, 2, 2),
None,
1,
# marks=pytest.mark.skip(reason="axes can't be none. keepdims has to be 0"),
id="default_axes_keepdims",
),
pytest.param(
(3, 2, 2),
[1],
0,
marks=pytest.mark.skip(reason="axes length has to be 2"),
id="do_not_keepdims",
),
pytest.param(
(3, 2, 2),
[1],
1,
marks=pytest.mark.skip(reason="keepdims has to be 0"),
id="keepdims",
),
pytest.param(
(3, 2, 2, 4),
[1, 2],
0,
marks=pytest.mark.skip(reason="segfault"),
id="reduce_nc",
),
pytest.param((3, 2, 2, 4), [2, 3], 0, id="reduce_hw"),
pytest.param(
(3, 2, 2),
[-2],
1,
marks=pytest.mark.skip(reason="don't support negative axes"),
id="negative_axes_keepdims",
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_reducemean(test_dir, backend, a_shape, axes, keepdims, dtype):
Op = "ReduceMean"
a = dtype(np.random.randn(*a_shape))
out = np.mean(
a, axis=(None if axes is None else tuple(axes)), keepdims=keepdims == 1
)
kwargs = {"name": Op, "inputs": ["a"], "outputs": ["out"], "keepdims": keepdims}
if axes is not None:
kwargs["axes"] = axes
node = helper.make_node(Op, **kwargs)
graph = make_onnx_graph(
node,
inputs=[a],
outputs=[out],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [a])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([a])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, start, end",
[
pytest.param(
(4, 4, 4, 4),
None,
None,
marks=pytest.mark.skip(reason="bug in addOutputs"),
),
pytest.param(
(2, 2),
None,
None,
marks=pytest.mark.skip(reason="bug in addOutputs"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_shape(test_dir, backend, a_shape, start, end, dtype):
Op = "Shape"
a = dtype(np.random.randn(*a_shape))
out = np.array(a.shape[start:end]).astype(np.int64)
kwargs = {}
if start is not None:
kwargs["start"] = start
if end is not None:
kwargs["end"] = end
node = onnx.helper.make_node(Op, inputs=["a"], outputs=["out"], **kwargs)
graph = make_onnx_graph(
node,
inputs=[a],
outputs=[out],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [a])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([a])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, kernel_shape, pads, strides, auto_pad, output_shape",
[
pytest.param(
[1, 3, 32],
[2],
[0, 0],
[1],
"NOTSET",
[1, 3, 31],
id="averagepool_1d_default",
marks=pytest.mark.skip(
reason="bug helper_processPool: list index out of range"
),
),
pytest.param(
[1, 3, 32, 32],
[2, 2],
[0, 0, 0, 0],
[1, 1],
"NOTSET",
[1, 3, 31, 31],
id="averagepool_2d_default",
),
pytest.param(
[1, 3, 28, 28],
[3, 3],
[2, 2, 2, 2],
[1, 1],
"NOTSET",
[1, 3, 30, 30],
id="averagepool_2d_pads1",
marks=pytest.mark.skip(reason="bug correctness issue. 23% mismatch"),
),
pytest.param(
[1, 1, 5, 5],
[5, 5],
[2, 2, 2, 2],
[1, 1],
"NOTSET",
[1, 1, 5, 5],
id="averagepool_2d_pads2",
marks=pytest.mark.skip(reason="bug correctness issue. 80-90% mismatch"),
),
pytest.param(
[1, 1, 5, 5],
[3, 3],
None,
[2, 2],
"SAME_UPPER",
[1, 1, 3, 3],
id="averagepool_2d_same_upper",
marks=pytest.mark.skip(reason="non explicit padding not supported"),
),
pytest.param(
[1, 3, 32, 32],
[2, 2],
None,
[1, 1],
"SAME_LOWER",
[1, 3, 32, 32],
id="averagepool_2d_same_lower",
marks=pytest.mark.skip(reason="non explicit padding not supported"),
),
pytest.param(
[1, 3, 32, 32],
[5, 5],
[0, 0, 0, 0],
[3, 3],
"NOTSET",
[1, 3, 10, 10],
id="averagepool_2d_strides",
),
pytest.param(
[1, 3, 32, 32, 32],
[2, 2, 2],
[0, 0, 0, 0, 0, 0],
[1, 1, 1],
"NOTSET",
[1, 3, 31, 31, 31],
id="averagepool_3d_default",
marks=pytest.mark.skip(reason="averagepool_3d not supported"),
),
],
)
# we don't support ceil_mode, count_include_pad
@pytest.mark.parametrize("dtype", [np.single])
def test_avgpool(
test_dir,
backend,
a_shape,
kernel_shape,
pads,
strides,
auto_pad,
output_shape,
dtype,
):
Op = "AveragePool"
a = np.random.randn(*a_shape).astype(dtype)
# Only need this for its shape
out = np.zeros(output_shape).astype(dtype)
kwargs = {
"inputs": ["a"],
"outputs": ["output"],
"kernel_shape": kernel_shape,
"strides": strides,
}
if auto_pad is "NOTSET":
kwargs["pads"] = pads
else:
kwargs["auto_pad"] = auto_pad
node = onnx.helper.make_node(Op, **kwargs)
graph = make_onnx_graph(
node,
inputs=[a],
outputs=[out],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [a])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([a])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape, kernel_shape, pads, strides, auto_pad, output_shape",
[
pytest.param(
[1, 3, 32],
[2],
[0, 0],
[1],
"NOTSET",
[1, 3, 31],
id="maxpool_1d_default",
marks=pytest.mark.skip(
reason="bug helper_processPool: list index out of range"
),
),
pytest.param(
[1, 3, 32, 32],
[2, 2],
[0, 0, 0, 0],
[1, 1],
"NOTSET",
[1, 3, 31, 31],
id="maxpool_2d_default",
),
pytest.param(
[1, 3, 28, 28],
[3, 3],
[2, 2, 2, 2],
[1, 1],
"NOTSET",
[1, 3, 30, 30],
id="maxpool_2d_pads1",
marks=pytest.mark.skip(reason="bug correctness issue. 1.8% mismatch"),
),
pytest.param(
[1, 1, 5, 5],
[5, 5],
[2, 2, 2, 2],
[1, 1],
"NOTSET",
[1, 1, 5, 5],
id="maxpool_2d_pads2",
),
pytest.param(
[1, 1, 5, 5],
[3, 3],
None,
[2, 2],
"SAME_UPPER",
[1, 1, 3, 3],
id="maxpool_2d_same_upper",
marks=pytest.mark.skip(reason="non explicit padding not supported"),
),
pytest.param(
[1, 3, 32, 32],
[2, 2],
None,
[1, 1],
"SAME_LOWER",
[1, 3, 32, 32],
id="maxpool_2d_same_lower",
marks=pytest.mark.skip(reason="non explicit padding not supported"),
),
pytest.param(
[1, 3, 32, 32],
[5, 5],
[0, 0, 0, 0],
[3, 3],
"NOTSET",
[1, 3, 10, 10],
id="maxpool_2d_strides",
),
pytest.param(
[1, 3, 32, 32, 32],
[2, 2, 2],
[0, 0, 0, 0, 0, 0],
[1, 1, 1],
"NOTSET",
[1, 3, 31, 31, 31],
id="maxpool_3d_default",
marks=pytest.mark.skip(reason="maxpool_3d not supported"),
),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_maxpool(
test_dir,
backend,
a_shape,
kernel_shape,
pads,
strides,
auto_pad,
output_shape,
dtype,
):
Op = "MaxPool"
a = np.random.randn(*a_shape).astype(dtype)
# Only need this for its shape
out = np.zeros(output_shape).astype(dtype)
kwargs = {
"inputs": ["a"],
"outputs": ["output"],
"kernel_shape": kernel_shape,
"strides": strides,
}
if auto_pad is "NOTSET":
kwargs["pads"] = pads
else:
kwargs["auto_pad"] = auto_pad
node = onnx.helper.make_node(Op, **kwargs)
graph = make_onnx_graph(
node,
inputs=[a],
outputs=[out],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [a])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([a])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"a_shape",
[
((1, 3, 5, 5)),
],
)
@pytest.mark.parametrize("dtype", [np.single])
def test_global_avgpool(test_dir, backend, a_shape, dtype):
a = dtype(np.random.randn(*a_shape))
out = np.mean(a, axis=tuple(range(2, np.ndim(a))), keepdims=True)
Op = "GlobalAveragePool"
node = helper.make_node(
Op,
inputs=[
"a",
],
outputs=["out"],
)
graph = make_onnx_graph(
node,
inputs=[a],
outputs=[out],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [a])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([a])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"from_type, to_type",
[
pytest.param("FLOAT", "FLOAT", id="cast_identity"),
pytest.param("FLOAT", "FLOAT16", id="cast_f32_f16"),
pytest.param("FLOAT", "DOUBLE", id="cast_f32_d"),
pytest.param("FLOAT16", "FLOAT", id="cast_f16_f32"),
pytest.param("FLOAT16", "DOUBLE", id="cast_f16_d"),
pytest.param("DOUBLE", "FLOAT", id="cast_d_f32"),
pytest.param("DOUBLE", "FLOAT16", id="cast_d_f16"),
pytest.param("FLOAT", "STRING", id="cast_f32_string"),
pytest.param("STRING", "FLOAT", id="cast_string_f32"),
],
)
@pytest.mark.parametrize(
"compile_time",
[
pytest.param(True),
pytest.param(
False,
marks=pytest.mark.skip(
reason="""we don't support runtime casting.
Only casts of constants at compile time
are supported and no-ops casts (Identity)"""
),
),
],
)
@pytest.mark.skip(reason="[cast] Bug in add_outputs() - KeyError: 'output'")
def test_cast(test_dir, backend, from_type, to_type, compile_time):
Op = "Cast"
shape = (3, 4)
if "STRING" != from_type:
input = np.random.random_sample(shape).astype(
TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)]
)
if "STRING" == to_type:
# Converting input to str, then give it object dtype for generating script
ss = []
for i in input.flatten():
s = str(i).encode("utf-8")
su = s.decode("utf-8")
ss.append(su)
output = np.array(ss).astype(object).reshape([3, 4])
else:
output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
else:
input = np.array(
[
"0.47892547",
"0.48033667",
"0.49968487",
"0.81910545",
"0.47031248",
"0.816468",
"0.21087195",
"0.7229038",
"NaN",
"INF",
"+INF",
"-INF",
],
dtype=np.dtype(object),
).reshape([3, 4])
output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
node = onnx.helper.make_node(
Op,
inputs=["input"],
outputs=["output"],
to=getattr(TensorProto, to_type),
)
if compile_time == True:
graph = make_onnx_graph(
node,
inputs=[],
outputs=[output],
tensors=[input],
tensor_names=["input"],
name=Op + "_test",
)
expected_output = run_onnx(graph, [])
else:
graph = make_onnx_graph(
node,
inputs=[input],
outputs=[output],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [input])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
if compile_time == True:
mpc_output = compiler.compile_and_run([])
else:
mpc_output = compiler.compile_and_run([input])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
@pytest.mark.parametrize(
"shape, attribute",
[
pytest.param((3, 4), "value", id="constant_tensor"),
pytest.param((1), "value_float", id="constant_float_scalar"),
pytest.param((20), "value_floats", id="constant_floats"),
pytest.param((1), "value_int", id="constant_int_scalar"),
pytest.param((20), "value_ints", id="constant_ints"),
pytest.param(
(3, 4),
"sparse_value",
marks=pytest.mark.skip(reason="We don't support sparse tensors"),
),
pytest.param(
(1),
"value_string",
marks=pytest.mark.skip(reason="We don't support string tensors"),
),
pytest.param(
(20),
"value_strings",
marks=pytest.mark.skip(reason="We don't support string tensors"),
),
],
)
@pytest.mark.skip(
reason="""[constant] onnxsim gives runtime error.
Issue is it doesn't support opset version 13 of this node.
Need to fix onnxoptimize upstream"""
)
def test_constant(test_dir, backend, shape, attribute):
Op = "Constant"
kwargs = {}
print("Shape = ", shape)
if attribute == "value":
values = np.random.randn(*shape).astype(np.float32)
kwargs[attribute] = onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
)
elif attribute == "value_float":
values = np.random.randn(1).astype(np.float32)
kwargs[attribute] = values[0]
elif attribute == "value_floats":
values = np.random.randn(*shape).astype(np.float32)
kwargs[attribute] = values.flatten().astype(float)
elif attribute == "value_int":
values = np.array(np.random.randint(-(2 ** 32 - 1), 2 ** 32 - 1)).astype(
np.int64
)
kwargs[attribute] = int(values)
elif attribute == "value_ints":
values = np.random.randint(-(2 ** 32 - 1), 2 ** 32 - 1, shape).astype(np.int32)
print(values)
kwargs[attribute] = values.flatten().astype(int)
kwargs["inputs"] = []
kwargs["outputs"] = ["values"]
node = helper.make_node(Op, **kwargs)
graph = make_onnx_graph(
node,
inputs=[],
outputs=[values],
tensors=[],
tensor_names=[],
name=Op + "_test",
)
expected_output = run_onnx(graph, [])
config = ONNXConfig(backend).parse_io(graph)
compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
mpc_output = compiler.compile_and_run([])
assert_almost_equal(
model_output=expected_output, mpc_tensor=mpc_output, precision=2
)
return
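# Shared structure of the tests above (summary of what each test body does):
#   1. build a single ONNX node with onnx.helper.make_node(...)
#   2. wrap it into a graph with make_onnx_graph(...)
#   3. expected_output = run_onnx(graph, inputs)          # reference execution
#   4. mpc_output = Compiler(graph, config, test_dir, Frontend.ONNX).compile_and_run(inputs)
#   5. assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)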
|
import json
from http import HTTPStatus
from json.decoder import JSONDecodeError
import jwt
import requests
from flask import request, current_app, jsonify, g
from jwt import InvalidSignatureError, DecodeError, InvalidAudienceError
from requests.exceptions import (
SSLError,
ConnectionError,
InvalidURL,
HTTPError
)
from api.errors import (
AuthorizationError,
InvalidInputError,
PulsediveKeyError,
PulsediveSSLError,
UnexpectedPulsediveError
)
NO_AUTH_HEADER = 'Authorization header is missing'
WRONG_AUTH_TYPE = 'Wrong authorization type'
WRONG_PAYLOAD_STRUCTURE = 'Wrong JWT payload structure'
WRONG_JWT_STRUCTURE = 'Wrong JWT structure'
WRONG_AUDIENCE = 'Wrong configuration-token-audience'
KID_NOT_FOUND = 'kid from JWT header not found in API response'
WRONG_KEY = ('Failed to decode JWT with provided key. '
'Make sure domain in custom_jwks_host '
'corresponds to your SecureX instance region.')
JWKS_HOST_MISSING = ('jwks_host is missing in JWT payload. Make sure '
'custom_jwks_host field is present in module_type')
WRONG_JWKS_HOST = ('Wrong jwks_host in JWT payload. Make sure domain follows '
'the visibility.<region>.cisco.com structure')
def set_ctr_entities_limit(payload):
try:
ctr_entities_limit = int(payload['CTR_ENTITIES_LIMIT'])
assert ctr_entities_limit > 0
except (KeyError, ValueError, AssertionError):
ctr_entities_limit = current_app.config['CTR_DEFAULT_ENTITIES_LIMIT']
current_app.config['CTR_ENTITIES_LIMIT'] = ctr_entities_limit
def get_public_key(jwks_host, token):
expected_errors = (
ConnectionError,
InvalidURL,
JSONDecodeError,
HTTPError,
)
try:
response = requests.get(f"https://{jwks_host}/.well-known/jwks")
response.raise_for_status()
jwks = response.json()
public_keys = {}
for jwk in jwks['keys']:
kid = jwk['kid']
public_keys[kid] = jwt.algorithms.RSAAlgorithm.from_jwk(
json.dumps(jwk)
)
kid = jwt.get_unverified_header(token)['kid']
return public_keys.get(kid)
except expected_errors:
raise AuthorizationError(WRONG_JWKS_HOST)
def get_jwt():
"""
Get Authorization token and validate its signature
against the public key from /.well-known/jwks endpoint.
"""
expected_errors = {
KeyError: WRONG_PAYLOAD_STRUCTURE,
AssertionError: JWKS_HOST_MISSING,
InvalidSignatureError: WRONG_KEY,
DecodeError: WRONG_JWT_STRUCTURE,
InvalidAudienceError: WRONG_AUDIENCE,
TypeError: KID_NOT_FOUND,
}
token = get_auth_token()
try:
jwks_payload = jwt.decode(token, options={'verify_signature': False})
assert 'jwks_host' in jwks_payload
jwks_host = jwks_payload.get('jwks_host')
key = get_public_key(jwks_host, token)
aud = request.url_root
payload = jwt.decode(
token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
)
set_ctr_entities_limit(payload)
return payload['key']
except tuple(expected_errors) as error:
message = expected_errors[error.__class__]
raise AuthorizationError(message)
def get_auth_token():
"""
Parse the incoming request's Authorization header and validate it.
"""
expected_errors = {
KeyError: NO_AUTH_HEADER,
AssertionError: WRONG_AUTH_TYPE
}
try:
scheme, token = request.headers['Authorization'].split()
assert scheme.lower() == 'bearer'
return token
except tuple(expected_errors) as error:
raise AuthorizationError(expected_errors[error.__class__])
def get_json(schema):
"""
Parse the incoming request's data as JSON.
Validate it against the specified schema.
Note. This function is just an example of how one can read and check
anything before passing to an API endpoint, and thus it may be modified in
any way, replaced by another function, or even removed from the module.
"""
data = request.get_json(force=True, silent=True, cache=False)
message = schema.validate(data)
if message:
raise InvalidInputError(message)
return data
def perform_request(params):
headers = {
'User-Agent': current_app.config['USER_AGENT']
}
url = current_app.config['API_URL']
response = requests.get(url, params=params, headers=headers)
if response.status_code == HTTPStatus.OK:
return response.json()
elif response.status_code in current_app.config['NOT_CRITICAL_ERRORS']:
return {}
raise UnexpectedPulsediveError(response)
def jsonify_data(data):
return jsonify({'data': data})
def jsonify_errors(error):
return jsonify({'errors': [error]})
def format_docs(docs):
return {'count': len(docs), 'docs': docs}
def jsonify_result():
result = {'data': {}}
if g.get('sightings'):
result['data']['sightings'] = format_docs(g.sightings)
if g.get('indicators'):
result['data']['indicators'] = format_docs(g.indicators)
if g.get('judgements'):
result['data']['judgements'] = format_docs(g.judgements)
if g.get('verdicts'):
result['data']['verdicts'] = format_docs(g.verdicts)
if g.get('relationships'):
result['data']['relationships'] = format_docs(g.relationships)
if g.get('errors'):
result['errors'] = g.errors
if not result['data']:
del result['data']
return jsonify(result)
def key_error_handler(func):
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
except KeyError:
raise PulsediveKeyError
return result
return wrapper
def ssl_error_handler(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except SSLError as error:
raise PulsediveSSLError(error)
return wrapper
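# Illustrative wiring sketch (kept as a comment; the route name below is an
# assumption - the real endpoints live elsewhere in the api/ package):
#
# @app.route('/health', methods=['POST'])
# @ssl_error_handler
# def health():
#     api_key = get_jwt()                 # validates the bearer token, returns the Pulsedive key
#     perform_request({'key': api_key})   # minimal reachability check against API_URL
#     return jsonify_data({'status': 'ok'})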
|
class PPU:
pass
|
# import cv2
import deepul.pytorch_util as ptu
import numpy as np
import scipy.ndimage
import torch.nn as nn
import torch.utils.data
import torchvision
from PIL import Image as PILImage
from torchvision import transforms as transforms
from .hw4_utils.hw4_models import GoogLeNet
from .utils import *
CLASSES = ("plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")
import math
import sys
import numpy as np
softmax = None
model = None
device = torch.device("cuda:0")
def plot_gan_training(losses, title, fname):
plt.figure()
n_itr = len(losses)
xs = np.arange(n_itr)
plt.plot(xs, losses, label="loss")
plt.legend()
plt.title(title)
plt.xlabel("Training Iteration")
plt.ylabel("Loss")
savefig(fname)
def q1_gan_plot(data, samples, xs, ys, title, fname):
plt.figure()
plt.hist(samples, bins=50, density=True, alpha=0.7, label="fake")
plt.hist(data, bins=50, density=True, alpha=0.7, label="real")
plt.plot(xs, ys, label="discrim")
plt.legend()
plt.title(title)
savefig(fname)
######################
##### Question 1 #####
######################
def q1_data(n=20000):
assert n % 2 == 0
gaussian1 = np.random.normal(loc=-1, scale=0.25, size=(n // 2,))
gaussian2 = np.random.normal(loc=0.5, scale=0.5, size=(n // 2,))
data = (np.concatenate([gaussian1, gaussian2]) + 1).reshape([-1, 1])
scaled_data = (data - np.min(data)) / (np.max(data) - np.min(data) + 1e-8)
return 2 * scaled_data - 1
def visualize_q1_dataset():
data = q1_data()
plt.hist(data, bins=50, alpha=0.7, label="train data")
plt.legend()
plt.show()
def q1_save_results(part, fn):
data = q1_data()
losses, samples1, xs1, ys1, samples_end, xs_end, ys_end = fn(data)
# loss plot
plot_gan_training(losses, "Q1{} Losses".format(part), "results/q1{}_losses.png".format(part))
# samples
q1_gan_plot(data, samples1, xs1, ys1, "Q1{} Epoch 1".format(part), "results/q1{}_epoch1.png".format(part))
q1_gan_plot(data, samples_end, xs_end, ys_end, "Q1{} Final".format(part), "results/q1{}_final.png".format(part))
######################
##### Question 2 #####
######################
def calculate_is(samples):
assert type(samples[0]) == np.ndarray
assert len(samples[0].shape) == 3
model = GoogLeNet().to(ptu.device)
model.load_state_dict(torch.load("deepul/deepul/hw4_utils/classifier.pt"))
softmax = nn.Sequential(model, nn.Softmax(dim=1))
bs = 100
softmax.eval()
with torch.no_grad():
preds = []
n_batches = int(math.ceil(float(len(samples)) / float(bs)))
for i in range(n_batches):
sys.stdout.write(".")
sys.stdout.flush()
inp = ptu.FloatTensor(samples[(i * bs) : min((i + 1) * bs, len(samples))])
pred = ptu.get_numpy(softmax(inp))
preds.append(pred)
preds = np.concatenate(preds, 0)
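# Inception Score = exp( E_x[ KL( p(y|x) || p(y) ) ] ); p(y) is approximated below
# by the mean prediction over all generated samples.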
kl = preds * (np.log(preds) - np.log(np.expand_dims(np.mean(preds, 0), 0)))
kl = np.mean(np.sum(kl, 1))
return np.exp(kl)
def load_q2_data():
train_data = torchvision.datasets.CIFAR10(
"./data", transform=torchvision.transforms.ToTensor(), download=True, train=True
)
return train_data
def visualize_q2_data():
train_data = load_q2_data()
imgs = train_data.data[:100]
show_samples(imgs, title=f"CIFAR-10 Samples")
def q2_save_results(fn):
train_data = load_q2_data()
train_data = train_data.data.transpose((0, 3, 1, 2)) / 255.0
train_losses, samples = fn(train_data)
print("Inception score:", calculate_is(samples.transpose([0, 3, 1, 2])))
plot_gan_training(train_losses, "Q2 Losses", "results/q2_losses.png")
show_samples(samples[:100] * 255.0, fname="results/q2_samples.png", title=f"CIFAR-10 generated samples")
######################
##### Question 3 #####
######################
def load_q3_data():
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_data = torchvision.datasets.MNIST(root="./data", train=True, download=True, transform=transform)
test_data = torchvision.datasets.MNIST(root="./data", train=False, download=True, transform=transform)
return train_data, test_data
def visualize_q3_data():
train_data, _ = load_q3_data()
imgs = train_data.data[:100]
show_samples(imgs.reshape([100, 28, 28, 1]) * 255.0, title=f"MNIST samples")
def plot_q3_supervised(pretrained_losses, random_losses, title, fname):
plt.figure()
xs = np.arange(len(pretrained_losses))
plt.plot(xs, pretrained_losses, label="bigan")
xs = np.arange(len(random_losses))
plt.plot(xs, random_losses, label="random init")
plt.legend()
plt.title(title)
savefig(fname)
def q3_save_results(fn):
train_data, test_data = load_q3_data()
gan_losses, samples, reconstructions, pretrained_losses, random_losses = fn(train_data, test_data)
plot_gan_training(gan_losses, "Q3 Losses", "results/q3_gan_losses.png")
plot_q3_supervised(
pretrained_losses, random_losses, "Linear classification losses", "results/q3_supervised_losses.png"
)
show_samples(samples * 255.0, fname="results/q3_samples.png", title="BiGAN generated samples")
show_samples(
reconstructions * 255.0, nrow=20, fname="results/q3_reconstructions.png", title=f"BiGAN reconstructions"
)
print("BiGAN final linear classification loss:", pretrained_losses[-1])
print("Random encoder linear classification loss:", random_losses[-1])
|
"""
Copyright (c) 2021, FireEye, Inc.
Copyright (c) 2021 Giorgio Severi
"""
import os
import shap
import joblib
import tensorflow as tf
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense, BatchNormalization, Activation, Input, Dropout
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
class EmberNN(object):
def __init__(self, n_features, aug_noise=False, aug_flip=False, epochs=10, batch_size=512):
self.n_features = n_features
self.normal = StandardScaler()
self.model = self.build_model()
self.exp = None
self.aug_noise = aug_noise
self.aug_flip = aug_flip
self.batch_size = batch_size
self.epochs = epochs
lr = 0.1
momentum = 0.9
decay = 0.000001
opt = SGD(lr=lr, momentum=momentum, decay=decay)
self.model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
def fit(self, X, y):
# if not self.discretize:
X = self.normal.fit_transform(X)
y_ori = y.copy()
x_ori = X.copy()
aug_size = 10 # X10 data size
if self.aug_noise and self.aug_flip:
aug_size = 5
if self.aug_noise:
for _ in range(aug_size):
X = np.vstack([X, x_ori + np.random.normal(0, 0.1, x_ori.shape)])
y = np.append(y, y_ori)
if self.aug_flip:
for _ in range(aug_size):
x_trans = x_ori.copy()
idx = np.arange(self.n_features)
for i in range(len(x_trans)):
np.random.shuffle(idx)
for j in range(int(0.01 * self.n_features)): # flip 1% of features
x_trans[i][idx[j]] = x_ori[np.random.randint(0, len(x_trans))][idx[j]]
X = np.vstack([X, x_trans])
y = np.append(y, y_ori)
# elif self.aug_flip:
# X = self.normal.fit_transform(X)
# # flip_mask = np.random.random(X.shape) < 0.1 # flip 1% of features
# # rand_indices = np.random.randint(0, len(X), X.shape)
# # X = (1 - flip_mask) * X + flip_mask * X[rand_indices, np.arange(self.n_features)]
# else:
# X = self.discretizer.fit_transform(X)
self.model.fit(X, y, batch_size=self.batch_size, epochs=self.epochs)
def predict(self, X):
# if not self.discretize or self.aug_flip:
X = self.normal.transform(X)
# else:
# X = self.discretizer.transform(X)
return self.model.predict(X, batch_size=self.batch_size)
def build_model(self):
input1 = Input(shape=(self.n_features,))
dense1 = Dense(2000, activation='relu')(input1)
norm1 = BatchNormalization()(dense1)
drop1 = Dropout(0.5)(norm1)
dense2 = Dense(1000, activation='relu')(drop1)
norm2 = BatchNormalization()(dense2)
drop2 = Dropout(0.5)(norm2)
dense3 = Dense(100, activation='relu')(drop2)
norm3 = BatchNormalization()(dense3)
drop3 = Dropout(0.5)(norm3)
dense4 = Dense(1)(drop3)
out = Activation('sigmoid')(dense4)
model = Model(inputs=[input1], outputs=[out])
return model
def explain(self, X_back, X_exp, n_samples=100):
if self.exp is None:
self.exp = shap.GradientExplainer(self.model, self.normal.transform(X_back))
return self.exp.shap_values(self.normal.transform(X_exp), nsamples=n_samples)
def save(self, save_path, file_name='ember_nn'):
# Save the trained scaler so that it can be reused at test time
joblib.dump(self.normal, os.path.join(save_path, file_name + '_scaler.pkl'))
save_model = self.model
save_model.save(os.path.join(save_path, file_name + '.h5'))
def load(self, save_path, file_name):
# Load the trained scaler
self.normal = joblib.load(os.path.join(save_path, file_name + '_scaler.pkl'))
self.model = load_model(os.path.join(save_path, file_name + '.h5'))
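# Minimal usage sketch (commented out; X_train/X_test/y_train are assumed to be
# numpy arrays with X_*.shape[1] == n_features):
#
# nn_model = EmberNN(n_features=X_train.shape[1], epochs=10)
# nn_model.fit(X_train, y_train)
# scores = nn_model.predict(X_test)                                 # sigmoid outputs in [0, 1]
# shap_values = nn_model.explain(X_train[:100], X_test[:10], n_samples=100)
# nn_model.save('/tmp/models', file_name='ember_nn')                # writes .h5 + _scaler.pkl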
|
import os
import unittest
import shutil
from jtalks.Tomcat import Tomcat, TomcatNotFoundException
class TomcatTest(unittest.TestCase):
def setUp(self):
os.mkdir('test_tomcat')
def tearDown(self):
shutil.rmtree('test_tomcat')
def test_move_war_to_webapps_should_actually_moves_it(self):
# given
tmpfile = file('test_tomcat/tomcat-test-project.tmp', 'w')
os.mkdir('test_tomcat/webapps')
# when
dst_filename = Tomcat('test_tomcat').move_to_webapps(tmpfile.name, 'tomcat-test-project')
# then
self.assertEqual('test_tomcat/webapps/tomcat-test-project.war', dst_filename)
self.assertTrue(os.path.exists(dst_filename))
def test_move_war_deletes_prev_app_dir(self):
# given
tmpdir = os.path.join('test_tomcat', 'webapps', 'tomcat-test-project', 'tmpdir')
os.makedirs(tmpdir)
tmpfile = file('test_tomcat/tomcat-test-project', 'w')
# when
Tomcat('test_tomcat').move_to_webapps(tmpfile.name, 'tomcat-test-project')
# then:
self.assertFalse(os.path.exists(tmpdir), 'Tomcat did not remove previous app folder from webapps')
def test_move_war_to_webapps_raises_if_tomcat_location_is_wrong(self):
tomcat = Tomcat('./')
self.assertRaises(TomcatNotFoundException, tomcat.move_to_webapps, 'src_filepath', 'appname')
def test_start(self):
os.makedirs('test_tomcat/bin')
file('test_tomcat/bin/startup.sh', 'w').write('echo test > test_tomcat/test; sleep 5 &')
Tomcat('test_tomcat').start()
self.assertEqual('test\n', file('test_tomcat/test').readline())
|
import time
import json
import asyncio
import websockets
from config import host, port
from config import nginx_log_file_path
from tail_f import tail_f
from handle import handle
# async def handle_server(websocket, path):
@asyncio.coroutine
def handle_server(websocket, path):
request_count = 0
logfile = open(nginx_log_file_path)
lines = tail_f(logfile)
log_dicts = handle(lines)
start = time.time()
for log_dict in log_dicts:
# await websocket.send(str(log_dict))
request_count += 1
during_time = time.time() - start
log_dict.update({
'request_per_second': round(
(request_count / during_time), 2
)
})
yield from websocket.send(str(json.dumps(log_dict)))
start_server = websockets.serve(handle_server, host, int(port))
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
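# Hypothetical client for the server above (commented out; reuses host/port from
# config and simply prints every JSON message pushed by handle_server). Written in
# the same legacy @asyncio.coroutine style as this module:
#
# @asyncio.coroutine
# def consume():
#     ws = yield from websockets.connect('ws://{}:{}'.format(host, port))
#     while True:
#         message = yield from ws.recv()
#         print(json.loads(message))
#
# asyncio.get_event_loop().run_until_complete(consume())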
|
from django.contrib import admin
from .models import Spectacle, Movie, Play, Show
admin.site.register(Spectacle)
admin.site.register(Movie)
admin.site.register(Play)
admin.site.register(Show)
|
from flask import Flask, request
from flask_restful import Api, Resource
import sqlite3
app = Flask(__name__)
api = Api(app)
class Paperopoli(Resource):
def get(self):
return {'ciao': 'mondo'}
api.add_resource(Paperopoli, '/personaggi')
if __name__ == '__main__':
app.run(debug=True)
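# Quick manual check (sketch): with the app running locally on Flask's default port,
#   curl http://127.0.0.1:5000/personaggi
# should return {"ciao": "mondo"}.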
|
#!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import os, uuid
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from threading import Thread
from Utilities.file_logger import FileLogger
from Utilities.misc import Printer
#========================================================================
class Analyzer(Printer):
ANALYZED_EXPERIMENTS = []
def __init__(self, settings, verbose = True):
Printer.__init__(self, 'ANALYZER', color = 'grey')
self.settings = settings
self.verbose = verbose
def _analyze(self, request, observations):
# print(observations)
# get all the losses
losses = {}
for element in observations:
elem_losses = element['loss']
for key, value in elem_losses.items():
if not key in losses:
losses[key] = []
losses[key].append(value)
plt.clf()
for key, loss_list in losses.items():
domain = np.arange(len(loss_list)) + 1
loss_list = np.array(loss_list)
# plt.plot(domain, loss_list, ls = '', marker = 'o', color = 'w', markersize = 10)
# plt.plot(domain, loss_list, ls = '', marker = 'o', color = 'k', alpha = 0.8, markersize = 10)
plt.plot(domain, loss_list, ls = '', marker = 'o', alpha = 0.5, markersize = 10, label = key)
plt.legend()
plt.xlabel('# experiments')
file_name = '%s/%s.png' % (self.settings['scratch_dir'], str(uuid.uuid4()))
plt.savefig(file_name, bbox_inches = 'tight')
exp_dict = {'request_details': request, 'observations': observations, 'progress_file': file_name, 'status': 'new'}
self.ANALYZED_EXPERIMENTS.append(exp_dict)
def analyze(self, request, observations):
analysis_thread = Thread(target = self._analyze, args = (request, observations))
analysis_thread.start()
def get_analyzed_experiments(self):
analyzed_experiments = []
for exp_dict in self.ANALYZED_EXPERIMENTS:
if exp_dict['status'] == 'new':
analyzed_experiments.append(exp_dict)
exp_dict['status'] = 'fetched'
return analyzed_experiments
|
import os
from urllib.parse import urlparse
from ruamel import yaml
def get_db_instance_dict(db_url, tags):
url_components = urlparse(db_url)
host = url_components.hostname
port = url_components.port
username = url_components.username
password = url_components.password
database_instance = """\
- server: {host}
user: {username}
pass: '{password}'
port: {port}
tags: {tags}
options:
replication: true
galera_cluster: true
extra_status_metrics: true
extra_innodb_metrics: true
extra_performance_metrics: true
schema_size_metrics: false
disable_innodb_metrics: false
""".format(host=host, username=username, password=password, port=port, tags=tags)
return database_instance
def get_database_url_with_index(index):
return os.environ.get('DB_URL_{}'.format(index), None)
def get_database_tags_with_index(index):
tag_environment_key = 'DB_TAGS_{}'.format(index)
return os.environ[tag_environment_key].split(',')
def get_database_instances():
instances = []
database_number = 1
while get_database_url_with_index(database_number):
db = get_database_url_with_index(database_number)
tags = get_database_tags_with_index(database_number)
instances += [get_db_instance_dict(db, tags)]
database_number += 1
return instances
if __name__ == "__main__":
instances = "\n".join(get_database_instances())
yaml_dict = """\
init_config:
instances:
{instances}
""".format(instances=instances)
with open('/etc/datadog-agent/conf.d/mysql.d/conf.yaml', 'w') as outfile:
data = yaml.round_trip_load(yaml_dict, preserve_quotes=True)
yaml.round_trip_dump(data, outfile)
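# Example environment (illustrative values): with
#   DB_URL_1=mysql://datadog:s3cr3t@db1.internal:3306
#   DB_TAGS_1=env:prod,service:orders
# the loop above emits one instance block with server: db1.internal, port: 3306,
# user: datadog and tags: ['env:prod', 'service:orders'], then writes it into
# /etc/datadog-agent/conf.d/mysql.d/conf.yaml.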
|
import config
import os,time
from slack import WebClient
def log(func):
def wrapper(*args, **kw):
start = time.time()
run_func= func(*args, **kw)
end = time.time()
print('%s executed in %s ms' % (func.__name__, (end - start) * 1000))
return run_func
return wrapper
class Channel:
def __init__(self,channelID,name=None):
self.name = name
self.ChannelID=channelID
self.Members=[]
self.Private =False
class CPrePareInfo:
def __init__(self,slack_web_client,QQBot):
self.slack_web_client = slack_web_client
self.QQBot = QQBot
self.global_userList={}
self.global_channels_List={} #{'channelID':{'name':'truename','member':[],'private':'True'}}
self.global_QQ_UserID={}
self.global_QQID_displayName={}
self.__needAlert_userList={}#{'needAlertChannelID':{'member':['wang','x','b']},'needAlertChannelID':{'member':['wang','x','b']}}
def getQQID(self,name,display_name):
dict={'name':name,'display_name':display_name}
try:
dict['QQ'] = self.global_QQ_UserID[name]
self.global_QQID_displayName[dict['QQ']] = display_name
except:
pass
return dict
@log
def getUserList(self):
response = self.slack_web_client.users_list()
#print(response)
if response['ok'] :
for a in response['members']:
#print(a['id'],a['name'])
self.global_userList[a['id']] = self.getQQID(a['name'],a['profile']['display_name'])
@property
def needAlert_userList(self):
return self.__needAlert_userList
def getchannelID_byName(self,name):
for a in self.global_channels_List:
if self.global_channels_List[a].name == name:
return a
return ''
@log
def getchannelName(self,channelID):
a= self.slack_web_client.conversations_info(channel=channelID)
if a['ok']==False:
return
if channelID not in self.global_channels_List:
self.global_channels_List[a['channel']['id']] =Channel(channelID)
self.global_channels_List[a['channel']['id']].name = a['channel']['name']
self.global_channels_List[channelID].Private = a['channel']['is_private']
@log
def getchannelMembers(self,channelID):
a = self.slack_web_client.conversations_members(channel=channelID)
if a['ok'] ==False:
return
#print(a)
needlist=[]
for userId in a['members']:
self.global_channels_List[channelID].Members.append(userId)
if 'QQ' in self.global_userList[userId]:
needlist.append(self.global_userList[userId]['QQ'])
if len(needlist)>0:
self.__needAlert_userList[channelID]=needlist
@log
def getchannels_info(self,channelID):
self.getchannelName(channelID)
self.getchannelMembers(channelID)
@log
def getMemberName(self,userID):
if userID in self.global_userList:
return self.global_userList[userID]['name']
responseInfo = self.slack_web_client.users_info(user=userID)
if responseInfo.get('ok',False):
d = responseInfo.data['user']
print(d['real_name'])
self.global_userList[userID] = self.getQQID(d['real_name'],d['name'])
return d['real_name']
return 'NoName'
@log
def getChannels_list(self):
responsePrivate=self.slack_web_client.conversations_list(types='public_channel,private_channel')
#print(responsePrivate)
for a in responsePrivate['channels']:
self.global_channels_List[a['id']] =Channel(a['id'], a['name'])
self.getchannels_info(a['id'])
@log
def get_group_member_list(self):
a = self.QQBot.get_group_member_list(group_id=config.group_id)
print("get_group_member_list ",a)
for b in a:
self.global_QQ_UserID[b['card']] =b['user_id']
def findName_byQQId(self,QQId):
if QQId in self.global_QQID_displayName:
return self.global_QQID_displayName[QQId]
return 'None'
@log
def autoloadaaa(self):
try:
if len(self.global_userList)==0:
self.getUserList()
if len(self.global_channels_List)==0:
self.getChannels_list()
except Exception as inst:
print(inst)
pass
if __name__ == "__main__":
slack_web_client = WebClient(token=os.environ['SLACK_BOT_TOKEN'])
prepareInfo =CPrePareInfo(slack_web_client,None)
#prepareInfo.autoloadaaa()
try:
prepareInfo.getchannels_info('CUV4HHNSH')
except Exception as inst:
print(inst)
|
"""
Data structure for implementing experience replay
Author: Patrick Emami, Modified by: Sri Ramana
"""
from collections import deque
import random
import numpy as np
from copy import deepcopy
class ReplayBuffer(object):
def __init__(self, buffer_size, min_hlen = 0, random_seed=123):
"""
The right side of the deque contains the most recent experiences
"""
self.buffer_size = buffer_size
self.min_hlen = min_hlen
self.count = 0
self.ncount = 0
self.buffer = deque()
self.oa_batch = {}
self.oa2_batch = {}
self.seq = []
self.a_batch = {}
self.r_batch = {}
random.seed(random_seed)
def add(self, traj):
if self.count < self.buffer_size:
self.AppendData(traj)
# self.buffer.append(traj)
# self.count += 1
else:
self.AppendData(traj)
self.RemoveFirst(self.ncount)
# self.buffer.popleft()
# self.buffer.append(traj)
'''
def makeHistory(self, traj):
hist = {}
for id, seq in traj:
hist[id] = []
oa[id] = []
r[id] = []
for j, val in enumerate(seq):
o, a ,r = val
oa = np.concatenate([o,a], axis =1)
'''
def RemoveFirst(self, count):
if not self.oa_batch:
return
else:
print "Removing Shit", count
assert(len(self.oa2_batch[0]) >= self.buffer_size)
for key in self.oa_batch:
del self.oa_batch[key][:count]
del self.oa2_batch[key][:count]
del self.a_batch[key][:count]
del self.r_batch[key][:count]
del self.seq[:count]
self.count -= count
def AppendData(self, traj):
#oa_batch, oa2_batch, seq = self.MergeData([_[0] for _ in traj], True)
#a_batch, _, _ = self.MergeData([_[1] for _ in traj])
#r_batch, _, _ = self.MergeData([_[2] for _ in traj])
oa_batch, a_batch, r_batch = traj
oa2_batch = {}
length = 0
cnt = 0
oa_copy = {}
for key in oa_batch:
data = oa_batch[key]
oa2_batch[cnt] = deepcopy(data)[self.min_hlen + 1:]
oa_copy[cnt] = deepcopy(data)[self.min_hlen:-1]
length = len(data) - 1
cnt += 1
seq = (np.arange(self.min_hlen, length) + 1).tolist()
'''
if not self.oa_batch:
self.oa_batch = oa_batch
self.oa2_batch = oa2_batch
self.a_batch = a_batch
self.r_batch = r_batch
self.seq = seq
else:
'''
cnt = 0
for key in oa_batch:
if cnt not in self.oa_batch:
self.oa_batch[cnt] = []
self.oa2_batch[cnt] = []
self.a_batch[cnt] = []
self.r_batch[cnt] = []
self.oa_batch[cnt] += oa_copy[cnt]
self.oa2_batch[cnt] += oa2_batch[cnt]
self.a_batch[cnt] += a_batch[key][self.min_hlen:-1]
self.r_batch[cnt] += r_batch[key][self.min_hlen:-1]
cnt += 1
self.seq += seq
self.count += len(seq)
self.ncount = len(seq)
def MergeData(self, batch, next = False):
new_batch = {}
next_batch = {}
seq = []
length = None
for b in batch:
cnt = 0
for key in b:
length = len(b[key])
if cnt not in new_batch:
new_batch[cnt] = deepcopy(b[key])[:-1]
#new_batch[cnt].pop(-1)
if next:
next_batch[cnt] = deepcopy(b[key])[1:]
#next_batch[cnt].pop(-1)
else:
#data1 = deepcopy(b[key])
#data2 = deepcopy(b[key])
#data1.pop(-1)
new_batch[cnt] += deepcopy(b[key])[:-1]
if next:
#data2.pop(0)
next_batch[cnt] += deepcopy(b[key])[1:]
cnt += 1
seq = np.arange(length).tolist()*len(batch)
return new_batch, next_batch, seq
def size(self):
return self.count
def sample_batch(self, batch_size):
batch = []
if self.count < batch_size:
#batch = random.sample(self.buffer, self.count)
index = np.random.choice(self.count, self.count)
else:
#batch = random.sample(self.buffer, batch_size)
index = np.random.choice(self.count, batch_size)
index = index.tolist()
oa_batch = {}
a_batch = {}
r_batch = {}
oa2_batch = {}
for key in self.oa_batch:
oa_batch[key] = [self.oa_batch[key][i] for i in index]
a_batch[key] = [self.a_batch[key][i] for i in index]
r_batch[key] = [self.r_batch[key][i] for i in index]
oa2_batch[key] = [self.oa2_batch[key][i] for i in index]
print len(self.seq), self.count
seq = [self.seq[i] for i in index]
return oa_batch, a_batch, r_batch, oa2_batch, seq
'''
oa_batch, oa2_batch, seq = self.MergeData([_[0] for _ in batch], True)
a_batch, _, _ = self.MergeData([_[1] for _ in batch])
r_batch, _, _ = self.MergeData([_[2] for _ in batch])
return oa_batch, a_batch, r_batch, oa2_batch, seq
'''
'''
s_batch = np.array([_[0] for _ in batch])
a_batch = np.array([_[1] for _ in batch])
r_batch = np.array([_[2] for _ in batch])
t_batch = np.array([_[3] for _ in batch])
s2_batch = np.array([_[4] for _ in batch])
return s_batch, a_batch, r_batch, t_batch, s2_batch
'''
#return batch
def clear(self):
self.buffer.clear()
self.count = 0
|
from __future__ import absolute_import
from lltk.corpus.corpus import Corpus,load_corpus
from lltk.text.text import Text
import os
"""
class LitHist(Corpus):
TEXT_CLASS=Text
PATH_TXT = 'lithist/_txt_lithist'
PATH_XML = 'lithist/_xml_lithist'
PATH_METADATA = 'lithist/corpus-metadata.LitHist.txt'
def __init__(self):
super(LitHist,self).__init__('LitHist',path_txt=self.PATH_TXT,path_xml=self.PATH_XML,path_metadata=self.PATH_METADATA)
self.path = os.path.dirname(__file__)
"""
from lltk.corpus.corpus import CorpusMeta,name2corpus
import os
def lithist_load_corpus(name,medium={},genre={}):
if name=='Chadwyck':
c=load_corpus(name)
c._texts = [t for t in c.texts() if t.year>1500 and t.year<1900]
elif name=='ChadwyckPoetry':
c=load_corpus(name)
#c._texts = [t for t in c.texts() if t.meta['posthumous']=='False' and t.year>1500 and t.year<2000]
elif name=='ChadwyckDrama':
c=load_corpus(name)
#c._texts = [t for t in c.texts() if t.meta['posthumous']=='False' and t.year>1500 and t.year<2000]
elif name=='ECCO_TCP_in_Sections':
c=load_corpus('ECCO_TCP').sections
c._texts = [t for t in c.texts() if t.year>=1700 and t.year<1800]
elif name=='ECCO_TCP':
c=load_corpus('ECCO_TCP')
c._texts = [t for t in c.texts() if t.year>=1700 and t.year<1800]
elif name=='EEBO_TCP_in_Sections':
c=load_corpus('EEBO_TCP').sections
c._texts = [t for t in c.texts() if t.year>=1500 and t.year<1700]
elif name=='EEBO_TCP':
c=load_corpus('EEBO_TCP')
c._texts = [t for t in c.texts() if t.year>=1500 and t.year<1700]
else:
c=load_corpus(name)
if medium:
c._texts=[t for t in c.texts() if t.medium in medium]
if genre:
c._texts=[t for t in c.texts() if t.genre in genre]
return c
class LitHist(CorpusMeta):
CORPORA=[
'Chadwyck','ChadwyckPoetry','ChadwyckDrama',
'ECCO_TCP','EEBO_TCP', #,'ECCO_TCP_in_Sections','EEBO_TCP_in_Sections' (too many files)
'Sellers', # 'TedJDH' (replicated in Sellers + ECCO_TCP)
'DialNarr', #LitLab (too noisy),
'MarkMark','Chicago','GildedAge',
'COHA','COCA','CLMET','OldBailey','EnglishDialogues',
'Spectator']
def __init__(self, name_meta='LitHist',corpora=None):
if not corpora:
corpora=[lithist_load_corpus(c) for c in self.CORPORA]
corpora=[x for x in corpora if x is not None]
#print(corpora)
super(LitHist,self).__init__(name=name_meta,corpora=corpora)
self.path=os.path.join(self.path,'lithist')
class LitHistProse(LitHist):
#CORPORA = [x for x in LitHist.CORPORA if not x in {'ChadwyckPoetry','ChadwyckDrama'}]
CORPORA=[
'Chadwyck',
#'ECCO_TCP','EEBO_TCP', #,
'ECCO_TCP_in_Sections','EEBO_TCP_in_Sections', # (too many files)
'Sellers', # 'TedJDH' (replicated in Sellers + ECCO_TCP)
'MarkMark','Chicago',
'COHA','Spectator']
def __init__(self,name='LitHistProse',corpora=None):
#super(LitHist,self).__init__(name=name,corpora=None)
if not corpora:
corpora=[lithist_load_corpus(c,medium='Prose') for c in self.CORPORA]
corpora=[x for x in corpora if x is not None] # and len(x.texts())>0]
#print(corpora)
super(LitHist,self).__init__(name=name,corpora=corpora)
# filter for prose
#print(len(self.texts()))
self._texts = [t for t in self.texts() if t.medium=='Prose']
#print(len(self.texts()))
self.path=os.path.join(self.path,'lithist')
class LitHistAuthors(CorpusMeta):
CORPORA={'Chadwyck','ChadwyckPoetry','ChadwyckDrama','COHA','ECCO_TCP','EEBO_TCP'} #,'Sellers','CLMET','Spectator','Chicago','MarkMark'}
YEAR_AUTHOR_30_MIN=1500
def __init__(self, name='LitHistAuthors',corpora=None):
if not corpora: corpora=[]
        for corpus_name in self.CORPORA:
            c=load_corpus(corpus_name)
if c is None: continue
c._texts = [t for t in c.texts() if t.year_author_is_30>self.YEAR_AUTHOR_30_MIN]
corpora.append(c)
super(LitHistAuthors,self).__init__(name=name,corpora=corpora)
self.path=os.path.join(self.path,'lithist')
class LitHistHathi(CorpusMeta):
CORPORA=LitHist.CORPORA + ['HathiEngLit','HathiBio']
def __init__(self, name_meta='LitHistHathi',corpora=None):
if not corpora:
corpora=[lithist_load_corpus(c) for c in self.CORPORA]
corpora=[x for x in corpora if x is not None]
#print(corpora)
super().__init__(name=name_meta,corpora=corpora)
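# Usage sketch (illustrative; assumes the lltk corpora listed in CORPORA are
# installed and loadable on this machine):
#   corpus = LitHist()
#   for t in corpus.texts():
#       print(t.year, t.medium, t.genre)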
|
#
# Copyright (c) 2019. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
import pytest
from lets_plot.plot.core import FeatureSpecArray, DummySpec
from lets_plot.plot.scale_convenience import *
@pytest.mark.parametrize('scale_spec, expected', [
(xlim(), {'dummy-feature': True}),
(ylim(), {'dummy-feature': True}),
])
def test_scale_spec_dummy(scale_spec, expected):
_check(expected, scale_spec.as_dict())
@pytest.mark.parametrize('scale_spec, expected', [
(xlim(-1, 1), {
'aesthetic': 'x',
'limits': [-1, 1],
}),
(xlim(None, 1), {
'aesthetic': 'x',
'limits': [None, 1],
}),
(xlim(-1, None), {
'aesthetic': 'x',
'limits': [-1, None],
}),
(xlim(-1), {
'aesthetic': 'x',
'limits': [-1],
}),
])
def test_scale_spec_continuous_x(scale_spec, expected):
# print('---------')
# print(expected)
# print(scale_spec.as_dict())
_check(expected, scale_spec.as_dict())
@pytest.mark.parametrize('scale_spec, expected', [
(ylim(-1, 1), {
'aesthetic': 'y',
'limits': [-1, 1],
}),
(ylim(None, 1), {
'aesthetic': 'y',
'limits': [None, 1],
}),
(ylim(-1, None), {
'aesthetic': 'y',
'limits': [-1, None],
}),
(ylim(-1), {
'aesthetic': 'y',
'limits': [-1],
}),
])
def test_scale_spec_continuous_y(scale_spec, expected):
# print('---------')
# print(expected)
# print(scale_spec.as_dict())
_check(expected, scale_spec.as_dict())
@pytest.mark.parametrize('scale_spec, expected', [
(xlim('a', 'b'), {
'aesthetic': 'x',
'limits': ['a', 'b'],
'discrete': True
}),
(xlim(1, 'b'), {
'aesthetic': 'x',
'limits': [1, 'b'],
'discrete': True
}),
])
def test_scale_spec_discrete_x(scale_spec, expected):
# print('---------')
# print(expected)
# print(scale_spec.as_dict())
_check(expected, scale_spec.as_dict())
@pytest.mark.parametrize('scale_spec, expected', [
(ylim('a', 'b'), {
'aesthetic': 'y',
'limits': ['a', 'b'],
'discrete': True
}),
(ylim(1, 'b'), {
'aesthetic': 'y',
'limits': [1, 'b'],
'discrete': True
}),
])
def test_scale_spec_discrete_y(scale_spec, expected):
# print('---------')
# print(expected)
# print(scale_spec.as_dict())
_check(expected, scale_spec.as_dict())
@pytest.mark.parametrize('spec, expected_x, expected_y', [
(lims([-1, 1], [-2, 2]), {'limits': [-1, 1]}, {'limits': [-2, 2]}),
(lims((-1, 1), (-2, 2)), {'limits': [-1, 1]}, {'limits': [-2, 2]}),
(lims(None, [-2, 2]), None, {'limits': [-2, 2]}),
(lims([-1, 1], None), {'limits': [-1, 1]}, None),
(lims(None, None), None, None),
(lims(['a', 'b', 'c'], ['d', 'e', 'f']), {'limits': ['a', 'b', 'c'], 'discrete': True}, {'limits': ['d', 'e', 'f'], 'discrete': True}),
])
def test_scale_spec_lims_all(spec, expected_x, expected_y):
# print('---------')
# print(expected_x)
# print(expected_y)
# print(spec.as_dict())
if expected_x is None and expected_y is None:
assert isinstance(spec, DummySpec)
elif expected_x is None:
_check({'aesthetic': 'y', **expected_y}, spec.as_dict())
elif expected_y is None:
_check({'aesthetic': 'x', **expected_x}, spec.as_dict())
else:
assert isinstance(spec, FeatureSpecArray)
assert len(spec.elements()) == 2
scale_x = spec.elements()[0]
_check({'aesthetic': 'x', **expected_x}, scale_x.as_dict())
scale_y = spec.elements()[1]
_check({'aesthetic': 'y', **expected_y}, scale_y.as_dict())
def _check(expected, actual):
for key in set(expected) | set(actual):
if key in expected:
assert actual[key] == expected[key]
else:
assert actual[key] is None
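# Quick manual check (not part of the pytest suite; a minimal sketch using only
# calls already exercised by the tests above). In plotting code these
# convenience specs are typically added to a ggplot chain, e.g.
# ggplot(df) + geom_point() + xlim(-1, 1).
if __name__ == '__main__':
    print(xlim(-1, 1).as_dict())     # includes 'aesthetic': 'x', 'limits': [-1, 1]
    print(ylim('a', 'b').as_dict())  # discrete y-limits: 'limits': ['a', 'b'], 'discrete': True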
|
"""Console script for {{cookiecutter.pkg_name}}."""
{% if cookiecutter.command_line_interface|lower == 'y' -%}
import typer
main = typer.Typer()
@main.command()
def run() -> None:
"""Main entrypoint."""
typer.secho("{{ cookiecutter.project_slug }}", fg=typer.colors.BRIGHT_WHITE)
typer.secho("=" * len("{{ cookiecutter.project_slug }}"), fg=typer.colors.BRIGHT_WHITE)
typer.secho(
"{{ cookiecutter.project_short_description }}",
fg=typer.colors.BRIGHT_WHITE,
)
if __name__ == "__main__":
main() # pragma: no cover
{%- endif %}
|
#!/usr/bin/env python3
import argparse
import datetime
import glob
import inspect
import os
import subprocess
import sys
import yaml
from collections import defaultdict
class SafeDict(dict):
def __missing__(self, key):
return ''
def get_script_dir():
return os.path.dirname(inspect.getabsfile(get_script_dir))
SCRIPTDIR = get_script_dir()
parser = argparse.ArgumentParser(description='Convert a batch config to a collection of run scripts')
parser.add_argument('config', type=str, help='config file' )
parser.add_argument('outdir', type=str, help='directory of output')
parser.add_argument('-v', '--verbose', action='store_true', default=False)
args = parser.parse_args()
SCRIPT_PREAMBLE = '''#!/bin/sh
# don't allow unset variables
set -o nounset
# be verbose as we execute
set -x
TIMESTAMP=`date +'%Y%m%d_%H%M%S'`
# make sure python log files are in order
export PYTHONUNBUFFERED=true
'''
SCRIPT_PARAMS = '''
SCRIPTDIR={scriptdir}
SCRATCHDIR={scratchdir}
BENCH_TARGETS={bench_targets}
BENCH_CORE={bench_core}
ENVIRONMENT={environment}
EXEC_SPEC={exec_spec}
CODESPEED_URL={codespeed_url}
CODESPEED_DB={ocamlspeed_dir}/data/data.db
ARCHIVE_DIR={ocamlspeed_dir}/artifacts/
GITHUB_USER={github_user}
GITHUB_REPO={github_repo}
BRANCH={branch}
FIRST_COMMIT={first_commit}
MAX_HASHES={max_hashes}
OCAML_VERSION={ocaml_version}
RUN_PATH_TAG={run_path_tag}
CONFIGURE_OPTIONS="{configure_options}"
OCAMLRUNPARAM="{ocamlrunparam}"
CODESPEED_NAME={codespeed_name}
'''
SCRIPT_BODY = '''
RUNDIR=${SCRATCHDIR}/${RUN_PATH_TAG}
RUN_STAGES=setup,bench,archive,upload
# needed to get the path to include a dune binary
# NB: a full eval $(opam config env) breaks the sandmark build in a strange way...
eval $(opam config env | grep ^PATH=)
mkdir -p ${ARCHIVE_DIR}
## STAGES:
## - get local copy of git repo
## - setup target codespeed db to see project
## - run backfill script to do it
cd $SCRIPTDIR
## get local copy of git repo
REPO=${GITHUB_USER}__${GITHUB_REPO}
if [ ! -d ${REPO} ]; then
git clone https://github.com/${GITHUB_USER}/${GITHUB_REPO}.git ${REPO}
fi
## setup target codespeed db to see project
sqlite3 ${CODESPEED_DB} "INSERT INTO codespeed_project (name,repo_type,repo_path,repo_user,repo_pass,commit_browsing_url,track,default_branch) SELECT '${CODESPEED_NAME}', 'G', 'https://github.com/${GITHUB_USER}/${GITHUB_REPO}', '${GITHUB_USER}', '', 'https://github.com/${GITHUB_USER}/${GITHUB_REPO}/commit/{commitid}',1,'${BRANCH}' WHERE NOT EXISTS(SELECT 1 FROM codespeed_project WHERE name = '${CODESPEED_NAME}')"
## run backfill script
./run_sandmark_backfill.py --run_stages ${RUN_STAGES} --branch ${BRANCH} --main_branch ${BRANCH} --repo ${REPO} --repo_pull --repo_reset_hard --use_repo_reference --max_hashes ${MAX_HASHES} --incremental_hashes --commit_choice_method from_hash=${FIRST_COMMIT} --executable_spec=${EXEC_SPEC} --environment ${ENVIRONMENT} --sandmark_comp_fmt https://github.com/${GITHUB_USER}/${GITHUB_REPO}/archive/{tag}.tar.gz --sandmark_tag_override ${OCAML_VERSION} --sandmark_iter 1 --sandmark_pre_exec="'taskset --cpu-list "${BENCH_CORE}" setarch `uname -m` --addr-no-randomize'" --sandmark_run_bench_targets ${BENCH_TARGETS} --archive_dir ${ARCHIVE_DIR} --codespeed_url ${CODESPEED_URL} --configure_options="${CONFIGURE_OPTIONS}" --ocamlrunparam="${OCAMLRUNPARAM}" --upload_project_name ${CODESPEED_NAME} -v ${RUNDIR}
'''
def shell_exec(cmd, verbose=args.verbose, check=False, stdout=None, stderr=None):
if verbose:
print('+ %s'%cmd)
return subprocess.run(cmd, shell=True, check=check, stdout=stdout, stderr=stderr)
outdir = os.path.abspath(args.outdir)
if args.verbose: print('making directory: %s'%outdir)
shell_exec('mkdir -p %s'%outdir)
global_conf = {
'scriptdir': SCRIPTDIR,
'bench_targets': 'run_orun'
}
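# Example config layout (illustrative only: the key names below are inferred
# from the SCRIPT_PARAMS placeholders above, and whether a key lives at the
# top level or inside a tracked_branches entry is an assumption):
#
#   scratchdir: /local/scratch
#   ocamlspeed_dir: /srv/ocamlspeed
#   codespeed_url: http://localhost:8000/
#   bench_core: 5
#   environment: bench-machine
#   tracked_branches:
#     - codespeed_name: ocaml-trunk
#       github_user: ocaml
#       github_repo: ocaml
#       branch: trunk
#       first_commit: <sha>
#       max_hashes: 20
#       ocaml_version: 4.14.0
#       run_path_tag: ocaml-trunk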
# read in yaml config
with open(args.config, 'r') as stream:
try:
conf = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print('YAMLError: %s'%exc)
sys.exit(1)
# output the script
for run_conf in conf['tracked_branches']:
fname = os.path.join(outdir, '%s.sh'%run_conf['codespeed_name'])
with open(fname, 'w') as outfile:
outfile.write(SCRIPT_PREAMBLE)
conf_str = SCRIPT_PARAMS.format_map(SafeDict(**{**global_conf, **conf, **run_conf}))
outfile.write(conf_str)
outfile.write(SCRIPT_BODY)
shell_exec('chmod +x %s'%fname)
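# Invocation sketch (illustrative; the script's on-disk filename is not shown
# here, so it is assumed):
#   ./make_run_scripts.py batch_config.yml generated_scripts/ --verbose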
|