| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
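The rows below read as fill-in-the-middle (FIM) samples: each source file is split into a prefix, a held-out middle, and a suffix. A minimal sketch of how one such row might be reassembled into a single training string follows; the sentinel tokens and the example record are illustrative assumptions, not part of this dump.

```python
# Minimal sketch (assumption: rows are used as fill-in-the-middle samples; the sentinel
# tokens below are illustrative placeholders, not specified anywhere in this dump).
def to_fim_string(row):
    """Reassemble one row's prefix/middle/suffix columns into a single FIM string."""
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

# Hypothetical record shaped like the rows below.
example = {"prefix": "def add(a, b):\n    return ", "middle": "a + b", "suffix": "\n"}
print(to_fim_string(example))
```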
| Sw4T/Warband-Development | mb_warband_module_system_1166/Module_system 1.166/module_parties.py | Python | mit | 34,291 | 0.065207 |
from compiler import *
####################################################################################################################
# Each party record contains the following fields:
# 1) Party id: used for referencing parties in other files.
# The prefix p_ is automatically added before each party id.
# 2) Party name.
# 3) Party flags. See header_parties.py for a list of available flags
# 4) Menu. ID of the menu to use when this party is met. The value 0 uses the default party encounter system.
# 5) Party-template. ID of the party template this party belongs to. Use pt.none as the default value.
# 6) Faction.
# 7) Personality. See header_parties.py for an explanation of personality flags.
# 8) Ai-behavior
# 9) Ai-target party
# 10) Initial coordinates.
# 11) List of stacks. Each stack record is a triple that contains the following fields:
# 11.1) Troop-id.
# 11.2) Number of troops in this stack.
# 11.3) Member flags. Use pmf_is_prisoner to note that this member is a prisoner.
# 12) Party direction in degrees [optional]
####################################################################################################################
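# For illustration only (hypothetical entry, not part of the original module): a record
# following the field order documented above could look like
#   ("example_camp", "Example Camp", pf_is_static, no_menu, pt.none, fac.neutral, 0,
#    ai_bhvr_hold, 0, (10, 20), [(trp.temp_troop, 5, 0)], 90),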
no_menu = 0
#pf_town = pf_is_static|pf_always_visible|pf_hide_defenders|pf_show_faction
pf_town = pf_is_static|pf_always_visible|pf_show_faction|pf_label_large
pf_castle = pf_is_static|pf_always_visible|pf_show_faction|pf_label_medium
pf_village = pf_is_static|pf_always_visible|pf_hide_defenders|pf_label_small
#sample_party = [(trp.swadian_knight,1,0), (trp.swadian_peasant,10,0), (trp.swadian_crossbowman,1,0), (trp.swadian_man_at_arms, 1, 0), (trp.swadian_footman, 1, 0), (trp.swadian_militia,1,0)]
# NEW TOWNS:
# NORMANDY: Rouen, Caen, Bayeux, Coutances, Evreux, Avranches
# Brittany: Rennes, Nantes,
# Maine: Le Mans
# Anjou: Angers
parties = [
("main_party","Main Party",icon.player|pf_limit_members, no_menu, pt.none,fac.player_faction,0,ai_bhvr_hold,0,(17, 52.5),[(trp.player,1,0)]),
#WARP Arrays
("agents_array","{!}agents_array",pf_disabled, no_menu, pt.none, fac.commoners,0,ai_bhvr_hold,0,(0,0),[]),
("temp_party","{!}temp_party",pf_disabled, no_menu, pt.none, fac.commoners,0,ai_bhvr_hold,0,(0,0),[]),
("warp_temp","{!}warp_temp",pf_disabled, no_menu, pt.none, fac.commoners,0,ai_bhvr_hold,0,(0,0),[]),
("camp_bandits","{!}camp_bandits",pf_disabled, no_menu, pt.none, fac.outlaws,0,ai_bhvr_hold,0,(1,1),[(trp.temp_troop,3,0)]),
#parties before this point are hardwired. Their order should not be changed.
("temp_party_2","{!}temp_party_2",pf_disabled, no_menu, pt.none, fac.commoners,0,ai_bhvr_hold,0,(0,0),[]),
#Used for calculating casualties.
("temp_casualties","{!}casualties",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("temp_casualties_2","{!}casualties",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("temp_casualties_3","{!}casualties",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("temp_wounded","{!}enemies_wounded",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("temp_killed", "{!}enemies_killed", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("main_party_backup","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("encountered_party_backup","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
# ("ally_party_backup","_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("collective_friends_backup","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("player_casualties","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("enemy_casualties","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("ally_casualties","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("collective_enemy","{!}collective_enemy",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
#TODO: remove this and move all to collective ally
("collective_ally","{!}collective_ally",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("collective_friends","{!}collective_ally",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
("total_enemy_casualties","{!}_", pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]), #ganimet hesaplari icin #new:
("routed_enemies","{!}routed_enemies",pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]), #new:
# ("village_reinforcements","village_reinforcements",pf_is_static|pf_disabled, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(1,1),[]),
###############################################################
("zendar","Zendar",pf_disabled|icon.town|pf_is_static|pf_always_visible|pf_hide_defenders, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(18,60),[]),
("town_1","Sargoth", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-17.6, 79.7),[], 170),
("town_2","Tihr", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-53.5, 78.4),[], 120),
("town_3","Veluca", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-57.4, -44.5),[], 80),
("town_4","Suno", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-70, 15.4),[], 290),
("town_5","Jelkala", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-74.6, -79.7),[], 90),
("town_6","Praven", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-96, 26.4),[], 155),
("town_7","Uxkhal", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-50, -8.5),[], 240),
("town_8","Reyvadin", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(48.44, 39.3),[], 175),
("town_9","Khudan", icon.town_snow|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(94, 65.2),[], 90),
("town_10","Tulga", icon.town_steppe|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(135.5, -22),[], 310),
("town_11","Curaw", icon.town_snow|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(43, 67.5),[], 150),
("town_12","Wercheg", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-1.2, 108.9),[], 25),
("town_13","Rivacheg",icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(64.8, 113.7),[], 60),
("town_14","Halmar", icon.town_steppe|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(55.5, -45),[], 135),
("town_15","Yalen", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-132.8, -47.3),[], 45),
("town_16","Dhirim", icon.town|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(14, -2),[], 0),
("town_17","Ichamur", icon.town_steppe|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(121.8, 8.6),[], 90),
("town_18","Narra", icon.town_steppe|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(88, -26.5),[], 135),
("town_19","Shariz", icon.town_desert|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(15, -107),[], 45),
("town_20","Durquba", icon.town_desert|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(90, -95.1),[], 270),
("town_21","Ahmerrad", icon.town_desert|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(130.5, -78.5),[], 330),
("town_22","Bariyye", icon.town_desert|pf_town, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(165, -106.7),[], 225),
# Aztaq_Castle
# Malabadi_Castle
("castle_1","Culmarr_Castle",icon.castle_a|pf_castle, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-101.3, -21),[],50),
("castle_2","Malayurg_Castle",icon.castle_b|pf_castle, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(97.5, -2.2),[],75),
("castle_3","Bulugha_Castle",icon.castle_a|pf_castle, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(47.5, 111.3),[],100),
("castle_4","Radoghir_Castle",icon.castle_c|pf_castle, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(32.5, 47.8),[],180),
("castle_5","Tehlrog_Castle",icon.castle_c|pf_castle, no_menu, pt.none, fac.neutral,0,ai_bhvr_hold,0,(-4.8, 63.7),[],90),
("
| daanwierstra/pybrain | pybrain/rl/environments/cartpole/balancetask.py | Python | bsd-3-clause | 4,061 | 0.008126 |
__author__ = 'Thomas Rueckstiess and Tom Schaul'
from pybrain.rl.environments.cartpole.nonmarkovpole import NonMarkovPoleEnvironment
from pybrain.rl.tasks import EpisodicTask
from cartpole import CartPoleEnvironment
from scipy import pi, dot, array
class BalanceTask(EpisodicTask):
""" The task of balancing some pole(s) on a cart """
def __init__(self, env = None, maxsteps = 1000):
"""
@param env: (optional) an instance of a CartPoleEnvironment (or a subclass thereof)
@param maxsteps: maximal number of steps (default: 1000)
"""
if env == None:
env = CartPoleEnvironment()
EpisodicTask.__init__(self, env)
self.N = maxsteps
self.t = 0
# scale position and angle, don't scale velocities (unknown maximum)
self.sensor_limits = [(-3, 3)]#, None, (-pi, pi), None]
for i in range(1,self.outdim):
if isinstance(self.env, NonMarkovPoleEnvironment) and i%2 == 0:
self.sensor_limits.append(None)
else:
self.sensor_limits.append((-pi, pi))
self.sensor_limits = [None]*4
# actor between -10 and 10 Newton
self.actor_limits = [(-10, 10)]
def reset(self):
EpisodicTask.reset(self)
self.t = 0
def performAction(self, action):
self.t += 1
EpisodicTask.performAction(self, action)
def isFinished(self):
if max(map(abs, self.env.getPoleAngles())) > 0.7:
# pole has fallen
return True
elif abs(self.env.getCartPosition()) > 2.4:
# cart is out of its border conditions
return True
elif self.t >= self.N:
# maximal timesteps
return True
return False
def getReward(self):
angles = map(abs, self.env.getPoleAngles())
s = abs(self.env.getCartPosition())
reward = 0
if min(angles) < 0.05 and abs(s) < 0.05:
reward = 0
elif max(angles) > 0.7 or abs(s) > 2.4:
reward = -2 * (self.N - self.t)
else:
reward = -1
return reward
def setMaxLength(self, n):
self.N = n
class JustBalanceTask(BalanceTask):
""" this task does not require the cart to be moved to the middle. """
def getReward(self):
angles = map(abs, self.env.getPoleAngles())
s = abs(self.env.getCartPosition())
reward = 0
if min(angles) < 0.05:
reward = 0
elif max(angles) > 0.7 or abs(s) > 2.4:
reward = -2 * (self.N - self.t)
else:
reward = -1
return reward
class EasyBalanceTask(BalanceTask):
""" th
|
is task is a bit easier to learn because it gives gradual feedback
about the distance to the centre. """
def getReward(self):
angles = map(abs, self.env.getPoleAngles())
s = abs(self.env.getCartPosition())
reward = 0
if min(angles) < 0.05 and abs(s) < 0.05:
reward = 0
elif max(angles) > 0.7 or abs(s) > 2.4:
reward = -2 * (self.N - self.t)
else:
reward = -abs(s)/2
return reward
class LinearizedBalanceTask(BalanceTask):
""" Here we follow the setup in
Peters J, Vijayakumar S, Schaal S (2003) Reinforcement learning for humanoid robotics.
TODO: This stuff is not yet compatible to any other cartpole environment. """
Q = array([12., 0.25, 1.25, 1.0])
def getReward(self):
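# Quadratic cost as in the linearised setup referenced above: squared state terms
# weighted by Q plus a small (0.01) penalty on the squared action.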
return dot(self.env.sensors**2, self.Q) + self.env.action[0]**2*0.01
def isFinished(self):
if abs(self.env.getPoleAngles()[0]) > 0.5235988: # pi/6
# pole has fallen
return True
elif abs(self.env.getCartPosition()) > 1.5:
# cart is out of its border conditions
return True
elif self.t >= self.N:
# maximal timesteps
return True
return False
| kustomzone/augur-core | pyrpctools/__init__.py | Python | gpl-3.0 | 1,125 | 0.008 |
import os
import sys
import math
import time
import json
from rpc_client import RPC_Client
ROOT = os.path.dirname(os.path.realpath(sys.argv[0]))
DBPATH = os.path.join(ROOT, 'build.json')
MAXGAS = hex(int(math.pi*1e6))
def get_db():
with open(DBPATH) as dbfile:
return json.load(dbfile)
def save_db(db):
with open(DBPATH, 'w') as dbfile:
json.dump(db, dbfile, sort_keys=True, indent=4)
def confirmed_send(
to=None, sender=None, gas=MAXGAS,
data=None, value=None, blocktime=12,
rpc=None):
if rpc is None:
rpc = RPC_Client()
response = rpc.eth_sendTransaction({'to':to,
'from':sender,
'gas':gas,
'data':data,
'value':value})
assert 'error' not in response, json.dumps(response, indent=4, sort_keys=True)
txhash = response['result']
while True:
receipt = rpc.eth_getTransactionReceipt(txhash)
if receipt['result']:
return receipt
time.sleep(blocktime)
| philbull/ghost | halomodel.py | Python | mit | 6,878 | 0.00916 |
#!/usr/bin/python
"""
Halo mass function and halo bias model.
"""
import numpy as np
import scipy.integrate
import pylab as P
#om = 0.3
#h = 0.7
#gamma = 0.55
class HaloModel(object):
def __init__(self, pkfile, om=0.272, h=0.728, gamma=0.55, ampfac=1.):
"""
Initialise HaloModel class.
"""
# Cosmo params
self.om = om
self.h = h
self.gamma=gamma
# Define critical density and Sheth-Tormen params (see
# Sheth & Tormen 1999)
self.delta_c = 1.68647
self.a = 0.707
self.A = 0.322
self.p = 0.3
# Define Tinker et al. parameters (see their Table 2)
self.A0 = 0.186
self.a0 = 1.47
self.b0 = 2.57
self.c = 1.19
# Load matter power spectrum, P(k) (assumed to be in non-h^-1 units)
self.k, self.pk = np.genfromtxt(pkfile).T[:2]
self.pk *= ampfac**2.
# Convert to non-h^-1 units
#self.k *= 1. / self.h
#self.pk *= self.h**3.
def fgrowth(self, z):
"""
Generalised form for the growth rate.
"""
Ez2 = self.om * (1. + z)**3. + (1. - self.om)
Oma = self.om * (1. + z)**3. / Ez2
return Oma**self.gamma
def growth_fn(self, z):
"""
Calculate growth function, D(z), at a given redshift. Normalised to
D=1 at z=0.
"""
_z = np.linspace(0., z, 200)
a = 1. / (1. + _z)
_f = self.fgrowth(_z)
_D = np.concatenate( ([0.,], scipy.integrate.cumtrapz(_f, np.log(a))) )
_D = np.exp(_D)
return _D[-1]
def M_for_R(self, R, z=0.):
"""
Mass contained within a sphere of radius R in the background.
"""
rho = self.h*self.h*self.om * (1. + z)**3. * 2.776e11 # in Msun/Mpc^3
return 4./3. * np.pi * R**3. * rho
def R_for_M(self, M, z=0.):
"""
Comoving radius as a function of mass (mass in M_sun).
"""
rho = self.h*self.h*self.om * (1. + z)**3. * 2.776e11 # in Msun/Mpc^3
return (3.*M / (4. * np.pi * rho))**(1./3.)
def sigma_R(self, R, z=0.):
"""
Calculate the linear rms fluctuation, sigma(R), as a function of tophat
smoothing scale, R.
"""
# FIXME: Doesn't deal with redshift properly
k = self.k
pk = self.pk
D = self.growth_fn(z)
W = 3. * (np.sin(k*R) - k*R*np.cos(k*R)) / (k*R)**3.
# Integrate over window function
sig_r = scipy.integrate.simps(pk*(D*k*W)**2., k)
sig_r /= (2. * np.pi**2.)
return np.sqrt(sig_r)
def dlogsigM_dlogM(self, M, sig):
"""
Logarithmic derivative of sigma(M) with respect to M, i.e.
d log(sigma(M)) / d log(M)
"""
coeffs = np.polyfit(np.log(M), np.log(sig), deg=4)
p = np.poly1d(coeffs)
return p.deriv()(np.log(M))
def bias(self, M, z=0.):
"""
Calculate the halo bias, b(M, z), using Eq. 12 of Sheth & Tormen 1999.
"""
delta_c = self.delta_c
A = self.A
a = self.a
p = self.p
# Calculate sigma(R) for this mass scale
R = self.R_for_M(M)
sigR = np.array([self.sigma_R(_R, z) for _R in R])
# Calculate Eulerian bias
delta1 = delta_c # FIXME: Not strictly correct, should be fn. of Omega_m
v1 = (delta1 / sigR)**2.
b = 1. + (a*v1 - 1.) / delta1 + (2.*p/delta1) / (1. + (a*v1)**p)
return b
def dndlogm(self, M, z=0., type='tinker'):
"""
Halo mass function in log mass.
"""
if type == 'tinker':
return M * self.n_tinker(M, z)
else:
return M * self.n_sheth_tormen(M, z)
def n_sheth_tormen(self, M, z=0.):
"""
Halo mass function, dn/dm, as a function of mass and redshift, from
Sheth & Tormen (1999).
"""
delta_c = self.delta_c
A = self.A
a = self.a
p = self.p
rho = self.h*self.h*self.om * (1. + z)**3. * 2.776e11 # in Msun/Mpc^3
# Integrate to find sigma(R)
R = self.R_for_M(M)
sigR = np.array([self.sigma_R(_R, z) for _R in R])
# Get logarithmic derivative
dlogsig = self.dlogsigM_dlogM(M, sigR)
# Evaluate mass function shape
v = (self.delta_c / sigR)**2.
vfv = A * (1. + 1./(a*v)**p) * np.sqrt(a*v/(2.*np.pi)) * np.exp(-a*v/2.)
# Evaluate halo mass function
nm = -2. * rho/M**2. * vfv * dlogsig
return nm
def n_tinker(self, M, z=0.):
"""
Halo mass function, dn/dm, as a function of mass and redshift. Taken
from Eqs. 1, 2, 5-8, and Table 2 of Tinker et al. 2008 [arXiv:0803.2706].
"""
# Redshift scaling of parameters, from Eqs. 5-8 of Tinker et al.
Delta = 200. # Define standard overdensity
alpha = 10.**( -(0.75 / np.log10(Delta / 75.))**1.2 )
A = self.A0 * (1. + z)**(-0.14)
a = self.a0 * (1. + z)**(-0.06)
b = self.b0 * (1. + z)**alpha
c = self.c
rho = self.h*self.h*self.om * (1. + z)**3. * 2.776e11 # in Msun/Mpc^3
# Integrate to find sigma(R)
R = self.R_for_M(M)
sigR = np.array([self.sigma_R(_R, z) for _R in R])
# Get logarithmic derivative
dlogsig = self.dlogsigM_dlogM(M, sigR)
# Evaluate shape function
fsig = A * ((sigR/b)**(-a) + 1.) * np.exp(-c / sigR**2.)
# Return halo mass function
return -fsig * rho / M**2. * dlogsig
def MHI(self, M, z=0.):
"""
HI mass as a function of halo mass.
(Eq. 3.2 of arXiv:1405.6713; Bagla model)
"""
f3 = 0.014405 # Should be obtained by matching Omega_HI = obs ~ 10^-3.
vmin = 30. # km/s
vmax = 200. # km/s
Mmin = 1e10 * (vmin/60.)**3. * ((1.+z)/4.)**-1.5
Mmax = 1e10 * (vmax/60.)**3. * ((1.+z)/4.)**-1.5
# Calculate M_HI(M)
M_HI = f3 * M / (1. + (M / Mmax))
M_HI[np.where(M < Mmin)] = 0.
return M_HI
def cumulative_hi_mass(self, M, z):
"""
Cumulative fraction of total HI mass as a function of M_halo.
"""
Ez2 = self.om * (1. + z)**3. + (1. - self.om)
rho_c = self.h*self.h * Ez2 * 2.776e11 # in Msun/Mpc^3
M_HI = self.MHI(M, z)
nm = self.n(M, z)
# Vague n(M) axion modification
#nm[np.where(M < 1e10)] *= 0.7
omega_hi = scipy.integrate.simps(nm*M_HI, M) / rho_c
cumul_mhi = scipy.integrate.cumtrapz(nm*M_HI, M, initial=0.)
return cumul_mhi / cumul_mhi[-1]
| Imperium-Software/resolver | tests/test_factory.py | Python | mit | 511 | 0.009785 |
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
print(myPath)
sys.path.insert(0, myPath + '/../SATSolver')
from unittest import TestCase
from SATSolver.individual import Factory
class TestFactory(TestCase):
"""
Test class for Factory
"""
def test_create(self):
factory = Factory()
population = factory.create(10,50)
self.assertEqual(50, len(population))
for individual in population:
self.assertEqual(individual.length, 10)
| uppsaladatavetare/foobar-api | src/wallet/tests/factories.py | Python | mit | 926 | 0 |
import uuid
import factory.fuzzy
from .. import models, enums
from moneyed import Money
from utils.factories import FuzzyMoney
class WalletFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.Wallet
owner_id = factory.Sequence(lambda n: str(uuid.uuid4()))
balance = Money(0, 'SEK')
class WalletTrxFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.WalletTransaction
wallet = factory.SubFactory(WalletFactory)
amount = FuzzyMoney(0, 100000)
class WalletTrxStatusFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.WalletTransactionStatus
trx = factory.SubFactory(WalletTrxFactory)
status = enums.TrxStatus.FINALIZED
class WalletTrxWithStatusFactory(WalletTrxFactory):
states = factory.RelatedFactory(
WalletTrxStatusFactory,
'trx',
status=enums.TrxStatus.FINALIZED
)
| xylophonw/cwspy | cwspy/data.py | Python | mit | 1,465 | 0.005461 |
from datetime import datetime
from collections import namedtuple
BASE_URL = 'http://conworkshop.com/'
class User(namedtuple('User', 'uid name gender bio country karma')):
@property
def link(self):
'''Return a URL in a string to the user's profile page on CWS.'''
return ''.join([BASE_URL, 'view_profile.php?m=', self.uid])
@property
def avatar(self):
'''Return a URL in a string to the user's avatar image on CWS.'''
return ''.join([BASE_URL, 'ava/', self.uid, '.png'])
defaultUser = User('', '', 'Other', '', 'Unknown', [0,0])
Type = namedtuple('Type', 'code desc')
defaultType = Type('', '')
Status = namedtuple('Status', 'code desc')
defaultStatus = Status('', '')
class Language(namedtuple('Language', ['code', 'name', 'native_name', 'ipa',
'lang_type', 'owners', 'overview', 'public',
'status', 'registered', 'word_count', 'karma'])):
@property
def link(self):
'''Return a URL in a string to the language's page on CWS.'''
return ''.join([BASE_URL, 'view_language.php?l=', self.code])
@property
def flag(self):
'''Return a URL in a string to the language's flag image on CWS.'''
return ''.join([BASE_URL, 'img/flags/', self.code, '.png'])
defaultLanguage = Language('', '', '', '', defaultType, [], '', True,
defaultStatus, datetime.now(), 0, [0,0])
| les69/calvin-base | calvin/runtime/south/plugins/storage/twistedimpl/securedht/dht_server.py | Python | apache-2.0 | 9,399 | 0.002873 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
import time
import Queue
import os
import OpenSSL.crypto
from twisted.internet import reactor, defer, threads
from calvin.runtime.south.plugins.storage.twistedimpl.securedht.append_server import AppendServer
from calvin.runtime.south.plugins.storage.twistedimpl.securedht.service_discovery_ssdp import SSDPServiceDiscovery
from calvin.utilities import certificate
from calvin.runtime.north.plugins.storage.storage_base import StorageBase
from calvin.utilities import calvinlogger
from calvin.utilities import calvinconfig
_conf = calvinconfig.get()
_log = calvinlogger.get_logger(__name__)
def logger(message):
_log.debug(message)
#print message
class ServerApp(object):
def __init__(self, server_type, identifier):
self.kserver = None
self.port = None
self.server_type = server_type
self.id = identifier
def start(self, port=0, iface='', bootstrap=None):
if bootstrap is None:
bootstrap = []
self.kserver = self.server_type(id=self.id)
self.kserver.bootstrap(bootstrap)
self.port = reactor.listenUDP(port,
self.kserver.protocol,
interface=iface)
return self.port.getHost().host, self.port.getHost().port
def __getattr__(self, name):
if hasattr(self.kserver, name) and callable(getattr(self.kserver, name)):
return getattr(self.kserver, name)
else:
# Default behaviour
raise AttributeError
def get_port(self):
return self.port
def stop(self):
if self.port:
return self.port.stopListening()
class ThreadWrapper(object):
def __init__(self, obj, *args, **kwargs):
self._obj = threads.blockingCallFromThread(reactor, obj, *args, **kwargs)
def _call(self, func, *args, **kwargs):
return threads.blockingCallFromThread(reactor, func, *args, **kwargs)
def __getattr__(self, name):
class Caller(object):
def __init__(self, f, func):
self.f = f
self.func = func
def __call__(self, *args, **kwargs):
# _log.debug("Calling %s(%s, %s, %s)" %(self.f, self.func, args, kwargs))
return self.func(*args, **kwargs)
if hasattr(self._obj, name):
if callable(getattr(self._obj, name)):
return Caller(self._call, getattr(self._obj, name))
else:
return getattr(self._obj, name)
else:
# Default behaviour
raise AttributeError
class TwistedWaitObject(object):
def __init__(self, func, **kwargs):
self._value = None
self._q = Queue.Queue()
self._done = False
self._func = func
self._kwargs = kwargs
self._callback_class = kwargs.pop("cb")
d = func(**kwargs)
d.addCallback(self._callback)
def _callback(self, value):
self._value = value
if self._callback_class:
self._callback_class(self._kwargs['key'], value)
# reactor.callFromThread(self._callback_class, self._kwargs['key'], value)
self._q.put(self._value)
self._done = True
def done(self):
return self._done
def wait(self, timeout=5):
if self.done():
return self._value
try:
value = self._q.get(timeout=timeout)
except Queue.Empty:
logger("Timeout in %s(%s)" % (self._func, self._kwargs))
raise
return value
def get(self):
return self._value
class AutoDHTServer(StorageBase):
def __init__(self):
super(AutoDHTServer, self).__init__()
self.dht_server = None
self._ssdps = None
self._started = False
self.cert_conf = certificate.Config(_conf.get("security", "certificate_conf"),
_conf.get("security", "certificate_domain")).configuration
def start(self, iface='', network=None, bootstrap=None, cb=None, name=None):
if bootstrap is None:
bootstrap = []
name_dir = os.path.join(self.cert_conf["CA_default"]["runtimes_dir"], name)
filename = os.listdir(os.path.join(name_dir, "mine"))
st_cert = open(os.path.join(name_dir, "mine", filename[0]), 'rt').read()
cert_part = st_cert.split(certificate.BEGIN_LINE)
certstr = "{}{}".format(certificate.BEGIN_LINE, cert_part[1])
try:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
certstr)
except:
raise
# Derive DHT node id
key = cert.digest("sha256")
newkey = key.replace(":", "")
bytekey = newkey.decode("hex")
if network is None:
network = _conf.get_in_order("dht_network_filter", "ALL")
self.dht_server = ServerApp(AppendServer, bytekey[-20:])
ip, port = self.dht_server.start(iface=iface)
dlist = []
dlist.append(self.dht_server.bootstrap(bootstrap))
self._ssdps = SSDPServiceDiscovery(iface, cert=certstr)
dlist += self._ssdps.start()
logger("Register service %s %s:%s" % (network, ip, port))
self._ssdps.register_service(network, ip, port)
logger("Set client filter %s" % (network))
self._ssdps.set_client_filter(network)
start_cb = defer.Deferred()
def bootstrap_proxy(addrs):
def started(args):
logger("DHT Started %s" % (args))
if not self._started:
reactor.callLater(.2, start_cb.callback, True)
if cb:
reactor.callLater(.2, cb, True)
self._started = True
def failed(args):
logger("DHT failed to bootstrap %s" % (args))
#reactor.callLater(.5, bootstrap_proxy, addrs)
logger("Trying to bootstrap with %s" % (repr(addrs)))
d = self.dht_server.bootstrap(addrs)
d.addCallback(started)
d.addErrback(failed)
def start_msearch(args):
logger("** msearch %s args: %s" % (self, repr(args)))
reactor.callLater(0,
self._ssdps.start_search,
bootstrap_proxy,
stop=False)
# Wait until servers all listen
dl = defer.DeferredList(dlist)
dl.addBoth(start_msearch)
# Only for logging
self.dht_server.kserver.protocol.sourceNode.port = port
self.dht_server.kserver.protocol.sourceNode.ip = "0.0.0.0"
#FIXME handle inside ServerApp
self.dht_server.kserver.name = name
self.dht_server.kserver.protocol.name = name
self.dht_server.kserver.protocol.storeOwnCert(certstr)
self.dht_server.kserver.protocol.setPrivateKey()
return start_cb
def set(self, key, value, cb=None):
return TwistedWaitObject(self.dht_server.set, key=key, value=value, cb=cb)
def get(self, key, cb=None):
return TwistedWaitObject(self.dht_server.get, key=key, cb=cb)
def get_concat(self, key, cb=None):
return TwistedWaitObject(self.dht_server.get_concat, key=key, cb=cb)
def append(self, key, value, cb=None):
return TwistedWaitObject(self.dht_server.append, key=key, value=value, cb=cb)
def remove(self, key, value, cb=None):
return Twiste
| ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/prerrequisitos/plc-2.0/waf_tools/pmclib.py | Python | gpl-3.0 | 653 | 0.047473 |
#try to support many flavours of lapack
import autoinstall_lib as atl
from waflib import Logs
import os.path as osp
def options(ctx):
atl.add_lib_option("pmc",ctx,install=False)
def configure(ctx):
ctx.env.has_pmc = False
#pmc_config_path = ctx.find_program("pmc-config",path_list=[ctx.options.pmc_prefix+"/bin"])[0]
try:
pmc_config_path = ctx.find_program("pmc-config",path_list=[ctx.options.pmc_prefix+"/bin"])
pmcflagline = ctx.cmd_and_log(pmc_config_path)
except Exception,e:
pmcflagline=''
atl.conf_lib(ctx,"pmc","pmc","init_distribution","pmc.h",["pmclib","pmctools"],defines=["HAS_PMC"],flagline=pmcflagline)
| baylee-d/cos.io | common/blocks/collapsebox.py | Python | apache-2.0 | 705 | 0 |
from wagtail.wagtailcore.blocks import RichTextBlock, CharBlock, ListBlock, \
StructBlock
class CollapseEntryBlock(StructBlock):
title = CharBlock()
content = RichTextBlock()
class Meta:
form_template = 'common/block_forms/collapse_entry.html'
template = 'common/blocks/collapse_entry.html'
class CollapseBoxListBlock(ListBlock):
def __init__(self, **kwargs):
return super(CollapseBoxListBlock, self).__init__(
CollapseEntryBlock(), **kwargs)
class CollapseBoxBlock(StructBlock):
title = CharBlock()
list = CollapseBoxListBlock()
class Meta:
template = 'common/blocks/collapse_box_block.html'
icon = 'list-ul'
| WPMedia/dd-agent | checks.d/jenkins.py | Python | bsd-3-clause | 8,601 | 0.002558 |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
from glob import glob
import os
import time
from xml.etree.ElementTree import ElementTree
# project
from checks import AgentCheck
class Skip(Exception):
"""
Raised by :class:`Jenkins` when it comes across
a build or job that should be excluded from being checked.
"""
def __init__(self, reason, dir_name):
message = 'skipping build or job at %s because %s' % (dir_name, reason)
Exception.__init__(self, message)
class Jenkins(AgentCheck):
datetime_format = '%Y-%m-%d_%H-%M-%S'
def __init__(self, name, init_config, agentConfig):
AgentCheck.__init__(self, name, init_config, agentConfig)
self.high_watermarks = {}
def _timestamp_from_build_file(self, dir_name, tree):
timestamp = tree.find('timestamp')
if timestamp is None or not timestamp.text:
raise Skip('the timestamp cannot be found', dir_name)
else:
return int(timestamp.text) / 1000.0
def _timestamp_from_dirname(self, dir_name):
if not os.path.isdir(dir_name):
raise Skip('its not a build directory', dir_name)
try:
# Parse the timestamp from the directory name
date_str = os.path.basename(dir_name)
time_tuple = time.strptime(date_str, self.datetime_format)
return time.mktime(time_tuple)
except ValueError:
return None
def _get_build_metadata(self, dir_name, watermark):
if os.path.exists(os.path.join(dir_name, 'jenkins_build.tar.gz')):
raise Skip('the build has already been archived', dir_name)
timestamp = self._timestamp_from_dirname(dir_name)
# This is not the latest build
if timestamp is not None and timestamp <= watermark:
return None
# Read the build.xml metadata file that Jenkins generates
build_metadata = os.path.join(dir_name, 'build.xml')
if not os.access(build_metadata, os.R_OK):
self.log.debug("Can't read build file at %s" % (build_metadata))
raise Exception("Can't access build.xml at %s" % (build_metadata))
else:
tree = ElementTree()
tree.parse(build_metadata)
if timestamp is None:
try:
timestamp = self._timestamp_from_build_file(dir_name, tree)
# This is not the latest build
if timestamp <= watermark:
return None
except ValueError:
return None
keys = ['result', 'number', 'duration']
kv_pairs = ((k, tree.find(k)) for k in keys)
d = dict([(k, v.text) for k, v in kv_pairs if v is not None])
d['timestamp'] = timestamp
try:
d['branch'] = tree.find('actions')\
.find('hudson.plugins.git.util.BuildData')\
.find('buildsByBranchName')\
.find('entry')\
.find('hudson.plugins.git.util.Build')\
.find('revision')\
.find('branches')\
.find('hudson.plugins.git.Branch')\
.find('name')\
.text
except Exception:
pass
return d
def _get_build_results(self, instance_key, job_dir):
job_name = os.path.basename(job_dir)
try:
dirs = glob(os.path.join(job_dir, 'builds', '*_*'))
# Before Jenkins v1.597 the build folders were named with a timestamp (eg: 2015-03-10_19-59-29)
# Starting from Jenkins v1.597 they are named after the build ID (1, 2, 3...)
# So we need try both format when trying to find the latest build and parsing build.xml
if len(dirs) == 0:
dirs = glob(os.path.join(job_dir, 'builds', '[0-9]*'))
if len(dirs) > 0:
# versions of Jenkins > 1.597 need to be sorted by build number (integer)
try:
dirs = sorted(dirs, key=lambda x: int(x.split('/')[-1]), reverse=True)
except ValueError:
dirs = sorted(dirs, reverse=True)
# We try to get the last valid build
for dir_name in dirs:
watermark = self.high_watermarks[instance_key][job_name]
try:
build_metadata = self._get_build_metadata(dir_name, watermark)
except Exception:
build_metadata = None
if build_metadata is not None:
build_result = build_metadata.get('result')
if build_result is None:
break
output = {
'job_name': job_name,
'event_type': 'build result'
}
output.update(build_metadata)
if 'number' not in output:
output['number'] = dir_name.split('/')[-1]
self.high_watermarks[instance_key][job_name] = output.get('timestamp')
self.log.debug("Processing %s results '%s'" % (job_name, output))
yield output
# If it is not a new build, stop here
else:
break
except Exception as e:
self.log.error("Error while working on job %s, exception: %s" % (job_name, e))
def check(self, instance, create_event=True):
"""
DEPRECATED:
This Jenkins check is deprecated and not actively developed anymore. It will be
removed in a future version of the Datadog Agent. Please move to using the Datadog
plugin for Jenkins. More information can be found on the Jenkins Integration panel
under the Configuration tab (https://app.datadoghq.com/account/settings#integrations/jenkins)
"""
self.warning("This check is deprecated in favor of our Jenkins Datadog plugin."
" It will be removed in a future version of the Datadog Agent."
" More information can be found on the Jenkins Integration panel"
" under the Configuration tab"
" (https://app.datadoghq.com/account/settings#integrations/jenkins)")
if self.high_watermarks.get(instance.get('name'), None) is None:
# On the first run of check(), prime the high_watermarks dict
# so that we only send events that occurred after the agent
# started.
# (Setting high_watermarks in the next statement prevents
# any kind of infinite loop (assuming nothing ever sets
# high_watermarks to None again!))
self.high_watermarks[instance.get('name')] = defaultdict(lambda: 0)
self.check(instance, create_event=False)
jenkins_home = instance.get('jenkins_home')
if not jenkins_home:
raise Exception("No jenkins_home directory set in the config file")
jenkins_jobs_dir = os.path.join(jenkins_home, 'jobs', '*')
job_dirs = glob(jenkins_jobs_dir)
if not job_dirs:
raise Exception('No jobs found in `%s`! '
'Check `jenkins_home` in your config' % (jenkins_jobs_dir))
for job_dir in job_dirs:
for output in self._get_build_results(instance.get('name'), job_dir):
output['host'] = self.hostname
if create_event:
self.log.debug("Creating event for job: %s" % output['job_name'])
self.event(output)
tags = [
'job_name:%s' % output['job_name'],
'result:%s' % output['result'],
'build_number:%s' % output['number']
]
| samuelmaudo/yepes | tests/modelmixins/tests.py | Python | bsd-3-clause | 44,994 | 0.000622 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from decimal import Decimal
from unittest import skipIf
from django import test
from django import VERSION as DJANGO_VERSION
from django.utils import timezone
from django.utils import translation
from yepes.contrib.registry import registry
from yepes.model_mixins import Displayable
from yepes.model_mixins.multilingual import TranslationDoesNotExist
from .models import (
Article,
Book,
BookTranslation,
Category,
Image,
Language,
Product,
ProductVariant,
RichArticle,
Volcano,
)
class ActivatableTest(test.TestCase):
def setUp(self):
self.volcano_1 = Volcano.objects.create(
name='Luzon',
active_status=Volcano.ACTIVE,
active_from=None,
active_to=None,
)
self.volcano_2 = Volcano.objects.create(
name='Sulu',
active_status=Volcano.INACTIVE,
active_from=None,
active_to=None,
)
self.volcano_3 = Volcano.objects.create(
name='Visayas',
active_status=Volcano.ACTIVE,
active_from=datetime(2000, 1, 1, tzinfo=timezone.utc),
active_to=datetime(2004, 12, 31, tzinfo=timezone.utc),
)
def test_instance_methods(self):
self.assertTrue(self.volcano_1.is_active())
self.assertFalse(self.volcano_2.is_active())
self.assertFalse(self.volcano_3.is_active())
timestamp = datetime(1999, 12, 31, tzinfo=timezone.utc)
self.assertFalse(self.volcano_3.is_active(date=timestamp))
timestamp = datetime(2005, 1, 1, tzinfo=timezone.utc)
self.assertFalse(self.volcano_3.is_active(date=timestamp))
timestamp = datetime(2002, 6, 15, tzinfo=timezone.utc)
self.assertTrue(self.volcano_3.is_active(date=timestamp))
def test_manager_methods(self):
self.assertEqual(
list(Volcano.objects.active()),
[self.volcano_1]
)
self.assertEqual(
list(Volcano.objects.inactive()),
[self.volcano_2, self.volcano_3]
)
timestamp = datetime(2002, 6, 15, tzinfo=timezone.utc)
self.assertEqual(
list(Volcano.objects.active(date=timestamp)),
[self.volcano_1, self.volcano_3]
)
self.assertEqual(
list(Volcano.objects.inactive(date=timestamp)),
[self.volcano_2]
)
class LoggedTest(test.TestCase):
def test_timestamps(self):
article = Article(title='Django for Dummies')
self.assertIsNone(article.creation_date)
self.assertIsNone(article.last_modified)
article.save()
self.assertIsNotNone(article.creation_date)
self.assertIsNotNone(article.last_modified)
creation_date = article.creation_date
last_modified = article.last_modified
article.save()
self.assertEqual(
article.creation_date,
creation_date,
)
self.assertNotEqual(
article.last_modified,
last_modified,
)
last_modified = article.last_modified
article.save(update_fields=['title'])
self.assertNotEqual(
article.last_modified,
last_modified,
)
class MetaDataTest(test.TestCase):
maxDiff = None
def test_meta_attributes(self):
article = Article.objects.create(
meta_title='The Definitive Guide to Django',
meta_description=(
'Django, the Python-based equivalent to the Ruby on Rails web'
' development framework, is hottest topics in web development.'
' In _The Definitive Guide to Django: Web Development Done'
' Right_, **Adrian Holovaty**, one of Django\'s creators, and'
' Django lead developer **Jacob Kaplan-Moss** show you how'
' they use this framework to create award-winning web sites.'
),
meta_keywords=[
'Django', 'Definitive', 'Guide',
'Web', 'Development',
],
title='Two Scoops of Django',
content=(
'This book is chock-full of material that will help you with'
' your Django projects.\n'
'We\'ll introduce you to various tips, tricks, patterns, code'
' snippets, and techniques that we\'ve picked up over the years.'
' This book is a significant revision of the previous edition.'
),
)
self.assertEqual(
article.get_meta_title(),
'The Definitive Guide to Django',
)
self.assertEqual(
article.get_meta_title(max_length=15, end_text='.....'),
'The Defini.....',
)
self.assertEqual(
article.get_meta_description(),
'Django, the Python-based equivalent to the Ruby on Rails web'
' development framework, is hottest topics in web development.'
' In _The Definitive Guide to Django: Web Development Done'
' Right_, **Adrian Holovaty**,...',
)
self.assertEqual(
article.get_meta_description(max_words=5, end_text='......'),
'Django, the Python-based equivalent to......',
)
self.assertEqual(
article.get_meta_keywords(),
'Django, Definitive, Guide, Web, Development',
)
self.assertEqual(
article.get_meta_keywords(max_words=3),
'Django, Definitive, Guide',
)
def test_title_and_excerpt_fields(self):
article = RichArticle.objects.create(
title='The Definitive Guide to Django',
headline='Two Scoops of Django',
name='Two Scoops of Django',
excerpt=(
'Django, the Python-based equivalent to the Ruby on Rails web'
' development framework, is hottest topics in web development.'
' In _The Definitive Guide to Django: Web Development Done'
' Right_, **Adrian Holovaty**, one of Django\'s creators, and'
' Django lead developer **Jacob Kaplan-Moss** show you how'
' they use this framework to create award-winning web sites.'
),
description=(
'This book is chock-full of material that will help you with'
' your Django projects.\n\n'
'We\'ll introduce you to various tips, tricks, patterns, code'
' snippets, and techniques that we\'ve picked up over the years.'
' This book is a significant revision of the previous edition.'
),
content=(
'This book is chock-full of material that will help you with'
' your Django projects.\n\n'
'We\'ll introduce you to various tips, tricks, patterns, code'
' snippets, and techniques that we\'ve picked up over the years.'
' This book is a significant revision of the previous edition.'
),
)
self.assertEqual(
article.get_meta_title(),
'The Definitive Guide to Django',
)
self.assertEqual(
article.get_meta_title(max_length=15, end_text='.....'),
'The Defini.....',
)
self.assertEqual(
article.get_meta_description(),
'Django, the Python-based equivalent to the Ruby on Rails web'
' development framework, is hottest topics in web development.'
' In The Definitive Guide to Django: Web Development Done'
' Right, Adrian Holovaty,...',
)
self.assertEqual(
article.get_meta_description(max_words=5, end_text='......'),
'Django, the Python-based equivalent to......',
)
registry['core:STOP_WORDS'] = (
'i', 'you', 'he', 'she', 'we', 'they',
'my', 'your', 'his', 'her', 'our', 'their',
'of',
| omg-insa/server | api/utils.py | Python | bsd-3-clause | 372 | 0.021505 |
import re
import string
import random
__author__ = 'schitic'
def tokenGenerator(size=16, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def validateEmail(email):
if len(email) > 3:
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email):
return True
return False
| jamielennox/tempest | tempest/auth.py | Python | apache-2.0 | 24,953 | 0.00004 |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import datetime
import exceptions
import re
import urlparse
import six
from tempest import config
from tempest.openstack.common import log as logging
from tempest.services.identity.json import token_client as json_id
from tempest.services.identity.v3.json import token_client as json_v3id
CONF = config.CONF
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class AuthProvider(object):
"""
Provide authentication
"""
def __init__(self, credentials, interface=None):
"""
:param credentials: credentials for authentication
:param interface: 'json' or 'xml'. Applicable for tempest client only
(deprecated: only json now supported)
"""
credentials = self._convert_credentials(credentials)
if self.check_credentials(credentials):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
self.interface = 'json'
self.cache = None
self.alt_auth_data = None
self.alt_part = None
def _convert_credentials(self, credentials):
# Support dict credentials for backwards compatibility
if isinstance(credentials, dict):
return get_credentials(**credentials)
else:
return credentials
def __str__(self):
return "Creds :{creds}, interface: {interface}, " \
"cached auth data: {cache}".format(
creds=self.credentials, interface=self.interface,
cache=self.cache)
@abc.abstractmethod
def _decorate_request(self, filters, method, url, headers=None, body=None,
auth_data=None):
"""
Decorate request with authentication data
"""
return
@abc.abstractmethod
def _get_auth(self):
return
@abc.abstractmethod
def _fill_credentials(self, auth_data_body):
return
def fill_credentials(self):
"""
Fill credentials object with data from auth
"""
auth_data = self.get_auth()
self._fill_credentials(auth_data[1])
return self.credentials
@classmethod
def check_credentials(cls, credentials):
"""
Verify credentials are valid.
"""
return isinstance(credentials, Credentials) and credentials.is_valid()
@property
def auth_data(self):
return self.get_auth()
@auth_data.deleter
def auth_data(self):
self.clear_auth()
def get_auth(self):
"""
Returns auth from cache if available, else auth first
"""
if self.cache is None or self.is_expired(self.cache):
self.set_auth()
return self.cache
def set_auth(self):
"""
Forces setting auth, ignores cache if it exists.
Refills credentials
"""
self.cache = self._get_auth()
self._fill_credentials(self.cache[1])
def clear_auth(self):
"""
Can be called to clear the access cache so that next request
will fetch a new token and base_url.
"""
self.cache = None
self.credentials.reset()
@abc.abstractmethod
def is_expired(self, auth_data):
return
def auth_request(self, method, url, headers=None, body=None, filters=None):
"""
Obtains auth data and decorates a request with that.
:param method: HTTP method of the request
:param url: relative URL of the request (path)
:param headers: HTTP headers of the request
:param body: HTTP body in case of POST / PUT
:param filters: select a base URL out of the catalog
:returns a Tuple (url, headers, body)
"""
orig_req = dict(url=url, headers=headers, body=body)
auth_url, auth_headers, auth_body = self._decorate_request(
filters, method, url, headers, body)
auth_req = dict(url=auth_url, headers=auth_headers, body=auth_body)
# Overwrite part of the request if it has been requested
if self.alt_part is not None:
if self.alt_auth_data is not None:
alt_url, alt_headers, alt_body = self._decorate_request(
filters, method,
url, headers, body,
auth_data=self.alt_auth_data)
alt_auth_req = dict(url=alt_url, headers=alt_headers,
body=alt_body)
auth_req[self.alt_part] = alt_auth_req[self.alt_part]
else:
# If alt auth data is None, skip auth in the requested part
auth_req[self.alt_part] = orig_req[self.alt_part]
# Next auth request will be normal, unless otherwise requested
self.reset_alt_auth_data()
return auth_req['url'], auth_req['headers'], auth_req['body']
def reset_alt_auth_data(self):
"""
Configure auth provider to provide valid authentication data
"""
self.alt_part = None
self.alt_auth_data = None
def set_alt_auth_data(self, request_part, auth_data):
"""
Configure auth provider to provide alt authentication data
on a part of the *next* auth_request. If credentials are None,
set invalid data.
:param request_part: request part to contain invalid auth: url,
headers, body
:param auth_data: alternative auth_data from which to get the
invalid data to be injected
"""
self.alt_part = request_part
self.alt_auth_data = auth_data
@abc.abstractmethod
def base_url(self, filters, auth_data=None):
"""
Extracts the base_url based on provided filters
"""
return
class KeystoneAuthProvider(AuthProvider):
token_expiry_threshold = datetime.timedelta(seconds=60)
def __init__(self, credentials, interface=None):
super(KeystoneAuthProvider, self).__init__(credentials, interface)
self.auth_client = self._auth_client()
def _decorate_request(self, filters, method, url, headers=None, body=None,
auth_data=None):
if auth_data is None:
auth_data = self.auth_data
token, _ = auth_data
base_url = self.base_url(filters=filters, auth_data=auth_data)
# build authenticated request
# returns new request, it does not touch the original values
_headers = copy.deepcopy(headers) if headers is not None else {}
_headers['X-Auth-Token'] = str(token)
if url is None or url == "":
_url = base_url
else:
# Join base URL and url, and remove multiple contiguous slashes
_url = "/".join([base_url, url])
parts = [x for x in urlparse.urlparse(_url)]
parts[2] = re.sub("/{2,}", "/", parts[2])
_url = urlparse.urlunparse(parts)
# no change to method or body
return str(_url), _headers, body
@abc.abstractmethod
def _auth_client(self):
return
@abc.abstractmethod
def _auth_params(self):
return
def _get_auth(self):
# Bypasses the cache
auth_func = getattr(self.auth_client, 'get_token')
auth_params = self._auth_params()
# returns token, auth_data
token, auth_data = auth_func(**auth_params)
return token, auth_data
def get_token(self):
return self.auth_data[0]
class Key
| Vagab0nd/SiCKRAGE | lib3/github/Path.py | Python | gpl-3.0 | 3,820 | 0.007068 |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2018 Justin Kufro <jkufro@andrew.cmu.edu> #
# Copyright 2018 Ivan Minno <iminno@andrew.cmu.edu> #
# Copyright 2018 Zilei Gu <zileig@andrew.cmu.edu> #
# Copyright 2018 Yves Zumbach <yzumbach@andrew.cmu.edu> #
# Copyright 2018 Leying Chen <leyingc@andrew.cmu.edu> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class Path(github.GithubObject.NonCompletableGithubObject):
"""
This class represents a popular Path for a GitHub repository.
The reference can be found here https://developer.github.com/v3/repos/traffic/
"""
def __repr__(self):
return self.get__repr__(
{
"path": self._path.value,
"title": self._title.value,
"count": self._count.value,
"uniques": self._uniques.value,
}
)
@property
def path(self):
"""
:type: string
"""
return self._path.value
@property
def title(self):
"""
:type: string
"""
return self._title.value
@property
def count(self):
"""
:type: integer
"""
return self._count.value
@property
def uniques(self):
"""
:type: integer
"""
return self._uniques.value
def _initAttributes(self):
self._path = github.GithubObject.NotSet
self._title = github.GithubObject.NotSet
self._count = github.GithubObject.NotSet
self._uniques = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "count" in attributes: # pragma no branch
self._count = self._makeIntAttribute(attributes["count"])
if "uniques" in attributes: # pragma no branch
self._uniques = self._makeIntAttribute(attributes["uniques"])
| yosshy/osclient2 | osclient2/neutron/v2/lb/vip.py | Python | apache-2.0 | 4,709 | 0 |
# Copyright 2014-2017 by Akira Yoshiyama <akirayoshiyama@gmail.com>.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource class and its manager for LB virtual IPs in Networking V2 API
"""
from osclient2 import base
from osclient2 import mapper
from osclient2 import utils
ATTRIBUTE_MAPPING = [
('id', 'id', mapper.Noop),
('name', 'name', mapper.Noop),
('description', 'description', mapper.Noop),
('protocol', 'protocol', mapper.Noop),
('address', 'address', mapper.Noop),
('protocol_port', 'protocol_port', mapper.Noop),
('connection_limit', 'connection_limit', mapper.Noop),
('pool', 'pool_id', mapper.Resource('neutron.lb.pool')),
('subnet', 'subnet_id', mapper.Resource('neutron.subnet')),
('project', 'tenant_id', mapper.Resource('project')),
('port', 'port_id', mapper.Resource('neutron.port')),
('is_enabled', 'admin_state_up', mapper.Noop),
('is_session_persistent', 'session_persistence', mapper.Noop),
('status', 'status', mapper.Noop),
]
class Resource(base.Resource):
"""Resource class for
|
LB virtual IPs in Networking V2 API"""
def update(self, name=None, description=None, session_persistence=None,
connection_limit=None, is_enabled=None):
"""
Update a VIP for a LB pool
@keyword name: VIP name (str)
@type name: str
@keyword description: VIP description (str)
@type description: str
@keyword is_session_persistent: Whether session is persistent
@type is_session_persistent: bool
@keyword connection_limit: Maximum connection number
@type connection_limit: int
@keyword is_enabled: Whether the VIP is enabled
@type is_enabled: bool
@rtype: None
"""
pool = self.parent_resource
super(Resource, self).update(
name=name,
description=description,
pool=pool,
session_persistence=session_persistence,
connection_limit=connection_limit,
is_enabled=is_enabled)
class Manager(base.SubManager):
"""Manager class for LB virtual IPs in Networking V2 API"""
resource_class = Resource
service_type = 'network'
_attr_mapping = ATTRIBUTE_MAPPING
_json_resource_key = 'vip'
_json_resources_key = 'vips'
_url_resource_path = '/v2.0/lb/vips'
def create(self, name=None, description=None, subnet=None, address=None,
protocol=None, protocol_port=None, session_persistence=None,
connection_limit=None, is_enabled=None):
"""
Create a VIP for a LB pool
@keyword name: VIP name
:param name: str
@keyword description: VIP description
:param description: str
@keyword subnet: Subnet object (required)
:param subnet: osclient2.neutron.v2.subnet.Resource
@keyword address: Address
:param address: str
@keyword protocol: Protocol; 'TCP', 'HTTP', or 'HTTPS' (required)
:param protocol: str
@keyword protocol_port: Port number
:param protocol_port: int
@keyword pool: LB pool
:param pool: osclient2.neutron.v2.lb_pool.Resource
@keyword is_session_persistent: Whether the session is persistent
:param is_session_persistent: bool
@keyword connection_limit: Maximum connection number
:param connection_limit: int
@keyword is_enabled: Whether the VIP is enabled
:param is_enabled: bool
@return: Created VIP
@rtype: osclient2.neutron.v2.lb.vip.Resource
"""
pool = self.parent_resource
return super(Manager, self).create(
name=name,
description=description,
subnet=subnet,
address=address,
protocol=protocol,
protocol_port=protocol_port,
pool=pool,
session_persistence=session_persistence,
connection_limit=connection_limit,
is_enabled=is_enabled)
def _find_gen(self, **kwargs):
kwargs['pool'] = self.parent_resource
return super(Manager, self)._find_gen(**kwargs)
| markusmichel/Tworpus-Client | session/views.py | Python | apache-2.0 | 9,915 | 0.001614 |
import datetime
import time
from django.utils.timezone import utc
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse
from django import forms, http
import signal
import shutil
from uuid import uuid4
import ntpath
import json
import glob
import os
from StringIO import StringIO
from enum import Enum
from zipfile import ZipFile
from tworpus.models import TworpusSettings
import tworpus_fetcher
from tworpus import settings as settings
import TweetIO
from session.models import Session
class Task(Enum):
idle = 0
fetching = 1
saving = 2
class CreateCorpusForm(forms.Form):
subject = forms.CharField(max_length=100)
message = forms.CharField()
sender = forms.EmailField()
cc_myself = forms.BooleanField(required=False)
language = forms.ChoiceField(widget=forms.ChoiceField())
class TweetsDownloadListener(TweetIO.FetcherProgressListener):
def onSuccess(self, values):
pass
def onError(self, values):
pass
def onFinish(self):
pass
def startCreateCorpus(request):
"""
Actually start to create a corpus
- save values as a model (Session) to the database
- download information file from tworpus server
- fetch actual tweets from twitter
Status codes:
- 409: No tweets found to fetch
"""
if request.method == 'POST':
# AJAX request: parse body
if request.is_ajax():
data = json.loads(request.body)
minWordCount = int(data["numMinWords"])
minCharsCount = int(data["numMinChars"])
language = str(data["language"])
numTweets = int(data["numTweets"])
title = str(data["title"])
startDate = str(data["startDateTimestamp"])
endDate = str(data["endDateTimestamp"])
converters = data["converters"]
# "normal" POST request: parse request.POST
else:
minWordCount = request.POST["minWords"]
minCharsCount = request.POST["minChars"]
language = request.POST["language"]
numTweets = request.POST["limit"]
title = request.POST["title"]
startDate = request.POST["startDateTimestamp"]
endDate = request.POST["endDateTimestamp"]
converters = request.POST["converters"]
folderName = str(uuid4())
startDateObj = datetime.datetime.utcfromtimestamp(int(startDate) / 1000).replace(tzinfo=utc)
endDateObj = datetime.datetime.utcfromtimestamp(int(endDate) / 1000).replace(tzinfo=utc)
session = Session.objects.create(title=title, startDate=startDateObj, endDate=endDateObj)
session.language = language
session.minCharsPerTweet = minCharsCount
session.minWordsPerTweet = minWordCount
session.numTweets = numTweets
session.folder = folderName
session.converters = json.dumps(converters)
session.save()
try:
start_create_corpus(session)
except CsvEmptyException:
return HttpResponse(status=444)
except CsvPartiallyEmptyException:
return HttpResponse(status=206)
# Notify corpus creation initialization
response_data = {}
        response_data['message'] = 'Start fetching tweets'
        return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
return http.HttpResponseServerError("Error fetching tweets")
def invokeCorpusCreation(csvFile, folder, session):
"""
    Fetches tweets by calling the fetcher jar.
"""
tw_settings = TworpusSettings.objects.first()
listener = TweetIO.TweetProgressEventHandler(session.id)
fetcher = TweetIO.TweetsFetcher(
tweetsCsvFile=csvFile.name, outputDir=folder, tweetsPerXml=tw_settings.tweets_per_xml
)
fetcher.addListener(listener)
fetcher.fetch()
fetchersManager = TweetIO.getManager()
fetchersManager.add(fetcher, session.id)
def getSessions(request):
"""
Returns a list of all sessions (completed, working, active/inactive).
"""
sessions = [session.as_json() for session in Session.objects.all()]
return HttpResponse(json.dumps(sessions))
def getActiveSessions(request):
"""
    Returns a list of corpus creation sessions that are not yet completed (currently working or paused).
"""
sessions = [session.as_json() for session in Session.objects.all().filter(completed=False)]
return HttpResponse(json.dumps(sessions))
#-------------------------------------------------------
# Corpus CRUD operations
#-------------------------------------------------------
def getSession(request):
"""
Return one specific session by its id
"""
sid = int(request.GET["id"])
session = Session.objects.all().filter(id=sid).first()
return HttpResponse(json.dumps(session.as_json()))
def exit_application(request):
fetchersManager = TweetIO.getManager()
for key, fetcher in enumerate(fetchersManager.fetchers.items()):
fetcher[1].cancel()
if fetchersManager.fetchers.__len__() > 0:
fetchersManager.fetchers.clear()
pid = os.getpid()
os.kill(pid, signal.SIGTERM)
def removeCorpus(request):
"""
    Deletes a finished or unfinished corpus from the database
and removes all downloaded files.
"""
corpusid = request.GET["corpusid"] if request.method == "GET" else request.POST["corpusid"]
session = Session.objects.all().filter(id=corpusid).first()
folder = os.path.join(settings.BASE_PROJECT_DIR, session.folder)
manager = TweetIO.getManager()
fetcher = manager.get(corpusid)
class OnCancelListener(TweetIO.FetcherProgressListener):
def onCancel(self):
shutil.rmtree(folder)
session.delete()
if str(corpusid) in manager.fetchers:
manager.fetchers.pop(str(corpusid))
if fetcher is not None:
fetcher.addListener(OnCancelListener())
fetcher.cancel()
else:
shutil.rmtree(folder)
session.delete()
return HttpResponse("success")
def pauseCorpus(request):
"""
Sets the corpus with a specific corpusid to NOT working
    and cancels its running subprocesses.
"""
sid = request.GET["id"]
fetchersManager = TweetIO.getManager()
fetchersManager.remove(sid)
session = Session.objects.all().filter(id=sid).first()
session.working = False
session.save()
return HttpResponse(json.dumps("success"), status=200)
def resumeCorpus(request):
"""
Resumes (=restarting subprocess) a corpus creation process.
"""
sid = request.GET["id"]
session = Session.objects.all().filter(id=sid).first()
session.working = True
session.completed = False
session.save()
folderPath = os.path.join(settings.BASE_PROJECT_DIR, session.folder)
csvFile = open(os.path.join(folderPath, "tweets.csv"))
invokeCorpusCreation(folder=folderPath, csvFile=csvFile, session=session)
return HttpResponse(json.dumps("success"), status=200)
def downloadCorpus(request):
sid = request.GET["id"]
session = Session.objects.all().filter(id=sid).first()
baseFolder = os.path.join(settings.BASE_PROJECT_DIR, session.folder)
xmlfiles = glob.glob(os.path.join(baseFolder, "*.xml"))
if xmlfiles.__len__() == 1:
tweetsFileLocation = os.path.join(settings.BASE_PROJECT_DIR, session.folder, xmlfiles.pop(0))
tweetsFile = open(tweetsFileLocation)
response = HttpResponse(FileWrapper(tweetsFile), content_type='application/xml')
response['Content-Disposition'] = 'attachment; filename=tweets.xml'
return response
else:
zip_memory = StringIO()
xmlzip = ZipFile(zip_memory, 'w')
for xmlfile in xmlfiles:
filename = ntpath.basename(xmlfile)
xmlzip.write(xmlfile, filename)
xmlzip.close()
response = HttpResponse(zip_memory.getvalue(), content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename=tweets.zip'
return response
def recreateCorpus(request):
sid = request.GET["id"]
|
bhardesty/qpid-dispatch
|
tests/system_tests_fallback_dest.py
|
Python
|
apache-2.0
| 29,872
| 0.003214
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton import Message, symbol
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, MgmtMsgProxy, TestTimeout
from system_test import unittest
from proton.handlers import MessagingHandler
from proton.reactor import Container
class AddrTimer(object):
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.check_address()
class RouterTest(TestCase):
inter_router_port = None
@classmethod
def setUpClass(cls):
"""Start a router"""
super(RouterTest, cls).setUpClass()
def router(name, mode, connection, extra=None):
config = [
('router', {'mode': mode, 'id': name}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
('listener', {'port': cls.tester.get_port(), 'role': 'route-container', 'name': 'WP'}),
('address', {'prefix': 'dest', 'enableFallback': 'yes'}),
('autoLink', {'connection': 'WP', 'address': 'dest.al', 'dir': 'out', 'fallback': 'yes'}),
('autoLink', {'connection': 'WP', 'address': 'dest.al', 'dir': 'in', 'fallback': 'yes'}),
connection
]
if extra:
config.append(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
edge_port_A = cls.tester.get_port()
edge_port_B = cls.tester.get_port()
router('INT.A', 'interior', ('listener', {'role': 'inter-router', 'port': inter_router_port}),
('listener', {'role': 'edge', 'port': edge_port_A}))
router('INT.B', 'interior', ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port}),
('listener', {'role': 'edge', 'port': edge_port_B}))
router('EA1', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_A}))
router('EA2', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_A}))
router('EB1', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_B}))
router('EB2', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_B}))
cls.routers[0].wait_router_connected('INT.B')
cls.routers[1].wait_router_connected('INT.A')
def test_01_sender_first_primary_same_interior(self):
test = SenderFirstTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
'dest.01', False)
test.run()
self.assertEqual(None, test.error)
def test_02_sender_first_fallback_same_interior(self):
test = SenderFirstTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
'dest.02', True)
test.run()
self.assertEqual(None, test.error)
def test_03_sender_first_primary_same_edge(self):
test = SenderFirstTest(self.routers[2].addresses[0],
self.routers[2].addresses[0],
'dest.03', False)
test.run()
self.assertEqual(None, test.error)
def test_04_sender_first_fallback_same_edge(self):
test = SenderFirstTest(self.routers[2].addresses[0],
self.routers[2].addresses[0],
'dest.04', True)
test.run()
self.assertEqual(None, test.error)
def test_05_sender_first_primary_interior_interior(self):
test = SenderFirstTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
'dest.05', False)
test.run()
self.assertEqual(None, test.error)
def test_06_sender_first_fallback_interior_interior(self):
test = SenderFirstTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
'dest.06', True)
test.run()
self.assertEqual(None, test.error)
def test_07_sender_first_primary_edge_interior(self):
test = SenderFirstTest(self.routers[2].addresses[0],
self.routers[1].addresses[0],
'dest.07', False)
test.run()
self.assertEqual(None, test.error)
def test_08_sender_first_fallback_edge_interior(self):
test = SenderFirstTest(self.routers[2].addresses[0],
self.routers[1].addresses[0],
'dest.08', True)
test.run()
self.assertEqual(None, test.error)
def test_09_sender_first_primary_interior_edge(self):
test = SenderFirstTest(self.routers[1].addresses[0],
self.routers[2].addresses[0],
'dest.09', False)
test.run()
self.assertEqual(None, test.error)
def test_10_sender_first_fallback_interior_edge(self):
test = SenderFirstTest(self.routers[1].addresses[0],
self.routers[2].addresses[0],
|
'dest.10', True)
        test.run()
self.assertEqual(None, test.error)
def test_11_sender_first_primary_edge_edge(self):
test = SenderFirstTest(self.routers[2].addresses[0],
self.routers[4].addresses[0],
'dest.11', False)
test.run()
self.assertEqual(None, test.error)
def test_12_sender_first_fallback_edge_edge(self):
test = SenderFirstTest(self.routers[2].addresses[0],
self.routers[4].addresses[0],
'dest.12', True)
test.run()
self.assertEqual(None, test.error)
def test_13_receiver_first_primary_same_interior(self):
test = ReceiverFirstTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
'dest.13', False)
test.run()
self.assertEqual(None, test.error)
def test_14_receiver_first_fallback_same_interior(self):
test = ReceiverFirstTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
'dest.14', True)
test.run()
self.assertEqual(None, test.error)
def test_15_receiver_first_primary_same_edge(self):
test = ReceiverFirstTest(self.routers[2].addresses[0],
self.routers[2].addresses[0],
'dest.15', False)
test.run()
self.assertEqual(None, test.error)
def test_16_receiver_first_fallback_same_edge(self):
test = ReceiverFirstTest(self.routers[2].addresses[0],
self.routers[2].addresses[0],
'dest.16', True)
test.run()
self.assertEqual(None, test.error)
def test_17_receiver_first_primary_interior_
|
SRJ9/django-driver27
|
driver27/management/commands/export_seats_for_csv.py
|
Python
|
mit
| 1,566
| 0.001916
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Team, Seat
import sys
if sys.version_info < (3, 0):
try:
import unicodecsv as csv
except ImportError:
import csv
else:
import csv
class Command(BaseCommand):
help = 'Export seats to csv'
def get_config(self, export_attr):
if export_attr == 'drivers':
fieldnames = ['id', 'first_name', 'last_name', 'country', 'year_of_birth']
|
export_cls = Driver
elif export_attr == 'teams':
fieldnames = ['id', 'name', 'full_name', 'country']
export_cls = Team
else:
            fieldnames = ['id', 'driver_id', 'driver__last_name', 'driver__first_name', 'team_id', 'team__name']
export_cls = Seat
objects = list(export_cls.objects.values(*fieldnames))
return {'fieldnames': fieldnames, 'objects': objects}
def add_arguments(self, parser):
parser.add_argument('csv',)
parser.add_argument(
'--export',
default='seats',
help='By default, export seats. Options: seats, drivers, teams',
)
def handle(self, *args, **options):
with open(options['csv'], 'wb') as csvfile:
export_config = self.get_config(options['export'])
writer = csv.DictWriter(csvfile, fieldnames=export_config['fieldnames'])
writer.writeheader()
for entry in export_config['objects']:
writer.writerow(entry)
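# Illustrative invocation only (not part of the original file); assumes the
# command is installed in a Django project that includes driver27:
#   python manage.py export_seats_for_csv seats.csv --export drivers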
|
ecoal95/angle
|
src/libANGLE/renderer/d3d/d3d11/gen_dxgi_format_table.py
|
Python
|
bsd-3-clause
| 3,280
| 0.006402
|
#!/usr/bin/python
# Copyright 2016 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_dxgi_format_table.py:
# Code generation for DXGI format map.
from datetime import date
import sys
sys.path.append('../..')
import angle_format
template_cpp = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// DXGI format info:
// Determining metadata about a DXGI format.
#include "libANGLE/renderer/Format.h"
using namespace angle;
namespace rx
{{
namespace d3d11
{{
GLenum GetComponentType(DXGI_FORMAT dxgiFormat)
{{
switch (dxgiFormat)
{{
{component_type_cases} default:
break;
}}
UNREACHABLE();
return GL_NONE;
}}
}} // namespace d3d11
namespace d3d11_angle
{{
const Format &GetFormat(DXGI_FORMAT dxgiFormat)
{{
switch (dxgiFormat)
{{
{format_cases} default:
break;
}}
UNREACHABLE();
return Format::Get(Format::ID::NONE);
}}
}} // namespace d3d11_angle
}} // namespace rx
"""
template_format_case = """ case DXGI_FORMAT_{dxgi_format}:
return {result};
"""
template_undefined_case = """ case DXGI_FORMAT_{dxgi_format}:
break;
"""
def format_case(dxgi_format, result):
return template_format_case.format(
dxgi_format = dxgi_format,
result = result)
def undefined_case(dxgi_format):
return template_undefined_case.format(dxgi_format = dxgi_format)
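# For illustration (not in the original script), format_case('R8G8B8A8_UNORM',
# 'GL_UNSIGNED_NORMALIZED') expands to:
#     case DXGI_FORMAT_R8G8B8A8_UNORM:
#         return GL_UNSIGNED_NORMALIZED;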
component_cases = ""
format_cases = ""
input_data = 'dxgi_format_data.json'
dxgi_map = angle_format.load_json(input_data)
types = {
'SNORM': 'GL_SIGNED_NORMALIZED',
'UNORM': 'GL_UNSIGNED_NORMALIZED',
'SINT': 'GL_INT',
'UINT': 'GL_UNSIGNED_INT',
'FLOAT': 'GL_FLOAT',
'SHAREDEXP': 'GL_FLOAT'
}
angle_to_gl = angle_format.load_inverse_table('../../angle_format_map.json')
all_angle = angle_to_gl.keys()
for dxgi_format, angle_format in sorted(dxgi_map.iteritems()):
    found = [ctype in dxgi_format for ctype in types.keys()]
count = reduce((lambda a, b: int(a) + int(b)), found)
component_type = 'GL_NONE'
if count == 1:
gltype = next(gltype for ctype, gltype in types.iteritems() if ctype in dxgi_format)
component_cases += format_case(dxgi_format, gltype)
else:
component_cases += undefined_case(dxgi_format)
if angle_format == "":
angle_format = dxgi_format
if angle_format in all_angle:
angle_format = "Format::Get(Format::ID::" + angle_format + ")"
format_cases += format_case(dxgi_format, angle_format)
else:
format_cases += undefined_case(dxgi_format)
with open('dxgi_format_map_autogen.cpp', 'wt') as out_file:
output_cpp = template_cpp.format(
script_name = sys.argv[0],
data_source_name = input_data,
copyright_year = date.today().year,
component_type_cases = component_cases,
format_cases = format_cases)
out_file.write(output_cpp)
out_file.close()
|
pombredanne/https-git.fedorahosted.org-git-kobo
|
kobo/admin/commands/cmd_start_worker_task.py
|
Python
|
lgpl-2.1
| 815
| 0.002454
|
# -*- coding: utf-8 -*-
import os
import kobo.cli
import kobo.admin
class Start_Worker_Task(kobo.cli.Command):
"""create a worker task module in the current directory"""
    enabled = True
def options(self):
self.parser.usage = "%%prog %s [options] <task_name>" % self.normalized_name
self.parser.add_option("-d", "--dir", help="target directory")
def run(self, *args, **kwargs):
if len(args) < 1:
self.parser.error("Please specify a name of the task.")
name = args[0]
directory = kwargs.pop("dir")
if not directory:
            directory = os.getcwd()
try:
kobo.admin.copy_helper(name, directory, "task___project_name__.py.template")
except kobo.admin.TemplateError, ex:
self.parser.error(ex)
|
cpe/VAMDC-VALD
|
nodes/jpl/node/forms.py
|
Python
|
gpl-3.0
| 2,202
| 0.011807
|
from node.models import *
from django.forms import ModelForm
from django.forms.formsets import BaseFormSet
from django.forms.models import modelformset_factory
from .cdmsportalfunc import *
from django.core.exceptions import ValidationError
from django import forms
class MoleculeForm(ModelForm):
class Meta:
model = Molecules
fields = '__all__'
class SpecieForm(ModelForm):
datearchived = forms.DateField(
widget=forms.TextInput(attrs={'readonly':'readonly'})
)
dateactivated = forms.DateField(
widget=forms.TextInput(attrs={'readonly':'readonly'})
)
class Meta:
        model = Species
fields = '__all__'
class FilterForm(ModelForm):
class Meta:
model = QuantumNumbersFilter
fields = '__all__'
class XsamsConversionForm(forms.Form):
inurl = forms.URLField(label='Input URL',required=False, widget=forms.TextInput(attrs={'size': 50, 'title': 'Paste here a URL that delivers an XSAMS document.',}))
#inurl = forms.CharField(max_length=50)
infile = forms.FileField()
format = forms.ChoiceField( choices = [("RAD 3D", "RAD 3D"),("CSV","CSV")],
)
def clean(self):
infile = self.cleaned_data.get('infile')
inurl = self.cleaned_data.get('inurl')
if (infile and inurl):
raise ValidationError('Give either input file or URL!')
if inurl:
try: data = urlopen(inurl)
except Exception as err:
raise ValidationError('Could not open given URL: %s'%err)
elif infile: data = infile
else:
raise ValidationError('Give either input file or URL!')
try: self.cleaned_data['result'] = applyStylesheet2File(data)
except Exception as err:
raise ValidationError('Could not transform XML file: %s'%err)
# try: xml=e.parse(data)
# except Exception,err:
# raise ValidationError('Could not parse XML file: %s'%err)
# try: self.cleaned_data['sme'] = xsl(xml)
# except Exception,err:
# raise ValidationError('Could not transform XML file: %s'%err)
return self.cleaned_data
|
npo-poms/scripts
|
python/vpro/check_with_sitemap_vpro.py
|
Python
|
gpl-2.0
| 6,894
| 0.004787
|
#!/usr/bin/env python3
import os
import re
import subprocess
import sys
import threading
import time
import urllib
from subprocess import Popen, PIPE
sys.path.append("..")
from check_with_sitemap import CheckWithSitemap
DEFAULT_JAVA_PATH = 'java'
class CheckWithSiteMapVpro(CheckWithSitemap):
"""
This specialization is customized for VPRO.
    It can connect via JMX to VPRO's Magnolia CMS, which contains the original pages, and request it to index missing pages.
This wraps a command line client for jmx: https://github.com/jiaqi/jmxterm/
"""
def __init__(self, java_path: str = DEFAULT_JAVA_PATH):
super().__init__()
self.jmx_url = self.args.jmx_url
self.jmxterm_binary = self.args.jmxterm_binary
self.java_path = java_path
self._get_jmx_term_if_necessary()
if self.args.tunnel:
tunnel = SshTunnel(self.log)
tunnel.start()
def add_arguments(self):
super().add_arguments()
api = self.api
api.add_argument('--jmx_url', type=str, default=None, help='use JMX to trigger reindex. An url like "localhost:500" where this is tunneled to the magnolia backend server')
api.add_argument('--jmxterm_binary', type=str, default=None, help='location of jmxterm binary')
api.add_argument('--tunnel', action='store_true', default=False, help='set up jmx tunnel too')
def perform_add_to_api(self, not_in_api: list):
"""
Actually add to api
"""
if self.jmx_url:
self.jmxterm = [self.java_path, '-jar', self.jmxterm_binary, '--url', self.jmx_url, "-n", "-v", "silent"]
not_in_api = self._reindex_3voor12(not_in_api)
not_in_api = self._reindex_cinema_films(not_in_api)
not_in_api = self._reindex_cinema_person(not_in_api)
not_in_api = self._reindex_mids(not_in_api)
self._reindex_urls(not_in_api)
else:
self.log.info("No jmx_url configured, not trying to implicitly add to api via JMX")
def _reindex_mids(self, not_in_api: list) -> list:
urls_with_mid = list(filter(lambda m: m[0] is not None, map(self._find_mid, not_in_api)))
return self._reindex_ids(not_in_api, urls_with_mid, "nl.vpro.magnolia:name=IndexerMaintainerImpl", "reindexMediaObjects", 100, "media objects")
def _reindex_3voor12(self, not_in_api: list) -> list:
urls_with_uuids = list(filter(lambda m: m[0] is not None, map(self._find_update_uuid, not_in_api)))
return self._reindex_ids(not_in_api, urls_with_uuids, "nl.vpro.magnolia:name=DrieVoorTwaalfUpdateIndexer", "reindexUUIDs", 100, "3voor12 updates")
def _reindex_cinema_films(self, not_in_api: list) -> list:
cinema_ids = list(filter(lambda m: m[0] is not None, map(self._find_cinema_film_id, not_in_api)))
return self._reindex_ids(not_in_api, cinema_ids, "nl.vpro.magnolia:name=CinemaObjectIndexer", "reindex", 100, "cinema films")
def _reindex_cinema_person(self, not_in_api: list) -> list:
cinema_ids = list(filter(lambda m: m[0] is not None, map(self._find_cinema_person_uid, not_in_api)))
return self._reindex_ids(not_in_api, cinema_ids, "nl.vpro.magnolia:name=CinemaPersonIndexer", "reindex", 100, "cinema persons")
def _reindex_urls(self, not_in_api: list) -> None:
page_size = 20
self.log.info("Reindexing %d urls" % len(not_in_api))
for i in range(0, len(not_in_api), page_size ):
self._call_jmx_operation("nl.vpro.magnolia:name=IndexerMaintainerImpl", "reindexUrls", not_in_api[i: i + page_size ])
def _find_mid(self, url: str) -> list:
        return self._find_by_regexp(".*?~(.*?)~.*", url)
def _find_update_uuid(self, url: str) -> list:
return self._find_by_regexp(".*?update~(.*?)~.*", url)
def _find_cinema_film_id(self, url: str) -> list:
        return self._find_by_regexp(".*?film~(.*?)~.*", url)
def _find_cinema_person_uid(self, url: str) -> list:
return self._find_by_regexp(".*?persoon~(.*?)~.*", url)
@staticmethod
def _find_by_regexp(regex: str, url: str) -> list:
matcher = re.match(regex, url)
if matcher:
return [matcher.group(1), url]
else:
return [None, url]
def _reindex_ids(
self, not_in_api: list,
ids: list,
bean: str,
operation: str, page_size: int, name: str) -> list:
self.log.info("Reindexing %d %s" % (len(ids), name))
for i in range(0, len(ids), page_size):
self._call_jmx_operation(bean, operation, list(map(lambda m : m[0], ids[i: i + page_size])))
urls = list(map(lambda u: u[1], ids))
self.log.debug("Associated with %s" % str(urls))
return [e for e in not_in_api if e not in urls]
def _call_jmx_operation(self, bean: str, operation: str, sub_list: list):
p = Popen(self.jmxterm, stdin=PIPE, stdout=PIPE, encoding='utf-8')
input = "bean " + bean +"\nrun " + operation + " " + ",".join(sub_list)
self.log.info("input\n%s" % input)
out, error = p.communicate(input=input, timeout=100)
self.log.info("output\n%s" % out)
if error:
self.log.info("error\n%s" % error)
if "still busy" in out:
self.log.info("Jmx reports that still busy. Let's wait a bit then")
time.sleep(20)
def _get_jmx_term_if_necessary(self):
if self.jmx_url and not self.jmxterm_binary:
from_env = os.getenv('JMXTERM_BINARY')
if not from_env is None:
self.jmxterm_binary=from_env
else:
jmxtermversion = "1.0.2"
jmxterm = "jmxterm-" + jmxtermversion + "-uber.jar"
path = os.path.dirname(os.path.realpath(__file__))
self.jmxterm_binary = os.path.join(path, jmxterm)
if not os.path.exists(self.jmxterm_binary):
get_url = "https://github.com/jiaqi/jmxterm/releases/download/v" + jmxtermversion + "/" + jmxterm
self.log.info("Downloading %s -> %s" % (get_url, self.jmxterm_binary))
urllib.request.urlretrieve (get_url, self.jmxterm_binary)
class SshTunnel(threading.Thread):
def __init__(self, log):
threading.Thread.__init__(self)
self.daemon = True # So that thread will exit when
# main non-daemon thread finishes
self.log = log
def run(self):
self.log.info("Setting up tunnel")
if subprocess.call([
'ssh', '-N', '-4',
'-L', '5000:localhost:5000',
'os2-magnolia-backend-prod-01'
]):
raise Exception ('ssh tunnel setup failed')
if __name__ == "__main__":
CheckWithSiteMapVpro().main()
|
jawilson/home-assistant
|
homeassistant/components/zwave/sensor.py
|
Python
|
apache-2.0
| 3,679
| 0.000815
|
"""Support for Z-Wave sensors."""
from homeassistant.components.sensor import DEVICE_CLASS_BATTERY, DOMAIN, SensorEntity
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ZWaveDeviceEntity, const
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Sensor from Config Entry."""
@callback
def async_add_sensor(sensor):
"""Add Z-Wave Sensor."""
async_add_entities([sensor])
async_dispatcher_connect(hass, "zwave_new_sensor", async_add_sensor)
def get_device(node, values, **kwargs):
"""Create Z-Wave entity device."""
# Generic Device mappings
if values.primary.command_class == const.COMMAND_CLASS_BATTERY:
return ZWaveBatterySensor(values)
if node.has_command_class(const.COMMAND_CLASS_SENSOR_MULTILEVEL):
return ZWaveMultilevelSensor(values)
if (
node.has_command_class(const.COMMAND_CLASS_METER)
and values.primary.type == const.TYPE_DECIMAL
):
return ZWaveMultilevelSensor(values)
if node.has_command_class(const.COMMAND_CLASS_ALARM) or node.has_command_class(
const.COMMAND_CLASS_SENSOR_ALARM
):
return ZWaveAlarmSensor(values)
return None
class ZWaveSensor(ZWaveDeviceEntity, SensorEntity):
"""Representation of a Z-Wave sensor."""
def __init__(self, values):
"""Initialize the sensor."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self.update_properties()
def update_properties(self):
"""Handle the data changes for node values."""
self._state = self.values.primary.data
self._units = self.values.primary.units
@property
def force_update(self):
"""Return force_update."""
return True
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement the value is expressed in."""
return self._units
class ZWaveMultilevelSensor(ZWaveSensor):
"""Representation of a multi level sensor Z-Wave sensor."""
@property
|
def native_value(self):
"""Return the state of the sensor."""
if self._units in ("C", "F"):
            return round(self._state, 1)
if isinstance(self._state, float):
return round(self._state, 2)
return self._state
@property
def device_class(self):
"""Return the class of this device."""
if self._units in ["C", "F"]:
return DEVICE_CLASS_TEMPERATURE
return None
@property
def native_unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._units == "C":
return TEMP_CELSIUS
if self._units == "F":
return TEMP_FAHRENHEIT
return self._units
class ZWaveAlarmSensor(ZWaveSensor):
"""Representation of a Z-Wave sensor that sends Alarm alerts.
Examples include certain Multisensors that have motion and vibration
capabilities. Z-Wave defines various alarm types such as Smoke, Flood,
Burglar, CarbonMonoxide, etc.
This wraps these alarms and allows you to use them to trigger things, etc.
COMMAND_CLASS_ALARM is what we get here.
"""
class ZWaveBatterySensor(ZWaveSensor):
"""Representation of Z-Wave device battery level."""
@property
def device_class(self):
"""Return the class of this device."""
return DEVICE_CLASS_BATTERY
|
wathen/PhD
|
MHD/FEniCS/ShiftCurlCurl/saddle.py
|
Python
|
mit
| 5,740
| 0.022997
|
#!/usr/bin/python
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import scipy.sparse as sps
import scipy.sparse.linalg as slinalg
import os
import scipy.io
import PETScIO as IO
import MatrixOperations as MO
def StoreMatrix(A,name):
test ="".join([name,".mat"])
scipy.io.savemat( test, {name: A},oned_as='row')
parameters['num_threads'] = 10
m = 6
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
# set_log_level(DEBUG)
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'Direct'
ShowResultPlots = 'yes'
ShowErrorPlots = 'no'
EigenProblem = 'no'
SavePrecond = 'no'
CheckMu = 'no'
case = 4
parameters['linear_algebra_backend'] = 'uBLAS'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
nn = 2**(xx)/2
if (CheckMu == 'yes'):
if (xx != 1):
MU[xx-1] = MU[xx-2]/10
else:
if (xx != 1):
MU[xx-1] = MU[xx-2]
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
parameters["form_compiler"]["quadrature_degree"] = 3
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["representation"] = 'quadrature'
# mesh = BoxMesh(-1,-1,-1,1, 1, 1, nn, nn, nn)
mesh = UnitCubeMesh(nn,nn,nn)
parameters['reorder_dofs_serial'] = False
V = FunctionSpace(mesh, "N1curl",2)
Q = FunctionSpace(mesh, "CG",2)
Vdim[xx-1] = V.dim()
print "\n\n\n V-dim", V.dim()
def boundary(x, on_boundary):
return on_boundary
if case == 1:
u0 = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)","0"))
elif case == 2:
u0 = Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
elif case == 3:
u0 = Expression(("x[0]*x[0]*(x[0]-1)","x[1]*x[1]*(x[1]-1)","0"))
elif case == 4:
u0 = Expression(("x[0]*x[1]*x[2]*(x[0]-1)","x[0]*x[1]*x[2]*(x[1]-1)","x[0]*x[1]*x[2]*(x[2]-1)"))
bcs = DirichletBC(V,u0, boundary)
# (u1) = TrialFunctions(V)
# (v1) = TestFunctions(V)
c = .5
if case == 1:
# f= Expression(("(8*pow(pi,2)-C)*sin(2*pi*x[1])*cos(2*pi*x[0])","-(8*pow(pi,2)-C)*sin(2*pi*x[0])*cos(2*pi*x[1])"),C = c)
f = Expression(("-6*x[1]+2","-6*x[0]+2"))+c*u0
elif case == 2:
f = 8*pow(pi,2)*u0+c*u0
elif case == 3:
f = Expression(("0","0","0"),C = c)
f = c*u0
elif case == 4:
f = Expression(("x[2]*(2*x[1]-1)+x[1]*(2*x[2]-1)","x[0]*(2*x[2]-1)+x[2]*(2*x[0]-1)","x[1]*(2*x[0]-1)+x[0]*(2*x[1]-1)"))+c*u0
(u) = TrialFunction(V)
(v) = TestFunction(V)
a = dot(curl(u),curl(v))*dx+c*inner(u, v)*dx
L1 = inner(v, f)*dx
tic()
AA, bb = assemble_system(a, L1, bcs)
As = AA.sparray()
StoreMatrix(As,'A')
A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data))
# exit
# A = as_backend_type(AA).mat()
print toc()
b = bb.array()
zeros = 0*b
x = IO.arrayToVec(zeros)
bb = IO.arrayToVec(b)
if (Solving == 'Direct'):
ksp = PETSc.KSP().create()
ksp.setOperators(A)
ksp.setFromOptions()
ksp.setType(ksp.Type.PREONLY)
ksp.pc.setType(ksp.pc.Type.LU)
# print 'Solving with:', ksp.getType()
# Solve!
tic()
ksp.solve(bb, x)
SolTime[xx-1] = toc()
print "time to solve: ",SolTime[xx-1]
del AA
if (Solving == 'Iterative' or Solving == 'Direct'):
if case == 1:
ue = Expression(("x[1]*x[1]*(x[1]-1)","x[0]*x[0]*(x[0]-1)"))
elif case == 2:
ue = Expression(("sin(2*pi*x[1])*cos(2*pi*x[0])","-sin(2*pi*x[0])*cos(2*pi*x[1])"))
elif case == 3:
ue=u0
elif case == 4:
ue=u0
Ve = FunctionSpace(mesh, "N1curl",4)
u = interpolate(ue,Ve)
Nv = u.vector().array().shape
X = IO.vecToArray(x)
x = X[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x
parameters["form_compiler"]["quadrature_degree"] = 4
parameters["form_compiler"]["optimize"] = True
ErrorB = Function(V)
ErrorB.vector()[:] = interpolate(ue,V).vector().array()-ua.vector().array()
errL2b[xx-1] = sqrt(assemble(inner(ErrorB, ErrorB)*dx))
errCurlb[xx-1] = sqrt(assemble(inner(curl(ErrorB), curl(ErrorB))*dx))
if xx == 1:
a = 1
else:
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
print errL2b[xx-1]
print errCurlb[xx-1]
import pandas as pd
print "\n\n Magnetic convergence"
MagneticTitles = ["Total DoF","Soln Time","B-L2","B-order","B-Curl","Curl-order"]
MagneticValues = np.concatenate((Vdim,SolTime,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
print MagneticTable
if (SavePrecond == 'yes'):
scipy.io.savemat('eigenvalues/Wdim.mat', {'Wdim':Wdim-1},oned_as = 'row')
if (ShowResultPlots == 'yes'):
plot(ua)
plot(interpolate(ue,V))
interactive()
|
jchodera/MSMs
|
jchodera/src-11401/pyemma/cluster.py
|
Python
|
gpl-2.0
| 3,309
| 0.012088
|
#!/usr/bin/env python
import pyemma
import numpy as np
import mdtraj
import time
import os
# Source directory
source_directory = '/cbio/jclab/projects/fah/fah-data/munged3/no-solvent/11401' # Src ensembler
################################################################################
# Load reference topology
################################################################################
print ('loading reference topology...')
reference_pdb_filename = 'protein.pdb'
reference_trajectory = os.path.join(source_directory, 'run0-clone0.h5')
traj = mdtraj.load(reference_trajectory)
traj[0].save_pdb(reference_pdb_filename)
################################################################################
# Initialize featurizer
################################################################################
print('Initializing featurizer...')
import pyemma.coordinates
featurizer = pyemma.coordinates.featurizer(reference_pdb_filename)
#featurizer.add_all() # all atoms
featurizer.add_selection( featurizer.select_Backbone() )
print('Featurizer has %d features.' % featurizer.dimension())
################################################################################
# Define coordinates source
################################################################################
nskip = 40 # number of initial frames to skip
import pyemma.coordinates
from glob import glob
trajectory_filenames = glob(os.path.join(source_directory, 'run*-clone*.h5'))
coordinates_source = pyemma.coordinates.source(trajectory_filenames, features=featurizer)
print("There are %d frames total in %d trajectories." % (coordinates_source.n_frames_total(), coordinates_source.number_of_trajectories()))
################################################################################
# Cluster
################################################################################
print('Clustering...')
generator_ratio = 250
nframes = coordinates_source.n_frames_total()
nstates = int(nframes / generator_ratio)
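# Illustrative arithmetic only (numbers are made up): with 1,000,000 total
# frames and generator_ratio = 250, this yields nstates = 4000 cluster centers.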
stride = 1
metric = 'minRMSD'
initial_time = time.time()
clustering = pyemma.coordinates.cluster_uniform_time(data=coordinates_source, k=nstates, stride=stride, metric=metric)
#clustering = pyemma.coordinates.cluster_kmeans(data=coordinates_source, k=nstates, stride=stride, metric=metric, max_iter=10)
#clustering = pyemma.coordinates.cluster_mini_batch_kmeans(data=coordinates_source, batch_size=0.1, k=nstates, stride=stride, metric=metric, max_iter=10)
final_time = time.time()
elapsed_time = final_time - initial_time
print('Elapsed time %.3f s' % elapsed_time)
# Save cluster centers
np.save('clustercenters', clustering.clustercenters)
# Save discrete trajectories.
dtrajs = clustering.dtrajs
dtrajs_dir = 'dtrajs'
clustering.save_dtrajs(output_dir=dtrajs_dir, output_format='npy', extension='.npy')
################################################################################
# Make timescale plots
################################################################################
import matplotlib as mpl
mpl.use('Agg') # Don't use display
import matplotlib.pyplot as plt
from pyemma import msm
from pyemma import plots
lags = [1,2,5,10,20,50]
#its = msm.its(dtrajs, lags=lags, errors='bayes')
its = msm.its(dtrajs, lags=lags)
plots.plot_implied_timescales(its)
plt.savefig('plot.pdf')
|
croepha/django-filer
|
filer/tests/helpers.py
|
Python
|
mit
| 1,697
| 0.007661
|
#-*- coding: utf-8 -*-
from PIL import Image, ImageChops, ImageDraw
from django.contrib.auth.models import User
from filer.models.foldermodels import Folder
from filer.models.clipboardmodels import Clipboard, ClipboardItem
def create_superuser():
superuser = User.objects.create_superuser('admin',
'admin@free.fr',
'secret')
return superuser
def create_folder_structure(depth=2, sibling=2, parent=None):
"""
This method creates a folder structure of the specified depth.
* depth: is an integer (default=2)
* sibling: is an integer (default=2)
* parent: is the folder instance of the parent.
"""
if depth > 0 and sibling > 0:
depth_range = range(1, depth+1)
depth_range.reverse()
for d in depth_range:
for s in range(1,sibling+1):
name = "folder: %s -- %s" %(str(d), str(s))
folder = Folder(name=name, parent=parent)
|
folder.save()
create_folder_structure(depth=d-1, sibling=sibling, parent=folder)
def create_clipboard_item(user, file):
    clipboard, was_clipboard_created = Clipboard.objects.get_or_create(user=user)
clipboard_item = ClipboardItem(clipboard=clipboard, file=file)
return clipboard_item
def create_image(mode='RGB', size=(800, 600)):
image = Image.new(mode, size)
draw = ImageDraw.Draw(image)
x_bit, y_bit = size[0] // 10, size[1] // 10
draw.rectangle((x_bit, y_bit * 2, x_bit * 7, y_bit * 3), 'red')
draw.rectangle((x_bit * 2, y_bit, x_bit * 3, y_bit * 8), 'red')
return image
|
dipapaspyros/bdo_platform
|
aggregator/management/commands/compare_mongo_postgres_joins.py
|
Python
|
mit
| 13,096
| 0.003894
|
import json
import random
import time
import traceback
from optparse import make_option
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import connection
from aggregator.converters.random_cnv import RandomDataConverter
from aggregator.management.commands.generate_dataset import generate_dataset
from mongo_client import get_mongo_db
class Command(BaseCommand):
help = 'Compares Mongo & Postgres backends using random dataset generation'
option_list = BaseCommand.option_list + (
make_option(
"-s",
"--sizes",
dest="sizes",
help="Comma-separate the number of entries for dimensions <time,lat,lng>",
metavar="SIZES"
),
make_option(
"-t",
"--target",
dest="target",
help="Where data should be stored (either `postgres` or `mongo`)",
metavar="TARGET"
),
make_option(
"-N",
"--no-joins",
dest="no_joins",
action="store_true",
help="Skip all join queries",
metavar="NO_JOINS"
),
make_option(
"-n",
"--no-mongo-joins",
dest="no_mongo_joins",
action="store_true",
help="Skip join queries for mongo",
metavar="NO_MONGO_JOINS"
),
make_option(
"-i",
"--index",
dest="index",
action="store_true",
help="Add indices",
metavar="INDEX"
),
)
def prepare_queries(self, pg_datasets, mongo_datasets):
queries = [
# simple select/filter
{
'title': 'Simple select/filter',
'type': 'filter',
'postgres':
"""
SELECT * FROM (
SELECT <v1a>, <v2a>, <v3a>, value
FROM <t1>
WHERE <v1a> >= -9.8 AND <v1a> <= -9.6
) AS Q1
ORDER BY value
""",
'mongo': {
'collection': "<c1>",
'find': {
'lat': {'$gte': -9.8, '$lte': -9.6},
},
}
},
# paginated select/filter
{
'title': 'Paginated select/filter',
'type': 'filter',
'postgres':
"""
SELECT * FROM (
SELECT <v1a>, <v2a>, <v3a>, value
FROM <t1>
WHERE <v1a> >= -9.8 AND <v1a> <= -9.6
) AS Q1
ORDER BY value
LIMIT 10000000
OFFSET 20000000
""",
'mongo': {
'collection': "<c1>",
'find': {
'lat': {'$gte': -9.8, '$lte': -9.6},
},
'limit': 10000000,
'skip': 20000000
}
},
# strict join
{
'title': 'Value difference in exact location & time',
'type': 'join',
'postgres':
"""
SELECT * FROM (
SELECT <v1a>, <v2a>, <v3a>, (<t2>.value - <t1>.value) AS difv
FROM <t1>
JOIN <t2> ON <v1a>=<v1b> AND <v2a>=<v2b> AND <v3a>=<v3b>
) AS Q1
ORDER BY difv
""",
'mongo': {
'collection': "<c1>",
'aggregates': [
{
"$lookup":
{
"from": "<c2>",
"localField": "<v1>",
"foreignField": "<v1>",
"as": "c2"
}
}, {
"$unwind": "$c2"
}, {
"$project": {
'lat': 1,
'lng': 1,
'time': 1,
'isLatEqual': { "$eq" : [ "$lat", "$c2.lat" ] },
'isLngEqual': { "$eq" : [ "$lng", "$c2.lng" ] },
'isTimeEqual': { "$eq" : [ "$time", "$c2.time" ] },
'diff': {'$subtract': ["$value", "$c2.value"]},
},
},
{"$match": {'isLngEqual': True, 'isTimeEqual': True}},
{"$sort": {'diff': 1}},
]
}
},
{
'title': 'Value difference at the same time',
'type': 'join',
'postgres':
"""
SELECT * FROM (
SELECT <v1a>, <v2a>, <v3a>, (<t2>.value - <t1>.value) AS difv
FROM <t1>
JOIN <t2> ON <v3a>=<v3b>
) AS Q1
ORDER BY difv
""",
'mongo': {
'collection': "<c1>",
'aggregates': [
{
|
"$lookup":
{
"from": "<c2>",
"localField": "<v3>",
"foreignField": "<v3>",
"as": "c2"
}
}, {
|
"$unwind": "$c2"
}, {
"$project": {
'lat': 1,
'lng': 1,
'time': 1,
'diff': {'$subtract': ["$value", "$c2.value"]},
},
},
{"$sort": {'diff': 1}},
]
}
},
]
for query in queries:
# PG replacements
q = query['postgres']
for d_id in (['a', 'b'] if pg_datasets[1] else ['a']):
# replace table names
q = q.replace('<t%d>' % (['a', 'b'].index(d_id) + 1),
pg_datasets[['a', 'b'].index(d_id)].variables.get().data_table_name)
# replace column names
for dim_id in range(1, pg_datasets[['a', 'b'].index(d_id)].variables.get().dimensions.all().count() + 1):
q = q.replace('<v%d%s>' % (dim_id, d_id),
pg_datasets[['a', 'b'].index(d_id)].variables.get().dimensions.all()[dim_id - 1].data_column_name)
query['postgres'] = q
# MONGO replacements
q = json.dumps(query['mongo'])
c1 = get_mongo_db().variables.find_one({'dataset_id': mongo_datasets[0]})
if mongo_datasets[1]:
c2 = get_mongo_db().variables.find_one({'dataset_id': mongo_datasets[1]})
else:
c2 = {'name': ''}
q = q.replace('<c1>', c1['name']).replace('<c2>', c2['name'])
for idx, dim in enumerate(c1['dimensions']):
q = q.replace('<v%d>' % (idx + 1), dim)
query['mongo'] = json.loads(q)
return queries
def handle(self, *args, **options):
skip_mongo_joins = options['no_mongo_joins'] or False
index = options['index'] or False
skip_joins = options['no_joins'] or False
if skip_joins:
skip_mongo_joins = True
v_name = 'rnd_%s' % ''.join([str(random.choice(range(1, 10))) for _ in range(1, 5)])
# call for postgres
pd1, p_size, p_time = generate_dataset(target='postgres', variable=v_name + '_1',
|
gumblex/tg-chatdig
|
vendor/chinesename.py
|
Python
|
mit
| 6,353
| 0.008223
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import pickle
import random
import bisect
import operator
import functools
import itertools
from math import log
from .common_surnames import d as common_surnames
from .lookuptable import chrevlookup, pinyintrie, surnamerev
for py in tuple(chrevlookup.keys()):
for ch in range(len(py)):
frag = py[:ch+1]
if frag not in chrevlookup:
chrevlookup[frag] = ''
logtotal = log(sum(len(s) for s in chrevlookup.values()))
ig1 = operator.itemgetter(1)
phonetic_symbol = {
"ā": "a",
"á": "a",
"ǎ": "a",
"à": "a",
"ē": "e",
"é": "e",
"ě": "e",
"è": "e",
"ō": "o",
"ó": "o",
"ǒ": "o",
"ò": "o",
"ī": "i",
"í": "i",
"ǐ": "i",
"ì": "i",
"ū": "u",
"ú": "u",
"ǔ": "u",
"ù": "u",
"ü": "v",
"ǖ": "v",
"ǘ": "v",
"ǚ": "v",
"ǜ": "v",
"ń": "n",
"ň": "n",
"": "m"
}
def untone(text):
    # This is a limited version only for entities defined in xml_escape_table
for k, v in phonetic_symbol.items():
text = text.replace(k, v)
return text
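# Illustrative example only (not in the original file): untone('zhāng wěi')
# returns 'zhang wei', since each toned vowel maps to its bare letter above.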
class WeightedRandomGenerator(object):
def __init__(self, weights):
self.totals = list(itertools.accumulate(weights))
self.total = self.totals[-1]
def __iter__(self):
return self
def __next__(self):
rnd = random.random() * self.total
return bisect.bisect_right(self.totals, rnd)
def __call__(self):
return self.__next__()
def _pyword_tokenize(word):
DAG = {}
N = len(word)
for k in range(N):
tmplist = []
i = k
frag = word[k]
while i < N and frag in chrevlookup:
if chrevlookup[frag]:
tmplist.append(i)
i += 1
frag = word[k:i + 1]
if not tmplist:
tmplist.append(k)
DAG[k] = tmplist
route = {N: (0, 0)}
for idx in range(N - 1, -1, -1):
route[idx] = max((log(len(chrevlookup.get(word[idx:x + 1], '')) or 1) -
logtotal + route[x + 1][0], x) for x in DAG[idx])
result = []
x = 0
while x < N:
y = route[x][1] + 1
result.append(word[x:y])
x = y
return result
pytokenize = lambda s: list(itertools.chain.from_iterable(_pyword_tokenize(w) for w in s.replace("'", ' ').lower().split()))
surnamesortkey = lambda n: -common_surnames.get(n, 0.00001)
class NameModel(object):
def __init__(self, modelname):
with open(modelname, 'rb') as f:
self.firstchar, self.secondchar = pickle.load(f)
del self.secondchar['']
self.snlst, snprb = tuple(zip(*common_surnames.items()))
self.fclst, fcprb = tuple(zip(*self.firstchar.items()))
self.sclst, scprb = tuple(zip(*self.secondchar.items()))
self.sngen = WeightedRandomGenerator(snprb)
self.fcgen = WeightedRandomGenerator(fcprb)
self.scgen = WeightedRandomGenerator(scprb)
initlookup = functools.lru_cache(maxsize=10)(lambda self, ch: ''.join(set(''.join(chrevlookup[p] for p in pinyintrie.get(ch)))) if ch in pinyintrie else ch)
lookupsurname = lambda self, pychars: ((list(itertools.chain.from_iterable(surnamerev.get(p, ()) for p in pinyintrie[pychars[0]])) if pychars[0] in pinyintrie else [pychars[0]]) if len(pychars) == 1 and len(pychars[0]) == 1 else surnamerev.get(' '.join(pychars), []))
lookupchar = lambda self, ch: (self.initlookup(ch) if len(ch) == 1 else (chrevlookup.get(ch) or self.initlookup(ch[0])))
fullnamesortkey = lambda self, n: -common_surnames.get(n[0], 0.00001)*self.firstchar.get(n[1])*self.secondchar.get(n[2:])
namesortkey = lambda self, n: -self.firstchar.get(n[0])*self.secondchar.get(n[1:])
def splitname(self, romanization):
words = romanization.split()
tok = name = pytokenize(romanization)
if not name:
return [], []
if len(words) == 1:
words = name
surnames = self.lookupsurname(pytokenize(words[0]))
name = pytokenize(' '.join(words[1:]))
if not surnames:
surnames = self.lookupsurname(pytokenize(words[-1]))
name = pytokenize(' '.join(words[:-1]))
if len(words) > 2 and not surnames:
surnames = self.lookupsurname(pytokenize(' '.join(words[:2])))
name = pytokenize(' '.join(words[2:]))
if surnames:
surnames = sorted(frozenset(surnames), key=surnamesortkey)
else:
name = tok
return surnames, name
def selectname(self, name, num=10):
if not name:
return []
evalnum = int(num ** (1/len(name))) + 1
namechars = [sorted(filter(ig1, ((n, self.firstchar.get(n, 1e-10 if 0x4E00 <= ord(n) < 0x9FCD else 0)) for n in self.lookupchar(name[0]))), key=ig1, reverse=1)]
namechars.extend(sorted(filter(ig1, ((n, self.secondchar.get(n, 1e-10 if 0x4E00 <= ord(n) < 0x9FCD else 0)) for n in self.lookupchar(l))), key=ig1, reverse=1)[:evalnum] for l in name[1:])
namechars = list(filter(None, namechars))[:10]
if not namechars:
return []
candidates = []
for group in itertools.product(*namechars):
gz = tuple(zip(*group))
gname = ''.join(gz[0])
gfreq = functools.reduce(operator.mul, gz[1])
candidates.append((gname, gfreq))
candidates.sort(key=ig1, reverse=1)
return [x[0] for x in candidates][:num]
def processinput(self, userinput, num=10):
if not userinput:
return [], [self.snlst[self.sngen()] + self.fclst[self.fcgen()] + self.sclst[self.scgen()] for i in range(num)]
try:
surnames, names = self.splitname(untone(userinput).lower())
names = self.selectname(names, num=num)
if not names:
names = [self.fclst[self.fcgen()] + self.sclst[self.scgen()] for i in range(num)]
return surnames, names
except Exception:
raise
return [], []
def getname(self):
return self.snlst[self.sngen()] + self.fclst[self.fcgen()] + self.sclst[self.scgen()]
__call__ = getname
if __name__ == '__main__':
while 1:
nm = NameModel('namemodel.m')
fullname = nm.getname()
#if name not in names:
#print(fullname)
print(fullname)
|
yassen-itlabs/py-linux-traffic-control
|
tests/plugins_tests/test_netsim.py
|
Python
|
mit
| 2,138
| 0.003742
|
import unittest
from pyltc.plugins.simnet import SimNetPlugin
class TestNetSim(unittest.TestCase):
def test_configure_default(self):
netsim = SimNetPlugin()
        self.assertEqual([], netsim._args.upload)
self.assertEqual([], netsim._args.download)
self.assertEqual('lo', netsim._args.interface)
self.assertIsNone(netsim._args.ifbdevice)
self.assertFalse(netsim._args.clear)
self.assertFalse(netsim._args.verbose)
        self.assertFalse(netsim._args.clearonly_mode)
def test_configure(self):
netsim = SimNetPlugin()
netsim.configure(clear=True, verbose=True, interface='eth0', ifbdevice='ifb0')
self.assertEqual([], netsim._args.upload)
self.assertEqual([], netsim._args.download)
self.assertEqual('eth0', netsim._args.interface)
self.assertEqual('ifb0', netsim._args.ifbdevice)
self.assertTrue(netsim._args.clear)
self.assertTrue(netsim._args.verbose)
self.assertFalse(netsim._args.clearonly_mode)
def test_setup(self):
netsim = SimNetPlugin()
netsim.setup(upload=True, protocol="tcp", porttype="dport", range="5000", rate="512kbit")
self.assertEqual(['tcp:dport:5000:512kbit'], netsim._args.upload)
self.assertEqual([], netsim._args.download)
def test_setup_complex(self):
netsim = SimNetPlugin()
netsim.setup(upload=True, protocol="tcp", porttype="dport", range="5000", rate="512kbit")
netsim.setup(upload=True, protocol="udp", range="all", rate="1mbit", jitter="5%")
netsim.setup(download=True, protocol="tcp", porttype="sport", range="8000-8080", jitter="10%")
self.assertEqual(['tcp:dport:5000:512kbit', 'udp:all:1mbit:5%'], netsim._args.upload)
self.assertEqual(['tcp:sport:8000-8080:10%'], netsim._args.download)
def test_setup_assertion(self):
netsim = SimNetPlugin()
self.assertRaises(AssertionError, netsim.setup, upload=True, download=True)
self.assertRaises(AssertionError, netsim.setup, upload=False, download=False)
if __name__ == '__main__':
unittest.main()
|
llinmeng/PythonStudy
|
maiziedu/3-Pycharm-Study/maiziblog2/manage.py
|
Python
|
mit
| 253
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maiziblog2.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
timwu/pypcap
|
src/setup.py
|
Python
|
bsd-3-clause
| 673
| 0.026746
|
from distutils.core import setup
from distutils.extension import Extension
from distutils import util
from Pyrex.Distutils import build_ext
import os.path
# Hack to get around build_ext's inability to handle multiple
# libraries in its --libraries= argument.
libs = []
if util.get_platform() == 'win32':
libs = [ "wpcap", "iphlpapi" ]
else:
libs = [ "pcap" ]
pcap_extension = Extension( name="pcap",
                            sources=["pcap.pyx", "pcap_ex.c"],
libraries=libs
)
setup( name = "pypcap",
version = "1.1",
ext_modules=[pcap_extension],
cmdclass = {'build_ext' : build_ext}
)
|
HEPData/hepdata3
|
fixes/missing_record_ids.py
|
Python
|
gpl-2.0
| 2,910
| 0.002062
|
from datetime import datetime
from flask import current_app
from flask.cli import with_appcontext
from invenio_db import db
from hepdata.cli import fix
from hepdata.ext.elasticsearch.api import index_record_ids, push_data_keywords
from hepdata.modules.submission.models import HEPSubmission, DataSubmission
from hepdata.modules.records.utils.common import get_record_by_id
from hepdata.modules.records.utils.doi_minter import generate_doi_for_table
from hepdata.modules.records.utils.submission import finalise_datasubmission
@fix.command()
@with_appcontext
def create_missing_datasubmission_records():
# Get submissions with missing IDs
missing_submissions = DataSubmission.query \
.join(HEPSubmission, HEPSubmission.publication_recid == DataSubmission.publication_recid) \
.filter(
DataSubmission.associated_recid == None,
DataSubmission.publication_inspire_id == None,
DataSubmission.version == HEPSubmission.version,
HEPSubmission.overall_status == 'finished')
missing_submissions = missing_submissions.all()
if not missing_submissions:
print("No datasubmissions found with missing record or inspire ids.")
return
# Organise missing submissions by publication
submissions_by_publication = {}
for submission in missing_submissions:
if submission.publication_recid in submissions_by_publication:
submissions_by_publication[submission.publication_recid].append(submission)
else:
submissions_by_publication[submission.publication_recid] = [submission]
# Loop through each publication
for publication_recid, submissions in submissions_by_publication.items():
publication_record = get_record_by_id(publication_recid)
current_time = "{:%Y-%m-%d %H:%M:%S}".format(datetime.utcnow())
generated_record_ids = []
for submission in submissions:
# Finalise each data submission that does not have a record
finalise_datasubmission(current_time, {},
generated_record_ids,
publication_record, publication_recid,
submission,
submission.version)
            # Register the datasubmission's DOI
if not current_app.config.get('TESTING', False):
generate_doi_for_table.delay(submission.doi)
print(f"Generated DOI {submission.doi}")
else:
print(f"Would generate DOI {submission.doi}")
# finalise_datasubmission does not commit, so commit once for each publication
db.session.commit()
# Reindex the publication and its updated datasubmissions
        index_record_ids([publication_recid] + generated_record_ids)
push_data_keywords(pub_ids=[publication_recid])
|
elainenaomi/sciwonc-dataflow-examples
|
sbbd2016/experiments/4-mongodb-rp-3sh/9_workflow_full_10files_primary_3sh_noannot_with_proj_9s/calculateratio_0/CalculateRatioCpuMemory_0.py
|
Python
|
gpl-3.0
| 3,196
| 0.003129
|
#!/usr/bin/env python
"""
This activity will calculate the ratio between CPU request and Memory request by (job ID, task index, event type).
These fields are optional and could be null.
"""
# It will connect to DataStoreClient
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_TaskEvent_0
import ConfigDB_Calc_MaxMinCPU_0
import ConfigDB_Calc_AverageCPU_0
import ConfigDB_Calc_MedianCPU_0
import math
##################################################################
client_maxmincpu = DataStoreClient("mongodb", ConfigDB_Calc_MaxMinCPU_0)
data_maxmincpu = client_maxmincpu.getData()
if data_maxmincpu:
while True:
doc = data_maxmincpu.next()
if doc is None:
break;
print doc
max_cpu = doc['max cpu']
min_cpu = doc['min cpu']
##################################################################
client_mediancpu = DataStoreClient("mongodb", ConfigDB_Calc_MedianCPU_0)
data_mediancpu = client_mediancpu.getData()
if data_mediancpu:
while True:
doc = data_mediancpu.next()
if doc is None:
break;
print doc
median_cpu = doc['median cpu']
##################################################################
client_avgcpu = DataStoreClient("mongodb", ConfigDB_Calc_AverageCPU_0)
data_avgcpu = client_avgcpu.getData()
if data_avgcpu:
while True:
doc = data_avgcpu.next()
if doc is None:
break;
print doc
avg_cpu = doc['average cpu']
##################################################################
# task_events
client_task = DataStoreClient("mongodb", ConfigDB_TaskEvent_0)
data_task = client_task.getData() # return an array of docs (like a csv reader)
if(data_task):
# processing
while True:
doc = data_task.next()
if doc is None:
break;
#print doc
cpu = 0 if (not doc['CPU request']) else float(doc['CPU request'])
memory = 0 if not doc['memory request'] else float(doc['memory request'])
ratio = cpu/memory if (memory != 0) else None
newline = {}
newline['job ID'] = doc['job ID']
newline['task index'] = doc['task index']
newline['event type'] = doc['event type']
|
newline['time'] = doc['time']
newline['ratio cpu memory'] = ratio
if max_cpu and min_cpu:
if cpu == max_cpu:
                newline['max cpu'] = 'true'
else:
newline['max cpu'] = 'false'
if cpu == min_cpu:
newline['min cpu'] = 'true'
else:
newline['min cpu'] = 'false'
if avg_cpu:
if cpu == avg_cpu:
newline['avg cpu'] = 'equal'
elif cpu > avg_cpu:
newline['avg cpu'] = 'greater'
else:
newline['avg cpu'] = 'less'
if median_cpu:
if cpu == median_cpu:
newline['median cpu'] = 'equal'
elif cpu > median_cpu:
newline['median cpu'] = 'greater'
else:
newline['median cpu'] = 'less'
client_task.saveData(newline)
|
nedbat/zellij
|
zellij/path.py
|
Python
|
apache-2.0
| 8,958
| 0.001563
|
"""A zigzag path, a sequence of points."""
import collections
from .defuzz import Defuzzer
from .euclid import collinear, Point, Line, Segment, Bounds, EmptyBounds
from .postulates import adjacent_pairs, triples
class Path:
def __init__(self, points):
self.points = tuple(points)
def __repr__(self):
return f"<Path {list(self.points)}>"
def __eq__(self, other):
return self.points == other.points
def __hash__(self):
return hash(self.points)
def __lt__(self, other):
return self.points < other.points
def __len__(self):
return len(self.points)
def __iter__(self):
return iter(self.points)
def __getitem__(self, idx):
# Lots of code tries to get the endpoints by index. Allow that but
# nothing else.
assert idx in [0, -1]
return self.points[idx]
@property
def closed(self):
"""Does the path loop? Start and end are the same points."""
return self.points[0] == self.points[-1]
def length(self):
"""The euclidean distance along the path."""
return sum(p1.distance(p2) for p1, p2 in adjacent_pairs(self.points))
def ends(self):
yield self.points[0]
yield self.points[-1]
def bounds(self):
"""What is the `Bounds` for this path?"""
return Bounds.points(self.points)
def segments(self):
for p1, p2 in adjacent_pairs(self.points):
yield Segment(tuple(p1), tuple(p2))
def transform(self, xform):
"""Transform the Path through the affine `xform`."""
return Path(pt.transform(xform) for pt in self)
def any_collinear(self):
"""Are any of the parts of this path collinear?"""
return any(collinear(*them) for them in triples(self.points))
def clean(self):
"""Remove unneeded points from a path."""
if len(self.points) <= 2:
return self
# Points are unneeded if they are collinear with their neighbors.
new_points = []
if not self.closed:
new_points.append(self.points[0])
for a, b, c in triples(self.points):
if not collinear(a, b, c):
new_points.append(b)
if self.closed:
new_points.append(new_points[0])
else:
new_points.append(self.points[-1])
return Path(new_points)
def reversed(self):
return Path(self.points[::-1])
def draw(self, ctx, append=False, reverse=False):
points = self.points
if reverse:
points = points[::-1]
(ctx.line_to if append else ctx.move_to)(*points[0])
for pt in points[1:-1]:
ctx.line_to(*pt)
if self.closed:
ctx.close_path()
else:
ctx.line_to(*points[-1])
def offset_path(self, offset):
lines = []
for p1, p2 in adjacent_pairs(self.points):
lines.append(Line(p1, p2).offset(offset))
points = []
if self.closed:
p0 = lines[-1].intersect(lines[0])
points.append(p0)
else:
points.append(lines[0].p1)
for l1, l2 in adjacent_pairs(lines):
points.append(l1.intersect(l2))
if self.closed:
points.append(p0)
else:
points.append(lines[-1].p2)
return Path(points)
def defuzz(self, defuzz):
return Path([Point(*defuzz(pt)) for pt in self.points])
def perturb(self, jitter):
"""Jostle around all the points in the path."""
pts = self.points
if self.closed:
pts = pts[:-1]
pts = [pt.perturb(jitter) for pt in pts]
if self.closed:
pts.append(pts[0])
return Path(pts)
def penultimate(self, point):
"""The second-to-last point from whichever end ends with `point`."""
if self.points[0] == point:
return self.points[1]
else:
assert self.points[-1] == point
return self.points[-2]
def join(self, p2):
"""Join `self` and `p2` together by their common endpoint."""
p1 = self.points
p2 = p2.points
# Find the ends that are the same point. Rearrange p1 and p2 so that p1+p2
# is the join we need, and remove the duplicate point at p2[0].
if p1[-1] == p2[0]:
p2 = p2[1:]
elif p1[-1] == p2[-1]:
p2 = p2[-2::-1]
elif p1[0] == p2[-1]:
p1, p2 = p2, p1[1:]
elif p1[0] == p2[0]:
p1, p2 = p1[::-1], p2[1:]
else:
return None
# If the join would have a redundant point because of three collinear
# points in a row, then remove the middle point.
if collinear(p1[-2], p1[-1], p2[0]):
p1 = p1[:-1]
return Path(p1 + p2)
def trim(self, end, trimmers):
"""Trim one end of path where trimmers (paths) cross it."""
points = list(self.points)
seg = Segment(*points[[None, -2][end]:[2, None][end]])
cuts = [pt for t in trimmers for pt in seg_path_intersections(seg, t)]
if cuts:
cuts = seg.sort_along(cuts)
if end == 0:
points = [cuts[-1]] + points[1:]
else:
points = points[:-1] + [cuts[0]]
return Path(points)
else:
return self
def canonicalize(self):
"""Produce an equivalent canonical path."""
if self.closed:
points = list(self.points[:-1])
points = min((points[i:]+points[:i])[::s] for i in range(len(points)) for s in [1, -1])
points.append(points[0])
return Path(points)
else:
return Path(min(self.points, self.points[::-1]))
def defuzz_paths(paths):
defuzz = Defuzzer().defuzz
return [path.defuzz(defuzz) for path in paths]
def combine_paths(paths):
paths = defuzz_paths(paths)
pm = collections.defaultdict(list)
for path in paths:
for end in path.ends():
pm[end].append(path)
combined = []
used = set()
for path in paths:
if id(path) in used:
continue
for end in [0, -1]:
while True:
target = path[end]
possibilities = pm[target]
possibilities = [p for p in possibilities if id(p) not in used]
if not possibilities:
break
other = best_join(path, target, possibilities)
if other is not None:
used.add(id(path))
used.add(id(other))
path = path.join(other)
pm[path[0]].append(path)
pm[path[-1]].append(path)
else:
break
        used.add(id(path))
combined.append(path.clean())
return combined
def draw_paths(paths, ctx):
for path in paths:
path.draw(ctx)
def best_join(path, join_point, possibilities):
others = [p for p in possibilities if p != path]
# If there's only one other path, then join to that one.
if len(others) == 1:
return others[0]
# If there's more than one, find one we are collinear with.
path_pen = path.penultimate(join_point)
for other in others:
other_pen = other.penultimate(join_point)
if collinear(path_pen, join_point, other_pen):
return other
return None
def show_path(path):
if path is None:
return "None"
return f"Path[{path[0]}..{len(path)}..{path[-1]}]@{id(path)}"
def show_paths(paths):
ret = "[\n"
for path in paths:
ret += f" {show_path(path)}\n"
ret += "]"
return ret
def paths_bounds(paths):
"""Return the `Bounds` of the paths."""
bounds = EmptyBounds()
for path in paths:
bounds |= path.bounds()
return bounds
def clip_paths(paths, bounds):
"""Return the paths that overlap the bounds."""
return [path for path in paths if path.bounds().overlap(bounds)]
def equal_path(path1, path2):
    return path1.canonicalize() == path2.canonicalize()
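# Sketch of the idea behind Path.canonicalize() above: a closed loop of
# points has one canonical representative, the lexicographically smallest
# rotation in either direction, so equivalent loops compare equal. The
# helper below restates that on bare (x, y) tuples and is illustrative
# only; the real class works on euclid.Point objects.
def canonical_cycle(points):
    """Smallest rotation, in either direction, of a cycle of tuples."""
    pts = list(points)
    candidates = [
        (pts[i:] + pts[:i])[::s]
        for i in range(len(pts))
        for s in (1, -1)
    ]
    return min(candidates)
# canonical_cycle([(1, 0), (0, 0), (0, 1)]) and
# canonical_cycle([(0, 1), (1, 0), (0, 0)]) both give
# [(0, 0), (0, 1), (1, 0)].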
|
Boquete/activity-labyrinth
|
src/BaseThought.py
|
Python
|
gpl-2.0
| 13,410
| 0.03997
|
# BaseThought.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <DonScorgie@Blueyonder.co.uk>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk  # needed for the cursor constants used below
import utils
from gi.repository import Pango
import TextBufferMarkup
import UndoManager
UNDO_RESIZE = 0
UNDO_DRAW = 1
UNDO_ERASE = 2
MIN_SIZE = 20
DEFAULT_WIDTH = 100
DEFAULT_HEIGHT = 70
class BaseThought (GObject.GObject):
''' The basic class to derive other thoughts from. \
Instructions for creating derivative thought types are \
given as comments'''
# These are general signals. They are available to all thoughts to
# emit. If you emit other signals, the chances are they'll be ignored
    # by the MMapArea.  It's your responsibility to catch and handle them.
# All these signals are handled correctly by the MMapArea.
__gsignals__ = dict (select_thought = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_PYOBJECT,)),
update_view = (GObject.SignalFlags.RUN_LAST,
None,
()),
create_link = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_PYOBJECT,)),
title_changed = (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_STRING,)),
text_selection_changed = (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_INT, GObject.TYPE_INT, GObject.TYPE_STRING)),
change_mouse_cursor = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_INT,)),
update_links = (GObject.SignalFlags.RUN_LAST,
None,
()),
grab_focus = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_BOOLEAN,)),
update_attrs = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_BOOLEAN, GObject.TYPE_BOOLEAN, GObject.TYPE_BOOLEAN, Pango.FontDescription)))
# The first thing that should be called is this constructor
# It sets some basic properties of all thoughts and should be called
    # before you start doing your own thing with thoughts
# save: the save document passed into the derived constructor
# elem_type: a string representing the thought type (e.g. "image_thought")
def __init__ (self, save, elem_type, undo, background_color, foreground_color):
# Note: Once the thought has been successfully initialised (i.e. at the end
# of the constructor) you MUST set all_okay to True
# Otherwise, bad things will happen.
self.all_okay = False
super (BaseThought, self).__init__()
self.ul = self.lr = None
self.am_primary = False
self.am_selected = False
self.sensitive = 5
self.editing = False
self.identity = -1
self.index = 0
self.end_index = 0
self.text = ""
self.undo = undo
self.background_color = background_color
self.foreground_color = foreground_color
self.model_iter = None
extended_elem = save.createElement ("Extended")
self.extended_buffer = TextBufferMarkup.ExtendedBuffer (self.undo, extended_elem, save)
self.extended_buffer.set_text("")
self.extended_buffer.connect ("set_focus", self.focus_buffer)
self.extended_buffer.connect ("set_attrs", self.set_extended_attrs)
self.element = save.createElement (elem_type)
self.element.appendChild (extended_elem)
self.creating = True
    # These are self-explanatory.  You probably don't want to
# overwrite these methods, unless you have a very good reason
def get_save_element (self):
return self.element
def make_primary (self):
self.am_primary = True
def select (self):
self.am_selected = True
def unselect (self):
self.am_selected = False
def get_max_area (self):
if not self.ul or not self.lr:
return 999,999,-999,-999
return self.ul[0], self.ul[1], self.lr[0], self.lr[1]
    def okay (self):
return self.all_okay
def move_content_by (self, x, y):
pass
def move_by (self, x, y):
pass
def focus_buffer (self, buf):
self.emit ("select_thought", None)
self.emit ("grab_focus", True)
def set_extended_attrs(self, buf, bold, underline, italics, pango_font):
self.emit("update_attrs", bold, underline, italics, pango_font)
def can_be_parent (self):
return True
# This, you may want to change. Though, doing so will only affect
# thoughts that are "parents"
def find_connection (self, other):
if not self.ul or not self.lr or not other.ul \
or not other.lr:
return None, None
if utils.use_bezier_curves:
if other.ul[0] > self.lr[0]:
xfrom = self.lr[0]
xto = other.ul[0]
else:
xfrom = self.ul[0]
xto = other.lr[0]
else:
xfrom = self.ul[0]-((self.ul[0]-self.lr[0]) / 2.)
xto = other.ul[0]-((other.ul[0]-other.lr[0]) / 2.)
yfrom = self.ul[1]-((self.ul[1]-self.lr[1]) / 2.)
yto = other.ul[1]-((other.ul[1]-other.lr[1]) / 2.)
return (xfrom, yfrom), (xto, yto)
    # All the rest of these should be handled within your thought
    # type, supposing you actually want to handle them.
    # You almost certainly do want to ;)
def process_button_down (self, event, transformed):
return False
def process_button_release (self, event, transformed):
return False
def process_key_press (self, event, mode):
return False
def handle_motion (self, event, transformed):
return False
def includes (self, coords):
pass
def draw (self, context):
pass
def load (self, node, tar):
pass
def update_save (self):
pass
def save (self, tar):
pass
def copy_text (self, clip):
pass
def cut_text (self, clip):
pass
def paste_text (self, clip):
pass
def export (self, context, move_x, move_y):
pass
def commit_text (self, im_context, string, mode):
pass
def recalc_edges (self):
pass
def delete_surroundings(self, imcontext, offset, n_chars, mode):
pass
def preedit_changed (self, imcontext, mode):
pass
def preedit_end (self, imcontext, mode):
pass
def preedit_start (self, imcontext, mode):
pass
def retrieve_surroundings (self, imcontext, mode):
pass
def set_bold (self, active):
pass
def inside (self, inside):
pass
def enter (self):
pass
def leave (self):
pass
RESIZE_NONE = 0
RESIZE_LEFT = 1
RESIZE_RIGHT = 2
RESIZE_TOP = 4
RESIZE_BOTTOM = 8
CURSOR = {}
CURSOR[RESIZE_LEFT] = Gdk.CursorType.LEFT_SIDE
CURSOR[RESIZE_RIGHT] = Gdk.CursorType.RIGHT_SIDE
CURSOR[RESIZE_TOP] = Gdk.CursorType.TOP_SIDE
CURSOR[RESIZE_BOTTOM] = Gdk.CursorType.BOTTOM_SIDE
CURSOR[RESIZE_LEFT|RESIZE_TOP] = Gdk.CursorType.TOP_LEFT_CORNER
CURSOR[RESIZE_LEFT|RESIZE_BOTTOM] = Gdk.CursorType.BOTTOM_LEFT_CORNER
CURSOR[RESIZE_RIGHT|RESIZE_TOP] = Gdk.CursorType.TOP_RIGHT_CORNER
CURSOR[RESIZE_RIGHT|RESIZE_BOTTOM] = Gdk.CursorType.BOTTOM_RIGHT_CORNER
class ResizableThought (BaseThought):
''' A resizable thought base class. This allows the sides and corners \
of the thought to be dragged around. It only provides the very basic \
functionality. Other stuff must be done within the derived classes'''
# Possible types of resizing - where the user selected to resize
def __init__ (self, coords, save, elem_type, undo, background_color, foreground_color):
super (ResizableThought, self).__init__(save, elem_type, undo, background_color, foreground_color)
self.resizing = RESIZE_NONE
self.button_down = False
self.orig_size = None
if coords:
margin = utils.margin_required (utils.STYLE_NORMAL)
self.ul = (coords[0]-margin[0], coords[1]-margin[1])
self.lr = (coords[0]+margin[2], coords[1]+margin[3])
self.width = 1
            self.height = 1
|
bohlian/frappe
|
frappe/model/sync.py
|
Python
|
mit
| 2,605
| 0.0238
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Sync's doctype and docfields from txt files to database
perms will get synced only if none exist
"""
import frappe
import os
from frappe.modules.import_file import import_file_by_path
from frappe.modules.patch_handler import block_user
from frappe.utils import update_progress_bar
def sync_all(force=0, verbose=False, reset_permissions=False):
block_user(True)
for app in frappe.get_installed_apps():
sync_for(app, force, verbose=verbose, reset_permissions=reset_permissions)
block_user(False)
frappe.clear_cache()
def sync_for(app_name, force=0, sync_everything = False, verbose=False, reset_permissions=False):
files = []
if app_name == "frappe":
# these need to go first at time of install
for d in (("core", "docfield"), ("core", "docperm"), ("core", "has_role"), ("core", "doctype"),
("core", "user"), ("core", "role"), ("custom", "custom_field"),
("custom", "property_setter"), ("website", "web_form"),
("website", "web_form_field"), ("website", "portal_menu_item")):
files.append(os.path.join(frappe.get_app_path("frappe"), d[0],
"doctype", d[1], d[1] + ".json"))
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = os.path.dirname(frappe.get_module(app_name + "." + module_name).__file__)
get_doc_files(files, folder, force, sync_everything, verbose=verbose)
l = len(files)
if l:
for i, doc_path in enumerate(files):
import_file_by_path(doc_path, force=force, ignore_version=True,
reset_permissions=reset_permissions,
for_sync=True)
#print module_name + ' | ' + doctype + ' | ' + name
frappe.db.commit()
# show progress bar
update_progress_bar("Updating DocTypes for {0}".format(app_name), i, l)
print()
def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False):
"""walk and sync all doctypes and pages"""
# load in sequence - warning for devs
document_types = ['doctype', 'page', 'report', 'print_format',
        'website_theme', 'web_form', 'email_alert', 'print_style',
'data_migration_mapping', 'data_migration_plan']
for doctype in document_types:
doctype_path = os.path.join(start_path, doctype)
if os.path.exists(doctype_path):
for docname in os.listdir(doctype_path):
if os.path.isdir(os.path.join(doctype_path, docname)):
doc_path = os.path.join(doctype_path, docname, docname) + ".json"
if os.path.exists(doc_path):
if not doc_path in files:
files.append(doc_path)
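# A minimal sketch of the on-disk convention get_doc_files() walks:
# documents live at <module>/<doctype>/<docname>/<docname>.json. The
# helper and example paths below are illustrative only (os is already
# imported above).
def find_doc_json(start_path, doctype):
    """Yield every <doctype>/<docname>/<docname>.json under start_path."""
    doctype_path = os.path.join(start_path, doctype)
    if not os.path.exists(doctype_path):
        return
    for docname in os.listdir(doctype_path):
        doc_path = os.path.join(doctype_path, docname, docname + ".json")
        if os.path.isdir(os.path.join(doctype_path, docname)) and os.path.exists(doc_path):
            yield doc_path
# e.g. list(find_doc_json("my_app/my_module", "doctype")) could yield
# ["my_app/my_module/doctype/customer/customer.json"].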
|
rajashreer7/autotest-client-tests
|
linux-tools/perl_WWW_RobotRules/perl_WWW_RobotRules.py
|
Python
|
gpl-2.0
| 1,298
| 0.005393
|
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class perl_WWW_RobotRules(test.test):
"""
Autotest module for testing basic functionality
of perl_WWW_RobotRules
@author Hariharan T.S. <harihare@in.ibm.com> ##
"""
version = 1
nfail = 0
path = ''
    def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
        Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./perl-WWW-RobotRules.sh'], cwd="%s/perl_WWW_RobotRules" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
chaos-soft/chocola
|
files/admin.py
|
Python
|
mit
| 479
| 0
|
from django.contrib import admin
from .models import File, Link
from .forms import FileForm
class FileAdmin(admin.ModelAdmin):
list_display = ('id', 'md5', 'file', 'size')
    list_per_page = 100
list_display_links = ('md5',)
form = FileForm
class LinkAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'file', 'user')
list_per_page = 100
list_display_links = ('name',)
admin.site.register(File, FileAdmin)
admin.site.register(Link, LinkAdmin)
|
sushengyang/Data-Science-45min-Intros
|
python-oop/life/__init__.py
|
Python
|
unlicense
| 57
| 0.035088
|
__all__ = [
"beast"
, "human"
]
|
hkariti/ansible
|
lib/ansible/modules/network/vyos/vyos_banner.py
|
Python
|
gpl-3.0
| 5,186
| 0.001928
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_banner
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage multiline banners on VyOS devices
description:
- This will configure both pre-login and post-login banners on remote
    devices running VyOS. It allows playbooks to add or remove
banner text from the active running configuration.
notes:
- Tested against VYOS 1.1.7
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device.
required: true
default: null
choices: ['pre-login', 'post-login']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is present in the current
devices active running configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: configure the pre-login banner
vyos_banner:
banner: pre-login
text: |
this is my pre-login banner
that contains a multiline
string
state: present
- name: remove the post-login banner
vyos_banner:
banner: post-login
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner pre-login
- this is my pre-login banner
- that contains a multiline
- string
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def spec_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
if state == 'absent':
if have.get('state') != 'absent' or (have.get('state') != 'absent' and
'text' in have.keys() and have['text']):
commands.append('delete system login banner %s' % module.params['banner'])
elif state == 'present':
if want['text'] and want['text'].encode().decode('unicode_escape') != have.get('text'):
banner_cmd = 'set system login banner %s ' % module.params['banner']
banner_cmd += want['text'].strip()
commands.append(banner_cmd)
return commands
def config_to_dict(module):
data = get_config(module)
output = None
obj = {'banner': module.params['banner'], 'state': 'absent'}
for line in data.split('\n'):
if line.startswith('set system login banner %s' % obj['banner']):
            match = re.findall(r'%s (.*)' % obj['banner'], line, re.M)
output = match
if output:
obj['text'] = output[0].encode().decode('unicode_escape')
obj['state'] = 'present'
return obj
def map_params_to_obj(module):
text = module.params['text']
if text:
text = "%r" % (str(text).strip())
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['pre-login', 'post-login']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(vyos_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
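# A rough trace of spec_to_commands() above, with a stub standing in for
# the AnsibleModule object (illustrative only; this is not how Ansible
# invokes the module and the helper is never called here).
def _example_spec_to_commands():
    class _StubModule(object):
        params = {'banner': 'pre-login', 'state': 'present'}
    want = {'banner': 'pre-login', 'text': "'welcome'", 'state': 'present'}
    have = {'banner': 'pre-login', 'state': 'absent'}
    # Returns ["set system login banner pre-login 'welcome'"]; with
    # state 'absent' and a banner still configured on the device it
    # would instead return the matching delete command.
    return spec_to_commands((want, have), _StubModule())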
|
cjaymes/pyscap
|
src/scap/model/ocil_2_0/QuestionResultsType.py
|
Python
|
gpl-3.0
| 1,472
| 0.003397
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class QuestionResultsType(Model):
MODEL_MAP = {
'elements': [
# TODO: at least one of *_question_result
{'tag_name': 'boolean_question_result', 'list': 'question_results', 'class': 'BooleanQuestionResultType', 'min': 0, 'max': None},
{'tag_name': 'choice_question_result', 'list': 'question_results', 'class': 'ChoiceQuestionResultType', 'min': 0, 'max': None},
{'tag_name': 'numeric_question_result', 'list': 'question_results', 'class': 'NumericQuestionResultType', 'min': 0, 'max': None},
{'tag_name': 'string_question_result', 'list': 'question_results', 'class': 'StringQuestionResultType', 'min': 0, 'max': None},
],
}
|
nioo-knaw/hydra
|
uparse_scripts/die.py
|
Python
|
mit
| 446
| 0.042601
|
import sys
import traceback
def Die(Msg):
print >> sys.stderr
print >> sys.stderr
traceback.print_stack()
s = ""
    for i in range(0, len(sys.argv)):
if i > 0:
s += " "
s += sys.argv[i]
print >> sys.stderr, s
print >> sys.stderr, "**ERROR**", Msg
print >> sys.stderr
print >> sys.stderr
sys.exit(1)
print "NOTHERE!!"
def Warning(Msg):
print >> sys.stderr
print >> sys.stderr, sys.argv
print >> sys.stderr, "**WARNING**", Msg
|
datamade/large-lots
|
lots_admin/migrations/0022_auto_20160927_1051.py
|
Python
|
mit
| 462
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-27 15:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lots_admin', '0021_auto_20160927_0941'),
]
operations = [
migrations.AlterField(
model_name='address',
name='ward',
            field=models.CharField(max_length=10, null=True),
),
]
|
mjs7231/pkmeter
|
pkm/plugins/network.py
|
Python
|
bsd-3-clause
| 2,803
| 0.004638
|
# -*- coding: utf-8 -*-
"""
Network Plugin
Network usage and connections
"""
import os, netifaces, psutil, time
from pkm import utils, SHAREDIR
from pkm.decorators import never_raise, threaded_method
from pkm.plugin import BasePlugin, BaseConfig
from pkm.filters import register_filter
NAME = 'Network'
DEFAULT_IGNORES = 'lxc tun'
class Plugin(BasePlugin):
DEFAULT_INTERVAL = 1
@threaded_method
def enable(self):
self.nics = {}
self.ignores = self.pkmeter.config.get(self.namespace, 'ignores', '')
self.ignores = list(filter(None, self.ignores.split(' ')))
super(Plugin, self).enable()
@never_raise
def update(self):
for iface, newio in psutil.net_io_counters(True).items():
if not iface.startswith('lo'):
netinfo = netifaces.ifaddresses(iface)
                if netinfo.get(netifaces.AF_INET) and not self._is_ignored(iface):
newio = self._net_io_counters(newio)
newio['iface'] = iface
newio.update(netinfo[netifaces.AF_INET][0])
self._deltas(self.nics.get(iface,{}), newio)
self.nics[iface] = newio
elif iface in self.nics:
del self.nics[iface]
        self.data['nics'] = sorted(self.nics.values(), key=lambda n:n['iface'])
self.data['total'] = self._deltas(self.data.get('total',{}), self._net_io_counters())
super(Plugin, self).update()
def _is_ignored(self, iface):
if self.ignores:
for ignore in self.ignores:
if iface.startswith(ignore):
return True
return False
def _net_io_counters(self, io=None):
io = io or psutil.net_io_counters()
return {
'bytes_sent': io.bytes_sent,
'bytes_recv': io.bytes_recv,
'packets_sent': io.packets_sent,
'packets_recv': io.packets_recv,
'errin': io.errin,
'errout': io.errout,
'dropin': io.dropin,
'dropout': io.dropout,
}
def _deltas(self, previo, newio):
now = time.time()
tdelta = now - previo.get('updated',0)
for key in ['bytes_sent', 'bytes_recv']:
newio['%s_per_sec' % key] = int((newio[key] - previo.get(key,0)) / tdelta)
newio['updated'] = now
return newio
class Config(BaseConfig):
TEMPLATE = os.path.join(SHAREDIR, 'templates', 'network_config.html')
FIELDS = utils.Bunch(BaseConfig.FIELDS,
ignores = {'default':DEFAULT_IGNORES}
)
@register_filter()
def network_friendly_iface(iface):
iface = iface.replace('eth', 'Ethernet ')
iface = iface.replace('wlan', 'Wireless ')
iface = iface.replace(' 0', '')
return iface
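# A tiny self-contained sketch of the rate calculation _deltas() applies
# per interface: growth of a cumulative counter divided by the elapsed
# time between samples. The function name and numbers are illustrative
# only.
def bytes_per_second(prev_total, new_total, seconds_elapsed):
    # e.g. bytes_per_second(1000000, 1500000, 1.0) -> 500000
    return int((new_total - prev_total) / seconds_elapsed)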
|
globaltoken/globaltoken
|
test/functional/test_runner.py
|
Python
|
mit
| 23,006
| 0.003043
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_accounts.py',
'p2p_segwit.py',
'wallet_dump.py',
'rpc_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'wallet_import_rescan.py',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'feature_minchainwork.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_config_args.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'feature_bip9_softforks.py',
'p2p_feefilter.py',
'rpc_bind.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_globaltokend = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/b
|
tchaly-bethmaure/Emotes
|
script/script_tools/framework_file_generator.py
|
Python
|
gpl-2.0
| 1,143
| 0.013123
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Developed with python 2.7.3
import os
import sys
import tools
import json
print("The frame :")
name = raw_input("-> name of the framework ?")
kmin = float(raw_input("-> Minimum boundary ?"))
kmax = float(raw_input("-> Maximum boundary ?"))
precision = float(raw_input("-> Precision (graduation axe) ?"))
nb_agent_per_graduation = int(raw_input("-> Number of agent per graduation ?"))
print("\nThis script generates the population distribution automatically : nb_agent_per_graduation is mapped.")
print("\n(!) Note : it needs to be improved by following a law (gaussian law for instance), currently it only distributes uniformly.")
i=kmin
distribution = {}
while i < kmax+precision:
distribution[i] = nb_agent_per_graduation
i+= precision
i = round(i, tools.get_round_precision(precision)) # fix : python >> 0.2 * 0.4
#print json.dumps(distribution); exit()
o = open(name+".frmwrk",'w')
o.write("# A framework is described as above : \n# in following order,
|
we define min_boundary max_boundar
|
y precision frequences)\n")
o.write(json.dumps([kmin, kmax, precision, distribution]))
o.close()
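# A standalone sketch of the distribution built above: one entry of
# nb_agent_per_graduation for every graduation from kmin to kmax.
# Stepping with an integer counter (instead of repeated float addition)
# is an alternative way of avoiding the 0.2 + 0.4 drift that the
# rounding above works around. The function name is illustrative only.
def uniform_distribution(kmin, kmax, precision, agents_per_graduation):
    steps = int(round((kmax - kmin) / precision))
    return dict((round(kmin + n * precision, 10), agents_per_graduation)
                for n in range(steps + 1))
# uniform_distribution(0.0, 1.0, 0.5, 3) -> {0.0: 3, 0.5: 3, 1.0: 3}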
|
JordiCarreraVentura/spellchecker
|
lib/CategoryTree.py
|
Python
|
gpl-3.0
| 7,145
| 0.004479
|
import json
from collections import (
Counter,
defaultdict as deft
)
from copy import deepcopy as cp
# from cPickle import (
# dump as to_pickle,
# load as from_pickle
# )
from StringIO import StringIO
from TfIdfMatrix import TfIdfMatrix
from Tools import from_csv
class CategoryTree:
def __init__(self, categories_by_concept, terms,
categories, tfidf, max_depth=5, min_df=20
):
self.min_df = min_df
self.path_categories_by_concept = categories_by_concept
self.path_categories = categories
self.path_terms = terms
self.max_depth = max_depth
self.observed_category = deft(bool)
self.id_by_concept = dict([])
self.concept_by_id = dict([])
self.term_is_category = deft(bool)
self.parents_by_category = dict([])
self.parents_by_concept = deft(list)
self.id_by_term = dict([])
self.term_by_id = dict([])
self.has_parents = deft(bool)
self.tfidf = tfidf
self.pulling = set([])
self.vector_by_category = deft(Counter)
self.contributors_by_category = deft(set)
self.projected = Counter()
def build(self):
for i, c in enumerate(self.concept_by_id.values()):
self(c)
if not i % 100:
t = float(len(self.concept_by_id.keys()))
print i, int(t), round(i / t, 2)
# if i >= 5000:
# break
def dump(self):
# Simulate a file with StringIO
out = open('vector.dump.txt', 'wb')
for i, (_id, projections) in enumerate(self.projected.items()):
if not i % 100:
print i, len(self.projected.keys())
if not projections:
continue
features = [
(self.tfidf.word_by_id[wid], round(weight, 4))
for wid, weight in self.vector_by_category[_id].most_common()
if round(weight, 4)
]
record = (
_id,
self.concept_by_id[_id],
features
)
out.write('%s\n' % str(record))
out.close()
def __call__(self, category):
self.pulling = set([])
return self.__pull(None, 0, category, dict([]))
def __get_parents(self, _id):
parents = []
name = self.concept_by_id[_id]
if (
not self.observed_category[name] or
not self.observed_category[_id] or
not self.has_parents[_id]
):
return []
else:
for i in self.parents_by_category[_id]:
if not self.observed_category[i]:
continue
_name = self.concept_by_id[i]
parents.append(_name)
return set(parents) - self.pulling
def __pull(self, vector, depth, category, tree):
_id = self.id_by_concept[category]
if not self.pulling:
# print
# print
# print category, _id
# print [self.term_by_id[x] for x in self.contributors_by_category[_id]]
            # print self.vector_by_category[_id].most_common(20)
vector = self.vector_by_category[_id]
if not self.observed_category[category]:
return dict([])
parents = self.__get_parents(_id)
if not parents or depth >= self.max_depth:
tree[category] = dict([])
else:
subtree = dict([])
self.pulling.update(parents)
for parent in parents:
subtree = self.__pull(vector, depth + 1, parent, subtree)
tree[category] = subtree
self.__project(vector, tree)
return tree
def __project(self, vector, tree):
if not tree.keys():
return
else:
for key, subtree in tree.items():
_id = self.id_by_concept[key]
self.projected[_id] += 1
self.__add2vec(vector, _id)
self.__project(vector, subtree)
def __add2vec(self, vector, _id):
# for w, weight in vector.items():
# __id = self.tfidf.id_by_word[w]
for __id, weight in vector.items():
self.vector_by_category[_id][__id] += weight
def load(self):
self.__load_terms()
self.__load_categories()
self.__load_assignments()
def __load_categories(self):
for concept, _id in from_csv(self.path_categories):
_id = int(_id)
self.id_by_concept[concept] = _id
self.concept_by_id[_id] = concept
self.observed_category[_id] = True
self.observed_category[concept] = True
# print concept, _id, len(self.id_by_concept.keys())
# exit()
def __load_terms(self):
for term, _id in from_csv(self.path_terms):
_id = int(_id)
self.term_by_id[_id] = term
self.id_by_term[term] = _id
if not term.startswith('Category:'):
continue
self.term_is_category[term] = True
self.term_is_category[_id] = True
def __load_assignments(self):
for row in from_csv(self.path_categories_by_concept):
ints = [int(field) for field in row]
term_id = ints[0]
term = self.term_by_id[term_id]
if self.term_is_category[term_id] and \
self.observed_category[term]:
term = self.term_by_id[term_id]
cat_id = self.id_by_concept[term]
assignments = [i for i in ints[1:] if self.observed_category[i]]
self.parents_by_category[cat_id] = assignments
self.has_parents[cat_id] = True
else:
vector = self.tfidf.content(term_id)
assignments = [i for i in ints[1:] if self.observed_category[i]]
self.parents_by_concept[term_id] = assignments
for a_id in assignments:
for w, weight in vector:
if self.tfidf.df[w] < self.min_df:
continue
#print term, term_id, self.concept_by_id[a_id], w, self.vector_by_category[a_id][w], '\t+%f' % weight
self.vector_by_category[a_id][w] += weight
self.contributors_by_category[a_id].update([term_id])
if __name__ == '__main__':
import random
from random import shuffle as randomize
tfidf = TfIdfMatrix()
tfidf.load_features('bkp.big.out/vector.term.csv')
tfidf.load_distribution('bkp.big.out/vector.index.csv')
# tfidf.load_features('vector.term.csv')
# tfidf.load_distribution('vector.index.csv')
ctree = CategoryTree(
'bkp.big.out/category.index.csv',
'bkp.big.out/term.csv',
'bkp.big.out/category.csv',
# 'category.index.csv',
# 'term.csv',
# 'category.csv',
tfidf,
max_depth=1
)
ctree.load()
ctree.build()
ctree.dump()
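# A minimal sketch of the propagation step CategoryTree performs in
# __project()/__add2vec(): every ancestor category accumulates the
# term's feature weights. Category and feature names below are
# illustrative only (Counter is imported at the top of this file).
def propagate(term_vector, ancestor_categories, category_vectors):
    for category in ancestor_categories:
        for feature, weight in term_vector.items():
            category_vectors[category][feature] += weight
    return category_vectors
# vectors = {'Mammals': Counter(), 'Animals': Counter()}
# propagate({'fur': 0.7, 'milk': 0.3}, ['Mammals', 'Animals'], vectors)
# leaves both category vectors at Counter({'fur': 0.7, 'milk': 0.3}).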
|
mganeva/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/AbinsBasicTest.py
|
Python
|
gpl-3.0
| 10,112
| 0.003362
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid import logger
# noinspection PyUnresolvedReferences
from mantid.simpleapi import mtd, Abins, Scale, CompareWorkspaces, Load, DeleteWorkspace
from AbinsModules import AbinsConstants, AbinsTestHelpers
import numpy as np
class AbinsBasicTest(unittest.TestCase):
_si2 = "Si2-sc_Abins"
_squaricn = "squaricn_sum_Abins"
_ab_initio_program = "CASTEP"
_temperature = 10.0 # temperature 10 K
_scale = 1.0
_sample_form = "Powder"
_instrument_name = "TOSCA"
_atoms = "" # if no atoms are specified then all atoms are taken into account
_sum_contributions = True
# this is a string; once it is read it is converted internally to integer
_quantum_order_events_number = str(AbinsConstants.FUNDAMENTALS)
_cross_section_factor = "Incoherent"
_workspace_name = "output_workspace"
_tolerance = 0.0001
def tearDown(self):
AbinsTestHelpers.remove_output_files(list_of_names=["explicit", "default", "total", "squaricn_sum_Abins",
"squaricn_scale", "benzene_exp", "benzene_Abins",
"experimental"])
mtd.clear()
def test_wrong_input(self):
"""Test if the correct behaviour of algorithm in case input is not valid"""
# invalid CASTEP file missing: Number of branches 6 in the header file
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile="Si2-sc_wrong.phonon",
OutputWorkspace=self._workspace_name)
# wrong extension of phonon file in case of CASTEP
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile="Si2-sc.wrong_phonon",
OutputWorkspace=self._workspace_name)
# wrong extension of phonon file in case of CRYSTAL
self.assertRaises(RuntimeError, Abins, AbInitioProgram="CRYSTAL", VibrationalOrPhononFile="MgO.wrong_out",
OutputWorkspace=self._workspace_name)
# in case of molecular calculations AllKpointsGiven cannot be False
self.assertRaises(RuntimeError, Abins, AbInitioProgram="CRYSTAL",
VibrationalOrPhononFile="toluene_molecule_BasicAbins.out",
AllKpointsGiven=False, OutputWorkspace=self._workspace_name)
# no name for workspace
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._si2 + ".phonon",
TemperatureInKelvin=self._temperature)
# keyword total in the name of the workspace
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._si2 + ".phonon",
TemperatureInKelvin=self._temperature, OutputWorkspace=self._workspace_name + "total")
# negative temperature in K
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._si2 + ".phonon", TemperatureInKelvin=-1.0,
OutputWorkspace=self._workspace_name)
# negative scale
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._si2 + ".phonon", Scale=-0.2,
OutputWorkspace=self._workspace_name)
# test if intermediate results are consistent
def test_non_unique_atoms(self):
"""Test scenario in which a user specifies non unique atoms (for example in squaricn that would be "C,C,H").
In that case Abins should terminate and print a meaningful message.
"""
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._squaricn + ".phonon", Atoms="C,C,H",
OutputWorkspace=self._workspace_name)
def test_non_existing_atoms(self):
"""Test scenario in which a user requests to create workspaces for atoms which do not exist in the system.
In that case Abins should terminate and give a user a meaningful message about wrong atoms to analyse.
"""
        # In _squaricn there are no N atoms
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._squaricn + ".phonon", Atoms="N",
OutputWorkspace=self._workspace_name)
def test_scale(self):
"""
Test if scaling is correct.
@return:
"""
wrk_ref = Abins(AbInitioProgram=self._ab_initio_program,
VibrationalOrPhononFile=self._squaricn + ".phonon",
TemperatureInKelvin=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
Scale=self._scale,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace=self._squaricn + "_ref")
wrk = Abins(AbInitioProgram=self._ab_initio_program,
VibrationalOrPhononFile=self._squaricn + ".phonon",
TemperatureInKelvin=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
Scale=10,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace="squaricn_scale")
ref = Scale(wrk_ref, Factor=10)
(result, messages) = CompareWorkspaces(wrk, ref, Tolerance=self._tolerance)
self.assertEqual(result, True)
def test_exp(self):
"""
Tests if experimental data is loaded correctly.
@return:
"""
Abins(AbInitioProgram=self._ab_initio_program,
VibrationalOrPhononFile="benzene_Abins.phonon",
ExperimentalFile="benzene_Abins.dat",
TemperatureInKelvin=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
Scale=self._scale,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
ScaleByCrossSection=self._cross_section_factor,
OutputWorkspace="benzene_exp")
# load experimental data
Load(Filename="benzene.dat", OutputWorkspace="benzene_only_exp")
(result, messages) = CompareWorkspaces(Workspace1=mtd["experimental_wrk"],
Workspace2=mtd["benzene_only_exp"],
CheckAxes=False,
Tolerance=self._tolerance)
self.assertEqual(result, True)
def test_partial(self):
# By default workspaces for all atoms should be created. Test this default behaviour.
experimental_file = ""
wrk_ref = Abins(AbInitioProgram=self._ab_initio_program,
VibrationalOrPhononFile=self._squaricn + ".phonon",
ExperimentalFile=experimental_file,
TemperatureInKelvin=self._temperature,
SampleForm=self._sample_form,
Instrument=self._instrument_name,
Atoms=self._atoms,
Scale=self._scale,
SumContributions=self._sum_contributions,
QuantumOrderEventsNumber=self._quantum_order_events_number,
ScaleByCrossSection=self._cross_section_factor,
|
westurner/provis
|
setup.py
|
Python
|
bsd-3-clause
| 2,041
| 0.00294
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
datadir = os.path.dirname(__file__)
with open(os.path.join(datadir, 'README.rst')) as f:
readme = f.read()
with open(os.path.join(datadir, 'HISTORY.rst')) as f:
history = f.read().replace('.. :changelog:', '')
class PyTestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys
import subprocess
errno = subprocess.call([sys.executable, 'runtests.py', '-v'])
raise SystemExit(errno)
#data_files = [(path, [os.path.join(path, f) for f in files])
# for dir, dirs, files in os.walk(datadir)]
#print(data_files)
setup(
name='provis',
version='0.1.1',
description=(
'Infrastructure Provisioning Scripts, Configuration, and Tests'),
long_description=readme + '\n\n' + history,
author='Wes Turner',
author_email='wes@wrd.nu',
url='https://github.com/westurner/provis',
packages=[
'provis',
],
package_dir={'provis': 'provis'},
include_package_data=True,
#data_files = data_files,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='provis',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
    test_suite='tests',
    tests_require=['pytest', 'pytest-capturelog'],
cmdclass = {
'test': PyTestCommand,
},
)
|
lorin/umdinst
|
test/testrunprogram.py
|
Python
|
bsd-3-clause
| 1,972
| 0.022312
|
import unittest
import sys
import os
sys.path.append('bin')
from umdinst import wrap
class TestRunProgram(unittest.TestCase):
def setUp(self):
self.tempfilename = 'emptyfile' # This is in createfile.sh
self.failIf(os.path.exists(self.tempfilename))
# Find the "touch" program
if os.path.exists('/usr/bin/touch'):
self.touchprog = '/usr/bin/touch'
elif os.path.exists('/bin/touch'):
self.touchprog = '/bin/touch'
else:
raise ValueError, "Cannot locate the 'touch' program, which is needed for testing"
# Build a "failing" program, that just returns non-zero status
        status = os.system("gcc -o fail test/testsource/fail.c")
self.failIf(status!=0)
self.failprog = './fail'
# Build a "succeeding" program, that returns zero status
status = os.system("gcc -o success test/testsource/success.c")
        self.failIf(status!=0)
self.successprog = './success'
def tearDown(self):
if os.path.exists(self.tempfilename):
os.unlink(self.tempfilename)
def testRunWithArgs(self):
prog = self.touchprog
# Make sure the file doesn't exist
self.failIf(os.path.exists(self.tempfilename))
# Create a temporary file
args = [self.tempfilename]
wrap.run(prog,args)
self.failUnless(os.path.exists(self.tempfilename))
def testRunNoArgs(self):
# Run a program with no arguments
os.chmod('test/testsource/createfile.sh',0755)
s = wrap.run('test/testsource/createfile.sh',[])
self.failUnless(os.path.exists(self.tempfilename))
def testRunSuccess(self):
# Run a program that succeeds
s = wrap.run(self.successprog,[self.successprog])
self.failUnless(s)
def testRunFailure(self):
        # Run a program that fails and test for failure
s = wrap.run(self.failprog,[self.failprog])
self.failIf(s)
if __name__ == '__main__':
unittest.main()
|
iamthekyt/POS-System
|
src/controller.py
|
Python
|
gpl-3.0
| 5,417
| 0.006464
|
# -*- coding: utf-8 -*-
import kivy
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
from pos_system import POS, Item
from db import Database
from buttonex import ButtonEx
from loginscreen import LoginScreen
from logoutscreen import LogoutScreen
from baroptions import BarOptions
kivy.require('1.0.5')
# from kivy.config import Config
# Config.set('graphics', 'fullscreen', 'auto')
APPERTIZERS_NAME = 'Appetizers'
MEAT_NAME = 'Meat'
FISH_NAME = 'Fish'
VEGAN_NAME = 'Vegan'
DRINKS_NAME = 'Drinks'
DESSERT_NAME = 'Dessert'
class Controller(FloatLayout):
a_buylist = ObjectProperty()
a_articles = ObjectProperty()
a_baroptions = ObjectProperty()
a_price = ObjectProperty()
def __init__(self, **kwargs):
super(Controller, self).__init__(**kwargs)
self.pos_system = POS()
self.database = Database()
self.baroptions = BarOptions(pos_system=self.pos_system)
self.atual_menu = 0
self.menu_page = 0
self.user_login()
def register_logs(self, log_type):
self.database.register_logs(self.pos_system.get_user_id(), log_type)
def user_login(self):
LoginScreen(controller=self)
def user_logout(self, instance):
LogoutScreen(controller=self)
def load_main_window(self):
self.atual_menu = 0
op_n = 1
main_options = [APPERTIZERS_NAME, MEAT_NAME, FISH_NAME, VEGAN_NAME, DRINKS_NAME, DESSERT_NAME]
self.a_articles.clear_widgets()
for op in main_options:
button = Button(text=op)
button.bind(on_press=self.open_new_menu)
op_n = op_n + 1
self.a_articles.add_widget(button)
for a in range(op_n-1, 9):
self.a_articles.add_widget(Label(text=''))
self.bt_next.enabled = False
self.bt_previous.enabled = False
if self.pos_system.get_buy_list() is None:
self.bt_newlist.enabled = True
self.bt_clearlist.enabled = False
self.bt_finishlist.enabled = False
self.menu_page = 0
def load_bar_options(self):
self.a_baroptions.add_widget(Button(text='Close Session', on_press=self.user_logout))
self.bt_newlist = ButtonEx(text = 'New List', on_press=self.baroptions.start_new_buy_list)
self.a_baroptions.add_widget(self.bt_newlist)
self.bt_clearlist = ButtonEx(text = 'Clear List', on_press=self.baroptions.clear_buy_list)
self.a_baroptions.add_widget(self.bt_clearlist)
self.bt_finishlist = ButtonEx(text = 'Finish List', on_press=self.baroptions.finish_buy_list)
self.a_baroptions.add_widget(self.bt_finishlist)
self.bt_next = ButtonEx(text = 'Next', on_press = self.baroptions.next_page)
self.a_baroptions.add_widget(self.bt_next)
self.bt_previous = ButtonEx(text = 'Previous', on_press = self.baroptions.previous_page)
self.a_baroptions.add_widget(self.bt_previous)
self.a_baroptions.add_widget(Button(text = 'Menu', on_press = self.load_main_window))
def open_new_menu(self, instance):
op_n = 1
total_rows = 0
menu_type = ''
if instance != self.bt_next and instance != self.bt_previous:
menu_type = instance.text.lower()
cursor = self.database.load_articles(menu_type, self.menu_page)
self.a_articles.clear_widgets()
for row in cursor:
total_rows = total_rows + 1
if total_rows > 9:
break
button = Button(text=row[1])
button.bind(on_press=self.add_to_buy_list)
button.item = Item(id_p=row[0], name=row[1], price=row[2], tax=0.2)
op_n = op_n + 1
self.a_articles.add_widget(button)
for a in range(op_n-1, 9):
self.a_articles.add_widget(Label(text=''))
self.bt_next.enabled = (total_rows > 9)
self.bt_previous.enabled = (self.menu_page > 0)
def add_to_buy_list(self, instance):
if self.pos_system.get_buy_list() is None:
            popup = Popup(
title='No Buy List',
content=Label(text='You need to start a new list!'),
size_hint=(None, None),
size=(400, 100)
)
popup.open()
return
button = Button(text=instance.text, size_hint_y = None, height = 40)
button.bind(on_press=self.remove_from_buy_list)
self.a_buylist.add_widget(button)
self.pos_system.get_buy_list().add_item(instance.item)
self.update_total_price()
def remove_from_buy_list(self, instance):
self.a_buylist.remove_widget(instance)
self.pos_system.get_buy_list().remove_item(item_name = instance.text)
self.update_total_price()
def register_buy(self, instance):
self.database.register_buy(self.pos_system.get_buy_list(), 5533, self.pos_system.get_username())
self.clear_buy_list()
self.pos_system.close_buy_list();
self.bt_clearlist.enabled = False
self.bt_finishlist.enabled = False
self.bt_newlist.enabled = True
self.popup.dismiss()
def update_total_price(self):
self.a_price.label_price.text = str(self.pos_system.get_buy_list().get_total_price()) + '€'
|
smarbos/adopteitor-server
|
adopteitor_core/migrations/0011_auto_20170221_2157.py
|
Python
|
mit
| 368
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('adopteitor_core', '0010_ipn'),
]
operations = [
migrations.AlterModelOptions(
name='ipn',
options={'verbose_name_plural': 'IpnS'},
),
]
|
ostree/plaso
|
plaso/parsers/mcafeeav.py
|
Python
|
apache-2.0
| 5,046
| 0.005747
|
# -*- coding: utf-8 -*-
"""Parser for McAfee Anti-Virus Logs.
McAfee AV uses 4 logs to track when scans were run, when virus databases were
updated, and when files match the virus database."""
from plaso.events import text_events
from plaso.lib import errors
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
class McafeeAVEvent(text_events.TextEvent):
"""Convenience class for McAfee AV Log events """
DATA_TYPE = u'av:mcafee:accessprotectionlog'
def __init__(self, timestamp, offset, attributes):
"""Initializes a McAfee AV Log Event.
Args:
timestamp: the timestamp time value. The timestamp contains the
number of seconds since Jan 1, 1970 00:00:00 UTC.
offset: the offset of the attributes.
attributes: dict of elements from the AV log line.
"""
del attributes[u'time']
del attributes[u'date']
super(McafeeAVEvent, self).__init__(timestamp, offset, attributes)
self.full_path = attributes[u'filename']
class McafeeAccessProtectionParser(text_parser.TextCSVParser):
"""Parses the McAfee AV Access Protection Log."""
NAME = u'mcafee_protection'
DESCRIPTION = u'Parser for McAfee AV Access Protection log files.'
VALUE_SEPARATOR = b'\t'
# Define the columns of the McAfee AV Access Protection Log.
COLUMNS = [u'date', u'time', u'status', u'username', u'filename',
u'trigger_location', u'rule', u'action']
def _GetTimestamp(self, date, time, timezone):
"""Determines a timestamp from the time string.
The date and time are made up of two strings, the date and the time,
separated by a tab. The time is in local time. The month and day can
be either 1 or 2 characters long, e.g.: 7/30/2013\\t10:22:48 AM
Args:
date: the string representing the date.
time: the string representing the time.
timezone: timezone (instance of pytz.timezone) that the data and time
values represent.
Returns:
The timestamp time value. The timestamp contains the number of
microseconds since Jan 1, 1970 00:00:00 UTC or None if the time string
could not be parsed.
Raises:
TimestampError: if the timestamp is badly formed or unable to transfer
the supplied date and time into a timestamp.
"""
# TODO: check if this is correct, likely not date or not time
# is more accurate.
if not (date and time):
raise errors.TimestampError(
u'Unable to extract timestamp from McAfee AV logline.')
# TODO: Figure out how McAfee sets Day First and use that here.
# The in-file time format is '07/30/2013\t10:22:48 AM'.
try:
time_string = u'{0:s} {1:s}'.format(date, time)
except UnicodeDecodeError:
raise errors.TimestampError(u'Unable to form a timestamp string.')
return timelib.Timestamp.FromTimeString(time_string, timezone=timezone)
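# Illustrative only (standard library, not the plaso API): the documented
# in-file format can be parsed directly with datetime, e.g.
#   from datetime import datetime
#   datetime.strptime(u'7/30/2013 10:22:48 AM', u'%m/%d/%Y %I:%M:%S %p')
# which yields datetime(2013, 7, 30, 10, 22, 48) in local time.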
def VerifyRow(self, parser_mediator, row):
"""Verify that this is a McAfee AV Access Protection Log file.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
row: a single row from the CSV file.
Returns:
True if this is the correct parser, False otherwise.
"""
if len(row) != 8:
return False
# This file can have a UTF-8 byte-order-marker at the beginning of
# the first row.
# TODO: Find out all the code pages this can have. Asked McAfee 10/31.
if row[u'date'][0:3] == b'\xef\xbb\xbf':
row[u'date'] = row[u'date'][3:]
self.encoding = u'utf-8'
# Check the date format!
# If it doesn't parse, then this isn't a McAfee AV Access Protection Log
try:
timestamp = self._GetTimestamp(
row[u'date'], row[u'time'], parser_mediator.timezone)
except errors.TimestampError:
return False
if timestamp is None:
return False
# Use the presence of these strings as a backup or in case of partial file.
if (not u'Access Protection' in row[u'status'] and
not u'Would be blocked' in row[u'status']):
return False
return True
def ParseRow(self, parser_mediator, row_offset, row):
"""Parses a row and extract event objects.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
row_offset: the offset of the row.
row: a dictionary containing all the fields as denoted in the
COLUMNS class list.
"""
try:
timestamp = self._GetTimestamp(
row[u'date'], row[u'time'], parser_mediator.timezone)
except errors.TimestampError as exception:
parser_mediator.ProduceParseError(
u'Unable to parse time string: [{0:s} {1:s}] with error {2:s}'.format(
repr(row[u'date']), repr(row[u'time']), exception))
return
if timestamp is None:
return
event_object = McafeeAVEvent(timestamp, row_offset, row)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(McafeeAccessProtectionParser)
|
botswana-harvard/bhp065_project
|
bhp065/apps/hnscc_subject/admin/__init__.py
|
Python
|
gpl-2.0
| 263
| 0
|
from .main import HnsccVisitAdmin, HnsccOffStudyAdmin
from .enrollment_admin import EnrollmentAdmin
from .contemporary_admin import ContemporaryAdmin
# from .historical_admin import HistoricalAdmin
from .hnscc_off_study_model_admin import HnsccOffStudyModelAdmin
|
flagxor/rainbowforth
|
iconforth/iconforth.py
|
Python
|
gpl-3.0
| 17,696
| 0.010793
|
import datetime
import os
import pickle
import pngcanvas
import jinja2
import random
import re
import sys
import webapp2
import zlib
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=False)
class Word(db.Model):
description = db.BlobProperty()
created = db.DateTimeProperty(auto_now_add=True)
last_used = db.DateTimeProperty(auto_now_add=True)
author = db.UserProperty()
remote_addr = db.StringProperty()
user_agent = db.StringProperty()
version = db.IntegerProperty(default=1)
intrinsic = db.IntegerProperty(default=0)
definition = db.StringListProperty()
keywords = db.StringListProperty()
score = db.FloatProperty(default=0.0)
class WordIcon(db.Model):
icon = db.BlobProperty()
class WordSource(db.Model):
source = db.BlobProperty()
class WordExecutable(db.Model):
executable = db.BlobProperty()
colors = [
[0xff, 0xff, 0xff, 0xff],
[0xc0, 0xc0, 0xc0, 0xc0],
[0x00, 0x00, 0x00, 0xff],
[0xff, 0x00, 0x00, 0xff],
[0xff, 0xc0, 0x00, 0xff],
[0xff, 0xff, 0x00, 0xff],
[0x00, 0xff, 0x00, 0xff],
[0x00, 0xff, 0xff, 0xff],
[0x00, 0x00, 0xff, 0xff],
[0xff, 0x00, 0xff, 0xff],
];
# From http://github.com/DocSavage/bloog/blob/master/models/search.py
# Apache license.
STOP_WORDS = frozenset([
'a', 'about', 'according', 'accordingly', 'affected', 'affecting', 'after',
'again', 'against', 'all', 'almost', 'already', 'also', 'although',
'always', 'am', 'among', 'an', 'and', 'any', 'anyone', 'apparently', 'are',
'arise', 'as', 'aside', 'at', 'away', 'be', 'became', 'because', 'become',
'becomes', 'been', 'before', 'being', 'between', 'both', 'briefly', 'but',
'by', 'came', 'can', 'cannot', 'certain', 'certainly', 'could', 'did', 'do',
'does', 'done', 'during', 'each', 'either', 'else', 'etc', 'ever', 'every',
'following', 'for', 'found', 'from', 'further', 'gave', 'gets', 'give',
'given', 'giving', 'gone', 'got', 'had', 'hardly', 'has', 'have', 'having',
'here', 'how', 'however', 'i', 'if', 'in', 'into', 'is', 'it', 'itself',
'just', 'keep', 'kept', 'knowledge', 'largely', 'like', 'made', 'mainly',
'make', 'many', 'might', 'more', 'most', 'mostly', 'much', 'must', 'nearly',
'necessarily', 'neither', 'next', 'no', 'none', 'nor', 'normally', 'not',
'noted', 'now', 'obtain', 'obtained', 'of', 'often', 'on', 'only', 'or',
'other', 'our', 'out', 'owing', 'particularly', 'past', 'perhaps', 'please',
'poorly', 'possible', 'possibly', 'potentially', 'predominantly', 'present',
'previously', 'primarily', 'probably', 'prompt', 'promptly', 'put',
'quickly', 'quite', 'rather', 'readily', 'really', 'recently', 'regarding',
'regardless', 'relatively', 'respectively', 'resulted', 'resulting',
'results', 'said', 'same', 'seem', 'seen', 'several', 'shall', 'should',
'show', 'showed', 'shown', 'shows', 'significantly', 'similar', 'similarly',
'since', 'slightly', 'so', 'some', 'sometime', 'somewhat', 'soon',
'specifically', 'state', 'states', 'strongly', 'substantially',
'successfully', 'such', 'sufficiently', 'than', 'that', 'the', 'their',
'theirs', 'them', 'then', 'there', 'therefore', 'these', 'they', 'this',
'those', 'though', 'through', 'throughout', 'to', 'too', 'toward', 'under',
'unless', 'until', 'up', 'upon', 'use', 'used', 'usefully', 'usefulness',
'using', 'usually', 'various', 'very', 'was', 'we', 'were', 'what', 'when',
'where', 'whether', 'which', 'while', 'who', 'whose', 'why', 'widely',
'will', 'with', 'within', 'without', 'would', 'yet', 'you'])
def ValidKeyword(word):
return len(word) >= 3 and word not in STOP_WORDS
def FindKeywords(str):
ret = set()
word = ''
for ch in str:
if ((ch >= 'A' and ch <= 'Z') or
(ch >= 'a' and ch <= 'z') or
(ch >= '0' and ch <= '9')):
word += ch.lower()
else:
if ValidKeyword(word):
ret.add(word)
word = ''
if ValidKeyword(word):
ret.add(word)
return list(ret)
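# Example (illustrative): FindKeywords('Hello, world of Forth!') yields the
# keywords 'hello', 'world' and 'forth' in arbitrary order (a set is used);
# 'of' is dropped by ValidKeyword because it is shorter than three characters.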
def UpdateScore(key):
# Fetch it.
w = Word.get(key)
if w:
# Find users of this word.
query = db.GqlQuery('SELECT __key__ FROM Word '
'WHERE definition=:1', key)
use_count = query.count(1000)
# Update score and last used.
w.score = float(use_count)
w.last_used = datetime.datetime.now()
w.put()
def ChromeFrameMe(handler):
agent = handler.request.headers.get('USER_AGENT', '')
if agent.find('MSIE') >= 0 and agent.find('chromeframe') < 0:
template = JINJA_ENVIRONMENT.get_template('templates/chrome_frame.html')
handler.response.out.write(template.render({}))
return True
return False
class ReadWord(webapp2.RequestHandler):
def get(self):
if ChromeFrameMe(self): return
id = self.request.get('id', '')
w = Word.get(id)
if w:
# Find users of this word.
query = db.GqlQuery('SELECT __key__ FROM Word '
'WHERE definition=:1 '
'ORDER BY score DESC, last_used DESC', str(w.key()))
words_used = query.fetch(1000)
if not words_used:
words_used = []
# Decide if they can delete this.
can_delete = (users.is_current_user_admin() or
(users.get_current_user() is not None and
users.get_current_user() == w.author))
# Output info on word.
template = JINJA_ENVIRONMENT.get_template('templates/read.html')
self.response.out.write(template.render({
'id': id,
'description': w.description,
'definition': w.definition,
'created': str(w.created),
'last_used': str(w.last_used),
'words_used': words_used,
'can_delete': can_delete,
}))
else:
template = JINJA_ENVIRONMENT.get_template('templates/read_notfound.html')
self.response.out.write(template.render({}))
class ListWord(webapp2.RequestHandler):
def get(self):
# Get the source.
lookup_id = self.request.get('id', '')
try:
query = db.GqlQuery('SELECT * FROM WordSource WHERE ANCESTOR is :1',
lookup_id)
src = query.fetch(1)
except:
query = db.GqlQuery(
'SELECT * FROM WordSource WHERE ANCESTOR is :1',
db.Key.from_path(*db.Key(lookup_id).to_path()))
src = query.fetch(1)
if src:
source = DecodeSource(lookup_id, src[0].source)
else:
source = {}
# Handle raw output more directly.
if self.request.get('raw'):
self.response.headers['Content-Type'] = 'text/plain'
for w, d in source.iteritems():
dfn = ' '.join((str(i) for i in d))
self.response.out.write('%s %s\n' % (w, dfn))
return
# Display it in a sensible order.
results = []
pending_ids = [lookup_id]
needed_ids = set([lookup_id])
while pending_ids:
# Pick one.
id = pending_ids[0]
pending_ids = pending_ids[1:]
# Grab out its parts.
intrinsic = source[id][0]
definition = source[id][1:]
# Collect each word.
results.append({
'id': id,
'intrinsic': intrinsic,
'definition': definition,
})
# Add new words needed.
for w in definition:
if w not in needed_ids:
needed_ids.add(w)
pending_ids.append(w)
else:
template = JINJA_ENVIRONMENT.get_template('templates/list.html')
self.response.out.write(template.render({
'results': results,
}))
class RunWord(webapp2.RequestHandler):
def get(self):
if ChromeFrameMe(self): return
id = self.request.get('id', '')
# Get the executable.
query = db.GqlQuery('SELECT * FROM WordExecutable WHERE ANCESTOR is :1', id)
exe = query.fetch(1)
if exe:
template = JINJA_ENVIRONMENT.get_template('templates/run.html')
self.response.out.write(template.render({
'id': id,
'exe': ',' + exe[0].executable,
}))
else:
template = JINJA_ENVIRONMENT
|
TuDatTr/OkBot
|
main.py
|
Python
|
apache-2.0
| 10,102
| 0.001292
|
# requires:
# pip install discord.py
# pip install asyncio
# pip install bs4
# pip install imgurpython
# pip install youtube-dl
# pip install chatterbot
# put this (view raw) in the base directory for windows:
# https://github.com/Just-Some-Bots/MusicBot/blob/ea5e0daebd384ec8a14c9a585da399934e2a6252/libopus-0.x64.dll
import discord
import random
import asyncio
import requests
import config
from chatterbot import ChatBot
from bs4 import BeautifulSoup
from discord.ext import commands
from os import listdir
from imgurpython import ImgurClient
client = ImgurClient(config.client_id, config.client_secret)
discord.opus.load_opus("libopus-0.x64.dll")
# discord.opus.load_opus("/usr/lib/libopus.so")
description = '''An Ok-ish bot'''
bot = commands.Bot(command_prefix='?', description=description)
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': 'music/%(id)s.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
if not discord.opus.is_loaded():
# libopus-0.x64.dll is required to run voice
discord.opus.load_opus("libopus-0.x64.dll")
chatbot = ChatBot('OkBot',
input_adapter="chatterbot.input.VariableInputTypeAdapter",
logic_adapters=[
'chatterbot.logic.MathematicalEvaluation',
'chatterbot.logic.BestMatch'
],
trainer='chatterbot.trainers.ChatterBotCorpusTrainer')
DEFAULT_SESSION_ID = chatbot.default_session.id
chatting = 0
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
@bot.event
async def on_disconnect():
bot.connect()
@bot.command()
async def roll(dice: str):
"""Rolls a dice in NdN format."""
try:
rolls, limit = map(int, dice.split('d'))
except Exception:
await bot.say('Format has to be in NdN!')
return
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
await bot.say(result)
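# Example (illustrative): "?roll 2d6" splits into rolls=2 and limit=6, so the
# bot replies with two comma-separated values, each between 1 and 6.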
@bot.command()
async def joined(member : discord.Member):
"""Says when a member joined."""
await bot.say('{0.name} joined in {0.joined_at}'.format(member))
@bot.command(pass_context=True)
async def sb(ctx, sound, member: discord.Member = None):
"""Plays given Sound"""
if member is None:
member = ctx.message.author
channel = member.voice.voice_channel
voice = await bot.join_voice_channel(channel)
player = voice.create_ffmpeg_player("soundboard/"+sound+'.mp3', options="-af volume=-25dB")
player.start()
while not player.is_done():
await asyncio.sleep(0.002)
await voice.disconnect()
@bot.command(pass_context=True)
async def sblist(ctx, member: discord.Member = None):
"""Sends PM with available soundboard sounds"""
if member is None:
member = ctx.message.author
message = "Available Sounds: \n"
for f in listdir("soundboard/"):
message += (f.split(".")[0] + "\n")
await bot.send_message(member, message)
@bot.command(pass_context=True)
async def sbadd(ctx, member: discord.Member = None):
"""Adds Sound to soundboard"""
if member is None:
member = ctx.message.author
if member.server_permissions.administrator:
if ctx.message.attachments:
url = ctx.message.attachments[0]['url']
local_filename = "soundboard/" + url.split('/')[-1]
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
else:
await bot.say("no attachments")
else:
await bot.add_reaction(ctx.message, "😞")
@bot.command(pass_context=True)
async def play(ctx, url, member: discord.Member = None):
"""Streams given YouTube or Soundcloud URLs"""
if member is None:
member = ctx.message.author
game = discord.Game()
game.name = "Ok-ish Music"
await bot.change_presence(game=game)
channel = member.voice.voice_channel
await bot.add_reaction(ctx.message, "👌")
if bot.voice_client_in(member.server):
await bot.voice_client_in(member.server).disconnect()
voice = await bot.join_voice_channel(channel)
player = await voice.create_ytdl_player(url, ytdl_options=ydl_opts, options="-af volume=-20dB")
player.start()
while not player.is_done():
await asyncio.sleep(0.002)
await voice.disconnect()
@bot.command(pass_context=True)
async def monstercat(ctx, member: discord.Member = None):
"""Streams Monstercat from twitch"""
if member is None:
member = ctx.message.author
game = discord.Game()
game.name = "Monstercat"
await bot.change_presence(game=game)
channel = member.voice.voice_channel
await bot.add_reaction(ctx.message, "👌")
if bot.voice_client_in(member.server):
await bot.voice_client_in(member.server).disconnect()
voice = await bot.join_voice_channel(channel)
player = await voice.create_ytdl_player('https://www.twitch.tv/monstercat', ytdl_options=ydl_opts, options="-af volume=-25dB")
player.start()
@bot.command(pass_context=True)
async def leave(ctx, member: discord.Member = None):
"""Throws bot out of voice"""
if member is None:
member = ctx.message.author
if bot.voice_client_in(member.server):
await bot.voice_client_in(member.server).disconnect()
await bot.change_presence(game=None)
else:
await bot.add_reaction(ctx.message, "😞")
@bot.command()
async def otter():
"""Sends random otter picture from Imgur"""
items = client.gallery_tag("otter", sort='viral', page=0, window='year').items
item = items[random.randint(0, 59)]
while item.is_album:
item = items[random.randint(0, 59)]
if item.type == "image/gif":
await bot.say(item.gifv)
else:
await bot.say(item.link)
@bot.command()
async def cat():
"""Sends random cat picture from Imgur"""
items = client.gallery_tag("cat", sort='viral', page=0, window='year').items
item = items[random.randint(0, 59)]
while item.is_album:
item = items[random.randint(0, 59)]
if item.type == "image/gif":
await bot.say(item.gifv)
else:
await bot.say(item.link)
@bot.command()
async def dog():
"""Sends random dog picture from Imgur"""
items = client.gallery_tag("dog", sort='viral', page=0, window='year').items
item = items[random.randint(0, 59)]
while item.is_album:
item = items[random.randint(0, 59)]
if item.type == "image/gif":
await bot.say(item.gifv)
else:
await bot.say(item.link)
@bot.command()
async def panda():
"""Sends random dog picture from Imgur"""
items = client.gallery_tag("panda", sort='viral', page=0, window='year').items
item = items[random.randint(0, 20)]
while item.is_album:
item = items[random.randint(0, 20)]
if item.type == "image/gif":
await bot.say(item.gifv)
else:
await bot.say(item.link)
@bot.command()
async def fse():
"""Shows current statusm0n status of the FSE Uni Duisburg-Essen"""
data = requests.get("http://www.fse.uni-due.de/")
soup = BeautifulSoup(data.content, "lxml")
div = soup.find('span', id='statusm0nText')
await bot.say(''.join(map(str, div.contents)))
@bot.command(pass_context=True)
async def delall(ctx, member: discord.Member = None):
"""Deletes all (up to 100) last Messages in current channel"""
if member is None:
member = ctx.message.author
if member.server_permissions.manage_messages:
await bot.purge_from(ctx.message.channel, limit=100)
await bot.say("http://i.imgur.com/YkuLCd8.gifv")
else:
await bot.add_reaction(ctx.message, "😞")
@bot.command(pass_context=True)
async def delete(ctx, count, member: discord.Member = None):
"""Deletes a given number (up to 100) of the last Messages in current channel"""
if member is None:
member = ctx.message.author
if int(count)
|
analysiscenter/dataset
|
batchflow/models/tf/fcn.py
|
Python
|
apache-2.0
| 9,221
| 0.002061
|
"""
Shelhamer E. et al "`Fully Convolutional Networks for Semantic Segmentation
<https://arxiv.org/abs/1605.06211>`_"
"""
import tensorflow as tf
from . import TFModel, VGG16
from .layers import conv_block
class FCN(TFModel):
""" Base Fully convolutional network (FCN) """
@classmethod
def default_config(cls):
""" Define model defaults. See :meth: `~.TFModel.default_config` """
config = TFModel.default_config()
config['common/dropout_rate'] = .5
config['initial_block/base_network'] = VGG16
config['body/filters'] = 100
config['body/upsample'] = dict(layout='t', kernel_size=4)
config['head/upsample'] = dict(layout='t')
config['loss'] = 'ce'
return config
def build_config(self, names=None):
""" Define model's architecture configuration. See :meth: `~.TFModel.build_config` """
config = super().build_config(names)
config['body/num_classes'] = self.num_classes('targets')
config['head/num_classes'] = self.num_classes('targets')
return config
@classmethod
def initial_block(cls, inputs, base_network, name='initial_block', **kwargs):
""" Base n
|
etwork
Parameters
----------
inputs : tf.Tensor
input tensor
base_network : class
base network class
name : str
scope name
Returns
-------
tf.Tensor
"""
with tf.variable_scope(name):
x = base_network.initial_block(inputs, name='initial_block', **kwargs)
x = base_network.body(x, name='body', **kwargs)
return x
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
input tensor
Returns
-------
tf.Tensor
"""
raise NotImplementedError()
@classmethod
def head(cls, inputs, targets, num_classes, name='head', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
input tensor
targets : tf.Tensor
the tensor with source images (provide the shape to upsample to)
num_classes : int
number of classes
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('head', **kwargs)
upsample_args = cls.pop('upsample', kwargs)
x = cls.upsample(inputs, filters=num_classes, name=name, **upsample_args, **kwargs)
x = cls.crop(x, targets, kwargs.get('data_format'))
return x
class FCN32(FCN):
""" Fully convolutional network (FCN32)
**Configuration**
inputs : dict
dict with 'images' and 'masks' (see :meth:`~.TFModel._make_inputs`)
initial_block : dict
base_network : class
base network (VGG16 by default)
body : dict
filters : int
number of filters in convolutions after base network (default=100)
upsample : dict
upsampling parameters (default={factor:2, layout:t, kernel_size:4)
head : dict
upsample : dict
upsampling parameters (default={factor:32, layout:t, kernel_size:64)
"""
@classmethod
def default_config(cls):
config = FCN.default_config()
config['head']['upsample'].update(dict(factor=32, kernel_size=64))
config['body'].update(dict(layout='cnad cnad', dropout_rate=.5, kernel_size=[7, 1]))
return config
@classmethod
def body(cls, inputs, num_classes, name='body', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
input tensor
num_classes : int
number of classes
Returns
-------
tf.Tensor
"""
_ = num_classes
kwargs = cls.fill_params('body', **kwargs)
return conv_block(inputs, name=name, **kwargs)
class FCN16(FCN):
""" Fully convolutional network (FCN16)
**Configuration**
inputs : dict
dict with 'images' and 'masks' (see :meth:`~.TFModel._make_inputs`)
initial_block : dict
base_network : class
base network (VGG16 by default)
skip_name : str
tensor name for the skip connection.
Default='block-3/output:0' for VGG16.
body : dict
filters : int
number of filters in convolutions after base network (default=100)
upsample : dict
upsampling parameters (default={factor:2, layout:t, kernel_size:4)
head : dict
upsample : dict
upsampling parameters (default={factor:16, layout:t, kernel_size:32)
"""
@classmethod
def default_config(cls):
config = FCN.default_config()
config['head']['upsample'].update(dict(factor=16, kernel_size=32))
config['initial_block']['skip_name'] = '/initial_block/body/block-3/output:0'
return config
@classmethod
def initial_block(cls, inputs, name='initial_block', **kwargs):
kwargs = cls.fill_params('initial_block', **kwargs)
x = FCN.initial_block(inputs, name=name, **kwargs)
skip_name = tf.get_default_graph().get_name_scope() + kwargs['skip_name']
skip = tf.get_default_graph().get_tensor_by_name(skip_name)
return x, skip
@classmethod
def body(cls, inputs, num_classes, name='body', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
two input tensors
num_classes : int
number of classes
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
filters = cls.pop('filters', kwargs)
upsample_args = kwargs['upsample']
with tf.variable_scope(name):
x, skip = inputs
inputs = None
x = FCN32.body(x, filters=filters, num_classes=num_classes, name='fcn32', **kwargs)
x = cls.upsample(x, factor=2, filters=num_classes, name='fcn32_upsample', **upsample_args, **kwargs)
skip = conv_block(skip, layout='c', filters=num_classes, kernel_size=1, name='pool4', **kwargs)
x = cls.crop(x, skip, kwargs.get('data_format'))
output = tf.add(x, skip, name='output')
return output
class FCN8(FCN):
""" Fully convolutional network (FCN8)
**Configuration**
inputs : dict
dict with 'images' and 'masks' (see :meth:`~.TFModel._make_inputs`)
initial_block : dict
base_network : class
base network (VGG16 by default)
skip1_name : str
tensor name for the first skip connection.
Default='block-3/output:0' for VGG16.
skip2_name : str
tensor name for the second skip connection.
Default='block-2/output:0' for VGG16.
body : dict
filters : int
number of filters in convolutions after base network (default=100)
upsample : dict
upsampling parameters (default={factor:2, layout:t, kernel_size:4)
head : dict
upsample : dict
upsampling parameters (default={factor:8, layout:t, kernel_size:16)
"""
@classmethod
def default_config(cls):
config = FCN.default_config()
config['head']['upsample'].update(dict(factor=8, kernel_size=16))
config['initial_block']['skip1_name'] = '/initial_block/body/block-3/output:0'
config['initial_block']['skip2_name'] = '/initial_block/body/block-2/output:0'
return config
@classmethod
def initial_block(cls, inputs, name='initial_block', **kwargs):
kwargs = cls.fill_params('initial_block', **kwargs)
x = FCN.initial_block(inputs, name=name, **kwargs)
skip1_name = tf.get_default_graph().get_name_scope() + kwargs['skip1_name']
skip1 = tf.get_default_graph().get_tensor_by_name(skip1_name)
skip2_name = tf.get_default_graph().get_name_scope() + kwargs['skip2_name']
skip2 = tf.get_default_graph().get_tensor_by_name(skip2_name)
return
|
vntarasov/openpilot
|
opendbc/can/dbc.py
|
Python
|
mit
| 8,588
| 0.009432
|
import re
import os
import struct
import sys
import numbers
from collections import namedtuple, defaultdict
def int_or_float(s):
# return number, trying to maintain int format
if s.isdigit():
return int(s, 10)
else:
return float(s)
DBCSignal = namedtuple(
"DBCSignal", ["name", "start_bit", "size", "is_little_endian", "is_signed",
"factor", "offset", "tmin", "tmax", "units"])
class dbc():
def __init__(self, fn):
self.name, _ = os.path.splitext(os.path.basename(fn))
with open(fn, encoding="ascii") as f:
self.txt = f.readlines()
self._warned_addresses = set()
# regexps from https://github.com/ebroecker/canmatrix/blob/master/canmatrix/importdbc.py
bo_regexp = re.compile(r"^BO\_ (\w+) (\w+) *: (\w+) (\w+)")
sg_regexp = re.compile(r"^SG\_ (\w+) : (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
sgm_regexp = re.compile(r"^SG\_ (\w+) (\w+) *: (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
val_regexp = re.compile(r"VAL\_ (\w+) (\w+) (\s*[-+]?[0-9]+\s+\".+?\"[^;]*)")
# A dictionary which maps message ids to tuples ((name, size), signals).
# name is the ASCII name of the message.
# size is the size of the message in bytes.
# signals is a list signals contained in the message.
# signals is a list of DBCSignal in order of increasing start_bit.
self.msgs = {}
# A dictionary which maps message ids to a list of tuples (signal name, definition value pairs)
self.def_vals = defaultdict(list)
# lookup to bit reverse each byte
self.bits_index = [(i & ~0b111) + ((-i - 1) & 0b111) for i in range(64)]
for l in self.txt:
l = l.strip()
if l.startswith("BO_ "):
# new group
dat = bo_regexp.match(l)
if dat is None:
print("bad BO {0}".format(l))
name = dat.group(2)
size = int(dat.group(3))
ids = int(dat.group(1), 0) # could be hex
if ids in self.msgs:
sys.exit("Duplicate address detected %d %s" % (ids, self.name))
self.msgs[ids] = ((name, size), [])
if l.startswith("SG_ "):
# new signal
dat = sg_regexp.match(l)
go = 0
if dat is None:
dat = sgm_regexp.match(l)
go = 1
if dat is None:
print("bad SG {0}".format(l))
sgname = dat.group(1)
start_bit = int(dat.group(go + 2))
signal_size = int(dat.group(go + 3))
is_little_endian = int(dat.group(go + 4)) == 1
is_signed = dat.group(go + 5) == '-'
factor = int_or_float(dat.group(go + 6))
offset = int_or_float(dat.group(go + 7))
tmin = int_or_float(dat.group(go + 8))
tmax = int_or_float(dat.group(go + 9))
units = dat.group(go + 10)
self.msgs[ids][1].append(
DBCSignal(sgname, start_bit, signal_size, is_little_endian,
is_signed, factor, offset, tmin, tmax, units))
if l.startswith("VAL_ "):
# new signal value/definition
dat = val_regexp.match(l)
if dat is None:
print("bad VAL {0}".format(l))
ids = int(dat.group(1), 0) # could be hex
sgname = dat.group(2)
defvals = dat.group(3)
defvals = defvals.replace("?", r"\?") # escape sequence in C++
defvals = defvals.split('"')[:-1]
# convert strings to UPPER_CASE_WITH_UNDERSCORES
defvals[1::2] = [d.strip().upper().replace(" ", "_") for d in defvals[1::2]]
defvals = '"' + "".join(str(i) for i in defvals) + '"'
self.def_vals[ids].append((sgname, defvals))
for msg in self.msgs.values():
msg[1].sort(key=lambda x: x.start_bit)
self.msg_name_to_address = {}
for address, m in self.msgs.items():
name = m[0][0]
self.msg_name_to_address[name] = address
def lookup_msg_id(self, msg_id):
if not isinstance(msg_id, numbers.Number):
msg_id = self.msg_name_to_address[msg_id]
return msg_id
def reverse_bytes(self, x):
return ((x & 0xff00000000000000) >> 56) | \
((x & 0x00ff000000000000) >> 40) | \
((x & 0x0000ff0000000000) >> 24) | \
((x & 0x000000ff00000000) >> 8) | \
((x & 0x00000000ff000000) << 8) | \
((x & 0x0000000000ff0000) << 24) | \
((x & 0x000000000000ff00) << 40) | \
((x & 0x00000000000000ff) << 56)
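# Example (illustrative): reverse_bytes(0x0102030405060708) returns
# 0x0807060504030201, i.e. the 64-bit word is byte-swapped. encode() and
# decode() use this to handle little-endian signals.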
def encode(self, msg_id, dd):
"""Encode a CAN message using the dbc.
Inputs:
msg_id: The message ID.
dd: A dictionary mapping signal name to signal data.
"""
msg_id = self.lookup_msg_id(msg_id)
msg_def = self.msgs[msg_id]
size = msg_def[0][1]
result = 0
for s in msg_def[1]:
ival = dd.get(s.name)
if ival is not None:
ival = (ival / s.factor) - s.offset
ival = int(round(ival))
if s.is_signed and ival < 0:
ival = (1 << s.size) + ival
if s.is_little_endian:
shift = s.start_bit
else:
b1 = (s.start_bit // 8) * 8 + (-s.start_bit - 1) % 8
shift = 64 - (b1 + s.size)
mask = ((1 << s.size) - 1) << shift
dat = (ival & ((1 << s.size) - 1)) << shift
if s.is_little_endian:
mask = self.reverse_bytes(mask)
dat = self.reverse_bytes(dat)
result &= ~mask
result |= dat
result = struct.pack('>Q', result)
return result[:size]
def decode(self, x, arr=None, debug=False):
"""Decode a CAN message using the dbc.
Inputs:
x: A collection with elements (address, time, data), where address is
the CAN address, time is the bus time, and data is the CAN data as a
hex string.
arr: Optional list of signals which should be decoded and returned.
debug: True to print debugging statements.
Returns:
A tuple (name, data), where name is the name of the CAN message and data
is the decoded result. If arr is None, data is a dict of properties.
Otherwise data is a list of the same length as arr.
Returns (None, None) if the message could not be decoded.
"""
if arr is None:
out = {}
else:
out = [None] * len(arr)
msg = self.msgs.get(x[0])
if msg is None:
if x[0] not in self._warned_addresses:
# print("WARNING: Unknown message address {}".format(x[0]))
self._warned_addresses.add(x[0])
return None, None
name = msg[0][0]
if debug:
print(name)
st = x[2].ljust(8, b'\x00')
le, be = None, None
for s in msg[1]:
if arr is not None and s[0] not in arr:
continue
start_bit = s[1]
signal_size = s[2]
little_endian = s[3]
signed = s[4]
factor = s[5]
offset = s[6]
if little_endian:
if le is None:
le = struct.unpack("<Q", st)[0]
tmp = le
shift_amount = start_bit
else:
if be is None:
be = struct.unpack(">Q", st)[0]
tmp = be
b1 = (start_bit // 8) * 8 + (-start_bit - 1) % 8
shift_amount = 64 - (b1 + signal_size)
if shift_amount < 0:
continue
tmp = (tmp >> shift_amount) & ((1 << signal_size) - 1)
if signed and (tmp >> (signal_size - 1)):
tmp -= (1 << signal_size)
tmp = tmp * factor + offset
# if debug:
# print("%40s %2d %2d %7.2f %s" % (s[0], s[1], s[2], tmp, s[-1]))
if arr is None:
out[s[0]] = tmp
else:
out[arr.index(s[0])] = tmp
return name, out
def get_signals(self, msg):
msg = self.lookup_msg_id(msg)
return [sgs.name for sgs in self.msgs[msg][1]]
if __name__ == "__main__":
from opendbc import DBC_PATH
dbc_test = dbc(os.path.join(DBC_PATH, 'toyota_prius_2017_pt_generated.dbc'))
msg = ('STEER_ANGLE_SENSOR', {'STEER_ANGLE': -6.0, 'STEER_RATE': 4, 'STEER_FRACTION': -0.2})
encoded = dbc_test.encode(*msg)
decoded = dbc_test.decode((0x25, 0, encoded))
assert decoded == msg
dbc_test = dbc(os.path.
|
Impactstory/total-impact-core
|
totalimpact/providers/linkedin.py
|
Python
|
mit
| 2,365
| 0.017336
|
import os, re, requests
from bs4 import BeautifulSoup
from totalimpact.providers import provider
from totalimpact.providers.provider import Provider, ProviderContentMalformedError, ProviderRateLimitError
import logging
logger = logging.getLogger('ti.providers.linkedin')
class Linkedin(Provider):
example_id = ("url", "https://www.linkedin.com/in/heatherpiwowar")
url = "http://www.crossref.org/"
descr = "An official Digital Object Identifier (DOI) Registration Agency of the International DOI Foundation."
aliases_url_template = "http://dx.doi.org/%s"
biblio_url_template = "http://dx.doi.org/%s"
def __init__(self):
super(Linkedin, self).__init__()
@property
def provides_members(self):
return True
@property
def provides_biblio(self):
return True
def is_relevant_alias(self, alias):
(namespace, nid) = alias
if (namespace == "url") and ("linkedin.com" in nid):
return True
return False
def member_items(self,
linkedin_url,
provider_url_template=None,
cache_enabled=True):
return [("url", linkedin_url)]
@property
def provides_aliases(self):
return True
@property
def provides_biblio(self):
return True
def aliases(self,
aliases,
provider_url_template=None,
cache_enabled=True):
return None
def biblio(self,
aliases,
provider_url_template=None,
cache_enabled=True):
linkedin_url = self.get_best_id(aliases)
biblio_dict = {}
biblio_dict["repository"] = "LinkedIn"
biblio_dict["is_account"] = True # special key to tell webapp to render as genre heading
biblio_dict["genre"] = "account"
biblio_dict["account"] = linkedin_url
try:
r = requests.get(linkedin_url, timeout=20)
except requests.exceptions.Timeout:
return None
soup = BeautifulSoup(r.text)
try:
bio = soup.find("p", {'class': "description"}).get_text() #because class is reserved
biblio_dict["bio"] = bio
except AttributeError:
logger.warning("AttributeError in linkedin")
logger.warning(r.text)
return biblio_dict
|
Ubuntu-Solutions-Engineering/glance-simplestreams-sync-charm
|
hooks/charmhelpers/contrib/saltstack/__init__.py
|
Python
|
agpl-3.0
| 2,778
| 0
|
"""Charm Helpers saltstack - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as:
{{{
from charmhelpers.contrib.saltstack import (
install_salt_support,
update_machine_state,
)
def install():
install_salt_support()
update_machine_state('machine_states/dependencies.yaml')
update_machine_state('machine_states/installed.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
It's using a python package called salt-minion which allows various formats for
specifying resources, such as:
{{{
/srv/{{ basedir }}:
file.directory:
- group: ubunet
- user: ubunet
- require:
- user: ubunet
- recurse:
- user
- group
ubunet:
group.present:
- gid: 1500
user.present:
- uid: 1500
- gid: 1500
- createhome: False
- require:
- group: ubunet
}}}
The docs for all the different state definitions are at:
http://docs.saltstack.com/ref/states/all/
TODO:
* Add test helpers which will ensure that machine state definitions
are functionally (but not necessarily logically) correct (ie. getting
salt to parse all state defs.
* Add a link to a public bootstrap charm example / blogpost.
* Find a way to obviate the need to use the grains['charm_dir'] syntax
in templates.
"""
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import subprocess
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch  # used by install_salt_support() below
salt_grains_path = '/etc/salt/grains'
def install_salt_support(from_ppa=True):
"""Installs the salt-minion helper for machine state.
By default the salt-minion package is installed from
the saltstack PPA. If from_ppa is False you must ensure
that the salt-minion package is available in the apt cache.
"""
if from_ppa:
subprocess.check_call([
'/usr/bin/add-apt-repository',
'--yes',
'ppa:saltstack/salt',
])
subprocess.check_call(['/usr/bin/apt-get', 'update'])
# We install salt-common as salt-minion would run the salt-minion
# daemon.
charmhelpers.fetch.apt_install('salt-common')
def update_machine_state(state_path):
"""Update the machine state using the provided state declaration."""
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
salt_grains_path)
subprocess.check_call([
'salt-call',
'--local',
'state.template',
state_path,
])
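# Illustrative usage (mirrors the install hook shown in the module docstring;
# the YAML path is a placeholder):
#   install_salt_support()
#   update_machine_state('machine_states/installed.yaml')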
|
BehavioralInsightsTeam/edx-platform
|
openedx/features/enterprise_support/tests/test_signals.py
|
Python
|
agpl-3.0
| 1,328
| 0.000753
|
"""Tests of email marketing signal handlers."""
import logging
import ddt
from django.test import TestCase
from mock import patch
from student.tests.factories import UserFactory
from openedx.features.enterprise_support.tests.factories import EnterpriseCustomerFactory, EnterpriseCustomerUserFactory
log = logging.getLogger(__name__)
LOGGER_NAME = "enterprise_support.signals"
TEST_EMAIL = "test@edx.org"
@ddt.ddt
class EnterpriseSupportSignals(TestCase):
"""
Tests for the enterprise support signals.
"""
def setUp(self):
self.user = UserFactory.create(username='test', email=TEST_EMAIL)
super(EnterpriseSupportSignals, self).setUp()
@patch('openedx.features.enterprise_support.signals.update_user.delay')
def test_register_user(self, mock_update_user):
"""
make sure marketing enterprise user call invokes update_user
"""
enterprise_customer = EnterpriseCustomerFactory()
EnterpriseCustomerUserFactory(
user_id=self.user.id,
enterprise_customer=enterprise_customer
)
mock_update_user.assert_called_with(
sailthru_vars={
'is_enterprise_learner': True,
'enterprise_name': enterprise_customer.name,
},
email=self.user.email
)
|
mkelcb/knet
|
knet/com/io/pyplink.py
|
Python
|
mit
| 18,683
| 0.000054
|
"""Module that reads binary Plink files."""
# This file is part of pyplink.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Louis-Philippe Lemieux Perreault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import logging
from itertools import repeat
from collections import Counter
from io import UnsupportedOperation
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import numpy as np
import pandas as pd
from six.moves import range
__author__ = "Louis-Philippe Lemieux Perreault"
__copyright__ = "Copyright 2014 Louis-Philippe Lemieux Perreault"
__license__ = "MIT"
__all__ = ["PyPlink"]
# The logger
logger = logging.getLogger(__name__)
# The recoding values
_geno_recode = {1: -1, # Unknown genotype
2: 1, # Heterozygous genotype
0: 2, # Homozygous A1
3: 0} # Homozygous A2
_byte_recode = dict(value[::-1] for value in _geno_recode.items())
class PyPlink(object):
"""Reads and store a set of binary Plink files.
Args:
prefix (str): The prefix of the binary Plink files.
mode (str): The open mode for the binary Plink file.
bed_format (str): The type of bed (SNP-major or INDIVIDUAL-major).
Reads or write binary Plink files (BED, BIM and FAM).
.. code-block:: python
from pyplink import PyPlink
# Reading BED files
with PyPlink("plink_file_prefix") as bed:
pass
# Writing BED files
with PyPlink("plink_file_prefix", "w") as bed:
pass
"""
# The genotypes values
_geno_values = np.array(
[
[_geno_recode[(i >> j) & 3] for j in range(0, 7, 2)]
for i in range(256)
],
dtype=np.int8,
)
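# Illustrative note (not from the original source): every BED byte packs four
# 2-bit genotype codes, so _geno_values[b] holds the four recoded calls for
# byte b. For example, _geno_values[0b11100100] is [2, -1, 1, 0] under the
# recoding table above (A1-homozygous, unknown, heterozygous, A2-homozygous).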
def __init__(self, prefix, mode="r", bed_format="SNP-major"):
"""Initializes a new PyPlink instance."""
# The mode
self._mode = mode
# The bed format
if bed_format not in {"SNP-major", "INDIVIDUAL-major"}:
raise ValueError("invalid bed format: {}".format(bed_format))
self._bed_format = bed_format
# These are the name of the files
self.bed_filename = "{}.bed".format(prefix)
self.bim_filename = "{}.bim".format(prefix)
self.fam_filename = "{}.fam".format(prefix)
if self._mode == "r":
if self._bed_format != "SNP-major":
raise ValueError("only SNP-major format is supported "
"with mode 'r'")
# Checking that all the files exists (otherwise, error...)
for filename in (self.bed_filename, self.bim_filename,
self.fam_filename):
if not os.path.isfile(filename):
raise IOError("No such file: '{}'".format(filename))
# Setting BIM and FAM to None
self._bim = None
self._fam = None
# Reading the input files
self._read_bim()
self._read_fam()
self._read_bed()
# Where we're at
self._n = 0
elif self._mode == "w":
# The dummy number of samples and bytes
self._nb_values = None
# Opening the output BED file
self._bed = open(self.bed_filename, "wb")
self._write_bed_header()
else:
raise ValueError("invalid mode: '{}'".format(self._mode))
def __repr__(self):
"""The representation of the PyPlink object."""
if self._mode == "r":
return "PyPlink({:,d} samples; {:,d} markers)".format(
self.get_nb_samples(),
self.get_nb_markers(),
)
return 'PyPlink(mode="w")'
def __iter__(self):
"""The __iter__ function."""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
return self
def __next__(self):
"""The __next__ function."""
return self.next()
def __enter__(self):
"""Entering the context manager."""
return self
def __exit__(self, *args):
"""Exiting the context manager."""
self.close()
def close(self):
"""Closes the BED file."""
# Closing the BED file
self._bed.close()
def next(self):
"""Returns the next marker.
Returns:
tuple: The marker name as a string and its genotypes as a
:py:class:`numpy.ndarray`.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
self._n += 1
if self._n > self._nb_markers:
raise StopIteration()
return self._bim.index[self._n - 1], self._read_current_marker()
def _read_current_marker(self):
"""Reads the current marker and returns its genotypes."""
return self._geno_values[
np.fromstring(self._bed.read(self._nb_bytes), dtype=np.uint8)
].flatten(order="C")[:self._nb_samples]
def seek(self, n):
"""Gets to a certain marker position in the BED file.
Args:
n (int): The index of the marker to seek to.
"""
if self._mode != "r":
raise UnsupportedOperation("not available in 'w' mode")
if 0 <= n < self._nb_markers:
self._n = n
self._bed.seek(self._get_seek_position(n))
else:
# Invalid seek value
raise ValueError("invalid position in BED: {}".format(n))
def _get_seek_position(self, n):
"""Gets the seek position in the file (including special bytes).
Args:
n (int): The index of the marker to seek to.
"""
return 3 + self._nb_bytes * n
def _read_bim(self):
"""Reads the BIM file."""
# Reading the BIM file and setting the values
bim = pd.read_csv(self.bim_filename, delim_whitespace=True,
names=["chrom", "snp", "cm", "pos", "a1", "a2"],
dtype=dict(snp=str, a1=str, a2=str))
# Saving the index as integer
bim["i"] = bim.index
# Checking for duplicated markers
try:
bim = bim.set_index("snp", verify_integrity=True)
self._has_duplicated = False
except ValueError as e:
# Setting this flag to true
self._has_duplicated = True
# Finding the duplicated markers
duplicated = bim.snp.duplicated(keep=False)
duplicated_markers = bim.loc[duplicated, "snp"]
duplicated_marker_counts = duplicated_markers.value_counts()
# The dictionary that will contain information about the duplicated
# markers
self._dup_markers = {
m: [] for m in duplicated_marker_counts.index
}
# Logging a warning
logger.warning("Duplicated markers found")
for marker, count in duplicated_marker_counts.iteritems():
logger.warning(" - {}: {:,d} ti
|
thecardcheat/egauge-api-examples
|
python/eGauge.py
|
Python
|
mit
| 5,735
| 0.013426
|
# Copyright (c) 2013 eGauge Systems LLC
# 4730 Walnut St, Suite 110
# Boulder, CO 80301
# voice: 720-545-9767
# email: davidm@egauge.net
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys, urllib2
from lxml import etree
class error (Exception):
pass
class PushData:
_tdesc = {
"P": {
"doc" : "Power",
"units" : ["W", "Ws"],
"scale" : 1,
},
"S": {
"doc" : "Apparent power",
"units" : ["VA", "VAs"],
"scale" : 1,
},
"V": {
"doc" : "Voltage",
"units" : ["V", "Vs"],
"scale" : 1e-3,
},
"I": {
"doc" : "Current",
"units" : ["A", "As"],
"scale" : 1e-3,
},
"F": {
"doc" : "Frequency",
"units" : ["Hz", "Hzs"],
"scale" : 1e-3,
},
"THD": {
"doc" : "Total Harmonic Distortion",
"units" : ["%", "%s"],
"scale" : 1e-3,
},
"T": {
"doc" : "Temperature",
"units" : ["C", "Cs"],
"scale" : 1e-3,
},
"Q": {
"doc" : "Mass flow-rate",
"units" : ["g/s", "g"],
"scale" : 1e-3,
},
"v": {
"doc" : "Speed",
"units" : ["m/s", "m"],
"scale" : 1e-3,
},
"R": {
"doc" : "Resistance",
"units" : ["Ohm", "Ohm*s"],
"scale" : 1,
},
"Ee": {
"doc" : "Irradiance",
"units" : ["W/m^2", "W/m^2*s"],
"scale" : 1,
},
"PQ": {
"doc" : "Reactive power",
"units" : ["VAr", "VArh"],
"scale" : 1,
},
"$": {
"doc" : "Money",
"units" : ["$", "$s"],
"scale" : 1,
},
"a": {
"doc" : "Angle"
|
,
"units" : ["DEG",
|
"DEGs"],
"scale" : 1,
},
"h": {
"doc" : "Humidity",
"units" : ["%", "%s"],
"scale" : 1e-1,
},
"Qv": {
"doc" : "Volumetric flow-rate",
"units" : ["m^3/s", "m^3"],
"scale" : 1e-9,
},
"Pa": {
"doc" : "Pressure",
"units" : ["Pa", "Pa*s"],
"scale" : 1,
}
}
def __init__ (self, xml_string):
self.config_serial_number = None
self.num_registers = 0
self.regname = []
self.regtype = []
self.ts = []
self.row = []
xml = etree.fromstring (xml_string)
if xml.tag != 'group':
raise error, ('Expected <group> as the top element')
self.config_serial_number = int (xml.attrib['serial'], 0)
for data in xml:
ts = None
delta = None
if data.tag != 'data':
raise error, ('Expected <data> elements within <group>')
if 'columns' in data.attrib:
self.num_registers = int (data.attrib['columns'])
if 'time_stamp' in data.attrib:
ts = long (data.attrib['time_stamp'], 0)
if 'time_delta' in data.attrib:
delta = long (data.attrib['time_delta'], 0)
if 'epoch' in data.attrib:
self.epoch = int (data.attrib['epoch'], 0)
if ts == None:
raise error, ('<data> element is missing time_stamp attribute')
if delta == None:
raise error, ('<data> element is missing time_delta attribute')
for el in data:
if el.tag == 'r':
row = []
for c in el:
row.append (long (c.text))
self.ts.append (ts)
self.row.append (row)
ts -= delta
elif el.tag == 'cname':
t = "P"
if 't' in el.attrib:
t = el.attrib['t']
self.regname.append (el.text)
self.regtype.append (t)
return
def __str__ (self):
ret = ""
ret += "serial # = %d, " % self.config_serial_number
ret += "names = %s, " % self.regname
ret += "types = %s, rows=[" % self.regtype
for i in range (len (self.ts)):
if i > 0:
ret += ", "
ret += "0x%08x, " % self.ts[i]
ret += "%s" % self.row[i]
ret += "]"
return ret
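# Illustrative usage (hypothetical data, Python 2 like the rest of this
# module): a minimal push-data document accepted by the parser above.
#   xml = ('<group serial="0x123">'
#          '<data columns="1" time_stamp="0x51f92000" time_delta="60">'
#          '<cname t="P">Grid</cname>'
#          '<r><c>1000</c></r>'
#          '</data></group>')
#   pd = PushData(xml)
#   print pd  # one row of register "Grid" (type P, Power)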
|
mupi/tecsaladeaula
|
core/migrations/0039_auto__add_field_course_riw_style.py
|
Python
|
agpl-3.0
| 19,646
| 0.007839
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Course.riw_style'
db.add_column(u'core_course', 'riw_style',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Course.riw_style'
db.delete_column(u'core_course', 'riw_style')
models = {
u'accounts.city': {
'Meta': {'object_name': 'City'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'uf': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.State']"})
},
u'accounts.discipline': {
'Meta': {'object_name': 'Discipline'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'accounts.educationlevel': {
'Meta': {'object_name': 'EducationLevel'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '3', 'primary_key': 'True'})
},
u'accounts.occupation': {
'Meta': {'object_name': 'Occupation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'accounts.school': {
'Meta': {'object_name': 'School'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'school_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'accounts.state': {
'Meta': {'object_name': 'State'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'uf': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'uf_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
u'accounts.timtecuser': {
'Meta': {'object_name': 'TimtecUser'},
'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'business_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.City']", 'null': 'True', 'blank': 'True'}),
'cpf': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'disciplines': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['accounts.Discipline']", 'null': 'True', 'blank': 'True'}),
'education_levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['accounts.EducationLevel']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'occupations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['accounts.Occupation']", 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'rg': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'schools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['accounts.School']", 'null': 'True', 'through': u"orm['accounts.TimtecUserSchool']", 'blank': 'True'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'accounts.timtecuserschool': {
'Meta': {'object_name': 'TimtecUserSchool'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'professor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.School']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '
|
jskksj/cv2stuff
|
cv2stuff/tests/test_hypothesis_code.py
|
Python
|
isc
| 206
| 0
|
from hypothesis import given
from hypothesis.strategies import text
from cv2stuff.hypothesis_code import encode, decode
@given(text())
def test_decode_inverts_encode(s):
assert decode(encode(s)) == s
|
fitnr/addfips
|
tests/test_cli.py
|
Python
|
gpl-3.0
| 3,471
| 0.000576
|
# -*- coding: utf-8 -*-
# This file is part of addfips.
# http://github.com/fitnr/addfips
# Licensed under the GPL-v3.0 license:
# http://opensource.org/licenses/GPL-3.0
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
# pylint: disable=missing-docstring,invalid-name
import csv
import io
import subprocess
import sys
import unittest
from os import path
from addfips import __main__ as addfips_cli
class TestCli(unittest.TestCase):
def setUp(self):
dirname = path.join(path.dirname(__file__), 'data')
self.st_args = ['addfips', path.join(dirname, 'state.csv'), '-s', 'name']
self.co_args = ['addfips', path.join(dirname, 'county.csv'), '-c', 'county', '-s', 'state']
def test_state_cli_subprocess(self):
out = subprocess.check_output(self.st_args)
f = io.StringIO(out.decode('utf8'))
reader = csv.DictReader(f)
row = next(reader)
self.assertIn('name', row.keys())
self.assertIn('fips', row.keys())
self.assertEqual(row.get('name'), 'Alabama')
assert row.get('fips') == '01'
def test_county_cli_subprocess(self):
p = subprocess.Popen(self.co_args, stdout=subprocess.PIPE)
out, err = p.communicate()
assert err is None
f = io.StringIO(out.decode('utf-8'))
reader = csv.DictReader(f)
row = next(reader)
self.assertIn('county', row.keys())
self.assertIn('fips', row.keys())
assert row.get('county') == 'Autauga County'
assert row['fips'] == '01001'
def test_county_cli_call(self):
sys.argv = self.co_args
sys.stdout = io.StringIO()
addfips_cli.main()
sys.stdout.seek(0)
reader = csv.DictReader(sys.stdout)
row = next(reader)
self.assertIn('county', row.keys())
self.assertIn('fips', row.keys())
assert row['county'] == 'Autauga County'
assert row['fips'] == '01001'
def test_state_cli_call(self):
sys.argv = self.st_args
sys.stdout = io.StringIO()
addfips_cli.main()
sys.stdout.seek(0)
reader = csv.DictReader(sys.stdout)
row = next(reader)
self.assertIn('name', row.keys())
self.assertIn('fips', row.keys())
assert row['name'] == 'Alabama'
assert row['fips'] == '01'
    def test_state_name_cli_call(self):
sys.argv = self.co_args[:-2] + ['--state-name', 'Alabama']
sys.stdout = io.StringIO()
addfips_cli.main()
sys.stdout.seek(0)
reader = csv.DictReader(sys.stdout)
row = next(reader)
self.assertIn('county', row.keys())
self.assertIn('fips', row.keys())
assert row['county'] == 'Autauga County'
assert row['fips'] == '01001'
def test_state_cli_call_noheader(self):
sys.argv = self.st_args[:2] + ['-s', '1', '--no-header']
sys.stdout = io.StringIO()
addfips_cli.main()
sys.stdout.seek(0)
reader = csv.reader(sys.stdout)
next(reader)
row = next(reader)
assert row[1] == 'Alabama'
assert row[0] == '01'
def test_unmatched(self):
self.assertTrue(addfips_cli.unmatched({'fips': None}))
self.assertTrue(addfips_cli.unmatched([None, 'foo']))
self.assertFalse(addfips_cli.unmatched(['01001', 'foo']))
self.assertFalse(addfips_cli.unmatched({'fips': '01001'}))
if __name__ == '__main__':
unittest.main()
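# For reference, the shell equivalents of the invocations exercised above look
# roughly like this (paths shortened for illustration):
#
#     addfips tests/data/state.csv -s name
#     addfips tests/data/county.csv -c county -s state
#     addfips tests/data/county.csv -c county --state-name Alabama
#     addfips tests/data/state.csv -s 1 --no-header
#
# Each command echoes the input CSV with a 'fips' column prepended.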
|
dhinakg/BitSTAR
|
api/database/table.py
|
Python
|
apache-2.0
| 1,329
| 0.004515
|
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from api.database import DAL
from api.database.db import DB
from api.database.DAL import SQLite
class Table:
name = None
table_type = None
def __init__(self, name_in, type_in):
self.name = name_in
self.table_type = type_in
DAL.db_create_table(DB, self.name)
def insert(self, dataDict):
return DAL.db_insert(DB, self, dataDict)
def search(self, searchTerm, searchFor):
return SQLite.db_search(DB, self, searchTerm, searchFor)
def getContents(self, rows):
return DAL.db_get_contents_of_table(DB, self, rows)
def getLatestID(self):
return DAL.db_get_latest_id(DB, self)
class TableTypes:
pServer = 1
pGlobal = 2
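# Minimal usage sketch of the wrapper above. Table and column names here are
# hypothetical, and the DAL backend behind api.database.db.DB is assumed to be
# configured already:
#
#     quotes = Table('quotes', TableTypes.pGlobal)
#     quotes.insert({'author': 'someone', 'text': 'hello world'})
#     rows = quotes.search('author', 'someone')
#     newest = quotes.getLatestID()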
|
douglasdecouto/py-concord
|
ConcordAlarm.indigoPlugin/Contents/Server Plugin/concord/__init__.py
|
Python
|
bsd-3-clause
| 17
| 0
|
# concord module
|
samjy/acmeclient
|
acmeclient/__init__.py
|
Python
|
mit
| 73
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__VERSION__ = ""
# EOF
|
nasfarley88/thebslparlour
|
bslparloursite/tgbot/models.py
|
Python
|
cc0-1.0
| 651
| 0.00768
|
from django.db import models
from django.utils import timezone
from videolibrary.models import SourceVideo
# Create your models here.
# TODO consider whether this is needed anymore
class RequestedSign(models.Model):
short_description = models.CharField(max_length=100)
description = models.TextField()
date_added = models.DateTimeField(default=timezone.now, editable=False)
def __str__(self):
return self.short_description+" ("+self.description+")"
class TelegramGif(models.Model):
file_id = models.CharField(max_length=200) # TODO change to telegram's max length
    source_video = models.ForeignKey(SourceVideo)
|
StartupsPoleEmploi/labonneboite
|
labonneboite/common/load_data.py
|
Python
|
agpl-3.0
| 8,478
| 0.003303
|
import os
import pickle
import csv
import pandas as pd
import math
from functools import lru_cache, reduce
from collections import defaultdict
USE_ROME_SLICING_DATASET = False # Rome slicing dataset is not ready yet
if USE_ROME_SLICING_DATASET:
OGR_ROME_FILE = "rome_slicing_dataset/ogr_rome_mapping.csv"
ROME_FILE = "rome_slicing_dataset/rome_labels.csv"
ROME_NAF_FILE = "rome_slicing_dataset/rome_naf_mapping.csv"
else:
OGR_ROME_FILE = "ogr_rome_mapping.csv"
ROME_FILE = "rome_labels.csv"
ROME_NAF_FILE = "rome_naf_mapping.csv"
def load_file(func, filename):
full_filename = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "data/%s" % filename)
return func(full_filename)
def load_pickle_file(filename):
def f(full_filename):
        return pickle.load(open(full_filename, "rb"))  # binary mode: pickle data is bytes
return load_file(f, filename)
def load_pd_dataframe(filename, delimiter='', dtype=None):
def f(full_filename):
return pd.read_csv(open(full_filename, "r"), dtype=dtype)
return load_file(f, filename)
def load_csv_file(filename, delimiter='|'):
def f(full_filename):
csv_file = open(full_filename, 'r')
reader = csv.reader(csv_file, delimiter=delimiter)
return reader
reader = load_file(f, filename)
rows = []
len_previous_row = None
for row in reader:
if len_previous_row:
# at least second line of CSV file
if len(row) == 0:
# skip empty rows
continue
elif len(row) != len_previous_row:
raise IndexError(
"found row with abnormal number of fields : %s" % row)
rows.append(row)
else:
# first line of CSV file: headers should be ignored
pass
len_previous_row = len(row)
return rows
def load_rows_as_set(rows):
for row in rows:
if len(row) != 1:
raise IndexError("wrong number of fields")
return set([row[0] for row in rows])
def load_rows_as_dict(rows):
d = {}
for row in rows:
if len(row) != 2:
raise IndexError("wrong number of fields")
if row[0] in d:
raise ValueError("duplicate key")
d[row[0]] = row[1]
return d
def load_rows_as_dict_of_dict(rows):
d = {}
for row in rows:
if len(row) != 3:
raise IndexError("wrong number of fields")
# values of 3 fields
f1 = row[0]
f2 = row[1]
f3 = row[2]
if f1 in d:
if f2 in d[f1]:
raise ValueError("duplicate key")
else:
d[f1][f2] = f3
else:
d[f1] = {f2: f3}
return d
@lru_cache(maxsize=None)
def load_related_rome_areas():
"""
Build a dict with department code (code insee) as keys and area code as values (bassins d'emploi).
Used for PSE study in 2021.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_code-insee.csv", delimiter=',')
return reduce(reduceRelateRomesAreas, rows, {})
def reduceRelateRomesAreas(aggr, row):
[code_insee, code_area] = row
aggr[code_insee] = code_area
return aggr
@lru_cache(maxsize=None)
def load_related_rome():
"""
Build a dict with area code (bassin d'emploi) as keys.
The values are dict with rome code as keys and a list of related rome codes as values.
Each related rome is a dict with `rome` and `score` properties.
Used for PSE study.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_rome-connexe.csv", delimiter=',')
return reduce(reduceRelateRomes, rows, {})
def reduceRelateRomes(aggr, row):
[code_area, rome, rome_connexe, score] = row
entry_code_area = aggr.get(code_area, {})
entry_rome = entry_code_area.get(rome, [])
entry_rome.append({'rome': rome_connexe, 'score': float(score)})
entry_code_area[rome] = entry_rome
aggr[code_area] = entry_code_area
return aggr
@lru_cache(maxsize=None)
def load_city_codes():
rows = load_csv_file("city_codes.csv")
commune_id_to_commune_name = load_rows_as_dict(rows)
return commune_id_to_commune_name
@lru_cache(maxsize=None)
def load_contact_modes():
"""
Use comma delimiter instead of pipe so that it is recognized by github
and can easily be edited online by the intrapreneurs.
"""
rows = load_csv_file("contact_modes.csv", delimiter=',')
naf_prefix_to_rome_to_contact_mode = load_rows_as_dict_of_dict(rows)
return naf_prefix_to_rome_to_contact_mode
@lru_cache(maxsize=None)
def load_ogr_labels():
rows = load_csv_file("ogr_labels.csv")
ogr_to_label = load_rows_as_dict(rows)
return ogr_to_label
@lru_cache(maxsize=None)
def load_groupements_employeurs():
rows = load_csv_file("groupements_employeurs.csv")
sirets = load_rows_as_set(rows)
return sirets
@lru_cache(maxsize=None)
def load_ogr_rome_mapping():
rows = load_csv_file(OGR_ROME_FILE)
OGR_COLUMN = 0
ROME_COLUMN = 1
ogr_to_rome = {}
for row in rows:
ogr = row[OGR_COLUMN]
if ogr not in load_ogr_labels():
raise ValueError("missing label for OGR %s" % ogr)
rome = row[ROME_COLUMN]
if rome not in load_rome_labels():
raise ValueError("missing label for ROME %s" % rome)
ogr_to_rome[ogr] = rome
return ogr_to_rome
@lru_cache(maxsize=None)
def load_rome_labels():
rows = load_csv_file(ROME_FILE)
rome_to_label = load_rows_as_dict(rows)
return rome_to_label
@lru_cache(maxsize=None)
def load_naf_labels():
rows = load_csv_file("naf_labels.csv")
naf_to_label = load_rows_as_dict(rows)
return naf_to_label
@lru_cache(maxsize=None)
def load_rome_naf_mapping():
return load_csv_file(ROME_NAF_FILE, delimiter=',')
@lru_cache(maxsize=None)
def load_metiers_tension():
csv_metiers_tension = load_csv_file("metiers_tension.csv", ',')
rome_to_tension = defaultdict(int)
for row in csv_metiers_tension:
tension_pct = row[2]
rome_code = row[3]
# FIXME : remove rows where tension is #N/A in the dataset, to remove this ugly check ?
if tension_pct != '#N/A':
tension_pct = float(tension_pct)
if 0 <= tension_pct <= 100:
# As a single ROME can have multiple tensions,
# It has been decided to take the higher tension for a rome
rome_to_tension[rome_code] = max(rome_to_tension[rome_code], tension_pct)
else:
raise ValueError
return rome_to_tension
# Used for PSE study; it returns a list of SIRETs that must not be seen on LBB
@lru_cache(maxsize=None)
def load_siret_to_remove():
rows = load_csv_file("untreated_BB.csv", ',')
sirets_to_remove = load_rows_as_set(rows)
return sirets_to_remove
#Used by importer job to extract etablissement
@lru_cache(maxsize=None)
def load_effectif_labels():
'''
    The dataframe to load looks like this.
code label
0 0 0-0
1 1 1-2
2 2 3-5
3 3 6-9
4 11 10-19
5 12 20-49
6 21 50-99
7 22 100-199
8 31 200-249
9 32 250-499
10 41 500-999
11 42 1000-1999
12 51 2000-4999
13 52 5000-9999
14 53 10000+
'''
def create_column(row, which='start_effectif'):
'''
        From the label, we want to create a start and an end column delimiting the interval.
        We'll be able to use them to simply determine, from the number of employees in an office,
        which category the office belongs to.
        '''
        # we split on the label, which is of the form "10-19" or "10000+"
splitted_label = row['label'].split('-')
if len(splitted_label) == 1: #10000+
value = math.inf if which == 'end_effectif' else 10000
else:
if which == 'start_effectif':
value = int(splitted_label[0])
else:
                value = int(splitted_label[1])
return value
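    # Illustrative mapping, given the label formats handled above:
    #   label "10-19"  -> start_effectif = 10,    end_effectif = 19
    #   label "10000+" -> start_effectif = 10000, end_effectif = math.inf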
df = load_pd_dataframe("helpers/effectif_labels.csv", ',', dtype=
|
google-research/scenic
|
scenic/common_lib/video_utils.py
|
Python
|
apache-2.0
| 1,353
| 0.007391
|
# Copyright 2022 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video-related utility functions."""
import jax.numpy as jnp
def sample_frames_uniformly(x: jnp.ndarray,
n_sampled_frames: int) -> jnp.ndarray:
"""Sample frames from the input video."""
if x.ndim != 5:
raise ValueError('Input shape should be [bs, t, h, w, c].')
num_frames = x.shape[1]
if n_sampled_frames < num_frames:
t_start_idx = num_frames / (n_sampled_frames + 1)
t_step = t_start_idx
else:
t_start_idx = 0
t_step = 1
t_end_idx = num_frames
temporal_indices = jnp.arange(t_start_idx, t_end_idx, t_step)
  temporal_indices = jnp.round(temporal_indices).astype(jnp.int32)
temporal_indices = jnp.minimum(temporal_indices, num_frames - 1)
  return x[:, temporal_indices]  # [n, t_s, in_h, in_w, c]
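# Usage sketch (illustrative shapes only):
#
#     import jax.numpy as jnp
#     video = jnp.zeros((2, 32, 224, 224, 3))            # [bs, t, h, w, c]
#     clip = sample_frames_uniformly(video, n_sampled_frames=8)
#     # clip.shape == (2, 8, 224, 224, 3)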
|
zr40/scc
|
lib/hardware.py
|
Python
|
mit
| 3,587
| 0.02983
|
from serial import Serial
class Hardware(object):
def __init__(self, port, debug=False):
self.debug = debug
self.port = Serial(port, timeout=0.01)
self.resetConnection()
def resetConnection(self):
print 'Establishing connection...'
repeatCount = 0
while True:
if repeatCount == 100:
print 'Mon51 does not respond. Please reset the board to continue.'
repeatCount += 1
self.send([0x11])
if self.read(wait=False) != [0xff]:
continue
self.send([0x11])
if self.read(wait=False) != [0x00]:
continue
self.sendWithChecksum([0x10, 0x02])
if self.read() != [0x06, 0x00, ord('V'), ord('3'), ord('.'), ord('0'), 0x04]:
raise Exception('Unknown handshake')
print 'Connection established.'
return
def step(self):
self.sendWithChecksum([0x0C])
self.read()
def run(self):
self.sendWithChecksum([0x08, 0x00, 0x01, 0x05, 0x00, 0x00, 0x05, 0xff, 0xff])
self.read()
def stop(self):
self.send([0x1B])
self.resetConnection()
def getPC(self):
inbytes = self.readMemory(0x00, 0x00, 0x00)
return inbytes[0] << 8 | inbytes[1]
def setPC(self, address):
self.writeMemory(0x00, 0x00, [address >> 8, address & 0xff])
# HACK to work around strange response
self.resetConnection()
def getRegisters(self):
self.sendWithChecksum([0x08, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
return self.readMemoryResponse()
def writeCode(self, address, bytes):
self.writeMemory(0x05, address, bytes)
def readCode(self, address, length):
return self.readMemory(0x05, address, length)
def writeDirect(self, address, bytes):
self.writeMemory(0x04, address, bytes)
def readDirect(self, address, length):
return self.readMemory(0x04, address, length)
def writeIndirect(self, address, bytes):
self.writeMemory(0x01, address, bytes)
def readIndirect(self, address, length):
return self.readMemory(0x01, address, length)
def writeExternal(self, address, bytes):
self.writeMemory(0x02, address, bytes)
def readExternal(self, address, length):
return self.readMemory(0x02, address, length)
def writeMemory(self, type, address, bytes):
self.sendWithChecksum([0x02, type, address >> 8, address & 0xff, len(bytes)] + bytes)
self.read()
def readMemory(self, type, address, length):
self.sendWithChecksum([0x04, type, address >> 8, address & 0xff, length])
return self.readMemoryResponse()
def readMemoryResponse(self):
inbytes = self.readWithChecksum()
if inbytes[0] != 0x02:
raise Exception('Invalid response')
del inbytes[0]
return inbytes
def read(self, wait=True):
inbytes = []
while len(inbytes) < 512:
data = self.port.read(256)
if not data and (inbytes or not wait):
if self.debug:
print 'IN: ' + ' '.join('%02X' % byte for byte in inbytes)
print '%02X %s' % (len(inbytes), repr(''.join(chr(byte) for byte in inbytes)))
print
return inbytes
inbytes += (ord(byte) for byte in data)
raise Exception('Huge response, driver bug?')
def readWithChecksum(self):
inbytes = self.read()
if sum(inbytes) % 0x100 != 0:
raise Exception('Invalid checksum received')
inbytes.pop()
return inbytes
def send(self, data):
if self.debug:
print 'OUT: ' + ' '.join('%02X' % byte for byte in data)
print ' ' + repr(''.join(chr(byte) for byte in data))
print
        self.port.write(''.join(chr(byte) for byte in data))
def sendWithChecksum(self, data):
sum = 0
for byte in data:
sum += byte
checksum = (0x100 - sum) % 0x100
self.send(data + [checksum])
def reset(self):
self.setPC(0x0000)
|
ekarlso/partizan
|
tests/functional/fixtures/database.py
|
Python
|
apache-2.0
| 2,371
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import fixtures
from oslo_config import cfg
from partizan.db import base as db_base
CONF = cfg.CONF
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection, sqlite_db,
sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
self.setup_sqlite(db_migrate)
else:
testdb = os.path.join(CONF.state_path, sqlite_db)
db_migrate.upgrade('head')
if os.path.exists(testdb):
return
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose) # pylint: disable=E1101
else:
shutil.copyfile(
os.path.join(CONF.state_path, self.sqlite_clean_db),
os.path.join(CONF.state_path, self.sqlite_db),
)
def setup_sqlite(self, db_migrate):
if db_migrate.version():
return
db_base.BASE.metadata.create_all(self.engine)
db_migrate.stamp('head')
|
thundergolfer/mAIcroft
|
maicroft/social_info_extraction.py
|
Python
|
mit
| 15,272
| 0.000262
|
import datetime
try:
import urlparse
except (ImportError):
import urllib.parse as urlparse
import calendar
import pytz
import re
from maicroft.util import Util
from maicroft.activity_metrics_proc import process_metrics
from maicroft.activity_metrics_proc import process_submission_metrics
from maicroft.subreddits import subreddits_dict, ignore_text_subs
from maicroft.text_parser import TextParser
"""
Generalising the information extraction processing of social media content.
Currently only Reddit social content is supported, but we want the code to be
reusable for other major social media content eg. Twitter, Facebook, LinkedIn...
"""
parser = TextParser()
def process_comment(user, comment):
"""
Process a single comment.
* Updates metrics
* Sanitizes and extracts chunks from comment.
"""
text = Util.sanitize_text(comment.text) # Sanitize comment text.
user.corpus += text.lower() # Add comment text to corpus.
comment_timestamp = datetime.datetime.fromtimestamp(
comment.created_utc, tz=pytz.utc
)
process_metrics(user, comment) # Process the comment for metrics
# If comment is in a subreddit in which comments/user text
# are to be ignored (such as /r/jokes, /r/writingprompts, etc), do not process it further.
if comment.subreddit in ignore_text_subs:
return False
# TODO: This stopping if not I/My found is dodgy behaviour
# If comment text does not contain "I" or "my", why even bother?
if not re.search(r"\b(i|my)\b", text, re.I):
return False
# Now, this is a comment that needs to be processed.
(chunks, sentiments) = parser.extract_chunks(text)
user.sentiments += sentiments
for chunk in chunks:
user.load_attributes(chunk, comment)
return True
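# In short: sanitize the text, update metrics, skip ignored subreddits, and only
# chunk comments that mention "I" or "my". A hypothetical call (attribute names
# inferred from the accesses above; process_metrics may require further fields):
#
#     comment = SimpleNamespace(text="I love my dog", created_utc=0.0,
#                               subreddit="pets")
#     extracted = process_comment(user, comment)   # True if chunks were parsed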
def process_submission(user, submission):
"""
Process a single submission.
* Updates metrics
* Sanitizes and extracts chunks from user text.
"""
if(submission.is_user):
text = Util.sanitize_text(submission.text)
user.corpus += text.lower()
process_submission_metrics(user, submission) # add metrics info to user
submission_type = None
submission_domain = None
submission_url_path = urlparse(submission.url).path
if submission.domain.startswith("user."):
submission_type = "Self"
submission_domain = submission.subreddit
elif (
submission_url_path.endswith(tuple(user.IMAGE_EXTENSIONS)) or
submission.domain.endswith(tuple(user.IMAGE_DOMAINS))
):
submission_type = "Image"
submission_domain = submission.domain
elif submission.domain.endswith(tuple(user.VIDEO_DOMAINS)):
submission_type = "Video"
submission_domain = submission.domain
else:
submission_type = "Other"
submission_domain = submission.domain
t = [
x for x in user.submissions_by_type["children"] if x["name"] == submission_type
][0]
d = (
[x for x in t["children"] if x["name"] == submission_domain] or [None]
)[0]
if d:
d["size"] += 1
else:
t["children"].append({
"name": submission_domain,
"size": 1
})
# If submission is in a subreddit in which comments/user text
# are to be ignored (such as /r/jokes, /r/writingprompts, etc),
# do not process it further.
if submission.subreddit in ignore_text_subs:
return False
# Only process user texts that contain "I" or "my"
if not submission.is_user or not re.search(r"\b(i|my)\b", text, re.I):
return False
(chunks, sentiments) = parser.extract_chunks(text)
user.sentiments += sentiments
for chunk in chunks:
user.load_attributes(chunk, submission)
return True
def load_attributes(user, chunk, post_permalink):
"""
Given an extracted chunk, load appropriate attributes from it.
"""
# Is this chunk a possession/belonging?
if chunk["kind"] == "possession" and chunk["noun_phrase"]:
# Extract noun from chunk
noun_phrase = chunk["noun_phrase"]
noun_phrase_text = " ".join([w for w, t in noun_phrase])
norm_nouns = " ".join([
parser.normalize(w, t) for w, t in noun_phrase if t.startswith("N")
])
noun = next(
(w for w, t in noun_phrase if t.startswith("N")), None
)
if noun:
# See if noun is a pet, family member or a relationship partner
pet = parser.pet_animal(noun)
            family_member = parser.family_member(noun)
relationship_partner = parser.relationship_partner(noun)
if pet:
user.pets.append((pet, post_permalink))
elif family_member:
user.family_members.append((family_member, post_permalink))
elif relationship_partner:
user.relationship_partners.append(
(relationship_partner, post_permalink)
)
else:
user.possessions_extra.append((norm_nouns, post_permalink))
# Is this chunk an action?
elif chunk["kind"] == "action" and chunk["verb_phrase"]:
verb_phrase = chunk["verb_phrase"]
verb_phrase_text = " ".join([w for w, t in verb_phrase])
# Extract verbs, adverbs, etc from chunk
norm_adverbs = [
parser.normalize(w, t)
for w, t in verb_phrase if t.startswith("RB")
]
adverbs = [w.lower() for w, t in verb_phrase if t.startswith("RB")]
norm_verbs = [
parser.normalize(w, t)
for w, t in verb_phrase if t.startswith("V")
]
verbs = [w.lower() for w, t in verb_phrase if t.startswith("V")]
prepositions = [w for w, t in chunk["prepositions"]]
noun_phrase = chunk["noun_phrase"]
noun_phrase_text = " ".join(
[w for w, t in noun_phrase if t not in ["DT"]]
)
norm_nouns = [
parser.normalize(w, t)
for w, t in noun_phrase if t.startswith("N")
]
proper_nouns = [w for w, t in noun_phrase if t == "NNP"]
determiners = [
parser.normalize(w, t)
for w, t in noun_phrase if t.startswith("DT")
]
prep_noun_phrase = chunk["prep_noun_phrase"]
prep_noun_phrase_text = " ".join([w for w, t in prep_noun_phrase])
pnp_prepositions = [
w.lower() for w, t in prep_noun_phrase if t in ["TO", "IN"]
]
pnp_norm_nouns = [
parser.normalize(w, t)
for w, t in prep_noun_phrase if t.startswith("N")
]
pnp_determiners = [
parser.normalize(w, t)
for w, t in prep_noun_phrase if t.startswith("DT")
]
full_noun_phrase = (
noun_phrase_text + " " + prep_noun_phrase_text
).strip()
# TODO - Handle negative actions (such as I am not...),
# but for now:
if any(
w in ["never", "no", "not", "nothing"]
for w in norm_adverbs+determiners
):
return
# I am/was ...
if (len(norm_verbs) == 1 and "be" in norm_verbs and not prepositions and noun_phrase):
# Ignore gerund nouns for now
if (
"am" in verbs and
any(n.endswith("ing") for n in norm_nouns)
):
user.attributes_extra.append(
(full_noun_phrase, post_permalink)
)
return
attribute = []
for noun in norm_nouns:
gender = None
orientation = None
if "am" in verbs:
gender = parser.gender(noun)
orientation = parser.orientation(noun)
if gender:
user.genders.append((gender, post_permalink))
elif orientation:
user.orientations.append(
(orientation, post_permalink)
)
# Include only "am" phrases
elif "am" in verbs:
|
vthorsteinsson/tensor2tensor
|
tensor2tensor/models/bytenet_test.py
|
Python
|
apache-2.0
| 1,719
| 0.004072
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ByteNet tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import bytenet
import tensorflow as tf
class ByteNetTest(tf.test.TestCase):
def testByteNet(self):
vocab_size = 9
x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
hparams = bytenet.bytenet_base()
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
with self.test_session() as session:
features = {
"inputs": tf.constant(x, dtype=tf.int32),
"targets": tf.constant(y, dtype=tf.int32),
}
model = bytenet.ByteNet(
hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
logits, _ = model(features)
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (3, 50, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
|
cedricpradalier/vrep_ros_ws
|
src/ar_slam_base/nodes/rover_mapping.py
|
Python
|
bsd-3-clause
| 6,109
| 0.014241
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('ar_slam_base')
import rospy
from std_msgs.msg import Float64,Float32
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PointStamped
import tf
import numpy
import message_filters
from ar_slam_base.mapping_kf import *
from ar_track_alvar_msgs.msg import AlvarMarkers
class RoverMapping:
def __init__(self,name):
self.name = name
self.encoder_precision = 0.05 # [m]
self.ar_precision = 0.50 # [m]
self.compass_precision = 10. * pi/180. # [rad]
self.target_frame = "/world"
self.ignore_id = False
self.initial_x = -5.0
self.initial_y = 2.5
self.initial_theta = -pi/4
self.encoder_precision = 0.05 # [m]
rospy.init_node('rover_mapping')
self.name = rospy.get_param("~rover_name",self.name)
self.ignore_id = rospy.get_param("~ignore_id",self.ignore_id)
self.target_frame = rospy.get_param("~target_frame",self.target_frame)
self.ar_precision = rospy.get_param("~ar_precision",self.ar_precision)
self.compass_precision = rospy.get_param("~compass_precision",self.compass_precision)
self.encoder_precision = rospy.get_param("~encoder_precision",self.encoder_precision)
self.initial_x = rospy.get_param("~initial_x",self.initial_x)
self.initial_y = rospy.get_param("~initial_y",self.initial_y)
self.initial_theta = rospy.get_param("~initial_theta",self.initial_theta)
rospy.loginfo("Starting rover driver for rover '%s'" % (self.name))
self.last_cmd = rospy.Time.now()
self.listener = tf.TransformListener()
self.ready = False
self.connected = False
self.steering_sub={}
self.drive_sub={}
# Instantiate the right filter based on launch parameters
initial_vec = [self.initial_x, self.initial_y, self.initial_theta]
initial_unc = [0.01, 0.01, 0.01]
self.mapper = MappingKF(initial_vec,initial_unc)
self.pose_pub = rospy.Publisher("~pose",PoseStamped,queue_size=1)
# print "Initialising wheel data structure"
for k in prefix:
self.steering_sub[k] = message_filters.Subscriber("/vrep/%s/%sSteerEncoder" % (self.name,k), JointState)
self.drive_sub[k] = message_filters.Subscriber("/vrep/%s/%sDriveEncoder" % (self.name,k), JointState)
# print "Initialised wheel " + k
self.ts = message_filters.TimeSynchronizer(self.steering_sub.values()+self.drive_sub.values(), 10)
self.ts.registerCallback(self.sync_odo_cb)
def sync_odo_cb(self,*args):
self.connected = True
if not self.ready:
return
if len(args)!=12:
rospy.logerr("Invalid number of argument in OdoCallback")
return
steering_val = [s.position[0] for s in args[0:6]]
drive_val = [s.position[0] for s in args[6:12]]
motors = RoverMotors()
motors.steering = dict(zip(self.steering_sub.keys(),steering_val))
motors.drive = dict(zip(self.drive_sub.keys(),drive_val))
self.odo_cb(args[0].header.stamp,motors)
def odo_cb(self,timestamp,motors):
# Get the pose of all drives
drive_cfg={}
for k in prefix:
# try:
# self.listener.waitForTransform('/%s/ground'%(self.name),
# '/%s/%sDrive'%(self.name,k), self.last_cmd, rospy.Duration(1.0))
((x,y,z),rot) = self.listener.lookupTransform('/%s/ground'%(self.name),
'/%s/%sDrive'%(self.name,k), rospy.Time(0))
drive_cfg[k] = DriveConfiguration(self.radius[k],x,y,z)
# except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
# return
self.mapper.predict(motors, drive_cfg, self.encoder_precision)
        self.mapper.publish(self.target_frame,timestamp)
def compass_cb(self, value):
self.mapper.update_compass(value.data,self.compass_precision)
def ar_cb(self, markers):
for m in markers.markers:
if m.id > 32:
continue
self.listener.waitForTransform("/%s/ground"%self.name,m.header.frame_id, m.header.stamp, rospy.Duration(1.0))
m_pose = PointStamped()
m_pose.header = m.header
m_pose.point = m.pose.pose.position
m_pose = self.listener.transformPoint("/%s/ground"%self.name,m_pose)
Z = vstack([m_pose.point.x,m_pose.point.y])
if self.ignore_id:
self.mapper.update_ar(Z,0,self.ar_precision)
else:
self.mapper.update_ar(Z,m.id,self.ar_precision)
self.mapper.publish(self.target_frame,markers.header.stamp)
def run(self):
timeout = True
rate = rospy.Rate(2)
rospy.loginfo("Waiting for VREP")
while (not rospy.is_shutdown()) and (not self.connected):
rate.sleep()
if rospy.is_shutdown():
return
rospy.loginfo("Waiting for initial transforms")
rospy.sleep(1.0)
self.radius={}
for k in prefix:
try:
self.listener.waitForTransform('/%s/ground'%(self.name),
'/%s/%sDrive'%(self.name,k), rospy.Time(0), rospy.Duration(5.0))
((x,y,z),rot) = self.listener.lookupTransform('/%s/ground'%(self.name),
'/%s/%sDrive'%(self.name,k), rospy.Time(0))
self.radius[k] = z
rospy.loginfo("Got transform for " + k)
except tf.Exception,e:
rospy.logerr("TF exception: " + repr(e))
self.ready = True
self.ar_sub = rospy.Subscriber("/ar_pose_marker", AlvarMarkers, self.ar_cb)
self.compass_sub = rospy.Subscriber("/vrep/rover/compass", Float32, self.compass_cb)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == '__main__':
try:
rd = RoverMapping("rover")
rd.run()
except rospy.ROSInterruptException:
pass
|
SchrodingersGat/kicad-footprint-generator
|
scripts/Connector/Connector_JST/conn_jst_VH_tht_side-stabilizer.py
|
Python
|
gpl-3.0
| 12,829
| 0.027983
|
#!/usr/bin/env python3
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
series = "VH"
manufacturer = 'JST'
orientation = 'H'
number_of_rows = 1
datasheet = 'http://www.jst-mfg.com/product/pdf/eng/eVH.pdf'
pitch = 3.96
pin_range = range(2, 8) #number of pins in each row
drill = 1.7 # 1.65 +0.1/-0.0 -> 1.7+/-0.05
pad_to_pad_clearance = 0.8
pad_copper_y_solder_length = 0.5 #How much copper should be in y direction?
min_annular_ring = 0.15
#FP name strings
part_base = "S{n}P-VH" #JST part number format string
#FP description and tags
# DISCLAIMER: This generator uses many magic numbers for the silk screen details. These might break if some parameters are changed.
def generate_one_footprint(pins, configuration):
silk_pad_clearance = configuration['silk_pad_clearance']+configuration['silk_line_width']/2
mpn = part_base.format(n=pins)
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pins, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("JST {:s} series connector, {:s} ({:s}), generated with kicad-footprint-generator".format(series, mpn, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
#calculate fp dimensions
A = (pins - 1) * pitch
B = A + 3.9
#coordinate locations
# y1 x1 x3 x4 x2
# y2 | | | |
# y3 | |1||2||3||4||5||6||7| |
# y4 |_| |__|
# | |
# y5 |__________________|
# y6 || || || || || || ||
#generate pads
pad_size = [pitch - pad_to_pad_clearance, drill + 2*pad_copper_y_solder_length]
if pad_size[0] - drill < 2*min_annular_ring:
pad_size[0] = drill + 2*min_annular_ring
if pad_size[0] - drill > 2*pad_copper_y_solder_length:
pad_size[0] = drill + 2*pad_copper_y_solder_length
shape=Pad.SHAPE_OVAL
if pad_size[0] == pad_size[1]:
shape=Pad.SHAPE_CIRCLE
optional_pad_params = {}
if configuration['kicad4_compatible']:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_RECT
else:
optional_pad_params['tht_pad1_shape'] = Pad.SHAPE_ROUNDRECT
kicad_mod.append(PadArray(
pincount=pins, x_spacing=pitch,
type=Pad.TYPE_THT, shape=shape,
size=pad_size, drill=drill,
layers=Pad.LAYERS_THT,
**optional_pad_params))
#draw the component outline
x1 = A/2 - B/2
x2 = x1 + B
    x3 = -0.9
x4 = pitch * (pins - 1) + 0.9
y6 = 13.4
y4 = y6 - 7.7
y1 = y4 - 7.7
y2 = y1 + 2
y3 = y1 + 4.5
y5 = y3 + 9.4
body_edge={'left':x1, 'right':x2, 'top':y4, 'bottom':y5}
#draw shroud outline on F.Fab layer
    kicad_mod.append(RectLine(start=[x3,y3],end=[x4,y5], layer='F.Fab', width=configuration['fab_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x4-0.2,'y':y3},{'x':x4-0.2,'y':y1},{'x':x2,'y':y1},{'x':x2,'y':y4},{'x':x4,'y':y4}], layer='F.Fab', width=configuration['fab_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x3,'y':y4},{'x':x1,'y':y4},{'x':x1,'y':y1},{'x':x3+0.2,'y':y1},{'x':x3+0.2,'y':y3}], layer='F.Fab', width=configuration['fab_line_width']))
########################### CrtYd #################################
cx1 = roundToBase(x1-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy1 = roundToBase(y1-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cx2 = roundToBase(x2+configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy2 = roundToBase(y6+configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
kicad_mod.append(RectLine(
start=[cx1, cy1], end=[cx2, cy2],
layer='F.CrtYd', width=configuration['courtyard_line_width']))
#draw pin outlines and plastic between pins on F.Fab (pin width is 1.4mm, so 0.7mm is half the pin width)
for pin in range(pins):
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch - 0.7,'y':y5},{'x':pin * pitch - 0.7,'y':y6},{'x':pin * pitch + 0.7,'y':y6},{'x':pin * pitch + 0.7,'y':y5}], layer='F.Fab', width=configuration['fab_line_width']))
if pin < (pins - 1):
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch + 1.38,'y':y3},{'x':pin * pitch + 1.38,'y':y2},{'x':pin * pitch + 2.58,'y':y2},{'x':pin * pitch + 2.58,'y':y3}], layer='F.Fab', width=configuration['fab_line_width']))
#draw pin1 mark on F.Fab
kicad_mod.append(PolygoneLine(polygone=[{'x':-0.8,'y':y3},{'x':0,'y':y3+0.8},{'x':0.8,'y':y3}], layer='F.Fab', width=configuration['fab_line_width']))
#draw silk outlines
off = configuration['silk_fab_offset']
x1 -= off
y1 -= off
x2 += off
y2 -= off
x3 -= off
y3 -= off
x4 += off
y4 += off
y5 += off
y6 += off
p1s_x = pad_size[0]/2 + silk_pad_clearance
p1s_y = pad_size[1]/2 + silk_pad_clearance
#silk around shroud; silk around stabilizers; silk long shroud between pin and shroud for first and last pins
    #note that half of pin width is 0.7mm, so adding 0.12mm silk offset gives 0.82mm about pin center; 0.44 is double silk offset in cases where 'off' is in the wrong direction
kicad_mod.append(PolygoneLine(polygone=[{'x':x3,'y':y4},{'x':x3,'y':y5},{'x':-0.82,'y':y5}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x4-0.44,'y':-1.6},{'x':x4-0.44,'y':y1},{'x':x2,'y':y1},{'x':x2,'y':y4},{'x':x4,'y':y4}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x4-0.44,'y':y3},{'x':x4-0.44,'y':1.6}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x3,'y':y4},{'x':x1,'y':y4},{'x':x1,'y':y1},{'x':x3+0.44,'y':y1},{'x':x3+0.44,'y':-p1s_y}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':x3+0.44,'y':1.7},{'x':x3+0.44,'y':y3}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':(pins - 1) * pitch + 0.82,'y':y5},{'x':x4,'y':y5},{'x':x4,'y':y4}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':-0.58,'y':y3},{'x':1.26,'y':y3}], layer='F.SilkS', width=configuration['silk_line_width']))
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch - 1.26,'y':y3},{'x':pin * pitch + 0.58,'y':y3}], layer='F.SilkS', width=configuration['silk_line_width']))
#per-pin silk
#pin silk
for pin in range(pins):
kicad_mod.append(PolygoneLine(polygone=[{'x':pin * pitch - 0.82,'y':y5},{'x':pin * pitch - 0.82,'y':y6},{'x':pin * pitch + 0.82,'y':y6},{'x':pin * pitch + 0.82,
|
kdart/pycopia
|
core/pycopia/OS/Linux/sysctl.py
|
Python
|
apache-2.0
| 52
| 0.019231
|
"""
Linux kernel system control from Python.
"""
|
pypingou/pypass
|
pypass/pypobj.py
|
Python
|
gpl-3.0
| 4,872
| 0.003079
|
""" Object module for pypass
This module contains the objects from and to which the json is
generated/read.
"""
#-*- coding: utf-8 -*-
# Copyright (c) 2011 Pierre-Yves Chibon <pingou AT pingoured DOT fr>
# Copyright (c) 2011 Johan Cwiklinski <johan AT x-tnd DOT be>
#
# This file is part of pypass.
#
# pypass is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pypass is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pypass. If not, see <http://www.gnu.org/licenses/>.
def json_to_tree(jsontxt):
""" Converts a json string into a PypFolder with all the information
from the json string converted into PypFolder and PypAccount.
"""
data = PypFolder("rootdir")
if jsontxt is None:
return data
for key in jsontxt.keys():
data = load_pypdir(jsontxt[key], key)
return data
def load_pypdir(jsondir, name):
""" Convert a dictionary into a PypFolder with the specified name.
"""
desc = None
if "description" in jsondir.keys():
desc = jsondir["description"]
pypdir = PypFolder(name, desc)
for account in jsondir["accounts"]:
pypdir.accounts.append(load_account(account))
for account in jsondir["folders"]:
for key in account.keys():
pypdir.folders.append(load_pypdir(
account[key], key))
return pypdir
def load_account(json_account):
""" From an account entry in the json return a PypAccount object """
account = PypAccount(json_account["name"], json_account["password"])
for key in json_account.keys():
if key not in ["name","password"] and \
key not in account.extras.keys():
account.extras[key] = json_account[key]
return account
def create_set():
""" Creates a fake PypFolder for development """
root = PypFolder("rootfolder", "firstLevel")
account = PypAccount("p1", "mdp1")
root.accounts.append(account)
account = PypAccount("p2", "mdp2")
root.accounts.append(account)
folder1 = PypFolder("secfolder", "secLevel")
root.folders.append(folder1)
account = PypAccount("p3", "mdp3")
folder1.accounts.append(account)
folder2 = PypFolder("thirdfolder", "thirdLevel")
    account = PypAccount("p4", "mdp4")
folder2.accounts.append(account)
folder1.folders.append(folder2)
return root
def iterate_over_tree(obj, out, ite=0):
""" Iterate over the items in a PypFolder ""
|
"
out = '%s"%s": { ' % (out, obj.name)
if obj.description is not None and obj.description != "":
out = '%s "description": "%s",' % (out, obj.description)
ite = ite + 1
cnt = 0
out = '%s "accounts": [' % out
for item in obj.accounts:
cnt = cnt + 1
out = '%s { "name": "%s", "password": "%s"' % (out,
item.name, item.password)
for key in item.extras.keys():
out += ',"%s":"%s"' %(key, item.extras[key])
out += "}"
if cnt != len(obj.accounts):
out = "%s ," % out
out = '%s ], ' % out
out = '%s "folders": [{' % out
cnt = 0
for item in obj.folders:
cnt = cnt + 1
out = iterate_over_tree(item, out, ite)
if cnt != len(obj.folders):
out = "%s ," % out
out = '%s }] }' % out
return out
class PypFolder(object):
""" Represents PyPass folder, used to classify the accounts."""
def __init__(self, name="", description=None):
self.name = name
self.description = description
self.accounts = []
self.folders = []
def dump(self):
""" Dump the object to stdout """
out = "{"
out = iterate_over_tree(self, out)
out = out + "}"
return out
def __str__(self):
return 'Name: %s' % self.name
class PypAccount(object):
""" Represents a PyPass account"""
def __init__(self, name, password):
self.name = name
self.password = password
self.extras = {}
def __str__(self):
string = '''Name: %s
Password: %s ''' % (self.name, self.password)
for key in self.extras.keys():
string = string + '\n%s: %s' % (key.capitalize(),
self.extras[key])
return string
#if __name__ == "__main__":
#tree = create_set()
#tree.dump()
#import json
#f = open("../testjson")
#data = json.load(f)
#f.close()
#tree = json_to_tree(data)
#tree.dump()
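# The JSON shape json_to_tree() expects, with hypothetical values. Keys other
# than "name"/"password" on an account (here "url") end up in PypAccount.extras:
#
#     {
#       "rootfolder": {
#         "description": "first level",
#         "accounts": [
#           {"name": "p1", "password": "mdp1", "url": "https://example.com"}
#         ],
#         "folders": [
#           {"secfolder": {"accounts": [], "folders": []}}
#         ]
#       }
#     }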
|
damianpv/exercise
|
home/admin.py
|
Python
|
gpl-2.0
| 328
| 0.009146
|
from django.contrib import admin
from .models import Friend
class FriendAdmin(admin.ModelAdmin):
list_display = ('full_name', 'profile_image')
def profile_image(self, obj):
        return '<img src="%s" width="50" height="50">' % obj.photo
profile_image.allow_tags = True
admin.site.register(Friend, FriendAdmin)
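# Note: allow_tags was deprecated in Django 1.9 and removed in 2.0; on newer
# Django the same column is typically rendered with format_html, e.g. (sketch):
#
#     from django.utils.html import format_html
#
#     def profile_image(self, obj):
#         return format_html('<img src="{}" width="50" height="50">', obj.photo)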
| |
kikusu/chainer
|
chainer/datasets/sub_dataset.py
|
Python
|
mit
| 7,241
| 0
|
import numpy
import six
from chainer.dataset import dataset_mixin
class SubDataset(dataset_mixin.DatasetMixin):
"""Subset of a base dataset.
SubDataset defines a subset of a given base dataset. The subset is defined
as an interval of indexes, optionally with a given permutation.
If ``order`` is given, then the ``i``-th example of this dataset is the
``order[start + i]``-th example of the base dataset, where ``i`` is a
non-negative integer. If ``order`` is not given, then the ``i``-th example
of this dataset is the ``start + i``-th example of the base dataset.
Negative indexing is also allowed: in this case, the term ``start + i`` is
replaced by ``finish + i``.
SubDataset is often used to split a dataset into training and validation
subsets. The training set is used for training, while the validation set is
    used to track the generalization performance, i.e. how well the learned model
    works on unseen data. We can tune hyperparameters (e.g. number of
hidden units, weight initializers, learning rate, etc.) by comparing the
validation performance. Note that we often use another set called test set
to measure the quality of the tuned hyperparameter, which can be made by
nesting multiple SubDatasets.
There are two ways to make training-validation splits. One is a single
split, where the dataset is split just into two subsets. It can be done by
    :func:`split_dataset` or :func:`split_dataset_random`. The other one is a
    :math:`k`-fold cross validation, in which the dataset is divided into
:math:`k` subsets, and :math:`k` different splits are generated using each
of the :math:`k` subsets as a validation set and the rest as a training
set. It can be done by :func:`get_cross_validation_datasets`.
Args:
dataset: Base dataset.
start (int): The first index in the interval.
finish (int): The next-to-the-last index in the interval.
order (sequence of ints): Permutation of indexes in the base dataset.
If this is ``None``, then the ascending order of indexes is used.
"""
def __init__(self, dataset, start, finish, order=None):
if start < 0 or finish > len(dataset):
raise ValueError('subset overruns the base dataset.')
self._dataset = dataset
self._start = start
self._finish = finish
self._size = finish - start
if order is not None and len(order) != len(dataset):
msg = ('order option must have the same length as the base '
'dataset: len(order) = {} while len(dataset) = {}'.format(
len(order), len(dataset)))
raise ValueError(msg)
self._order = order
def __len__(self):
return self._size
def get_example(self, i):
if i >= 0:
if i >= self._size:
raise IndexError('dataset index out of range')
index = self._start + i
else:
if i < -self._size:
raise IndexError('dataset index out of range')
index = self._finish + i
if self._order is not None:
index = self._order[index]
return self._dataset[index]
def split_dataset(dataset, split_at, order=None):
"""Splits a dataset into two subsets.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset.
Args:
dataset: Dataset to split.
split_at (int): Position at which the base dataset is split.
order (sequence of ints): Permutation of indexes in the base dataset.
See the document of :class:`SubDataset` for details.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset represents the
examples of indexes ``order[:split_at]`` while the second subset
represents the examples of indexes ``order[split_at:]``.
"""
n_examples = len(dataset)
if split_at < 0:
raise ValueError('split_at must be non-negative')
if split_at >= n_examples:
raise ValueError('split_at exceeds the dataset size')
subset1 = SubDataset(dataset, 0, split_at, order)
subset2 = SubDataset(dataset, split_at, n_examples, order)
return subset1, subset2
def split_dataset_random(dataset, first_size):
"""Splits a dataset into two subsets randomly.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset. The split is automatically done randomly.
Args:
dataset: Dataset to split.
first_size (int): Size of the first subset.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset contains
``first_size`` examples randomly chosen from the dataset without
replacement, and the second subset contains the rest of the
dataset.
"""
order = numpy.random.permutation(len(dataset))
return split_dataset(dataset, first_size, order)
def get_cross_validation_datasets(dataset, n_fold, order=None):
"""Creates a set of training/test splits for cross validation.
This function generates ``n_fold`` splits of the given dataset. The first
part of each split corresponds to the training dataset, while the second
part to the test dataset. No pairs of test datasets share any examples, and
all test datasets together cover the whole base dataset. Each test dataset
    contains almost the same number of examples (the numbers may differ by up to 1).
Args:
dataset: Dataset to split.
n_fold (int): Number of splits for cross validation.
order (sequence of ints): Order of indexes with which each split is
determined. If it is ``None``, then no permutation is used.
Returns:
list of tuples: List of dataset splits.
"""
if order is None:
order = numpy.arange(len(dataset))
else:
order = numpy.array(order) # copy
whole_size = len(dataset)
borders = [whole_size * i // n_fold for i in six.moves.range(n_fold + 1)]
test_sizes = [borders[i + 1] - borders[i] for i in six.moves.range(n_fold)]
splits = []
for test_size in reversed(test_sizes):
size = whole_size - test_size
splits.append(split_dataset(dataset, size, order))
new_order = numpy.empty_like(order)
new_order[:test_size] = order[-test_size:]
new_order[test_size:] = order[:-test_size]
order = new_order
return splits
def get_cross_validation_datasets_random(dataset, n_fold):
"""Creates a set of training/test splits for cross validation randomly.
    This function acts almost the same as :func:`get_cross_validation_datasets`,
except automatically generating random permutation.
Args:
dataset: Dataset to split.
n_fold (int): Number of splits for cross validation.
Returns:
list of tuples: List of dataset splits.
"""
order = numpy.random.permutation(len(dataset))
return get_cross_validation_datasets(dataset, n_fold, order)
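# Usage sketch of the helpers above (sizes are illustrative; `dataset` is any
# sequence-like dataset):
#
#     train, valid = split_dataset_random(dataset, first_size=8000)
#     for train_part, test_part in get_cross_validation_datasets(dataset, n_fold=5):
#         ...  # train on train_part, evaluate on test_part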
|
consciousnesss/learn_theano
|
learn_theano/deeplearning_tutorials/test_0_logistic_regression.py
|
Python
|
apache-2.0
| 4,748
| 0.004212
|
import theano
import theano.tensor as T
import numpy as np
from learn_theano.utils.download_all_datasets import get_dataset
import cPickle
import time
def one_zero_loss(prediction_labels, labels):
return T.mean(T.neq(prediction_labels, labels))
def negative_log_likelihood_loss(prediction_probabilities, labels):
    return -T.mean(T.log(prediction_probabilities)[T.arange(labels.shape[0]), labels])
def load_dataset(dataset):
set_x = theano.shared(np.asarray(dataset[0], dtype=theano.config.floatX), borrow=True)
set_y = theano.shared(np.asarray(dataset[1], dtype=theano.config.floatX), borrow=True)
return set_x, T.cast(set_y, 'int32')
def run_0_logistic_regression():
batch_size = 600
learning_rate = 0.13
n_epochs = 1000
train_set, valid_set, test_set = get_dataset('mnist')
train_set_x, train_set_y = load_dataset(train_set)
valid_set_x, valid_set_y = load_dataset(valid_set)
test_set_x, test_set_y = load_dataset(test_set)
n_train_batches = train_set_x.get_value(borrow=True).shape[0]/batch_size
n_validation_batches = valid_set_x.get_value(borrow=True).shape[0]/batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]/batch_size
x = T.matrix('x')
y = T.ivector('y')
n_in=28*28
n_out=10
W = theano.shared(
np.zeros((n_in, n_out), dtype=theano.config.floatX),
name='W',
borrow=True)
b = theano.shared(
np.zeros((n_out,), dtype=theano.config.floatX),
name='b',
borrow=True
)
py_given_x = T.nnet.softmax(T.dot(x, W)+b)
y_predict = T.argmax(py_given_x, axis=1)
cost = negative_log_likelihood_loss(py_given_x, y)
minibatch_index = T.iscalar('minibatch_index')
train_model = theano.function(
inputs=[minibatch_index],
outputs=[],
updates=(
[W, W - learning_rate*T.grad(cost, W)],
[b, b - learning_rate*T.grad(cost, b)],
),
givens={
x: train_set_x[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
y: train_set_y[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
}
)
validation_model = theano.function(
inputs=[minibatch_index],
outputs=one_zero_loss(y_predict, y),
givens={
x: valid_set_x[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
y: valid_set_y[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
}
)
test_model = theano.function(
inputs=[minibatch_index],
outputs=one_zero_loss(y_predict, y),
givens={
x: test_set_x[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
y: test_set_y[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
}
)
start_time = time.time()
def main_loop():
patience = 5000
patience_increase = 2
improvement_threshold = 0.995
validation_frequency = n_train_batches
test_score = 0.
best_validation_loss = np.inf
for epoch in range(n_epochs):
            for minibatch_index in range(n_train_batches):
train_model(minibatch_index)
iteration = epoch*n_train_batches + minibatch_index
if (iteration + 1) % validation_frequency == 0.:
validation_cost = np.mean([validation_model(i) for i in range(n_validation_batches)])
print('epoch %i, validation error %f %%' % (epoch, validation_cost * 100.))
if validation_cost < best_validation_loss:
if validation_cost < best_validation_loss*improvement_threshold:
patience = max(patience, iteration*patience_increase)
best_validation_loss = validation_cost
test_score = np.mean([test_model(i) for i in range(n_test_batches)])
print(' epoch %i, minibatch test error of best model %f %%' % (epoch, test_score * 100.))
if patience <= iteration:
return epoch, best_validation_loss, test_score
return epoch, best_validation_loss, test_score
epoch, best_validation_loss, test_score = main_loop()
total_time = time.time()-start_time
print('Optimization complete in %.1fs with best validation score of %f %%, with test performance %f %%' %
(total_time, best_validation_loss * 100., test_score * 100.))
print('The code run for %d epochs, with %f epochs/sec' % (epoch, epoch/total_time))
assert(abs(best_validation_loss - 0.075) < 1e-6)
    assert(abs(test_score - 0.07489583) < 1e-6)
if __name__ == "__main__":
run_0_logistic_regression()
|
austinharris/gem5-riscv
|
src/mem/slicc/symbols/Transition.py
|
Python
|
bsd-3-clause
| 2,750
| 0.004
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
class Transition(Symbol):
def __init__(self, table, machine, state, event, nextState, actions,
request_types, location):
ident = "%s|%s" % (state, event)
super(Transition, self).__init__(table, ident, location)
self.state = machine.states[state]
self.event = machine.events[event]
self.nextState = machine.states[nextState]
self.actions = [ machine.actions[a] for a in actions ]
self.request_types = [ machine.request_types[s] for s in request_types ]
self.resources = {}
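        # Collect the resource requirements declared by this transition's actions
        # (counts are kept as strings keyed by resource name).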
for action in self.actions:
for var,value in action.resources.iteritems():
num = int(value)
if var in self.resources:
num += int(value)
self.resources[var] = str(num)
def __repr__(self):
return "[Transition: (%r, %r) -> %r, %r]" % \
(self.state, self.event, self.nextState, self.actions)
def getActionShorthands(self):
assert self.actions
return ''.join(a.short for a in self.actions)
__all__ = [ "Transition" ]
KamilWo/bestja | addons/bestja_offers/models/application.py | Python | agpl-3.0 | 8,916 | 0.000786
# -*- coding: utf-8 -*-
from datetime import date
from urllib import quote_plus
from openerp import models, fields, api, exceptions
class ApplicationRejectedReason(models.Model):
_name = 'offers.application.rejected'
name = fields.Char(required=True)
description = fields.Text(required=True)
class Application(models.Model):
"""
Volunteer's request to work for an organization.
"""
_name = 'offers.application'
_inherit = ['message_template.mixin']
_inherits = {
'res.users': 'user'
}
STATES = [
('new', 'Nowa aplikacja'),
('meeting', 'Pierwsze spotkanie'),
('meeting2', 'Drugie spotkanie'),
('accepted', 'Zaakceptowano'),
('rejected', 'Odmówiono'),
]
STATES_DICT = dict(STATES)
# States that should be folded in Kanban view
FOLDED_STATES = [
'meeting2',
]
QUALITY_CHOICES = [
('0', 'Brak oceny'),
('1', 'Słaba'),
('2', 'Średnia'),
('3', 'Dobra'),
('4', 'Doskonała')
]
MEETING_STATES = [
('pending', 'Oczekujące'),
('accepted', 'Potwierdzone'),
('rejected', 'Odrzucone'),
]
# The way to specify all possible groups for particular grouping in kanban
@api.model
def _state_groups(self, present_ids, domain, **kwargs):
folded = {key: (key in self.FOLDED_STATES) for key, _ in self.STATES}
# Need to copy self.STATES list before returning it,
# because odoo modifies the list it gets,
# emptying it in the process. Bad odoo!
return self.STATES[:], folded
_group_by_full = {
'state': _state_groups
}
user = fields.Many2one('res.users', required=True, ondelete='cascade')
offer = fields.Many2one('offer', required=True, ondelete='cascade')
state = fields.Selection(STATES, default='new', string=u"Stan")
quality = fields.Selection(QUALITY_CHOICES, string=u"Jakość")
age = fields.Integer(compute='_compute_age')
meeting = fields.Datetime()
meeting2 = fields.Datetime()
meeting1_state = fields.Selection(MEETING_STATES, default='pending')
meeting2_state = fields.Selection(MEETING_STATES, default='pending')
current_meeting_state = fields.Selection(
MEETING_STATES,
compute='_compute_current_meeting',
inverse='_inverse_current_meeting_state',
)
current_meeting = fields.Datetime(
compute='_compute_current_meeting',
inverse='_inverse_current_meeting',
search='_search_current_meeting'
)
rejected_reason = fields.Many2one('offers.application.rejected')
notes = fields.Text()
_sql_constraints = [
('user_offer_uniq', 'unique("user", "offer")', 'User can apply for an offer only once!')
]
@api.one
def _send_message_new(self):
self.send(
template='bestja_offers.msg_new_application',
recipients=self.sudo().offer.project.responsible_user,
)
@api.model
def create(self, vals):
record = super(Application, self).create(vals)
record._send_message_new()
return record
@api.one
@api.depends('birthdate')
def _compute_age(self):
if self.birthdate:
days_in_year = 365.25 # accounting for a leap year
birthdate = fields.Date.from_string(self.birthdate)
self.age = int((date.today() - birthdate).days / days_in_year)
else:
self.age = False
@api.one
@api.depends('state', 'meeting', 'meeting2')
def _compute_current_meeting(self):
if self.state == 'meeting':
self.current_meeting = self.meeting
self.current_meeting_state = self.meeting1_state
elif self.state == 'meeting2':
if self.meeting2 and self.meeting2 <= self.meeting:
raise exceptions.ValidationError("Drugie spotkanie musi odbyć się po pierwszym!")
self.current_meeting = self.meeting2
self.current_meeting_state = self.meeting2_state
else:
self.current_meeting = False
self.current_meeting_state = False
@api.one
def _inverse_current_meeting_state(self):
if self.state == 'meeting2':
self.meeting2_state = self.current_meeting_state
else:
self.meeting1_state = self.current_meeting_state
@api.one
def _inverse_current_meeting(self):
if self.state == 'meeting2':
self.meeting2 = self.current_meeting
elif self.state == 'meeting':
self.meeting = self.current_meeting
elif self.state == 'new':
self.meeting = self.current_meeting
self.state = 'meeting'
# Reset the meeting state
self.current_meeting_state = 'pending'
# Send message about the meeting
self.send(
template='bestja_offers.msg_application_meeting',
recipients=self.user,
record_name=self.offer.name,
sender=self.env.user,
)
def _search_current_meeting(self, operator, value):
return [
'|', # noqa (domain indent)
'&',
('state', '=', 'meeting'),
('meeting', operator, value),
'&',
('state', '=', 'meeting2'),
('meeting2', operator, value),
]
@api.multi
def action_accept(self):
for application in self:
application.state = 'accepted'
@api.multi
def action_reject(self):
for application in self:
application.state = 'rejected'
return {
'name': 'Podaj powód odrzucenia',
'view_mode': 'form',
'res_model': 'offers.application.rejected_wizard',
'type': 'ir.actions.act_window',
'context': self.env.context,
'target': 'new',
}
@api.one
def set_rejected_reason(self, reason):
self.rejected_reason = reason
self.send(
template='bestja_offers.msg_application_rejected',
recipients=self.user,
record_name=self.offer.name,
sender=self.env.user,
)
@api.one
def action_post_accepted(self):
"""
        After an application has been accepted, add the user
        to the project and the organization.
"""
offer = self.offer
offer.project.write({
'members': [(4, self.user.id)]
})
offer.sudo().project.organization.write({
'volunteers': [(4, self.user.id)]
})
# Unpublish if all vacancies filled
if offer.accepted_application_count >= offer.vacancies:
offer.state = 'archive'
# Send a message
self.send(
template='bestja_offers.msg_application_accepted',
recipients=self.user,
record_name=self.offer.name,
)
@api.one
def action_post_unaccepted(self):
"""
The application had been accepted, but now somebody
changed her mind. Remove user from project, but
leave her with the organization.
"""
self.offer.project.write({
'members': [(3, self.user.id)]
})
def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys,
aggregated_fields, count_field, read_group_result,
read_group_order=None, context=None):
"""
The `_read_group_fill_results` method from base model deals with creating
custom groupings using `_group_by_full` attribute, as shown at the top
of the class. Unfortunately it seems to support grouping using m2o fields
only, while we want to group by a simple status field. Hence the code
above - it replaces simple status values with so-called "m2o-like pairs".
"""
if groupby == 'state':
for result in read_group_result:
state = result['state']
result['state'] = (state, self.STATES_DICT.get(state))
        return super(Application, self)._read_group_fill_results(
            cr, uid, domain, groupby, remaining_groupbys,
            aggregated_fields, count_field, read_group_result,
            read_group_order=read_group_order, context=context)
felix9064/python | Demo/demo/demo003.py | Python | mit | 721 | 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
编程练习:使用二分查找算法求一个任意非负数的平方根(近似值即可)
"""
while True:
x = input("请输入一个非负数:")
try:
x = i
|
nt(x)
if x < 0:
print(x, "
|
不是一个非负数")
else:
break
except ValueError:
print(x, " 不符合要求")
epsilon = 0.0001
num_guesses = 0
low = 0.0
high = max(1.0, x)
ans = (high + low) / 2.0
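# Bisection: low and high always bracket the square root of x; halve the
# interval until ans squared is within epsilon of x.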
while abs(ans**2 - x) >= epsilon:
num_guesses += 1
if ans**2 < x:
low = ans
else:
high = ans
ans = (high + low) / 2.0
print("计算的次数:", num_guesses)
print(x, "的近似平方根为:", ans, -ans)
stackforge/tacker | samples/mgmt_driver/kubernetes_mgmt.py | Python | apache-2.0 | 147,274 | 0.000109
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import ipaddress
import json
import os
import re
import time
import yaml
from oslo_log import log as logging
from oslo_utils import uuidutils
import paramiko
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.db.db_base import CommonDbMixin
from tacker.db.nfvo import nfvo_db
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker import objects
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
CHECK_PV_AVAILABLE_RETRY = 5
CHECK_PV_DEL_COMPLETE_RETRY = 5
CONNECT_MASTER_RETRY_TIMES = 4
HELM_CMD_TIMEOUT = 30
HELM_INSTALL_TIMEOUT = 300
HELM_CHART_DIR = "/var/tacker/helm"
HELM_CHART_CMP_PATH = "/tmp/tacker-helm.tgz"
K8S_CMD_TIMEOUT = 30
K8S_INSTALL_TIMEOUT = 2700
LOG = logging.getLogger(__name__)
NFS_CMD_TIMEOUT = 30
NFS_INSTALL_TIMEOUT = 2700
SERVER_WAIT_COMPLETE_TIME = 60
# CLI timeout period when setting private registries connection
PR_CONNECT_TIMEOUT = 30
PR_CMD_TIMEOUT = 300
class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def __init__(self):
self._init_flag()
def get_type(self):
return 'mgmt-drivers-kubernetes'
def get_name(self):
return 'mgmt-drivers-kubernetes'
def get_description(self):
return 'Tacker Kubernetes VNFMgmt Driver'
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _init_flag(self):
self.FLOATING_IP_FLAG = False
self.SET_NODE_LABEL_FLAG = False
self.SET_ZONE_ID_FLAG = False
def _check_is_cidr(self, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
return True
except ValueError:
return False
def _execute_command(self, commander, ssh_command, timeout, type, retry):
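        # Run `ssh_command` through the commander with a timeout, retrying up to
        # `retry` times; stdout/stderr handling depends on the `type` argument.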
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command, input_data=None)
break
except eventlet.timeout.Timeout:
                LOG.debug('It is time out, When execute command: '
                          '{}.'.format(ssh_command))
retry -= 1
if retry < 0:
LOG.error('It is time out, When execute command: '
'{}.'.format(ssh_command))
raise exceptions.MgmtDriverOtherError(
error_message='It is time out, When execute command: '
'{}.'.format(ssh_command))
time.sleep(30)
if type == 'common' or type == 'etcd':
err = result.get_stderr()
if err:
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'drain':
for res in result.get_stdout():
if 'drained' in res:
break
else:
err = result.get_stderr()
stdout = result.get_stdout()
LOG.debug(stdout)
LOG.debug(err)
elif type in ('certificate_key', 'install', 'scp'):
if result.get_return_code() != 0:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'docker_login':
ret1 = result.get_stdout()
ret2 = result.get_stderr()
return ret1, ret2
elif type == 'helm_repo_list':
if result.get_return_code() != 0:
err = result.get_stderr()[0].replace('\n', '')
if err == 'Error: no repositories to show':
return []
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
return result.get_stdout()
def _create_vim(self, context, vnf_instance, server, bearer_token,
ssl_ca_cert, vim_name, project_name, master_vm_dict_list,
masternode_ip_list):
# ha: create vim
vim_info = {
'vim': {
'name': vim_name,
'auth_url': server,
'vim_project': {
'name': project_name
},
'auth_cred': {
'bearer_token': bearer_token,
'ssl_ca_cert': ssl_ca_cert
},
'type': 'kubernetes',
'tenant_id': context.project_id
}
}
if self.FLOATING_IP_FLAG:
if not master_vm_dict_list[0].get(
'k8s_cluster', {}).get('cluster_fip'):
register_ip = master_vm_dict_list[0].get('ssh').get('ipaddr')
else:
register_ip = master_vm_dict_list[0].get(
'k8s_cluster', {}).get('cluster_fip')
server = re.sub(r'(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})',
register_ip, server)
vim_info['vim']['auth_url'] = server
del vim_info['vim']['auth_cred']['ssl_ca_cert']
extra = {}
if masternode_ip_list:
username = master_vm_dict_list[0].get('ssh').get('username')
password = master_vm_dict_list[0].get('ssh').get('password')
helm_info = {
'masternode_ip': masternode_ip_list,
'masternode_username': username,
'masternode_password': password}
extra['helm_info'] = str(helm_info)
vim_info['vim']['extra'] = extra
try:
nfvo_plugin = NfvoPlugin()
created_vim_info = nfvo_plugin.create_vim(context, vim_info)
except Exception as e:
LOG.error("Failed to register kubernetes vim: {}".format(e))
raise exceptions.MgmtDriverOtherError(
error_message="Failed to register kubernetes vim: {}".format(
e))
id = uuidutils.generate_uuid()
vim_id = created_vim_info.get('id')
vim_type = 'kubernetes'
access_info = {
'auth_url': server
}
vim_connection_info = objects.VimConnectionInfo(
id=id, vim_id=vim_id, vim_type=vim_type,
access_info=access_info, interface_info=None, extra=extra
)
vim_connection_infos = vnf_instance.vim_connection_info
vim_connection_infos.append(vim_connection_info)
vnf_instance.vim_connection_info = vim_connection_infos
vnf_instance.save()
def _get_ha_group_resources_list(
self, heatclient, stack_id, node, additional_params):
# ha: get group resources list
nest_resources_list = heatclient.resources.list(stack_id=stack_id)
group_stack_name = node.get("aspect_id")
group_stack_id = ""
for nest_resources in nest_resources_list:
if nest_resources.resource_name == group_stack_name:
group_stack_id = nest_resources.physical_resource_id
if not group_stack_id:
LOG.error('No stack id matching the group was found.')
raise exceptions.MgmtDriverOtherError(
error_message="No stack id matching the group was found")
group_resourc
hmdavis/flask-mega-tutorial | app/views.py | Python | bsd-3-clause | 4,940 | 0.018421
from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from forms import LoginForm, EditForm
from models import User, ROLE_USER, ROLE_ADMIN
from datetime import datetime
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated():
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.errorhandler(404)
def internal_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
@app.route('/')
@app.route('/index')
@login_required
def index():
user = g.user
posts = [
{
'author': { 'nickname': 'John' },
'body': 'Beautiful day in Portland!'
},
{
'author': { 'nickname': 'Susan' },
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html',
title = 'Home',
user = user,
posts = posts)
@app.route('/login', methods = ['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated():
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for = ['nickname', 'email'])
return render_template('login.html',
title = 'Sign In',
form = form,
providers = app.config['OPENID_PROVIDERS'])
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email = resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
nickname = User.make_unique_nickname(nickname)
user = User(nickname = nickname, email = resp.email, role = ROLE_USER)
db.session.add(user)
db.session.commit()
        # make the user follow him/herself
db.session.add(user.follow(user))
db.session.commit()
    remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
user = User.query.filter_by(nickname = nickname).first()
if user == None:
flash('User ' + nickname + ' not found.')
return redirect(url_for('index'))
posts = [
{ 'author': user, 'body': 'Test post #1' },
{ 'author': user, 'body': 'Test post #2' }
]
return render_template('user.html',
user = user,
posts = posts)
@app.route('/edit', methods = ['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit'))
elif request.method != "POST":
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html',
form = form)
@app.route('/follow/<nickname>')
def follow(nickname):
user = User.query.filter_by(nickname = nickname).first()
if user == None:
flash('User ' + nickname + ' not found.')
return redirect(url_for('index'))
u = g.user.follow(user)
if u is None:
flash('Cannot follow ' + nickname + '.')
return redirect(url_for('user', nickname = nickname))
db.session.add(u)
db.session.commit()
flash('You are now following ' + nickname + '!')
return redirect(url_for('user', nickname = nickname))
@app.route('/unfollow/<nickname>')
def unfollow(nickname):
user = User.query.filter_by(nickname = nickname).first()
if user == None:
flash('User ' + nickname + ' not found.')
return redirect(url_for('index'))
u = g.user.unfollow(user)
if u is None:
flash('Cannot unfollow ' + nickname + '.')
return redirect(url_for('user', nickname = nickname))
db.session.add(u)
db.session.commit()
flash('You have stopped following ' + nickname + '.')
return redirect(url_for('user', nickname = nickname))
ulif/pulp | server/test/unit/server/webservices/test_urls.py | Python | gpl-2.0 | 33,452 | 0.001196
import unittest
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from pulp.server.webservices.urls import handler404
def assert_url_match(expected_url, url_name, *args, **kwargs):
"""
Generate a url given args and kwargs and pass it through Django's reverse and
resolve functions.
Example use to match a url /v2/tasks/<task_argument>/:
assert_url_match('/v2/tasks/example_arg/', 'tasks', task_argument='example_arg')
:param expected_url: the url that should be generated given a url_name and args
:type expected_url: str
:param url_name : name given to a url as defined in the urls.py
:type url_name : str
:param args : optional positional arguments to place into a url's parameters
as specified by urls.py
:type args : tuple
:param kwargs : optional named arguments to place into a url's parameters as
specified by urls.py
:type kwargs : dict
"""
try:
# Invalid arguments will cause a NoReverseMatch.
url = reverse(url_name, args=args, kwargs=kwargs)
except NoReverseMatch:
raise AssertionError(
"Name: '{0}' could match a url with args '{1}'"
"and kwargs '{2}'".format(url_name, args, kwargs)
)
else:
# If the url exists but is not the expected url.
if url != expected_url:
raise AssertionError(
'url {0} not equal to expected url {1}'.format(url, expected_url))
# Run this url back through resolve and ensure that it matches the url_name.
matched_view = resolve(url)
if matched_view.url_name != url_name:
raise AssertionError('Url name {0} not equal to expected url name {1}'.format(
matched_view.url_name, url_name)
)
class TestNotFoundHandler(unittest.TestCase):
def test_not_found_handler(self):
"""
Test that the handler404 module attribute is set as expected.
"""
self.assertEqual(handler404, 'pulp.server.webservices.views.util.page_not_found')
class TestDjangoContentUrls(unittest.TestCase):
"""
Test the matching of the content urls
"""
def test_match_content_catalog_resource(self):
"""
Test url matching for content_catalog_resource.
"""
url = '/v2/content/catalog/mock-source/'
url_name = 'content_catalog_resource'
assert_url_match(url, url_name, source_id='mock-source')
def test_match_content_orphan_collection(self):
"""
Test url matching for content_orphan_collection.
"""
url = '/v2/content/orphans/'
url_name = 'content_orphan_collection'
assert_url_match(url, url_name)
def test_match_content_units_collection(self):
"""
Test the url matching for content_units_collection.
"""
url = '/v2/content/units/mock-type/'
url_name = 'content_units_collection'
assert_url_match(url, url_name, type_id='mock-type')
def test_match_content_unit_search(self):
"""
Test the url matching for content_unit_search.
"""
url = '/v2/content/units/mock-type/search/'
url_name = 'content_unit_search'
        assert_url_match(url, url_name, type_id='mock-type')
def test_match_content_unit_resource(self):
"""
Test url matching for content_unit_resource.
"""
url = '/v2/content/units/mock-type/mock-unit/'
url_name = 'content_unit_resource'
assert_url_match(url, url_name, type_id='mock-type', unit_id='mock-unit')
    def test_match_content_unit_user_metadata_resource(self):
"""
Test url matching for content_unit_user_metadata_resource.
"""
url = '/v2/content/units/mock-type/mock-unit/pulp_user_metadata/'
url_name = 'content_unit_user_metadata_resource'
assert_url_match(url, url_name, type_id='mock-type', unit_id='mock-unit')
def test_match_content_upload_resource(self):
"""
Test url matching for content_upload_resource.
"""
url = '/v2/content/uploads/mock-upload/'
url_name = 'content_upload_resource'
assert_url_match(url, url_name, upload_id='mock-upload')
def test_match_content_upload_segment_resource(self):
"""
Test Url matching for content_upload_segment_resource.
"""
url = '/v2/content/uploads/mock-upload-id/8/'
url_name = 'content_upload_segment_resource'
assert_url_match(url, url_name, upload_id='mock-upload-id', offset='8')
def test_match_content_actions_delete_orphans(self):
"""
Test url matching for content_actions_delete_orphans.
"""
url = '/v2/content/actions/delete_orphans/'
url_name = 'content_actions_delete_orphans'
assert_url_match(url, url_name)
def test_match_content_orphan_resource(self):
"""
Test url matching for content_orphan_resource.
"""
url = '/v2/content/orphans/mock-type/mock-unit/'
url_name = 'content_orphan_resource'
assert_url_match(url, url_name, content_type='mock-type', unit_id='mock-unit')
def test_match_content_orphan_type_subcollection(self):
"""
Test url matching for content_orphan_type_subcollection.
"""
url = '/v2/content/orphans/mock_type/'
url_name = 'content_orphan_type_subcollection'
assert_url_match(url, url_name, content_type='mock_type')
def test_match_content_uploads(self):
"""
Test url matching for content_uploads.
"""
url = '/v2/content/uploads/'
url_name = 'content_uploads'
assert_url_match(url, url_name)
class TestDjangoPluginsUrls(unittest.TestCase):
"""
Test url matching for plugins urls.
"""
def test_match_distributor_resource_view(self):
"""
Test the url matching for the distributor resource view.
"""
url = '/v2/plugins/distributors/mock_distributor/'
url_name = 'plugin_distributor_resource'
assert_url_match(url, url_name, distributor_id='mock_distributor')
def test_match_distributors_view(self):
"""
Test the url matching for the Distributors view.
"""
url = '/v2/plugins/distributors/'
url_name = 'plugin_distributors'
assert_url_match(url, url_name)
def test_match_importer_resource_view(self):
"""
Test the url matching for plugin_importer_resource
"""
url = '/v2/plugins/importers/mock_importer_id/'
url_name = 'plugin_importer_resource'
assert_url_match(url, url_name, importer_id='mock_importer_id')
def test_match_importers_view(self):
"""
Test the url matching for the Importers view
"""
url = '/v2/plugins/importers/'
url_name = 'plugin_importers'
assert_url_match(url, url_name)
def test_match_type_resource_view(self):
"""
Test the url matching for the TypeResourceView.
"""
url = '/v2/plugins/types/type_id/'
url_name = 'plugin_type_resource'
assert_url_match(url, url_name, type_id='type_id')
def test_match_types_view(self):
"""
Test url matching for plugin_types.
"""
url = '/v2/plugins/types/'
url_name = 'plugin_types'
assert_url_match(url, url_name)
class TestDjangoLoginUrls(unittest.TestCase):
"""
Tests for root_actions urls.
"""
def test_match_login_view(self):
"""
Test url match for login.
"""
url = '/v2/actions/login/'
url_name = 'login'
assert_url_match(url, url_name)
class TestDjangoConsumerGroupsUrls(unittest.TestCase):
"""
Tests for consumer_groups urls
"""
def test_match_consumer_group_view(self):
"""
Test url matching for consumer_groups
"""
mylxiaoyi/mypyqtgraph-qt5 | examples/MultiplePlotAxes.py | Python | mit | 1,925 | 0.016623
# -*- coding: utf-8 -*-
"""
Demonstrates a way to put multiple axes around a single plot.
(This will eventually become a built-in feature of PlotItem)
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import numpy as np
pg.mkQApp()
pw = pg.PlotWidget()
pw.show()
pw.setWindowTitle('pyqtgraph example: MultiplePlotAxes')
p1 = pw.plotItem
p1.setLabels(left='axis 1')
## create a new ViewBox, link the right axis to its coordinate system
p2 = pg.ViewBox()
p1.showAxis('right')
p1.scene().addItem(p2)
p1.getAxis('right').linkToView(p2)
p2.setXLink(p1)
p1.getAxis('right').setLabel('axis2', color='#0000ff')
## create third ViewBox.
## this time we need to create a new axis as well.
p3 = pg.ViewBox()
ax3 = pg.AxisItem('right')
p1.layout.addItem(ax3, 2, 3)
p1.scene().addItem(p3)
ax3.linkToView(p3)
p3.setXLink(p1)
ax3.setZValue(-10000)
ax3.setLabel('axis 3', color='#ff0000')
## Handle view resizing
def updateViews():
## view has resized; update auxiliary views to match
global p1, p2, p3
p2.setGeometry(p1.vb.sceneBoundingRect())
    p3.setGeometry(p1.vb.sceneBoundingRect())
## need to re-update linked axes since this was called
## incorrectly while views had different shapes.
## (probably this should be handled in ViewBox.resizeEvent)
p2.linkedViewChanged(p1.vb, p2.XAxis)
p3.linkedViewChanged(p1.vb, p3.XAxis)
updateViews()
p1.vb.sigResized.connect(updateViews)
p1.plot([1,2,4,8,16,32])
p2.addItem(pg.PlotCurveItem([10,20,40,80,40,20], pen='b'))
p3.addItem(pg.PlotCurveItem([3200,1600,800,400,200,100], pen='r'))
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtWidgets.QApplication.instance().exec_()
carvalhomb/tsmells | guess/src/Lib/xml/parsers/xmlproc/namespace.py | Python | gpl-2.0 | 5,187 | 0.022556
"""
A parser filter for namespace support. Placed externally to the parser
for efficiency reasons.
$Id: namespace.py,v 1.1 2005/10/05 20:19:37 eytanadar Exp $
"""
import string
import xmlapp
# --- ParserFilter
class ParserFilter(xmlapp.Application):
"A generic parser filter class."
def __init__(self):
xmlapp.Application.__init__(self)
self.app=xmlapp.Application()
def set_application(self,app):
"Sets the application to report events to."
self.app=app
# --- Methods inherited from xmlapp.Application
def set_locator(self,locator):
xmlapp.Application.set_locator(self,locator)
self.app.set_locator(locator)
def doc_start(self):
self.app.doc_start()
def doc_end(self):
self.app.doc_end()
def handle_comment(self,data):
self.app.handle_comment(data)
def handle_start_tag(self,name,attrs):
self.app.handle_start_tag(name,attrs)
def handle_end_tag(self,name):
self.app.handle_end_tag(name)
def handle_data(self,data,start,end):
self.app.handle_data(data,start,end)
def handle_ignorable_data(self,data,start,end):
self.app.handle_ignorable_data(data,start,end)
def handle_pi(self,target,data):
self.app.handle_pi(target,data)
def handle_doctype(self,root,pubID,sysID):
self.app.handle_doctype(root,pubID,sysID)
def set_entity_info(self,xmlver,enc,sddecl):
self.app.set_entity_info(xmlver,enc,sddecl)
# --- NamespaceFilter
class NamespaceFilter(ParserFilter):
"""An xmlproc application that processes qualified names and reports them
as 'URI local-part' names. It reports errors through the error reporting
mechanisms of the parser."""
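    # For example, with xmlns:svg="http://www.w3.org/2000/svg" in scope, the
    # start tag "svg:rect" is reported as "http://www.w3.org/2000/svg rect".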
def __init__(self,parser):
ParserFilter.__init__(self)
self.ns_map={} # Current prefix -> URI map
self.ns_stack=[] # Pushed for each element, used to maint ns_map
self.rep_ns_attrs=0 # Report xmlns-attributes?
self.parser=parser
def set_report_ns_attributes(self,action):
"Tells the filter whether to report or delete xmlns-attributes."
self.rep_ns_attrs=action
# --- Overridden event methods
def handle_start_tag(self,name,attrs):
old_ns={} # Reset ns_map to these values when we leave this element
del_ns=[] # Delete these prefixes from ns_map when we leave element
# attrs=attrs.copy() Will have to do this if more filters are made
# Find declarations, update self.ns_map and self.ns_stack
for (a,v) in attrs.items():
if a[:6]=="xmlns:":
prefix=a[6:]
if string.find(prefix,":")!=-1:
self.parser.report_error(1900)
if v=="":
self.parser.report_error(1901)
elif a=="xmlns":
prefix=""
else:
continue
if self.ns_map.has_key(prefix):
old_ns[prefix]=self.ns_map[prefix]
else:
del_ns.append(prefix)
if prefix=="" and v=="":
del self.ns_map[prefix]
else:
self.ns_map[prefix]=v
if not self.rep_ns_attrs:
del attrs[a]
self.ns_stack.append((old_ns,del_ns))
# Process elem and attr names
name=self.__process_name(name)
parts=string.split(name)
if len(parts)>1:
ns=parts[0]
else:
ns=None
for (a,v) in attrs.items():
del attrs[a]
aname=self.__process_name(a,ns)
if attrs.has_key(aname):
self.parser.report_error(1903)
attrs[aname]=v
# Report event
self.app.handle_start_tag(name,attrs)
def handle_end_tag(self,name):
name=self.__process_name(name)
# Clean up self.ns_map and self.ns_stack
(old_ns,del_ns)=self.ns_stack[-1]
del self.ns_stack[-1]
self.ns_map.update(old_ns)
for prefix in del_ns:
del self.ns_map[prefix]
self.app.handle_end_tag(name)
# --- Internal methods
def __process_name(self,name,default_to=None):
n=string.split(name,":")
if len(n)>2:
            self.parser.report_error(1900)
return name
elif len(n)==2:
if n[0]=="xmlns":
return name
try:
return "%s %s" % (self.ns_map[n[0]],n[1])
except KeyError:
self.parser.report_error(1902)
return name
elif default_to!=None:
return "%s %s" % (default_to,name)
elif self.ns_map.has_key("") and name!="xmlns":
return "%s %s" % (self.ns_map[""],name)
else:
return name
peter-wangxu/python_play | test/mock_test/MockChild.py | Python | apache-2.0 | 160 | 0.00625
import mock
class MockTest(mock.Mock):
def test_fun1(self, p1, p2):
pass
m = MockTest()
m.test_fun1(1, 2)
m.test_fun1.assert_called_with(1, 2)
mapnik/python-mapnik | test/python_tests/topojson_plugin_test.py | Python | lgpl-2.1 | 3,919 | 0.000511
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
from nose.tools import assert_almost_equal, eq_
import mapnik
from .utilities import execution_path, run_all
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'topojson' in mapnik.DatasourceCache.plugin_names():
def test_topojson_init():
# topojson tests/data/json/escaped.geojson -o tests/data/topojson/escaped.topojson --properties
# topojson version 1.4.2
ds = mapnik.Datasource(
type='topojson',
            file='../data/topojson/escaped.topojson')
        e = ds.envelope()
assert_almost_equal(e.minx, -81.705583, places=7)
assert_almost_equal(e.miny, 41.480573, places=6)
assert_almost_equal(e.maxx, -81.705583, places=5)
assert_almost_equal(e.maxy, 41.480573, places=3)
def test_topojson_properties():
ds = mapnik.Datasource(
type='topojson',
file='../data/topojson/escaped.topojson')
f = list(ds.features_at_point(ds.envelope().center()))[0]
eq_(len(ds.fields()), 11)
desc = ds.describe()
eq_(desc['geometry_type'], mapnik.DataGeometryType.Point)
eq_(f['name'], u'Test')
eq_(f['int'], 1)
eq_(f['description'], u'Test: \u005C')
eq_(f['spaces'], u'this has spaces')
eq_(f['double'], 1.1)
eq_(f['boolean'], True)
eq_(f['NOM_FR'], u'Qu\xe9bec')
eq_(f['NOM_FR'], u'Québec')
ds = mapnik.Datasource(
type='topojson',
file='../data/topojson/escaped.topojson')
f = list(ds.all_features())[0]
eq_(len(ds.fields()), 11)
desc = ds.describe()
eq_(desc['geometry_type'], mapnik.DataGeometryType.Point)
eq_(f['name'], u'Test')
eq_(f['int'], 1)
eq_(f['description'], u'Test: \u005C')
eq_(f['spaces'], u'this has spaces')
eq_(f['double'], 1.1)
eq_(f['boolean'], True)
eq_(f['NOM_FR'], u'Qu\xe9bec')
eq_(f['NOM_FR'], u'Québec')
def test_geojson_from_in_memory_string():
ds = mapnik.Datasource(
type='topojson',
inline=open(
'../data/topojson/escaped.topojson',
'r').read())
f = list(ds.all_features())[0]
eq_(len(ds.fields()), 11)
desc = ds.describe()
eq_(desc['geometry_type'], mapnik.DataGeometryType.Point)
eq_(f['name'], u'Test')
eq_(f['int'], 1)
eq_(f['description'], u'Test: \u005C')
eq_(f['spaces'], u'this has spaces')
eq_(f['double'], 1.1)
eq_(f['boolean'], True)
eq_(f['NOM_FR'], u'Qu\xe9bec')
eq_(f['NOM_FR'], u'Québec')
# @raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.Datasource(
type='topojson',
file='../data/topojson/escaped.topojson')
eq_(len(ds.fields()), 11)
# TODO - this sorting is messed up
eq_(ds.fields(), ['name', 'int', 'description',
'spaces', 'double', 'boolean', 'NOM_FR',
'object', 'array', 'empty_array', 'empty_object'])
eq_(ds.field_types(), ['str', 'int',
'str', 'str', 'float', 'bool', 'str',
'str', 'str', 'str', 'str'])
# TODO - should topojson plugin throw like others?
# query = mapnik.Query(ds.envelope())
# for fld in ds.fields():
# query.add_property_name(fld)
# # also add an invalid one, triggering throw
# query.add_property_name('bogus')
# fs = ds.features(query)
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/plugin.video.muchmovies.hd/default.py | Python | gpl-2.0 | 51,620 | 0.010965
# -*- coding: utf-8 -*-
'''
Much Movies HD XBMC Addon
Copyright (C) 2014 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,re,os,threading,datetime,xbmc,xbmcplugin,xbmcgui,xbmcaddon,xbmcvfs
from operator import itemgetter
try: import json
except: import simplejson as json
try: import CommonFunctions
except: import commonfunctionsdummy as CommonFunctions
from metahandler import metahandlers
from metahandler import metacontainers
language = xbmcaddon.Addon().getLocalizedString
setSetting = xbmcaddon.Addon().setSetting
getSetting = xbmcaddon.Addon().getSetting
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
addonDesc = language(30450).encode("utf-8")
addonIcon = os.path.join(addonPath,'icon.png')
addonFanart = os.path.join(addonPath,'fanart.jpg')
addonArt = os.path.join(addonPath,'resources/art')
addonDownloads = os.path.join(addonPath,'resources/art/Downloads.png')
addonPages = os.path.join(addonPath,'resources/art/Pages.png')
addonNext = os.path.join(addonPath,'resources/art/Next.png')
dataPath = xbmc.translatePath('special://profile/addon_data/%s' % (addonId))
viewData = os.path.join(dataPath,'views.cfg')
favData = os.path.join(dataPath,'favourites.cfg')
metaget = metahandlers.MetaData(preparezip=False)
common = CommonFunctions
action = None
class main:
def __init__(self):
global action
index().container_data()
params = {}
splitparams = sys.argv[2][sys.argv[2].find('?') + 1:].split('&')
for param in splitparams:
if (len(param) > 0):
splitparam = param.split('=')
key = splitparam[0]
try: value = splitparam[1].encode("utf-8")
except: value = splitparam[1]
params[key] = value
try: action = urllib.unquote_plus(params["action"])
except: action = None
try: name = urllib.unquote_plus(params["name"])
except: name = None
try: url = urllib.unquote_plus(params["url"])
except: url = None
try: image = urllib.unquote_plus(params["image"])
except: image = None
try: query = urllib.unquote_plus(params["query"])
except: query = None
try: title = urllib.unquote_plus(params["title"])
except: title = None
try: year = urllib.unquote_plus(params["year"])
except: year = None
try: imdb = urllib.unquote_plus(params["imdb"])
except: imdb = None
if action == None: root().get()
elif action == 'item_play': contextMenu().item_play()
elif action == 'item_random_play': contextMenu().item_random_play()
elif action == 'item_queue': contextMenu().item_queue()
elif action == 'favourite_add': contextMenu().favourite_add(favData, name, url, image, imdb)
elif action == 'favourite_from_search': contextMenu().favourite_from_search(favData, name, url, image, imdb)
elif action == 'favourite_delete': contextMenu().favourite_delete(favData, name, url)
elif action == 'favourite_moveUp': contextMenu().favourite_moveUp(favData, name, url)
elif action == 'favourite_moveDown': contextMenu().favourite_moveDown(favData, name, url)
elif action == 'playlist_open': contextMenu().playlist_open()
        elif action == 'settings_open': contextMenu().settings_open()
elif action == 'addon_home': contextMenu().addon_home()
elif action == 'view_movies': contextMenu().view('movies')
elif action == 'metadata_movies': contextMenu().metadata('movie', name, url, imdb, '', '')
        elif action == 'metadata_movies2': contextMenu().metadata('movie', name, url, imdb, '', '')
elif action == 'playcount_movies': contextMenu().playcount('movie', imdb, '', '')
elif action == 'library': contextMenu().library(name, url)
elif action == 'download': contextMenu().download(name, url)
elif action == 'trailer': contextMenu().trailer(name, url)
elif action == 'movies': movies().muchmovies(url)
elif action == 'movies_title': movies().muchmovies_title()
elif action == 'movies_release': movies().muchmovies_release()
elif action == 'movies_added': movies().muchmovies_added()
elif action == 'movies_rating': movies().muchmovies_rating()
elif action == 'movies_search': movies().muchmovies_search(query)
elif action == 'movies_favourites': favourites().movies()
elif action == 'pages_movies': pages().muchmovies()
elif action == 'genres_movies': genres().muchmovies()
elif action == 'play': resolver().run(url, name)
if action is None:
pass
elif action.startswith('movies'):
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
index().container_view('movies', {'skin.confluence' : 500})
xbmcplugin.setPluginFanart(int(sys.argv[1]), addonFanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
return
class getUrl(object):
def __init__(self, url, fetch=True, close=True, cookie=False, mobile=False, proxy=None, post=None, referer=None):
if not proxy is None:
proxy_handler = urllib2.ProxyHandler({'http':'%s' % (proxy)})
opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
opener = urllib2.install_opener(opener)
if cookie == True:
import cookielib
cookie_handler = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
opener = urllib2.install_opener(opener)
if not post is None:
request = urllib2.Request(url, post)
else:
request = urllib2.Request(url,None)
if mobile == True:
request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
else:
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0')
if not referer is None:
request.add_header('Referer', referer)
response = urllib2.urlopen(request, timeout=30)
if fetch == True:
result = response.read()
else:
result = response.geturl()
if close == True:
response.close()
self.result = result
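    # Illustrative use (URL is a placeholder): getUrl('http://example.com', mobile=True).result
    # holds the fetched page body, requested with a mobile user-agent string.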
class uniqueList(object):
def __init__(self, list):
uniqueSet = set()
uniq
bigswitch/sample-scripts | bcf/controller_bcf.py | Python | mit | 7,886 | 0.006848
#
# Simple BCF config script
# No error checking
#
import requests
import json
import sys
requests.packages.urllib3.disable_warnings()
class Controller(object):
"""
controller version 4.x
"""
def __init__(self, controller_ip, access_token):
self.bcf_path = '/api/v1/data/controller/applications/bcf'
self.core_path = '/api/v1/data/controller/core'
self.controller_ip = controller_ip
self.access_token = access_token
def controller_request(self, method, path, data="", dry_run=False):
if not self.controller_ip:
print( 'You must set controller_ip to the IP address of your controller' )
controller_url = "https://%s:8443" % self.controller_ip
# append path to the controller url, e.g. "https://192.168.23.98:8443" + "/api/v1/auth/login"
url = controller_url + path
# if a cookie exists then use it in the header, otherwise create a header without a cookie
session_cookie = 'session_cookie=%s' % self.access_token
headers = {"content-type": "application/json", 'Cookie': session_cookie}
if dry_run:
print( 'METHOD ', method, ' URL ', url, ' DATA ', data, ' HEADERS ', headers )
return None
else:
# submit the request
response = requests.request(method, url, data=data, headers=headers, verify=False)
# if content exists then return it, otherwise return the HTTP status code
#if response.content:
# return response.content
#else:
return response
def make_request(self, verb, path, data, core_path = False):
if core_path:
return self.controller_request(verb, self.core_path + path, data=data, dry_run=False)
return self.controller_request(verb, self.bcf_path + path, data=data, dry_run=False)
def interface_group(self, name, mode='', origination='', action='add'):
path = '/interface-group[name="%s"]' % name
if origination and mode:
data = '{"name": "%s", "mode": "%s", "origination": "%s"}' % (name, mode, origination)
elif origination:
data = '{"name": "%s", "origination": "%s"}' % (name, origination)
else:
data = '{"name": "%s"}' % name
if action == 'add':
return self.make_request('PUT', path, data=data)
elif action == 'delete':
return self.make_request('DELETE', path, data=data)
elif action == 'get':
response = self.make_request('GET', path, data=data).json()
if response:
return response[0] if response else []
def interface_groups(self, interface_groups=[], action='add'):
""" add each interface_group in list of interface_groups using the function add_interface_group """
if action == 'add':
for interface_group in interface_groups:
                self.interface_group(interface_group, action=action)
elif action == 'get':
path = '/info/fabric/interface-group/detail'
response = self.make_request('GET', path, data='{}').json()
return response
if response:
return response[0] if response else []
def interface_group_member(self, switch, interface, interface_group, action='add'):
path = '/interface-group[name="%s"]/member-interface[switch-name="%s"][interface-name="%s"]'% (interface_group, switch, interface)
data = '{"switch-name": "%s", "interface-name": "%s"}' % (switch, interface)
return self.make_request('PUT' if action == 'add' else 'DELETE', path, data=data)
def interface_group_members(self, switch_interface, interface_group, action='add'):
""" add each (switch, interface) pair in interfaces to interface-group interface_group """
for (switch, interface) in switch_interface:
self.interface_group_member(switch, interface, interface_group, action=action)
def tenant(self, name, origination='', action='add'):
path = '/tenant[name="%s"]?select=name&single=true' % name
response = self.make_request('GET', path, data='{}')
config_present = True if response.status_code != 404 else False
path = '/tenant[name="%s"]' % name
data = '{"name": "%s"}' % name
if origination:
data = '{"name": "%s", "origination": "%s"}' % (name, origination)
if action == 'add' and not config_present:
return self.make_request('PUT', path, data=data)
elif action != 'add' and config_present:
return self.make_request('DELETE', path, data=data)
def segment(self, name, tenant, interface_groups=[], origination='', action='add'):
path = '/tenant[name="%s"]/segment[name="%s"]?select=name&single=true' % (tenant, name)
response = self.make_request('GET', path, data='{}')
config_present = True if response.status_code != 404 else False
path = '/tenant[name="%s"]/segment[name="%s"]' %(tenant, name)
data = '{"name": "%s", "origination": "%s"}' % (name, origination)
if action == 'add' and not config_present:
response = self.make_request('PUT', path, data=data)
elif action != 'add' and config_present:
return self.make_request('DELETE', path, data=data)
if interface_groups:
for interface_group, vlan in interface_groups:
result = self.interface_group_segment_membership(interface_group, name, tenant, vlan=vlan)
def get_segments(self, tenant, prefix=''):
path = '/tenant[name="%s"]' % tenant
response = self.make_request('GET', path, data='{}')
segments = []
if 'segment' in response.json()[0]:
segments = response.json()[0]['segment']
return [segment for segment in segments if segment['name'].startswith(prefix)]
def interface_group_segment_membership(self, interface_group, segment, tenant, vlan='-1', action='add'):
""" """
path = '/tenant[name="%s"]/segment[name="%s"]/interface-group-membership-rule[vlan=%s][interface-group="%s"]' %(tenant, segment, vlan, interface_group)
data = '{"vlan": %s, "interface-group": "%s"}' %(vlan, interface_group)
return self.make_request('POST' if action == 'add' else 'DELETE', path, data=data)
def interface_stats(self, interface, switch_dpid, action='get'):
""" """
if action != 'clear':
path = '/info/statistic/interface-counter[interface/name="%s"][switch-dpid="%s"]?select=interface[name="%s"]' % (interface, switch_dpid, interface)
response = self.make_request('GET', path, data='{}').json()
return response[0] if response else []
else:
path = '/info/statistic/interface-counter[switch-dpid="%s"]/interface[name="%s"]' % (switch_dpid, interface)
response = self.make_request('DELETE', path, data='{}').json()
return response
def switch_dpid(self, switch):
""" """
path = '/switch-config[name="%s"]?select=dpid' % switch
response = self.make_request('GET', path, data='{}', core_path=True)
return response.json()[0]['dpid'].lower()
    def interface(self, switch, interface, action='no-shutdown'):
""" """
if action == 'shutdown':
path = '/switch-config[name="%s"]/interface[name="%s"]' %(switch, interface)
data = '{"shutdown": true}'
return self.make_request('PATCH', path, data=data, core_path=True)
elif action == 'no-shutdown':
path = '/switch-config[name="%s"]/interface[name="%s"]/shutdown' %(switch, interface)
return self.make_request('DELETE', path, data='{}', core_path=True)
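# Illustrative usage (controller IP and session token below are placeholders):
#   c = Controller('192.0.2.10', access_token='<session-cookie>')
#   c.tenant('demo-tenant')
#   c.segment('web', 'demo-tenant', interface_groups=[('rack1-ig', '10')])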
edisonlz/fruit | web_project/base/site-packages/django/contrib/gis/db/backends/postgis/creation.py | Python | apache-2.0 | 4,498 | 0.001779
from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_ops = 'GIST_GEOMETRY_OPS'
geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
@cached_property
def template_postgis(self):
template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
cursor = self.connection.cursor()
cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
if cursor.fetchone():
return template_postgis
return None
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography or self.connection.ops.geometry:
# Geography and Geometry (PostGIS 2.0+) columns are
# created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
# PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
# we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
# which are fast on multidimensional cases, or just plain
# gist index for the 2d case.
if f.geography:
index_ops = ''
elif self.connection.ops.geometry:
if f.dim > 2:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
                else:
                    index_ops = ''
            else:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
return output
def sql_table_creation_suffix(self):
if self.template_postgis is not None:
return ' TEMPLATE %s' % (
self.connection.ops.quote_name(self.template_postgis),)
return ''
def _create_test_db(self, verbosity, autoclobber):
test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
if self.template_postgis is None:
# Connect to the test database in order to create the postgis extension
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
cursor = self.connection.cursor()
cursor.execute("CREATE EXTENSION postgis")
cursor.connection.commit()
return test_database_name
zultron/virt-manager | tests/capabilities.py | Python | gpl-2.0 | 11,259 | 0.002487
# Copyright (C) 2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import os
import unittest
from virtinst import CapabilitiesParser as capabilities
def build_host_feature_dict(feature_list):
fdict = {}
for f in feature_list:
fdict[f] = capabilities.FEATURE_ON
return fdict
class TestCapabilities(unittest.TestCase):
def _compareGuest(self, (arch, os_type, domains, features), guest):
self.assertEqual(arch, guest.arch)
self.assertEqual(os_type, guest.os_type)
self.assertEqual(len(domains), len(guest.domains))
for n in range(len(domains)):
self.assertEqual(domains[n][0], guest.domains[n].hypervisor_type)
self.assertEqual(domains[n][1], guest.domains[n].emulator)
self.assertEqual(domains[n][2], guest.domains[n].machines)
for n in features:
self.assertEqual(features[n], guest.features[n])
def _buildCaps(self, filename):
path = os.path.join("tests/capabilities-xml", filename)
xml = file(path).read()
return capabilities.Capabilities(xml)
def _testCapabilities(self, path, (host_arch, host_features), guests,
secmodel=None):
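        # Each entry in `guests` is an (arch, os_type, domains, features) tuple,
        # matched against the parsed capabilities by _compareGuest above.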
caps = self._buildCaps(path)
if host_arch:
self.assertEqual(host_arch, caps.host.arch)
for n in host_features:
self.assertEqual(host_features[n], caps.host.features[n])
if secmodel:
self.assertEqual(secmodel[0], caps.host.secmodel.model)
self.assertEqual(secmodel[1], caps.host.secmodel.doi)
if secmodel[2]:
for k, v in secmodel[2].items():
self.assertEqual(v, caps.host.secmodel.baselabels[k])
for idx in range(len(guests)):
self._compareGuest(guests[idx], caps.guests[idx])
def testCapabilities1(self):
host = ('x86_64', {'vmx': capabilities.FEATURE_ON})
guests = [
('x86_64', 'xen',
[['xen', None, []]], {}),
('i686', 'xen',
[['xen', None, []]], {'pae': capabilities.FEATURE_ON}),
('i686', 'hvm',
[['xen', "/usr/lib64/xen/bin/qemu-dm", ['pc', 'isapc']]], {'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}),
('x86_64', 'hvm',
[['xen', "/usr/lib64/xen/bin/qemu-dm", ['pc', 'isapc']]], {})
]
self._testCapabilities("capabilities-xen.xml", host, guests)
def testCapabilities2(self):
host = ('x86_64', {})
secmodel = ('selinux', '0', None)
guests = [
('x86_64', 'hvm',
[['qemu', '/usr/bin/qemu-system-x86_64', ['pc', 'isapc']]], {}),
('i686', 'hvm',
[['qemu', '/usr/bin/qemu', ['pc', 'isapc']]], {}),
('mips', 'hvm',
[['qemu', '/usr/bin/qemu-system-mips', ['mips']]], {}),
('mipsel', 'hvm',
[['qemu', '/usr/bin/qemu-system-mipsel', ['mips']]], {}),
('sparc', 'hvm',
[['qemu', '/usr/bin/qemu-system-sparc', ['sun4m']]], {}),
('ppc', 'hvm',
[['qemu', '/usr/bin/qemu-system-ppc',
['g3bw', 'mac99', 'prep']]], {}),
]
self._testCapabilities("capabilities-qemu.xml", host, guests, secmodel)
def testCapabilities3(self):
host = ('i686', {})
guests = [
('i686', 'hvm',
[['qemu', '/usr/bin/qemu', ['pc', 'isapc']],
['kvm', '/usr/bin/qemu-kvm', ['pc', 'isapc']]], {}),
('x86_64', 'hvm',
[['qemu', '/usr/bin/qemu-system-x86_64', ['pc', 'isapc']]], {}),
('mips', 'hvm',
[['qemu', '/usr/bin/qemu-system-mips', ['mips']]], {}),
('mipsel', 'hvm',
[['qemu', '/usr/bin/qemu-system-mipsel', ['mips']]], {}),
('sparc', 'hvm',
[['qemu', '/usr/bin/qemu-system-sparc', ['sun4m']]], {}),
('ppc', 'hvm',
[['qemu', '/usr/bin/qemu-system-ppc',
['g3bw', 'mac99', 'prep']]], {}),
]
secmodel = ('dac', '0', {"kvm" : "+0:+0", "qemu" : "+0:+0"})
self._testCapabilities("capabilities-kvm.xml", host, guests, secmodel)
def testCapabilities4(self):
host = ('i686',
{'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF})
guests = [
('i686', 'linux',
[['test', None, []]],
{'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}),
]
self._testCapabilities("capabilities-test.xml", host, guests)
def testCapsLXC(self):
guests = [
("x86_64", "exe", [["lxc", "/usr/libexec/libvirt_lxc", []]], {}),
("i686", "exe", [["lxc", "/usr/libexec/libvirt_lxc", []]], {}),
]
self._testCapabilities("capabilities-lxc.xml",
(None, None), guests)
def testCapsTopology(self):
filename = "capabilities-test.xml"
caps = self._buildCaps(filename)
self.assertTrue(bool(caps.host.topology))
        self.assertTrue(len(caps.host.topology.cells) == 2)
self.assertTrue(len(caps.host.topology.cells[0].cpus) == 8)
self.assertTrue(len(caps.host.topology.cells[0].cpus) == 8)
def testCapsCPUFeaturesOldSyntax(self):
filename = "rhel5.4-xen-caps-virt-enabled.xml"
host_feature_list = ["vmx"]
feature_dict = build_host_feature_dict(host_feature_list)
        caps = self._buildCaps(filename)
for f in feature_dict.keys():
self.assertEquals(caps.host.features[f], feature_dict[f])
def testCapsCPUFeaturesOldSyntaxSVM(self):
filename = "rhel5.4-xen-caps.xml"
host_feature_list = ["svm"]
feature_dict = build_host_feature_dict(host_feature_list)
caps = self._buildCaps(filename)
for f in feature_dict.keys():
self.assertEquals(caps.host.features[f], feature_dict[f])
def testCapsCPUFeaturesNewSyntax(self):
filename = "libvirt-0.7.6-qemu-caps.xml"
host_feature_list = ['lahf_lm', 'xtpr', 'cx16', 'tm2', 'est', 'vmx',
'ds_cpl', 'pbe', 'tm', 'ht', 'ss', 'acpi', 'ds']
feature_dict = build_host_feature_dict(host_feature_list)
caps = self._buildCaps(filename)
for f in feature_dict.keys():
            self.assertEqual(caps.host.features[f], feature_dict[f])
        self.assertEqual(caps.host.cpu.model, "core2duo")
        self.assertEqual(caps.host.cpu.vendor, "Intel")
        self.assertEqual(caps.host.cpu.threads, "3")
        self.assertEqual(caps.host.cpu.cores, "5")
        self.assertEqual(caps.host.cpu.sockets, "7")
def testCapsUtilFuncs(self):
new_caps = self._buildCaps("libvirt-0.7.6-qemu-caps.xml")
new_caps_no_kvm = self._buildCaps(
"libvirt-0.7.6-qemu-no-kvmcaps.xml")
empty_caps = self._buildCaps("empty-caps.xml")
rhel_xen_enable_hvm_caps = self._buildCaps(
"rhel5.4-xen-caps-virt-enabled.xml")
rhel_xen_caps = self._buildCaps("rhel5.4-xen-caps.xml")
rhel_kvm_caps = self._buildCaps("rhel5.4-kvm-caps.xml")
def test_utils(caps, no_guests, is_hvm, is_kvm, is_bios_disable,
is_xenner):
            self.assertEqual(caps.no_install_options(), no_guests)
|
specify/specify7
|
specifyweb/workbench/views.py
|
Python
|
gpl-2.0
| 37,610
| 0.002154
|
import json
import logging
from typing import List, Optional
from uuid import uuid4
from django import http
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.utils import OperationalError
from django.views.decorators.http import require_GET, require_POST, \
require_http_methods
from jsonschema import validate # type: ignore
from jsonschema.exceptions import ValidationError # type: ignore
from specifyweb.specify.api import create_obj, get_object_or_404, obj_to_data, \
toJson, uri_for_model
from specifyweb.specify.views import apply_access_control, login_maybe_required, \
openapi
from specifyweb.specify import models as specify_models
from ..notifications.models import Message
from . import models, tasks
from .upload import upload as uploader, upload_plan_schema
logger = logging.getLogger(__name__)
def regularize_rows(ncols: int, rows: List[List]) -> List[List[str]]:
n = ncols + 1 # extra row info such as disambiguation in hidden col at end
def regularize(row: List) -> Optional[List]:
data = (row + ['']*n)[:n] # pad / trim row length to match columns
cleaned = ['' if v is None else str(v).strip() for v in data] # convert values to strings
return None if all(v == '' for v in cleaned[0:ncols]) else cleaned # skip empty rows
return [r for r in map(regularize, rows) if r is not None]
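# Illustrative example (editorial addition, not part of the original module): with
# ncols=3, regularize_rows(3, [[None, ' a ', 'b'], [None, '', None]]) returns
# [['', 'a', 'b', '']] -- each kept row is padded/trimmed to ncols + 1 cells, values
# are stringified and stripped, and rows whose first ncols cells are all empty are dropped.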
open_api_components = {
'schemas': {
'wb_uploadresult': {
"oneOf": [
{
"type": "string",
"example": "null"
},
{
"type": "object",
"properties": {
"success": {
"type": "boolean",
},
"timestamp": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T22:28:20.033117+00:00",
}
}
}
]
},
"wb_uploaderstatus": {
"oneOf": [
{
"type": "string",
"example": "null",
"description": "Nothing to report"
}, {
"type": "object",
"properties": {
"taskinfo": {
"type": "object",
"properties": {
"current": {
"type": "number",
"example": 4,
},
"total": {
"type": "number",
"example": 20,
}
}
},
"taskstatus": {
"type": "string",
"enum": [
"PROGRESS",
"PENDING",
"FAILURE",
]
},
"uploaderstatus": {
"type": "object",
"properties": {
"operation": {
"type": "string",
"enum": [
'validating',
'uploading',
'unuploading'
]
},
"taskid": {
"type": "string",
"maxLength": 36,
"example": "7d34dbb2-6e57-4c4b-9546-1f
|
e7bec1acca",
}
}
},
},
"description": "St
|
atus of the " +
"upload / un-upload / validation process",
}
]
},
"wb_rows": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "string",
"description": "Cell's value or null"
}
},
"description": "2D array of values",
},
"wb_visualorder": {
"oneOf": [
{
"type": "string",
"description": "null",
},
{
"type": "array",
"items": {
"type": "number",
},
"description": "The order to show columns in",
}
]
},
"wb_uploadplan": {
"type": "object",
"properties": {
},
"description": "Upload Plan. Schema - " +
"https://github.com/specify/specify7/blob/5fb51a7d25d549248505aec141ae7f7cdc83e414/specifyweb/workbench/upload/upload_plan_schema.py#L14"
},
"wb_validation_results": {
"type": "object",
"properties": {},
"description": "Schema: " +
"https://github.com/specify/specify7/blob/19ebde3d86ef4276799feb63acec275ebde9b2f4/specifyweb/workbench/upload/validation_schema.py",
},
"wb_upload_results": {
"type": "object",
"properties": {},
"description": "Schema: " +
"https://github.com/specify/specify7/blob/19ebde3d86ef4276799feb63acec275ebde9b2f4/specifyweb/workbench/upload/upload_results_schema.py",
}
}
}
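# Editorial note: the component schemas above are referenced from the @openapi-decorated
# views below via "$ref": "#/components/schemas/<name>" (e.g. wb_uploadresult and
# wb_uploaderstatus in the data set listing response).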
@openapi(schema={
"get": {
"parameters": [
{
"name": "with_plan",
"in": "query",
"required": False,
"schema": {
"type": "string"
},
"description": "If parameter is present, limit results to data sets with upload plans."
}
],
"responses": {
"200": {
"description": "Data fetched successfully",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "number",
"minimum": 0,
"description": "Data Set ID",
},
"name": {
"type": "string",
"description": "Data Set Name",
},
"uploadresult": {
"$ref": "#/components/schemas/wb_uploadresult"
},
"uploaderstatus": {
"$ref": "#/components/schemas/wb_uploaderstatus",
},
"timestampcreated": {
"type": "string",
"format": "datetime",
"example": "2021-04-28T13:16:07.774"
},
"timestampmodified": {
"type": "string",
"format": "datetime",
"example": "2021-04-
|
back-to/streamlink
|
tests/test_plugins_input.py
|
Python
|
bsd-2-clause
| 2,652
| 0.001131
|
import unittest
import os.path
from contextlib import contextmanager
from streamlink.plugin.plugin import UserInputRequester
from tests.mock import MagicMock, patch
from streamlink import Streamlink, PluginError
from streamlink_cli.console import ConsoleUserInputRequester
import streamlink_cli.console
from tests.plugins.testplugin import TestPlugin as _TestPlugin
class TestPluginUserInput(unittest.TestCase):
def setUp(self):
self.session = Streamlink()
@contextmanager
def _mock_console_input(self, isatty=True):
with patch('streamlink_cli.console.sys.stdin.isatty', return_va
|
lue=isatty):
mock_console = MagicMock()
mock_console.ask.return_value = "username"
mock_console.askpass.retur
|
n_value = "password"
yield ConsoleUserInputRequester(mock_console)
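    # Editorial note: the helper above yields a ConsoleUserInputRequester backed by a
    # MagicMock console, so in the tests below ask() returns "username" and askpass()
    # returns "password"; the isatty patch controls whether stdin appears interactive.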
def test_user_input_bad_class(self):
p = _TestPlugin("http://example.com/stream")
self.assertRaises(RuntimeError, p.bind, self.session, 'test_plugin', object())
def test_user_input_not_implemented(self):
p = _TestPlugin("http://example.com/stream")
p.bind(self.session, 'test_plugin', UserInputRequester())
self.assertRaises(PluginError, p.input_ask, 'test')
self.assertRaises(PluginError, p.input_ask_password, 'test')
def test_user_input_console(self):
p = _TestPlugin("http://example.com/stream")
with self._mock_console_input() as console_input:
p.bind(self.session, 'test_plugin', console_input)
self.assertEqual("username", p.input_ask("username"))
self.assertEqual("password", p.input_ask_password("password"))
console_input.console.ask.assert_called_with("username: ")
console_input.console.askpass.assert_called_with("password: ")
def test_user_input_console_no_tty(self):
p = _TestPlugin("http://example.com/stream")
with self._mock_console_input(isatty=False) as console_input:
p.bind(self.session, 'test_plugin', console_input)
self.assertRaises(PluginError, p.input_ask, "username")
self.assertRaises(PluginError, p.input_ask_password, "password")
def test_set_via_session(self):
with self._mock_console_input() as console_input:
session = Streamlink({"user-input-requester": console_input})
session.load_plugins(os.path.join(os.path.dirname(__file__), "plugins"))
p = session.resolve_url("http://test.se/channel")
self.assertEqual("username", p.input_ask("username"))
self.assertEqual("password", p.input_ask_password("password"))
|