repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sunqm/pyscf | pyscf/pbc/gw/krgw_ac.py | 1 | 25949 |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tianyu Zhu <zhutianyu1991@gmail.com>
#
'''
PBC spin-restricted G0W0-AC QP eigenvalues with k-point sampling
This implementation has N^4 scaling, and is faster than GW-CD (N^4)
and analytic GW (N^6) methods.
GW-AC is recommended for valence states only, and is inaccurate for core states.
Method:
See T. Zhu and G.K.-L. Chan, arXiv:2007.03148 (2020) for details.
Sigma is computed on the imaginary frequency axis with density fitting,
then analytically continued to real frequency.
Gaussian density fitting must be used (FFTDF and MDF are not supported).
'''
from functools import reduce
import numpy
import numpy as np
import h5py
from scipy.optimize import newton, least_squares
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos
from pyscf.pbc import df, dft, scf
from pyscf.pbc.mp.kmp2 import get_nocc, get_nmo, get_frozen_mask
from pyscf import __config__
einsum = lib.einsum
def kernel(gw, mo_energy, mo_coeff, orbs=None,
kptlist=None, nw=None, verbose=logger.NOTE):
'''GW-corrected quasiparticle orbital energies
Returns:
A list : converged, mo_energy, mo_coeff
'''
mf = gw._scf
if gw.frozen is None:
frozen = 0
else:
frozen = gw.frozen
assert (frozen == 0)
if orbs is None:
orbs = range(gw.nmo)
if kptlist is None:
kptlist = range(gw.nkpts)
nkpts = gw.nkpts
nklist = len(kptlist)
# v_xc
dm = np.array(mf.make_rdm1())
v_mf = np.array(mf.get_veff()) - np.array(mf.get_j(dm_kpts=dm))
for k in range(nkpts):
v_mf[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), v_mf[k], mo_coeff[k]))
nocc = gw.nocc
nmo = gw.nmo
# v_hf from DFT/HF density
if gw.fc:
exxdiv = 'ewald'
else:
exxdiv = None
rhf = scf.KRHF(gw.mol, gw.kpts, exxdiv=exxdiv)
rhf.with_df = gw.with_df
if getattr(gw.with_df, '_cderi', None) is None:
raise RuntimeError('Found incompatible integral scheme %s. '
'KGWAC can only be used with GDF integrals' %
gw.with_df.__class__)
vk = rhf.get_veff(gw.mol,dm_kpts=dm) - rhf.get_j(gw.mol,dm_kpts=dm)
for k in range(nkpts):
vk[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), vk[k], mo_coeff[k]))
# Grids for integration on imaginary axis
freqs,wts = _get_scaled_legendre_roots(nw)
# Compute self-energy on imaginary axis i*[0,iw_cutoff]
sigmaI, omega = get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=5.)
# Analytic continuation
coeff = []
if gw.ac == 'twopole':
for k in range(nklist):
coeff.append(AC_twopole_diag(sigmaI[k], omega, orbs, nocc))
elif gw.ac == 'pade':
for k in range(nklist):
coeff_tmp, omega_fit = AC_pade_thiele_diag(sigmaI[k], omega)
coeff.append(coeff_tmp)
coeff = np.array(coeff)
conv = True
# This code does not support metals
homo = -99.
lumo = 99.
for k in range(nkpts):
if homo < mf.mo_energy[k][nocc-1]:
homo = mf.mo_energy[k][nocc-1]
if lumo > mf.mo_energy[k][nocc]:
lumo = mf.mo_energy[k][nocc]
ef = (homo+lumo)/2.
mo_energy = np.zeros_like(np.array(mf.mo_energy))
for k in range(nklist):
kn = kptlist[k]
for p in orbs:
if gw.linearized:
# linearized G0W0
de = 1e-6
ep = mf.mo_energy[kn][p]
#TODO: analytic sigma derivative
if gw.ac == 'twopole':
sigmaR = two_pole(ep-ef, coeff[k,:,p-orbs[0]]).real
dsigma = two_pole(ep-ef+de, coeff[k,:,p-orbs[0]]).real - sigmaR.real
elif gw.ac == 'pade':
sigmaR = pade_thiele(ep-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
dsigma = pade_thiele(ep-ef+de, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real - sigmaR.real
zn = 1.0/(1.0-dsigma/de)
e = ep + zn*(sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
mo_energy[kn,p] = e
else:
# self-consistently solve QP equation
def quasiparticle(omega):
if gw.ac == 'twopole':
sigmaR = two_pole(omega-ef, coeff[k,:,p-orbs[0]]).real
elif gw.ac == 'pade':
sigmaR = pade_thiele(omega-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
return omega - mf.mo_energy[kn][p] - (sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
try:
e = newton(quasiparticle, mf.mo_energy[kn][p], tol=1e-6, maxiter=100)
mo_energy[kn,p] = e
except RuntimeError:
conv = False
mo_coeff = mf.mo_coeff
if gw.verbose >= logger.DEBUG:
numpy.set_printoptions(threshold=nmo)
for k in range(nkpts):
logger.debug(gw, ' GW mo_energy @ k%d =\n%s', k,mo_energy[k])
numpy.set_printoptions(threshold=1000)
return conv, mo_energy, mo_coeff
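# Restating the quasiparticle update performed above (a summary of the code,
# not new behavior): with the mean-field energy e_mf, the exchange matrix vk
# and the mean-field potential v_mf,
#
#   linearized:       e_qp = e_mf + Z * (Re Sigma(e_mf) + vk_pp - v_mf_pp),
#                     Z = 1 / (1 - dSigma/dw)
#   self-consistent:  e_qp solves  w - e_mf - (Re Sigma(w) + vk_pp - v_mf_pp) = 0
#
# where Sigma is the analytically continued correlation self-energy.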
def get_rho_response(gw, omega, mo_energy, Lpq, kL, kidx):
'''
Compute density response function in auxiliary basis at freq iw
'''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# Compute Pi for kL
Pi = np.zeros((naux,naux),dtype=np.complex128)
for i, kpti in enumerate(kpts):
# Find ka that conserves with ki and kL (-ki+ka+kL=G)
a = kidx[i]
eia = mo_energy[i,:nocc,None] - mo_energy[a,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pia = einsum('Pia,ia->Pia',Lpq[i][:,:nocc,nocc:],eia)
# Response from both spin-up and spin-down density
Pi += 4./nkpts * einsum('Pia,Qia->PQ',Pia,Lpq[i][:,:nocc,nocc:].conj())
return Pi
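# What the loop above assembles, written out (a restatement of the einsums,
# not additional functionality): at imaginary frequency w the RPA
# polarizability in the auxiliary (density-fitting) basis is
#
#   Pi_PQ(iw) = (4/nkpts) * sum_{k,i,a} L_P,ia(k) * e_ia/(w^2 + e_ia^2) * L_Q,ia(k)*
#
# with e_ia = e_i(k) - e_a(ka), where ka is fixed by momentum conservation
# with kL, and the prefactor 4 accounts for the closed-shell spin summation.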
def get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=None, max_memory=8000):
'''
Compute GW correlation self-energy (diagonal elements)
in MO basis on imaginary axis
'''
mo_energy = np.array(gw._scf.mo_energy)
mo_coeff = np.array(gw._scf.mo_coeff)
nocc = gw.nocc
nmo = gw.nmo
nkpts = gw.nkpts
kpts = gw.kpts
nklist = len(kptlist)
nw = len(freqs)
norbs = len(orbs)
mydf = gw.with_df
# possible kpts shift center
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# This code does not support metals
homo = -99.
lumo = 99.
for k in range(nkpts):
if homo < mo_energy[k][nocc-1]:
homo = mo_energy[k][nocc-1]
if lumo > mo_energy[k][nocc]:
lumo = mo_energy[k][nocc]
if (lumo-homo)<1e-3:
logger.warn(gw, 'This GW-AC code does not support metals!')
ef = (homo+lumo)/2.
# Integration on numerical grids
if iw_cutoff is not None:
nw_sigma = sum(iw < iw_cutoff for iw in freqs) + 1
else:
nw_sigma = nw + 1
# Compute occ for -iw and vir for iw separately
# to avoid branch cuts in analytic continuation
omega_occ = np.zeros((nw_sigma), dtype=np.complex128)
omega_vir = np.zeros((nw_sigma), dtype=np.complex128)
omega_occ[1:] = -1j*freqs[:(nw_sigma-1)]
omega_vir[1:] = 1j*freqs[:(nw_sigma-1)]
orbs_occ = [i for i in orbs if i < nocc]
norbs_occ = len(orbs_occ)
emo_occ = np.zeros((nkpts,nmo,nw_sigma),dtype=np.complex128)
emo_vir = np.zeros((nkpts,nmo,nw_sigma),dtype=np.complex128)
for k in range(nkpts):
emo_occ[k] = omega_occ[None,:] + ef - mo_energy[k][:,None]
emo_vir[k] = omega_vir[None,:] + ef - mo_energy[k][:,None]
sigma = np.zeros((nklist,norbs,nw_sigma),dtype=np.complex128)
omega = np.zeros((norbs,nw_sigma),dtype=np.complex128)
for p in range(norbs):
orbp = orbs[p]
if orbp < nocc:
omega[p] = omega_occ.copy()
else:
omega[p] = omega_vir.copy()
if gw.fc:
# Set up q mesh for q->0 finite size correction
q_pts = np.array([1e-3,0,0]).reshape(1,3)
q_abs = gw.mol.get_abs_kpts(q_pts)
# Get qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir)
qij = get_qij(gw, q_abs[0], mo_coeff)
for kL in range(nkpts):
# Lij: (ki, L, i, j) for looping every kL
Lij = []
# kidx: save kj that conserves with kL and ki (-ki+kj+kL=G)
# kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G)
kidx = np.zeros((nkpts),dtype=np.int64)
kidx_r = np.zeros((nkpts),dtype=np.int64)
for i, kpti in enumerate(kpts):
for j, kptj in enumerate(kpts):
# Find (ki,kj) that satisfies momentum conservation with kL
kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
if is_kconserv:
kidx[i] = j
kidx_r[j] = i
logger.debug(gw, "Read Lpq (kL: %s / %s, ki: %s, kj: %s)"%(kL+1, nkpts, i, j))
Lij_out = None
# Read (L|pq) and ao2mo transform to (L|ij)
Lpq = []
for LpqR, LpqI, sign \
in mydf.sr_loop([kpti, kptj], max_memory=0.1*gw._scf.max_memory, compact=False):
Lpq.append(LpqR+LpqI*1.0j)
# support unequal naux on different k-points
Lpq = np.vstack(Lpq).reshape(-1,nmo**2)
tao = []
ao_loc = None
moij, ijslice = _conc_mos(mo_coeff[i], mo_coeff[j])[2:]
Lij_out = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=Lij_out)
Lij.append(Lij_out.reshape(-1,nmo,nmo))
Lij = np.asarray(Lij)
naux = Lij.shape[1]
if kL == 0:
for w in range(nw):
# body dielectric matrix eps_body
Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
eps_body_inv = np.linalg.inv(np.eye(naux)-Pi)
if gw.fc:
# head dielectric matrix eps_00
Pi_00 = get_rho_response_head(gw, freqs[w], mo_energy, qij)
eps_00 = 1. - 4. * np.pi/np.linalg.norm(q_abs[0])**2 * Pi_00
# wings dielectric matrix eps_P0
Pi_P0 = get_rho_response_wing(gw, freqs[w], mo_energy, Lij, qij)
eps_P0 = -np.sqrt(4.*np.pi) / np.linalg.norm(q_abs[0]) * Pi_P0
# inverse dielectric matrix
eps_inv_00 = 1./(eps_00 - np.dot(np.dot(eps_P0.conj(),eps_body_inv),eps_P0))
eps_inv_P0 = -eps_inv_00 * np.dot(eps_body_inv, eps_P0)
# head correction
Del_00 = 2./np.pi * (6.*np.pi**2/gw.mol.vol/nkpts)**(1./3.) * (eps_inv_00 - 1.)
eps_inv_PQ = eps_body_inv
g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
for k in range(nklist):
kn = kptlist[k]
# Find km that conserves with kn and kL (-km+kn+kL=G)
km = kidx_r[kn]
Qmn = einsum('Pmn,PQ->Qmn',Lij[km][:,:,orbs].conj(),eps_inv_PQ-np.eye(naux))
Wmn = 1./nkpts * einsum('Qmn,Qmn->mn',Qmn,Lij[km][:,:,orbs])
sigma[k][:norbs_occ] += -einsum('mn,mw->nw',Wmn[:,:norbs_occ],g0_occ[km])/np.pi
sigma[k][norbs_occ:] += -einsum('mn,mw->nw',Wmn[:,norbs_occ:],g0_vir[km])/np.pi
if gw.fc:
# apply head correction
assert(kn == km)
sigma[k][:norbs_occ] += -Del_00 * g0_occ[kn][orbs][:norbs_occ] /np.pi
sigma[k][norbs_occ:] += -Del_00 * g0_vir[kn][orbs][norbs_occ:] /np.pi
# apply wing correction
Wn_P0 = einsum('Pnm,P->nm',Lij[kn],eps_inv_P0).diagonal()
Wn_P0 = Wn_P0.real * 2.
Del_P0 = np.sqrt(gw.mol.vol/4./np.pi**3) * (6.*np.pi**2/gw.mol.vol/nkpts)**(2./3.) * Wn_P0[orbs]
sigma[k][:norbs_occ] += -einsum('n,nw->nw', Del_P0[:norbs_occ],
g0_occ[kn][orbs][:norbs_occ]) /np.pi
sigma[k][norbs_occ:] += -einsum('n,nw->nw', Del_P0[norbs_occ:],
g0_vir[kn][orbs][norbs_occ:]) /np.pi
else:
for w in range(nw):
Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
for k in range(nklist):
kn = kptlist[k]
# Find km that conserves with kn and kL (-km+kn+kL=G)
km = kidx_r[kn]
Qmn = einsum('Pmn,PQ->Qmn',Lij[km][:,:,orbs].conj(),Pi_inv)
Wmn = 1./nkpts * einsum('Qmn,Qmn->mn',Qmn,Lij[km][:,:,orbs])
sigma[k][:norbs_occ] += -einsum('mn,mw->nw',Wmn[:,:norbs_occ],g0_occ[km])/np.pi
sigma[k][norbs_occ:] += -einsum('mn,mw->nw',Wmn[:,norbs_occ:],g0_vir[km])/np.pi
return sigma, omega
def get_rho_response_head(gw, omega, mo_energy, qij):
'''
Compute head (G=0, G'=0) density response function in auxiliary basis at freq iw
'''
nkpts, nocc, nvir = qij.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi head
Pi_00 = 0j
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pi_00 += 4./nkpts * einsum('ia,ia->',eia,qij[i].conj()*qij[i])
return Pi_00
def get_rho_response_wing(gw, omega, mo_energy, Lpq, qij):
'''
Compute wing (G=P, G'=0) density response function in auxiliary basis at freq iw
'''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi wing
Pi = np.zeros(naux,dtype=np.complex128)
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = eia/(omega**2+eia*eia)
eia_q = eia * qij[i].conj()
Pi += 4./nkpts * einsum('Pia,ia->P',Lpq[i][:,:nocc,nocc:],eia_q)
return Pi
def get_qij(gw, q, mo_coeff, uniform_grids=False):
'''
Compute qij = 1/Omega * |< psi_{ik} | e^{iqr} | psi_{ak-q} >|^2 at q: (nkpts, nocc, nvir)
through k.p perturbation theory
Ref: Phys. Rev. B 83, 245122 (2011)
'''
nocc = gw.nocc
nmo = gw.nmo
nvir = nmo - nocc
kpts = gw.kpts
nkpts = len(kpts)
cell = gw.mol
mo_energy = gw._scf.mo_energy
if uniform_grids:
mydf = df.FFTDF(cell, kpts=kpts)
coords = cell.gen_uniform_grids(mydf.mesh)
else:
coords, weights = dft.gen_grid.get_becke_grids(cell,level=5)
ngrid = len(coords)
qij = np.zeros((nkpts,nocc,nvir),dtype=np.complex128)
for i, kpti in enumerate(kpts):
ao_p = dft.numint.eval_ao(cell, coords, kpt=kpti, deriv=1)
ao = ao_p[0]
ao_grad = ao_p[1:4]
if uniform_grids:
ao_ao_grad = einsum('mg,xgn->xmn',ao.T.conj(),ao_grad) * cell.vol / ngrid
else:
ao_ao_grad = einsum('g,mg,xgn->xmn',weights,ao.T.conj(),ao_grad)
q_ao_ao_grad = -1j * einsum('x,xmn->mn',q,ao_ao_grad)
q_mo_mo_grad = np.dot(np.dot(mo_coeff[i][:,:nocc].T.conj(), q_ao_ao_grad), mo_coeff[i][:,nocc:])
enm = 1./(mo_energy[i][nocc:,None] - mo_energy[i][None,:nocc])
dens = enm.T * q_mo_mo_grad
qij[i] = dens / np.sqrt(cell.vol)
return qij
def _get_scaled_legendre_roots(nw):
"""
Scale nw Legendre roots, which lie in the
interval [-1, 1], so that they lie in [0, inf)
Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs, wts = np.polynomial.legendre.leggauss(nw)
x0 = 0.5
freqs_new = x0*(1.+freqs)/(1.-freqs)
wts = wts*2.*x0/(1.-freqs)**2
return freqs_new, wts
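# A quick sanity sketch of the mapping above (the grid size is illustrative):
# the Gauss-Legendre nodes x in (-1, 1) are mapped to w = x0*(1+x)/(1-x) in
# (0, inf), and the weights pick up the Jacobian dw/dx = 2*x0/(1-x)**2, so
#
#   freqs, wts = _get_scaled_legendre_roots(100)
#   numpy.sum(wts * numpy.exp(-freqs))   # should be close to 1.0
#
# approximates the exact integral of exp(-w) over [0, inf).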
def _get_clenshaw_curtis_roots(nw):
"""
Clenshaw-Curtis quadrature on [0, inf)
Ref: J. Chem. Phys. 132, 234114 (2010)
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs = np.zeros(nw)
wts = np.zeros(nw)
a = 0.2
for w in range(nw):
t = (w+1.0)/nw * np.pi/2.
freqs[w] = a / np.tan(t)
if w != nw-1:
wts[w] = a*np.pi/2./nw/(np.sin(t)**2)
else:
wts[w] = a*np.pi/4./nw/(np.sin(t)**2)
return freqs[::-1], wts[::-1]
def two_pole_fit(coeff, omega, sigma):
cf = coeff[:5] + 1j*coeff[5:]
f = cf[0] + cf[1]/(omega+cf[3]) + cf[2]/(omega+cf[4]) - sigma
f[0] = f[0]/0.01
return np.array([f.real,f.imag]).reshape(-1)
def two_pole(freqs, coeff):
cf = coeff[:5] + 1j*coeff[5:]
return cf[0] + cf[1]/(freqs+cf[3]) + cf[2]/(freqs+cf[4])
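# The two-pole model used above packs 5 complex parameters into a length-10
# real vector, cf = coeff[:5] + 1j*coeff[5:], and represents the self-energy as
#
#   Sigma(w) ~ cf[0] + cf[1]/(w + cf[3]) + cf[2]/(w + cf[4])
#
# two_pole_fit returns the stacked real/imaginary residuals against the sampled
# sigma, with the first (w ~ 0) point weighted by 1/0.01 so the static limit is
# reproduced tightly.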
def AC_twopole_diag(sigma, omega, orbs, nocc):
"""
Analytic continuation to real axis using a two-pole model
Returns:
coeff: 2D array (ncoeff, norbs)
"""
norbs, nw = sigma.shape
coeff = np.zeros((10,norbs))
for p in range(norbs):
if orbs[p] < nocc:
x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, -1.0, -0.5])
else:
x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, 1.0, 0.5])
#TODO: analytic gradient
xopt = least_squares(two_pole_fit, x0, jac='3-point', method='trf', xtol=1e-10,
gtol = 1e-10, max_nfev=1000, verbose=0, args=(omega[p], sigma[p]))
if xopt.success is False:
print('WARN: 2P-Fit Orb %d not converged, cost function %e'%(p,xopt.cost))
coeff[:,p] = xopt.x.copy()
return coeff
def thiele(fn,zn):
nfit = len(zn)
g = np.zeros((nfit,nfit),dtype=np.complex128)
g[:,0] = fn.copy()
for i in range(1,nfit):
g[i:,i] = (g[i-1,i-1]-g[i:,i-1])/((zn[i:]-zn[i-1])*g[i:,i-1])
a = g.diagonal()
return a
def pade_thiele(freqs,zn,coeff):
nfit = len(coeff)
X = coeff[-1]*(freqs-zn[-2])
for i in range(nfit-1):
idx = nfit-i-1
X = coeff[idx]*(freqs-zn[idx-1])/(1.+X)
X = coeff[0]/(1.+X)
return X
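# thiele() builds the coefficients a_i of Thiele's reciprocal-difference
# continued fraction through the sample points (zn, fn); pade_thiele() then
# evaluates it by backward recursion, i.e.
#
#   C(w) = a0 / (1 + a1*(w - z0) / (1 + a2*(w - z1) / (1 + ...)))
#
# so that C(zn[i]) = fn[i] on the imaginary-axis grid, and C(w) is evaluated at
# real w for the analytic continuation.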
def AC_pade_thiele_diag(sigma, omega):
"""
Analytic continuation to real axis using a Pade approximation
from Thiele's reciprocal difference method
Reference: J. Low Temp. Phys. 29, 179 (1977)
Returns:
coeff: 2D array (ncoeff, norbs)
omega: 2D array (norbs, npade)
"""
idx = range(1,40,6)
sigma1 = sigma[:,idx].copy()
sigma2 = sigma[:,(idx[-1]+4)::4].copy()
sigma = np.hstack((sigma1,sigma2))
omega1 = omega[:,idx].copy()
omega2 = omega[:,(idx[-1]+4)::4].copy()
omega = np.hstack((omega1,omega2))
norbs, nw = sigma.shape
npade = nw // 2
coeff = np.zeros((npade*2,norbs),dtype=np.complex128)
for p in range(norbs):
coeff[:,p] = thiele(sigma[p,:npade*2], omega[p,:npade*2])
return coeff, omega[:,:npade*2]
class KRGWAC(lib.StreamObject):
linearized = getattr(__config__, 'gw_gw_GW_linearized', False)
# Analytic continuation: pade or twopole
ac = getattr(__config__, 'gw_gw_GW_ac', 'pade')
# Whether applying finite size corrections
fc = getattr(__config__, 'gw_gw_GW_fc', True)
def __init__(self, mf, frozen=0):
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
#TODO: implement frozen orbs
if frozen > 0:
raise NotImplementedError
self.frozen = frozen
# DF-KGW must use GDF integrals
if getattr(mf, 'with_df', None):
self.with_df = mf.with_df
else:
raise NotImplementedError
self._keys.update(['with_df'])
##################################################
# don't modify the following attributes, they are not input options
self._nocc = None
self._nmo = None
self.kpts = mf.kpts
self.nkpts = len(self.kpts)
# self.mo_energy: GW quasiparticle energy, not scf mo_energy
self.mo_energy = None
self.mo_coeff = mf.mo_coeff
self.mo_occ = mf.mo_occ
self.sigma = None
keys = set(('linearized','ac','fc'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('method = %s', self.__class__.__name__)
nocc = self.nocc
nvir = self.nmo - nocc
nkpts = self.nkpts
log.info('GW nocc = %d, nvir = %d, nkpts = %d', nocc, nvir, nkpts)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)
logger.info(self, 'analytic continuation method = %s', self.ac)
logger.info(self, 'GW finite size corrections = %s', self.fc)
return self
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def kernel(self, mo_energy=None, mo_coeff=None, orbs=None, kptlist=None, nw=100):
"""
Input:
kptlist: self-energy k-points
orbs: self-energy orbs
nw: grid number
Output:
mo_energy: GW quasiparticle energy
"""
if mo_coeff is None:
mo_coeff = np.array(self._scf.mo_coeff)
if mo_energy is None:
mo_energy = np.array(self._scf.mo_energy)
nmo = self.nmo
naux = self.with_df.get_naoaux()
nkpts = self.nkpts
mem_incore = (2*nkpts*nmo**2*naux) * 16/1e6
mem_now = lib.current_memory()[0]
if (mem_incore + mem_now > 0.99*self.max_memory):
logger.warn(self, 'Memory may not be enough!')
raise NotImplementedError
cput0 = (logger.process_clock(), logger.perf_counter())
self.dump_flags()
self.converged, self.mo_energy, self.mo_coeff = \
kernel(self, mo_energy, mo_coeff, orbs=orbs,
kptlist=kptlist, nw=nw, verbose=self.verbose)
logger.warn(self, 'GW QP energies may not be sorted from min to max')
logger.timer(self, 'GW', *cput0)
return self.mo_energy
if __name__ == '__main__':
from pyscf.pbc import gto
from pyscf.pbc.lib import chkfile
import os
# This test takes a few minutes
cell = gto.Cell()
cell.build(unit = 'angstrom',
a = '''
0.000000 1.783500 1.783500
1.783500 0.000000 1.783500
1.783500 1.783500 0.000000
''',
atom = 'C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375',
dimension = 3,
max_memory = 8000,
verbose = 4,
pseudo = 'gth-pade',
basis='gth-szv',
precision=1e-10)
kpts = cell.make_kpts([3,1,1],scaled_center=[0,0,0])
gdf = df.GDF(cell, kpts)
gdf_fname = 'gdf_ints_311.h5'
gdf._cderi_to_save = gdf_fname
if not os.path.isfile(gdf_fname):
gdf.build()
chkfname = 'diamond_311.chk'
if os.path.isfile(chkfname):
kmf = dft.KRKS(cell, kpts)
kmf.xc = 'pbe'
kmf.with_df = gdf
kmf.with_df._cderi = gdf_fname
data = chkfile.load(chkfname, 'scf')
kmf.__dict__.update(data)
else:
kmf = dft.KRKS(cell, kpts)
kmf.xc = 'pbe'
kmf.with_df = gdf
kmf.with_df._cderi = gdf_fname
kmf.conv_tol = 1e-12
kmf.chkfile = chkfname
kmf.kernel()
gw = KRGWAC(kmf)
gw.linearized = False
gw.ac = 'pade'
# without finite size corrections
gw.fc = False
nocc = gw.nocc
gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
print(gw.mo_energy)
assert((abs(gw.mo_energy[0][nocc-1]-0.62045797))<1e-5)
assert((abs(gw.mo_energy[0][nocc]-0.96574324))<1e-5)
assert((abs(gw.mo_energy[1][nocc-1]-0.52639137))<1e-5)
assert((abs(gw.mo_energy[1][nocc]-1.07513258))<1e-5)
# with finite size corrections
gw.fc = True
gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
print(gw.mo_energy)
assert((abs(gw.mo_energy[0][nocc-1]-0.54277092))<1e-5)
assert((abs(gw.mo_energy[0][nocc]-0.80148537))<1e-5)
assert((abs(gw.mo_energy[1][nocc-1]-0.45073793))<1e-5)
assert((abs(gw.mo_energy[1][nocc]-0.92910108))<1e-5)
| apache-2.0 | -9,168,461,443,576,111,000 | 35.754958 | 120 | 0.541524 | false | 2.845285 | false | false | false |
USGM/suds | suds/sudsobject.py | 1 | 11056 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sudsobject} module provides a collection of suds objects
that are primarily used for the highly dynamic interactions with
wsdl/xsd defined types.
"""
from logging import getLogger
from suds import *
log = getLogger(__name__)
def items(sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
for item in sobject:
yield item
def asdict(sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return dict(items(sobject))
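# A small usage sketch (the 'Person' name and fields are illustrative, not part
# of the module):
#
#   person = Factory.object('Person', dict(name='Ada', age=36))
#   list(items(person))   # -> [('name', 'Ada'), ('age', 36)]
#   asdict(person)        # -> {'name': 'Ada', 'age': 36}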
def merge(a, b):
"""
Merge all attributes and metadata from I{a} to I{b}.
@param a: A I{source} object
@type a: L{Object}
@param b: A I{destination} object
@type b: L{Object}
"""
for item in a:
setattr(b, item[0], item[1])
b.__metadata__ = a.__metadata__  # copy metadata from the source object, per the docstring
return b
def footprint(sobject):
"""
Get the I{virtual footprint} of the object.
This is really a count of the attributes in the branch with a significant value.
@param sobject: A suds object.
@type sobject: L{Object}
@return: The branch footprint.
@rtype: int
"""
n = 0
for a in sobject.__keylist__:
v = getattr(sobject, a)
if v is None: continue
if isinstance(v, Object):
n += footprint(v)
continue
if hasattr(v, '__len__'):
if len(v): n += 1
continue
n +=1
return n
class Factory:
cache = {}
@classmethod
def subclass(cls, name, bases, dict={}):
if not isinstance(bases, tuple):
bases = (bases,)
name = name
key = '.'.join((name, str(bases)))
subclass = cls.cache.get(key)
if subclass is None:
subclass = type(name, bases, dict)
cls.cache[key] = subclass
return subclass
@classmethod
def object(cls, classname=None, dict={}):
if classname is not None:
subclass = cls.subclass(classname, Object)
inst = subclass()
else:
inst = Object()
for a in list(dict.items()):
setattr(inst, a[0], a[1])
return inst
@classmethod
def metadata(cls):
return Metadata()
@classmethod
def property(cls, name, value=None):
subclass = cls.subclass(name, Property)
return subclass(value)
class Object:
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
self.__metadata__ = Metadata()
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if not builtin and \
name not in self.__keylist__:
self.__keylist__.append(name)
self.__dict__[name] = value
def __delattr__(self, name):
try:
del self.__dict__[name]
builtin = name.startswith('__') and name.endswith('__')
if not builtin:
self.__keylist__.remove(name)
except:
cls = self.__class__.__name__
raise AttributeError("%s has no attribute '%s'" % (cls, name))
def __getitem__(self, name):
if isinstance(name, int):
name = self.__keylist__[int(name)]
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __iter__(self):
return Iter(self)
def __len__(self):
return len(self.__keylist__)
def __contains__(self, name):
return name in self.__keylist__
def __repr__(self):
return str(self)
def __str__(self):
return self.__printer__.tostr(self)
class Iter:
def __init__(self, sobject):
self.sobject = sobject
self.keylist = self.__keylist(sobject)
self.index = 0
def __next__(self):
keylist = self.keylist
nkeys = len(self.keylist)
while self.index < nkeys:
k = keylist[self.index]
self.index += 1
if hasattr(self.sobject, k):
v = getattr(self.sobject, k)
return (k, v)
raise StopIteration()
def __keylist(self, sobject):
keylist = sobject.__keylist__
try:
keyset = set(keylist)
ordering = sobject.__metadata__.ordering
ordered = set(ordering)
if not ordered.issuperset(keyset):
log.debug(
'%s must be superset of %s, ordering ignored',
keylist,
ordering)
raise KeyError()
return ordering
except:
return keylist
def __iter__(self):
return self
class Metadata(Object):
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
class Facade(Object):
def __init__(self, name):
Object.__init__(self)
md = self.__metadata__
md.facade = name
class Property(Object):
def __init__(self, value):
Object.__init__(self)
self.value = value
def items(self):
for item in self:
if item[0] != 'value':
yield item
def get(self):
return self.value
def set(self, value):
self.value = value
return self
class Printer:
"""
Pretty printing of an Object instance.
"""
@classmethod
def indent(cls, n): return '%*s'%(n*3,' ')
def tostr(self, object, indent=-2):
""" get s string representation of object """
history = []
return self.process(object, history, indent)
def process(self, object, h, n=0, nl=False):
""" print object using the specified indent (n) and newline (nl). """
if object is None:
return 'None'
if isinstance(object, Object):
if len(object) == 0:
return '<empty>'
else:
return self.print_object(object, h, n+2, nl)
if isinstance(object, dict):
if len(object) == 0:
return '<empty>'
else:
return self.print_dictionary(object, h, n+2, nl)
if isinstance(object, (list,tuple)):
if len(object) == 0:
return '<empty>'
else:
return self.print_collection(object, h, n+2)
if isinstance(object, str):
return '"%s"' % tostr(object)
return '%s' % tostr(object)
def print_object(self, d, h, n, nl=False):
""" print complex using the specified indent (n) and newline (nl). """
s = []
cls = d.__class__
md = d.__metadata__
if d in h:
s.append('(')
s.append(cls.__name__)
s.append(')')
s.append('...')
return ''.join(s)
h.append(d)
if nl:
s.append('\n')
s.append(self.indent(n))
if cls != Object:
s.append('(')
if isinstance(d, Facade):
s.append(md.facade)
else:
s.append(cls.__name__)
s.append(')')
s.append('{')
for item in d:
if self.exclude(d, item):
continue
item = self.unwrap(d, item)
s.append('\n')
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(item[0])
s.append('[]')
else:
s.append(item[0])
s.append(' = ')
s.append(self.process(item[1], h, n, True))
s.append('\n')
s.append(self.indent(n))
s.append('}')
h.pop()
return ''.join(s)
def print_dictionary(self, d, h, n, nl=False):
""" print complex using the specified indent (n) and newline (nl). """
if d in h: return '{}...'
h.append(d)
s = []
if nl:
s.append('\n')
s.append(self.indent(n))
s.append('{')
for item in list(d.items()):
s.append('\n')
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append('[]')
else:
s.append(tostr(item[0]))
s.append(' = ')
s.append(self.process(item[1], h, n, True))
s.append('\n')
s.append(self.indent(n))
s.append('}')
h.pop()
return ''.join(s)
def print_collection(self, c, h, n):
""" print collection using the specified indent (n) and newline (nl). """
if c in h: return '[]...'
h.append(c)
s = []
for item in c:
s.append('\n')
s.append(self.indent(n))
s.append(self.process(item, h, n-2))
s.append(',')
h.pop()
return ''.join(s)
def unwrap(self, d, item):
""" translate (unwrap) using an optional wrapper function """
nopt = ( lambda x: x )
try:
md = d.__metadata__
pmd = getattr(md, '__print__', None)
if pmd is None:
return item
wrappers = getattr(pmd, 'wrappers', {})
fn = wrappers.get(item[0], nopt)
return (item[0], fn(item[1]))
except:
pass
return item
def exclude(self, d, item):
""" check metadata for excluded items """
try:
md = d.__metadata__
pmd = getattr(md, '__print__', None)
if pmd is None:
return False
excludes = getattr(pmd, 'excludes', [])
return ( item[0] in excludes )
except:
pass
return False
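# A sketch of how the print metadata is consumed (names are illustrative):
# attaching a '__print__' object to an instance's metadata hides or transforms
# fields when Printer renders it.
#
#   card = Factory.object('Card', dict(number='4111-1111-1111-1111', owner='Ada'))
#   card.__metadata__.__print__ = Factory.object(None, dict(excludes=['number']))
#   print(card)   # the 'number' attribute is omitted from the output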
| lgpl-3.0 | 1,173,960,176,114,197,800 | 27.645078 | 84 | 0.510492 | false | 4.040936 | false | false | false |
tombstone/models | official/nlp/nhnet/optimizer.py | 1 | 3030 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer and learning rate scheduler."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.modeling.hyperparams import params_dict
class LearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Learning rate schedule."""
def __init__(self, initial_learning_rate, hidden_size, warmup_steps):
"""Initialize configuration of the learning rate schedule.
Args:
initial_learning_rate: A float, the initial learning rate.
hidden_size: An integer, the model dimension in the hidden layers.
warmup_steps: An integer, the number of steps required for linear warmup.
"""
super(LearningRateSchedule, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.hidden_size = hidden_size
self.warmup_steps = tf.cast(warmup_steps, tf.float32)
def __call__(self, global_step):
"""Calculate learning rate with linear warmup and rsqrt decay.
Args:
global_step: An integer, the current global step used for learning rate
calculation.
Returns:
A float, the learning rate to be used for the current global step.
"""
with tf.name_scope('learning_rate_schedule'):
global_step = tf.cast(global_step, tf.float32)
learning_rate = self.initial_learning_rate
learning_rate *= (self.hidden_size**-0.5)
# Apply linear warmup
learning_rate *= tf.minimum(1.0, global_step / self.warmup_steps)
# Apply rsqrt decay
learning_rate /= tf.sqrt(tf.maximum(global_step, self.warmup_steps))
return learning_rate
def get_config(self):
"""Get the configuration of the learning rate schedule."""
return {
'initial_learning_rate': self.initial_learning_rate,
'hidden_size': self.hidden_size,
'warmup_steps': self.warmup_steps,
}
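# Restated, the schedule above evaluates at integer step s (with d = hidden_size
# and w = warmup_steps) to
#
#   lr(s) = initial_learning_rate * d**-0.5 * min(1, s / w) / sqrt(max(s, w))
#
# i.e. a linear ramp up to step w followed by 1/sqrt(s) decay, the usual
# Transformer schedule.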
def create_optimizer(params: params_dict.ParamsDict):
"""Creates optimizer."""
lr_schedule = LearningRateSchedule(
params.learning_rate,
params.hidden_size,
params.learning_rate_warmup_steps)
return tf.keras.optimizers.Adam(
learning_rate=lr_schedule,
beta_1=params.adam_beta1,
beta_2=params.adam_beta2,
epsilon=params.adam_epsilon)
| apache-2.0 | 7,990,494,737,155,448,000 | 35.95122 | 80 | 0.690099 | false | 4.034621 | false | false | false |
gem/oq-hazardlib | openquake/hmtk/sources/complex_fault_source.py | 1 | 9353 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
# -*- coding: utf-8 -*-
'''
Defines the :class:
`openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource`, which
represents the openquake.hmtk definition of a complex fault source. This extends
the :class:`nrml.models.ComplexFaultSource`
'''
import warnings
import numpy as np
from math import fabs
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.line import Line
from openquake.hazardlib.geo.surface.complex_fault import ComplexFaultSurface
from openquake.hazardlib.source.complex_fault import ComplexFaultSource
import openquake.hmtk.sources.source_conversion_utils as conv
class mtkComplexFaultSource(object):
'''
New class to describe the mtk complex fault source object
:param str identifier:
ID code for the source
:param str name:
Source name
:param str trt:
Tectonic region type
:param geometry:
Instance of :class: nhlib.geo.surface.complex_fault.ComplexFaultSurface
:param str mag_scale_rel:
Magnitude scaling relationship
:param float rupt_aspect_ratio:
Rupture aspect ratio
:param mfd:
Magnitude frequency distribution as instance of
:class:`nrml.models.IncrementalMFD` or
:class:`nrml.models.TGRMFD`
:param float rake:
Rake of fault
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param catalogue:
Earthquake catalogue associated to source as instance of
:class:`openquake.hmtk.seismicity.catalogue.Catalogue` object
'''
def __init__(self, identifier, name, trt=None, geometry=None,
mag_scale_rel=None, rupt_aspect_ratio=None, mfd=None,
rake=None):
'''
Instantiate class with just the basic attributes: identifier and name
'''
self.typology = 'ComplexFault'
self.id = identifier
self.name = name
self.trt = trt
self.geometry = geometry
self.fault_edges = None
self.mag_scale_rel = mag_scale_rel
self.rupt_aspect_ratio = rupt_aspect_ratio
self.mfd = mfd
self.rake = rake
self.upper_depth = None
self.lower_depth = None
self.catalogue = None
self.dip = None
def create_geometry(self, input_geometry, mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
List of at least two fault edges of the fault source from
shallowest to deepest. Each edge can be represented as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude, Depth]
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
if not isinstance(input_geometry, list) or len(input_geometry) < 2:
raise ValueError('Complex fault geometry incorrectly defined')
self.fault_edges = []
for edge in input_geometry:
if not isinstance(edge, Line):
if not isinstance(edge, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_edges.append(Line([Point(row[0], row[1], row[2])
for row in edge]))
else:
self.fault_edges.append(edge)
# Updates the upper and lower seismogenic depths to reflect geometry
self._get_minmax_edges(edge)
# Build fault surface
self.geometry = ComplexFaultSurface.from_fault_data(self.fault_edges,
mesh_spacing)
# Get a mean dip
self.dip = self.geometry.get_dip()
def _get_minmax_edges(self, edge):
'''
Updates the upper and lower depths based on the input edges
'''
if isinstance(edge, Line):
# For instance of line class need to loop over values
depth_vals = np.array([node.depth for node in edge.points])
else:
depth_vals = edge[:, 2]
temp_upper_depth = np.min(depth_vals)
if not self.upper_depth:
self.upper_depth = temp_upper_depth
else:
if temp_upper_depth < self.upper_depth:
self.upper_depth = temp_upper_depth
temp_lower_depth = np.max(depth_vals)
if not self.lower_depth:
self.lower_depth = temp_lower_depth
else:
if temp_lower_depth > self.lower_depth:
self.lower_depth = temp_lower_depth
def select_catalogue(self, selector, distance,
distance_metric='joyner-boore', upper_eq_depth=None,
lower_eq_depth=None):
'''
Selects earthquakes within a distance of the fault
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param distance:
Distance from point (km) for selection
:param str distance_metric:
Choice of fault source distance metric 'joyner-boore' or 'rupture'
:param float upper_eq_depth:
Upper hypocentral depth of hypocentres to be selected
:param float lower_eq_depth:
Lower hypocentral depth of hypocentres to be selected
'''
if selector.catalogue.get_number_events() < 1:
raise ValueError('No events found in catalogue!')
# If dip is != 90 and 'rupture' distance metric is selected
if ('rupture' in distance_metric) and (fabs(self.dip - 90) > 1E-5):
# Use rupture distance
self.catalogue = selector.within_rupture_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
else:
# Use Joyner-Boore distance
self.catalogue = selector.within_joyner_boore_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
if self.catalogue.get_number_events() < 5:
# Throw a warning regarding the small number of earthquakes in
# the source!
warnings.warn('Source %s (%s) has fewer than 5 events'
% (self.id, self.name))
def create_oqhazardlib_source(self, tom, mesh_spacing, use_defaults=False):
"""
Creates an instance of the source model as :class:
openquake.hazardlib.source.complex_fault.ComplexFaultSource
"""
if not self.mfd:
raise ValueError("Cannot write to hazardlib without MFD")
return ComplexFaultSource(
self.id,
self.name,
self.trt,
self.mfd,
mesh_spacing,
conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
tom,
self.fault_edges,
self.rake)
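# A minimal construction sketch (coordinates and identifiers are illustrative,
# and the two edges are assumed to define a valid fault surface): each edge is
# an (N, 3) array of [longitude, latitude, depth-km], ordered shallowest to
# deepest, as create_geometry expects.
#
#   import numpy as np
#   top = np.array([[30.0, 40.0, 0.0], [30.5, 40.0, 0.0]])
#   bottom = np.array([[30.0, 40.0, 20.0], [30.5, 40.0, 20.0]])
#   source = mtkComplexFaultSource('001', 'Example Fault', trt='Active Shallow Crust')
#   source.create_geometry([top, bottom], mesh_spacing=2.0)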
| agpl-3.0 | -315,525,468,201,625,000 | 37.331967 | 79 | 0.63338 | false | 3.990188 | false | false | false |
AutoGentoo/AutoGentoo | autogentoo/portage/resolve.py | 1 | 22795 |
import warnings
from abc import ABC, abstractmethod
from queue import Queue
from typing import List, Optional, Dict, Generator, Tuple, Set
from autogentoo.cportage import (
get_portage,
Dependency,
Atom,
Ebuild,
UseFlag,
UseOperatorT,
Portage,
AtomUseT,
AtomUseDefaultT,
RequiredUse,
AtomBlockT,
)
from autogentoo.portage import (
RequiredUseException,
DependencyContainer,
ResolutionException,
UseSuggestion,
SuggestionExpression,
InvalidExpressionException,
)
__emerge_session__: Optional["Emerge"] = None
def emerge_init(emerge: "Emerge"):
global __emerge_session__
__emerge_session__ = emerge
def emerge_session() -> "Emerge":
global __emerge_session__
if __emerge_session__ is None:
raise RuntimeError(
"emerge_init() must be called before emerge_session() is called"
)
return __emerge_session__
def resolve_single(
parent: Optional["SelectedEbuild"], depend_expr: Dependency
) -> Optional["ResolveDependency"]:
if parent is None and depend_expr.atom is None:
raise ResolutionException(
"Use condition expressions are not valid at global scope"
)
if depend_expr.atom is not None: # Simple atom selection
# Check if this is a blocker
if depend_expr.atom.blocks != AtomBlockT.NONE:
emerge_session().add_block(parent, depend_expr.atom)
return None
sel_ebuild = emerge_session().select_atom(parent, depend_expr.atom)
if depend_expr.atom.useflags is None:
return sel_ebuild
for use in depend_expr.atom.useflags:
if not sel_ebuild.has_use(use.name):
# Use the use default because it doesn't exist in
# in the IUSE
default = AtomUseDefaultT(use.default)
if default == AtomUseDefaultT.NONE:
# We have no fallback when this useflag doesn't exist
# This is an error
raise KeyError(
"Invalid use flag '%s' for atom '%s'"
% (use.name, depend_expr.atom)
)
atom_flag = (
AtomUseT.ENABLE
if default == AtomUseDefaultT.ON
else AtomUseT.DISABLE
)
sel_ebuild.add_use(use.name, atom_flag == AtomUseT.ENABLE)
sel_ebuild.add_use_requirement(use.name, atom_flag)
else:
sel_ebuild.add_use_requirement(use.name, AtomUseT(use.option))
return sel_ebuild
else:
if depend_expr.use_condition != 0:
assert depend_expr.use_operator in (
UseOperatorT.ENABLE,
UseOperatorT.DISABLE,
)
# Simple use condition
use_flag = get_portage().get_use_flag(depend_expr.use_condition)
use_flag = UseFlag(
use_flag.name,
depend_expr.use_operator == UseOperatorT.ENABLE,  # the operator, not the flag id, carries the enable/disable state
)
assert depend_expr.children is not None, "Invalid dependency expression"
conditional = UseConditional(parent, use_flag, depend_expr.children)
parent.add_use_hook(use_flag, conditional)
else:
warnings.warn(
"Complex use selection is not implemented yet (%s)" % parent.ebuild.key
)
def resolve_all(
parent: Optional["SelectedEbuild"], depend: Dependency
) -> Generator["ResolveDependency", None, None]:
for dep in depend:
resolved = resolve_single(parent, dep)
if resolved is not None:
yield resolved
class ResolveDependency(ABC):
_is_dirty: bool
def is_dirty(self) -> bool:
return self._is_dirty
@abstractmethod
def get_resolved(self) -> Optional["ResolveDependency"]:
...
class Hookable(ResolveDependency, ABC):
@abstractmethod
def run_hook(self, arg):
...
class UseSelection(Hookable):
enforcing: bool
target_value: bool
parent: "SelectedEbuild"
use_flag: UseFlag
flag: AtomUseT
def __init__(self, parent: "SelectedEbuild", use_flag: UseFlag, flag: AtomUseT):
self.parent = parent
self.use_flag = use_flag
self.flag = flag
self.target_value = use_flag.state
self.enforcing = False
if self.flag == AtomUseT.ENABLE:
self.target_value = True
self.enforcing = True
parent.schedule_use(UseFlag(use_flag.name, True))
elif self.flag == AtomUseT.DISABLE:
self.target_value = False
self.enforcing = True
parent.schedule_use(UseFlag(use_flag.name, False))
elif self.flag == AtomUseT.DISABLE_IF_OFF:
if not self.target_value:
self.enforcing = True
elif self.flag == AtomUseT.ENABLE_IF_ON:
if self.target_value:
self.enforcing = True
elif self.flag == AtomUseT.EQUAL:
self.enforcing = True
elif self.flag == AtomUseT.OPPOSITE:
self.enforcing = True
self.target_value = not self.target_value
parent.schedule_use(UseFlag(use_flag.name, self.target_value))
def run_hook(self, arg: bool):
"""
Make sure that this flag will
not get an invalid value
:param arg:
:return:
"""
if self.enforcing:
if arg != self.target_value:
raise RequiredUseException(
UseSuggestion(self.use_flag.name, self.target_value)
)
def get_resolved(self) -> Optional["ResolveDependency"]:
# This hook does not run any
# dependency resolution
return None
class RequiredUseHook(Hookable):
expression: RequiredUse
ebuild: "SelectedEbuild"
def __init__(self, selected_ebuild: "SelectedEbuild", required_use: RequiredUse):
self.expression = required_use
self.ebuild = selected_ebuild
def run_hook(self, arg: bool):
"""
Evaluate the flags in the required use
expression to make sure we have a match
:param arg: flag state that changed (unused)
:return: None
"""
def evaluate_required_use(
operator: SuggestionExpression.Operator, expr: RequiredUse
) -> Tuple[SuggestionExpression, int, int]:
"""
Count the number of expressions
that evaluate to True.
:param operator: operator for suggestion
:param expr: expression to verify
:return: (suggestions, num_true, total)
"""
n = 0
k = 0
suggestion = SuggestionExpression(operator)
for req_use in expr:
n += 1
op = UseOperatorT(req_use.operator)
if op == UseOperatorT.ENABLE or op == UseOperatorT.DISABLE:
target = op == UseOperatorT.ENABLE
state = self.ebuild.get_use(req_use.name).state
if req_use.depend is None and state == target:
k += 1
elif state == target:
# This is a conditional expression
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.AND, req_use.depend
)
if k_c == n_c:
k += 1
else:
# There are two different options here
# Either disable this useflag
# or try to meet its requirements
s = SuggestionExpression(
SuggestionExpression.Operator.LEAST_ONE
)
s.append(UseSuggestion(req_use.name, not state))
s.append(child_suggestion)
suggestion.append(s)
elif req_use.depend is not None and state == target:
k += 1
else:
suggestion.append(UseSuggestion(req_use.name, not state))
elif op == UseOperatorT.LEAST_ONE:
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.LEAST_ONE, req_use.depend
)
if k_c >= 1:
k += 1
else:
suggestion.append(child_suggestion)
elif op == UseOperatorT.EXACT_ONE:
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.EXACT_ONE, req_use.depend
)
if k_c == 1:
k += 1
else:
suggestion.append(child_suggestion)
elif op == UseOperatorT.MOST_ONE:
child_suggestion, k_c, n_c = evaluate_required_use(
SuggestionExpression.Operator.MOST_ONE, req_use.depend
)
if k_c <= 1:
k += 1
else:
suggestion.append(child_suggestion)
else:
raise InvalidExpressionException(
"Required use operator '%s' is not valid" % op
)
return suggestion, k, n
suggestions, g_k, g_n = evaluate_required_use(
SuggestionExpression.Operator.AND, self.expression
)
if g_k != g_n:
print(
UseOperatorT(self.expression.operator),
self.expression.name,
self.ebuild,
)
print("%d %d" % (g_k, g_n), flush=True)
raise RequiredUseException(suggestions)
def get_resolved(self) -> Optional["ResolveDependency"]:
return None
class UseConditional(Hookable):
parent: "SelectedEbuild"
# Should we raise an error if this condition is ever false?
required: bool
# The target state required for this expression to be analyzed
useflag: UseFlag
# The guarded dependency, used to re-calculate the resolution
dependency: Optional[Dependency]
current_evaluation: Optional["ResolveDependency"]
def __init__(
self,
parent: "SelectedEbuild",
useflag: UseFlag,
expression: Optional[Dependency],
required=False,
):
self.parent = parent
self.useflag = useflag
self.required = required
self.dependency = expression
self.current_evaluation = None
self._is_dirty = False
def get_resolved(self) -> Optional["ResolveDependency"]:
if self.dependency is None:
return None
if self._is_dirty or self.current_evaluation is None:
self.current_evaluation = resolve_single(self.parent, self.dependency)
return self.current_evaluation
def run_hook(self, flag_state: bool):
# Only evaluate the expression if our condition is met
if self.useflag.state != flag_state:
if self.required:
raise RequiredUseException(
UseSuggestion(self.useflag.name, self.useflag.state)
)
# Mark this expression to re-evaluate the dependencies
self._is_dirty = True
self.current_evaluation = None
return
self._is_dirty = False
class SelectedEbuild(ResolveDependency):
selected_by: Dict[Atom, Optional["SelectedEbuild"]]
# The original ebuild
ebuild: Ebuild
# The useflag delta from IUSE of the original ebuild
useflags: Dict[str, UseFlag]
# Use requirements
use_requirements: Dict[str, AtomUseT]
# Triggers that run when a use flag is changed
use_flag_hooks: Dict[str, List[Hookable]]
global_flag_hooks: List[Hookable] # Trigger when any use flag is changed
# Flags to set next time we regenerate
flags: Queue[UseFlag]
generators: DependencyContainer[ResolveDependency]
resolved_deps: DependencyContainer[ResolveDependency]
resolve_session: "PackageResolutionSession"
def __init__(
self,
parent: Optional["SelectedEbuild"],
atom: Atom,
ebuild: Ebuild,
resolve_session: "PackageResolutionSession",
):
self.selected_by = {}
self.ebuild = ebuild
self.useflags = {}
self.use_flag_hooks = {}
self.global_flag_hooks = []
self.flags = Queue()
self.resolve_session = resolve_session
self.generators = DependencyContainer[ResolveDependency]()
self.resolved_deps = DependencyContainer[ResolveDependency]()
self._is_dirty = True
self.add_selected_by(parent, atom)
def get_resolved(self) -> Optional[ResolveDependency]:
if self.resolve_session.check_resolved(self):
return self
self.resolve_session.add_to_session(self)
self.regenerate()
return self
def add_selected_by(self, parent: Optional["SelectedEbuild"], atom: Atom):
self.selected_by[atom] = parent
def change_within_slot(self, atom: Atom) -> bool:
"""
Change the currently selected ebuild
to another ebuild within the same slot
:param atom: try to match an ebuild to every dependency+this
:return: True if it can be done
"""
ebuild_match: Optional[Ebuild] = None
for ebuild in self.ebuild.package:
all_match = atom.matches(ebuild)
for prev_atom in self.selected_by:
all_match = all_match and prev_atom.matches(ebuild)
if not all_match:
break
if all_match:
ebuild_match = ebuild
break
if ebuild_match is None:
return False
self._is_dirty = True
self.ebuild = ebuild_match
return True
def regenerate(self):
"""
Refresh all children
:return:
"""
if self._is_dirty:
# A new ebuild was selected
# We need to regenerate the generators
self.generators.clear()
if not self.ebuild.metadata_init:
self.ebuild.initialize_metadata()
# Update use flags by flushing the flag buffer
self.flush_use()
# Make sure all use-flags conform to requirements
if self.ebuild.required_use is not None:
self.add_use_hook(None, RequiredUseHook(self, self.ebuild.required_use))
for i, dep_type in enumerate(
(
self.ebuild.bdepend,
self.ebuild.depend,
self.ebuild.rdepend,
self.ebuild.pdepend,
)
):
if dep_type is None:
continue
for dep in resolve_all(self, dep_type):
self.generators[i].append(dep)
self._is_dirty = False
# Regenerate children (recursively)
# All non-dirty expressions are already cached
# We can just remove everyone and re-append
self.resolved_deps.clear()
# Regenerate the dependency with the dirty flag enabled
i = 0
for dep_type in self.generators:
for generator in dep_type:
if generator is None:
continue
resolved = generator.get_resolved()
if resolved is None:
continue
self.resolved_deps[i].append(resolved)
i += 1
def has_use(self, name: str) -> bool:
return name in self.useflags or name in self.ebuild.iuse
def get_use(self, name: str) -> UseFlag:
if name not in self.useflags:
return self.ebuild.iuse[name]
return self.useflags[name]
def add_use(self, name: str, value: bool):
"""
Add a non-existent useflag
:param name: name of useflag
:param value: default value
:return:
"""
self.useflags[name] = UseFlag(name, value)
def add_use_requirement(self, name: str, flag: AtomUseT):
"""
Select a use flag required by an atom
:param name: name of the atom flag
:param flag: atom flag setting
:return:
"""
use = self.get_use(name)
self.add_use_hook(use, UseSelection(self, use, flag))
def add_use_hook(self, useflag: Optional[UseFlag], hook: Hookable):
"""
Associate an action with a flag changing
:param useflag: useflag to run hook on, None for all useflags
:param hook: action to take when useflag is changed
:return:
"""
if useflag is None:
self.global_flag_hooks.append(hook)
hook.run_hook(None)
return
if useflag.name not in self.use_flag_hooks:
self.use_flag_hooks[useflag.name] = []
self.use_flag_hooks[useflag.name].append(hook)
hook.run_hook(self.get_use(useflag.name).state)
def schedule_use(self, useflag: UseFlag):
"""
Update a useflag upon regeneration
:param useflag: useflag to update
:return: None
"""
self.flags.put(useflag)
self._is_dirty = True
def flush_use(self):
"""
Update all of the buffered useflags
and run their change hooks.
:return: None
"""
while not self.flags.empty():
useflag = self.flags.get()
# Only run the hooks if there is a change in state
if self.get_use(useflag.name).state != useflag.state:
self.useflags[useflag.name] = useflag
# Run all of the use-hooks for this flag
if useflag.name in self.use_flag_hooks:
for hook in self.use_flag_hooks[useflag.name]:
hook.run_hook(useflag.state)
# Run global use-hooks
for hook in self.global_flag_hooks:
hook.run_hook(None)
def __hash__(self):
return id(self)
def __repr__(self):
return "SelectedEbuild<%s %s>" % (self.ebuild.key, id(self))
class InstallPackage:
key: str
selected_ebuild_slots: Dict[str, SelectedEbuild]
resolve_session: "PackageResolutionSession"
def __init__(self, key: str, resolve_session: "PackageResolutionSession"):
self.key = key
self.selected_ebuild_slots = {}
self.resolve_session = resolve_session
def match_atom(
self, parent: Optional["SelectedEbuild"], atom: Atom, ebuild: Ebuild
) -> SelectedEbuild:
for slot in self.selected_ebuild_slots:
if atom.matches(self.selected_ebuild_slots[slot].ebuild):
return self.selected_ebuild_slots[slot]
# Check if slot has already been selected
if ebuild.slot is not None and ebuild.slot in self.selected_ebuild_slots:
# See if this atom matches the selected ebuild
sel_ebuild = self.selected_ebuild_slots[ebuild.slot]
if atom.matches(sel_ebuild.ebuild):
return sel_ebuild # We're good
# See if we can change the selected ebuild to match this atom
if sel_ebuild.change_within_slot(atom):
return sel_ebuild # Works!
else:
raise NotImplementedError("Cannot split a slot into multi-slot yet!")
elif ebuild.slot is not None:
# See if this atom matches any of the currently scheduled slots
for key, sel_ebuild in self.selected_ebuild_slots.items():
if atom.matches(sel_ebuild.ebuild):
return sel_ebuild
# We need to create a new selected ebuild and add it here
return self.add_atom(parent, atom, ebuild)
def add_atom(
self, parent: Optional["SelectedEbuild"], atom: Atom, ebuild: Ebuild
) -> SelectedEbuild:
"""
Add a SelectedEbuild in its slot
:param parent: parent package that triggered this
:param atom: atom that selected with ebuild
:param ebuild: ebuild selected by atom
:return: SelectedEbuild generated from the atom+ebuild
"""
sel_ebuild = SelectedEbuild(parent, atom, ebuild, self.resolve_session)
self.selected_ebuild_slots[sel_ebuild.ebuild.slot] = sel_ebuild
return sel_ebuild
class Emerge:
portage: Portage
selected_packages: Dict[str, InstallPackage]
blocks: Dict[str, List[Atom]]
resolve_session: "PackageResolutionSession"
def __init__(self, resolve_session: "PackageResolutionSession"):
self.portage = get_portage()
self.selected_packages = {}
self.blocks = {}
self.resolve_session = resolve_session
def add_block(self, parent: Optional["SelectedEbuild"], atom: Atom):
"""
Block ebuilds matching this atom from being selected
:param parent: ebuild that selected this block
:param atom: atom to block
:return:
"""
if atom.key not in self.blocks:
self.blocks[atom.key] = []
self.blocks[atom.key].append(atom)
def select_atom(
self, parent: Optional["SelectedEbuild"], atom: Atom
) -> SelectedEbuild:
ebuild = self.portage.match_atom(atom)
if ebuild is None:
raise ResolutionException("No ebuild to match '%s' could be found" % atom)
if ebuild.package_key in self.selected_packages:
# Ebuild with this key has already been selected
# See if we can match this to an existing slot
install_pkg = self.selected_packages[ebuild.package_key]
return install_pkg.match_atom(parent, atom, ebuild)
else:
pkg = InstallPackage(atom.key, self.resolve_session)
self.selected_packages[pkg.key] = pkg
return pkg.add_atom(parent, atom, ebuild)
class PackageResolutionSession:
current_resolution: Set[SelectedEbuild]
def __init__(self):
self.current_resolution = set()
def check_resolved(self, ebuild: SelectedEbuild) -> bool:
return ebuild in self.current_resolution
def add_to_session(self, ebuild):
self.current_resolution.add(ebuild)
def clear(self):
self.current_resolution.clear()
|
gpl-3.0
| 1,697,455,195,590,679,300
| 31.940751
| 88
| 0.567624
| false
| 4.288805
| false
| false
| false
|
rochacbruno/dynaconf
|
dynaconf/vendor/ruamel/yaml/scalarfloat.py
|
1
|
2326
|
from __future__ import print_function,absolute_import,division,unicode_literals
_B=False
_A=None
import sys
from .compat import no_limit_int
from .anchor import Anchor
if _B:from typing import Text,Any,Dict,List
__all__=['ScalarFloat','ExponentialFloat','ExponentialCapsFloat']
class ScalarFloat(float):
def __new__(D,*E,**A):
F=A.pop('width',_A);G=A.pop('prec',_A);H=A.pop('m_sign',_A);I=A.pop('m_lead0',0);J=A.pop('exp',_A);K=A.pop('e_width',_A);L=A.pop('e_sign',_A);M=A.pop('underscore',_A);C=A.pop('anchor',_A);B=float.__new__(D,*E,**A);B._width=F;B._prec=G;B._m_sign=H;B._m_lead0=I;B._exp=J;B._e_width=K;B._e_sign=L;B._underscore=M
if C is not _A:B.yaml_set_anchor(C,always_dump=True)
return B
def __iadd__(A,a):return float(A)+a;B=type(A)(A+a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __ifloordiv__(A,a):return float(A)//a;B=type(A)(A//a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __imul__(A,a):return float(A)*a;B=type(A)(A*a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;B._prec=A._prec;return B
def __ipow__(A,a):return float(A)**a;B=type(A)(A**a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __isub__(A,a):return float(A)-a;B=type(A)(A-a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
@property
def anchor(self):
A=self
if not hasattr(A,Anchor.attrib):setattr(A,Anchor.attrib,Anchor())
return getattr(A,Anchor.attrib)
def yaml_anchor(A,any=_B):
if not hasattr(A,Anchor.attrib):return _A
if any or A.anchor.always_dump:return A.anchor
return _A
def yaml_set_anchor(A,value,always_dump=_B):A.anchor.value=value;A.anchor.always_dump=always_dump
def dump(A,out=sys.stdout):out.write('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format(A,A._width,A._prec,A._m_sign,A._m_lead0,A._underscore,A._exp,A._e_width,A._e_sign))
class ExponentialFloat(ScalarFloat):
def __new__(A,value,width=_A,underscore=_A):return ScalarFloat.__new__(A,value,width=width,underscore=underscore)
class ExponentialCapsFloat(ScalarFloat):
def __new__(A,value,width=_A,underscore=_A):return ScalarFloat.__new__(A,value,width=width,underscore=underscore)
|
mit
| -8,955,087,405,345,137,000
| 69.515152
| 311
| 0.677988
| false
| 2.522777
| false
| false
| false
|
Wen777/beets
|
beetsplug/freedesktop.py
|
1
|
2206
|
# This file is part of beets.
# Copyright 2014, Matt Lichtenberg.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Creates freedesktop.org-compliant .directory files on an album level.
"""
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.ui import decargs
import os
import logging
log = logging.getLogger('beets.freedesktop')
def process_query(lib, opts, args):
for album in lib.albums(decargs(args)):
process_album(album)
def process_album(album):
albumpath = album.item_dir()
if album.artpath:
fullartpath = album.artpath
artfile = os.path.split(fullartpath)[1]
create_file(albumpath, artfile)
else:
log.debug(u'freedesktop: album has no art')
def create_file(albumpath, artfile):
file_contents = "[Desktop Entry]\nIcon=./" + artfile
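    # The resulting .directory file is expected to look roughly like this
    # (the art file name below is illustrative):
    #
    #   [Desktop Entry]
    #   Icon=./cover.jpg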
outfilename = os.path.join(albumpath, ".directory")
if not os.path.exists(outfilename):
        with open(outfilename, 'w') as outfile:
            outfile.write(file_contents)
class FreedesktopPlugin(BeetsPlugin):
def __init__(self):
super(FreedesktopPlugin, self).__init__()
self.config.add({
'auto': False
})
self.register_listener('album_imported', self.imported)
def commands(self):
freedesktop_command = Subcommand("freedesktop",
help="Create .directory files")
freedesktop_command.func = process_query
return [freedesktop_command]
def imported(self, lib, album):
automatic = self.config['auto'].get(bool)
if not automatic:
return
process_album(album)
|
mit
| 3,143,787,163,430,786,000
| 30.070423
| 72
| 0.677244
| false
| 3.946333
| false
| false
| false
|
dokipen/trac-announcer-plugin
|
announcer/util/mail.py
|
1
|
3241
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009, Robert Corsaro
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from base64 import b32encode, b32decode
try:
from email.header import Header
except:
from email.Header import Header
MAXHEADERLEN = 76
def next_decorator(event, message, decorates):
"""
Helper method for IAnnouncerEmailDecorators. Call the next decorator
or return.
"""
if decorates and len(decorates) > 0:
next = decorates.pop()
return next.decorate_message(event, message, decorates)
def set_header(message, key, value, charset=None):
if not charset:
charset = message.get_charset() or 'ascii'
value = Header(value, charset, MAXHEADERLEN-(len(key)+2))
    if key in message:
message.replace_header(key, value)
else:
message[key] = value
return message
def uid_encode(projurl, realm, target):
"""
Unique identifier used to track resources in relation to emails.
projurl included to avoid Message-ID collisions. Returns a base32 encode
UID string.
Set project_url in trac.ini for proper results.
"""
if hasattr(target, 'id'):
id = str(target.id)
elif hasattr(target, 'name'):
id = target.name
else:
id = str(target)
uid = ','.join((projurl, realm, id))
return b32encode(uid)
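# A rough, illustrative round trip (values are hypothetical; the exact base32
# string depends on the inputs):
#   uid = uid_encode('http://example.com/trac', 'ticket', ticket)  # e.g. 'GB2HI...'
#   projurl, realm, id = uid_decode(uid)
#   msgid(uid, 'example.com')  # -> '<GB2HI...@example.com>'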
def uid_decode(encoded_uid):
"""
    Returns a list of [projurl, realm, id].
"""
uid = b32decode(encoded_uid)
return uid.split(',')
def msgid(uid, host='localhost'):
"""
Formatted id for email headers.
ie. <UIDUIDUIDUIDUID@localhost>
"""
return "<%s@%s>"%(uid, host)
|
bsd-3-clause
| -2,618,730,130,924,297,700
| 35.829545
| 79
| 0.685591
| false
| 4.155128
| false
| false
| false
|
HaroldMills/Vesper
|
vesper/signal/wave_audio_file.py
|
1
|
6118
|
"""Module containing class `WaveAudioFileType`."""
import os.path
import wave
import numpy as np
from vesper.signal.audio_file_reader import AudioFileReader
from vesper.signal.unsupported_audio_file_error import UnsupportedAudioFileError
'''
audio_file_utils:
read_audio_file(file_path)
write_audio_file(file_path, waveform)
class AudioFileType:
name
reader_class
writer_class
is_recognized_file(file_path)
class AudioFileReader:
file_type
num_channels
length
sample_rate
dtype
read(start_index=0, length=None, samples=None)
close()
class AudioFileWriter:
file_type
num_channels
length
sample_rate
dtype
append(samples)
close()
class WaveFileReader(AudioFileReader):
__init__(file_path)
class WaveFileWriter(AudioFileWriter):
__init__(file_path, num_channels, sample_rate, dtype=None)
'''
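# A hedged usage sketch of the reader defined below (the file path is
# hypothetical): open a WAV file, read one second of samples, then close.
#
#   reader = WaveAudioFileReader('clip.wav')
#   samples = reader.read(start_index=0, length=int(reader.sample_rate))
#   reader.close()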
class WaveAudioFileReader(AudioFileReader):
def __init__(self, file_, mono_1d=False):
"""
Initializes this file reader for the specified file.
`file_` may be either a string or a file-like object. If it is a
string it should be the path of a WAV file. If it is a file-like
object, its contents should be a WAV file.
"""
if isinstance(file_, str):
# `file_` is a file path
file_path = file_
if not os.path.exists(file_path):
raise ValueError('File "{}" does not exist.'.format(file_path))
if not WaveAudioFileType.is_supported_file(file_path):
raise UnsupportedAudioFileError(
'File "{}" does not appear to be a WAV file.'.format(
file_path))
self._name = 'WAV file "{}"'.format(file_path)
else:
# `file_` is a file-like object
file_path = None
self._name = 'WAV file'
try:
self._reader = wave.open(file_, 'rb')
except:
raise OSError('Could not open {}.'.format(self._name))
try:
(num_channels, sample_width, sample_rate, length, compression_type,
compression_name) = self._reader.getparams()
except:
self._reader.close()
raise OSError('Could not read metadata from {}.'.format(self._name))
sample_size = 8 * sample_width
        if compression_type != 'NONE':
            self._reader.close()
            raise UnsupportedAudioFileError((
                '{} appears to contain compressed data (with '
                'compression name "{}"), which is not '
                'supported.').format(self._name, compression_name))
        # TODO: support additional sample sizes, especially 24 bits.
        if sample_size != 8 and sample_size != 16:
            self._reader.close()
            raise UnsupportedAudioFileError((
                '{} contains {}-bit samples, which are '
                'not supported.').format(self._name, sample_size))
if sample_size == 8:
dtype = np.uint8 # unsigned as per WAVE file spec
else:
dtype = np.dtype('<i2')
super().__init__(
file_path, WaveAudioFileType, num_channels, length, sample_rate,
dtype, mono_1d)
def read(self, start_index=0, length=None):
if self._reader is None:
raise OSError('Cannot read from closed {}.'.format(self._name))
if start_index < 0 or start_index > self.length:
raise ValueError((
'Read start index {} is out of range [{}, {}] for '
'{}.').format(start_index, 0, self.length, self._name))
if length is None:
# no length specified
length = self.length - start_index
else:
# length specified
stop_index = start_index + length
if stop_index > self.length:
# stop index exceeds file length
raise ValueError((
'Read stop index {} implied by start index {} and read '
'length {} exceeds file length {} for {}.').format(
stop_index, start_index, length, self.length,
self._name))
try:
self._reader.setpos(start_index)
except:
self._reader.close()
raise OSError(
'Set of read position failed for {}.'.format(self._name))
try:
buffer = self._reader.readframes(length)
except:
self._reader.close()
raise OSError('Samples read failed for {}.'.format(self._name))
samples = np.frombuffer(buffer, dtype=self.dtype)
if len(samples) != length * self.num_channels:
raise OSError(
'Got fewer samples than expected from read of {}.'.format(
self._name))
if self.num_channels == 1 and self.mono_1d:
samples = samples.reshape((length,))
else:
samples = samples.reshape((length, self.num_channels)).transpose()
# TODO: Deinterleave samples?
# TODO: Byte swap samples on big endian platforms?
return samples
def close(self):
if self._reader is not None:
self._reader.close()
self._reader = None
class WaveAudioFileType:
name = 'WAV Audio File Type'
reader_class = WaveAudioFileReader
# writer_class = WaveAudioFileWriter
file_name_extensions = frozenset(['.wav', '.WAV'])
@staticmethod
def is_supported_file(file_path):
extension = os.path.splitext(file_path)[1]
return extension in WaveAudioFileType.file_name_extensions
|
mit
| 6,694,778,441,508,051,000
| 27.588785
| 80
| 0.52746
| false
| 4.620846
| false
| false
| false
|
asshinator/CodeScraps
|
utilities/timinator.py
|
1
|
1604
|
"""This module is used to time the execution of other modules,
and is executed through tasks.json"""
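# Example invocation (hedged; typically wired into an editor's tasks.json):
#   python timinator.py some_script.py 50
# where some_script.py is the script to time and 50 is the iteration count
# (defaults to 100 when omitted).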
import sys
import timeit
import cProfile
if len(sys.argv) < 2:
    raise AssertionError("No script specified to time!")
elif ".py" not in sys.argv[1]:
    print(str(sys.argv[1]) + " is not a Python script!")
exit(1)
scriptToExecute = sys.argv[1]
numberOfIterations = int(sys.argv[2]) if len(sys.argv) > 2 else 100
if "timinator" not in scriptToExecute:
    #Construct the python snippet that will execute the target script.
stringerino = r'exec(open(r'
stringerino += "'"
stringerino += scriptToExecute
stringerino += "'"
stringerino += r').read())'
#Print out the name of the script we're executing
print("ScriptName:\t"+sys.argv[1])
#Mention what the script outputs, so we can always use this.
print("PythonSnippet:\t"+stringerino)
print("Execution:\t",end='')
    # exec() always returns None, so there is no separate return value to print;
    # any output comes from the executed script itself.
    exec(stringerino)
#mention Execution time.
baseExecutionTime = timeit.timeit(stringerino, number=numberOfIterations)
print("Execution time over "+str(numberOfIterations)+" runs:\t"
+str(baseExecutionTime))
nullExecutionTime = timeit.timeit(stringerino,"isNullRun=True",number=numberOfIterations)
print("Null Execution time over "+str(numberOfIterations)+" runs:\t"
+str(nullExecutionTime))
deltaExecutionTime = (baseExecutionTime - nullExecutionTime) / numberOfIterations
print("average delta execution time:"+str(deltaExecutionTime))
else:
print("timinator can't time itself. That's a conundrum!")
|
mit
| 1,828,312,055,412,470,800
| 37.214286
| 93
| 0.696384
| false
| 3.687356
| false
| true
| false
|
rjw57/openni-skeleton-export
|
examples/labelbones.py
|
1
|
4166
|
#!/usr/bin/env python
#
# An example script for extracting labelled images by associating points with
# their closest bone.
"""
Usage:
labelbones.py (-h | --help)
labelbones.py [--verbose] <logfile> <frame-prefix>
Options:
-h, --help Show a brief usage summary.
-v, --verbose Increase verbosity of output.
"""
import logging
import docopt
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tables
LOG = logging.getLogger()
def main():
"""Main entry point."""
opts = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if opts['--verbose'] else logging.WARN
)
LOG.info('Opening log file {0}'.format(opts['<logfile>']))
log_root = tables.open_file(opts['<logfile>']).root
for frame in log_root.frames:
frame_idx = frame._v_attrs.idx
if frame_idx % 30 == 0:
LOG.info('Processing frame {0}...'.format(frame_idx))
user = None
for tracked_user in frame.users:
try:
if tracked_user.joints.shape[0] > 0:
user = tracked_user
except AttributeError:
# it's ok
pass
# If we have a user, detect labels
if user is None:
label_im = np.asanyarray(frame.label)
else:
label_im = bone_labels(frame, user)
label_im = label_im / float(max(1.0, label_im.max()))
label_color_im = (plt.cm.jet(label_im)[...,:3] * 255).astype(np.uint8)
Image.fromarray(label_color_im).save(
'{0}-{1:05d}.png'.format(opts['<frame-prefix>'], frame_idx))
def distances_to_line_segment(pts, line):
"""pts is a Nx3 array of 3d points.
line = (p1, p2) where p1 and p2 are 3-vectors.
"""
p1, p2 = line
p1, p2 = np.asarray(p1), np.asarray(p2)
# Let bone line be a + t * n
# Compute n
n = p2 - p1
line_len = np.sqrt(np.sum(n**2))
n /= line_len
# Compute points using p1 and p2 as origin
# Note, x = p - a
x, y = np.copy(pts), np.copy(pts)
for i in range(3):
x[:,i] -= p1[i]
y[:,i] -= p2[i]
# Squared distances to p1 and p2
d1 = np.sum(x**2, axis=1)
d2 = np.sum(y**2, axis=1)
# Compute t = (p - a) . n
xdotn = np.copy(x)
for i in range(3):
xdotn[:,i] *= n[i]
xdotn = np.sum(xdotn, axis=1)
# Compute squared distance to line
dl = np.zeros_like(xdotn)
for i in range(3):
dl += (x[:,i] - xdotn * n[i]) ** 2
# Compute length along line
norm_len = xdotn / line_len
# Which distance should we use?
d = np.where(norm_len < 0, d1, np.where(norm_len > 1, d2, dl))
return np.sqrt(d)
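# Worked example (hedged): for the unit segment from (0, 0, 0) to (1, 0, 0),
# a point at (0.5, 1, 0) projects inside the segment and is distance 1 from it,
# while a point at (2, 0, 0) falls past the far endpoint and is distance 1 from p2.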
def bone_labels(frame, user):
# Get points for this user
pts = frame.points[:]
pt_labels = frame.point_labels[:]
user_pts = pts[pt_labels == user._v_attrs.idx, :]
joint_map = {}
for joint in user.joints:
joint_map[joint['id']] = (joint['x'], joint['y'], joint['z'])
# Get bones
bones = dict(
neck = (1,2),
left_forearm = (9,7), left_arm = (7,6),
right_forearm = (13,15), right_arm = (12,13),
left_chest = (6,17), right_chest = (12,21),
left_thigh = (17,18), left_calf = (18,20),
right_thigh = (21,22), right_calf = (22,24),
left_collar = (2,6), right_collar = (2,12),
# chest = (2,3)
)
bone_lines = {}
for bone_name, bone_joints in bones.items():
j1, j2 = bone_joints
if j1 not in joint_map or j2 not in joint_map:
continue
j1_loc, j2_loc = tuple(joint_map[j] for j in (j1,j2))
bone_lines[bone_name] = np.array((j1_loc, j2_loc))
bone_names = sorted(bone_lines.keys())
bone_dists = np.zeros((user_pts.shape[0], len(bone_names)))
for i, n in enumerate(bone_names):
bone_dists[:,i] = distances_to_line_segment(user_pts, bone_lines[n])
closest_bone_indices = np.argmin(bone_dists, axis=1)
label_image = np.zeros_like(frame.depth)
label_image[frame.label == user._v_attrs.idx] = closest_bone_indices + 1
return label_image
if __name__ == '__main__':
main()
|
apache-2.0
| -3,595,863,162,251,792,400
| 27.930556
| 78
| 0.56097
| false
| 3.034232
| false
| false
| false
|
malept/gmusicprocurator
|
docs/conf.py
|
1
|
12282
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import json
import os
import re
import sys
THIS_DIR = os.path.abspath('.')
BASE_DIR = os.path.abspath('..')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, BASE_DIR)
bower_metadata = json.load(open(os.path.join(BASE_DIR, 'bower.json')))
npm_metadata = json.load(open(os.path.join(BASE_DIR, 'package.json')))
def setup(app):
app.add_config_value('readthedocs', False, True)
readthedocs = os.environ.get('READTHEDOCS') == 'True'
if readthedocs:
os.environ['GMUSICPROCURATOR_SETTINGS'] = 'default_settings.py'
# -- General configuration ----------------------------------------------------
AUTHORS = u', '.join(bower_metadata['authors'])
TITLE = u'GMusicProcurator'
LONG_TITLE = u'{0} Documentation'.format(TITLE)
SUMMARY = bower_metadata['description']
SHORT_COPYRIGHT = u'2014, {0}. Some Rights Reserved.'.format(AUTHORS)
COPYRIGHT = u'''{0}
This work is licensed under a
Creative Commons Attribution-ShareAlike 4.0
International License'''.format(SHORT_COPYRIGHT)
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask',
]
if not readthedocs:
extensions += [
'sphinxcontrib.coffeedomain',
]
try:
import rst2pdf
except ImportError:
rst2pdf = None
if rst2pdf:
extensions.append('rst2pdf.pdfbuilder')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = TITLE
copyright = COPYRIGHT
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = re.match(r'\d+\.\d+', npm_metadata['version']).group(0)
# The full version, including alpha/beta/rc tags.
release = npm_metadata['version']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
if readthedocs:
exclude_patterns += [
'coffeescript.rst',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# intersphinx extension
intersphinx_mapping = {
'py': ('http://docs.python.org/2.7/', None)
}
mdn_inv = os.path.join(THIS_DIR, 'mdn-js-objects.inv')
bb_inv = os.path.join(THIS_DIR, 'backbone.inv')
if not readthedocs:
if os.path.exists(mdn_inv):
mdn = 'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/'
intersphinx_mapping['js'] = (mdn, mdn_inv)
if os.path.exists(bb_inv):
intersphinx_mapping['backbone'] = ('http://backbonejs.org/', bb_inv)
# coffeedomain extension
coffee_src_dir = os.path.join(BASE_DIR, 'gmusicprocurator', 'static', 'cs')
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html',
'copyright_sidebar.html',
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GMusicProcuratordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GMusicProcurator.tex', LONG_TITLE, AUTHORS, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gmusicprocurator', LONG_TITLE, [AUTHORS], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GMusicProcurator', LONG_TITLE, AUTHORS,
'GMusicProcurator', SUMMARY, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = TITLE
epub_author = AUTHORS
epub_publisher = AUTHORS
epub_copyright = COPYRIGHT
# The basename for the epub file. It defaults to the project name.
# epub_basename = TITLE
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
('index', u'gmusicprocurator', TITLE, AUTHORS),
]
|
gpl-3.0
| 5,358,588,643,516,297,000
| 29.552239
| 82
| 0.693535
| false
| 3.608108
| false
| false
| false
|
micahflee/onionshare
|
desktop/src/onionshare/tab/mode/receive_mode/__init__.py
|
1
|
15632
|
# -*- coding: utf-8 -*-
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014-2021 Micah Lee, et al. <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from PySide2 import QtCore, QtWidgets, QtGui
from onionshare_cli.web import Web
from ..history import History, ToggleHistory, ReceiveHistoryItem
from .. import Mode
from .... import strings
from ....widgets import MinimumSizeWidget, Alert
from ....gui_common import GuiCommon
class ReceiveMode(Mode):
"""
Parts of the main window UI for receiving files.
"""
def init(self):
"""
Custom initialization for ReceiveMode.
"""
# Create the Web object
self.web = Web(self.common, True, self.settings, "receive")
# Receive image
self.image_label = QtWidgets.QLabel()
self.image_label.setPixmap(
QtGui.QPixmap.fromImage(
QtGui.QImage(
GuiCommon.get_resource_path(
"images/{}_mode_receive.png".format(self.common.gui.color_mode)
)
)
)
)
self.image_label.setFixedSize(250, 250)
image_layout = QtWidgets.QVBoxLayout()
image_layout.addWidget(self.image_label)
self.image = QtWidgets.QWidget()
self.image.setLayout(image_layout)
# Settings
# Data dir
data_dir_label = QtWidgets.QLabel(
strings._("mode_settings_receive_data_dir_label")
)
self.data_dir_lineedit = QtWidgets.QLineEdit()
self.data_dir_lineedit.setReadOnly(True)
self.data_dir_lineedit.setText(self.settings.get("receive", "data_dir"))
data_dir_button = QtWidgets.QPushButton(
strings._("mode_settings_receive_data_dir_browse_button")
)
data_dir_button.clicked.connect(self.data_dir_button_clicked)
data_dir_layout = QtWidgets.QHBoxLayout()
data_dir_layout.addWidget(data_dir_label)
data_dir_layout.addWidget(self.data_dir_lineedit)
data_dir_layout.addWidget(data_dir_button)
self.mode_settings_widget.mode_specific_layout.addLayout(data_dir_layout)
# Disable text or files
        self.disable_text_checkbox = QtWidgets.QCheckBox()
        self.disable_text_checkbox.setChecked(
            bool(self.settings.get("receive", "disable_text"))
        )
        self.disable_text_checkbox.clicked.connect(self.disable_text_checkbox_clicked)
        self.disable_text_checkbox.setText(
            strings._("mode_settings_receive_disable_text_checkbox")
        )
        self.disable_files_checkbox = QtWidgets.QCheckBox()
        self.disable_files_checkbox.setChecked(
            bool(self.settings.get("receive", "disable_files"))
        )
        self.disable_files_checkbox.clicked.connect(self.disable_files_checkbox_clicked)
        self.disable_files_checkbox.setText(
            strings._("mode_settings_receive_disable_files_checkbox")
        )
disable_layout = QtWidgets.QHBoxLayout()
disable_layout.addWidget(self.disable_text_checkbox)
disable_layout.addWidget(self.disable_files_checkbox)
disable_layout.addStretch()
self.mode_settings_widget.mode_specific_layout.addLayout(disable_layout)
# Webhook URL
webhook_url = self.settings.get("receive", "webhook_url")
self.webhook_url_checkbox = QtWidgets.QCheckBox()
self.webhook_url_checkbox.clicked.connect(self.webhook_url_checkbox_clicked)
self.webhook_url_checkbox.setText(
strings._("mode_settings_receive_webhook_url_checkbox")
)
self.webhook_url_lineedit = QtWidgets.QLineEdit()
self.webhook_url_lineedit.editingFinished.connect(
self.webhook_url_editing_finished
)
self.webhook_url_lineedit.setPlaceholderText(
"https://example.com/post-when-file-uploaded"
)
webhook_url_layout = QtWidgets.QHBoxLayout()
webhook_url_layout.addWidget(self.webhook_url_checkbox)
webhook_url_layout.addWidget(self.webhook_url_lineedit)
if webhook_url is not None and webhook_url != "":
self.webhook_url_checkbox.setCheckState(QtCore.Qt.Checked)
self.webhook_url_lineedit.setText(
self.settings.get("receive", "webhook_url")
)
self.show_webhook_url()
else:
self.webhook_url_checkbox.setCheckState(QtCore.Qt.Unchecked)
self.hide_webhook_url()
self.mode_settings_widget.mode_specific_layout.addLayout(webhook_url_layout)
# Set title placeholder
self.mode_settings_widget.title_lineedit.setPlaceholderText(
strings._("gui_tab_name_receive")
)
# Server status
self.server_status.set_mode("receive")
self.server_status.server_started_finished.connect(self.update_primary_action)
self.server_status.server_stopped.connect(self.update_primary_action)
self.server_status.server_canceled.connect(self.update_primary_action)
# Tell server_status about web, then update
self.server_status.web = self.web
self.server_status.update()
# Upload history
self.history = History(
self.common,
QtGui.QPixmap.fromImage(
QtGui.QImage(
GuiCommon.get_resource_path("images/receive_icon_transparent.png")
)
),
strings._("gui_receive_mode_no_files"),
strings._("gui_all_modes_history"),
)
self.history.hide()
# Toggle history
self.toggle_history = ToggleHistory(
self.common,
self,
self.history,
QtGui.QIcon(GuiCommon.get_resource_path("images/receive_icon_toggle.png")),
QtGui.QIcon(
GuiCommon.get_resource_path("images/receive_icon_toggle_selected.png")
),
)
# Header
header_label = QtWidgets.QLabel(strings._("gui_new_tab_receive_button"))
header_label.setStyleSheet(self.common.gui.css["mode_header_label"])
# Receive mode warning
receive_warning = QtWidgets.QLabel(strings._("gui_receive_mode_warning"))
receive_warning.setMinimumHeight(80)
receive_warning.setWordWrap(True)
# Top bar
top_bar_layout = QtWidgets.QHBoxLayout()
top_bar_layout.addStretch()
top_bar_layout.addWidget(self.toggle_history)
# Main layout
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.addWidget(header_label)
self.main_layout.addWidget(receive_warning)
self.main_layout.addWidget(self.primary_action, stretch=1)
self.main_layout.addWidget(MinimumSizeWidget(525, 0))
# Row layout
content_row = QtWidgets.QHBoxLayout()
content_row.addLayout(self.main_layout)
content_row.addWidget(self.image)
row_layout = QtWidgets.QVBoxLayout()
row_layout.addLayout(top_bar_layout)
row_layout.addLayout(content_row, stretch=1)
row_layout.addWidget(self.server_status)
# Column layout
self.column_layout = QtWidgets.QHBoxLayout()
self.column_layout.addLayout(row_layout)
self.column_layout.addWidget(self.history, stretch=1)
# Wrapper layout
self.wrapper_layout = QtWidgets.QVBoxLayout()
self.wrapper_layout.addLayout(self.column_layout)
self.setLayout(self.wrapper_layout)
def get_type(self):
"""
Returns the type of mode as a string (e.g. "share", "receive", etc.)
"""
return "receive"
def data_dir_button_clicked(self):
"""
Browse for a new OnionShare data directory, and save to tab settings
"""
data_dir = self.data_dir_lineedit.text()
selected_dir = QtWidgets.QFileDialog.getExistingDirectory(
self, strings._("mode_settings_receive_data_dir_label"), data_dir
)
if selected_dir:
# If we're running inside a flatpak package, the data dir must be inside ~/OnionShare
if self.common.gui.is_flatpak:
if not selected_dir.startswith(os.path.expanduser("~/OnionShare")):
Alert(self.common, strings._("gui_receive_flatpak_data_dir"))
return
self.common.log(
"ReceiveMode",
"data_dir_button_clicked",
f"selected dir: {selected_dir}",
)
self.data_dir_lineedit.setText(selected_dir)
self.settings.set("receive", "data_dir", selected_dir)
def disable_text_checkbox_clicked(self):
self.settings.set(
"receive", "disable_text", self.disable_text_checkbox.isChecked()
)
def disable_files_checkbox_clicked(self):
self.settings.set(
"receive", "disable_files", self.disable_files_checkbox.isChecked()
)
def webhook_url_checkbox_clicked(self):
if self.webhook_url_checkbox.isChecked():
if self.settings.get("receive", "webhook_url"):
self.webhook_url_lineedit.setText(
self.settings.get("receive", "webhook_url")
)
self.show_webhook_url()
else:
self.settings.set("receive", "webhook_url", None)
self.hide_webhook_url()
def webhook_url_editing_finished(self):
self.settings.set("receive", "webhook_url", self.webhook_url_lineedit.text())
def hide_webhook_url(self):
self.webhook_url_lineedit.hide()
def show_webhook_url(self):
self.webhook_url_lineedit.show()
def get_stop_server_autostop_timer_text(self):
"""
Return the string to put on the stop server button, if there's an auto-stop timer
"""
return strings._("gui_receive_stop_server_autostop_timer")
def autostop_timer_finished_should_stop_server(self):
"""
The auto-stop timer expired, should we stop the server? Returns a bool
"""
# If there were no attempts to upload files, or all uploads are done, we can stop
if (
self.web.receive_mode.cur_history_id == 0
or not self.web.receive_mode.uploads_in_progress
):
self.server_status.stop_server()
self.server_status_label.setText(strings._("close_on_autostop_timer"))
return True
# An upload is probably still running - hold off on stopping the share, but block new shares.
else:
self.server_status_label.setText(
strings._("gui_receive_mode_autostop_timer_waiting")
)
self.web.receive_mode.can_upload = False
return False
def start_server_custom(self):
"""
Starting the server.
"""
# Reset web counters
self.web.receive_mode.cur_history_id = 0
self.web.reset_invalid_passwords()
# Hide and reset the uploads if we have previously shared
self.reset_info_counters()
# Set proxies for webhook URL
if self.common.gui.local_only:
self.web.proxies = None
else:
(socks_address, socks_port) = self.common.gui.onion.get_tor_socks_port()
self.web.proxies = {
"http": f"socks5h://{socks_address}:{socks_port}",
"https": f"socks5h://{socks_address}:{socks_port}",
}
def start_server_step2_custom(self):
"""
Step 2 in starting the server.
"""
# Continue
self.starting_server_step3.emit()
self.start_server_finished.emit()
def handle_tor_broke_custom(self):
"""
Connection to Tor broke.
"""
self.primary_action.hide()
def handle_request_load(self, event):
"""
Handle REQUEST_LOAD event.
"""
self.system_tray.showMessage(
strings._("systray_page_loaded_title"),
strings._("systray_page_loaded_message"),
)
def handle_request_started(self, event):
"""
Handle REQUEST_STARTED event.
"""
item = ReceiveHistoryItem(
self.common,
event["data"]["id"],
event["data"]["content_length"],
)
self.history.add(event["data"]["id"], item)
self.toggle_history.update_indicator(True)
self.history.in_progress_count += 1
self.history.update_in_progress()
self.system_tray.showMessage(
strings._("systray_receive_started_title"),
strings._("systray_receive_started_message"),
)
def handle_request_progress(self, event):
"""
Handle REQUEST_PROGRESS event.
"""
self.history.update(
event["data"]["id"],
{"action": "progress", "progress": event["data"]["progress"]},
)
def handle_request_upload_includes_message(self, event):
"""
Handle REQUEST_UPLOAD_INCLUDES_MESSAGE event.
"""
self.history.includes_message(event["data"]["id"], event["data"]["filename"])
def handle_request_upload_file_renamed(self, event):
"""
Handle REQUEST_UPLOAD_FILE_RENAMED event.
"""
self.history.update(
event["data"]["id"],
{
"action": "rename",
"old_filename": event["data"]["old_filename"],
"new_filename": event["data"]["new_filename"],
},
)
def handle_request_upload_set_dir(self, event):
"""
Handle REQUEST_UPLOAD_SET_DIR event.
"""
self.history.update(
event["data"]["id"],
{
"action": "set_dir",
"filename": event["data"]["filename"],
"dir": event["data"]["dir"],
},
)
def handle_request_upload_finished(self, event):
"""
Handle REQUEST_UPLOAD_FINISHED event.
"""
self.history.update(event["data"]["id"], {"action": "finished"})
self.history.completed_count += 1
self.history.in_progress_count -= 1
self.history.update_completed()
self.history.update_in_progress()
def handle_request_upload_canceled(self, event):
"""
Handle REQUEST_UPLOAD_CANCELED event.
"""
self.history.update(event["data"]["id"], {"action": "canceled"})
self.history.in_progress_count -= 1
self.history.update_in_progress()
def on_reload_settings(self):
"""
We should be ok to re-enable the 'Start Receive Mode' button now.
"""
self.primary_action.show()
def reset_info_counters(self):
"""
Set the info counters back to zero.
"""
self.history.reset()
self.toggle_history.indicator_count = 0
self.toggle_history.update_indicator()
def update_primary_action(self):
self.common.log("ReceiveMode", "update_primary_action")
|
gpl-3.0
| 5,165,836,029,262,839,000
| 35.269142
| 101
| 0.604273
| false
| 4.061315
| false
| false
| false
|
YingYang/STFT_R_git_repo
|
MNE_stft/mne_stft_regression_individual_G.py
|
1
|
12643
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 1 12:55:06 2014
@author: ying
"""
import mne
import numpy as np
#import matplotlib.pyplot as plt
from mne.minimum_norm.inverse import (apply_inverse, _check_method, _check_ori,
prepare_inverse_operator, _pick_channels_inverse_operator, _check_ch_names,
_assemble_kernel, combine_xyz)
from mne.io.constants import FIFF
from mne.time_frequency import stft, istft
import numpy.linalg as la
# ============================================================================
def _apply_inverse_evoked_list(evoked_list, inverse_operator, lambda2, method="MNE",
labels=None, nave=1, pick_ori=None,
verbose=None, pick_normal=None):
""" Utility function for applying the inverse solution to a list of evoked object
Assume that the info for each evoked object in the list is the same
Input:
evoked_list,
inverse_operator,
lambda2,
method,
labels, list of label objects
nave = 1,
pick_ori = None,
verbos = none,
pick_normal = None
Output: stc_Data, [n_sources_labels, n_times, n_trials]
"""
info = evoked_list[0].info
method = _check_method(method)
pick_ori = _check_ori(pick_ori)
_check_ch_names(inverse_operator, info)
inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
sel = _pick_channels_inverse_operator(info['ch_names'], inv)
labels_union = None
if labels is not None:
labels_union = labels[0]
if len(labels) > 1:
for i in range(1,len(labels)):
labels_union += labels[i]
K, noise_norm, vertno = _assemble_kernel(inv, labels_union, method, pick_ori)
is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
and pick_ori is None)
if not is_free_ori and noise_norm is not None:
# premultiply kernel with noise normalization
K *= noise_norm
n_channels = len(sel)
n_times = len(evoked_list[0].times)
n_trials = len(evoked_list)
n_sources = K.shape[0]
stc_Data = np.zeros([n_sources,n_times, n_trials])
for i in range(n_trials):
if is_free_ori:
# Compute solution and combine current components (non-linear)
sol = np.dot(K, evoked_list[i].data) # apply imaging kernel
if is_free_ori:
sol = combine_xyz(sol)
if noise_norm is not None:
sol *= noise_norm
else:
# Linear inverse: do computation here or delayed
sol = np.dot(K, evoked_list[i].data)
stc_Data[:,:,i] = sol
return stc_Data
# ===========================================================================
def mne_stft_regression_individual_G(evoked_list, inverse_operator_list, G_ind, X,
labels = None, pick_ori=None, pick_normal=None,
snr=1, wsize = 16, tstep = 4, Flag_reg_stats = False,
method = "MNE"):
''' Get the MNE solution for a given snr(lambda value)
Input:
evoked_list, a list of evoked instances
inverse_operator, the inverse operator for MNE
X, [n_trials, p] array
labels, ROI labels list, if None, use the whole brain
snr, controls lambda
wsize, window size of the stft transform
tstep, time step of the stft transform
method, "MNE", "dSPM", "sLORETA",
Note that dSPM and sLORETA can not be used for prediction,
and the coefficients are normalized too.
Output:
        result_dict = dict(coef = coef, F = F, sel = sel, roi_data_3D = roi_data)
        ['coef']: Regression coefficients, complex arrays [n_dipoles,n_coefs,n_steps,p]
        ['F'], F-statistics, complex arrays
        ['sel'], selection of the source points, columns of G
        ['roi_data_3D'], the source data in the ROI
'''
n_trials = len(evoked_list)
sel = []
# The following line is wrong
n_dipoles = inverse_operator_list[0]['nsource']
# if label is specified, only do the regression on the labels
# otherwise, select the data for the whole brain.
if labels is not None:
for i in range(len(labels)):
_, sel_tmp = mne.source_space.label_src_vertno_sel(labels[i],
inverse_operator_list[0]['src'])
sel = np.hstack([sel, sel_tmp])
sel = sel.astype(np.int)
else:
sel = np.arange(0,n_dipoles,1)
sel.astype(np.int)
# tested, the result is the same as running apply_inverse()
n_run = len(np.unique(G_ind))
n_dipoles = len(sel)
n_times = evoked_list[0].data.shape[1]
roi_data = np.zeros([n_dipoles, n_times, n_trials])
n_trials = len(evoked_list)
for run_id in range(n_run):
tmp_evoked_list = [evoked_list[k] for k in range(n_trials) if G_ind[k] == run_id]
tmp = _apply_inverse_evoked_list(tmp_evoked_list, inverse_operator_list[run_id],
lambda2= 1.0/snr**2, method=method,
labels=labels, nave=1, pick_ori=pick_ori,
verbose=None, pick_normal=None)
roi_data[:,:, G_ind == run_id] = tmp
# stft transform, F means the coefficients
F_roi_data = list()
for i in range(n_trials):
F_roi_data.append(stft(roi_data[:,:,i], wsize= wsize, tstep = tstep))
# put the stft transform into a matrix
dim0,dim1,dim2 = F_roi_data[0].shape
F_roi_data_3d = np.zeros([dim0,dim1,dim2,n_trials],dtype = np.complex)
for i in range(n_trials):
F_roi_data_3d[:,:,:,i] = F_roi_data[i]
del(F_roi_data)
# regression, return coefficients and F-values
p = X.shape[1]
coef = np.zeros([dim0,dim1,dim2,p], dtype = np.complex)
F = np.zeros([dim0,dim1,dim2], dtype = np.complex) if Flag_reg_stats else None
linreg_op = np.dot(la.inv(X.T.dot(X)),X.T)
for i in range(dim0):
for j in range(dim1):
for k in range(dim2):
tmpY = np.real(F_roi_data_3d[i,j,k,:])
tmp_coef = linreg_op.dot(tmpY)
# debug
#tmp_coef2 = np.linalg.lstsq(X,tmpY)[0]
#print np.linalg.norm(tmp_coef-tmp_coef2)
coef[i,j,k,:] += tmp_coef
if Flag_reg_stats:
tmpY_hat = np.dot(X,tmp_coef)
tmp_res = tmpY_hat-tmpY
SSE = np.dot(tmp_res,tmp_res)
SST = np.sum((tmpY-np.mean(tmpY))**2)
if SSE== 0:
F[i,j,k] += 0
else:
F[i,j,k] += (SST-SSE)/(p-1)/(SSE/(n_trials-p))
# imaginary
tmpY = np.imag(F_roi_data_3d[i,j,k,:])
tmp_coef = linreg_op.dot(tmpY)
coef[i,j,k,:] += tmp_coef*1j
if Flag_reg_stats:
tmpY_hat = np.dot(X,tmp_coef)
tmp_res = tmpY_hat-tmpY
SSE = np.dot(tmp_res,tmp_res)
SST = np.sum((tmpY-np.mean(tmpY))**2)
if SSE== 0:
F[i,j,k] += 0
else:
F[i,j,k] += (SST-SSE)/(p-1)/(SSE/(n_trials-p))*1j
result_dict = dict(coef = coef, F = F, sel = sel,roi_data_3D = roi_data)
return result_dict
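# A hedged calling sketch (all objects below are assumed to exist with the
# shapes described in the docstring above):
#   res = mne_stft_regression_individual_G(evoked_list, inverse_operator_list,
#                                          G_ind, X, labels=labels, snr=1)
#   coef = res['coef']  # [n_dipoles, n_freqs, n_steps, p] complex array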
#===============================================================
def get_MSE_mne_stft_regression_individual_G(evoked_list, fwd_list, G_ind, X, coef, labels,
wsize = 16, tstep = 4):
'''
Use the mne regression coefficients to get predicted sensor data,
    then obtain the sum of squared error
Input:
evoked_list, a list of evoked objects
fwd, the forward solution
X, the design matrix,
coef, the regression coefficients, [n_dipoles,n_coefs,n_steps,p]
wsize, STFT window size
tstep, STFT time step
Output:
MSE, the sum of squared error across trials
'''
sel = []
n_dipoles = fwd_list[0]['nsource']
if labels is not None:
for i in range(len(labels)):
_, sel_tmp = mne.source_space.label_src_vertno_sel(labels[i],fwd_list[0]['src'])
sel = np.hstack([sel, sel_tmp])
sel = sel.astype(np.int)
else:
sel = np.arange(0,n_dipoles,1)
sel.astype(np.int)
    # prepare the forward solution
evoked_ch_names = evoked_list[0].info['ch_names']
fwd_ch_names = fwd_list[0]['info']['ch_names']
channel_sel = [i for i in range(len(fwd_ch_names)) \
if fwd_ch_names[i] in evoked_ch_names]
ntimes = len(evoked_list[0].times)
G_list = list()
n_run = len(np.unique(G_ind))
for run_id in range(n_run):
G = fwd_list[run_id]['sol']['data'][channel_sel,:]
G = G[:,sel]
G_list.append(G)
n_trials,p = X.shape
if n_trials != len(evoked_list):
raise ValueError("the numbers of trials do not match")
SSE = 0.0
for r in range(n_trials):
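        # coef has shape [n_dipoles, n_freqs, n_steps, p]; multiplying by the
        # length-p design row X[r, :] broadcasts over the last axis, and summing
        # over that axis gives the predicted STFT coefficients for trial r.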
# STFT coefficients of current trial
#predicted_stft_coef = np.zeros(coef.shape[0:3], dtype = np.complex)
#for j in range(p):
# predicted_stft_coef += coef[:,:,:,j]*X[r,j]
predicted_stft_coef = np.sum(coef*X[r,:],axis = 3)
# istft
G = G_list[G_ind[r]]
predicted_sensor = G.dot(np.real(istft(predicted_stft_coef, tstep = tstep, Tx = ntimes)))
SSE += np.sum((evoked_list[r].data - predicted_sensor)**2)
MSE = SSE/(n_trials)
return MSE
# ==============================================================
def select_lambda_tuning_mne_stft_regression_cv_individual_G(evoked_list, inverse_operator_list,
fwd_list, G_ind, X, cv_partition_ind,
snr_tuning_seq,
labels = None,
wsize= 16, tstep = 4):
'''
Use cross-validation to select the best lambda (tuning snr values)
All source points across the whole brain must be used,
This may require a large membory
Input:
evoked_list, n_trials of evoked objects
inverse_operator, the inverse_operator,
fwd, the forward solution
X, [n_trials,p] the design matrix
        cv_partition_ind, [n_trials,] partition index for cross validation
snr_tuning_seq, a sequence of "snr" parameter
wsize, STFT window size
tstep, STFT time step
Output:
        best_snr_tuning, the best snr parameter
        cv_MSE, the cross-validated MSE for each snr parameter
'''
n_fold = len(np.unique(cv_partition_ind))
    # number of tuning parameters
n_par_tuning = len(snr_tuning_seq)
cv_MSE = np.ones([len(snr_tuning_seq),n_fold], dtype = np.float)*np.Inf
for j in range(n_fold):
# partition
test_trials = np.nonzero(cv_partition_ind == j)[0]
train_trials = np.nonzero(cv_partition_ind != j)[0]
evoked_list_train = [evoked_list[r] for r in range(len(evoked_list)) \
if r in train_trials]
Xtrain = X[train_trials,:]
evoked_list_test = [evoked_list[r] for r in range(len(evoked_list)) \
if r in test_trials]
Xtest = X[test_trials,:]
G_ind_train = G_ind[train_trials]
G_ind_test = G_ind[test_trials]
for i in range(n_par_tuning):
tmp_snr = snr_tuning_seq[i]
tmp_result = mne_stft_regression_individual_G(evoked_list_train, inverse_operator_list, G_ind_train,
Xtrain, labels = labels,
snr=tmp_snr, wsize = wsize, tstep = tstep)
coef = tmp_result['coef']
# Now do the prediction
tmp_MSE = get_MSE_mne_stft_regression_individual_G(evoked_list_test, fwd_list, G_ind_test, Xtest,
coef, labels = labels,
wsize = wsize, tstep = tstep)
cv_MSE[i,j] = tmp_MSE
cv_MSE = cv_MSE.mean(axis = 1)
best_ind = np.argmin(cv_MSE)
snr_tuning_star = snr_tuning_seq[best_ind]
return snr_tuning_star, cv_MSE
|
gpl-3.0
| 9,214,749,064,001,478,000
| 42.450172
| 113
| 0.529779
| false
| 3.4506
| true
| false
| false
|
google-research/google-research
|
stacked_capsule_autoencoders/capsules/math_ops.py
|
1
|
4235
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Math ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
def relu1(x):
return tf.nn.relu6(x * 6.) / 6.
def safe_log(tensor, eps=1e-16):
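  # Entries below `eps` are treated as zero: they bypass tf.log and are mapped
  # to a large negative constant (-1e8) instead of -inf.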
is_zero = tf.less(tensor, eps)
tensor = tf.where(is_zero, tf.ones_like(tensor), tensor)
tensor = tf.where(is_zero, tf.zeros_like(tensor) - 1e8, tf.log(tensor))
return tensor
def safe_ce(labels, probs, axis=-1):
return tf.reduce_mean(-tf.reduce_sum(labels * safe_log(probs), axis=axis))
def flat_reduce(tensor, reduce_type='sum', final_reduce='mean'):
"""Flattens the tensor and reduces it."""
def _reduce(tensor, how, *args):
return getattr(tf, 'reduce_{}'.format(how))(tensor, *args) # pylint:disable=not-callable
tensor = snt.BatchFlatten()(tensor)
tensor = _reduce(tensor, reduce_type, -1)
if final_reduce is not None:
tensor = _reduce(tensor, final_reduce)
return tensor
def to_homogenous(tensor):
one = tf.ones_like(tensor[Ellipsis, :1])
return tf.concat([tensor, one], -1)
def from_homogenous(tensor):
tensor = tensor[Ellipsis, :-1] / (tensor[Ellipsis, -1:] + 1e-8)
return tensor
def apply_transform(transform, tensor=None, affine=True):
"""Applies a linear transform to a tensor.
Returns the translation components of the transform if tensor=None.
Args:
transform: [..., d+1, d+1] tensor.
tensor: [..., d] tensor or None.
affine: boolean; assumes affine transformation if True and does a smaller
matmul + offset instead of matmul.
Returns:
[..., d] tensor.
"""
if tensor is None:
# extract translation
tensor = transform[Ellipsis, :-1, -1]
elif affine:
tensor = tf.matmul(tensor, transform[Ellipsis, :-1, :-1], transpose_b=True)
tensor = (tensor + transform[Ellipsis, :-1, -1])
else:
tensor = to_homogenous(tensor)
tensor = tf.matmul(tensor, transform, transpose_b=True)
tensor = from_homogenous(tensor)
return tensor
def geometric_transform(pose_tensor, similarity=False, nonlinear=True,
                        as_matrix=False):
  """Converts a parameter tensor into an affine or similarity transform.
  Args:
    pose_tensor: [..., 6] tensor of (scale_x, scale_y, theta, shear, trans_x,
      trans_y) parameters.
    similarity: bool.
    nonlinear: bool; applies nonlinearities to pose params if True.
    as_matrix: bool; converts the transform to a matrix if True.
  Returns:
    [..., 3, 3] tensor if `as_matrix` else [..., 6] tensor.
  """
scale_x, scale_y, theta, shear, trans_x, trans_y = tf.split(
pose_tensor, 6, -1)
if nonlinear:
scale_x, scale_y = (tf.nn.sigmoid(i) + 1e-2
for i in (scale_x, scale_y))
trans_x, trans_y, shear = (
tf.nn.tanh(i * 5.) for i in (trans_x, trans_y, shear))
theta *= 2. * math.pi
else:
scale_x, scale_y = (abs(i) + 1e-2 for i in (scale_x, scale_y))
c, s = tf.cos(theta), tf.sin(theta)
if similarity:
scale = scale_x
pose = [scale * c, -scale * s, trans_x, scale * s, scale * c, trans_y]
else:
pose = [
scale_x * c + shear * scale_y * s, -scale_x * s + shear * scale_y * c,
trans_x, scale_y * s, scale_y * c, trans_y
]
pose = tf.concat(pose, -1)
# convert to a matrix
if as_matrix:
shape = pose.shape[:-1].as_list()
shape += [2, 3]
pose = tf.reshape(pose, shape)
zeros = tf.zeros_like(pose[Ellipsis, :1, 0])
last = tf.stack([zeros, zeros, zeros + 1], -1)
pose = tf.concat([pose, last], -2)
return pose
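# Minimal usage sketch (the pose values are made-up): a batch with one all-zero
# pose vector [scale_x, scale_y, theta, shear, trans_x, trans_y] is squashed by
# the nonlinearities and returned as a [1, 3, 3] transform matrix.
def _example_geometric_transform():
  pose = tf.constant([[0., 0., 0., 0., 0., 0.]])
  return geometric_transform(pose, as_matrix=True)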
def normalize(tensor, axis):
return tensor / (tf.reduce_sum(tensor, axis, keepdims=True) + 1e-8)
|
apache-2.0
| -4,903,853,938,586,949,000
| 26.322581
| 93
| 0.647934
| false
| 3.174663
| false
| false
| false
|
1fish2/the-blue-alliance
|
controllers/backup_controller.py
|
1
|
12856
|
import cloudstorage
import csv
import datetime
import json
import logging
import os
import StringIO
import tba_config
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from helpers.award_manipulator import AwardManipulator
from helpers.event_manipulator import EventManipulator
from helpers.match_manipulator import MatchManipulator
from models.award import Award
from models.event import Event
from models.match import Match
from models.team import Team
from datafeeds.csv_alliance_selections_parser import CSVAllianceSelectionsParser
from datafeeds.csv_awards_parser import CSVAwardsParser
from datafeeds.offseason_matches_parser import OffseasonMatchesParser
class TbaCSVBackupEventsEnqueue(webapp.RequestHandler):
"""
Enqueues CSV backup
"""
def get(self, year=None):
if year is None:
years = range(1992, datetime.datetime.now().year + 1)
for y in years:
taskqueue.add(
url='/tasks/enqueue/csv_backup_events/{}'.format(y),
method='GET')
self.response.out.write("Enqueued backup for years: {}".format(years))
else:
event_keys = Event.query(Event.year == int(year)).fetch(None, keys_only=True)
for event_key in event_keys:
taskqueue.add(
url='/tasks/do/csv_backup_event/{}'.format(event_key.id()),
method='GET')
template_values = {'event_keys': event_keys}
path = os.path.join(os.path.dirname(__file__), '../templates/backup/csv_backup_enqueue.html')
self.response.out.write(template.render(path, template_values))
class TbaCSVBackupEventDo(webapp.RequestHandler):
"""
Backs up event awards, matches, team list, rankings, and alliance selection order
"""
AWARDS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_awards.csv' # % (year, event_key, event_key)
MATCHES_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_matches.csv' # % (year, event_key, event_key)
TEAMS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_teams.csv' # % (year, event_key, event_key)
RANKINGS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_rankings.csv' # % (year, event_key, event_key)
ALLIANCES_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/events/{}/{}/{}_alliances.csv' # % (year, event_key, event_key)
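    # CSV row layouts produced by get() below:
    #   awards:    award key, award name string, team key ('frcXXXX'), awardee
    #   matches:   match key, red team keys, blue team keys, red score, blue score
    #   teams:     a single row containing every team key at the event
    #   rankings:  the event's ranking rows written as-is
    #   alliances: one row per alliance, listing its picks in order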
def get(self, event_key):
event = Event.get_by_id(event_key)
event.prepAwardsMatchesTeams()
if event.awards:
with cloudstorage.open(self.AWARDS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as awards_file:
writer = csv.writer(awards_file, delimiter=',')
for award in event.awards:
for recipient in award.recipient_list:
team = recipient['team_number']
if type(team) == int:
team = 'frc{}'.format(team)
self._writerow_unicode(writer, [award.key.id(), award.name_str, team, recipient['awardee']])
if event.matches:
with cloudstorage.open(self.MATCHES_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as matches_file:
writer = csv.writer(matches_file, delimiter=',')
for match in event.matches:
red_score = match.alliances['red']['score']
blue_score = match.alliances['blue']['score']
self._writerow_unicode(writer, [match.key.id()] + match.alliances['red']['teams'] + match.alliances['blue']['teams'] + [red_score, blue_score])
if event.teams:
with cloudstorage.open(self.TEAMS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as teams_file:
writer = csv.writer(teams_file, delimiter=',')
self._writerow_unicode(writer, [team.key.id() for team in event.teams])
if event.rankings:
with cloudstorage.open(self.RANKINGS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as rankings_file:
writer = csv.writer(rankings_file, delimiter=',')
for row in event.rankings:
self._writerow_unicode(writer, row)
if event.alliance_selections:
with cloudstorage.open(self.ALLIANCES_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as alliances_file:
writer = csv.writer(alliances_file, delimiter=',')
for alliance in event.alliance_selections:
self._writerow_unicode(writer, alliance['picks'])
self.response.out.write("Done backing up {}!".format(event_key))
def _writerow_unicode(self, writer, row):
unicode_row = []
for s in row:
try:
unicode_row.append(s.encode("utf-8"))
except:
unicode_row.append(s)
writer.writerow(unicode_row)
class TbaCSVRestoreEventsEnqueue(webapp.RequestHandler):
"""
Enqueues CSV restore
"""
def get(self, year=None):
if tba_config.CONFIG["env"] == "prod": # disable in prod for now
logging.error("Tried to restore events from CSV for year {} in prod! No can do.".format(year))
return
if year is None:
years = range(1992, datetime.datetime.now().year + 1)
for y in years:
taskqueue.add(
url='/tasks/enqueue/csv_restore_events/{}'.format(y),
method='GET')
self.response.out.write("Enqueued restore for years: {}".format(years))
else:
event_keys = Event.query(Event.year == int(year)).fetch(None, keys_only=True)
for event_key in event_keys:
taskqueue.add(
url='/tasks/do/csv_restore_event/{}'.format(event_key.id()),
method='GET')
template_values = {'event_keys': event_keys}
path = os.path.join(os.path.dirname(__file__), '../templates/backup/csv_restore_enqueue.html')
self.response.out.write(template.render(path, template_values))
class TbaCSVRestoreEventDo(webapp.RequestHandler):
"""
Restores event awards, matches, team list, rankings, and alliance selection order
"""
BASE_URL = 'https://raw.githubusercontent.com/the-blue-alliance/tba-data-backup/master/events/{}/{}/' # % (year, event_key)
ALLIANCES_URL = BASE_URL + '{}_alliances.csv' # % (year, event_key, event_key)
AWARDS_URL = BASE_URL + '{}_awards.csv' # % (year, event_key, event_key)
MATCHES_URL = BASE_URL + '{}_matches.csv' # % (year, event_key, event_key)
RANKINGS_URL = BASE_URL + '{}_rankings.csv' # % (year, event_key, event_key)
# TEAMS_URL = BASE_URL + '{}_teams.csv' # % (year, event_key, event_key) # currently unused
def get(self, event_key):
if tba_config.CONFIG["env"] == "prod": # disable in prod for now
logging.error("Tried to restore {} from CSV in prod! No can do.".format(event_key))
return
event = Event.get_by_id(event_key)
# alliances
result = urlfetch.fetch(self.ALLIANCES_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.ALLIANCES_URL.format(event.year, event_key, event_key)))
else:
data = result.content.replace('frc', '')
alliance_selections = CSVAllianceSelectionsParser.parse(data)
if alliance_selections and event.alliance_selections != alliance_selections:
event.alliance_selections_json = json.dumps(alliance_selections)
event._alliance_selections = None
event.dirty = True
EventManipulator.createOrUpdate(event)
# awards
result = urlfetch.fetch(self.AWARDS_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.AWARDS_URL.format(event.year, event_key, event_key)))
else:
# convert into expected input format
data = StringIO.StringIO()
writer = csv.writer(data, delimiter=',')
for row in csv.reader(StringIO.StringIO(result.content), delimiter=','):
writer.writerow([event.year, event.event_short, row[1], row[2].replace('frc', ''), row[3]])
awards = []
for award in CSVAwardsParser.parse(data.getvalue()):
awards.append(Award(
id=Award.render_key_name(event.key_name, award['award_type_enum']),
name_str=award['name_str'],
award_type_enum=award['award_type_enum'],
year=event.year,
event=event.key,
event_type_enum=event.event_type_enum,
team_list=[ndb.Key(Team, 'frc{}'.format(team_number)) for team_number in award['team_number_list']],
recipient_json_list=award['recipient_json_list']
))
AwardManipulator.createOrUpdate(awards)
# matches
result = urlfetch.fetch(self.MATCHES_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.MATCHES_URL.format(event.year, event_key, event_key)))
else:
data = result.content.replace('frc', '').replace('{}_'.format(event_key), '')
match_dicts, _ = OffseasonMatchesParser.parse(data)
matches = [
Match(
id=Match.renderKeyName(
event.key.id(),
match.get("comp_level", None),
match.get("set_number", 0),
match.get("match_number", 0)),
event=event.key,
game=Match.FRC_GAMES_BY_YEAR.get(event.year, "frc_unknown"),
set_number=match.get("set_number", 0),
match_number=match.get("match_number", 0),
comp_level=match.get("comp_level", None),
team_key_names=match.get("team_key_names", None),
alliances_json=match.get("alliances_json", None)
)
for match in match_dicts]
MatchManipulator.createOrUpdate(matches)
# rankings
result = urlfetch.fetch(self.RANKINGS_URL.format(event.year, event_key, event_key))
if result.status_code != 200:
            logging.warning('Unable to retrieve url: ' + (self.RANKINGS_URL.format(event.year, event_key, event_key)))
else:
# convert into expected input format
rankings = list(csv.reader(StringIO.StringIO(result.content), delimiter=','))
if rankings and event.rankings != rankings:
event.rankings_json = json.dumps(rankings)
event._rankings = None
event.dirty = True
EventManipulator.createOrUpdate(event)
self.response.out.write("Done restoring {}!".format(event_key))
class TbaCSVBackupTeamsEnqueue(webapp.RequestHandler):
"""
Enqueues CSV teams backup
"""
def get(self):
taskqueue.add(
url='/tasks/do/csv_backup_teams',
method='GET')
self.response.out.write("Enqueued CSV teams backup")
class TbaCSVBackupTeamsDo(webapp.RequestHandler):
"""
Backs up teams
"""
TEAMS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/teams/teams.csv'
def get(self):
team_keys = Team.query().order(Team.team_number).fetch(None, keys_only=True)
team_futures = ndb.get_multi_async(team_keys)
if team_futures:
with cloudstorage.open(self.TEAMS_FILENAME_PATTERN, 'w') as teams_file:
writer = csv.writer(teams_file, delimiter=',')
for team_future in team_futures:
team = team_future.get_result()
self._writerow_unicode(writer, [team.key.id(), team.nickname, team.name, team.address, team.website, team.rookie_year])
self.response.out.write("Done backing up teams!")
def _writerow_unicode(self, writer, row):
unicode_row = []
for s in row:
try:
unicode_row.append(s.encode("utf-8"))
except:
unicode_row.append(s)
writer.writerow(unicode_row)
|
mit
| 3,481,643,148,437,116,000
| 44.588652
| 163
| 0.596842
| false
| 3.739383
| false
| false
| false
|
neingeist/azulejo
|
azulejo/WindowTools.py
|
1
|
3160
|
'''
Created on Jul 12, 2012
@author: gillesB
'''
from Window import Window
from Workarea import Workarea
class WindowTools(object):
"""
    Some utilities for working with windows
"""
@staticmethod
def get_active_window():
"""
Returns the active window
:return: the active window
:rtype: Window
"""
XID = Workarea._root_window.get_full_property(Workarea.atom("_NET_ACTIVE_WINDOW"), 0).value[0]
return Window(XID)
@staticmethod
def print_window_info(keybinding, param):
"""
        Prints some information about the currently active window.
:param keybinding:
:type keybinding:
:param param:
:type param:
"""
window = WindowTools.get_active_window()
assert isinstance(window, Window)
window_geometry = window.get_geometry()
print "Screen resolution: "
print "Workarea width and height: ", Workarea.get_workarea_width(), Workarea.get_workarea_height()
print "Window title: ", window.get_name()
print "Window width and height", window_geometry["width"], window_geometry["height"] , "+ frame size: ", window.get_frame_extents()
print "Window position", window_geometry["x"], window_geometry["y"]
@staticmethod
def get_normal_windows_on_current_desktop():
"""
Returns all 'normal' windows which are visible on the current desktop.
:return: all 'normal' windows which are visible on the current desktop
:rtype: list[Window]
"""
def m_get_window_from_XID(XID):
return Window(XID)
def f_normal_window(window):
if WindowTools.window_is_on_current_desktop(window) and WindowTools.window_is_window_type_normal(window):
return True
return False
XIDs = Workarea.get_all_XIDs()
windows = map(m_get_window_from_XID, XIDs)
filtered_windows = filter(f_normal_window, windows)
filtered_windows.reverse()
return filtered_windows
@staticmethod
def window_is_on_current_desktop(window):
"""
Returns True if window is on current desktop, False otherwise
:param window:
:type window: Window
:return: True if window is on current desktop, False otherwise
:rtype: bool
"""
if Workarea.get_current_desktop() == window.get_desktop_id():
return True
return False
@staticmethod
def window_is_window_type_normal(window):
"""
Returns True if window is a normal window, False otherwise
:param window:
:type window: Window
:return: True if window is a normal window, False otherwise
:rtype: bool
"""
window_type = window.get_window_type()
if (window_type == Workarea.atom("_NET_WM_WINDOW_TYPE_NORMAL")
or (window_type is None and window.get_transient_for() is None)):
return True
return False
def __init__(self, params):
'''
Constructor
'''
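# Illustrative helper, not part of the original module; it only combines the
# utilities above and assumes a running X session so Workarea/Window can query
# the display.
def print_normal_window_names():
    """Prints the title of every normal window on the current desktop."""
    for window in WindowTools.get_normal_windows_on_current_desktop():
        print window.get_name()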
|
mit
| -2,038,185,904,146,247,700
| 29.990196
| 139
| 0.592089
| false
| 4.31105
| false
| false
| false
|
HomeRad/TorCleaner
|
wc/dns/rdtypes/IN/PX.py
|
1
|
3823
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import wc.dns.exception
import wc.dns.rdata
import wc.dns.name
class PX(wc.dns.rdata.Rdata):
"""PX record.
@ivar preference: the preference value
@type preference: int
@ivar map822: the map822 name
@type map822: wc.dns.name.Name object
@ivar mapx400: the mapx400 name
@type mapx400: wc.dns.name.Name object
@see: RFC 2163"""
__slots__ = ['preference', 'map822', 'mapx400']
def __init__(self, rdclass, rdtype, preference, map822, mapx400):
super(PX, self).__init__(rdclass, rdtype)
self.preference = preference
self.map822 = map822
self.mapx400 = mapx400
def to_text(self, origin=None, relativize=True, **kw):
map822 = self.map822.choose_relativity(origin, relativize)
mapx400 = self.mapx400.choose_relativity(origin, relativize)
return '%d %s %s' % (self.preference, map822, mapx400)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
preference = tok.get_uint16()
map822 = tok.get_name()
map822 = map822.choose_relativity(origin, relativize)
mapx400 = tok.get_name(None)
mapx400 = mapx400.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, preference, map822, mapx400)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
pref = struct.pack("!H", self.preference)
file.write(pref)
self.map822.to_wire(file, None, origin)
self.mapx400.to_wire(file, None, origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(preference, ) = struct.unpack('!H', wire[current : current + 2])
current += 2
rdlen -= 2
(map822, cused) = wc.dns.name.from_wire(wire[: current + rdlen],
current)
if cused > rdlen:
raise wc.dns.exception.FormError
current += cused
rdlen -= cused
if not origin is None:
map822 = map822.relativize(origin)
(mapx400, cused) = wc.dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise wc.dns.exception.FormError
if not origin is None:
mapx400 = mapx400.relativize(origin)
return cls(rdclass, rdtype, preference, map822, mapx400)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.map822 = self.map822.choose_relativity(origin, relativize)
self.mapx400 = self.mapx400.choose_relativity(origin, relativize)
def _cmp(self, other):
sp = struct.pack("!H", self.preference)
op = struct.pack("!H", other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.map822, other.map822)
if v == 0:
v = cmp(self.mapx400, other.mapx400)
return v
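# Illustrative sketch, not part of the original module: builds a PX rdata by
# hand and renders its zone-file text form "<preference> <map822> <mapx400>".
# It assumes the usual wc.dns.rdataclass / wc.dns.rdatatype constants from this
# package; nothing here refers to a real zone.
def _example_px_to_text():
    import wc.dns.rdataclass
    import wc.dns.rdatatype
    map822 = wc.dns.name.from_text('map822.example.')
    mapx400 = wc.dns.name.from_text('x400.example.')
    px = PX(wc.dns.rdataclass.IN, wc.dns.rdatatype.PX, 10, map822, mapx400)
    return px.to_text()  # '10 map822.example. x400.example.'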
|
gpl-2.0
| -6,621,502,497,215,966,000
| 38.010204
| 79
| 0.63458
| false
| 3.647901
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/models/_models_py3.py
|
1
|
64701
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AccessUri(msrest.serialization.Model):
"""A disk access SAS uri.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar access_sas: A SAS uri for accessing a disk.
:vartype access_sas: str
"""
_validation = {
'access_sas': {'readonly': True},
}
_attribute_map = {
'access_sas': {'key': 'accessSAS', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessUri, self).__init__(**kwargs)
self.access_sas = None
class ApiError(msrest.serialization.Model):
"""Api error.
:param details: The Api error details.
:type details: list[~azure.mgmt.compute.v2020_05_01.models.ApiErrorBase]
:param innererror: The Api inner error.
:type innererror: ~azure.mgmt.compute.v2020_05_01.models.InnerError
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'details': {'key': 'details', 'type': '[ApiErrorBase]'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
details: Optional[List["ApiErrorBase"]] = None,
innererror: Optional["InnerError"] = None,
code: Optional[str] = None,
target: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ApiError, self).__init__(**kwargs)
self.details = details
self.innererror = innererror
self.code = code
self.target = target
self.message = message
class ApiErrorBase(msrest.serialization.Model):
"""Api error base.
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
target: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ApiErrorBase, self).__init__(**kwargs)
self.code = code
self.target = target
self.message = message
class CreationData(msrest.serialization.Model):
"""Data used when creating a disk.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param create_option: Required. This enumerates the possible sources of a disk's creation.
Possible values include: "Empty", "Attach", "FromImage", "Import", "Copy", "Restore", "Upload".
:type create_option: str or ~azure.mgmt.compute.v2020_05_01.models.DiskCreateOption
:param storage_account_id: Required if createOption is Import. The Azure Resource Manager
identifier of the storage account containing the blob to import as a disk.
:type storage_account_id: str
:param image_reference: Disk source information.
:type image_reference: ~azure.mgmt.compute.v2020_05_01.models.ImageDiskReference
:param gallery_image_reference: Required if creating from a Gallery Image. The id of the
     ImageDiskReference will be the ARM id of the shared gallery image version from which to create a
disk.
:type gallery_image_reference: ~azure.mgmt.compute.v2020_05_01.models.ImageDiskReference
:param source_uri: If createOption is Import, this is the URI of a blob to be imported into a
managed disk.
:type source_uri: str
:param source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot
or disk.
:type source_resource_id: str
:ivar source_unique_id: If this field is set, this is the unique id identifying the source of
this resource.
:vartype source_unique_id: str
:param upload_size_bytes: If createOption is Upload, this is the size of the contents of the
upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for
the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
:type upload_size_bytes: long
"""
_validation = {
'create_option': {'required': True},
'source_unique_id': {'readonly': True},
}
_attribute_map = {
'create_option': {'key': 'createOption', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'},
'gallery_image_reference': {'key': 'galleryImageReference', 'type': 'ImageDiskReference'},
'source_uri': {'key': 'sourceUri', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'},
'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'},
}
def __init__(
self,
*,
create_option: Union[str, "DiskCreateOption"],
storage_account_id: Optional[str] = None,
image_reference: Optional["ImageDiskReference"] = None,
gallery_image_reference: Optional["ImageDiskReference"] = None,
source_uri: Optional[str] = None,
source_resource_id: Optional[str] = None,
upload_size_bytes: Optional[int] = None,
**kwargs
):
super(CreationData, self).__init__(**kwargs)
self.create_option = create_option
self.storage_account_id = storage_account_id
self.image_reference = image_reference
self.gallery_image_reference = gallery_image_reference
self.source_uri = source_uri
self.source_resource_id = source_resource_id
self.source_unique_id = None
self.upload_size_bytes = upload_size_bytes
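def _example_import_creation_data():
    """Illustrative sketch only; the resource id and blob url below are made-up
    placeholders. Per the field docs above, createOption 'Import' requires both
    storage_account_id and source_uri.
    """
    return CreationData(
        create_option="Import",
        storage_account_id="/subscriptions/<sub-id>/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/examplestore",
        source_uri="https://examplestore.blob.core.windows.net/vhds/example.vhd",
    )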
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class Disk(Resource):
"""Disk resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar managed_by: A relative URI containing the ID of the VM that has the disk attached.
:vartype managed_by: str
:ivar managed_by_extended: List of relative URIs containing the IDs of the VMs that have the
disk attached. maxShares should be set to a value greater than one for disks to allow attaching
them to multiple VMs.
:vartype managed_by_extended: list[str]
:param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.DiskSku
:param zones: The Logical zone list for Disk.
:type zones: list[str]
:ivar time_created: The time when the disk was created.
:vartype time_created: ~datetime.datetime
:param os_type: The Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
disks only. Possible values include: "V1", "V2".
:type hyper_v_generation: str or ~azure.mgmt.compute.v2020_05_01.models.HyperVGeneration
:param creation_data: Disk source information. CreationData information cannot be changed after
the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2020_05_01.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
:vartype disk_size_bytes: long
:ivar unique_id: Unique Guid identifying the resource.
:vartype unique_id: str
:param encryption_settings_collection: Encryption settings collection used for Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
:param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
UltraSSD disks. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_write: long
:param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
10.
:type disk_m_bps_read_write: long
:param disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_only: long
:param disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
the ISO notation, of powers of 10.
:type disk_m_bps_read_only: long
:ivar disk_state: The state of the disk. Possible values include: "Unattached", "Attached",
"Reserved", "ActiveSAS", "ReadyToUpload", "ActiveUpload".
:vartype disk_state: str or ~azure.mgmt.compute.v2020_05_01.models.DiskState
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param max_shares: The maximum number of VMs that can attach to the disk at the same time.
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
:type max_shares: int
:ivar share_info: Details of the list of all VMs that have the disk attached. maxShares should
be set to a value greater than one for disks to allow attaching them to multiple VMs.
:vartype share_info: list[~azure.mgmt.compute.v2020_05_01.models.ShareInfoElement]
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'managed_by_extended': {'readonly': True},
'time_created': {'readonly': True},
'disk_size_bytes': {'readonly': True},
'unique_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'disk_state': {'readonly': True},
'share_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'managed_by_extended': {'key': 'managedByExtended', 'type': '[str]'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'zones': {'key': 'zones', 'type': '[str]'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'},
'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'},
'disk_m_bps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'},
'disk_state': {'key': 'properties.diskState', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'max_shares': {'key': 'properties.maxShares', 'type': 'int'},
'share_info': {'key': 'properties.shareInfo', 'type': '[ShareInfoElement]'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["DiskSku"] = None,
zones: Optional[List[str]] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
creation_data: Optional["CreationData"] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
disk_iops_read_write: Optional[int] = None,
disk_m_bps_read_write: Optional[int] = None,
disk_iops_read_only: Optional[int] = None,
disk_m_bps_read_only: Optional[int] = None,
encryption: Optional["Encryption"] = None,
max_shares: Optional[int] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(Disk, self).__init__(location=location, tags=tags, **kwargs)
self.managed_by = None
self.managed_by_extended = None
self.sku = sku
self.zones = zones
self.time_created = None
self.os_type = os_type
self.hyper_v_generation = hyper_v_generation
self.creation_data = creation_data
self.disk_size_gb = disk_size_gb
self.disk_size_bytes = None
self.unique_id = None
self.encryption_settings_collection = encryption_settings_collection
self.provisioning_state = None
self.disk_iops_read_write = disk_iops_read_write
self.disk_m_bps_read_write = disk_m_bps_read_write
self.disk_iops_read_only = disk_iops_read_only
self.disk_m_bps_read_only = disk_m_bps_read_only
self.disk_state = None
self.encryption = encryption
self.max_shares = max_shares
self.share_info = None
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
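def _example_empty_disk():
    """Illustrative sketch only; location, sku and size are made-up values, not
    recommendations. Shows the minimal combination described above for an empty
    managed disk: creation_data with createOption 'Empty' plus disk_size_gb.
    """
    return Disk(
        location="westus2",
        sku=DiskSku(name="Premium_LRS"),
        creation_data=CreationData(create_option="Empty"),
        disk_size_gb=10,
    )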
class DiskAccess(Resource):
"""disk access resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar private_endpoint_connections: A readonly collection of private endpoint connections
created on the disk. Currently only one endpoint connection is supported.
:vartype private_endpoint_connections:
list[~azure.mgmt.compute.v2020_05_01.models.PrivateEndpointConnection]
:ivar provisioning_state: The disk access resource provisioning state.
:vartype provisioning_state: str
:ivar time_created: The time when the disk access was created.
:vartype time_created: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'private_endpoint_connections': {'readonly': True},
'provisioning_state': {'readonly': True},
'time_created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(DiskAccess, self).__init__(location=location, tags=tags, **kwargs)
self.private_endpoint_connections = None
self.provisioning_state = None
self.time_created = None
class DiskAccessList(msrest.serialization.Model):
"""The List disk access operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disk access resources.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.DiskAccess]
:param next_link: The uri to fetch the next page of disk access resources. Call ListNext() with
this to fetch the next page of disk access resources.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DiskAccess]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DiskAccess"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskAccessList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskAccessUpdate(msrest.serialization.Model):
"""Used for updating a disk access resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(DiskAccessUpdate, self).__init__(**kwargs)
self.tags = tags
class DiskEncryptionSet(Resource):
"""disk encryption set resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The managed identity for the disk encryption set. It should be given
permission on the key vault before it can be used to encrypt disks.
:type identity: ~azure.mgmt.compute.v2020_05_01.models.EncryptionSetIdentity
:param encryption_type: The type of key used to encrypt the data of the disk. Possible values
include: "EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey",
"EncryptionAtRestWithPlatformAndCustomerKeys".
:type encryption_type: str or ~azure.mgmt.compute.v2020_05_01.models.EncryptionType
:param active_key: The key vault key which is currently used by this disk encryption set.
:type active_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference
:ivar previous_keys: A readonly collection of key vault keys previously used by this disk
encryption set while a key rotation is in progress. It will be empty if there is no ongoing key
rotation.
:vartype previous_keys: list[~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference]
:ivar provisioning_state: The disk encryption set provisioning state.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'previous_keys': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'EncryptionSetIdentity'},
'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'},
'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'},
'previous_keys': {'key': 'properties.previousKeys', 'type': '[KeyVaultAndKeyReference]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["EncryptionSetIdentity"] = None,
encryption_type: Optional[Union[str, "EncryptionType"]] = None,
active_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(DiskEncryptionSet, self).__init__(location=location, tags=tags, **kwargs)
self.identity = identity
self.encryption_type = encryption_type
self.active_key = active_key
self.previous_keys = None
self.provisioning_state = None
class DiskEncryptionSetList(msrest.serialization.Model):
"""The List disk encryption set operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disk encryption sets.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSet]
:param next_link: The uri to fetch the next page of disk encryption sets. Call ListNext() with
this to fetch the next page of disk encryption sets.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DiskEncryptionSet]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DiskEncryptionSet"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskEncryptionSetList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskEncryptionSetUpdate(msrest.serialization.Model):
"""disk encryption set update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param encryption_type: The type of key used to encrypt the data of the disk. Possible values
include: "EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey",
"EncryptionAtRestWithPlatformAndCustomerKeys".
:type encryption_type: str or ~azure.mgmt.compute.v2020_05_01.models.EncryptionType
:param active_key: Key Vault Key Url and vault id of KeK, KeK is optional and when provided is
used to unwrap the encryptionKey.
:type active_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'},
'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
encryption_type: Optional[Union[str, "EncryptionType"]] = None,
active_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(DiskEncryptionSetUpdate, self).__init__(**kwargs)
self.tags = tags
self.encryption_type = encryption_type
self.active_key = active_key
class DiskList(msrest.serialization.Model):
"""The List Disks operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disks.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.Disk]
:param next_link: The uri to fetch the next page of disks. Call ListNext() with this to fetch
the next page of disks.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Disk]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Disk"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskSku(msrest.serialization.Model):
"""The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
"StandardSSD_LRS", "UltraSSD_LRS".
:type name: str or ~azure.mgmt.compute.v2020_05_01.models.DiskStorageAccountTypes
:ivar tier: The sku tier.
:vartype tier: str
"""
_validation = {
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "DiskStorageAccountTypes"]] = None,
**kwargs
):
super(DiskSku, self).__init__(**kwargs)
self.name = name
self.tier = None
class DiskUpdate(msrest.serialization.Model):
"""Disk update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.DiskSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
UltraSSD disks. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_write: long
:param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
10.
:type disk_m_bps_read_write: long
:param disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_only: long
:param disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
the ISO notation, of powers of 10.
:type disk_m_bps_read_only: long
:param max_shares: The maximum number of VMs that can attach to the disk at the same time.
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
:type max_shares: int
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'},
'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'},
'disk_m_bps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'},
'max_shares': {'key': 'properties.maxShares', 'type': 'int'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["DiskSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
disk_iops_read_write: Optional[int] = None,
disk_m_bps_read_write: Optional[int] = None,
disk_iops_read_only: Optional[int] = None,
disk_m_bps_read_only: Optional[int] = None,
max_shares: Optional[int] = None,
encryption: Optional["Encryption"] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(DiskUpdate, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.os_type = os_type
self.disk_size_gb = disk_size_gb
self.encryption_settings_collection = encryption_settings_collection
self.disk_iops_read_write = disk_iops_read_write
self.disk_m_bps_read_write = disk_m_bps_read_write
self.disk_iops_read_only = disk_iops_read_only
self.disk_m_bps_read_only = disk_m_bps_read_only
self.max_shares = max_shares
self.encryption = encryption
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
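def _example_resize_disk_update():
    """Illustrative sketch only: a resize to 256 GiB. As the docstring above
    notes, resizes are only allowed while the disk is not attached to a running
    VM and can only increase the size.
    """
    return DiskUpdate(disk_size_gb=256)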
class Encryption(msrest.serialization.Model):
"""Encryption at rest settings for disk or snapshot.
:param disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:type disk_encryption_set_id: str
:param type: The type of key used to encrypt the data of the disk. Possible values include:
"EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey",
"EncryptionAtRestWithPlatformAndCustomerKeys".
:type type: str or ~azure.mgmt.compute.v2020_05_01.models.EncryptionType
"""
_attribute_map = {
'disk_encryption_set_id': {'key': 'diskEncryptionSetId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
disk_encryption_set_id: Optional[str] = None,
type: Optional[Union[str, "EncryptionType"]] = None,
**kwargs
):
super(Encryption, self).__init__(**kwargs)
self.disk_encryption_set_id = disk_encryption_set_id
self.type = type
class EncryptionSetIdentity(msrest.serialization.Model):
"""The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is
supported. Possible values include: "SystemAssigned".
:type type: str or ~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSetIdentityType
:ivar principal_id: The object id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype tenant_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "DiskEncryptionSetIdentityType"]] = None,
**kwargs
):
super(EncryptionSetIdentity, self).__init__(**kwargs)
self.type = type
self.principal_id = None
self.tenant_id = None
class EncryptionSettingsCollection(msrest.serialization.Model):
"""Encryption settings for disk or snapshot.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Set this flag to true and provide DiskEncryptionKey and optional
KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and
KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object,
the existing settings remain unchanged.
:type enabled: bool
:param encryption_settings: A collection of encryption settings, one for each disk volume.
:type encryption_settings:
list[~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsElement]
:param encryption_settings_version: Describes what type of encryption is used for the disks.
Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption
     with AAD app. '1.1' corresponds to Azure Disk Encryption.
:type encryption_settings_version: str
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'encryption_settings': {'key': 'encryptionSettings', 'type': '[EncryptionSettingsElement]'},
'encryption_settings_version': {'key': 'encryptionSettingsVersion', 'type': 'str'},
}
def __init__(
self,
*,
enabled: bool,
encryption_settings: Optional[List["EncryptionSettingsElement"]] = None,
encryption_settings_version: Optional[str] = None,
**kwargs
):
super(EncryptionSettingsCollection, self).__init__(**kwargs)
self.enabled = enabled
self.encryption_settings = encryption_settings
self.encryption_settings_version = encryption_settings_version
class EncryptionSettingsElement(msrest.serialization.Model):
"""Encryption settings for one disk volume.
:param disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key.
:type disk_encryption_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndSecretReference
:param key_encryption_key: Key Vault Key Url and vault id of the key encryption key.
KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
:type key_encryption_key: ~azure.mgmt.compute.v2020_05_01.models.KeyVaultAndKeyReference
"""
_attribute_map = {
'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultAndSecretReference'},
'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultAndKeyReference'},
}
def __init__(
self,
*,
disk_encryption_key: Optional["KeyVaultAndSecretReference"] = None,
key_encryption_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(EncryptionSettingsElement, self).__init__(**kwargs)
self.disk_encryption_key = disk_encryption_key
self.key_encryption_key = key_encryption_key
class GrantAccessData(msrest.serialization.Model):
"""Data used for requesting a SAS.
All required parameters must be populated in order to send to Azure.
:param access: Required. Possible values include: "None", "Read", "Write".
:type access: str or ~azure.mgmt.compute.v2020_05_01.models.AccessLevel
:param duration_in_seconds: Required. Time duration in seconds until the SAS access expires.
:type duration_in_seconds: int
"""
_validation = {
'access': {'required': True},
'duration_in_seconds': {'required': True},
}
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'},
}
def __init__(
self,
*,
access: Union[str, "AccessLevel"],
duration_in_seconds: int,
**kwargs
):
super(GrantAccessData, self).__init__(**kwargs)
self.access = access
self.duration_in_seconds = duration_in_seconds
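def _example_grant_read_access():
    """Illustrative sketch only: request a read-only SAS that expires after one
    hour (3600 seconds), using the required fields documented above.
    """
    return GrantAccessData(access="Read", duration_in_seconds=3600)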
class ImageDiskReference(msrest.serialization.Model):
"""The source image used for creating the disk.
All required parameters must be populated in order to send to Azure.
:param id: Required. A relative uri containing either a Platform Image Repository or user image
reference.
:type id: str
:param lun: If the disk is created from an image's data disk, this is an index that indicates
which of the data disks in the image to use. For OS disks, this field is null.
:type lun: int
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'lun': {'key': 'lun', 'type': 'int'},
}
def __init__(
self,
*,
id: str,
lun: Optional[int] = None,
**kwargs
):
super(ImageDiskReference, self).__init__(**kwargs)
self.id = id
self.lun = lun
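def _example_image_data_disk_reference():
    """Illustrative sketch only; the image id is a made-up placeholder. lun=0
    selects the first data disk of the image, while omitting lun refers to the
    OS disk, as described above.
    """
    return ImageDiskReference(
        id="/subscriptions/<sub-id>/providers/Microsoft.Compute/images/example-image",
        lun=0,
    )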
class InnerError(msrest.serialization.Model):
"""Inner error details.
:param exceptiontype: The exception type.
:type exceptiontype: str
:param errordetail: The internal error message or exception dump.
:type errordetail: str
"""
_attribute_map = {
'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
'errordetail': {'key': 'errordetail', 'type': 'str'},
}
def __init__(
self,
*,
exceptiontype: Optional[str] = None,
errordetail: Optional[str] = None,
**kwargs
):
super(InnerError, self).__init__(**kwargs)
self.exceptiontype = exceptiontype
self.errordetail = errordetail
class KeyVaultAndKeyReference(msrest.serialization.Model):
"""Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the encryptionKey.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the key or secret.
:type source_vault: ~azure.mgmt.compute.v2020_05_01.models.SourceVault
:param key_url: Required. Url pointing to a key or secret in KeyVault.
:type key_url: str
"""
_validation = {
'source_vault': {'required': True},
'key_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'key_url': {'key': 'keyUrl', 'type': 'str'},
}
def __init__(
self,
*,
source_vault: "SourceVault",
key_url: str,
**kwargs
):
super(KeyVaultAndKeyReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.key_url = key_url
class KeyVaultAndSecretReference(msrest.serialization.Model):
"""Key Vault Secret Url and vault id of the encryption key.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the key or secret.
:type source_vault: ~azure.mgmt.compute.v2020_05_01.models.SourceVault
:param secret_url: Required. Url pointing to a key or secret in KeyVault.
:type secret_url: str
"""
_validation = {
'source_vault': {'required': True},
'secret_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'secret_url': {'key': 'secretUrl', 'type': 'str'},
}
def __init__(
self,
*,
source_vault: "SourceVault",
secret_url: str,
**kwargs
):
super(KeyVaultAndSecretReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.secret_url = secret_url
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: private endpoint connection Id.
:vartype id: str
:ivar name: private endpoint connection name.
:vartype name: str
:ivar type: private endpoint connection type.
:vartype type: str
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~azure.mgmt.compute.v2020_05_01.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between DiskAccess and Virtual Network.
:type private_link_service_connection_state:
~azure.mgmt.compute.v2020_05_01.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.compute.v2020_05_01.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
private_endpoint: Optional["PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["PrivateLinkServiceConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
self.provisioning_state = None
class PrivateLinkResource(msrest.serialization.Model):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: private link resource Id.
:vartype id: str
:ivar name: private link resource name.
:vartype name: str
:ivar type: private link resource type.
:vartype type: str
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateLinkResource"]] = None,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or
~azure.mgmt.compute.v2020_05_01.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
class ShareInfoElement(msrest.serialization.Model):
"""ShareInfoElement.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar vm_uri: A relative URI containing the ID of the VM that has the disk attached.
:vartype vm_uri: str
"""
_validation = {
'vm_uri': {'readonly': True},
}
_attribute_map = {
'vm_uri': {'key': 'vmUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareInfoElement, self).__init__(**kwargs)
self.vm_uri = None
class Snapshot(Resource):
"""Snapshot resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar managed_by: Unused. Always Null.
:vartype managed_by: str
:param sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.SnapshotSku
:ivar time_created: The time when the snapshot was created.
:vartype time_created: ~datetime.datetime
:param os_type: The Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
disks only. Possible values include: "V1", "V2".
:type hyper_v_generation: str or ~azure.mgmt.compute.v2020_05_01.models.HyperVGeneration
:param creation_data: Disk source information. CreationData information cannot be changed after
the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2020_05_01.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
:vartype disk_size_bytes: long
:ivar unique_id: Unique Guid identifying the resource.
:vartype unique_id: str
:param encryption_settings_collection: Encryption settings collection used by Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
:param incremental: Whether a snapshot is incremental. Incremental snapshots on the same disk
occupy less space than full snapshots and can be diffed.
:type incremental: bool
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'time_created': {'readonly': True},
'disk_size_bytes': {'readonly': True},
'unique_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'SnapshotSku'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'incremental': {'key': 'properties.incremental', 'type': 'bool'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SnapshotSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
creation_data: Optional["CreationData"] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
incremental: Optional[bool] = None,
encryption: Optional["Encryption"] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(Snapshot, self).__init__(location=location, tags=tags, **kwargs)
self.managed_by = None
self.sku = sku
self.time_created = None
self.os_type = os_type
self.hyper_v_generation = hyper_v_generation
self.creation_data = creation_data
self.disk_size_gb = disk_size_gb
self.disk_size_bytes = None
self.unique_id = None
self.encryption_settings_collection = encryption_settings_collection
self.provisioning_state = None
self.incremental = incremental
self.encryption = encryption
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
class SnapshotList(msrest.serialization.Model):
"""The List Snapshots operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of snapshots.
:type value: list[~azure.mgmt.compute.v2020_05_01.models.Snapshot]
:param next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to
fetch the next page of snapshots.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Snapshot]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Snapshot"],
next_link: Optional[str] = None,
**kwargs
):
super(SnapshotList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class SnapshotSku(msrest.serialization.Model):
"""The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
"Standard_ZRS".
:type name: str or ~azure.mgmt.compute.v2020_05_01.models.SnapshotStorageAccountTypes
:ivar tier: The sku tier.
:vartype tier: str
"""
_validation = {
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "SnapshotStorageAccountTypes"]] = None,
**kwargs
):
super(SnapshotSku, self).__init__(**kwargs)
self.name = name
self.tier = None
class SnapshotUpdate(msrest.serialization.Model):
"""Snapshot update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:type sku: ~azure.mgmt.compute.v2020_05_01.models.SnapshotSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2020_05_01.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:param encryption_settings_collection: Encryption settings collection used by Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2020_05_01.models.EncryptionSettingsCollection
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2020_05_01.models.Encryption
:param network_access_policy: Policy for accessing the disk via network. Possible values
include: "AllowAll", "AllowPrivate", "DenyAll".
:type network_access_policy: str or ~azure.mgmt.compute.v2020_05_01.models.NetworkAccessPolicy
:param disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
:type disk_access_id: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SnapshotSku'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SnapshotSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
encryption: Optional["Encryption"] = None,
network_access_policy: Optional[Union[str, "NetworkAccessPolicy"]] = None,
disk_access_id: Optional[str] = None,
**kwargs
):
super(SnapshotUpdate, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.os_type = os_type
self.disk_size_gb = disk_size_gb
self.encryption_settings_collection = encryption_settings_collection
self.encryption = encryption
self.network_access_policy = network_access_policy
self.disk_access_id = disk_access_id
class SourceVault(msrest.serialization.Model):
"""The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}.
:param id: Resource Id.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(SourceVault, self).__init__(**kwargs)
self.id = id
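# Minimal usage sketch (values are illustrative): the msrest models above are
# plain keyword-argument containers, so an update payload can be assembled
# like this before being passed to a compute client.
# sku = SnapshotSku(name="Standard_LRS")
# update = SnapshotUpdate(tags={"env": "test"}, sku=sku, disk_size_gb=64)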
|
mit
| 8,620,681,322,227,704,000
| 39.012987
| 188
| 0.638676
| false
| 3.89507
| false
| false
| false
|
keen99/SickRage
|
sickbeard/dailysearcher.py
|
1
|
4533
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import threading
import traceback
import sickbeard
from sickbeard import logger
from sickbeard import db
from sickbeard import common
from sickbeard import helpers
from sickbeard import exceptions
from sickbeard import network_timezones
from sickbeard.exceptions import ex
from sickbeard.common import SKIPPED
from common import Quality, qualityPresetStrings, statusStrings
class DailySearcher():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
if self.amActive:
return
self.amActive = True
logger.log(u"Searching for new released episodes ...")
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sqlResults:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
# for when there are orphaned series in the database that are not loaded into our showlist
if not show or show.paused:
continue
except exceptions.MultipleShowObjectsException:
logger.log(u"ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
try:
end_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs,
show.network) + datetime.timedelta(
minutes=helpers.tryInt(show.runtime, 60))
# filter out any episodes that haven't aired yet
if end_time > curTime:
continue
except:
# if an error occurred, assume the episode hasn't aired yet
continue
UpdateWantedList = 0
ep = show.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
with ep.lock:
if ep.season == 0:
logger.log(u"New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because is a special season")
ep.status = common.SKIPPED
elif sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT:
ep.status = common.SKIPPED
UpdateWantedList = 1
else:
logger.log(u"New episode %s airs today, setting to default episode status for this show: %s" % (ep.prettyName(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
else:
logger.log(u"No new released episodes found ...")
sickbeard.traktRollingScheduler.action.updateWantedList()
# queue episode for daily search
dailysearch_queue_item = sickbeard.search_queue.DailySearchQueueItem()
sickbeard.searchQueueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
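# Usage sketch (illustrative): SickRage normally drives this class from its
# scheduler, but it can also be exercised directly.
# searcher = DailySearcher()
# searcher.run(force=True)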
|
gpl-3.0
| -3,312,056,714,057,756,700
| 37.415254
| 182
| 0.62056
| false
| 4.268362
| false
| false
| false
|
trackmastersteve/alienfx
|
alienfx/core/controller_m17xr4.py
|
1
|
5637
|
#
# controller_m17xr4.py
#
# Copyright (C) 2013-2014 Ashwin Menon <ashwin.menon@gmail.com>
# Copyright (C) 2015-2021 Track Master Steve <trackmastersteve@gmail.com>
#
# Alienfx is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Alienfx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with alienfx. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
""" Specialization of the AlienFxController class for the M17xR4 controller.
This module provides the following classes:
AlienFXControllerM17xR4 : M17xR4 controller
"""
import alienfx.core.controller as alienfx_controller
class AlienFXControllerM17xR4(alienfx_controller.AlienFXController):
""" Specialization of the AlienFxController class for the M17xR4 controller.
"""
# Speed capabilities. The higher the number, the slower the speed of
# blink/morph actions. The min speed is selected by trial and error as
# the lowest value that will not result in strange blink/morph behaviour.
DEFAULT_SPEED = 75
MIN_SPEED = 1
# Zone codes
LEFT_KEYBOARD = 0x0008 # Code OK
MIDDLE_LEFT_KEYBOARD = 0x0004 # Code OK
MIDDLE_RIGHT_KEYBOARD = 0x0002 # Code OK
RIGHT_KEYBOARD = 0x0001 # Code OK
# 0x000F - Keyboard: all fields (0x1+0x2+0x4+0x8=0xF). You may have a look at reverse-engineering-knowledgebase.md
RIGHT_SPEAKER = 0x0800 # Code OK, Bottom - Right light bar
LEFT_SPEAKER = 0x0400 # Code OK, Bottom - Left light bar
LEFT_DISPLAY = 0x1000 # Code OK, Display - Left light bar
RIGHT_DISPLAY = 0x2000 # Code OK, Display - Right light bar
ALIEN_HEAD = 0x0020 # Code OK
LOGO = 0x0040 # Code OK. Alienware-logo below screen.
# 0x0060 seems to be alien head and logo (0x20+0x40=0x60). You may have a look at reverse-engineering-knowledgebase.md
# Touchpad:
# Seems OK. You may need to set touchpad lighting to always on in the BIOS for this to work,
# as the on-touch event does not seem to be recognized correctly
TOUCH_PAD = 0x0080 # Code OK. Have a look at your BIOS settings.
MEDIA_BAR = 0x4000 # Seems OK. The Media_Bar may actually be the Macro-Key-Bar
POWER_BUTTON = 0x0100 # Seems OK. Caution: S1 (Boot) conflicts with settings for other states...
# HDD_LEDS = ??? # Inactive: Device has no hdd indicator
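# Illustration: the zone codes above are bit flags, so several zones can be
# addressed at once by OR-ing their codes together, e.g. the whole keyboard is
# LEFT_KEYBOARD | MIDDLE_LEFT_KEYBOARD | MIDDLE_RIGHT_KEYBOARD | RIGHT_KEYBOARD == 0x000F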
# Reset codes
RESET_ALL_LIGHTS_OFF = 3
RESET_ALL_LIGHTS_ON = 4
# State codes
BOOT = 1 # It seems some zones can only be defined in the Boot state and have no effect on higher states
AC_SLEEP = 2
AC_CHARGED = 5
AC_CHARGING = 6
BATTERY_SLEEP = 7
BATTERY_ON = 8
BATTERY_CRITICAL = 9
#Controller Type
# Defines the controller type:
# 1 = old pre Alienware 17R4 (4 bits per color)
# 2 = AW17R4 and probably others, which are using 8 bits per color
MYCONTROLLERREV = 2
def __init__(self):
# For new controller definitions the controller revision should be provided, as it defaults to 1!
# Wrong revision might result in packet errors 32 and 75 (Overflow and Pipeoverflow)
alienfx_controller.AlienFXController.__init__(self, self.MYCONTROLLERREV)
self.name = "Alienware M17xR4"
# USB VID and PID
self.vendor_id = 0x187c
self.product_id = 0x0530
# map the zone names to their codes
self.zone_map = {
self.ZONE_LEFT_KEYBOARD: self.LEFT_KEYBOARD,
self.ZONE_MIDDLE_LEFT_KEYBOARD: self.MIDDLE_LEFT_KEYBOARD,
self.ZONE_MIDDLE_RIGHT_KEYBOARD: self.MIDDLE_RIGHT_KEYBOARD,
self.ZONE_RIGHT_KEYBOARD: self.RIGHT_KEYBOARD,
self.ZONE_RIGHT_SPEAKER: self.RIGHT_SPEAKER,
self.ZONE_LEFT_SPEAKER: self.LEFT_SPEAKER,
self.ZONE_ALIEN_HEAD: self.ALIEN_HEAD,
self.ZONE_LOGO: self.LOGO,
self.ZONE_TOUCH_PAD: self.TOUCH_PAD,
self.ZONE_MEDIA_BAR: self.MEDIA_BAR,
self.ZONE_POWER_BUTTON: self.POWER_BUTTON,
self.ZONE_LEFT_DISPLAY: self.LEFT_DISPLAY,
self.ZONE_RIGHT_DISPLAY: self.RIGHT_DISPLAY
# self.ZONE_HDD_LEDS: self.HDD_LEDS, # Not used, as the AW17R4 does not have an HDD indicator
}
# zones that have special behaviour in the different power states
self.power_zones = [
self.ZONE_POWER_BUTTON # ,
# self.ZONE_HDD_LEDS
]
# map the reset names to their codes
self.reset_types = {
self.RESET_ALL_LIGHTS_OFF: "all-lights-off",
self.RESET_ALL_LIGHTS_ON: "all-lights-on"
}
# map the state names to their codes
self.state_map = {
self.STATE_BOOT: self.BOOT,
self.STATE_AC_SLEEP: self.AC_SLEEP,
self.STATE_AC_CHARGED: self.AC_CHARGED,
self.STATE_AC_CHARGING: self.AC_CHARGING,
self.STATE_BATTERY_SLEEP: self.BATTERY_SLEEP,
self.STATE_BATTERY_ON: self.BATTERY_ON,
self.STATE_BATTERY_CRITICAL: self.BATTERY_CRITICAL
}
alienfx_controller.AlienFXController.supported_controllers.append(
AlienFXControllerM17xR4())
|
gpl-3.0
| -5,833,071,122,726,631,000
| 38.697183
| 121
| 0.665247
| false
| 3.339455
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/site_seal_request.py
|
1
|
1069
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SiteSealRequest(Model):
"""Site seal request.
:param light_theme: If <code>true</code> use the light color theme for
site seal; otherwise, use the default color theme.
:type light_theme: bool
:param locale: Locale of site seal.
:type locale: str
"""
_attribute_map = {
'light_theme': {'key': 'lightTheme', 'type': 'bool'},
'locale': {'key': 'locale', 'type': 'str'},
}
def __init__(self, light_theme=None, locale=None):
self.light_theme = light_theme
self.locale = locale
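# Minimal usage sketch (values are illustrative): the model is a simple
# two-field container handed to the certificate order operations.
# request = SiteSealRequest(light_theme=True, locale='en-US')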
|
mit
| 4,656,552,938,915,643,000
| 32.40625
| 76
| 0.571562
| false
| 4.327935
| false
| false
| false
|
brandicted/nefertari
|
nefertari/renderers.py
|
2
|
6370
|
import json
import logging
from datetime import date, datetime
from nefertari import wrappers
from nefertari.utils import get_json_encoder
from nefertari.json_httpexceptions import JHTTPOk, JHTTPCreated
from nefertari.events import trigger_after_events
log = logging.getLogger(__name__)
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime, date)):
return obj.strftime("%Y-%m-%dT%H:%M:%SZ") # iso
try:
return super(_JSONEncoder, self).default(obj)
except TypeError:
return str(obj) # fallback to str
class JsonRendererFactory(object):
def __init__(self, info):
""" Constructor: info will be an object having the
following attributes: name (the renderer name), package
(the package that was 'current' at the time the
renderer was registered), type (the renderer type
name), registry (the current application registry) and
settings (the deployment settings dictionary). """
pass
def _set_content_type(self, system):
""" Set response content type """
request = system.get('request')
if request:
response = request.response
ct = response.content_type
if ct == response.default_content_type:
response.content_type = 'application/json'
def _render_response(self, value, system):
""" Render a response """
view = system['view']
enc_class = getattr(view, '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return json.dumps(value, cls=enc_class)
def __call__(self, value, system):
""" Call the renderer implementation with the value
and the system value passed in as arguments and return
the result (a string or unicode object). The value is
the return value of a view. The system value is a
dictionary containing available system values
(e.g. view, context, and request).
"""
self._set_content_type(system)
# run after_calls on the value before jsonifying
value = self.run_after_calls(value, system)
value = self._trigger_events(value, system)
return self._render_response(value, system)
def _trigger_events(self, value, system):
view_obj = system['view'](system['context'], system['request'])
view_obj._response = value
evt = trigger_after_events(view_obj)
return evt.response
def run_after_calls(self, value, system):
request = system.get('request')
if request and hasattr(request, 'action'):
if request.action in ['index', 'show']:
value = wrappers.wrap_in_dict(request)(result=value)
return value
class DefaultResponseRendererMixin(object):
""" Renderer mixin that generates responses for all create/update/delete
view methods.
"""
def _get_common_kwargs(self, system):
""" Get kwargs common for all methods. """
enc_class = getattr(system['view'], '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return {
'request': system['request'],
'encoder': enc_class,
}
def _get_create_update_kwargs(self, value, common_kw):
""" Get kwargs common to create, update, replace. """
kw = common_kw.copy()
kw['body'] = value
if '_self' in value:
kw['headers'] = [('Location', value['_self'])]
return kw
def render_create(self, value, system, common_kw):
""" Render response for view `create` method (collection POST) """
kw = self._get_create_update_kwargs(value, common_kw)
return JHTTPCreated(**kw)
def render_update(self, value, system, common_kw):
""" Render response for view `update` method (item PATCH) """
kw = self._get_create_update_kwargs(value, common_kw)
return JHTTPOk('Updated', **kw)
def render_replace(self, *args, **kwargs):
""" Render response for view `replace` method (item PUT) """
return self.render_update(*args, **kwargs)
def render_delete(self, value, system, common_kw):
""" Render response for view `delete` method (item DELETE) """
return JHTTPOk('Deleted', **common_kw.copy())
def render_delete_many(self, value, system, common_kw):
""" Render response for view `delete_many` method (collection DELETE)
"""
if isinstance(value, dict):
return JHTTPOk(extra=value)
msg = 'Deleted {} {}(s) objects'.format(
value, system['view'].Model.__name__)
return JHTTPOk(msg, **common_kw.copy())
def render_update_many(self, value, system, common_kw):
""" Render response for view `update_many` method
(collection PUT/PATCH)
"""
msg = 'Updated {} {}(s) objects'.format(
value, system['view'].Model.__name__)
return JHTTPOk(msg, **common_kw.copy())
def _render_response(self, value, system):
""" Handle response rendering.
Calls mixin methods according to request.action value.
"""
super_call = super(DefaultResponseRendererMixin, self)._render_response
try:
method_name = 'render_{}'.format(system['request'].action)
except (KeyError, AttributeError):
return super_call(value, system)
method = getattr(self, method_name, None)
if method is not None:
common_kw = self._get_common_kwargs(system)
response = method(value, system, common_kw)
system['request'].response = response
return
return super_call(value, system)
class NefertariJsonRendererFactory(DefaultResponseRendererMixin,
JsonRendererFactory):
""" Special json renderer which will apply all after_calls(filters)
to the result.
"""
def run_after_calls(self, value, system):
request = system.get('request')
if request and hasattr(request, 'action'):
after_calls = getattr(request, 'filters', {})
for call in after_calls.get(request.action, []):
value = call(**dict(request=request, result=value))
return value
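# Usage sketch (assumes a standard Pyramid Configurator; the renderer names are
# illustrative): the factories above are registered as named renderers.
# config.add_renderer('json', JsonRendererFactory)
# config.add_renderer('nefertari_json', NefertariJsonRendererFactory)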
|
apache-2.0
| -4,746,659,759,339,364,000
| 36.692308
| 79
| 0.610204
| false
| 4.218543
| false
| false
| false
|
redondomarco/useradm
|
src/models/gestionssl.py
|
1
|
1494
|
def obtener_certificado(usuario, perfil):
"""devuelvo certificado, si no existe lo creo en seguinf"""
env.user = myconf.take('datos.seguinf_user')
env.warn_only = True
seguinf = FabricSupport()
comando='sudo ls -la /root/Clientes_ssl/'+str(usuario)+'-'+str(perfil)+'.p12'
seguinf.run(myconf.take('datos.seguinf_srv'),22,comando)
if "No existe el fichero o el directorio" in seguinf.result:
log(str(seguinf.result))
# create the certificate
comando='sudo /usr/local/seguridad/bin/genero-cert '+str(usuario)+' '+str(perfil)
seguinf.run(myconf.take('datos.seguinf_srv'),22,comando)
# fetch the certificate
seguinf.file_get(myconf.take('datos.seguinf_srv'),22,"/root/Clientes_ssl/","/tmp/",str(usuario)+"-"+str(perfil)+".p12")
# read the certificate
with open("/tmp/"+str(usuario)+"-"+str(perfil)+".p12", 'rb') as f:
cert_p12 = f.read()
return cert_p12
def info_certificado(certificado):
from OpenSSL.crypto import load_pkcs12, FILETYPE_PEM, FILETYPE_ASN1
a = certificado
p = load_pkcs12(a, '')
certificate = p.get_certificate()
private_key = p.get_privatekey()
fields = certificate.get_subject().get_components()
resultado={}
for i in fields:
resultado[i[0]]=i[1]
return resultado
# to inspect the result of obtener_certificado
#from OpenSSL.crypto import load_pkcs12, FILETYPE_PEM, FILETYPE_ASN1
#p = load_pkcs12(a, '')
#certificate = p.get_certificate()
#private_key = p.get_privatekey()
#fields = certificate.get_subject().get_components()
#print(fields)
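# Usage sketch (usuario/perfil values are illustrative):
# cert = obtener_certificado('jdoe', 'admin')
# datos = info_certificado(cert)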
|
gpl-3.0
| 3,485,720,391,723,372,000
| 37.307692
| 120
| 0.718206
| false
| 2.549488
| false
| false
| false
|
pizzathief/scipy
|
scipy/linalg/tests/test_decomp_cossin.py
|
1
|
5753
|
import pytest
import numpy as np
from numpy.random import seed
from numpy.testing import assert_allclose
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
from scipy.linalg import cossin, get_lapack_funcs
REAL_DTYPES = (np.float32, np.float64)
COMPLEX_DTYPES = (np.complex64, np.complex128)
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
@pytest.mark.parametrize('dtype_', DTYPES)
@pytest.mark.parametrize('m, p, q',
[
(2, 1, 1),
(3, 2, 1),
(3, 1, 2),
(4, 2, 2),
(4, 1, 2),
(40, 12, 20),
(40, 30, 1),
(40, 1, 30),
(100, 50, 1),
(100, 50, 50),
])
@pytest.mark.parametrize('swap_sign', [True, False])
def test_cossin(dtype_, m, p, q, swap_sign):
seed(1234)
if dtype_ in COMPLEX_DTYPES:
x = np.array(unitary_group.rvs(m), dtype=dtype_)
else:
x = np.array(ortho_group.rvs(m), dtype=dtype_)
u, cs, vh = cossin(x, p, q,
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
# Test for float32 or float64
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
_, cs2, vh2 = cossin(x, p, q,
compute_u=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)
u2, cs2, _ = cossin(x, p, q,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
_, cs2, _ = cossin(x, p, q,
compute_u=False,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
def test_cossin_mixed_types():
seed(1234)
x = np.array(ortho_group.rvs(4), dtype=np.float64)
u, cs, vh = cossin([x[:2, :2],
np.array(x[:2, 2:], dtype=np.complex128),
x[2:, :2],
x[2:, 2:]])
assert u.dtype == np.complex128
assert cs.dtype == np.float64
assert vh.dtype == np.complex128
assert_allclose(x, u @ cs @ vh, rtol=0.,
atol=1e4 * np.finfo(np.complex128).eps)
def test_cossin_error_incorrect_subblocks():
with pytest.raises(ValueError, match="be due to missing p, q arguments."):
cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10]))
def test_cossin_error_empty_subblocks():
with pytest.raises(ValueError, match="x11.*empty"):
cossin(([], [], [], []))
with pytest.raises(ValueError, match="x12.*empty"):
cossin(([1, 2], [], [6, 7], [8, 9, 10]))
with pytest.raises(ValueError, match="x21.*empty"):
cossin(([1, 2], [3, 4, 5], [], [8, 9, 10]))
with pytest.raises(ValueError, match="x22.*empty"):
cossin(([1, 2], [3, 4, 5], [2], []))
def test_cossin_error_missing_partitioning():
with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"):
cossin(unitary_group.rvs(2))
with pytest.raises(ValueError, match=".*might be due to missing p, q"):
cossin(unitary_group.rvs(4))
def test_cossin_error_non_iterable():
with pytest.raises(ValueError, match="containing the subblocks of X"):
cossin(12j)
def test_cossin_error_non_square():
with pytest.raises(ValueError, match="only supports square"):
cossin(np.array([[1, 2]]), 1, 1)
def test_cossin_error_partitioning():
x = np.array(ortho_group.rvs(4), dtype=np.float64)
with pytest.raises(ValueError, match="invalid p=0.*0<p<4.*"):
cossin(x, 0, 1)
with pytest.raises(ValueError, match="invalid p=4.*0<p<4.*"):
cossin(x, 4, 1)
with pytest.raises(ValueError, match="invalid q=-2.*0<q<4.*"):
cossin(x, 1, -2)
with pytest.raises(ValueError, match="invalid q=5.*0<q<4.*"):
cossin(x, 1, 5)
@pytest.mark.parametrize("dtype_", DTYPES)
def test_cossin_separate(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
X = np.array(X, dtype=dtype_)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'),[X])
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'],
lwval))
*_, theta, u1, u2, v1t, v2t, _ = \
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
(u1_2, u2_2), theta2, (v1t_2, v2t_2) = cossin(X, p, q, separate=True)
assert_allclose(u1_2, u1, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(u2_2, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v1t_2, v1t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v2t_2, v2t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(theta2, theta, rtol=0., atol=10*np.finfo(dtype_).eps)
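# Minimal usage sketch of the decomposition exercised above (illustrative, not
# part of the test suite):
# x = unitary_group.rvs(4)
# u, cs, vh = cossin(x, p=2, q=2)
# assert_allclose(x, u @ cs @ vh, atol=1e-10)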
|
bsd-3-clause
| -8,984,024,263,995,868,000
| 36.357143
| 78
| 0.530506
| false
| 2.977743
| true
| false
| false
|
ViciusChile/Scraper_Dealer
|
Metacritic.py
|
1
|
7087
|
from Scrapers.tools import tools
class MetacriticInfo:
"""Model for each elements"""
def __init__(self):
self.name = None
self.platform = None
self.developer = None
self.publisher = None
self.esrb = None
self.release = None
self.tags = None
self.metascore = None
self.official_site = None
self.description = None
self.num_players = None
self.sound = None
self.connectivity = None
self.resolution = None
self.num_online = None
self.customization = None
self.image_mini = None
self.url = None
self.url_amazon = None
class Metacritic:
"""Scrapping for www.metacritic.com"""
def __init__(self):
pass
@staticmethod
def scraper_dealer(url):
"""Get url index from one platform an letter
:param url: to scraping
http://www.metacritic.com/browse/games/title/ps4 /a
"""
url_pages = []
http = tools.get_html(url)
html = http[2]
http_code = http[1]
if html is not None:
pages = html.cssselect('#main > div.module.filter.alpha_filter > div.page_nav > div > div.pages > ul > li')
if pages:
q = len(pages)
else:
q = 1
for i in range(0, q):
url_pages.append('{0}?view=condensed&page={1}'.format(url, i))
return url_pages, http_code
@staticmethod
def scraper_links(url):
"""Get url index from one platform an letter
:param url: page with many links
:return: list of links urls
"""
urls = []
http = tools.get_html(url)
html = http[2]
http_code = http[1]
if html is not None:
links = html.cssselect(
'#main > div.module.filter.alpha_filter > div > div.body > div.body_wrap > div > ol > li > div > div.basic_stat.product_title > a')
if links:
for l in links:
urls.append(l.get('href') + '/details')
return urls, http_code
@staticmethod
def scraper_info(url):
"""Get all information of a game
:param url: game link
:return: class with all info
"""
http = tools.get_html(url)
page = http[2]
http_code = http[1]
product = MetacriticInfo()
name = page.cssselect('#main > div.content_head.product_content_head.game_content_head > div.product_title > a')
if not name:
name = page.cssselect('#main > div.content_head.product_content_head.game_content_head > h1 > a')
if name:
product.name = name[0].text_content().strip()
platform = page.cssselect('#main > div.content_head.product_content_head.game_content_head > div.product_title > span > a')
if not platform:
platform = page.cssselect('#main > div.content_head.product_content_head.game_content_head > h1 > span > a')
if platform:
platform = platform[0].text_content().strip()
product.platform = tools.clear_platform(platform).upper()
publisher = page.cssselect(
'#main > div.content_head.product_content_head.game_content_head > div.product_data > ul > li.summary_detail.publisher > span.data > a')
if publisher:
product.publisher = publisher[0].text_content().strip()
release = page.cssselect(
'#main > div.content_head.product_content_head.game_content_head > div.product_data > ul > li.summary_detail.release_data > span.data')
if release:
product.release = release[0].text_content().strip()
metascore = page.cssselect(
'#main > div.module.product_data > div > div.summary_wrap > div.section.product_scores > div.details.main_details > div > div > a > div > span')
if metascore:
product.metascore = metascore[0].text_content().strip()
product_description = page.cssselect(
'#main > div.module.product_data > div > div.summary_wrap > div.section.product_details > div > span.data')
if product_description:
product.description = product_description[0].text_content()
og_image = page.cssselect('meta[name="og:image"]')
if og_image:
product.image_mini = og_image[0].get('content')
product_details = page.cssselect('#main > div.product_details > table')
if product_details:
for i in product_details:
for e in i:
th = e.cssselect('th')
td = e.cssselect("td")
th_val = th[0].text_content().replace(":", "").strip()
td_val = td[0].text_content().strip()
if th_val == "Rating":
product.esrb = td_val
elif th_val == "Official Site":
product.official_site = td_val
elif th_val == "Developer":
product.developer = td_val
elif th_val == "Genre(s)":
product.tags = td_val
elif th_val == "Number of Players":
product.num_players = td_val
elif th_val == "Sound":
product.sound = td_val
elif th_val == "Connectivity":
product.connectivity = td_val
elif th_val == "Resolution":
product.resolution = td_val
elif th_val == "Number of Online Players":
product.num_online = td_val
elif th_val == "Customization":
product.customization = td_val
product_url = page.cssselect('#main > div.content_head.product_content_head.game_content_head > div.product_title > a')
if product_url:
product.url = product_url[0].get('href')
#url_amazon = page.cssselect('#main > div.module.product_data > div > div.summary_wrap > div.section.product_details > div.amazon_wrapper > a')
url_amazon = page.cssselect('#main > div.module.product_data > div > div.summary_wrap > div.section.product_details > div.esite_list > div.esite_items > div.esite_btn_wrapper > div.esite_btn > table > tr > td.esite_img_wrapper > a')
#print('url_amazon', url_amazon)
if url_amazon:
product.url_amazon = url_amazon[0].attrib['href']
return product, http_code
# ------------------------------------------------------------------------------ #
#from pprint import pprint
#pprint(Metacritic.scraper_pages('pc', letter))
#pprint(Metacritic.scraper_links('http://www.metacritic.com/browse/games/title/pc/u?view=condensed&page=1'))
#Metacritic.scraper_info('http://www.metacritic.com/game/playstation-4/fallout-4/details')
#platforms = ['ps4', 'xboxone', 'ps3', 'xbox360', 'pc', 'wii-u', '3ds', 'vita']
#letters = list('#' + string.ascii_lowercase)
|
gpl-2.0
| 7,326,300,751,974,268,000
| 38.372222
| 240
| 0.550444
| false
| 3.857921
| false
| false
| false
|
pedrosacramento/inkscape-animation
|
spritesheet.py
|
1
|
1674
|
# This class handles sprite sheets
# This was taken from www.scriptefun.com/transcript-2-using
# sprite-sheets-and-drawing-the-background
# I've added some code to fail if the file wasn't found..
# Note: When calling images_at the rect is the format:
# (x, y, x + offset, y + offset)
import pygame
class spritesheet(object):
def __init__(self, filename):
pygame.display.set_caption("Inkscape Animation Preview")
try:
self.sheet = pygame.image.load(filename).convert()
except pygame.error, message:
print 'Unable to load spritesheet image:', filename
raise SystemExit, message
# Load a specific image from a specific rectangle
def image_at(self, rectangle, colorkey = None):
"Loads image from x,y,x+offset,y+offset"
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
# Load a whole bunch of images and return them as a list
def images_at(self, rects, colorkey = None):
"Loads multiple images, supply a list of coordinates"
return [self.image_at(rect, colorkey) for rect in rects]
# Load a whole strip of images
def load_strip(self, rect, image_count, colorkey = None):
"Loads a strip of images and returns them as a list"
tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey)
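# Usage sketch (file name and rectangle are illustrative):
# ss = spritesheet('player.png')
# frames = ss.load_strip((0, 0, 32, 32), 8, colorkey=-1)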
|
mit
| -8,077,948,598,029,545,000
| 43.052632
| 64
| 0.639785
| false
| 3.711752
| false
| false
| false
|
gaurav/phylo2owl
|
tests/test_shacl.py
|
1
|
1141
|
#!/usr/bin/env python
"""test_shacl.py: Test generated ontologies against SHACL shapes."""
import os
import libshacl
import pytest
def test_execute_testShacl():
""" Can we execute testShacl at all? """
(rc, stdout, stderr) = libshacl.exec_testShacl(["--version"])
print stdout
print stderr
assert rc == 0
assert stdout.startswith("testShacl ")
def test_validate_shacl_against_nodeshape(path_owl):
""" Execute testShacl on every OWL file against NodeShape.ttl. """
path_shacl = path_owl[:-3] + "shacl.ttl"
libshacl.validateShacl("tests/shapes/NodeShape.ttl", path_owl)
def test_validate_shacl_against_custom_shacl(path_owl):
""" Execute testShacl on the corresponding shacl.ttl file, if one exists. """
path_shacl = path_owl[:-3] + "shacl.ttl"
if os.path.isfile(path_shacl):
print "Validating {0} against its custom SHACL file, {1}".format(path_owl, path_shacl)
libshacl.validateShacl(path_shacl, path_owl)
else:
pytest.skip("OWL file '{0}' doesn't have a custom SHACL file to test at '{1}'".format(
path_owl,
path_shacl
))
|
mit
| 1,449,120,676,683,983,000
| 30.694444
| 94
| 0.659071
| false
| 3.223164
| true
| false
| false
|
coala/coala
|
coalib/processes/communication/LogMessage.py
|
1
|
1633
|
from datetime import datetime
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
class LogMessage:
def __init__(self,
log_level,
*messages,
delimiter=' ',
timestamp=None):
if log_level not in LOG_LEVEL.reverse:
raise ValueError('log_level has to be a valid LOG_LEVEL.')
str_messages = [str(message) for message in messages]
self.message = str(delimiter).join(str_messages).rstrip()
if self.message == '':
raise ValueError('Empty log messages are not allowed.')
self.log_level = log_level
self.timestamp = datetime.today() if timestamp is None else timestamp
def __str__(self):
log_level = LOG_LEVEL.reverse.get(self.log_level, 'ERROR')
return f'[{log_level}] {self.message}'
def __eq__(self, other):
return (isinstance(other, LogMessage) and
other.log_level == self.log_level and
other.message == self.message)
def __ne__(self, other):
return not self.__eq__(other)
def to_string_dict(self):
"""
Makes a dictionary which has all keys and values as strings and
contains all the data that the LogMessage has.
:return: Dictionary with keys and values as string.
"""
retval = {}
retval['message'] = str(self.message)
retval['timestamp'] = ('' if self.timestamp is None
else self.timestamp.isoformat())
retval['log_level'] = str(LOG_LEVEL.reverse.get(self.log_level, ''))
return retval
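# Usage sketch (assumes LOG_LEVEL defines a WARNING member):
# msg = LogMessage(LOG_LEVEL.WARNING, 'cache', 'nearly full')
# str(msg)  # -> '[WARNING] cache nearly full'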
|
agpl-3.0
| 8,628,628,486,573,394,000
| 31.66
| 77
| 0.578077
| false
| 4.343085
| false
| false
| false
|
Larhard/tsp
|
tsp/solver.py
|
1
|
2482
|
# Copyright (c) 2015, Bartlomiej Puget <larhard@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Bartlomiej Puget nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import queue
import tsp.utils
def solve(vertices, tactic=None):
tactic = tactic or tsp.utils.euclid_distance
todo = queue.PriorityQueue()
todo.put((0, [vertices[0]], ))
min_cost = None
min_path = None
while todo.qsize() > 0:
expected_cost, path = todo.get()
cost = tsp.utils.path_length(path)
if len(vertices) == len(path):
total_cost = cost + tsp.utils.euclid_distance(path[-1], path[0])
if min_cost is None or min_cost > total_cost:
min_cost = total_cost
min_path = path
else:
for v in vertices:
if v not in path:
new_cost = cost + tactic(path[-1], v, vertices=vertices,
path=path)
if min_cost is None or new_cost < min_cost:
todo.put((new_cost, path + [v]))
return min_path
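# Usage sketch (assumes vertices are 2-D points accepted by
# tsp.utils.euclid_distance):
# tour = solve([(0, 0), (3, 0), (3, 4), (0, 4)])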
|
bsd-3-clause
| -8,710,665,200,263,397,000
| 39.688525
| 80
| 0.680097
| false
| 4.129784
| false
| false
| false
|
OmeGak/indico
|
indico/modules/attachments/models/folders_test.py
|
1
|
1890
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.attachments import AttachmentFolder
def test_update_principal(dummy_user, dummy_event):
folder = AttachmentFolder(object=dummy_event, is_default=True)
assert not folder.acl_entries
# not changing anything -> shouldn't be added to acl
entry = folder.update_principal(dummy_user)
assert entry is None
assert not folder.acl_entries
# adding user with read access -> new acl entry since the user isn't in there yet
entry = initial_entry = folder.update_principal(dummy_user, read_access=True)
assert folder.acl_entries == {entry}
# not changing anything on existing principal -> shouldn't modify acl
entry = folder.update_principal(dummy_user)
assert entry is initial_entry
assert folder.acl_entries == {entry}
# granting permission which is already present -> shouldn't modify acl
entry = folder.update_principal(dummy_user, read_access=True)
assert entry is initial_entry
assert folder.acl_entries == {entry}
# removing read access -> acl entry is removed
entry = folder.update_principal(dummy_user, read_access=False)
assert entry is None
assert not folder.acl_entries
def test_remove_principal(dummy_user, dummy_event):
folder = AttachmentFolder(object=dummy_event, is_default=True)
assert not folder.acl_entries
entry = folder.update_principal(dummy_user, read_access=True)
assert folder.acl_entries == {entry}
folder.remove_principal(dummy_user)
assert not folder.acl_entries
# doesn't do anything but must not fail either
folder.remove_principal(dummy_user)
assert not folder.acl_entries
|
mit
| 6,373,904,373,456,727,000
| 40.086957
| 85
| 0.732275
| false
| 3.962264
| false
| false
| false
|
hsfzxjy/wisecitymbc
|
common/rest/decorators.py
|
1
|
1087
|
from django.utils.decorators import method_decorator
from django.core.cache import cache
from django.http import HttpResponse
def cache_view_func(func):
def wrapper(request, *args, **kwargs):
key = request.method + request.META['PATH_INFO'] + request.META['QUERY_STRING']
content = cache.get(key)
if content is None:
response = func(request, *args, **kwargs)
cache.set(key, response.rendered_content, 30)
return response
else:
return HttpResponse(content)
return wrapper
def cache_view_method(func):
def wrapper(self, request, *args, **kwargs):
key = request.method + request.META['PATH_INFO'] + request.META['QUERY_STRING']
content = cache.get(key)
if content is None:
response = func(self, request, *args, **kwargs)
self.finalize_response(request, response, *args, **kwargs)
cache.set(key, response.rendered_content, 30)
return response
else:
return HttpResponse(content)
return wrapper
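# Usage sketch (view name is illustrative): wrapping a Django view caches its
# rendered response for 30 seconds, keyed by method, path and query string.
# @cache_view_func
# def article_list(request):
#     ...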
|
gpl-2.0
| 774,915,620,516,630,300
| 31
| 87
| 0.624655
| false
| 4.213178
| false
| false
| false
|
andrew-lundgren/gwpy
|
gwpy/signal/filter.py
|
1
|
3049
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2016)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extensions to `scipy.signal.signaltools`.
"""
from numpy import (asarray, reshape)
from scipy.signal.signaltools import (sosfilt, sosfilt_zi)
from scipy.signal._arraytools import (axis_slice, axis_reverse, odd_ext,
even_ext, const_ext)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__all__ = ['sosfiltfilt']
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=0):
x = asarray(x)
# `method` is "pad"
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
edge = sos.shape[0] * 6
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = sosfilt_zi(sos)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
zix0 = reshape(zi * x0, (sos.shape[0], 2))
# Forward filter
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zix0)
# Backward filter
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
ziy0 = reshape(zi * y0, (sos.shape[0], 2))
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=ziy0)
# Reverse y
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
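# Usage sketch (filter design values are illustrative): an SOS filter designed
# with scipy.signal can be applied forward-backward with the function above.
# from scipy.signal import butter
# sos = butter(4, 0.125, output='sos')
# y = sosfiltfilt(sos, x)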
|
gpl-3.0
| 2,693,603,008,467,231,000
| 31.43617
| 77
| 0.610692
| false
| 3.278495
| false
| false
| false
|
sharkykh/SickRage
|
sickbeard/providers/elitetorrent.py
|
1
|
7522
|
# coding=utf-8
# Author: CristianBB
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import time
import traceback
import six
import sickbeard
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickbeard.common import cpu_presets
from sickrage.helper.common import try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class EliteTorrentProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "EliteTorrent")
self.onlyspasearch = None
self.minseed = None
self.minleech = None
self.cache = tvcache.TVCache(self) # Only poll EliteTorrent every 20 minutes max
self.urls = {
'base_url': 'http://www.elitetorrent.net',
'search': 'http://www.elitetorrent.net/torrents.php'
}
self.url = self.urls['base_url']
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang
"""
Search query:
http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe
cat = 4 => Shows
modo = listado => display results mode
orden = fecha => order
buscar => Search show
pag = 1 => page number
"""
search_params = {
'cat': 4,
'modo': 'listado',
'orden': 'fecha',
'pag': 1,
'buscar': ''
}
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
# Only search if user conditions are true
if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
logger.log("Show info is not spanish, skipping provider search", logger.DEBUG)
continue
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_string = re.sub(r'S0*(\d*)E(\d*)', r'\1x\2', search_string)
search_params['buscar'] = search_string.strip() if mode != 'RSS' else ''
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', class_='fichas-listado')
torrent_rows = torrent_table('tr') if torrent_table else []
if len(torrent_rows) < 2:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for row in torrent_rows[1:]:
try:
download_url = self.urls['base_url'] + row.find('a')['href']
"""
Trick for accents for this provider.
- data = self.get_url(self.urls['search'], params=search_params, returns='text') -
returns latin1 coded text and this makes that the title used for the search
and the title retrieved from the parsed web page doesn't match so I get
"No needed episodes found during backlog search for: XXXX"
This is not the best solution but it works.
First encode latin1 and then decode utf8 to remains six.text_type
"""
row_title = row.find('a', class_='nombre')['title']
title = self._processTitle(row_title.encode('latin-1').decode('utf8'))
seeders = try_int(row.find('td', class_='semillas').get_text(strip=True))
leechers = try_int(row.find('td', class_='clientes').get_text(strip=True))
                                # Seeders are not reported reliably; use at least 1 so valid results are not discarded
seeders = max(1, seeders)
# Provider does not provide size
size = -1
except (AttributeError, TypeError, KeyError, ValueError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except Exception:
logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.WARNING)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
@staticmethod
def _processTitle(title):
        # Quality: if no quality literal is present, assume HDTV x264
if 'calidad' not in title:
title += ' HDTV x264'
title = title.replace('(calidad baja)', 'HDTV x264')
title = title.replace('(Buena calidad)', '720p HDTV x264')
title = title.replace('(Alta calidad)', '720p HDTV x264')
title = title.replace('(calidad regular)', 'DVDrip x264')
title = title.replace('(calidad media)', 'DVDrip x264')
        # Language: all results from this provider have Spanish audio; append it to the title to avoid downloading undesired torrents
title += ' SPANISH AUDIO'
title += '-ELITETORRENT'
return title.strip()
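# Illustrative _processTitle result (hypothetical input title):
#     "Fringe 1x01 (Buena calidad)" -> "Fringe 1x01 720p HDTV x264 SPANISH AUDIO-ELITETORRENT"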
provider = EliteTorrentProvider()
|
gpl-3.0
| -8,951,345,537,618,904,000
| 39.659459
| 149
| 0.539484
| false
| 4.342956
| false
| false
| false
|
mdklatt/serial-python
|
src/serial/core/sort.py
|
1
|
3047
|
""" Sorted input and output.
"""
from collections import deque
from operator import itemgetter
from .buffer import _ReaderBuffer
from .buffer import _WriterBuffer
__all__ = "SortReader", "SortWriter"
class _Sorter(object):
""" Abstract base class for SortReader and SortWriter.
"""
def __init__(self, key, group=None):
""" Initialize this object.
The key argument determines sort order and is either a single field
name, a sequence of names, or a key function that returns a key value.
The optional group argument is like the key argument but is used to
group records that are already partially sorted. Records will be sorted
within each group rather than as a single sequence. If the groups are
small relative to the total sequence length this can significantly
improve performance and memory usage.
"""
def keyfunc(key):
""" Create a key function. """
if not key or callable(key):
return key
if isinstance(key, str):
key = (key,)
return itemgetter(*key)
self._get_key = keyfunc(key)
self._get_group = keyfunc(group)
self._group = None
self._buffer = []
self._output = None # initialized by derived classes
return
def _queue(self, record):
""" Process each incoming record.
"""
if self._get_group:
group = self._get_group(record)
if group != self._group:
# This is a new group; process the previous group.
self._flush()
self._group = group
self._buffer.append(record)
return
def _flush(self):
""" Send sorted records to the output queue.
"""
if not self._buffer:
return
self._buffer.sort(key=self._get_key)
self._output = deque(self._buffer)
self._buffer = []
return
class SortReader(_Sorter, _ReaderBuffer):
""" Sort input from another reader.
"""
def __init__(self, reader, key, group=None):
""" Initialize this object.
"""
_Sorter.__init__(self, key, group)
_ReaderBuffer.__init__(self, reader)
return
def _uflow(self):
""" Handle an underflow condition.
This is called when the input reader is exhausted and there are no
records in the output queue.
"""
if not self._buffer:
# All data has been output.
raise StopIteration
self._flush()
return
class SortWriter(_Sorter, _WriterBuffer):
""" Sort output for another writer.
"""
def __init__(self, writer, key, group=None):
""" Initialize this object.
"""
_Sorter.__init__(self, key, group)
_WriterBuffer.__init__(self, writer)
return
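# Illustrative usage sketch (reader stands for any serial.core reader yielding
# dict-like records; "stid", "time" and process() are placeholder names):
#
#     sorted_reader = SortReader(reader, key=("stid", "time"), group="stid")
#     for record in sorted_reader:
#         process(record)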
|
mit
| -4,089,957,522,293,261,300
| 27.476636
| 80
| 0.54808
| false
| 4.775862
| false
| false
| false
|
jdeguire/pjcontroller
|
software/updatepage.py
|
1
|
4410
|
#! /usr/bin/env python
#
# Copyright 2011-2013 Jesse DeGuire
#
# This file is part of Projector Controller.
#
# Projector Controller is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Projector Controller is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Projector Controller. If not, see <http://www.gnu.org/licenses/>
"""
File: updatepage.py
Author: Jesse DeGuire
Contains the UpdatePage class.
"""
import os
import hashlib
from PySide import QtCore
from PySide.QtCore import *
from PySide.QtGui import *
from connmanager import ConnectionManager
class UpdatePage(QDialog):
"""The page used for performing firmware updates to the device.
"""
# new signals have to be declared out here, something the docs aren't very explicit about
updatestartclicked = QtCore.Signal(str)
def __init__(self, connmgr):
QDialog.__init__(self)
# widgets in the dialog box
self.fileline = QLineEdit()
self.fileline.setPlaceholderText('Select hex file...')
self.browsebutton = QPushButton('...')
# Set the appropriate size manually since the "standard" size is too big.
# It seems that buttons get a 10 pixel pad on each side.
browsefw = self.browsebutton.fontMetrics().width(self.browsebutton.text())
if browsefw > 15:
self.browsebutton.setFixedWidth(browsefw + 20)
else:
self.browsebutton.setFixedWidth(35)
self.hashlabel = QLabel("MD5 Sum")
self.hashline = QLineEdit()
self.hashline.setPlaceholderText('No file selected')
self.hashline.setReadOnly(True)
self.startbutton = QPushButton('Start')
self.startbutton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.progress = QProgressBar()
self.progress.setRange(0, 100)
self.progress.setFixedWidth(100)
# so our file dialog remembers where we last were (default to home directory)
self.lasthexdir = os.path.expanduser('~')
# set up external connections
connmgr.addSignal(self.updatestartclicked, 'StartUpdate')
connmgr.addSlot(self.setUpdateProgress, 'UpdateProgressed')
connmgr.addSlot(self.endUpdate, 'UpdateCompleted')
# connect signals to internal slots
self.browsebutton.clicked.connect(self.browseForHexFile)
self.startbutton.clicked.connect(self.startNewUpdate)
# set up our control layout
self.vbox = QVBoxLayout(self)
self.filehbox = QHBoxLayout()
self.starthbox = QHBoxLayout()
self.vbox.setAlignment(Qt.AlignCenter)
self.vbox.addLayout(self.filehbox)
self.filehbox.addWidget(self.fileline)
self.filehbox.addWidget(self.browsebutton)
self.vbox.addLayout(self.starthbox)
self.starthbox.setAlignment(Qt.AlignLeft)
self.starthbox.addWidget(self.startbutton)
self.starthbox.addWidget(self.progress)
self.vbox.addSpacing(10)
self.vbox.addWidget(self.hashlabel)
self.vbox.addWidget(self.hashline)
@QtCore.Slot()
def browseForHexFile(self):
hexpath = QFileDialog.getOpenFileName(self, 'Select hex file', self.lasthexdir,
'Intel hex files (*.hex);;All Files (*)')
if hexpath[0] != '':
self.fileline.setText(hexpath[0])
self.lasthexdir = os.path.dirname(hexpath[0])
h = hashlib.md5()
with open(hexpath[0], 'r') as hexfile:
for line in hexfile:
h.update(line)
self.hashline.setText(h.hexdigest())
@QtCore.Slot()
def startNewUpdate(self):
self.progress.reset()
self.updatestartclicked.emit(self.fileline.text())
@QtCore.Slot(int)
def setUpdateProgress(self, prog):
self.progress.setValue(prog)
@QtCore.Slot(bool)
def endUpdate(self, result):
self.progress.reset()
|
gpl-3.0
| -2,336,973,511,080,071,700
| 33.186047
| 93
| 0.66644
| false
| 3.969397
| false
| false
| false
|
1844144/django-blog-zinnia
|
zinnia/tests/implementations/settings.py
|
1
|
1200
|
"""Settings for testing zinnia"""
import os
from zinnia.xmlrpc import ZINNIA_XMLRPC_METHODS
SITE_ID = 1
USE_TZ = True
STATIC_URL = '/static/'
SECRET_KEY = 'secret-key'
ROOT_URLCONF = 'zinnia.tests.implementations.urls.default'
LOCALE_PATHS = [os.path.join(os.path.dirname(__file__), 'locale')]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.SHA1PasswordHasher'
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.core.context_processors.request',
'zinnia.context_processors.version'
]
TEMPLATE_LOADERS = [
['django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader']
]
]
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django_comments',
'django_xmlrpc',
'mptt',
'tagging',
'south',
'zinnia'
]
ZINNIA_PAGINATION = 3
XMLRPC_METHODS = ZINNIA_XMLRPC_METHODS
|
bsd-3-clause
| 9,107,452,346,140,057,000
| 20.818182
| 66
| 0.694167
| false
| 3.252033
| false
| false
| false
|
benkirk/mpi_playground
|
mpi4py/tests/test_rma.py
|
1
|
15531
|
from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
import sys
pypy_lt_53 = (hasattr(sys, 'pypy_version_info') and
sys.pypy_version_info < (5, 3))
def mkzeros(n):
if pypy_lt_53:
return b'\0' * n
return bytearray(n)
def memzero(m):
try:
m[:] = 0
except IndexError: # cffi buffer
m[0:len(m)] = b'\0'*len(m)
class BaseTestRMA(object):
COMM = MPI.COMM_NULL
INFO = MPI.INFO_NULL
def setUp(self):
nbytes = 100*MPI.DOUBLE.size
try:
self.mpi_memory = MPI.Alloc_mem(nbytes)
self.memory = self.mpi_memory
memzero(self.memory)
except MPI.Exception:
import array
self.mpi_memory = None
self.memory = array.array('B',[0]*nbytes)
self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
def tearDown(self):
self.WIN.Free()
if self.mpi_memory:
MPI.Free_mem(self.mpi_memory)
def testPutGet(self):
typemap = MPI._typedict
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(10):
for rank in range(size):
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
#
self.WIN.Fence()
self.WIN.Put(sbuf.as_mpi(), rank)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
#
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
target = sbuf.itemsize
self.WIN.Fence()
self.WIN.Put(sbuf.as_mpi(), rank, target)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank, target)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
#
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
datatype = typemap[typecode]
target = (sbuf.itemsize, count, datatype)
self.WIN.Fence()
self.WIN.Put(sbuf.as_mpi(), rank, target)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank, target)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
def testAccumulate(self):
group = self.WIN.Get_group()
size = group.Get_size()
group.Free()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(10):
for rank in range(size):
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
self.WIN.Fence()
self.WIN.Accumulate(sbuf.as_mpi(), rank, op=op)
self.WIN.Fence()
self.WIN.Get(rbuf.as_mpi_c(count), rank)
self.WIN.Fence()
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertNotEqual(rbuf[i], -1)
self.assertEqual(rbuf[-1], -1)
@unittest.skipMPI('openmpi(>=1.10,<1.11)')
def testGetAccumulate(self):
group = self.WIN.Get_group()
size = group.Get_size()
rank = group.Get_rank()
group.Free()
self.WIN.Fence()
obuf = MPI.Alloc_mem(1); memzero(obuf)
rbuf = MPI.Alloc_mem(1); memzero(rbuf)
try:
try:
self.WIN.Get_accumulate([obuf, 0, MPI.BYTE], [rbuf, 0, MPI.BYTE], rank)
finally:
MPI.Free_mem(obuf)
MPI.Free_mem(rbuf)
except NotImplementedError:
self.skipTest('mpi-win-get_accumulate')
self.WIN.Fence()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
for count in range(10):
for rank in range(size):
ones = array([1]*count, typecode)
sbuf = array(range(count), typecode)
rbuf = array(-1, typecode, count+1)
gbuf = array(-1, typecode, count+1)
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.REPLACE, MPI.NO_OP):
self.WIN.Lock(rank)
self.WIN.Put(ones.as_mpi(), rank)
self.WIN.Flush(rank)
self.WIN.Get_accumulate(sbuf.as_mpi(),
rbuf.as_mpi_c(count),
rank, op=op)
self.WIN.Flush(rank)
self.WIN.Get(gbuf.as_mpi_c(count), rank)
self.WIN.Flush(rank)
self.WIN.Unlock(rank)
#
for i in range(count):
self.assertEqual(sbuf[i], i)
self.assertEqual(rbuf[i], 1)
self.assertEqual(gbuf[i], op(1, i))
self.assertEqual(rbuf[-1], -1)
self.assertEqual(gbuf[-1], -1)
def testFetchAndOp(self):
group = self.WIN.Get_group()
size = group.Get_size()
rank = group.Get_rank()
group.Free()
self.WIN.Fence()
obuf = MPI.Alloc_mem(1); memzero(obuf)
rbuf = MPI.Alloc_mem(1); memzero(rbuf)
try:
try:
self.WIN.Fetch_and_op([obuf, 1, MPI.BYTE], [rbuf, 1, MPI.BYTE], rank)
finally:
MPI.Free_mem(obuf)
MPI.Free_mem(rbuf)
except NotImplementedError:
self.skipTest('mpi-win-fetch_and_op')
self.WIN.Fence()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
obuf = array(+1, typecode)
rbuf = array(-1, typecode, 2)
for op in (MPI.SUM, MPI.PROD,
MPI.MAX, MPI.MIN,
MPI.REPLACE, MPI.NO_OP):
for rank in range(size):
for disp in range(3):
self.WIN.Lock(rank)
self.WIN.Fetch_and_op(obuf.as_mpi(),
rbuf.as_mpi_c(1),
rank, disp, op=op)
self.WIN.Unlock(rank)
self.assertEqual(rbuf[1], -1)
def testCompareAndSwap(self):
group = self.WIN.Get_group()
size = group.Get_size()
rank = group.Get_rank()
group.Free()
self.WIN.Fence()
obuf = MPI.Alloc_mem(1); memzero(obuf)
cbuf = MPI.Alloc_mem(1); memzero(cbuf)
rbuf = MPI.Alloc_mem(1); memzero(rbuf)
try:
try:
self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE],
[cbuf, 1, MPI.BYTE],
[rbuf, 1, MPI.BYTE],
rank, 0)
finally:
MPI.Free_mem(obuf)
MPI.Free_mem(cbuf)
MPI.Free_mem(rbuf)
except NotImplementedError:
self.skipTest('mpi-win-compare_and_swap')
self.WIN.Fence()
for array in arrayimpl.ArrayTypes:
for typecode in arrayimpl.TypeMap:
if typecode in 'fdg': continue
obuf = array(+1, typecode)
cbuf = array( 0, typecode)
rbuf = array(-1, typecode, 2)
for rank in range(size):
for disp in range(3):
self.WIN.Lock(rank)
self.WIN.Compare_and_swap(obuf.as_mpi(),
cbuf.as_mpi(),
rbuf.as_mpi_c(1),
rank, disp)
self.WIN.Unlock(rank)
self.assertEqual(rbuf[1], -1)
def testPutProcNull(self):
self.WIN.Fence()
self.WIN.Put(None, MPI.PROC_NULL, None)
self.WIN.Fence()
def testGetProcNull(self):
self.WIN.Fence()
self.WIN.Get(None, MPI.PROC_NULL, None)
self.WIN.Fence()
def testAccumulateProcNullReplace(self):
self.WIN.Fence()
zeros = mkzeros(8)
self.WIN.Fence()
self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE)
self.WIN.Fence()
self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.REPLACE)
self.WIN.Fence()
def testAccumulateProcNullSum(self):
self.WIN.Fence()
zeros = mkzeros(8)
self.WIN.Fence()
self.WIN.Accumulate([zeros, MPI.INT], MPI.PROC_NULL, None, MPI.SUM)
self.WIN.Fence()
self.WIN.Accumulate([None, MPI.INT], MPI.PROC_NULL, None, MPI.SUM)
self.WIN.Fence()
def testGetAccumulateProcNull(self):
obuf = [mkzeros(8), 0, MPI.INT]
rbuf = [mkzeros(8), 0, MPI.INT]
self.WIN.Fence()
try:
self.WIN.Get_accumulate(obuf, rbuf, MPI.PROC_NULL)
except NotImplementedError:
self.skipTest('mpi-win-get_accumulate')
self.WIN.Fence()
##def testFetchAndOpProcNull(self):
## obuf = cbuf = rbuf = None
## self.WIN.Fence()
## try:
## self.WIN.Fetch_and_op(obuf, rbuf, MPI.PROC_NULL, 0)
## except NotImplementedError:
## self.skipTest('mpi-win-fetch_and_op')
## self.WIN.Fence()
##def testCompareAndSwapProcNull(self):
## obuf = cbuf = rbuf = None
## self.WIN.Fence()
## try:
## self.WIN.Compare_and_swap(obuf, cbuf, rbuf, MPI.PROC_NULL, 0)
## except NotImplementedError:
## self.skipTest('mpi-win-compare_and_swap')
## self.WIN.Fence()
def testFence(self):
win = self.WIN
LMODE = [0, MPI.MODE_NOSTORE, MPI.MODE_NOPUT,
MPI.MODE_NOSTORE|MPI.MODE_NOPUT]
GMODE = [0, MPI.MODE_NOPRECEDE, MPI.MODE_NOSUCCEED]
win.Fence()
for lmode in LMODE:
for gmode in GMODE:
assertion = lmode | gmode
win.Fence(assertion)
win.Fence()
@unittest.skipMPI('openmpi(==1.8.1)')
def testFenceAll(self):
win = self.WIN
assertion = 0
modes = [0,
MPI.MODE_NOSTORE,
MPI.MODE_NOPUT,
MPI.MODE_NOPRECEDE,
MPI.MODE_NOSUCCEED]
win.Fence()
for mode in modes:
win.Fence(mode)
assertion |= mode
win.Fence(assertion)
win.Fence()
@unittest.skipMPI('openmpi(==1.8.6)')
def testStartComplete(self):
self.WIN.Start(MPI.GROUP_EMPTY)
self.WIN.Complete()
@unittest.skipMPI('openmpi(==1.8.6)')
def testPostWait(self):
self.WIN.Post(MPI.GROUP_EMPTY)
self.WIN.Wait()
@unittest.skipMPI('openmpi(==1.8.7)')
@unittest.skipMPI('openmpi(==1.8.6)')
def testStartCompletePostWait(self):
win = self.WIN
wingroup = win.Get_group()
size = wingroup.Get_size()
rank = wingroup.Get_rank()
if size < 2: return wingroup.Free()
if rank == 0:
group = wingroup.Excl([0])
win.Start(group)
win.Complete()
win.Post(group)
win.Wait()
group.Free()
else:
group = wingroup.Incl([0])
win.Post(group)
win.Wait()
win.Start(group)
win.Complete()
group.Free()
wingroup.Free()
@unittest.skipMPI('openmpi(==1.8.7)')
@unittest.skipMPI('openmpi(==1.8.6)')
def testStartCompletePostTest(self):
comm = self.COMM
win = self.WIN
wingroup = win.Get_group()
size = wingroup.Get_size()
rank = wingroup.Get_rank()
if size < 2: return wingroup.Free()
if rank == 0:
group = wingroup.Excl([0])
win.Start(group)
comm.Barrier()
win.Complete()
comm.Barrier()
group.Free()
else:
group = wingroup.Incl([0])
win.Post(group)
flag = win.Test()
self.assertFalse(flag)
comm.Barrier()
comm.Barrier()
flag = win.Test()
self.assertTrue(flag)
group.Free()
wingroup.Free()
@unittest.skipMPI('MPI(<3.0)')
def testSync(self):
win = self.WIN
comm = self.COMM
rank = comm.Get_rank()
win.Lock(rank)
win.Sync()
win.Unlock(rank)
comm.Barrier()
@unittest.skipMPI('MPI(<3.0)')
def testFlush(self):
win = self.WIN
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
#
for i in range(size):
win.Lock(i)
win.Flush(i)
win.Unlock(i)
comm.Barrier()
for i in range(size):
if i == rank:
win.Lock_all()
win.Flush_all()
win.Unlock_all()
comm.Barrier()
#
for i in range(size):
win.Lock(i)
win.Flush_local(i)
win.Unlock(i)
comm.Barrier()
for i in range(size):
if i == rank:
win.Lock_all()
win.Flush_local_all()
win.Unlock_all()
comm.Barrier()
class TestRMASelf(BaseTestRMA, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestRMAWorld(BaseTestRMA, unittest.TestCase):
COMM = MPI.COMM_WORLD
SpectrumMPI = MPI.get_vendor()[0] == 'Spectrum MPI'
try:
if SpectrumMPI: raise NotImplementedError
MPI.Win.Create(None, 1, MPI.INFO_NULL, MPI.COMM_SELF).Free()
except (NotImplementedError, MPI.Exception):
unittest.disable(BaseTestRMA, 'mpi-rma')
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| 4,210,494,767,471,415,000
| 34.86836
| 87
| 0.463203
| false
| 3.88275
| true
| false
| false
|
dragonrider7225/PythonGames
|
main.py
|
1
|
2439
|
#!py -3
from solitaire import *
import sys
def main():
opt = int(input("Which game would you like to play?\n\t0: Quit program\n" +
"\t1: Klondike\n"))
if not opt:
sys.exit(0)
if opt == 1:
game = klondike
args = []
game.set_up(*args)
game.show_board()
while True:
if game.get_result() == game.VICTORY:
print("YOU WIN!")
return
if game.get_result() == game.LOSE:
print("YOU LOSE!")
return
m = input("Move: ").split()
if game == klondike:
if m[0][0] == "s":
game.draw()
game.show_board()
elif m[0][0] == "m":
if m[1][0] == "w":
if m[2][0] == "f":
game.m()
elif m[2][0] == "l":
game.m(m[1], int(m[2][1:]))
else:
print_valid_moves(game)
continue
elif m[1][0] == "l":
if m[2][0] == "f":
game.m(int(m[1][1:]), "f")
elif m[2][0] == "l":
if len(m) == 3:
game.m(int(m[1][1:]), int(m[2][1:]))
else:
game.m(int(m[1][1:]), int(m[3]), int(m[2][1:]))
else:
print_valid_moves(game)
elif m[1][0] == "f":
if m[2][0] == "l":
game.m("f", int(m[1][1:]), int(m[2][1:]))
else:
print_valid_moves(game)
else:
print_valid_moves(game)
elif m[0][0] == "q":
sys.exit(0)
else:
print_valid_moves(game)
def print_valid_moves(game):
game.show_board()
print("Please enter a valid move:")
if game == klondike:
print("s[tock]")
print("m[ove] w[aste] f[oundation]")
print("m[ove] w[aste] lN")
print("m[ove] lN f[oundation]")
print("m[ove] lN1 lN2 C")
print("m[ove] fM lN")
print("q[uit]")
print("0 <= N* <= 6, 0 <= M <= 3, C is the number of cards", end=" ")
print("that are below the card to move from one layout", end=" ")
print("pile to another.")
if __name__ == "__main__":
main()
|
apache-2.0
| -2,390,675,310,736,939,000
| 31.959459
| 79
| 0.373514
| false
| 3.454674
| false
| false
| false
|
jaredhasenklein/the-blue-alliance
|
tbans/models/notifications/event_schedule.py
|
1
|
2021
|
import calendar
from tbans.models.notifications.notification import Notification
class EventScheduleNotification(Notification):
def __init__(self, event, next_match=None):
self.event = event
self._event_feed = event.key_name
self._district_feed = event.event_district_abbrev
if not next_match:
from helpers.match_helper import MatchHelper
upcoming = MatchHelper.upcomingMatches(event.matches, 1)
self.next_match = upcoming[0] if upcoming and len(upcoming) > 0 else None
else:
self.next_match = next_match
@classmethod
def _type(cls):
from consts.notification_type import NotificationType
return NotificationType.SCHEDULE_UPDATED
@property
def fcm_notification(self):
body = 'The {} match schedule has been updated.'.format(self.event.normalized_name)
if self.next_match and self.next_match.time:
time = self.next_match.time.strftime("%H:%M")
body += ' The next match starts at {}.'.format(time)
from firebase_admin import messaging
return messaging.Notification(
title='{} Schedule Updated'.format(self.event.event_short.upper()),
body=body
)
@property
def platform_config(self):
from tbans.consts.fcm.platform_priority import PlatformPriority
from tbans.models.fcm.platform_config import PlatformConfig
return PlatformConfig(priority=PlatformPriority.HIGH)
@property
def data_payload(self):
payload = {
'event_key': self.event.key_name
}
if self.next_match and self.next_match.time:
payload['first_match_time'] = calendar.timegm(self.next_match.time.utctimetuple())
else:
payload['first_match_time'] = None
return payload
@property
def webhook_message_data(self):
payload = self.data_payload
payload['event_name'] = self.event.name
return payload
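# Illustrative webhook payload (event key, name and timestamp are made-up values):
#     {
#         'event_key': '2019casj',
#         'first_match_time': 1553871600,
#         'event_name': 'Silicon Valley Regional',
#     }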
|
mit
| 268,706,569,295,279,700
| 32.131148
| 94
| 0.642751
| false
| 4.228033
| false
| false
| false
|
SF-Zhou/quite
|
quite/deferred_function.py
|
1
|
3424
|
from . import deferred_define
from . import Widget, QWidget
from . import QMainWindow, QDockWidget
from . import QHBoxLayout, SquareLayout
from . import WidgetController
from . import QSize, QSizeF, QPoint
from . import QPicture, QPixmap
from . import QPrinter, QPainter
@deferred_define
def set_central_widget(self: Widget, widget, del_pre_widget=True):
if isinstance(widget, WidgetController):
widget = widget.w
if not isinstance(widget, QWidget):
        raise TypeError('Only Widget or WidgetController are supported')
widget.setVisible(True) # ensure widget is visible
if hasattr(self, 'center_widget'):
self.layout().removeWidget(self.center_widget)
self.center_widget.setVisible(False) # hide pre widget, and widget can reuse
if del_pre_widget:
self.center_widget.deleteLater()
if isinstance(self, QMainWindow):
self.setCentralWidget(widget)
elif isinstance(self, QDockWidget):
self.setWidget(widget)
elif hasattr(self, 'center_widget'):
self.layout().addWidget(widget)
else:
layout = QHBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
self.setLayout(layout)
self.center_widget = widget
@deferred_define
def set_square_widget(self: Widget, widget: Widget, spacing=0):
if isinstance(widget, WidgetController):
widget = widget.w
if not isinstance(widget, QWidget):
        raise TypeError('Only Widget or WidgetController are supported')
layout = SquareLayout()
layout.setSpacing(spacing)
layout.addWidget(widget)
self.setLayout(layout)
self.center_widget = widget
@deferred_define
def set_layout_spacing(self: Widget, spacing):
layout = self.layout()
assert isinstance(layout, SquareLayout)
layout.setSpacing(spacing)
layout.update()
@deferred_define
def export_to_pdf(self: Widget, filename: str, export_size=QSize(1060, 730)):
assert isinstance(export_size, QSize)
w, h = self.size
if w > h:
self.resize(export_size.width(), export_size.height())
else:
self.resize(export_size.height(), export_size.width())
p = QPicture()
painter = QPainter(p)
self.render(painter, QPoint(0, 0))
painter.end()
printer = QPrinter()
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(filename)
if w > h:
printer.setOrientation(QPrinter.Landscape)
if export_size.width() != 1060 or export_size.height() != 730:
printer.setPageSize(QPrinter.Custom)
printer.setPaperSize(QSizeF(self.size[1] * 0.8 + 20, self.size[0] * 0.8 + 20), QPrinter.Point)
painter = QPainter()
ok = painter.begin(printer)
if ok:
painter.drawPicture(0, 0, p)
ok = painter.end()
self.resize(w, h)
return ok
@deferred_define
def export_to_bitmap(self: Widget, filename: str, export_size=QSize(1060, 730)):
if filename.endswith('pdf'):
return export_to_pdf(self, filename)
assert isinstance(export_size, QSize)
w, h = self.size
if w > h:
self.resize(export_size.width(), export_size.height())
else:
self.resize(export_size.height(), export_size.width())
p = QPixmap(*self.size)
painter = QPainter(p)
self.render(painter, QPoint(0, 0))
painter.end()
ok = p.save(filename)
self.resize(w, h)
return ok
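# Illustrative calls (file names are placeholders; panel stands for any widget
# these deferred functions are attached to):
#     export_to_pdf(panel, 'report.pdf')
#     export_to_bitmap(panel, 'report.png', export_size=QSize(800, 600))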
|
mit
| 8,475,001,357,784,681,000
| 29.571429
| 102
| 0.669977
| false
| 3.58534
| false
| false
| false
|
strongrandom/awesome-fedora
|
generate.py
|
1
|
6196
|
# https://github.com/strongrandom/awesome-fedora
#
# This is a quick-and-dirty tool to convert packages.yml into README.md. It is not pretty. It is not efficient. It is
# nowhere near my best work... but it works.
#
# Use Python 3. Install PyYAML.
#
import operator
import textwrap
import dnf
import yaml
def write_packages(file, dictionary: dict):
if len(dictionary) > 0:
packages = "dnf install "
for wp_key_package_name in sorted(dictionary, key=operator.itemgetter(0)):
packages += wp_key_package_name + " "
file.write("\n```\n")
wp_wrap = textwrap.TextWrapper(break_long_words=False, break_on_hyphens=False, width=70,
initial_indent='', subsequent_indent=' ')
wp_lines = wp_wrap.wrap(packages)
count = 0
for wp_line in wp_lines:
file.write(wp_line)
if len(wp_lines) > 1 and count < len(wp_lines) - 1:
file.write(" \\\n")
else:
file.write("\n")
count += 1
file.write("```\n\n")
file.write("[back to index](#index)\n\n")
def parse_yaml(yaml_data_sorted, file_output):
for key_category, value_dict_packages in yaml_data_sorted:
if "__index" in key_category:
# *__index indicates where to put the index of groups
build_index(file_output, yaml_data_sorted)
elif "__" in key_category:
# __ in the key indicates plain text to output
file_output.write("{}\n\n".format(value_dict_packages))
elif type(value_dict_packages) is dict:
# otherwise, if a dict, process the group
file_output.write("***\n## {}\n".format(key_category))
build_group(file_output, value_dict_packages)
def build_group(file_output, value_dict_packages):
dict_group = dict()
dict_packages_sorted = sorted(value_dict_packages.items(), key=operator.itemgetter(0))
for key_package_name, value_package_attributes in dict_packages_sorted:
if "__" in key_package_name:
file_output.write("\n {}\n\n".format(value_package_attributes))
else:
dnf_result = dnf_query_available.filter(name=key_package_name)
if len(dnf_result) > 0:
                # Ignore multiple results, just take the first (dnf_result[0])
build_package_entry(dict_group, dnf_result[0], file_output, key_package_name, value_package_attributes)
else:
print(" Package not found in DNF:", key_package_name)
write_packages(file_output, dict_group)
def build_package_entry(dict_group, dnf_package, file_output, key_package_name, value_package_attributes):
description = dnf_package.description
url = dnf_package.url
# Look up the repository and mark those from external repos
repository = dnf_package.repo.id
if repository == "fedora":
repository = ""
elif repository == "updates":
repository = ""
else:
repository = "{} ".format(repository)
if type(value_package_attributes) is dict:
if type(value_package_attributes.get("description")) is str:
description = value_package_attributes.get("description")
if type(value_package_attributes.get("url")) is str:
url = value_package_attributes.get("url")
if value_package_attributes.get("essential"):
dict_essential[key_package_name] = ""
# Hack in essential package checkmark
repository = "\u2713 " + repository
file_output.write(" * **{}[{}]({}): {}**\n".format(repository, key_package_name, url,
dnf_package.summary))
# Process the description field. Ugly, but works.
description = description.strip()
description = description.replace("\n", " ")
while " " in description:
description = description.replace(' ', ' ')
description = description.replace('`', "'")
description = description.replace('*', "'")
description = description.replace('>', ">")
description = description.replace('<', "<")
# Wrap and write out the description
wrap = textwrap.TextWrapper(width=70, max_lines=10,
initial_indent=' ', subsequent_indent=' ')
lines = wrap.wrap(description)
file_output.write("\n")
for line in lines:
file_output.write(line)
file_output.write("\n")
file_output.write("\n")
# Add to the dictionaries
dict_all[key_package_name] = ""
dict_group[key_package_name] = ""
def build_index(file_output, yaml_data_sorted):
# Hack to build an index
file_output.write("\n")
file_output.write("## Index\n")
for index_category, _ in yaml_data_sorted:
if "_" not in index_category:
index_link = str.lower(index_category)
index_link = index_link.replace(' ', '-')
index_link = index_link.replace('/', '')
index_link = index_link.replace('+', '')
file_output.write("- [{}](#{})\n".format(index_category, index_link))
file_output.write("\n")
if __name__ == '__main__':
print("Loading YAML ...")
stream = open('packages.yml', 'r')
yaml_data = yaml.load(stream)
file_output = open('README.md', 'w')
dict_all = dict()
dict_essential = dict()
with dnf.Base() as dnf_base:
print("Setting up DNF ...")
# DNF boilerplate, see http://dnf.readthedocs.io/en/latest/use_cases.html#id3
dnf_base.read_all_repos()
dnf_base.fill_sack()
dnf_query = dnf_base.sack.query()
dnf_query_available = dnf_query.available()
yaml_data_sorted = sorted(yaml_data.items(), key=operator.itemgetter(0))
print("Parsing ...")
parse_yaml(yaml_data_sorted, file_output)
file_output.write("# Everything #\n")
file_output.write("\nAll {} packages:\n".format(len(dict_all)))
write_packages(file_output, dict_all)
if len(dict_essential) > 0:
file_output.write("# Essential #\n")
write_packages(file_output, dict_essential)
file_output.close()
|
mit
| 2,793,258,951,590,367,000
| 33.614525
| 119
| 0.596191
| false
| 3.764277
| false
| false
| false
|
CiscoDevNet/netconf-examples
|
netconf-102/get_config_csr1000V.py
|
1
|
1234
|
#!/usr/bin/python
#
# Get configured interfaces using Netconf
#
# darien@sdnessentials.com
#
from ncclient import manager
import sys
import xml.dom.minidom
# the variables below assume the user is requesting access
# to an IOS-XE device running in the DevNet Always On Sandbox
# use the IP address or hostname of your IOS-XE device
HOST = 'ios-xe-mgmt.cisco.com'
# use the NETCONF port for your IOS-XE device
PORT = 10000
# use the user credentials for your IOS-XE device
USER = 'root'
PASS = 'C!sc0123'
# XML file to open
FILE = 'get_interfaces.xml'
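# get_interfaces.xml is expected to hold a NETCONF subtree filter; an illustrative
# (unverified) example targeting the IETF interfaces model would be:
#     <filter>
#       <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces"/>
#     </filter>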
# create a main() method
def get_configured_interfaces():
"""Main method that retrieves the interfaces from config via NETCONF."""
with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,
hostkey_verify=False, device_params={'name': 'default'},
allow_agent=False, look_for_keys=False) as m:
with open(FILE) as f:
return(m.get_config('running', f.read()))
def main():
"""Simple main method calling our function."""
interfaces = get_configured_interfaces()
print(xml.dom.minidom.parseString(interfaces.xml).toprettyxml())
if __name__ == '__main__':
sys.exit(main())
|
apache-2.0
| 4,805,675,626,811,635,000
| 27.045455
| 81
| 0.679092
| false
| 3.587209
| false
| false
| false
|
redixin/keyserver-ng
|
keyserver/key.py
|
1
|
1886
|
from datetime import datetime
import gpgme
from io import BytesIO
class PublicKey:
def __init__(self, fp):
if isinstance(fp, str):
fp = BytesIO(bytes(fp, "ascii"))
elif isinstance(fp, bytes):
fp = BytesIO(fp)
self.ctx = gpgme.Context()
self.ctx.armor = False
self._import = self.ctx.import_(fp)
self.fpr = self._import.imports[0][0]
self._key = self.ctx.get_key(self.fpr)
def __repr__(self):
return "<PublicKey: %s <%s>>: %s" % (self.name, self.email, self.fpr)
def encrypt(self, data):
data = BytesIO(bytes(data, "ascii"))
ciphertext = BytesIO()
self.ctx.encrypt([self._key], gpgme.ENCRYPT_ALWAYS_TRUST,
data, ciphertext)
ciphertext.seek(0)
return ciphertext.read()
def export(self, fp):
self.ctx.export(self.fpr, fp)
@property
def key(self):
fp = BytesIO()
self.export(fp)
fp.seek(0)
return fp.read()
@property
def expire(self):
expire = 0
for sk in self._key.subkeys:
if sk.expires > expire:
expire = sk.expires
if expire:
return datetime.fromtimestamp(expire)
@property
def ids(self):
return [sk.keyid for sk in self._key.subkeys]
@property
def name(self):
return self._key.uids[0].name
@property
def email(self):
return self._key.uids[0].email
class PrivateKey:
def __init__(self, data):
self.ctx = gpgme.Context()
self.ctx.armor = False
self._import = self.ctx.import_(BytesIO(bytes(data, "ascii")))
def decrypt(self, data):
plaintext = BytesIO()
ciphertext = BytesIO(data)
self.ctx.decrypt(ciphertext, plaintext)
plaintext.seek(0)
return plaintext.read()
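# Illustrative round trip (pub_key_text / priv_key_text stand for ASCII-armored
# key material available to the local gpg keyring; both names are placeholders):
#     pub = PublicKey(pub_key_text)
#     ciphertext = pub.encrypt("hello")
#     priv = PrivateKey(priv_key_text)
#     assert priv.decrypt(ciphertext) == b"hello"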
|
apache-2.0
| 8,411,964,592,434,458,000
| 24.486486
| 77
| 0.560445
| false
| 3.779559
| false
| false
| false
|
maxis1314/pyutils
|
web/views/user.py
|
1
|
1484
|
# coding: utf-8
from flask import Flask,request,session,g,redirect,url_for,Blueprint
from flask import abort,render_template,flash
from helpers import getAvatar
import config
#from .base import BaseHandler
import base
config = config.rec()
user = Blueprint('user', __name__)
import pika
#class LoginHandler(BaseHandler):
@user.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
if base.isAdmin():
return redirect("/")
else:
return render_template("login.html",getAvatar=getAvatar)
username = request.form['username']
password = request.form['password']
#connection = pika.BlockingConnection(pika.ConnectionParameters(
# host='localhost'))
#channel = connection.channel()
#channel.queue_declare(queue='hello')
#channel.basic_publish(exchange='',
# routing_key='hello',
# body=u'u:'+username+' p:'+password)
#print(" [x] Sent 'RABBITQUEUE'")
#connection.close()
if base.userAuth(username, password):
flash('You were successfully logged in')
base.currentUserSet(username)
return redirect("/posts/")
else:
flash('User name or password error','error')
return redirect("/user/login")
#class LogoutHandler(BaseHandler):
@user.route('/logout')
def logout():
session.pop('user',None)
flash('You were successfully logged out')
return redirect('/user/login')
|
apache-2.0
| -7,283,646,547,762,287,000
| 29.285714
| 69
| 0.644879
| false
| 3.957333
| false
| false
| false
|
respawner/peering-manager
|
peering/migrations/0050_auto_20190806_2159.py
|
1
|
3436
|
# Generated by Django 2.2.4 on 2019-08-06 19:59
import taggit.managers
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("taggit", "0003_taggeditem_add_unique_index"),
("peering", "0049_auto_20190731_1946"),
]
operations = [
migrations.AddField(
model_name="autonomoussystem",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="bgpgroup",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="community",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="directpeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="internetexchange",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="internetexchangepeeringsession",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="router",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="routingpolicy",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="template",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="taggit.TaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
]
|
apache-2.0
| 1,025,210,879,864,040,000
| 31.72381
| 60
| 0.489814
| false
| 4.655827
| false
| false
| false
|
lquirosd/TFM
|
ILA/code/trainGMM.py
|
1
|
5034
|
from __future__ import division
import sys, argparse #--- To handle console arguments
import numpy as np #--- To handle math processing
import scipy.ndimage as ndi #--- To handle image processing
from scipy import misc
import glob, os #--- To handle OS callbacks
import utils
from sklearn import mixture
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
try:
import cPickle as pickle
except:
import pickle
def main():
"""
#---------------------------------------------------------------------------#
#--- main ---#
#---------------------------------------------------------------------------#
Description:
main module
Inputs:
#--- To be updated
Outputs:
#--- To be updated
Author:
Quiros Diaz, Lorenzo
Date:
Jun/20/2016
#------------------------------------------------------------------------------#
"""
#--- processing arguments
    parser = argparse.ArgumentParser(description='GMM trainer')
parser.add_argument('-trDir', required=True, action="store", help="Pointer to Training images folder")
parser.add_argument('-o', '--out', required=True, default=".", action="store", help="Folder to save Out files")
parser.add_argument('-nU', '--nUpper', type=int, default=2, action="store", help="Number of Mixtures for Upper Model [Default=2]")
parser.add_argument('-nB', '--nBottom', type=int, default=3, action="store", help="Number of Mixtures for Bottom Model [Default=3]")
parser.add_argument('-s', '--statistics', action="store_true", help="Print some statistics about script execution")
parser.add_argument('-p', '--plot', action="store_true", help="Show plot on window")
parser.add_argument('--debug', action="store_true", help="Run script on Debugging mode")
args = parser.parse_args()
if (args.debug): print args
if (args.statistics): init = time.clock()
#--- Validate arguments
if (not os.path.isdir(args.trDir)):
print "Folder: %s does not exists\n" %args.trDir
parser.print_help()
sys.exit(2)
if (not os.path.isdir(args.out)):
print "Folder: %s does not exists\n" %args.out
parser.print_help()
sys.exit(2)
#--- Read images
allImgs = glob.glob(args.trDir + "/*.jpg")
nImgs = len(allImgs)
if nImgs <= 0:
print "Folder: %s contains no images\n" %args.trDir
parser.print_help()
sys.exit(2)
if (args.statistics): GPinit = time.clock()
#--- keep all image data, just to check memory usage
#--- TODO: remove unnecessary data on each iteration
imgData = np.empty(nImgs, dtype=object)
#--- Array of Upper corners
U = np.zeros((nImgs, 2), dtype=np.int)
#--- Array of Bottom corners
B = np.zeros((nImgs, 2), dtype=np.int)
#--- get U & B corners from all TR dataSet
for i, file in enumerate(allImgs):
print "Working on {0:}".format(file)
imgData[i] = utils.imgPage(file)
#imgData[i].readImage()
imgData[i].parseXML()
U[i] = imgData[i].getUpperPoints()
B[i] = imgData[i].getBottomPoints()
if (args.statistics): print 'Getting Data Points: {0:.5f} seconds'.format(time.clock() - GPinit)
if (args.statistics): TGinit = time.clock()
#--- Train GMM Models
#--- Upper GMM
uGMM = mixture.GMM(n_components = args.nUpper)
uGMM.fit(U)
#--- Bottom GMM
bGMM = mixture.GMM(n_components = args.nBottom, covariance_type='diag')
bGMM.fit(B)
GMM_models = {'Upper': uGMM, 'Bottom': bGMM}
#--- Save Models to file
#--- Out File Name
outFile = args.out + 'GMM_tr' + str(nImgs) + '_u' + str(args.nUpper) + '_b' + str(args.nBottom)
fh = open(outFile + '.model', 'w')
pickle.dump(GMM_models, fh)
fh.close()
if (args.statistics): print 'Training GMM: {0:.5f} seconds'.format(time.clock() - TGinit)
#--- Plot Mixtures and Data
m=9
imgData[m].readImage(full=True)
fig, axs = plt.subplots(1,1)
axs.scatter(U[:, 0], U[:, 1], .8, color='red')
axs.scatter(B[:, 0], B[:, 1], .8, color='blue')
x = np.linspace(0, imgData[m].imgShape[1])
y = np.linspace(0, imgData[m].imgShape[0])
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
uZ = -uGMM.score_samples(XX)[0]
uZ = uZ.reshape(X.shape)
bZ = -bGMM.score_samples(XX)[0]
bZ = bZ.reshape(X.shape)
CSu = axs.contour(X, Y, uZ, norm=LogNorm(vmin=np.min(uZ), vmax=np.max(uZ)),
levels=np.logspace(0, 3, 20))
CSb = axs.contour(X, Y, bZ, norm=LogNorm(vmin=np.min(bZ), vmax=np.max(bZ)),
levels=np.logspace(0, 3, 20))
#axs.clabel(CS, inline=1, fontsize=10)
CB = plt.colorbar(CSu, ax=axs, extend='both')
axs.imshow(imgData[m].img, cmap='gray')
plt.axis('off')
fig.savefig(outFile + '.png', bbox_inches='tight')
if (args.statistics): print 'Total Time: {0:.5f} seconds'.format(time.clock() - init)
if (args.plot): plt.show()
if __name__ == '__main__':
main()
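# Illustrative invocation (paths are placeholders):
#     python trainGMM.py -trDir ./train_pages -o ./models/ -nU 2 -nB 3 -s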
|
apache-2.0
| -933,522,224,188,337,200
| 36.288889
| 135
| 0.591577
| false
| 3.212508
| false
| false
| false
|
offlinehacker/flumotion
|
flumotion/test/test_wizard_save.py
|
1
|
43842
|
# -*- Mode: Python; test-case-name: flumotion.test.test_wizard_models -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import unittest
from kiwi.python import Settable
from flumotion.admin.assistant.configurationwriter import ConfigurationWriter
from flumotion.admin.assistant.models import Component, Plug, Porter, \
AudioProducer, VideoProducer, AudioEncoder, VideoEncoder, HTTPServer
from flumotion.admin.assistant.save import AssistantSaver
from flumotion.common import testsuite
from flumotion.common.xmlwriter import XMLWriter
from flumotion.configure import configure
from flumotion.component.producers.firewire.wizard_gtk import FireWireProducer
from flumotion.component.consumers.httpstreamer.wizard_gtk import HTTPStreamer
from flumotion.component.encoders.vorbis.wizard_gtk import VorbisAudioEncoder
from flumotion.component.encoders.theora.wizard_gtk import TheoraVideoEncoder
from flumotion.component.producers.videotest.wizard_gtk import \
TestVideoProducer
from flumotion.component.producers.audiotest.wizard_gtk import \
TestAudioProducer
from flumotion.admin.gtk.overlaystep import Overlay
class TestXMLWriter(testsuite.TestCase):
def testEmpty(self):
writer = ConfigurationWriter('', [], [])
testsuite.diffStrings(
XMLWriter.encoding + \
("<planet>\n"
"</planet>\n"),
writer.getXML())
def testFlowComponent(self):
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
writer = ConfigurationWriter('flow', [c], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testAtmosphereComponent(self):
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
c.properties.foo = 'bar'
writer = ConfigurationWriter('', [], [c])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="foo">bar</property>\n'
' </component>\n'
' </atmosphere>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testComponentWithPlug(self):
c = Component()
c.name = 'name'
c.componentType = 'streamer'
c.worker = 'worker'
plug = Plug()
plug.plugType = 'plug-type'
plug.properties.foo = 'bar'
c.plugs.append(plug)
writer = ConfigurationWriter('flow', [c], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="streamer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <plugs>\n'
' <plug type="plug-type">\n'
' \n'
' <property name="foo">bar</property>\n'
' </plug>\n'
' </plugs>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
def testComponentWithFeeders(self):
c1 = Component()
c1.name = 'name'
c1.componentType = 'first'
c1.worker = 'worker'
c2 = Component()
c2.name = 'name'
c2.componentType = 'second'
c2.worker = 'worker'
c2.link(c1)
writer = ConfigurationWriter('flow', [c1, c2], [])
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <flow name="flow">\n'
' <component name="name"\n'
' type="first"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>name</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="name"\n'
' type="second"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
writer.getXML())
class TestWizardSave(testsuite.TestCase):
def _createAudioProducer(self, componentType='audio-producer',
worker='audio-producer-worker'):
audioProducer = AudioProducer()
audioProducer.componentType = componentType
audioProducer.worker = worker
return audioProducer
def _createVideoProducer(self, componentType='video-producer',
worker='video-producer-worker'):
videoProducer = VideoProducer()
videoProducer.componentType = componentType
videoProducer.worker = worker
videoProducer.properties.width = 640
videoProducer.properties.height = 480
return videoProducer
def _createVideoOverlay(self, videoProducer):
overlay = Overlay(videoProducer)
overlay.worker = 'overlay-worker'
return overlay
def _createAudioEncoder(self):
audioEncoder = AudioEncoder()
audioEncoder.componentType = 'audio-encoder'
audioEncoder.worker = 'audio-encoder-worker'
return audioEncoder
def _createVideoEncoder(self):
videoEncoder = VideoEncoder()
videoEncoder.componentType = 'video-encoder'
videoEncoder.worker = 'video-encoder-worker'
return videoEncoder
def _createPorter(self):
return Porter('porter-worker',
port=8080,
username='username',
password='password',
socketPath='flu-XXXX.socket')
def _createHTTPStreamer(self):
streamer = HTTPStreamer()
streamer.worker = 'streamer-worker'
return streamer
def _createFirewireProducer(self):
producer = FireWireProducer()
producer.worker = 'firewire-video-producer-worker'
producer.properties.width = 640
producer.properties.height = 480
return producer
def testDefaultStream(self):
save = AssistantSaver()
save.setFlowName('flow')
save.setAudioProducer(self._createAudioProducer())
videoProducer = self._createVideoProducer()
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audio-producer"\n'
' project="flumotion"\n'
' worker="audio-producer-worker"\n'
' version="%(version)s">\n'
' </component>\n'
' <component name="producer-video"\n'
' type="video-producer"\n'
' project="flumotion"\n'
' worker="video-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">480</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testMultiFeedProducer(self):
save = AssistantSaver()
save.setFlowName('flow')
save.setAudioProducer(self._createAudioProducer(
worker='both-producer-worker',
componentType='both-producer'))
save.setVideoProducer(self._createVideoProducer(
componentType='both-producer',
worker='both-producer-worker'))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio-video"\n'
' type="both-producer"\n'
' project="flumotion"\n'
' worker="both-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">480</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testOggStream(self):
save = AssistantSaver()
save.setFlowName('flow')
audioProducer = TestAudioProducer()
audioProducer.worker = 'worker'
save.setAudioProducer(audioProducer)
videoProducer = TestVideoProducer()
videoProducer.worker = 'worker'
videoProducer.properties.width = 320
videoProducer.properties.height = 240
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
audioEncoder = VorbisAudioEncoder()
audioEncoder.worker = 'worker'
save.setAudioEncoder(audioEncoder)
videoEncoder = TheoraVideoEncoder()
videoEncoder.worker = 'worker'
save.setVideoEncoder(videoEncoder)
save.setMuxer('ogg-muxer', 'muxer-worker')
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audiotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="samplerate">44100</property>\n'
' </component>\n'
' <component name="producer-video"\n'
' type="videotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="height">240</property>\n'
' <property name="pattern">0</property>\n'
' <property name="width">320</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video</feed>\n'
' </eater>\n'
' \n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' <property name="xiph-logo">True</property>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="theora-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">400000</property>\n'
' <property name="keyframe-maxdistance">50</property>\n'
' <property name="speed">3</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="vorbis-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">64000</property>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="ogg-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testAudioOnlyStream(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
audioProducer = TestAudioProducer()
audioProducer.worker = 'worker'
save.setAudioProducer(audioProducer)
audioEncoder = VorbisAudioEncoder()
audioEncoder.worker = 'worker'
save.setAudioEncoder(audioEncoder)
videoProducer = self._createVideoEncoder()
self.assertRaises(ValueError, save.setVideoOverlay,
self._createVideoOverlay(videoProducer))
save.setMuxer('ogg-muxer', 'muxer')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="audiotest-producer"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="samplerate">44100</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="vorbis-encoder"\n'
' project="flumotion"\n'
' worker="worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="bitrate">64000</property>\n'
' </component>\n'
' <component name="muxer-audio"\n'
' type="ogg-muxer"\n'
' project="flumotion"\n'
' worker="muxer"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testFirewireStreamer(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
producer = self._createFirewireProducer()
save.setAudioProducer(producer)
save.setVideoProducer(producer)
save.setVideoOverlay(self._createVideoOverlay(producer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
streamer = self._createHTTPStreamer()
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio-video"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="firewire-video-producer-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video:video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio-video:audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testFirewireStreamerDifferentWorkers(self):
save = AssistantSaver()
porter = self._createPorter()
save.addPorter(porter, 'audio-video')
save.setFlowName('flow')
audioProducer = self._createFirewireProducer()
audioProducer.worker = 'audio-worker'
save.setAudioProducer(audioProducer)
videoProducer = self._createFirewireProducer()
videoProducer.worker = 'video-worker'
save.setVideoProducer(videoProducer)
save.setVideoOverlay(self._createVideoOverlay(videoProducer))
save.setAudioEncoder(self._createAudioEncoder())
save.setVideoEncoder(self._createVideoEncoder())
save.setMuxer('default-muxer', 'muxer-worker')
streamer = self._createHTTPStreamer()
streamer.has_bandwidth_limit = True
streamer.bandwidth_limit = 123
streamer.setPorter(porter)
save.addConsumer(streamer, 'audio-video')
server = HTTPServer('server-worker', '/mount/')
save.addServerConsumer(server, 'audio-video')
save.setUseCCLicense(True)
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-audio-video"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount/</property>\n'
' </component>\n'
' <component name="porter-audio-video"\n'
' type="porter"\n'
' project="flumotion"\n'
' worker="porter-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="password">password</property>\n'
' <property name="port">8080</property>\n'
' <property name="socket-path">flu-XXXX.socket</property>\n'
' <property name="username">username</property>\n'
' </component>\n'
' </atmosphere>\n'
' <flow name="flow">\n'
' <component name="producer-audio"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="audio-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="producer-video"\n'
' type="firewire-producer"\n'
' project="flumotion"\n'
' worker="video-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="decoder">ffdec_dvvideo</property>\n'
' <property name="deinterlace-method">ffmpeg</property>\n'
' <property name="deinterlace-mode">auto</property>\n'
' <property name="framerate">25/2</property>\n'
' <property name="height">480</property>\n'
' <property name="is-square">True</property>\n'
' <property name="width">640</property>\n'
' </component>\n'
' <component name="overlay-video"\n'
' type="overlay-converter"\n'
' project="flumotion"\n'
' worker="overlay-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-video:video</feed>\n'
' </eater>\n'
' \n'
' <property name="cc-logo">True</property>\n'
' <property name="fluendo-logo">True</property>\n'
' <property name="show-text">True</property>\n'
' <property name="text">Flumotion</property>\n'
' </component>\n'
' <component name="encoder-audio"\n'
' type="audio-encoder"\n'
' project="flumotion"\n'
' worker="audio-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>producer-audio:audio</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="encoder-video"\n'
' type="video-encoder"\n'
' project="flumotion"\n'
' worker="video-encoder-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>overlay-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="muxer-audio-video"\n'
' type="default-muxer"\n'
' project="flumotion"\n'
' worker="muxer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>encoder-audio</feed>\n'
' <feed>encoder-video</feed>\n'
' </eater>\n'
' </component>\n'
' <component name="http-audio-video"\n'
' type="http-streamer"\n'
' project="flumotion"\n'
' worker="streamer-worker"\n'
' version="%(version)s">\n'
' <eater name="default">\n'
' <feed>muxer-audio-video</feed>\n'
' </eater>\n'
' \n'
' <property name="bandwidth-limit">123000000</property>\n'
' <property name="burst-on-connect">False</property>\n'
' <property name="port">8080</property>\n'
' <property name="porter-password">password</property>\n'
' <property name="porter-socket-path">flu-XXXX.socket'
'</property>\n'
' <property name="porter-username">username</property>\n'
' <property name="type">slave</property>\n'
' </component>\n'
' </flow>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
def testOndemand(self):
save = AssistantSaver()
server = HTTPServer('ondemand-server-worker', '/mount-point/')
save.addServerConsumer(server, 'ondemand')
configuration = save.getXML()
testsuite.diffStrings(
XMLWriter.encoding + \
('<planet>\n'
' <atmosphere>\n'
' <component name="http-server-ondemand"\n'
' type="http-server"\n'
' project="flumotion"\n'
' worker="ondemand-server-worker"\n'
' version="%(version)s">\n'
' \n'
' <property name="mount-point">/mount-point/</property>\n'
' </component>\n'
' </atmosphere>\n'
'</planet>\n' % dict(version=configure.version)),
configuration)
class TestNameConflicts(testsuite.TestCase):
def setUp(self):
self.save = AssistantSaver()
def _addServer(self, name):
server = HTTPServer('ondemand-server-worker', '/mount-point/')
self.save.addServerConsumer(server, name)
def testNameConflicts(self):
self.save.setExistingComponentNames(['http-server-ondemand'])
self._addServer('ondemand')
self.save.getXML()
components = self.save.getAtmosphereComponents()
self.assertEquals(components[0].name, 'http-server-ondemand2')
def testNameConflictsDoubleDigits(self):
componentNames = ['http-server-ondemand'] + [
'http-server-ondemand%d' % i for i in range(2, 10)]
self.save.setExistingComponentNames(componentNames)
self._addServer('ondemand')
self.save.getXML()
components = self.save.getAtmosphereComponents()
self.assertEquals(components[0].name, 'http-server-ondemand10')
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| -6,053,539,841,310,184,000
| 42.92986
| 78
| 0.467543
| false
| 4.172647
| true
| false
| false
|
mikicaivosevic/flask-cassandra-sessions
|
tests/test_session.py
|
1
|
2092
|
import json
from time import sleep
from unittest import TestCase
from cassandra.cluster import Cluster
from flask import Flask, session
from cassandra_flask_sessions import CassandraSessionInterface, AbstractConnectionProvider
class ConnectionProvider(AbstractConnectionProvider):
_connection = Cluster(['127.0.0.1']).connect('tests')
def get_connection(self):
return self._connection
_app = Flask(__name__)
_app.session_interface = CassandraSessionInterface(ConnectionProvider())
@_app.route('/set/<name>')
def set_session(name):
session['name'] = name
return 'ok'
@_app.route('/get')
def get_session():
return json.dumps(dict(session))
@_app.route('/delete')
def delete_session():
session.clear()
return 'ok'
_app.testing = True
_app.app_context().push()
class TestCassandraSessionInterface(TestCase):
def test_set_get_delete(self):
name = 'Mikica'
with _app.test_client() as client:
session_data = client.get('/set/%s' % name)
client.set_cookie('localhost', session_data.headers[2][0], session_data.headers[2][1])
session_data = client.get('/get')
self.assertEqual(json.dumps({'name': name}), session_data.data)
client.get('/delete')
session_data = client.get('/get')
self.assertEqual('{}', session_data.data)
def test_lifetime_interval(self):
name = 'Mikica'
session_lifetime = _app.config['PERMANENT_SESSION_LIFETIME']
_app.config.update({'PERMANENT_SESSION_LIFETIME': 1})
with _app.test_client() as client:
session_data = client.get('/set/%s' % name)
client.set_cookie('localhost', session_data.headers[2][0], session_data.headers[2][1])
session_data = client.get('/get')
self.assertEqual(json.dumps({'name': name}), session_data.data)
sleep(2)
session_data = client.get('/get')
self.assertEqual('{}', session_data.data)
_app.config.update({'PERMANENT_SESSION_LIFETIME': session_lifetime})
|
mit
| -7,179,838,590,307,754,000
| 26.168831
| 98
| 0.639101
| false
| 3.769369
| true
| false
| false
|
PapenfussLab/MHC-clogs
|
bin/fasta_rename_by_pos.py
|
1
|
1734
|
#!/usr/bin/env python
"""
fasta_rename_by_pos.py
"""
from argparse import ArgumentParser
from mungolite.fasta import FastaFile
from srt.intervals import GenomeIntersector
def get_position(tokens):
for token in tokens:
if "Position" in token:
for delim in ["=", ":", "(", ")"]:
token = token.replace(delim, " ")
tokens2 = token.split()
chrom = tokens2[1]
pos = tokens2[2]
strand = tokens2[3]
start, end = [int(x) for x in pos.split("-")]
return chrom, start, end, strand
raise Exception("Could not find position")
parser = ArgumentParser()
parser.add_argument("data_filename", type=str, help="Data filename")
parser.add_argument("input_filename", type=str, help="Input filename")
parser.add_argument("output_filename", type=str, help="Output filename")
args = parser.parse_args()
intersector = GenomeIntersector()
# >mdUT1 Chain=chain39 Position=1:705246778-705258088(+) GeneID=None ProteinID=None Score=82.8 E-value=6.8e-24 Length=11311 Comment=No overlapping annotations
for h,s in FastaFile(args.data_filename):
tokens = h.split()
name = tokens[0]
chrom, start, end, strand = get_position(tokens)
intersector.add((chrom, strand), start, end, name)
output_filename = FastaFile(args.output_filename, "w")
for h,s in FastaFile(args.input_filename):
tokens = h.split()
name = tokens[0]
chrom, start, end, strand = get_position(tokens)
rs = intersector.find((chrom, strand), start, end)
if rs:
new_name = rs[0].value
print "fasta_rename_UTs_by_pos.py:", name, new_name
h = "%s %s" % (new_name, " ".join(tokens[1:]))
output_filename.write(h, s)
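# A rough usage sketch (hypothetical file names, not from the original script):
# the data file supplies named records carrying Position=chrom:start-end(strand)
# tokens, and records in the input file are renamed wherever their positions
# overlap, e.g.
#
#   python fasta_rename_by_pos.py named_seqs.fa unnamed_seqs.fa renamed.fa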
|
artistic-2.0
| -8,437,597,413,394,435,000
| 30.527273
| 158
| 0.643022
| false
| 3.296578
| false
| false
| false
|
ibab/tensorprob
|
tensorprob/utilities.py
|
1
|
1469
|
from collections import defaultdict, Iterable
import itertools
import numpy as np
import tensorflow as tf
from six.moves import zip_longest
NAME_COUNTERS = defaultdict(lambda: 0)
def generate_name(obj):
"""Generate a unique name for the object in question
Returns a name of the form "{calling_class_name}_{count}"
"""
global NAME_COUNTERS
calling_name = obj.__name__
NAME_COUNTERS[calling_name] += 1
return '{0}_{1}'.format(calling_name, NAME_COUNTERS[calling_name])
class classproperty(object):
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner)
def grouper(iterable, n=2, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def flatten(l):
"""Recursivly flattens a interable argument, ignoring strings and bytes.
Taken from: http://stackoverflow.com/a/2158532
"""
for el in l:
if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
for sub in flatten(el):
yield sub
else:
yield el
def is_finite(obj):
return isinstance(obj, tf.Tensor) or np.isfinite(obj)
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
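# A few illustrative calls (worked by hand from the definitions above; not part
# of the original module):
#
#   list(grouper('ABCDE', n=2))            # [('A', 'B'), ('C', 'D'), ('E', None)]
#   list(flatten([1, [2, [3, 'ab']], 4]))  # [1, 2, 3, 'ab', 4] -- strings stay whole
#   list(pairwise([1, 2, 3]))              # [(1, 2), (2, 3)]
#   generate_name(dict)                    # e.g. 'dict_1', then 'dict_2' on the next call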
|
mit
| 3,968,733,909,219,394,000
| 23.081967
| 76
| 0.639891
| false
| 3.556901
| false
| false
| false
|
rseedorff/pi-kiosk
|
helloWorld.py
|
1
|
5178
|
from flask import Flask, jsonify
import datetime
import sys
import time
import atexit
import RPi.GPIO as GPIO
app = Flask(__name__)
#####################################################
# Initialise GPIO Board and setup all pins #
#####################################################
# Green LED at Pin 7
LED_GREEN = 7
LED_RED = 11
PIR = 12
GPIO.setmode(GPIO.BOARD) ## Use board pin numbering
GPIO.setup(LED_GREEN, GPIO.OUT, initial=GPIO.LOW) ## Setup GPIO Pin LED_GREEN to OUT (3.3V)
GPIO.setup(LED_RED, GPIO.OUT, initial=GPIO.LOW) ## Setup GPIO Pin LED_RED to OUT (3.3V)
GPIO.setup(PIR, GPIO.IN) ## Setup GPIO Pin PIR to IN
# Initialise PIT states
STATE_PIR_CURRENT = 0
STATE_PIR_LAST = 0
#####################################################
# REST Services #
#####################################################
# This route will return an object in JSON format
@app.route('/api/pir')
def pir():
    global STATE_PIR_CURRENT, STATE_PIR_LAST
    try:
        print "%s: Initialising sensor ..." % datetime.datetime.now()
        # wait for the PIR sensor to settle
        while (GPIO.input(PIR) == GPIO.HIGH):
            STATE_PIR_CURRENT = 0
        print "%s: Done! Waiting for movement ..." % datetime.datetime.now()
        for i in range(0, 500):
            STATE_PIR_CURRENT = GPIO.input(PIR)
            print "Iteration " + str(i+1) + " current state: " + str(STATE_PIR_CURRENT)
            if (STATE_PIR_CURRENT == 1 and STATE_PIR_LAST == 0):
                print "%s: Movement detected! ..." % datetime.datetime.now()
                STATE_PIR_LAST = 1
            elif (STATE_PIR_CURRENT == 0 and STATE_PIR_LAST == 1):
                print "%s: Movement ended! ..." % datetime.datetime.now()
                STATE_PIR_LAST = 0
            time.sleep(0.05)  ## Wait for sleep seconds
        print " Done !"
    except KeyboardInterrupt:
        print "exit ..."
        GPIO.cleanup()
    return jsonify(result='Hello PIR !')
# This route will return an object in JSON format
@app.route('/')
def index():
now = datetime.datetime.now()
return jsonify(result='Hello World !')
# This route will turn on the LED_GREEN
@app.route('/api/led/on')
def led_on():
try:
if ( GPIO.input(LED_GREEN) == GPIO.LOW ):
print "Turn LED_GREEN 'ON' at PIN: '"+ str(LED_GREEN) +"' !"
GPIO.output(LED_GREEN, True) ## Turn on GPIO pin LED_GREEN, if it's off
else:
print "LED_GREEN is already 'ON' at PIN: '"+ str(LED_GREEN) +"' !"
except:
## do some logging...
GPIO.cleanup()
print "Unexpected error: ", sys.exc_info()[0]
return jsonify(led='on', pin=LED_GREEN)
# This route will turn off the LED_GREEN
@app.route('/api/led/off')
def led_off():
try:
if ( GPIO.input(LED_GREEN) == GPIO.HIGH ):
print "Turn LED_GREEN 'OFF' at PIN: '"+ str(LED_GREEN) +"' !"
GPIO.output(LED_GREEN, False) ## Turn off GPIO pin LED_GREEN, if it's on
else:
print "LED_GREEN is already 'OFF' at PIN: '"+ str(LED_GREEN) +"' !"
except:
## do some logging...
GPIO.cleanup()
print "Unexpected error: ", sys.exc_info()[0]
return jsonify(led='off', pin=LED_GREEN)
# This route will toggle the LED_GREEN
@app.route('/api/led/toggle')
def toggle():
result = 'Hello Toggle !'
try:
if ( GPIO.input(LED_GREEN) == GPIO.HIGH ):
print "Toggle LED_GREEN ON!"
GPIO.output(LED_GREEN, False) ## Turn off GPIO pin 7, if it's on
result = 'Pin number 7 turned off (was on)'
else:
print "Toggle LED_GREEN OFF !"
GPIO.output(LED_GREEN, True) ## Turn on GPIO pin 7, if it's off
result = 'Pin number 7 turned on (was off)'
except:
## do some logging...
now = datetime.datetime.now()
GPIO.cleanup()
print "Exception!"
return jsonify(result=result, led=GPIO.input(LED_GREEN), pin=LED_GREEN)
# This route will blink the LED_GREEN
@app.route('/api/led/blink')
@app.route('/api/led/blink/<float:speed>/')
@app.route('/api/led/blink/<float:speed>/<int:numTimes>')
def blink(speed=0.1, numTimes=50):
try:
for i in range(0, numTimes):
print "Iteration " + str(i+1)
GPIO.output(LED_GREEN, True) ## Turn on GPIO pin LED_GREEN
time.sleep(speed) ## Wait for sleep seconds
GPIO.output(LED_GREEN, False) ## Turn off GPIO pin LED_GREEN
time.sleep(speed) ## Wait for sleep seconds
print " Done "
except:
## do some logging...
now = datetime.datetime.now()
GPIO.cleanup()
print "Exception!"
return jsonify(result="Blinking", led=GPIO.input(LED_GREEN), pin=LED_GREEN)
@app.errorhandler(Exception)
def catch_all_exception_handler(error):
GPIO.cleanup() ## On error cleanup all GPIO Pins (hard reset)!
return 'Error', 500
def cleanup():
GPIO.cleanup() ## On shutdown clean all GPIO Pins!
print "Cleanup due to shutdown this server!"
if __name__ == '__main__':
    atexit.register(cleanup)
    app.debug = True
    app.run()
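# A rough usage sketch (assuming the default Flask development server on
# localhost:5000; adjust host/port to your setup):
#
#   curl http://localhost:5000/api/led/on
#   curl http://localhost:5000/api/led/toggle
#   curl http://localhost:5000/api/led/blink/0.2/10
#
# Each endpoint replies with a small JSON document describing the LED state.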
|
apache-2.0
| -4,223,882,474,366,889,000
| 31.772152
| 91
| 0.556779
| false
| 3.556319
| false
| false
| false
|
Austriker/LuxLogger
|
tsl2561.py
|
1
|
10465
|
'''Driver for the TSL2561 digital luminosity (light) sensors.
Pick one up at http://www.adafruit.com/products/439
Adafruit invests time and resources providing this open source code,
please support Adafruit and open-source hardware by purchasing
products from Adafruit!
Code ported from Adafruit Arduino library,
commit ced9f731da5095988cd66158562c2fde659e0510:
https://github.com/adafruit/Adafruit_TSL2561
'''
import time
from adafruit_i2c import Adafruit_I2C
from constants import *
import json
__author__ = 'Hugo SERRAT'
__credits__ = [
'K.Townsend (Adafruit Industries)',
'Georges Toth <georges@trypill.org>'
]
__license__ = 'BSD'
__version__ = 'v3.1'
'''HISTORY
v3.1 - Removed exception when sensor is saturated
v3.0 - Rewrote the i2c lib to make it work with python3
v2.0 - Rewrote driver for Adafruit_Sensor and Auto-Gain support, and
added lux clipping check (returns 0 lux on sensor saturation)
v1.0 - First release (previously TSL2561)
'''
class TSL2561(object):
'''Driver for the TSL2561 digital luminosity (light) sensors.'''
def __init__(self, address=None,
integration_time=TSL2561_DELAY_INTTIME_402MS,
gain=TSL2561_GAIN_1X, autogain=False, debug=False):
if address is not None:
self.address = address
else:
self.address = TSL2561_ADDR_FLOAT
self.i2c = Adafruit_I2C(self.address)
self.debug = debug
self.integration_time = integration_time
self.gain = gain
self.autogain = autogain
self._begin()
def _begin(self):
'''Initializes I2C and configures the sensor (call this function before
doing anything else)
'''
# Make sure we're actually connected
x = self.i2c.readU8(TSL2561_REGISTER_ID)
if not x & 0x0A:
raise Exception('TSL2561 not found!')
##########
# Set default integration time and gain
self.set_integration_time(self.integration_time)
self.set_gain(self.gain)
# Note: by default, the device is in power down mode on bootup
self.disable()
def enable(self):
'''Enable the device by setting the control bit to 0x03'''
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CONTROL,
TSL2561_CONTROL_POWERON)
def disable(self):
'''Disables the device (putting it in lower power sleep mode)'''
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_CONTROL,
TSL2561_CONTROL_POWEROFF)
@staticmethod
def delay(value):
'''Delay times must be specified in milliseconds but as the python
sleep function only takes (float) seconds we need to convert the sleep
time first
'''
time.sleep(value / 1000.0)
def _get_data(self):
'''Private function to read luminosity on both channels'''
# Enable the device by setting the control bit to 0x03
self.enable()
# Wait x ms for ADC to complete
TSL2561.delay(self.integration_time)
# Reads a two byte value from channel 0 (visible + infrared)
broadband = self.i2c.readU16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT |
TSL2561_REGISTER_CHAN0_LOW)
# Reads a two byte value from channel 1 (infrared)
ir = self.i2c.readU16(TSL2561_COMMAND_BIT | TSL2561_WORD_BIT |
TSL2561_REGISTER_CHAN1_LOW)
# Turn the device off to save power
self.disable()
return (broadband, ir)
def set_integration_time(self, integration_time):
'''Sets the integration time for the TSL2561'''
# Enable the device by setting the control bit to 0x03
self.enable()
self.integration_time = integration_time
# Update the timing register
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_TIMING,
self.integration_time | self.gain)
# Turn the device off to save power
self.disable()
def set_gain(self, gain):
'''Adjusts the gain on the TSL2561 (adjusts the sensitivity to light)
'''
# Enable the device by setting the control bit to 0x03
self.enable()
self.gain = gain
# Update the timing register
self.i2c.write8(TSL2561_COMMAND_BIT | TSL2561_REGISTER_TIMING,
self.integration_time | self.gain)
# Turn the device off to save power
self.disable()
def set_auto_range(self, value):
'''Enables or disables the auto-gain settings when reading
data from the sensor
'''
self.autogain = value
def _get_luminosity(self):
'''Gets the broadband (mixed lighting) and IR only values from
the TSL2561, adjusting gain if auto-gain is enabled
'''
valid = False
# If Auto gain disabled get a single reading and continue
if not self.autogain:
return self._get_data()
# Read data until we find a valid range
_agcCheck = False
broadband = 0
ir = 0
while not valid:
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
_hi = TSL2561_AGC_THI_13MS
_lo = TSL2561_AGC_TLO_13MS
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
_hi = TSL2561_AGC_THI_101MS
_lo = TSL2561_AGC_TLO_101MS
else:
_hi = TSL2561_AGC_THI_402MS
_lo = TSL2561_AGC_TLO_402MS
_b, _ir = self._get_data()
# Run an auto-gain check if we haven't already done so ...
if not _agcCheck:
if _b < _lo and self.gain == TSL2561_GAIN_1X:
# Increase the gain and try again
self.set_gain(TSL2561_GAIN_16X)
# Drop the previous conversion results
_b, _ir = self._get_data()
# Set a flag to indicate we've adjusted the gain
_agcCheck = True
elif _b > _hi and self.gain == TSL2561_GAIN_16X:
# Drop gain to 1x and try again
self.set_gain(TSL2561_GAIN_1X)
# Drop the previous conversion results
_b, _ir = self._get_data()
# Set a flag to indicate we've adjusted the gain
_agcCheck = True
else:
# Nothing to look at here, keep moving ....
                    # Reading is either valid, or we're already at the chip's
                    # limits
broadband = _b
ir = _ir
valid = True
else:
# If we've already adjusted the gain once, just return the new
# results.
# This avoids endless loops where a value is at one extreme
                # pre-gain, and the other extreme post-gain
broadband = _b
ir = _ir
valid = True
return (broadband, ir)
def _calculate_lux(self, broadband, ir):
'''Converts the raw sensor values to the standard SI lux equivalent.
        Returns the maximum value of 65535 if the sensor is saturated and the
        values are unreliable.
'''
# Make sure the sensor isn't saturated!
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
clipThreshold = TSL2561_CLIPPING_13MS
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
clipThreshold = TSL2561_CLIPPING_101MS
else:
clipThreshold = TSL2561_CLIPPING_402MS
# Return max value 65535 lux if the sensor is saturated
if broadband > clipThreshold or ir > clipThreshold:
return 65535
# Get the correct scale depending on the integration time
if self.integration_time == TSL2561_INTEGRATIONTIME_13MS:
chScale = TSL2561_LUX_CHSCALE_TINT0
elif self.integration_time == TSL2561_INTEGRATIONTIME_101MS:
chScale = TSL2561_LUX_CHSCALE_TINT1
else:
chScale = 1 << TSL2561_LUX_CHSCALE
# Scale for gain (1x or 16x)
if not self.gain:
chScale = chScale << 4
# Scale the channel values
channel0 = (broadband * chScale) >> TSL2561_LUX_CHSCALE
channel1 = (ir * chScale) >> TSL2561_LUX_CHSCALE
# Find the ratio of the channel values (Channel1/Channel0)
ratio1 = 0
if channel0 != 0:
ratio1 = (channel1 << (TSL2561_LUX_RATIOSCALE + 1)) // channel0
# round the ratio value
ratio = (ratio1 + 1) >> 1
b = 0
m = 0
if ratio >= 0 and ratio <= TSL2561_LUX_K1T:
b = TSL2561_LUX_B1T
m = TSL2561_LUX_M1T
elif ratio <= TSL2561_LUX_K2T:
b = TSL2561_LUX_B2T
m = TSL2561_LUX_M2T
elif ratio <= TSL2561_LUX_K3T:
b = TSL2561_LUX_B3T
m = TSL2561_LUX_M3T
elif ratio <= TSL2561_LUX_K4T:
b = TSL2561_LUX_B4T
m = TSL2561_LUX_M4T
elif ratio <= TSL2561_LUX_K5T:
b = TSL2561_LUX_B5T
m = TSL2561_LUX_M5T
elif ratio <= TSL2561_LUX_K6T:
b = TSL2561_LUX_B6T
m = TSL2561_LUX_M6T
elif ratio <= TSL2561_LUX_K7T:
b = TSL2561_LUX_B7T
m = TSL2561_LUX_M7T
elif ratio > TSL2561_LUX_K8T:
b = TSL2561_LUX_B8T
m = TSL2561_LUX_M8T
temp = (channel0 * b) - (channel1 * m)
# Do not allow negative lux value
if temp < 0:
temp = 0
# Round lsb (2^(LUX_SCALE-1))
temp += 1 << (TSL2561_LUX_LUXSCALE - 1)
# Strip off fractional portion
lux = temp >> TSL2561_LUX_LUXSCALE
# Signal I2C had no errors
return lux
def lux(self):
'''Read sensor data, convert it to LUX and return it'''
broadband, ir = self._get_luminosity()
return self._calculate_lux(broadband, ir)
def getLuminosityDict(self):
data = {}
data['broadband'], data['ir'] = self._get_luminosity()
data['lux'] = self._calculate_lux(data['broadband'], data['ir'])
return data
def getLuminosityJson(self):
return json.dumps(self.getLuminosityDict())
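# A minimal usage sketch (assumes the sensor sits on the default I2C address and
# that the Adafruit_I2C helper is available; not part of the original driver):
#
#   from tsl2561 import TSL2561
#
#   tsl = TSL2561(autogain=True)
#   print tsl.lux()                  # single reading in SI lux
#   print tsl.getLuminosityJson()    # {"broadband": ..., "ir": ..., "lux": ...}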
|
mit
| -6,918,371,752,110,139,000
| 32.758065
| 79
| 0.57592
| false
| 3.522383
| false
| false
| false
|
loaclhostjason/react-redux-admin
|
console/app/app.py
|
1
|
1394
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_babel import Babel
from flask_moment import Moment
from .assets import assets_env, bundles
from flask_caching import Cache
from config import Config
bootstrap = Bootstrap()
db = SQLAlchemy()
babel = Babel()
moment = Moment()
cache = Cache()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
# message in warning, error, success
login_manager.login_message = {'warning': "您还未登录!"}
login_manager.login_view = 'auth.login'
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
Config.init_app(app)
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
babel.init_app(app)
moment.init_app(app)
assets_env.init_app(app)
assets_env.register(bundles)
cache.init_app(app)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .deploy import deploy as deploy_blueprint
app.register_blueprint(deploy_blueprint, url_prefix='/deploy')
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api')
return app
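# A minimal usage sketch (hypothetical entry point such as a wsgi.py or
# manage.py next to this package; the import path is an assumption):
#
#   from app.app import create_app
#
#   app = create_app()
#   if __name__ == '__main__':
#       app.run(debug=True)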
|
mit
| 5,873,634,761,814,845,000
| 24.62963
| 66
| 0.723988
| false
| 3.521628
| false
| false
| false
|
afajl/sy
|
docs/conf.py
|
1
|
6337
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
import sy
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.todo', 'sphinx.ext.ifconfig']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'latin1'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sy'
copyright = u'2009, Paul Diaconescu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['sy.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sy.tex', u'sy documentation',
u'Paul Diaconescu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
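# A typical build invocation with this configuration (run from the docs/
# directory, assuming Sphinx is installed) would be:
#
#   sphinx-build -b html . _build/html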
|
bsd-3-clause
| 8,141,396,965,501,481,000
| 31.167513
| 80
| 0.709957
| false
| 3.736439
| false
| false
| false
|
erudit/zenon
|
eruditorg/apps/userspace/library/subscription_information/views.py
|
1
|
1593
|
# -*- coding: utf-8 -*-
from django.contrib import messages
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import FormView
from django.contrib.auth.mixins import LoginRequiredMixin
from base.viewmixins import MenuItemMixin
from ..viewmixins import OrganisationScopePermissionRequiredMixin
from .forms import SubscriptionInformationForm
class SubscriptionInformationUpdateView(
LoginRequiredMixin, OrganisationScopePermissionRequiredMixin, MenuItemMixin, FormView):
form_class = SubscriptionInformationForm
menu_library = 'subscription_information'
permission_required = 'library.has_access_to_dashboard'
template_name = 'userspace/library/subscription_information/update.html'
def get_form_kwargs(self):
kwargs = super(SubscriptionInformationUpdateView, self).get_form_kwargs()
kwargs.update({'organisation': self.current_organisation})
return kwargs
def form_valid(self, form):
form.save()
return super(SubscriptionInformationUpdateView, self).form_valid(form)
def get_success_url(self):
messages.success(
self.request, _("Le logo institutionnel a été mis à jour avec succès."))
return reverse(
'userspace:library:subscription_information:update',
args=(self.current_organisation.pk, ))
def get_context_data(self, **kwargs):
context = super(SubscriptionInformationUpdateView, self).get_context_data(**kwargs)
context['section_aside'] = True
return context
|
gpl-3.0
| -7,446,036,456,903,838,000
| 36.833333
| 95
| 0.735053
| false
| 4.365385
| false
| false
| false
|
jeffmahoney/supybot
|
src/commands.py
|
2
|
33100
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import Queue
import types
import getopt
import inspect
import threading
import multiprocessing
from . import callbacks, conf, ircdb, ircmsgs, ircutils, log, utils, world
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
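# A small illustrative sketch (hypothetical plugin code, not part of this
# module): wrapping a command with thread() makes the bot run it in a worker
# thread instead of blocking the main loop.
#
#   def slowcommand(self, irc, msg, args):
#       """takes no arguments -- sleeps, then replies (illustration only)"""
#       time.sleep(10)
#       irc.reply('done')
#   slowcommand = thread(slowcommand)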
class ProcessTimeoutError(Exception):
"""Gets raised when a process is killed due to timeout."""
pass
def process(f, *args, **kwargs):
"""Runs a function <f> in a subprocess.
Several extra keyword arguments can be supplied.
<pn>, the pluginname, and <cn>, the command name, are strings used to
create the process name, for identification purposes.
<timeout>, if supplied, limits the length of execution of target
function to <timeout> seconds."""
timeout = kwargs.pop('timeout', None)
q = multiprocessing.Queue()
def newf(f, q, *args, **kwargs):
try:
r = f(*args, **kwargs)
q.put(r)
except Exception as e:
q.put(e)
targetArgs = (f, q,) + args
p = callbacks.CommandProcess(target=newf,
args=targetArgs, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
raise ProcessTimeoutError, "%s aborted due to timeout." % (p.name,)
try:
v = q.get(block=False)
except Queue.Empty:
v = "Nothing returned."
if isinstance(v, Exception):
v = "Error: " + str(v)
return v
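# A rough usage sketch (hypothetical target function): run a potentially slow
# computation in a subprocess and give up after five seconds.
#
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   result = process(fib, 30, timeout=5, pn='Example', cn='fib')
#   # 'result' is the return value or an error string; ProcessTimeoutError is
#   # raised if the subprocess exceeded the timeout.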
def regexp_wrapper(s, reobj, timeout, plugin_name, fcn_name):
'''A convenient wrapper to stuff regexp search queries through a subprocess.
This is used because specially-crafted regexps can use exponential time
and hang the bot.'''
def re_bool(s, reobj):
"""Since we can't enqueue match objects into the multiprocessing queue,
we'll just wrap the function to return bools."""
if reobj.search(s) is not None:
return True
else:
return False
try:
v = process(re_bool, s, reobj, timeout=timeout, pn=plugin_name, cn=fcn_name)
return v
except ProcessTimeoutError:
return False
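# Illustrative use (hypothetical pattern): guard a user-supplied regexp so a
# pathological pattern cannot stall the bot for more than a tenth of a second.
#
#   import re
#   reobj = re.compile(r'(a+)+$')
#   matched = regexp_wrapper('aaaa!', reobj, 0.1, 'Example', 'matches')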
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error, e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel) or (ircmsgs.isCtcp(msg) and not
ircmsgs.isAction(msg)):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.debug('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10:
try:
return int(float(s))
except OverflowError:
raise ValueError('I don\'t understand numbers that large.')
else:
raise
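# A few conversions worked by hand from the rules above (illustration only):
#   _int('42')    -> 42
#   _int('0x10')  -> 16
#   _int('0b101') -> 5
#   _int('017')   -> 15
#   _int('3.9')   -> 3   (base-10 input falls back to int(float(s)))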
def getInt(irc, msg, args, state, type='integer', p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type='non-integer value'):
try:
i = _int(args[0])
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type='floating point number'):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type='positive integer', *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type='non-negative integer', *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type='index')
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception, e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid('number of seconds', args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid('boolean', args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveOp(irc, msg, args, state, action='do that'):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not even in %s.' % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error('I need to be opped to %s.' % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('channel', args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid('nick or hostmask', args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
getChannel(irc, msg, args, state)
channel = state.channel
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1])
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
# Although ircdb.users.getUser could accept a hostmask, we're explicitly
# excluding that from our interface with this check
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
_ = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
state.errorInvalid('regular expression', s)
except IndexError:
args[:] = original
state.errorInvalid('regular expression', s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0]):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid('nick', args[0],
'That nick is too long for this server.')
state.args.append(args.pop(0))
else:
state.errorInvalid('nick', args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
_ = irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = 'I haven\'t seen %s.' % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if state.channel:
return
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not in %s.' % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error('This command may only be given in a channel that I am in.',
Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error('You must be in %s.' % channel, Raise=True)
else:
state.error('I\'m not in %s.' % channel, Raise=True)
else:
state.errorInvalid('channel', args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error('%s is not in %s.' % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def checkChannelCapability(irc, msg, args, state, cap):
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = 'You must not give the empty string as an argument.'
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, errorMsg=None):
def p(s):
return len(s.split(None, 1)) == 1
if errorMsg is None:
errorMsg='You must not give a string containing spaces as an argument.'
getSomething(irc, msg, args, state, errorMsg=errorMsg, p=p)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = 'This message must be sent in a channel.'
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('url', args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('email', args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid('http url', args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid('command name', args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('ip', args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid('letter', args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, basestring):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid('plugin', args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid('irc color')
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'admin': admin,
'anything': anything,
'banmask': getBanmask,
'boolean': getBoolean,
'callerInGivenChannel': callerInGivenChannel,
'capability': getSomethingNoSpaces,
'channel': getChannel,
'channelDb': getChannelDb,
'checkCapability': checkCapability,
'checkChannelCapability': checkChannelCapability,
'color': getIrcColor,
'commandName': getCommandName,
'email': getEmail,
'expiry': getExpiry,
'filename': getSomething, # XXX Check for validity.
'float': getFloat,
'glob': getGlob,
'halfop': getHalfop,
'haveOp': getHaveOp,
'hostmask': getHostmask,
'httpUrl': getHttpUrl,
'id': getId,
'inChannel': inChannel,
'index': getIndex,
'int': getInt,
'ip': getIp,
'letter': getLetter,
'literal': getLiteral,
'long': getLong,
'lowered': getLowered,
'matches': getMatch,
'networkIrc': getNetworkIrc,
'nick': getNick,
'nickInChannel': nickInChannel,
'nonInt': getNonInt,
'nonNegativeInt': getNonNegativeInt,
'now': getNow,
'onlyInChannel': onlyInChannel,
'op': getOp,
'otherUser': getOtherUser,
'owner': owner,
'plugin': getPlugin,
'positiveInt': getPositiveInt,
'private': private,
'public': public,
'regexpMatcher': getMatcher,
'regexpReplacer': getReplacer,
'seenNick': getSeenNick,
'something': getSomething,
'somethingWithoutSpaces': getSomethingNoSpaces,
'text': getText,
'to': getTo,
'url': getUrl,
'user': getUser,
'validChannel': validChannel,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError, e:
raise UnknownConverter, str(e)
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
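# A small illustrative example (hypothetical command spec, not part of this
# module): the spec ['channel', optional('positiveInt', 1), 'text'] wraps the
# 'channel', 'positiveInt' and 'text' converters in contexts, so the command
# receives the channel, an integer defaulting to 1 when omitted, and the
# remaining text joined into one string.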
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, basestring):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception, e:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error), e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
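# In short (illustrative): additional('int') raises an error if an argument is
# present but is not an integer, whereas optional('int') also swallows that
# error and falls back to its default; both fall back to the default when no
# argument is left to consume.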
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error), e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = map(contextify, specs)
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception, e:
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception, e:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
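    # Hypothetical example: getopts({'timeout': 'positiveInt', 'private': ''})
    # accepts "--timeout <n>" (run through the 'positiveInt' converter) and a
    # bare "--private" flag; the parsed options are appended to state.args as
    # a list of (name, value) pairs, with True as the value for bare flags.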
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
for (name, spec) in getopts.iteritems():
if spec == '':
self.getoptL.append(name)
self.getopts[name] = None
else:
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, '', self.getoptL)
getopts = []
for (opt, arg) in optlist:
opt = opt[2:] # Strip --
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
###
# This is our state object, passed to converters along with irc, msg, and args.
###
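# For instance (illustrative): the 'channel' converter sets state.channel and
# appends the channel name to state.args, while calls such as
# state.errorInvalid(...) are proxied to the current irc object and flip
# state.errored so that wrap() below refuses to invoke the command.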
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError, attr
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.iteritems():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
def wrap(f, specList=[], name=None, **kw):
name = name or f.func_name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.func_code
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
raise
return utils.python.changeFunctionName(newf, name, f.__doc__)
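# Illustrative sketch of how a plugin command uses wrap (hypothetical command,
# not defined in this file):
#
#   def repeat(self, irc, msg, args, count, text):
#       """<count> <text>
#
#       Repeats <text> <count> times."""
#       irc.reply(text * count)
#   repeat = wrap(repeat, ['positiveInt', 'text'])
#
# wrap() compiles the spec list into a Spec, runs the 'positiveInt' and 'text'
# converters over the raw args, and only calls the original function when no
# converter has flagged an error.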
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap', 'process', 'regexp_wrapper',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
bsd-3-clause
| 4,640,401,204,701,421,000
| 32.1
| 84
| 0.593505
| false
| 3.676144
| false
| false
| false
|
akhileshpillai/treeherder
|
treeherder/webapp/api/runnable_jobs.py
|
1
|
5374
|
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.status import HTTP_500_INTERNAL_SERVER_ERROR
from treeherder.etl.common import fetch_json
from treeherder.model import models
class RunnableJobsViewSet(viewsets.ViewSet):
"""
This viewset is responsible for the runnable_jobs endpoint.
"""
def list(self, request, project):
"""
GET method implementation for list of all runnable buildbot jobs
"""
decision_task_id = request.query_params.get('decisionTaskID')
if decision_task_id:
tc_graph_url = settings.TASKCLUSTER_TASKGRAPH_URL.format(task_id=decision_task_id)
            tc_graph = {}  # stays empty if URL validation fails below, so Taskcluster jobs are simply skipped
validate = URLValidator()
try:
validate(tc_graph_url)
tc_graph = fetch_json(tc_graph_url)
except ValidationError:
# We pass here as we still want to schedule BuildBot jobs
pass
except Exception as ex:
return Response("Exception: {0}".format(ex), status=HTTP_500_INTERNAL_SERVER_ERROR)
else:
tc_graph = {}
repository = models.Repository.objects.get(name=project)
options_by_hash = models.OptionCollection.objects.all().select_related(
'option').values_list('option__name', 'option_collection_hash')
runnable_jobs = models.RunnableJob.objects.filter(
repository=repository
).select_related('build_platform', 'machine_platform',
'job_type', 'job_type__job_group')
ret = []
# Adding buildbot jobs
for datum in runnable_jobs:
options = ' '.join(option_name for (option_name, col_hash) in options_by_hash
if col_hash == datum.option_collection_hash)
ret.append({
'build_platform_id': datum.build_platform.id,
'build_platform': datum.build_platform.platform,
'build_os': datum.build_platform.os_name,
'build_architecture': datum.build_platform.architecture,
'machine_platform_id': datum.machine_platform.id,
'platform': datum.machine_platform.platform,
'machine_platform_os': datum.machine_platform.os_name,
'machine_platform_architecture': datum.machine_platform.architecture,
'job_group_id': datum.job_type.job_group.id,
'job_group_name': datum.job_type.job_group.name,
'job_group_symbol': datum.job_type.job_group.symbol,
'job_group_description': datum.job_type.job_group.description,
'job_type_id': datum.job_type.id,
'job_type_name': datum.job_type.name,
'job_type_symbol': datum.job_type.symbol,
'job_type_description': datum.job_type.description,
'option_collection_hash': datum.option_collection_hash,
'ref_data_name': datum.ref_data_name,
'build_system_type': datum.build_system_type,
'platform_option': options,
'job_coalesced_to_guid': None,
'state': 'runnable',
'result': 'runnable'})
for label, node in tc_graph.iteritems():
extra = node['task'].get('extra')
if not extra or not extra.get('treeherder'):
# some tasks don't have the treeherder information we need
# to be able to display them (and are not intended to be
# displayed). skip.
continue
treeherder_options = extra['treeherder']
task_metadata = node['task']['metadata']
build_platform = treeherder_options.get('machine', {}).get('platform', '')
# Not all tasks have a group name
job_group_name = treeherder_options.get('groupName', '')
# Not all tasks have a group symbol
job_group_symbol = treeherder_options.get('groupSymbol', '')
# Not all tasks have a collection
if 'collection' in treeherder_options:
platform_option = ' '.join(treeherder_options['collection'].keys())
else:
platform_option = ""
ret.append({
'build_platform': build_platform,
'platform': build_platform,
'job_group_name': job_group_name,
'job_group_symbol': job_group_symbol,
'job_type_name': task_metadata['name'],
'job_type_symbol': treeherder_options['symbol'],
'job_type_description': task_metadata['description'],
'ref_data_name': label,
'build_system_type': 'taskcluster',
'platform_option': platform_option,
'job_coalesced_to_guid': None,
'state': 'runnable',
'result': 'runnable'})
response_body = dict(meta={"repository": project,
"offset": 0,
"count": len(ret)},
results=ret)
return Response(response_body)
|
mpl-2.0
| -607,443,222,029,147,600
| 42.691057
| 99
| 0.566617
| false
| 4.351417
| false
| false
| false
|
avanzosc/avanzosc6.1
|
avanzosc_tire_management/wizard/wizard_interchange.py
|
1
|
15211
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011-2012 Daniel (Avanzosc) <http://www.avanzosc.com>
# 28/03/2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
import wizard
import pooler
import Image
class wizard_interchange (wizard.interface):
form1 = '''<?xml version="1.0"?>
<form string="Tire Change">
<field name="tire" width="250" height="50"/>
<separator string="Move Tire" colspan="4"/>
<field name="origin" width="250" height="50"/>
<field name="destination" width="250" height="50" domain="[('name','like','bus')]" />
<group string="Bus Location" colspan="4" attrs="{'invisible':[('destination','=','Tire Stock')]}">
<field name="locat" width="150" height="50" domain="[('location_id','=',destination)]" />
<field name="odometer" />
</group>
</form>'''
form1_fields = {
'tire': {
'string': 'Tire',
'type': 'many2one',
'relation': 'stock.production.lot',
'required': True,
'readonly': True
},
'origin': {
'string': 'Origin',
'type': 'many2one',
'relation': 'stock.location',
'required': True,
'readonly': True
},
'destination': {
'string': 'Destination',
'type': 'many2one',
'relation': 'stock.location',
'required': True
},
'locat': {
'string': 'Tire Location',
'type': 'many2one',
'relation': 'stock.location',
'required': True
},
'odometer': {
'string': 'Odometer',
'type': 'integer',
'required': True
},
}
form2 = '''<?xml version="1.0"?>
<form string="Tire move">
<label string="Location occupied! The chosen location already has a tire assigned, move it before assigning new one." colspan="4"/>
</form>'''
form2_fields = {}
form3 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire movement must be from a Vehicle!" colspan="4"/>
</form>'''
form3_fields = {}
form4 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire movement must be to a Vehicle!" colspan="4"/>
</form>'''
form4_fields = {}
form5 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire must in a Vehicle!" colspan="4"/>
</form>'''
form5_fields = {}
form6 = '''<?xml version="1.0"?>
<form string="Tire move">
<separator string="Tire correctly moved! " colspan="4"/>
</form>'''
form6_fields = {}
def tire_init (self,cr,uid, data,context):
move_data = {}
pool = pooler.get_pool(cr.dbname)
tire_obj = pool.get('stock.production.lot')
move_obj = pool.get('stock.move')
loc_obj = pool.get('stock.location')
company_obj = pool.get('res.company')
tire = tire_obj.browse(cr,uid,data['id'])
company=tire.company_id
move_list = move_obj.search(cr,uid,[('prodlot_id','=',tire.id)])
locat_default = company.tire_stock
destini = False
if move_list == []:
origin = locat_default.id
else:
loc_id = max(move_list)
move= move_obj.browse(cr,uid, loc_id)
origin = move.location_dest_id.id
move_data={'tire':tire.id, 'origin': origin, 'destination': destini}
return move_data
def tire_interchange (self,cr,uid, data,context):
pool = pooler.get_pool(cr.dbname)
tire_obj = pool.get('stock.production.lot')
tire_data_obj = pool.get('tire.stock.lot')
move_obj = pool.get('stock.move')
vehic_obj = pool.get('fleet.vehicles')
loc_obj = pool.get('stock.location')
company_obj = pool.get('res.company')
tire = tire_obj.browse(cr,uid,data['form']['tire'])
company=tire.company_id
move_list = move_obj.search(cr,uid,[('prodlot_id','=',tire.id)])
destination = loc_obj.browse (cr,uid,data['form']['destination'])
destination_name = destination.name
origin = loc_obj.browse (cr,uid,data['form']['origin'])
origin_name = origin.name
        # Check whether the origin location belongs to a vehicle
loc_parent_ori = origin.location_id.id
if loc_parent_ori:
vehic_list = vehic_obj.search(cr,uid,[('buslocat','=',loc_parent_ori)])
else : vehic_list = []
if vehic_list ==[]:
ori_vehicle = False
res = 'ori_vehi'
else:
ori_vehicle = True
vehicle_ori = vehic_obj.browse(cr,uid,vehic_list[0]) # Origin Vehicle
        # End of origin check
        # Check whether the destination is a vehicle
if data['form']['locat'] :
dest_vehi = True
location = loc_obj.browse (cr,uid,data['form']['locat'])
vehicle_list = vehic_obj.search(cr,uid,[('buslocat','=',destination.id)])
vehicle_dest = vehic_obj.browse(cr,uid,vehicle_list[0]) # Destination Vehicle
else:
dest_vehi = False
res= 'dest_vehi'
        # End of destination check
if dest_vehi and ori_vehicle : # Destination AND Origin = Vehicle
res = 'moved'
            # Update the tire odometer
odometer = data['form']['odometer']
if move_list == []:
res = 'error'
else:
loc_id = max(move_list)
move= move_obj.browse(cr,uid, loc_id)
result = int(odometer) - move.odometer
tire_odometer = tire.tire_km + result
if tire.odometers:
odometer_text = tire.odometers + "\n" + str(data['form']['odometer'])
else: odometer_text = str(data['form']['odometer'])
tire_val= {'tire_km' : tire_odometer, 'odometers' : odometer_text}
            # End of tire odometer update
            # Stock move data
product_id = tire.product_id
move_data = {'product_id' : tire.product_id.id,
'name' : origin.name + ' | ' + tire.name + ' => ' + destination.name,
'location_id' : origin.id,
'product_uom': tire.product_id.product_tmpl_id.uom_id.id,
'prodlot_id' : tire.id,
'location_dest_id': location.id,
'odometer': odometer
}
            # Tire data
tire_data_list=tire_data_obj.search(cr,uid,[('lot_id','=',tire.id)])
tire_data_id = max(tire_data_list)
tire_data = tire_data_obj.browse(cr,uid,tire_data_id)
tire_data_val={
'name': origin.name + ' | ' + tire.name + ' => ' + destination.name,
'lot_id': tire.id,
'origin' : origin.id,
'destination': location.id,
# 'data':time.strftime('%Y-%m-%d %H:%M:%S'),
'odomold' : tire_data.odomnew,
'odomnew' : odometer,
'tire_km' : odometer - tire_data.odomnew,
'tire_km_total':tire_data.tire_km_total + odometer - tire_data.odomnew
}
            # End of tire data
occupied = False
if location.name.endswith("-1"): # Tire to right
mount = {'f_l_tire' : tire.id}
if vehicle_dest.f_l_tire.id: # Tire occupied
occupied = vehicle_dest.f_l_tire
elif location.name.endswith("-2"):
mount = {'f_r_tire' : tire.id}
if vehicle_dest.f_r_tire.id: # Tire occupied
occupied = vehicle_dest.f_r_tire
if vehicle_dest.tires == 6:
if location.name.endswith("-3"):
mount = {'r_l_tire1' : tire.id}
if vehicle_dest.r_l_tire1.id:
occupied = vehicle_dest.r_l_tire1
elif location.name.endswith("-4"):
mount = {'r_l_tire2' : tire.id}
if vehicle_dest.r_l_tire2.id:
occupied = vehicle_dest.r_l_tire2
elif location.name.endswith("-5"):
mount = {'r_r_tire2' : tire.id}
if vehicle_dest.r_r_tire2.id:
occupied = vehicle_dest.r_r_tire2
elif location.name.endswith("-6"):
mount = {'r_r_tire1' : tire.id}
if vehicle_dest.r_r_tire1.id:
occupied = vehicle_dest.r_r_tire1
if vehicle_dest.tires > 6:
if location.name.endswith("-3"):
mount = {'m_l_tire1' : tire.id}
if vehicle_dest.m_l_tire1.id:
occupied = vehicle_dest.m_l_tire1
elif location.name.endswith("-4"):
mount = {'m_l_tire2' : tire.id}
if vehicle_dest.m_l_tire2.id:
occupied = vehicle_dest.m_l_tire2
elif location.name.endswith("-5"):
mount = {'m_r_tire2' : tire.id}
if vehicle_dest.m_r_tire2.id:
occupied = vehicle_dest.m_r_tire2
elif location.name.endswith("-6"):
mount = {'m_r_tire1' : tire.id}
if vehicle_dest.m_r_tire1.id:
occupied = vehicle_dest.m_r_tire1
elif location.name.endswith("-7"):
mount = {'r_l_tire1' : tire.id}
if vehicle_dest.r_l_tire1.id:
occupied = vehicle_dest.r_l_tire1
elif location.name.endswith("-8"):
mount = {'r_r_tire1' : tire.id}
if vehicle_dest.r_r_tire1.id:
occupied = vehicle_dest.r_r_tire1
if not occupied:
                # Update the tire
tire_obj.write(cr,uid, tire.id,tire_val)
                # Update the destination vehicle
vehic_obj.write(cr,uid, vehicle_dest.id, mount)
                # Create the stock move
move_id = move_obj.create(cr,uid,move_data)
                # Create the tire data record
move_data_reg = move_obj.browse(cr,uid,move_id)
tire_data_val['data']= move_data_reg.date
data_id= tire_data_obj.create(cr,uid,tire_data_val)
                # Update the origin vehicle
if origin_name.endswith("-1"):
update ={ 'f_l_tire' : False}
elif origin_name.endswith("-2"):
update ={ 'f_r_tire' : False}
if vehicle_ori.tires == 6:
if origin_name.endswith("-3"):
update ={ 'r_l_tire1' : False}
elif origin_name.endswith("-4"):
update ={ 'r_l_tire2' : False}
elif origin_name.endswith("-5"):
update ={ 'r_r_tire2' : False}
elif origin_name.endswith("-6"):
update ={ 'r_r_tire1' : False}
elif vehicle_ori.tires > 6:
if origin_name.endswith("-3"):
update ={ 'm_l_tire1' : False}
elif origin_name.endswith("-4"):
update ={ 'm_l_tire2' : False}
elif origin_name.endswith("-5"):
update ={ 'm_r_tire2' : False}
elif origin_name.endswith("-6"):
update ={ 'm_r_tire1' : False}
elif origin_name.endswith("-7"):
update ={ 'r_l_tire1' : False}
elif origin_name.endswith("-8"):
update ={ 'r_r_tire1' : False}
vehic_obj.write(cr,uid,vehicle_ori.id,update)
elif occupied:
res = 'full'
return res
states = {
'init': {
'actions': [tire_init],
'result': {'type': 'form', 'arch':form1, 'fields':form1_fields, 'state': [('end', 'Cancel','gtk-cancel'),('mount', 'Accept','gtk-ok')]}
},
'mount': {
'actions' : [],
'result': {'type': 'choice', 'next_state': tire_interchange}
},
'full' : {
'actions' : [],
'result': {'type': 'form', 'arch':form2, 'fields':form2_fields,'state': [('end', 'Accept','gtk-ok')]}
},
'ori_vehi': {
'actions' : [],
'result': {'type': 'form', 'arch':form3, 'fields':form3_fields,'state': [('end', 'Accept','gtk-cancel')]}
},
'dest_vehi': {
'actions' : [],
'result': {'type': 'form', 'arch':form4, 'fields':form4_fields,'state': [('end', 'Accept','gtk-cancel')]}
},
'error': {
'actions' : [],
'result': {'type': 'form', 'arch':form5, 'fields':form5_fields,'state': [('end', 'Accept','gtk-cancel')]}
},
'moved': {
'actions' : [],
'result': {'type': 'form', 'arch':form6, 'fields':form6_fields,'state': [('end', 'Accept','gtk-ok')]}
}
}
wizard_interchange('tire.interchange')
|
agpl-3.0
| 5,135,022,820,725,371,000
| 42.827089
| 156
| 0.460577
| false
| 3.91026
| false
| false
| false
|
spark8103/ops17
|
tools/test_init.py
|
1
|
18237
|
#!/usr/bin/env python
# coding: utf-8
from app import db
from app.models import User, Role, Department, Idc, Server, Software, Project, Module, Environment
import os
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
def department_insert_data():
departments = {
u'管理中心': (None,''),
u'技术中心': (None, ''),
u'营销中心': (None, ''),
u'行政部': (Department.query.filter_by(name=u"管理中心").first(),''),
u'财务部': (Department.query.filter_by(name=u"管理中心").first(), ''),
u'运维部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'DBA部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'开发部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'测试部': (Department.query.filter_by(name=u"技术中心").first(), ''),
u'市场部': (Department.query.filter_by(name=u"营销中心").first(), ''),
u'活动部': (Department.query.filter_by(name=u"营销中心").first(), ''),
}
for r in departments:
department = Department.query.filter_by(name=r).first()
if department is None:
department = Department(name=r)
if isinstance(departments[r][0], int):
department.parent_id = departments[r][0]
else:
department.parent = departments[r][0]
department.description = departments[r][1]
db.session.add(department)
db.session.commit()
print "Insert department test data."
def user_insert_data():
users = {
'admin': ('admin@example.com', 13465245521, Department.query.filter_by(name=u"管理中心").first(),
Role.query.filter_by(name="Administrator").first(), 'admin', True, "admin"),
'ops1': ('ops1@example.com', 13764110236, Department.query.filter_by(name=u"运维部").first(),
Role.query.filter_by(name="User").first(), 'ops1', False, "ops"),
'ops2': ('ops2@example.com', 13764110238, Department.query.filter_by(name=u"运维部").first(),
Role.query.filter_by(name="User").first(), 'ops2', False, "ops"),
'dev1': ('dev1@example.com', 13612451124, Department.query.filter_by(name=u"开发部").first(),
Role.query.filter_by(name="User").first(), 'dev1', False, "dev"),
'dev2': ('dev2@example.com', 13625412214, Department.query.filter_by(name=u"开发部").first(),
Role.query.filter_by(name="User").first(), 'dev2', False, "dev"),
'qa1': ('qa1@example.com', 13112453365, Department.query.filter_by(name=u"测试部").first(),
Role.query.filter_by(name="User").first(), 'qa1', False, "qa"),
'qa2': ('qa2@example.com', 13124556847, Department.query.filter_by(name=u"测试部").first(),
Role.query.filter_by(name="User").first(), 'qa2', False, "qa"),
'dba1': ('dba1@example.com', 13321542635, Department.query.filter_by(name=u"DBA部").first(),
Role.query.filter_by(name="User").first(), 'dba1', False, "dba"),
'dba2': ('dba2@example.com', 13214512245, Department.query.filter_by(name=u"DBA部").first(),
Role.query.filter_by(name="User").first(), 'dba2', False, "dba"),
'user1': ('user1@example.com', 13412115694, Department.query.filter_by(name=u"活动部").first(),
Role.query.filter_by(name="User").first(), 'user1', False, "user"),
'user2': ('user2@example.com', 13451489521, Department.query.filter_by(name=u"行政部").first(),
Role.query.filter_by(name="User").first(), 'user2', False, "user"),
'user3': ('user3@example.com', 13465218952, Department.query.filter_by(name=u"营销中心").first(),
Role.query.filter_by(name="User").first(), 'user3', False, "manager"),
'user4': ('user4@example.com', 13462548991, Department.query.filter_by(name=u"管理中心").first(),
Role.query.filter_by(name="User").first(), 'user4', False, "manager"),
}
for u in users:
user = User.query.filter_by(username=u).first()
if user is None:
user = User(username=u)
user.email = users[u][0]
user.mobile = users[u][1]
user.department = users[u][2]
user.role = users[u][3]
user.password = users[u][4]
user.allow_login = users[u][5]
user.type = users[u][6]
db.session.add(user)
db.session.commit()
print "Insert user test data."
def idc_insert_data():
idcs = {
u'周浦': '',
u'北京南路': '',
u'欧阳路': '',
u'万国数据中心': '',
u'Ucloud': '',
u'aliyun': '',
u'北京酒仙桥': '',
u'金华双线': '',
u'宁波三线': '',
u'无锡线路': '',
u'南京联通': '',
u'青岛联通': '',
}
for s in idcs:
idc = Idc.query.filter_by(name=s).first()
if idc is None:
idc = Idc(name=s)
idc.description = idcs[s]
db.session.add(idc)
db.session.commit()
print "Insert idc test data."
def server_insert_data():
servers = {
u'zp-prd-app-10': (
"zp-prd-app", Idc.query.filter_by(name=u"周浦").first(), "K1", '10.10.10.10', '', u'大数据', "PRD",
"server", "Online", ""),
u'zp-prd-app-11': (
"zp-prd-app", Idc.query.filter_by(name=u"周浦").first(), "K2", '10.10.10.11', '', u'大数据', "PRD",
"server", "Online", ""),
u'oyl-stg-app-101': (
"oyl-stg-app", Idc.query.filter_by(name=u"欧阳路").first(), "R11", '10.18.23.101', '', u'网站部',
"STG", "server", "Online", ""),
u'oyl-stg-app-102': (
"oyl-stg-app", Idc.query.filter_by(name=u"欧阳路").first(), "R11", '10.18.23.102', '', u'网站部',
"STG", "server", "Online", ""),
u'dev-oracle-21': (
"dev-oracle", Idc.query.filter_by(name=u"北京南路").first(), "A01", '172.16.11.21', '', u'IT部',
"DEV", "vserver", "Online", ""),
u'dev-oracle-22': (
"dev-oracle", Idc.query.filter_by(name=u"北京南路").first(), "A01", '172.16.11.22', '', u'IT据',
"DEV", "vserver", "Online", ""),
u'px-prd-app-10': (
"px-prd-app", Idc.query.filter_by(name=u"万国数据中心").first(), "K1", '10.88.10.10', '', u'大数据',
"PRD", "server", "Online", ""),
u'px-prd-app-11': (
"px-prd-app", Idc.query.filter_by(name=u"万国数据中心").first(), "K2", '10.88.10.11', '', u'大数据',
"PRD", "server", "Online", ""),
u'uc-stg-app-101': (
"uc-stg-app", Idc.query.filter_by(name=u"Ucloud").first(), "R11", '10.99.123.101', '', u'网站部',
"STG", "server", "Online", ""),
u'uc-stg-app-102': (
"uc-stg-app", Idc.query.filter_by(name=u"Ucloud").first(), "R11", '10.99.123.102', '', u'网站部',
"STG", "server", "Online", ""),
u'wx-oracle-21': (
"wx-oracle", Idc.query.filter_by(name=u"无锡线路").first(), "A01", '172.16.11.21', '', u'IT部',
"DEV", "vserver", "Online", ""),
u'wx-oracle-22': (
"wx-oracle", Idc.query.filter_by(name=u"无锡线路").first(), "A01", '172.16.11.22', '', u'IT据',
"DEV", "vserver", "Online", ""),
}
for s in servers:
server = Server.query.filter_by(name=s).first()
if server is None:
server = Server(name=s)
server.category_branch = servers[s][0]
server.idc = servers[s][1]
server.rack = servers[s][2]
server.private_ip = servers[s][3]
server.public_ip = servers[s][4]
server.category = servers[s][5]
server.env = servers[s][6]
server.type = servers[s][7]
server.status = servers[s][8]
server.description = servers[s][9]
db.session.add(server)
db.session.commit()
print "Insert server test data."
def project_insert_data():
projects = {
u'bd-blink': (Department.query.filter_by(name=u"管理中心").first(),
User.query.filter_by(username=u'user1').first(), '99999'),
u'bd-tiger': (Department.query.filter_by(name=u"管理中心").first(),
User.query.filter_by(username=u'user2').first(), '99999'),
u'bd-cmdb': (Department.query.filter_by(name=u"运维部").first(),
User.query.filter_by(username=u'ops1').first(), '999'),
u'bd-bdmp': (Department.query.filter_by(name=u"运维部").first(),
User.query.filter_by(username=u'ops2').first(), '999'),
u'bd-test': (Department.query.filter_by(name=u"开发部").first(),
User.query.filter_by(username=u'dev1').first(), '999'),
u'bd-test2': (Department.query.filter_by(name=u"开发部").first(),
User.query.filter_by(username=u'dev2').first(), '999'),
u'bd-test3': (Department.query.filter_by(name=u"开发部").first(),
User.query.filter_by(username=u'dev1').first(), '999'),
u'bd-jenkins': (Department.query.filter_by(name=u"测试部").first(),
User.query.filter_by(username=u'qa1').first(), '999'),
u'bd-qa': (Department.query.filter_by(name=u"测试部").first(),
User.query.filter_by(username=u'qa2').first(), '999'),
u'bd-oracle': (Department.query.filter_by(name=u"DBA部").first(),
User.query.filter_by(username=u'dba1').first(), '999'),
u'bd-mongodb': (Department.query.filter_by(name=u"DBA部").first(),
User.query.filter_by(username=u'dba2').first(), '999'),
}
for s in projects:
project = Project.query.filter_by(name=s).first()
if project is None:
project = Project(name=s)
project.department = projects[s][0]
project.pm = projects[s][1]
project.sla = projects[s][2]
db.session.add(project)
db.session.commit()
print "Insert project test data."
def module_insert_data():
modules = {
u'bd-blink-server': (Project.query.filter_by(name=u"bd-blink").first(), 'http://10.10.10.5/svn/bd-blink/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops1').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-tiger-web': (Project.query.filter_by(name=u"bd-tiger").first(), 'http://10.10.10.5/svn/bd-tiger/',
User.query.filter_by(username=u'dev2').first(), User.query.filter_by(username=u'qa2').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-cmdb': (Project.query.filter_by(name=u"bd-cmdb").first(), 'http://10.10.10.5/svn/bd-cmdb/',
User.query.filter_by(username=u'dev2').first(), User.query.filter_by(username=u'qa2').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-bdmp': (Project.query.filter_by(name=u"bd-bdmp").first(), 'http://10.10.10.5/svn/bd-bdmp/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-test': (Project.query.filter_by(name=u"bd-test").first(), 'http://10.10.10.5/svn/bd-test/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-test2': (Project.query.filter_by(name=u"bd-test2").first(), 'http://10.10.10.5/svn/bd-test2/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-test3': (Project.query.filter_by(name=u"bd-test3").first(), 'http://10.10.10.5/svn/bd-test3/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-jenkins': (Project.query.filter_by(name=u"bd-jenkins").first(), 'http://10.10.10.5/svn/bd-jenkins/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-qa': (Project.query.filter_by(name=u"bd-qa").first(), 'http://10.10.10.5/svn/bd-qa/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-oracle': (Project.query.filter_by(name=u"bd-oracle").first(), 'http://10.10.10.5/svn/bd-oracle/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
u'bd-mongodb': (Project.query.filter_by(name=u"bd-mongodb").first(), 'http://10.10.10.5/svn/bd-mongodb/',
User.query.filter_by(username=u'dev1').first(), User.query.filter_by(username=u'qa1').first(),
User.query.filter_by(username=u'ops2').first(),
Software.query.filter_by(version=u'tomcat_7.0.68').first()),
}
for m in modules:
module = Module.query.filter_by(name=m).first()
if module is None:
module = Module(name=m)
module.project = modules[m][0]
module.svn = modules[m][1]
module.dev = modules[m][2]
module.qa = modules[m][3]
module.ops = modules[m][4]
module.software = modules[m][5]
db.session.add(module)
db.session.commit()
print "Insert module test data."
def environment_insert_data():
environments = {
u'bd-blink-server': (Module.query.filter_by(name=u"bd-blink-server").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.blink.com/status",
"/opt/app/bd-blink-server/", "www.blink.com"),
u'bd-tiger-web': (Module.query.filter_by(name=u"bd-tiger-web").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.tiger.com/status",
"/opt/app/bd-tiger-web/", "www.tiger.com"),
u'bd-cmdb': (Module.query.filter_by(name=u"bd-cmdb").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.cmdb.com/status",
"/opt/app/bd-cmdb/", "www.cmdb.com"),
u'bd-bdmp': (Module.query.filter_by(name=u"bd-bdmp").first(), 'PRD',
Idc.query.filter_by(name=u'周浦').first(), "http://www.bdmp.com/status",
"/opt/app/bd-bdmp/", "www.bdmp.com"),
u'bd-test': (Module.query.filter_by(name=u"bd-test").first(), 'DEV',
Idc.query.filter_by(name=u'周浦').first(), "http://www.test.com/status",
"/opt/app/bd-test/", "www.test.com"),
u'bd-test2': (Module.query.filter_by(name=u"bd-test2").first(), 'DEV',
Idc.query.filter_by(name=u'周浦').first(), "http://www.test2.com/status",
"/opt/app/bd-test2/", "www.test2.com"),
u'bd-test3': (Module.query.filter_by(name=u"bd-test3").first(), 'DEV',
Idc.query.filter_by(name=u'周浦').first(), "http://www.test3.com/status",
"/opt/app/bd-test3/", "www.test3.com"),
u'bd-jenkins': (Module.query.filter_by(name=u"bd-jenkins").first(), 'QA',
Idc.query.filter_by(name=u'周浦').first(), "http://www.jenkins.com/status",
"/opt/app/bd-jenkins/", "www.jenkins.com"),
u'bd-qa': (Module.query.filter_by(name=u"bd-qa").first(), 'QA',
Idc.query.filter_by(name=u'周浦').first(), "http://www.qa.com/status",
"/opt/app/bd-qa/", "www.qa.com"),
u'bd-oracle': (Module.query.filter_by(name=u"bd-oracle").first(), 'STG',
Idc.query.filter_by(name=u'周浦').first(), "http://www.oracle.com/status",
"/opt/app/bd-oracle/", "www.oracle.com"),
u'bd-mongodb': (Module.query.filter_by(name=u"bd-mongodb").first(), 'STG',
Idc.query.filter_by(name=u'周浦').first(), "http://www.mongodb.com/status",
"/opt/app/bd-mongodb/", "www.mongodb.com"),
}
for e in environments:
environment = Environment(
module=environments[e][0],
env=environments[e][1],
idc=environments[e][2],
check_point1=environments[e][3],
deploy_path=environments[e][4],
domain=environments[e][5])
db.session.add(environment)
db.session.commit()
print "Insert environment test data."
|
mit
| -3,189,856,236,642,584,000
| 46.867568
| 123
| 0.54085
| false
| 2.921643
| true
| false
| false
|
lexifdev/pyconkr-2015
|
pyconkr/views.py
|
1
|
12386
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.contrib.auth import login as user_login, logout as user_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView, DetailView, UpdateView
from datetime import datetime, timedelta
from uuid import uuid4
from .forms import EmailLoginForm, SpeakerForm, ProgramForm, RegistrationForm
from .helper import sendEmailToken, render_json, send_email_ticket_confirm, render_io_error
from .models import (Room,
Program, ProgramDate, ProgramTime, ProgramCategory,
Speaker, Sponsor, Jobfair, Announcement,
EmailToken, Registration, Product)
from iamporter import get_access_token, Iamporter, IamporterError
logger = logging.getLogger(__name__)
payment_logger = logging.getLogger('payment')
def index(request):
return render(request, 'index.html', {
'base_content': FlatPage.objects.get(url='/index/').content,
'recent_announcements': Announcement.objects.all()[:3],
})
def schedule(request):
dates = ProgramDate.objects.all()
times = ProgramTime.objects.all()
rooms = Room.objects.all()
wide = {}
narrow = {}
processed = set()
for d in dates:
wide[d] = {}
narrow[d] = {}
for t in times:
wide[d][t] = {}
narrow[d][t] = {}
for r in rooms:
s = Program.objects.filter(date=d, times=t, rooms=r)
if s:
if s[0].times.all()[0] == t and s[0].id not in processed:
wide[d][t][r] = s[0]
narrow[d][t][r] = s[0]
processed.add(s[0].id)
else:
wide[d][t][r] = None
if len(narrow[d][t]) == 0:
del(narrow[d][t])
contexts = {
'wide': wide,
'narrow': narrow,
'rooms': rooms,
}
return render(request, 'schedule.html', contexts)
class RoomDetail(DetailView):
model = Room
class SponsorList(ListView):
model = Sponsor
class SponsorDetail(DetailView):
model = Sponsor
class SpeakerList(ListView):
model = Speaker
class SpeakerDetail(DetailView):
model = Speaker
def get_context_data(self, **kwargs):
context = super(SpeakerDetail, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
if self.request.user.email == self.object.email:
context['editable'] = True
return context
class SpeakerUpdate(UpdateView):
model = Speaker
form_class = SpeakerForm
def get_queryset(self):
queryset = super(SpeakerUpdate, self).get_queryset()
return queryset.filter(email=self.request.user.email)
class ProgramList(ListView):
model = ProgramCategory
template_name = 'pyconkr/program_list.html'
class ProgramDetail(DetailView):
model = Program
def get_context_data(self, **kwargs):
context = super(ProgramDetail, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
for speaker in self.object.speakers.all():
if self.request.user.email == speaker.email:
context['editable'] = True
return context
class ProgramUpdate(UpdateView):
model = Program
form_class = ProgramForm
def get_queryset(self):
queryset = super(ProgramUpdate, self).get_queryset()
return queryset.filter(speakers__email=self.request.user.email)
class JobfairList(ListView):
model = Jobfair
class AnnouncementList(ListView):
model = Announcement
def get_queryset(self):
now = datetime.now()
queryset = super(AnnouncementList, self).get_queryset()
return queryset.filter(Q(announce_after__isnull=True) | Q(announce_after__lt=now))
class AnnouncementDetail(DetailView):
model = Announcement
def robots(request):
return render(request, 'robots.txt', content_type='text/plain')
def login(request):
form = EmailLoginForm()
if request.method == 'POST':
form = EmailLoginForm(request.POST)
if form.is_valid():
# Remove previous tokens
email = form.cleaned_data['email']
EmailToken.objects.filter(email=email).delete()
# Create new
token = EmailToken(email=email)
token.save()
sendEmailToken(request, token)
return redirect(reverse('login_mailsent'))
return render(request, 'login.html', {
'form': form,
'title': _('Login'),
})
@never_cache
def login_req(request, token):
time_threshold = datetime.now() - timedelta(hours=1)
try:
token = EmailToken.objects.get(token=token, created__gte=time_threshold)
except ObjectDoesNotExist:
return render(request, 'login_notvalidtoken.html', {
'title': _('Not valid token')
})
email = token.email
# Create user automatically by email as id, token as password
try:
user = User.objects.get(email=email)
except ObjectDoesNotExist:
user = User.objects.create_user(email, email, token)
user.save()
token.delete()
# Set backend manually
user.backend = 'django.contrib.auth.backends.ModelBackend'
user_login(request, user)
return redirect(reverse('index'))
@never_cache
def login_mailsent(request):
return render(request, 'login_mailsent.html', {
'title': _('Mail sent'),
})
def logout(request):
user_logout(request)
return redirect(reverse('index'))
@login_required
def profile(request):
return render(request, 'profile.html')
@login_required
def registration_info(request):
is_ticket_open = is_registration_time()
return render(request, 'pyconkr/registration/info.html', {
"is_ticket_open" : is_ticket_open
})
@login_required
def registration_status(request):
try:
registration = Registration.objects.filter(user=request.user).get()
except Registration.DoesNotExist:
registration = None
return render(request, 'pyconkr/registration/status.html', {
'title': _('Registration'),
'registration': registration,
})
@login_required
def registration_payment(request):
if not is_registration_time():
return redirect('registration_info')
if request.method == 'GET':
product = Product()
registered = Registration.objects.filter(
user=request.user,
payment_status__in=['paid', 'ready']
).exists()
if registered:
return redirect('registration_status')
uid = str(uuid4()).replace('-', '')
form = RegistrationForm(initial={'email': request.user.email})
return render(request, 'pyconkr/registration/payment.html', {
'title': _('Registration'),
'IMP_USER_CODE': settings.IMP_USER_CODE, # TODO : Move to 'settings context processor'
'form': form,
'uid': uid,
'product_name': product.name,
'amount': product.price,
'vat': 0,
})
elif request.method == 'POST':
payment_logger.debug(request.POST)
form = RegistrationForm(request.POST)
# TODO : more form validation
# eg) merchant_uid
if not form.is_valid():
form_errors_string = '\n'.join(('%s:%s' % (k, v[0]) for k, v in form.errors.items()))
return render_json({
'success': False,
'message': form_errors_string, # TODO : ...
})
remain_ticket_count = (settings.MAX_TICKET_NUM - Registration.objects.filter(payment_status__in=['paid', 'ready']).count())
# sold out
if remain_ticket_count <= 0:
return render_json({
'success': False,
'message': u'티켓이 매진 되었습니다',
})
registration, created = Registration.objects.get_or_create(user=request.user)
registration.name = form.cleaned_data.get('name')
registration.email = request.user.email
registration.company = form.cleaned_data.get('company', '')
registration.phone_number = form.cleaned_data.get('phone_number', '')
registration.merchant_uid = request.POST.get('merchant_uid')
registration.save() # TODO : use form.save()
try:
product = Product()
access_token = get_access_token(settings.IMP_API_KEY, settings.IMP_API_SECRET)
imp_client = Iamporter(access_token)
if request.POST.get('payment_method') == 'card':
# TODO : use validated and cleaned data
imp_client.onetime(
token=request.POST.get('token'),
merchant_uid=request.POST.get('merchant_uid'),
amount=request.POST.get('amount'),
# vat=request.POST.get('vat'),
card_number=request.POST.get('card_number'),
expiry=request.POST.get('expiry'),
birth=request.POST.get('birth'),
pwd_2digit=request.POST.get('pwd_2digit'),
customer_uid=form.cleaned_data.get('email'),
)
confirm = imp_client.find_by_merchant_uid(request.POST.get('merchant_uid'))
if confirm['amount'] != product.price:
# TODO : cancel
return render_io_error("amount is not same as product.price. it will be canceled")
registration.payment_method = confirm.get('pay_method')
registration.payment_status = confirm.get('status')
registration.payment_message = confirm.get('fail_reason')
registration.vbank_name = confirm.get('vbank_name', None)
registration.vbank_num = confirm.get('vbank_num', None)
registration.vbank_date = confirm.get('vbank_date', None)
registration.vbank_holder = confirm.get('vbank_holder', None)
registration.save()
send_email_ticket_confirm(request, registration)
except IamporterError as e:
# TODO : other status code
return render_json({
'success': False,
'code': e.code,
'message': e.message,
})
else:
return render_json({
'success': True,
})
@csrf_exempt
def registration_payment_callback(request):
merchant_uid = request.POST.get('merchant_uid', None)
if not merchant_uid:
return render_io_error('merchant uid dose not exist')
product = Product()
# TODO : check stock
access_token = get_access_token(settings.IMP_API_KEY, settings.IMP_API_SECRET)
imp_client = Iamporter(access_token)
confirm = imp_client.find_by_merchant_uid(merchant_uid)
if confirm['amount'] != product.price:
# TODO : cancel
return render_io_error('amount is not product.price')
remain_ticket_count = (settings.MAX_TICKET_NUM - Registration.objects.filter(payment_status='paid').count())
if remain_ticket_count <= 0:
# TODO : cancel
return render_json({
'success': False,
'message': u'티켓이 매진 되었습니다'
})
registration = Registration.objects.filter(merchant_uid=merchant_uid).get()
registration.payment_status = 'paid'
registration.save()
send_email_ticket_confirm(request, registration)
return render_json({
'success': True
})
def is_registration_time():
ticket_open_date = datetime.strptime(settings.TICKET_OPEN_DATETIME, '%Y-%m-%d %H:%M:%S')
ticket_close_date = datetime.strptime(settings.TICKET_CLOSE_DATETIME, '%Y-%m-%d %H:%M:%S')
cur = datetime.now()
return ticket_open_date <= cur <= ticket_close_date
|
mit
| -7,112,165,041,920,347,000
| 30.098237
| 131
| 0.611291
| false
| 3.957051
| false
| false
| false
|
mgeorgehansen/FIFE_Technomage
|
engine/python/fife/extensions/pychan/widgets/scrollarea.py
|
1
|
4667
|
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2009 by the FIFE team
# http://www.fifengine.de
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from common import *
from widget import Widget
class ScrollArea(Widget):
"""
A wrapper around another (content) widget.
New Attributes
==============
- content: The wrapped widget.
- vertical_scrollbar: Boolean: Set this to False to hide the Vertical scrollbar
- horizontal_scrollbar: Boolean: Set this to False to hide the Horizontal scrollbar
"""
ATTRIBUTES = Widget.ATTRIBUTES + [ BoolAttr("vertical_scrollbar"),BoolAttr("horizontal_scrollbar") ]
DEFAULT_HEXPAND = 1
DEFAULT_VEXPAND = 1
def __init__(self,**kwargs):
self.real_widget = fife.ScrollArea()
self._content = None
super(ScrollArea,self).__init__(**kwargs)
def addChild(self,widget):
self.content = widget
widget.parent = self
def removeChild(self,widget):
if self._content != widget:
raise RuntimeError("%s does not have %s as direct child widget." % (str(self),str(widget)))
self.content = None
widget.parent = None
def _setContent(self,content):
if content is None:
self.real_widget.setContent(content)
else:
self.real_widget.setContent(content.real_widget)
self._content = content
def _getContent(self): return self._content
content = property(_getContent,_setContent)
def deepApply(self,visitorFunc, leaves_first = True):
if leaves_first:
if self._content: self._content.deepApply(visitorFunc, leaves_first = leaves_first)
visitorFunc(self)
if not leaves_first:
if self._content: self._content.deepApply(visitorFunc, leaves_first = leaves_first)
def resizeToContent(self,recurse=True):
if self._content is None: return
if recurse:
self.content.resizeToContent(recurse=recurse)
self.size = self.min_size
def _visibilityToScrollPolicy(self,visibility):
if visibility:
return fife.ScrollArea.SHOW_AUTO
return fife.ScrollArea.SHOW_NEVER
def _scrollPolicyToVisibility(self,policy):
if policy == fife.ScrollArea.SHOW_NEVER:
return False
return True
def _setHorizontalScrollbar(self,visibility):
self.real_widget.setHorizontalScrollPolicy( self._visibilityToScrollPolicy(visibility) )
def _setVerticalScrollbar(self,visibility):
self.real_widget.setVerticalScrollPolicy( self._visibilityToScrollPolicy(visibility) )
def _getHorizontalScrollbar(self):
return self._scrollPolicyToVisibility( self.real_widget.getHorizontalScrollPolicy() )
def _getVerticalScrollbar(self):
return self._scrollPolicyToVisibility( self.real_widget.getVerticalScrollPolicy() )
def sizeChanged(self):
if self.content:
self.content.width = max(self.content.width,self.width-5)
self.content.height = max(self.content.height,self.height-5)
def getVerticalMaxScroll(self):
return self.real_widget.getVerticalMaxScroll()
def getHorizontalMaxScroll(self):
return self.real_widget.getHorizontalMaxScroll()
def _getHorizontalScrollAmount(self):
return self.real_widget.getHorizontalScrollAmount()
def _setHorizontalScrollAmount(self, scroll_amount):
return self.real_widget.setHorizontalScrollAmount(scroll_amount)
def _getVerticalScrollAmount(self):
return self.real_widget.getVerticalScrollAmount()
def _setVerticalScrollAmount(self, scroll_amount):
return self.real_widget.setVerticalScrollAmount(scroll_amount)
vertical_scrollbar = property(_getVerticalScrollbar,_setVerticalScrollbar)
horizontal_scrollbar = property(_getHorizontalScrollbar,_setHorizontalScrollbar)
horizontal_scroll_amount = property(_getHorizontalScrollAmount, _setHorizontalScrollAmount)
vertical_scroll_amount = property(_getVerticalScrollAmount, _setVerticalScrollAmount)
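# Minimal usage sketch (illustrative; `some_large_widget` is an assumed pychan
# widget, not something defined in this module):
#
#   scroll = ScrollArea()
#   scroll.addChild(some_large_widget)   # wrapped widget becomes scroll.content
#   scroll.vertical_scrollbar = True     # show the vertical scrollbar
#   scroll.horizontal_scrollbar = False  # hide the horizontal scrollbar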
|
lgpl-2.1
| -4,644,638,224,177,974,000
| 35.336
| 101
| 0.71952
| false
| 3.671912
| false
| false
| false
|
polera/rblwatch
|
rblwatch/rblwatch.py
|
1
|
6297
|
#!/usr/bin/env python
import sys
import socket
import re
from IPy import IP
from dns.resolver import Resolver, NXDOMAIN, NoNameservers, Timeout, NoAnswer
from threading import Thread
RBLS = [
'aspews.ext.sorbs.net',
'b.barracudacentral.org',
'bl.deadbeef.com',
'bl.emailbasura.org',
'bl.spamcannibal.org',
'bl.spamcop.net',
'blackholes.five-ten-sg.com',
'blacklist.woody.ch',
'bogons.cymru.com',
'cbl.abuseat.org',
'cdl.anti-spam.org.cn',
'combined.abuse.ch',
'combined.rbl.msrbl.net',
'db.wpbl.info',
'dnsbl-1.uceprotect.net',
'dnsbl-2.uceprotect.net',
'dnsbl-3.uceprotect.net',
'dnsbl.cyberlogic.net',
'dnsbl.dronebl.org',
'dnsbl.inps.de',
'dnsbl.njabl.org',
'dnsbl.sorbs.net',
'drone.abuse.ch',
'duinv.aupads.org',
'dul.dnsbl.sorbs.net',
'dul.ru',
'dyna.spamrats.com',
'dynip.rothen.com',
    'http.dnsbl.sorbs.net',
'images.rbl.msrbl.net',
'ips.backscatterer.org',
'ix.dnsbl.manitu.net',
'korea.services.net',
'misc.dnsbl.sorbs.net',
'noptr.spamrats.com',
'ohps.dnsbl.net.au',
'omrs.dnsbl.net.au',
'orvedb.aupads.org',
'osps.dnsbl.net.au',
'osrs.dnsbl.net.au',
'owfs.dnsbl.net.au',
    'owps.dnsbl.net.au',
'pbl.spamhaus.org',
'phishing.rbl.msrbl.net',
    'probes.dnsbl.net.au',
'proxy.bl.gweep.ca',
'proxy.block.transip.nl',
'psbl.surriel.com',
'rdts.dnsbl.net.au',
'relays.bl.gweep.ca',
'relays.bl.kundenserver.de',
'relays.nether.net',
'residential.block.transip.nl',
'ricn.dnsbl.net.au',
'rmst.dnsbl.net.au',
'sbl.spamhaus.org',
'short.rbl.jp',
'smtp.dnsbl.sorbs.net',
'socks.dnsbl.sorbs.net',
'spam.abuse.ch',
'spam.dnsbl.sorbs.net',
'spam.rbl.msrbl.net',
'spam.spamrats.com',
'spamlist.or.kr',
'spamrbl.imp.ch',
't3direct.dnsbl.net.au',
'tor.dnsbl.sectoor.de',
'torserver.tor.dnsbl.sectoor.de',
'ubl.lashback.com',
'ubl.unsubscore.com',
'virbl.bit.nl',
'virus.rbl.jp',
'virus.rbl.msrbl.net',
'web.dnsbl.sorbs.net',
'wormrbl.imp.ch',
'xbl.spamhaus.org',
'zen.spamhaus.org',
'zombie.dnsbl.sorbs.net',
]
class Lookup(Thread):
def __init__(self, host, dnslist, listed, resolver):
Thread.__init__(self)
self.host = host
self.listed = listed
self.dnslist = dnslist
self.resolver = resolver
def run(self):
try:
host_record = self.resolver.query(self.host, "A")
if len(host_record) > 0:
self.listed[self.dnslist]['LISTED'] = True
self.listed[self.dnslist]['HOST'] = host_record[0].address
text_record = self.resolver.query(self.host, "TXT")
if len(text_record) > 0:
self.listed[self.dnslist]['TEXT'] = "\n".join(text_record[0].strings)
self.listed[self.dnslist]['ERROR'] = False
except NXDOMAIN:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NXDOMAIN
except NoNameservers:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NoNameservers
except Timeout:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = Timeout
except NameError:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NameError
except NoAnswer:
self.listed[self.dnslist]['ERROR'] = True
self.listed[self.dnslist]['ERRORTYPE'] = NoAnswer
class RBLSearch(object):
def __init__(self, lookup_host):
self.lookup_host = lookup_host
self._listed = None
self.resolver = Resolver()
self.resolver.timeout = 0.2
self.resolver.lifetime = 1.0
def search(self):
if self._listed is not None:
pass
else:
ip = IP(self.lookup_host)
host = ip.reverseName()
if ip.version() == 4:
host = re.sub('.in-addr.arpa.', '', host)
elif ip.version() == 6:
host = re.sub('.ip6.arpa.', '', host)
self._listed = {'SEARCH_HOST': self.lookup_host}
threads = []
for LIST in RBLS:
self._listed[LIST] = {'LISTED': False}
query = Lookup("%s.%s" % (host, LIST), LIST, self._listed, self.resolver)
threads.append(query)
query.start()
for thread in threads:
thread.join()
return self._listed
listed = property(search)
def print_results(self):
listed = self.listed
print("")
print("--- DNSBL Report for %s ---" % listed['SEARCH_HOST'])
for key in listed:
if key == 'SEARCH_HOST':
continue
if not listed[key].get('ERROR'):
if listed[key]['LISTED']:
print("Results for %s: %s" % (key, listed[key]['LISTED']))
print(" + Host information: %s" % \
(listed[key]['HOST']))
if 'TEXT' in listed[key].keys():
print(" + Additional information: %s" % \
(listed[key]['TEXT']))
else:
#print "*** Error contacting %s ***" % key
pass
if __name__ == "__main__":
# Tests!
try:
if len(sys.argv) > 1:
print("Looking up: %s (please wait)" % sys.argv[1])
ip = sys.argv[1]
pat = re.compile("\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}")
is_ip_address = pat.match(ip)
if not is_ip_address:
try:
ip = socket.gethostbyname(ip)
print("Hostname %s resolved to ip %s" % (sys.argv[1],ip))
except socket.error:
print("IP %s can't be resolved" % ip)
ip = ""
if ip:
searcher = RBLSearch(ip)
searcher.print_results()
else:
print("""Usage summary:
rblwatch <ip address to lookup> """)
except KeyboardInterrupt:
pass
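# Programmatic usage sketch (illustrative; the address below is the
# conventional DNSBL test entry and is only an example):
#
#   results = RBLSearch('127.0.0.2').listed
#   results['zen.spamhaus.org']['LISTED'] # per-DNSBL dict with LISTED/HOST/TEXT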
|
bsd-2-clause
| -1,166,850,851,907,662,000
| 30.80303
| 89
| 0.532476
| false
| 3.20458
| false
| false
| false
|
tbunnyman/FAiler
|
FAiler/faile.py
|
1
|
2872
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
from datetime import datetime
from FAiler.exceptions import FAError
class FAile():
"""
Represents a file downloaded from FurAffinity.
    The base parameters of this class are safe for public read access by design
FAile.directory: the directory to the supplied file or pwd if not supplied
FAile.filename: the full name of the supplied file. This never changes
FAile.date: When this was uploaded
FAile.artist: the name of the user who uploaded this
FAile.name: the name of the submitted file
FAile.fileType: The extension of the submitted file
Some example files
1201126929.[koh]_fooooom_toaster.jpg
1362739849.wolfy-nail_2013-03-01-djzing.jpg
"""
directory = None
filename = None
date = None
artist = None
name = None
fileType = None
def __init__(self, faFile):
"""
This accepts both a standard name or with path to file.
:param faFile: Name of or path to a file from FA
:raise: FAError if the file name cannot be parsed.
"""
self.directory = os.path.dirname(faFile)
self.filename = os.path.basename(faFile)
self._parse_name(self.filename) # Raises on fail
def __repr__(self):
return 'FAile({})'.format(os.path.join(self.directory, self.filename))
def __str__(self):
"""
:return: the filename as a string
"""
return str(self.filename)
def _parse_name(self, name):
"""
Don't repeat yourself.
This assigns everything from the filename and raises FAError on fail
:raises: FAError if name does not parse
"""
faRe = re.compile(r'(\d+)\.([\w\[\]~.-]+?)_(\S+)\.(\w{2,4})')
parsed = re.match(faRe, name)
if parsed is None:
raise FAError("Unable to parse file name: " + name)
self.date, self.artist, self.name, self.fileType = parsed.groups()
self.date = datetime.fromtimestamp(int(self.date))
def clean_reupload(self):
"""
Often enough someone downloads a file from FA and then re-uploads it
This checks for that and changes the Number, User, & Name to that of
the "original" uploader.
The basename is kept unchanged
ex;
>>> from FAiler import FAile
>>> f2 = FAile('1362168441.shim_1362116845.furball_shim_bday2013.jpg')
>>> "{} - {}.{}".format(f2.artist, f2.name, f2.fileType)
'shim - 1362116845.furball_shim_bday2013.jpg'
>>> f2.clean_reupload()
>>> "{0.artist} - {0.name}.{0.fileType}".format(f2)
'furball - shim_bday2013.jpg'
"""
try:
self._parse_name("{0.name}.{0.fileType}".format(self))
except FAError:
pass # We don't care if parse fails this time around
|
bsd-3-clause
| 931,305,739,904,286,800
| 32.788235
| 78
| 0.612465
| false
| 3.64467
| false
| false
| false
|
academichero/jobs
|
modules/board/migrations/0003_auto_20160622_2034.py
|
1
|
1227
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-22 23:34
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('board', '0002_job_featured'),
]
operations = [
migrations.AddField(
model_name='job',
name='internal_job',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='job',
name='open_job',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='job',
name='validity',
field=models.DateTimeField(default=datetime.datetime(2016, 6, 22, 23, 34, 36, 512801, tzinfo=utc)),
preserve_default=False,
),
migrations.AddField(
model_name='job',
name='workload',
field=models.PositiveIntegerField(default=20, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(45)]),
preserve_default=False,
),
]
|
mit
| 998,880,232,990,725,600
| 29.675
| 162
| 0.600652
| false
| 4.305263
| false
| false
| false
|
pavel-paulau/perfrunner
|
perfrunner/tests/kv.py
|
1
|
12499
|
from logger import logger
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.helpers.worker import (
pillowfight_data_load_task,
pillowfight_task,
)
from perfrunner.tests import PerfTest
from perfrunner.workloads.pathoGen import PathoGen
from perfrunner.workloads.tcmalloc import WorkloadGen
class KVTest(PerfTest):
@with_stats
def access(self, *args):
super().access(*args)
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.reset_kv_stats()
self.access()
self.report_kpi()
class ReadLatencyTest(KVTest):
"""Enable reporting of GET latency."""
COLLECTORS = {'latency': True}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class MixedLatencyTest(ReadLatencyTest):
"""Enable reporting of GET and SET latency."""
def _report_kpi(self):
for operation in ('get', 'set'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation)
)
class DGMTest(KVTest):
COLLECTORS = {'disk': True, 'net': False}
class DGMCompactionTest(DGMTest):
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.reset_kv_stats()
self.compact_bucket(wait=False)
self.access()
self.report_kpi()
class DGMCompactedTest(DGMTest):
def run(self):
self.load()
self.wait_for_persistence()
self.compact_bucket()
self.hot_load()
self.reset_kv_stats()
self.access()
self.report_kpi()
class ReadLatencyDGMTest(KVTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class MixedLatencyDGMTest(ReadLatencyDGMTest):
def _report_kpi(self):
for operation in ('get', 'set'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation)
)
class ReadLatencyDGMCompactionTest(DGMCompactionTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class ReadLatencyDGMCompactedTest(DGMCompactedTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
for percentile in 99.9, 99.99:
self.reporter.post(
*self.metrics.kv_latency(operation='get', percentile=percentile)
)
class DurabilityTest(KVTest):
"""Enable reporting of persistTo=1 and replicateTo=1 latency."""
COLLECTORS = {'durability': True}
def _report_kpi(self):
for operation in ('replicate_to', 'persist_to'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation,
collector='durability')
)
class SubDocTest(MixedLatencyTest):
"""Enable reporting of SubDoc latency."""
COLLECTORS = {'latency': True}
class XATTRTest(MixedLatencyTest):
"""Enable reporting of XATTR latency."""
COLLECTORS = {'latency': True}
def run(self):
self.load()
self.xattr_load()
self.wait_for_persistence()
self.access()
self.report_kpi()
class DrainTest(DGMCompactionTest):
"""Enable reporting of average disk write queue size."""
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_disk_write_queue()
)
class InitialLoadTest(DrainTest):
@with_stats
def load(self, *args, **kwargs):
super().load(*args, **kwargs)
def run(self):
self.load()
self.report_kpi()
class IngestionTest(KVTest):
COLLECTORS = {'disk': True, 'net': False}
@with_stats
def access(self, *args, **kwargs):
super(KVTest, self).access(*args, **kwargs)
self.wait_for_persistence()
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_total_queue_age()
)
class WarmupTest(PerfTest):
"""Measure the time it takes to perform cluster warm up."""
COLLECTORS = {'net': False}
@with_stats
def warmup(self):
self.remote.stop_server()
self.remote.drop_caches()
return self._warmup()
@timeit
def _warmup(self):
self.remote.start_server()
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
self.monitor.monitor_warmup(self.memcached, master, bucket)
def _report_kpi(self, time_elapsed):
self.reporter.post(
*self.metrics.elapsed_time(time_elapsed)
)
def run(self):
self.load()
self.wait_for_persistence()
self.access()
self.wait_for_persistence()
time_elapsed = self.warmup()
self.report_kpi(time_elapsed)
class FragmentationTest(PerfTest):
"""Implement the append-only workload.
Scenario:
1. Single node.
2. Load X items, 700-1400 bytes, average 1KB (11-22 fields).
3. Append data
3.1. Mark first 80% of items as working set.
3.2. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
3.3. Mark first 40% of items as working set.
3.4. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
3.5. Mark first 20% of items as working set.
3.6. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
4. Repeat step #3 5 times.
See workloads/tcmalloc.py for details.
    The scenario described above makes it possible to spot issues with memory/allocator
fragmentation.
"""
COLLECTORS = {'net': False}
@with_stats
def load_and_append(self):
password = self.test_config.bucket.password
WorkloadGen(self.test_config.load_settings.items,
self.master_node, self.test_config.buckets[0],
password).run()
def calc_fragmentation_ratio(self) -> float:
ratios = []
for target in self.target_iterator:
port = self.rest.get_memcached_port(target.node)
stats = self.memcached.get_stats(target.node, port, target.bucket,
stats='memory')
ratio = int(stats[b'mem_used']) / int(stats[b'total_heap_bytes'])
ratios.append(ratio)
ratio = 100 * (1 - sum(ratios) / len(ratios))
ratio = round(ratio, 1)
logger.info('Fragmentation: {}'.format(ratio))
return ratio
def _report_kpi(self):
ratio = self.calc_fragmentation_ratio()
self.reporter.post(
*self.metrics.fragmentation_ratio(ratio)
)
def run(self):
self.load_and_append()
self.report_kpi()
class FragmentationLargeTest(FragmentationTest):
@with_stats
def load_and_append(self):
password = self.test_config.bucket.password
WorkloadGen(self.test_config.load_settings.items,
self.master_node, self.test_config.buckets[0], password,
small=False).run()
class PathoGenTest(FragmentationTest):
@with_stats
def access(self, *args):
for target in self.target_iterator:
pg = PathoGen(num_items=self.test_config.load_settings.items,
num_workers=self.test_config.load_settings.workers,
num_iterations=self.test_config.load_settings.iterations,
frozen_mode=False,
host=target.node, port=8091,
bucket=target.bucket, password=target.password)
pg.run()
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_memcached_rss()
)
self.reporter.post(
*self.metrics.max_memcached_rss()
)
def run(self):
self.access()
self.report_kpi()
class PathoGenFrozenTest(PathoGenTest):
@with_stats
def access(self):
for target in self.target_iterator:
pg = PathoGen(num_items=self.test_config.load_settings.items,
num_workers=self.test_config.load_settings.workers,
num_iterations=self.test_config.load_settings.iterations,
frozen_mode=True,
host=target.node, port=8091,
bucket=target.bucket, password=target.password)
pg.run()
class ThroughputTest(KVTest):
def _measure_curr_ops(self) -> int:
ops = 0
for bucket in self.test_config.buckets:
for server in self.cluster_spec.servers:
port = self.rest.get_memcached_port(server)
stats = self.memcached.get_stats(server, port, bucket)
for stat in b'cmd_get', b'cmd_set':
ops += int(stats[stat])
return ops
def _report_kpi(self):
total_ops = self._measure_curr_ops()
self.reporter.post(
*self.metrics.kv_throughput(total_ops)
)
class EvictionTest(KVTest):
COLLECTORS = {'net': False}
def reset_kv_stats(self):
pass
def _measure_ejected_items(self) -> int:
ejected_items = 0
for bucket in self.test_config.buckets:
for hostname, _ in self.rest.get_node_stats(self.master_node,
bucket):
host = hostname.split(':')[0]
port = self.rest.get_memcached_port(host)
stats = self.memcached.get_stats(host, port, bucket)
ejected_items += int(stats[b'vb_active_auto_delete_count'])
ejected_items += int(stats[b'vb_pending_auto_delete_count'])
ejected_items += int(stats[b'vb_replica_auto_delete_count'])
return ejected_items
def _report_kpi(self):
ejected_items = self._measure_ejected_items()
self.reporter.post(
*self.metrics.kv_throughput(ejected_items)
)
class PillowFightTest(PerfTest):
"""Use cbc-pillowfight from libcouchbase to drive cluster."""
ALL_BUCKETS = True
def load(self, *args):
PerfTest.load(self, task=pillowfight_data_load_task)
@with_stats
def access(self, *args):
self.download_certificate()
PerfTest.access(self, task=pillowfight_task)
def _report_kpi(self, *args):
self.reporter.post(
*self.metrics.max_ops()
)
def run(self):
self.load()
self.wait_for_persistence()
self.access()
self.report_kpi()
class CompressionTest(PillowFightTest):
COLLECTORS = {'iostat': False, 'net': False}
@with_stats
@timeit
def wait_for_compression(self):
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
self.monitor.monitor_compression(self.memcached, master, bucket)
def _report_kpi(self, time_elapsed: float):
self.reporter.post(
*self.metrics.compression_throughput(time_elapsed)
)
def run(self):
self.load()
time_elapsed = self.wait_for_compression()
self.report_kpi(time_elapsed)
class CompactionTest(KVTest):
COLLECTORS = {'net': False}
@with_stats
@timeit
def compact(self):
self.compact_bucket()
def _report_kpi(self, time_elapsed):
self.reporter.post(
*self.metrics.elapsed_time(time_elapsed)
)
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.access_bg()
time_elapsed = self.compact()
self.report_kpi(time_elapsed)
class MemoryOverheadTest(PillowFightTest):
COLLECTORS = {'iostat': False, 'net': False}
PF_KEY_SIZE = 20
def _report_kpi(self):
self.reporter.post(
*self.metrics.memory_overhead(key_size=self.PF_KEY_SIZE)
)
@with_stats
def access(self, *args):
self.sleep()
class CpuUtilizationTest(KVTest):
def _report_kpi(self, *args, **kwargs):
self.reporter.post(
*self.metrics.cpu_utilization()
)
|
apache-2.0
| -1,810,513,932,737,493,200
| 23.799603
| 96
| 0.587807
| false
| 3.711105
| true
| false
| false
|
guillempalou/scikit-cv
|
skcv/video/segmentation/region_tracking.py
|
1
|
3595
|
import networkx as nx
import numpy as np
def bipartite_region_tracking(partition, optical_flow, reliability,
matching_th=0.1, reliability_th=0.2):
"""
Parameters
----------
partition: numpy array
A 3D label array where each label represents a region
optical_flow: numpy array
A 3D,2 array representing optical flow values for each frame
reliability: numpy array
A 3D array representing the flow reliability
matching_th: float, optional
matching threshold for the bipartite matching
reliability_th: float, optional
reliability threshold to stop tracking
Returns
-------
    A relabeled partition array with the same shape as `partition`, where
    region labels are propagated (tracked) across frames
"""
dimensions = len(partition.shape)
if dimensions != 3: # pragma: no cover
raise ValueError("Dimensions must be 3")
# link regions across frames
# perform a weighted bipartite matchings
frames = partition.shape[0]
width = partition.shape[1]
height = partition.shape[2]
new_partition = np.zeros_like(partition)
#the first frame is the same
new_partition[0,...] = partition[0,...]
current_label = np.max(np.unique(partition[0,...]))+1
for frame in range(frames-1):
labels = np.unique(new_partition[frame, ...])
labels_next = np.unique(partition[frame+1, ...])
# create a graph matching contours
bipartite = nx.Graph()
bipartite.add_nodes_from([l for l in labels])
bipartite.add_nodes_from([l for l in labels_next])
# find the correspondence of each label to the next frame
for label in labels:
px, py = np.where(new_partition[frame, ...] == label)
# find the mean reliability
rel = np.mean(reliability[frame, px, py])
if rel < reliability_th: # pragma: no cover
continue
# find where the regions projects to the next frame
npx = px + optical_flow[frame, px, py, 0]
npy = py + optical_flow[frame, px, py, 1]
#check for bounds
in_x = np.logical_and(0 <= npx, npx < width)
in_y = np.logical_and(0 <= npy, npy < height)
idx = np.logical_and(in_x, in_y)
npx = npx[idx]
npy = npy[idx]
count = np.bincount(partition[frame+1,
npx.astype(np.int),
npy.astype(np.int)].astype(np.int))
# get the count and eliminate weak correspondences
max_count = max(count)
nodes = np.nonzero(count > max_count*matching_th)[0]
weight = count[nodes]/max_count
for i, n in enumerate(nodes):
bipartite.add_edge(label, n, weight=weight[i])
# max weighted matching
matchings = nx.max_weight_matching(bipartite)
# assign propagated labels to the matchings
for a in matchings:
b = matchings[a]
#print("Match {0}-{1}".format(a,b))
if b not in labels_next:
continue
px, py = np.where(partition[frame+1, ...] == b)
new_partition[frame+1, px, py] = a
# assign new labels to non-matched regions
for n in bipartite.nodes():
if n not in labels_next:
continue
if n not in matchings:
px, py = np.where(partition[frame+1, ...] == n)
new_partition[frame+1, px, py] = current_label + 1
current_label += 1
return new_partition
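# Usage sketch (illustrative; the shapes restate the docstring and the input
# arrays are assumptions about the caller's data):
#
#   partition    : (n_frames, width, height) integer label array
#   optical_flow : (n_frames, width, height, 2) flow vectors per frame
#   reliability  : (n_frames, width, height) per-pixel flow reliability
#
#   tracked = bipartite_region_tracking(partition, optical_flow, reliability)
#
# `tracked` has the same shape as `partition`, with labels propagated across
# frames and fresh labels assigned to unmatched regions.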
|
bsd-3-clause
| 868,079,825,263,690,500
| 31.107143
| 77
| 0.569124
| false
| 4.01676
| false
| false
| false
|
iradicek/clara
|
clara/modeltograph.py
|
1
|
1609
|
'''
Converts a Program model to an image (graph)
'''
import pygraphviz as pgv
def stmts_to_str(title, types, ss):
l = [title]
if types:
l.append(', '.join(['%s: %s' % x for x in types]))
for (v, e) in ss:
ls = str(e)
ls = ls.replace(r'\n', r'\\n')
ls = ls.replace(r'\r', r'\\r')
ls = ls.replace(r'\t', r'\\t')
l.append('%s := %s' % (v, ls))
ml = max([len(x) for x in l])
l.insert(2 if types else 1, '-' * ml)
return '\n'.join(l)
def create_graph(pm):
G = pgv.AGraph(directed=True)
for name, fnc in list(pm.fncs.items()):
fnclab = 'fun %s (%s) : %s --- ' % (
fnc.name,
', '.join(['%s : %s' % x for x in fnc.params]),
fnc.rettype)
types = list(fnc.types.items())
for loc in fnc.locs():
fnclabel = fnclab if loc == fnc.initloc else ''
label = stmts_to_str('%sL%s' % (fnclabel, loc,), types,
fnc.exprs(loc))
types = None
G.add_node('%s-%s' % (name, loc), label=label, shape='rectangle',
fontname='monospace')
for loc in fnc.locs():
locs = '%s-%s' % (name, loc)
loc2 = fnc.trans(loc, True)
locs2 = '%s-%s' % (name, loc2)
if loc2:
G.add_edge(locs, locs2, label='True')
loc2 = fnc.trans(loc, False)
locs2 = '%s-%s' % (name, loc2)
if loc2:
G.add_edge(locs, locs2, label='False')
G.layout('dot')
return G
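# Usage sketch (illustrative; `pm` is an assumed Program model exposing the
# `fncs` mapping used above):
#
#   G = create_graph(pm)
#   G.draw('program.png') # layout was already computed via G.layout('dot')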
|
gpl-3.0
| -2,082,160,572,237,811,200
| 25.816667
| 77
| 0.444997
| false
| 3.124272
| false
| false
| false
|
willmcgugan/rich
|
rich/palette.py
|
1
|
3288
|
from math import sqrt
from functools import lru_cache
from typing import Sequence, Tuple, TYPE_CHECKING
from .color_triplet import ColorTriplet
if TYPE_CHECKING:
from rich.table import Table
class Palette:
"""A palette of available colors."""
def __init__(self, colors: Sequence[Tuple[int, int, int]]):
self._colors = colors
def __getitem__(self, number: int) -> ColorTriplet:
return ColorTriplet(*self._colors[number])
def __rich__(self) -> "Table":
from rich.color import Color
from rich.style import Style
from rich.text import Text
from rich.table import Table
table = Table(
"index",
"RGB",
"Color",
title="Palette",
caption=f"{len(self._colors)} colors",
highlight=True,
caption_justify="right",
)
for index, color in enumerate(self._colors):
table.add_row(
str(index),
repr(color),
Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
)
return table
# This is somewhat inefficient and needs caching
@lru_cache(maxsize=1024)
def match(self, color: Tuple[int, int, int]) -> int:
"""Find a color from a palette that most closely matches a given color.
Args:
            color (Tuple[int, int, int]): RGB components in range 0 to 255.
Returns:
            int: Index of closest matching color.
"""
red1, green1, blue1 = color
_sqrt = sqrt
get_color = self._colors.__getitem__
def get_color_distance(index: int) -> float:
"""Get the distance to a color."""
red2, green2, blue2 = get_color(index)
red_mean = (red1 + red2) // 2
red = red1 - red2
green = green1 - green2
blue = blue1 - blue2
return _sqrt(
(((512 + red_mean) * red * red) >> 8)
+ 4 * green * green
+ (((767 - red_mean) * blue * blue) >> 8)
)
min_index = min(range(len(self._colors)), key=get_color_distance)
return min_index
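# Example of Palette.match (illustrative; the palette below is an assumption):
#
#   Palette([(0, 0, 0), (255, 255, 255)]).match((250, 250, 250)) # -> 1
#
# i.e. the index of the entry with the smallest weighted RGB distance.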
if __name__ == "__main__": # pragma: no cover
import colorsys
from typing import Iterable
from rich.color import Color
from rich.console import Console, ConsoleOptions
from rich.segment import Segment
from rich.style import Style
class ColorBox:
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> Iterable[Segment]:
height = console.size.height - 3
for y in range(0, height):
for x in range(options.max_width):
h = x / options.max_width
l = y / (height + 1)
r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment("▄", Style(color=color, bgcolor=bgcolor))
yield Segment.line()
console = Console()
console.print(ColorBox())
|
mit
| 1,800,412,422,958,308,600
| 31.86
| 82
| 0.532562
| false
| 4.002436
| false
| false
| false
|
liushuyu/DraftBin
|
pyscripts/acbs/lib/magic.py
|
1
|
8071
|
# coding: utf-8
'''
Python bindings for libmagic
'''
import ctypes
from collections import namedtuple
from ctypes import *
from ctypes.util import find_library
def _init():
"""
Loads the shared library through ctypes and returns a library
L{ctypes.CDLL} instance
"""
return ctypes.cdll.LoadLibrary(find_library('magic'))
_libraries = {}
_libraries['magic'] = _init()
# Flag constants for open and setflags
MAGIC_NONE = NONE = 0
MAGIC_DEBUG = DEBUG = 1
MAGIC_SYMLINK = SYMLINK = 2
MAGIC_COMPRESS = COMPRESS = 4
MAGIC_DEVICES = DEVICES = 8
MAGIC_MIME_TYPE = MIME_TYPE = 16
MAGIC_CONTINUE = CONTINUE = 32
MAGIC_CHECK = CHECK = 64
MAGIC_PRESERVE_ATIME = PRESERVE_ATIME = 128
MAGIC_RAW = RAW = 256
MAGIC_ERROR = ERROR = 512
MAGIC_MIME_ENCODING = MIME_ENCODING = 1024
MAGIC_MIME = MIME = 1040 # MIME_TYPE + MIME_ENCODING
MAGIC_APPLE = APPLE = 2048
MAGIC_NO_CHECK_COMPRESS = NO_CHECK_COMPRESS = 4096
MAGIC_NO_CHECK_TAR = NO_CHECK_TAR = 8192
MAGIC_NO_CHECK_SOFT = NO_CHECK_SOFT = 16384
MAGIC_NO_CHECK_APPTYPE = NO_CHECK_APPTYPE = 32768
MAGIC_NO_CHECK_ELF = NO_CHECK_ELF = 65536
MAGIC_NO_CHECK_TEXT = NO_CHECK_TEXT = 131072
MAGIC_NO_CHECK_CDF = NO_CHECK_CDF = 262144
MAGIC_NO_CHECK_TOKENS = NO_CHECK_TOKENS = 1048576
MAGIC_NO_CHECK_ENCODING = NO_CHECK_ENCODING = 2097152
MAGIC_NO_CHECK_BUILTIN = NO_CHECK_BUILTIN = 4173824
FileMagic = namedtuple('FileMagic', ('mime_type', 'encoding', 'name'))
class magic_set(Structure):
pass
magic_set._fields_ = []
magic_t = POINTER(magic_set)
_open = _libraries['magic'].magic_open
_open.restype = magic_t
_open.argtypes = [c_int]
_close = _libraries['magic'].magic_close
_close.restype = None
_close.argtypes = [magic_t]
_file = _libraries['magic'].magic_file
_file.restype = c_char_p
_file.argtypes = [magic_t, c_char_p]
_descriptor = _libraries['magic'].magic_descriptor
_descriptor.restype = c_char_p
_descriptor.argtypes = [magic_t, c_int]
_buffer = _libraries['magic'].magic_buffer
_buffer.restype = c_char_p
_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_error = _libraries['magic'].magic_error
_error.restype = c_char_p
_error.argtypes = [magic_t]
_setflags = _libraries['magic'].magic_setflags
_setflags.restype = c_int
_setflags.argtypes = [magic_t, c_int]
_load = _libraries['magic'].magic_load
_load.restype = c_int
_load.argtypes = [magic_t, c_char_p]
_compile = _libraries['magic'].magic_compile
_compile.restype = c_int
_compile.argtypes = [magic_t, c_char_p]
_check = _libraries['magic'].magic_check
_check.restype = c_int
_check.argtypes = [magic_t, c_char_p]
_list = _libraries['magic'].magic_list
_list.restype = c_int
_list.argtypes = [magic_t, c_char_p]
_errno = _libraries['magic'].magic_errno
_errno.restype = c_int
_errno.argtypes = [magic_t]
class Magic(object):
def __init__(self, ms):
self._magic_t = ms
def close(self):
"""
Closes the magic database and deallocates any resources used.
"""
_close(self._magic_t)
def file(self, filename):
"""
Returns a textual description of the contents of the argument passed
as a filename or None if an error occurred and the MAGIC_ERROR flag
is set. A call to errno() will return the numeric error code.
"""
if isinstance(filename, bytes):
bi = filename
else:
try: # keep Python 2 compatibility
bi = bytes(filename, 'utf-8')
except TypeError:
bi = bytes(filename)
r = _file(self._magic_t, bi)
if isinstance(r, str):
return r
else:
            return str(r, 'utf-8')  # decode libmagic's bytes result to str
def descriptor(self, fd):
"""
Like the file method, but the argument is a file descriptor.
"""
        r = _descriptor(self._magic_t, fd)
        if isinstance(r, str):
            return r
        else:
            return str(r, 'utf-8')  # decode libmagic's bytes result to str
def buffer(self, buf):
"""
Returns a textual description of the contents of the argument passed
as a buffer or None if an error occurred and the MAGIC_ERROR flag
is set. A call to errno() will return the numeric error code.
"""
r = _buffer(self._magic_t, buf, len(buf))
if isinstance(r, str):
return r
else:
            return str(r, 'utf-8')  # decode libmagic's bytes result to str
def error(self):
"""
Returns a textual explanation of the last error or None
if there was no error.
"""
e = _error(self._magic_t)
        if e is None or isinstance(e, str):
            return e
        else:
            return str(e, 'utf-8')  # decode libmagic's bytes result to str
def setflags(self, flags):
"""
Set flags on the magic object which determine how magic checking
behaves; a bitwise OR of the flags described in libmagic(3), but
without the MAGIC_ prefix.
Returns -1 on systems that don't support utime(2) or utimes(2)
when PRESERVE_ATIME is set.
"""
return _setflags(self._magic_t, flags)
def load(self, filename=None):
"""
Must be called to load entries in the colon separated list of database
files passed as argument or the default database file if no argument
before any magic queries can be performed.
Returns 0 on success and -1 on failure.
"""
return _load(self._magic_t, filename)
def compile(self, dbs):
"""
Compile entries in the colon separated list of database files
passed as argument or the default database file if no argument.
Returns 0 on success and -1 on failure.
The compiled files created are named from the basename(1) of each file
argument with ".mgc" appended to it.
"""
return _compile(self._magic_t, dbs)
def check(self, dbs):
"""
Check the validity of entries in the colon separated list of
database files passed as argument or the default database file
if no argument.
Returns 0 on success and -1 on failure.
"""
return _check(self._magic_t, dbs)
def list(self, dbs):
"""
Check the validity of entries in the colon separated list of
database files passed as argument or the default database file
if no argument.
Returns 0 on success and -1 on failure.
"""
return _list(self._magic_t, dbs)
def errno(self):
"""
Returns a numeric error code. If return value is 0, an internal
magic error occurred. If return value is non-zero, the value is
        an OS error code. The errno module or os.strerror() can be used
to provide detailed error information.
"""
return _errno(self._magic_t)
def open(flags):
"""
Returns a magic object on success and None on failure.
Flags argument as for setflags.
"""
return Magic(_open(flags))
# Objects used by `detect_from_` functions
mime_magic = Magic(_open(MAGIC_MIME))
mime_magic.load()
none_magic = Magic(_open(MAGIC_NONE))
none_magic.load()
def _create_filemagic(mime_detected, type_detected):
mime_type, mime_encoding = mime_detected.split('; ')
return FileMagic(name=type_detected, mime_type=mime_type,
encoding=mime_encoding.replace('charset=', ''))
def detect_from_filename(filename):
'''Detect mime type, encoding and file type from a filename
Returns a `FileMagic` namedtuple.
'''
return _create_filemagic(mime_magic.file(filename),
none_magic.file(filename))
def detect_from_fobj(fobj):
'''Detect mime type, encoding and file type from file-like object
Returns a `FileMagic` namedtuple.
'''
file_descriptor = fobj.fileno()
return _create_filemagic(mime_magic.descriptor(file_descriptor),
none_magic.descriptor(file_descriptor))
def detect_from_content(byte_content):
'''Detect mime type, encoding and file type from bytes
Returns a `FileMagic` namedtuple.
'''
return _create_filemagic(mime_magic.buffer(byte_content),
none_magic.buffer(byte_content))
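# Usage sketch (illustrative; the path is only an example):
#
#   info = detect_from_filename('/usr/bin/env')
#   info.mime_type, info.encoding, info.name # fields of the FileMagic namedtuple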
|
lgpl-2.1
| -4,565,356,466,739,291,600
| 28.137184
| 78
| 0.635237
| false
| 3.653689
| false
| false
| false
|
evansosenko/spin-lifetime-analysis
|
analysis/data.py
|
1
|
3319
|
import os
import numpy
import scipy_data_fitting
class Fig4(scipy_data_fitting.Data):
"""
Use this to load the data from Figure 4 in PhysRevLett.105.167202.
Should not be used directly, but only subclassed.
"""
def __init__(self, subfig):
super().__init__()
self.subfig = subfig
self.name = 'fig_4' + self.subfig
self.genfromtxt_args['delimiter'] = "\t"
self.genfromtxt_args['skip_header'] = 1
self.path = os.path.join('data', 'PhysRevLett.105.167202',
'figure_4' + subfig + '.tsv')
if subfig == 'd': self.scale = (1, 'milli')
class Fig4Parallel(Fig4):
"""
The parallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig)
self.name = self.name + '_parallel'
self.genfromtxt_args['usecols'] = (0, 1)
if subfig == 'c': self.path = self.path.replace('.tsv', '.1.tsv')
class Fig4Antiparallel(Fig4):
"""
The antiparallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig)
self.name = self.name + '_antiparallel'
if subfig == 'c':
self.path = self.path.replace('.tsv', '.2.tsv')
else:
self.genfromtxt_args['usecols'] = (0, 2)
class Fig4Difference(scipy_data_fitting.Data):
"""
The difference of the parallel and antiparallel field data
from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__()
self.subfig = subfig
self.name = 'fig_4' + self.subfig + '_difference'
parallel = Fig4Parallel(self.subfig)
antiparallel = Fig4Antiparallel(self.subfig)
self.array = numpy.array([
parallel.array[0],
abs(parallel.array[1] - antiparallel.array[1])
])
class Fig4Normalized(scipy_data_fitting.Data):
"""
The normalized field data from Figure 4 in PhysRevLett.105.167202.
Should not be used directly, but only subclassed.
"""
def __init__(self, subfig, data_class):
super().__init__()
self.subfig = subfig
self.name = 'fig_4' + self.subfig + '_normalized'
self.unnormalized = data_class(self.subfig).array
self.array = numpy.array([
self.unnormalized[0],
self.unnormalized[1] / max(abs(self.unnormalized[1]))
])
class Fig4NormalizedParallel(Fig4Normalized):
"""
The normalized parallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig, Fig4Parallel)
self.name = self.name + '_parallel'
class Fig4NormalizedAntiparallel(Fig4Normalized):
"""
The normalized antiparallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig, Fig4Antiparallel)
self.name = self.name + '_antiparallel'
class Fig4NormalizedDifference(Fig4Normalized):
"""
The difference of the normalized parallel and antiparallel field data
from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig, Fig4Difference)
self.name = self.name + '_difference'
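# Usage sketch (illustrative; assumes the data files under
# data/PhysRevLett.105.167202/ are present):
#
#   parallel = Fig4Parallel('a')
#   fields, signal = parallel.array # 2 x N array loaded via scipy_data_fitting.Data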
|
mit
| 4,957,334,593,424,651,000
| 30.018692
| 83
| 0.6047
| false
| 3.450104
| false
| false
| false
|
miguelinux/vbox
|
src/VBox/ValidationKit/common/utils.py
|
1
|
58323
|
# -*- coding: utf-8 -*-
# $Id: utils.py $
# pylint: disable=C0302
"""
Common Utility Functions.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 108487 $"
# Standard Python imports.
import datetime;
import os;
import platform;
import re;
import stat;
import subprocess;
import sys;
import time;
import traceback;
import unittest;
if sys.platform == 'win32':
import ctypes;
import win32api; # pylint: disable=F0401
import win32con; # pylint: disable=F0401
import win32console; # pylint: disable=F0401
import win32process; # pylint: disable=F0401
else:
import signal;
# Python 3 hacks:
if sys.version_info[0] >= 3:
unicode = str; # pylint: disable=redefined-builtin,invalid-name
xrange = range; # pylint: disable=redefined-builtin,invalid-name
long = int; # pylint: disable=redefined-builtin,invalid-name
#
# Host OS and CPU.
#
def getHostOs():
"""
Gets the host OS name (short).
See the KBUILD_OSES variable in kBuild/header.kmk for possible return values.
"""
sPlatform = platform.system();
if sPlatform in ('Linux', 'Darwin', 'Solaris', 'FreeBSD', 'NetBSD', 'OpenBSD'):
sPlatform = sPlatform.lower();
elif sPlatform == 'Windows':
sPlatform = 'win';
elif sPlatform == 'SunOS':
sPlatform = 'solaris';
else:
raise Exception('Unsupported platform "%s"' % (sPlatform,));
return sPlatform;
g_sHostArch = None;
def getHostArch():
"""
Gets the host CPU architecture.
See the KBUILD_ARCHES variable in kBuild/header.kmk for possible return values.
"""
global g_sHostArch;
if g_sHostArch is None:
sArch = platform.machine();
if sArch in ('i386', 'i486', 'i586', 'i686', 'i786', 'i886', 'x86'):
sArch = 'x86';
elif sArch in ('AMD64', 'amd64', 'x86_64'):
sArch = 'amd64';
elif sArch == 'i86pc': # SunOS
if platform.architecture()[0] == '64bit':
sArch = 'amd64';
else:
try:
sArch = processOutputChecked(['/usr/bin/isainfo', '-n',]);
except:
pass;
sArch = sArch.strip();
if sArch != 'amd64':
sArch = 'x86';
else:
raise Exception('Unsupported architecture/machine "%s"' % (sArch,));
g_sHostArch = sArch;
return g_sHostArch;
def getHostOsDotArch():
"""
Gets the 'os.arch' for the host.
"""
return '%s.%s' % (getHostOs(), getHostArch());
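# Example (illustrative): getHostOsDotArch() returns strings such as
# 'linux.amd64' or 'win.x86'.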
def isValidOs(sOs):
"""
Validates the OS name.
"""
if sOs in ('darwin', 'dos', 'dragonfly', 'freebsd', 'haiku', 'l4', 'linux', 'netbsd', 'nt', 'openbsd', \
'os2', 'solaris', 'win', 'os-agnostic'):
return True;
return False;
def isValidArch(sArch):
"""
Validates the CPU architecture name.
"""
if sArch in ('x86', 'amd64', 'sparc32', 'sparc64', 's390', 's390x', 'ppc32', 'ppc64', \
'mips32', 'mips64', 'ia64', 'hppa32', 'hppa64', 'arm', 'alpha'):
return True;
return False;
def isValidOsDotArch(sOsDotArch):
"""
Validates the 'os.arch' string.
"""
asParts = sOsDotArch.split('.');
    if len(asParts) != 2:
return False;
return isValidOs(asParts[0]) \
and isValidArch(asParts[1]);
def getHostOsVersion():
"""
Returns the host OS version. This is platform.release with additional
distro indicator on linux.
"""
sVersion = platform.release();
sOs = getHostOs();
if sOs == 'linux':
sDist = '';
try:
# try /etc/lsb-release first to distinguish between Debian and Ubuntu
oFile = open('/etc/lsb-release');
for sLine in oFile:
oMatch = re.search(r'(?:DISTRIB_DESCRIPTION\s*=)\s*"*(.*)"', sLine);
if oMatch is not None:
sDist = oMatch.group(1).strip();
except:
pass;
if sDist:
sVersion += ' / ' + sDist;
else:
asFiles = \
[
[ '/etc/debian_version', 'Debian v'],
[ '/etc/gentoo-release', '' ],
[ '/etc/oracle-release', '' ],
[ '/etc/redhat-release', '' ],
[ '/etc/SuSE-release', '' ],
];
for sFile, sPrefix in asFiles:
if os.path.isfile(sFile):
try:
oFile = open(sFile);
sLine = oFile.readline();
oFile.close();
except:
continue;
sLine = sLine.strip()
if len(sLine) > 0:
sVersion += ' / ' + sPrefix + sLine;
break;
elif sOs == 'solaris':
sVersion = platform.version();
if os.path.isfile('/etc/release'):
try:
oFile = open('/etc/release');
sLast = oFile.readlines()[-1];
oFile.close();
sLast = sLast.strip();
if len(sLast) > 0:
sVersion += ' (' + sLast + ')';
except:
pass;
elif sOs == 'darwin':
sOsxVersion = platform.mac_ver()[0];
codenames = {"4": "Tiger",
"5": "Leopard",
"6": "Snow Leopard",
"7": "Lion",
"8": "Mountain Lion",
"9": "Mavericks",
"10": "Yosemite",
"11": "El Capitan",
"12": "Sierra",
"13": "Unknown 13",
"14": "Unknown 14", }
sVersion += ' / OS X ' + sOsxVersion + ' (' + codenames[sOsxVersion.split('.')[1]] + ')'
return sVersion;
#
# File system.
#
def openNoInherit(sFile, sMode = 'r'):
"""
    Wrapper around open() that tries its best to make sure the file isn't
inherited by child processes.
    This is a best effort thing at the moment as it doesn't synchronize with
child process spawning in any way. Thus it can be subject to races in
multithreaded programs.
"""
try:
from fcntl import FD_CLOEXEC, F_GETFD, F_SETFD, fcntl; # pylint: disable=F0401
except:
return open(sFile, sMode);
oFile = open(sFile, sMode)
#try:
fcntl(oFile, F_SETFD, fcntl(oFile, F_GETFD) | FD_CLOEXEC);
#except:
# pass;
return oFile;
def noxcptReadLink(sPath, sXcptRet):
"""
No exceptions os.readlink wrapper.
"""
try:
sRet = os.readlink(sPath); # pylint: disable=E1101
except:
sRet = sXcptRet;
return sRet;
def readFile(sFile, sMode = 'rb'):
"""
Reads the entire file.
"""
oFile = open(sFile, sMode);
sRet = oFile.read();
oFile.close();
return sRet;
def noxcptReadFile(sFile, sXcptRet, sMode = 'rb'):
"""
No exceptions common.readFile wrapper.
"""
try:
sRet = readFile(sFile, sMode);
except:
sRet = sXcptRet;
return sRet;
def noxcptRmDir(sDir, oXcptRet = False):
"""
No exceptions os.rmdir wrapper.
"""
oRet = True;
try:
os.rmdir(sDir);
except:
oRet = oXcptRet;
return oRet;
def noxcptDeleteFile(sFile, oXcptRet = False):
"""
No exceptions os.remove wrapper.
"""
oRet = True;
try:
os.remove(sFile);
except:
oRet = oXcptRet;
return oRet;
def dirEnumerateTree(sDir, fnCallback, fIgnoreExceptions = True):
# type: (string, (string, stat) -> bool) -> bool
"""
    Recursively walks a directory tree, calling fnCallback for each entry.
fnCallback takes a full path and stat object (can be None). It
returns a boolean value, False stops walking and returns immediately.
Returns True or False depending on fnCallback.
    Returns None if fIgnoreExceptions is True and an exception was raised by listdir.
"""
def __worker(sCurDir):
""" Worker for """
try:
asNames = os.listdir(sCurDir);
except:
if not fIgnoreExceptions:
raise;
return None;
rc = True;
for sName in asNames:
if sName not in [ '.', '..' ]:
sFullName = os.path.join(sCurDir, sName);
try: oStat = os.lstat(sFullName);
except: oStat = None;
if fnCallback(sFullName, oStat) is False:
return False;
if oStat is not None and stat.S_ISDIR(oStat.st_mode):
rc = __worker(sFullName);
if rc is False:
break;
return rc;
# Ensure unicode path here so listdir also returns unicode on windows.
## @todo figure out unicode stuff on non-windows.
if sys.platform == 'win32':
sDir = unicode(sDir);
return __worker(sDir);
def formatFileMode(uMode):
# type: (int) -> string
"""
    Format an st_mode value 'ls -la' fashion.
Returns string.
"""
if stat.S_ISDIR(uMode): sMode = 'd';
elif stat.S_ISREG(uMode): sMode = '-';
elif stat.S_ISLNK(uMode): sMode = 'l';
elif stat.S_ISFIFO(uMode): sMode = 'p';
elif stat.S_ISCHR(uMode): sMode = 'c';
elif stat.S_ISBLK(uMode): sMode = 'b';
elif stat.S_ISSOCK(uMode): sMode = 's';
else: sMode = '?';
## @todo sticky bits.
sMode += 'r' if uMode & stat.S_IRUSR else '-';
sMode += 'w' if uMode & stat.S_IWUSR else '-';
sMode += 'x' if uMode & stat.S_IXUSR else '-';
sMode += 'r' if uMode & stat.S_IRGRP else '-';
sMode += 'w' if uMode & stat.S_IWGRP else '-';
sMode += 'x' if uMode & stat.S_IXGRP else '-';
sMode += 'r' if uMode & stat.S_IROTH else '-';
sMode += 'w' if uMode & stat.S_IWOTH else '-';
sMode += 'x' if uMode & stat.S_IXOTH else '-';
sMode += ' ';
return sMode;
def formatFileStat(oStat):
# type: (stat) -> string
"""
    Format a stat result 'ls -la' fashion (numeric IDs).
Returns string.
"""
return '%s %3s %4s %4s %10s %s' \
% (formatFileMode(oStat.st_mode), oStat.st_nlink, oStat.st_uid, oStat.st_gid, oStat.st_size,
time.strftime('%Y-%m-%d %H:%M', time.localtime(oStat.st_mtime)), );
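# Example (illustrative; the values are made up): formatFileStat() produces a
# line roughly like '-rw-r--r--    1 1000 1000       4096 2016-01-01 12:00'.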
## Good buffer for file operations.
g_cbGoodBufferSize = 256*1024;
## The original shutil.copyfileobj.
g_fnOriginalShCopyFileObj = None;
def __myshutilcopyfileobj(fsrc, fdst, length = g_cbGoodBufferSize):
""" shutil.copyfileobj with different length default value (16384 is slow with python 2.7 on windows). """
return g_fnOriginalShCopyFileObj(fsrc, fdst, length);
def __installShUtilHacks(shutil):
""" Installs the shutil buffer size hacks. """
global g_fnOriginalShCopyFileObj;
if g_fnOriginalShCopyFileObj is None:
g_fnOriginalShCopyFileObj = shutil.copyfileobj;
shutil.copyfileobj = __myshutilcopyfileobj;
return True;
def copyFileSimple(sFileSrc, sFileDst):
"""
Wrapper around shutil.copyfile that simply copies the data of a regular file.
Raises exception on failure.
Return True for show.
"""
import shutil;
__installShUtilHacks(shutil);
return shutil.copyfile(sFileSrc, sFileDst);
#
# SubProcess.
#
def _processFixPythonInterpreter(aPositionalArgs, dKeywordArgs):
"""
If the "executable" is a python script, insert the python interpreter at
    the head of the argument list so that it will work on systems which don't
support hash-bang scripts.
"""
asArgs = dKeywordArgs.get('args');
if asArgs is None:
asArgs = aPositionalArgs[0];
if asArgs[0].endswith('.py'):
if sys.executable is not None and len(sys.executable) > 0:
asArgs.insert(0, sys.executable);
else:
asArgs.insert(0, 'python');
# paranoia...
if dKeywordArgs.get('args') is not None:
dKeywordArgs['args'] = asArgs;
else:
aPositionalArgs = (asArgs,) + aPositionalArgs[1:];
return None;
def processCall(*aPositionalArgs, **dKeywordArgs):
"""
    Wrapper around subprocess.call to deal with its absence in older
python versions.
Returns process exit code (see subprocess.poll).
"""
assert dKeywordArgs.get('stdout') is None;
assert dKeywordArgs.get('stderr') is None;
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
oProcess = subprocess.Popen(*aPositionalArgs, **dKeywordArgs);
return oProcess.wait();
def processOutputChecked(*aPositionalArgs, **dKeywordArgs):
"""
    Wrapper around subprocess.check_output to deal with its absence in older
python versions.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
oProcess = subprocess.Popen(stdout=subprocess.PIPE, *aPositionalArgs, **dKeywordArgs);
sOutput, _ = oProcess.communicate();
iExitCode = oProcess.poll();
    if iExitCode != 0:
asArgs = dKeywordArgs.get('args');
if asArgs is None:
asArgs = aPositionalArgs[0];
print(sOutput);
raise subprocess.CalledProcessError(iExitCode, asArgs);
return str(sOutput); # str() make pylint happy.
g_fOldSudo = None;
def _sudoFixArguments(aPositionalArgs, dKeywordArgs, fInitialEnv = True):
"""
    Adds 'sudo' (or similar) to the args parameter, wherever it is.
"""
# Are we root?
fIsRoot = True;
try:
fIsRoot = os.getuid() == 0; # pylint: disable=E1101
except:
pass;
# If not, prepend sudo (non-interactive, simulate initial login).
if fIsRoot is not True:
asArgs = dKeywordArgs.get('args');
if asArgs is None:
asArgs = aPositionalArgs[0];
# Detect old sudo.
global g_fOldSudo;
if g_fOldSudo is None:
try:
sVersion = processOutputChecked(['sudo', '-V']);
except:
sVersion = '1.7.0';
sVersion = sVersion.strip().split('\n')[0];
sVersion = sVersion.replace('Sudo version', '').strip();
g_fOldSudo = len(sVersion) >= 4 \
and sVersion[0] == '1' \
and sVersion[1] == '.' \
and sVersion[2] <= '6' \
and sVersion[3] == '.';
asArgs.insert(0, 'sudo');
if not g_fOldSudo:
asArgs.insert(1, '-n');
if fInitialEnv and not g_fOldSudo:
asArgs.insert(1, '-i');
# paranoia...
if dKeywordArgs.get('args') is not None:
dKeywordArgs['args'] = asArgs;
else:
aPositionalArgs = (asArgs,) + aPositionalArgs[1:];
return None;
def sudoProcessCall(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.call
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs);
return processCall(*aPositionalArgs, **dKeywordArgs);
def sudoProcessOutputChecked(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.check_output.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs);
return processOutputChecked(*aPositionalArgs, **dKeywordArgs);
def sudoProcessOutputCheckedNoI(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.check_output, except '-i' isn't used.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs, False);
return processOutputChecked(*aPositionalArgs, **dKeywordArgs);
def sudoProcessPopen(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.Popen.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs);
return subprocess.Popen(*aPositionalArgs, **dKeywordArgs);
#
# Generic process stuff.
#
def processInterrupt(uPid):
"""
Sends a SIGINT or equivalent to interrupt the specified process.
Returns True on success, False on failure.
On Windows hosts this may not work unless the process happens to be a
process group leader.
"""
if sys.platform == 'win32':
try:
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid); # pylint: disable=no-member
fRc = True;
except:
fRc = False;
else:
try:
os.kill(uPid, signal.SIGINT);
fRc = True;
except:
fRc = False;
return fRc;
def sendUserSignal1(uPid):
"""
Sends a SIGUSR1 or equivalent to nudge the process into shutting down
(VBoxSVC) or something.
Returns True on success, False on failure or if not supported (win).
On Windows hosts this may not work unless the process happens to be a
process group leader.
"""
if sys.platform == 'win32':
fRc = False;
else:
try:
os.kill(uPid, signal.SIGUSR1); # pylint: disable=E1101
fRc = True;
except:
fRc = False;
return fRc;
def processTerminate(uPid):
"""
Terminates the process in a nice manner (SIGTERM or equivalent).
Returns True on success, False on failure.
"""
fRc = False;
if sys.platform == 'win32':
try:
hProcess = win32api.OpenProcess(win32con.PROCESS_TERMINATE, False, uPid); # pylint: disable=no-member
except:
pass;
else:
try:
win32process.TerminateProcess(hProcess, 0x40010004); # DBG_TERMINATE_PROCESS # pylint: disable=no-member
fRc = True;
except:
pass;
win32api.CloseHandle(hProcess) # pylint: disable=no-member
else:
try:
os.kill(uPid, signal.SIGTERM);
fRc = True;
except:
pass;
return fRc;
def processKill(uPid):
"""
Terminates the process with extreme prejudice (SIGKILL).
Returns True on success, False on failure.
"""
if sys.platform == 'win32':
fRc = processTerminate(uPid);
else:
try:
os.kill(uPid, signal.SIGKILL); # pylint: disable=E1101
fRc = True;
except:
fRc = False;
return fRc;
def processKillWithNameCheck(uPid, sName):
"""
Like processKill(), but checks if the process name matches before killing
it. This is intended for killing using potentially stale pid values.
Returns True on success, False on failure.
"""
if processCheckPidAndName(uPid, sName) is not True:
return False;
return processKill(uPid);
def processExists(uPid):
"""
    Checks if the specified process exists.
This will only work if we can signal/open the process.
Returns True if it positively exists, False otherwise.
"""
if sys.platform == 'win32':
fRc = False;
try:
hProcess = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, False, uPid); # pylint: disable=no-member
except:
pass;
else:
win32api.CloseHandle(hProcess); # pylint: disable=no-member
fRc = True;
else:
try:
os.kill(uPid, 0);
fRc = True;
except:
fRc = False;
return fRc;
def processCheckPidAndName(uPid, sName):
"""
Checks if a process PID and NAME matches.
"""
fRc = processExists(uPid);
if fRc is not True:
return False;
if sys.platform == 'win32':
try:
from win32com.client import GetObject; # pylint: disable=F0401
oWmi = GetObject('winmgmts:');
aoProcesses = oWmi.InstancesOf('Win32_Process');
for oProcess in aoProcesses:
if long(oProcess.Properties_("ProcessId").Value) == uPid:
sCurName = oProcess.Properties_("Name").Value;
#reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));
sName = sName.lower();
sCurName = sCurName.lower();
if os.path.basename(sName) == sName:
sCurName = os.path.basename(sCurName);
if sCurName == sName \
or sCurName + '.exe' == sName \
or sCurName == sName + '.exe':
fRc = True;
break;
except:
#reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));
pass;
else:
if sys.platform in ('linux2', ):
asPsCmd = ['/bin/ps', '-p', '%u' % (uPid,), '-o', 'fname='];
elif sys.platform in ('sunos5',):
asPsCmd = ['/usr/bin/ps', '-p', '%u' % (uPid,), '-o', 'fname='];
elif sys.platform in ('darwin',):
asPsCmd = ['/bin/ps', '-p', '%u' % (uPid,), '-o', 'ucomm='];
else:
asPsCmd = None;
if asPsCmd is not None:
try:
oPs = subprocess.Popen(asPsCmd, stdout=subprocess.PIPE);
sCurName = oPs.communicate()[0];
iExitCode = oPs.wait();
except:
#reporter.logXcpt();
return False;
# ps fails with non-zero exit code if the pid wasn't found.
            if iExitCode != 0:
return False;
if sCurName is None:
return False;
sCurName = sCurName.strip();
            if sCurName == '':
return False;
if os.path.basename(sName) == sName:
sCurName = os.path.basename(sCurName);
elif os.path.basename(sCurName) == sCurName:
sName = os.path.basename(sName);
if sCurName != sName:
return False;
fRc = True;
return fRc;
class ProcessInfo(object):
"""Process info."""
def __init__(self, iPid):
self.iPid = iPid;
self.iParentPid = None;
self.sImage = None;
self.sName = None;
self.asArgs = None;
self.sCwd = None;
self.iGid = None;
self.iUid = None;
self.iProcGroup = None;
self.iSessionId = None;
def loadAll(self):
"""Load all the info."""
sOs = getHostOs();
if sOs == 'linux':
sProc = '/proc/%s/' % (self.iPid,);
if self.sImage is None: self.sImage = noxcptReadLink(sProc + 'exe', None);
if self.sCwd is None: self.sCwd = noxcptReadLink(sProc + 'cwd', None);
if self.asArgs is None: self.asArgs = noxcptReadFile(sProc + 'cmdline', '').split('\x00');
elif sOs == 'solaris':
sProc = '/proc/%s/' % (self.iPid,);
if self.sImage is None: self.sImage = noxcptReadLink(sProc + 'path/a.out', None);
if self.sCwd is None: self.sCwd = noxcptReadLink(sProc + 'path/cwd', None);
else:
pass;
if self.sName is None and self.sImage is not None:
self.sName = self.sImage;
def windowsGrabProcessInfo(self, oProcess):
"""Windows specific loadAll."""
try: self.sName = oProcess.Properties_("Name").Value;
except: pass;
try: self.sImage = oProcess.Properties_("ExecutablePath").Value;
except: pass;
try: self.asArgs = oProcess.Properties_("CommandLine").Value; ## @todo split it.
except: pass;
try: self.iParentPid = oProcess.Properties_("ParentProcessId").Value;
except: pass;
try: self.iSessionId = oProcess.Properties_("SessionId").Value;
except: pass;
if self.sName is None and self.sImage is not None:
self.sName = self.sImage;
def getBaseImageName(self):
"""
        Gets the base image name if available, falling back to the process name if not.
Returns image/process base name or None.
"""
sRet = self.sImage if self.sName is None else self.sName;
if sRet is None:
self.loadAll();
sRet = self.sImage if self.sName is None else self.sName;
if sRet is None:
if self.asArgs is None or len(self.asArgs) == 0:
return None;
sRet = self.asArgs[0];
if len(sRet) == 0:
return None;
return os.path.basename(sRet);
def getBaseImageNameNoExeSuff(self):
"""
Same as getBaseImageName, except any '.exe' or similar suffix is stripped.
"""
sRet = self.getBaseImageName();
if sRet is not None and len(sRet) > 4 and sRet[-4] == '.':
if (sRet[-4:]).lower() in [ '.exe', '.com', '.msc', '.vbs', '.cmd', '.bat' ]:
sRet = sRet[:-4];
return sRet;
def processListAll(): # pylint: disable=R0914
"""
Return a list of ProcessInfo objects for all the processes in the system
that the current user can see.
"""
asProcesses = [];
sOs = getHostOs();
if sOs == 'win':
from win32com.client import GetObject; # pylint: disable=F0401
oWmi = GetObject('winmgmts:');
aoProcesses = oWmi.InstancesOf('Win32_Process');
for oProcess in aoProcesses:
try:
iPid = int(oProcess.Properties_("ProcessId").Value);
except:
continue;
oMyInfo = ProcessInfo(iPid);
oMyInfo.windowsGrabProcessInfo(oProcess);
asProcesses.append(oMyInfo);
elif sOs in [ 'linux', 'solaris' ]:
try:
asDirs = os.listdir('/proc');
except:
asDirs = [];
for sDir in asDirs:
if sDir.isdigit():
asProcesses.append(ProcessInfo(int(sDir),));
elif sOs == 'darwin':
# Try our best to parse ps output. (Not perfect but does the job most of the time.)
try:
sRaw = processOutputChecked([ '/bin/ps', '-A',
'-o', 'pid=',
'-o', 'ppid=',
'-o', 'pgid=',
'-o', 'sess=',
'-o', 'uid=',
'-o', 'gid=',
'-o', 'comm=' ]);
except:
return asProcesses;
for sLine in sRaw.split('\n'):
sLine = sLine.lstrip();
if len(sLine) < 7 or not sLine[0].isdigit():
continue;
iField = 0;
off = 0;
aoFields = [None, None, None, None, None, None, None];
while iField < 7:
# Eat whitespace.
while off < len(sLine) and (sLine[off] == ' ' or sLine[off] == '\t'):
off += 1;
# Final field / EOL.
if iField == 6:
aoFields[6] = sLine[off:];
break;
if off >= len(sLine):
break;
# Generic field parsing.
offStart = off;
off += 1;
while off < len(sLine) and sLine[off] != ' ' and sLine[off] != '\t':
off += 1;
try:
if iField != 3:
aoFields[iField] = int(sLine[offStart:off]);
else:
aoFields[iField] = long(sLine[offStart:off], 16); # sess is a hex address.
except:
pass;
iField += 1;
if aoFields[0] is not None:
oMyInfo = ProcessInfo(aoFields[0]);
oMyInfo.iParentPid = aoFields[1];
oMyInfo.iProcGroup = aoFields[2];
oMyInfo.iSessionId = aoFields[3];
oMyInfo.iUid = aoFields[4];
oMyInfo.iGid = aoFields[5];
oMyInfo.sName = aoFields[6];
asProcesses.append(oMyInfo);
return asProcesses;
def processCollectCrashInfo(uPid, fnLog, fnCrashFile):
"""
Looks for information regarding the demise of the given process.
"""
sOs = getHostOs();
if sOs == 'darwin':
#
# On darwin we look for crash and diagnostic reports.
#
asLogDirs = [
u'/Library/Logs/DiagnosticReports/',
u'/Library/Logs/CrashReporter/',
u'~/Library/Logs/DiagnosticReports/',
u'~/Library/Logs/CrashReporter/',
];
for sDir in asLogDirs:
sDir = os.path.expanduser(sDir);
if not os.path.isdir(sDir):
continue;
try:
asDirEntries = os.listdir(sDir);
except:
continue;
for sEntry in asDirEntries:
# Only interested in .crash files.
_, sSuff = os.path.splitext(sEntry);
if sSuff != '.crash':
continue;
# The pid can be found at the end of the first line.
sFull = os.path.join(sDir, sEntry);
try:
oFile = open(sFull, 'r');
sFirstLine = oFile.readline();
oFile.close();
except:
continue;
if len(sFirstLine) <= 4 or sFirstLine[-2] != ']':
continue;
offPid = len(sFirstLine) - 3;
while offPid > 1 and sFirstLine[offPid - 1].isdigit():
offPid -= 1;
try: uReportPid = int(sFirstLine[offPid:-2]);
except: continue;
# Does the pid we found match?
if uReportPid == uPid:
fnLog('Found crash report for %u: %s' % (uPid, sFull,));
fnCrashFile(sFull, False);
elif sOs == 'win':
#
        # Getting WER reports would be great, however we have trouble matching the
        # PID to those as they seem not to mention it in the brief reports.
# Instead we'll just look for crash dumps in C:\CrashDumps (our custom
# location - see the windows readme for the testbox script) and what
# the MSDN article lists for now.
#
        # It's been observed on Windows server 2012 that the dump files take
# the form: <processimage>.<decimal-pid>.dmp
#
asDmpDirs = [
u'%SystemDrive%/CrashDumps/', # Testboxes.
u'%LOCALAPPDATA%/CrashDumps/', # MSDN example.
u'%WINDIR%/ServiceProfiles/LocalServices/', # Local and network service.
u'%WINDIR%/ServiceProfiles/NetworkSerices/',
u'%WINDIR%/ServiceProfiles/',
u'%WINDIR%/System32/Config/SystemProfile/', # System services.
];
sMatchSuffix = '.%u.dmp' % (uPid,);
for sDir in asDmpDirs:
sDir = os.path.expandvars(sDir);
if not os.path.isdir(sDir):
continue;
try:
asDirEntries = os.listdir(sDir);
except:
continue;
for sEntry in asDirEntries:
if sEntry.endswith(sMatchSuffix):
sFull = os.path.join(sDir, sEntry);
fnLog('Found crash dump for %u: %s' % (uPid, sFull,));
fnCrashFile(sFull, True);
else:
pass; ## TODO
return None;
#
# Time.
#
#
# The following test case shows how time.time() only have ~ms resolution
# on Windows (tested W10) and why it therefore makes sense to try use
# performance counters.
#
# Note! We cannot use time.clock() as the timestamp must be portable across
# processes. See timeout testcase problem on win hosts (no logs).
#
#import sys;
#import time;
#from common import utils;
#
#atSeries = [];
#for i in xrange(1,160):
# if i == 159: time.sleep(10);
# atSeries.append((utils.timestampNano(), long(time.clock() * 1000000000), long(time.time() * 1000000000)));
#
#tPrev = atSeries[0]
#for tCur in atSeries:
# print 't1=%+22u, %u' % (tCur[0], tCur[0] - tPrev[0]);
# print 't2=%+22u, %u' % (tCur[1], tCur[1] - tPrev[1]);
# print 't3=%+22u, %u' % (tCur[2], tCur[2] - tPrev[2]);
# print '';
# tPrev = tCur
#
#print 't1=%u' % (atSeries[-1][0] - atSeries[0][0]);
#print 't2=%u' % (atSeries[-1][1] - atSeries[0][1]);
#print 't3=%u' % (atSeries[-1][2] - atSeries[0][2]);
g_fWinUseWinPerfCounter = sys.platform == 'win32';
g_fpWinPerfCounterFreq = None;
g_oFuncwinQueryPerformanceCounter = None;
def _winInitPerfCounter():
""" Initializes the use of performance counters. """
global g_fWinUseWinPerfCounter, g_fpWinPerfCounterFreq, g_oFuncwinQueryPerformanceCounter
uFrequency = ctypes.c_ulonglong(0);
if ctypes.windll.kernel32.QueryPerformanceFrequency(ctypes.byref(uFrequency)):
if uFrequency.value >= 1000:
#print 'uFrequency = %s' % (uFrequency,);
#print 'type(uFrequency) = %s' % (type(uFrequency),);
g_fpWinPerfCounterFreq = float(uFrequency.value);
# Check that querying the counter works too.
global g_oFuncwinQueryPerformanceCounter
g_oFuncwinQueryPerformanceCounter = ctypes.windll.kernel32.QueryPerformanceCounter;
uCurValue = ctypes.c_ulonglong(0);
if g_oFuncwinQueryPerformanceCounter(ctypes.byref(uCurValue)):
if uCurValue.value > 0:
return True;
g_fWinUseWinPerfCounter = False;
return False;
def _winFloatTime():
""" Gets floating point time on windows. """
if g_fpWinPerfCounterFreq is not None or _winInitPerfCounter():
uCurValue = ctypes.c_ulonglong(0);
if g_oFuncwinQueryPerformanceCounter(ctypes.byref(uCurValue)):
return float(uCurValue.value) / g_fpWinPerfCounterFreq;
return time.time();
def timestampNano():
"""
Gets a nanosecond timestamp.
"""
if g_fWinUseWinPerfCounter is True:
return long(_winFloatTime() * 1000000000);
return long(time.time() * 1000000000);
def timestampMilli():
"""
Gets a millisecond timestamp.
"""
if g_fWinUseWinPerfCounter is True:
return long(_winFloatTime() * 1000);
return long(time.time() * 1000);
def timestampSecond():
"""
Gets a second timestamp.
"""
if g_fWinUseWinPerfCounter is True:
return long(_winFloatTime());
return long(time.time());
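#
# Illustrative usage sketch (not part of the original file).  The timestamp
# helpers above give process-portable wall-clock stamps; a typical use is
# measuring elapsed milliseconds around some workload (fnDoSomethingSlow is
# a made-up placeholder):
#
#msStart   = timestampMilli();
#fnDoSomethingSlow();
#msElapsed = timestampMilli() - msStart;
#print('workload took %u ms' % (msElapsed,));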
def getTimePrefix():
"""
Returns a timestamp prefix, typically used for logging. UTC.
"""
try:
oNow = datetime.datetime.utcnow();
sTs = '%02u:%02u:%02u.%06u' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
except:
sTs = 'getTimePrefix-exception';
return sTs;
def getTimePrefixAndIsoTimestamp():
"""
Returns current UTC as log prefix and iso timestamp.
"""
try:
oNow = datetime.datetime.utcnow();
sTsPrf = '%02u:%02u:%02u.%06u' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
sTsIso = formatIsoTimestamp(oNow);
except:
sTsPrf = sTsIso = 'getTimePrefix-exception';
return (sTsPrf, sTsIso);
def formatIsoTimestamp(oNow):
"""Formats the datetime object as an ISO timestamp."""
assert oNow.tzinfo is None;
sTs = '%s.%09uZ' % (oNow.strftime('%Y-%m-%dT%H:%M:%S'), oNow.microsecond * 1000);
return sTs;
def getIsoTimestamp():
"""Returns the current UTC timestamp as a string."""
return formatIsoTimestamp(datetime.datetime.utcnow());
def getLocalHourOfWeek():
""" Local hour of week (0 based). """
oNow = datetime.datetime.now();
return (oNow.isoweekday() - 1) * 24 + oNow.hour;
def formatIntervalSeconds(cSeconds):
""" Format a seconds interval into a nice 01h 00m 22s string """
# Two simple special cases.
if cSeconds < 60:
return '%ss' % (cSeconds,);
if cSeconds < 3600:
cMins = cSeconds / 60;
cSecs = cSeconds % 60;
if cSecs == 0:
return '%sm' % (cMins,);
return '%sm %ss' % (cMins, cSecs,);
# Generic and a bit slower.
cDays = cSeconds / 86400;
cSeconds %= 86400;
cHours = cSeconds / 3600;
cSeconds %= 3600;
cMins = cSeconds / 60;
cSecs = cSeconds % 60;
sRet = '';
if cDays > 0:
sRet = '%sd ' % (cDays,);
if cHours > 0:
sRet += '%sh ' % (cHours,);
if cMins > 0:
sRet += '%sm ' % (cMins,);
if cSecs > 0:
sRet += '%ss ' % (cSecs,);
assert len(sRet) > 0; assert sRet[-1] == ' ';
return sRet[:-1];
def formatIntervalSeconds2(oSeconds):
"""
    Flexible input version of formatIntervalSeconds for use in WUI forms where
    data is usually already in string form.
"""
if isinstance(oSeconds, int) or isinstance(oSeconds, long):
return formatIntervalSeconds(oSeconds);
if not isString(oSeconds):
try:
lSeconds = long(oSeconds);
except:
pass;
else:
if lSeconds >= 0:
return formatIntervalSeconds2(lSeconds);
return oSeconds;
def parseIntervalSeconds(sString):
"""
Reverse of formatIntervalSeconds.
Returns (cSeconds, sError), where sError is None on success.
"""
    # We might be given non-strings, just return them without any fuss.
if not isString(sString):
if isinstance(sString, int) or isinstance(sString, long) or sString is None:
return (sString, None);
## @todo time/date objects?
return (int(sString), None);
# Strip it and make sure it's not empty.
sString = sString.strip();
if len(sString) == 0:
return (0, 'Empty interval string.');
#
# Split up the input into a list of 'valueN, unitN, ...'.
#
# Don't want to spend too much time trying to make re.split do exactly what
# I need here, so please forgive the extra pass I'm making here.
#
asRawParts = re.split(r'\s*([0-9]+)\s*([^0-9,;]*)[\s,;]*', sString);
asParts = [];
for sPart in asRawParts:
sPart = sPart.strip();
if len(sPart) > 0:
asParts.append(sPart);
if len(asParts) == 0:
return (0, 'Empty interval string or something?');
#
# Process them one or two at the time.
#
cSeconds = 0;
asErrors = [];
i = 0;
while i < len(asParts):
sNumber = asParts[i];
i += 1;
if sNumber.isdigit():
iNumber = int(sNumber);
sUnit = 's';
if i < len(asParts) and not asParts[i].isdigit():
sUnit = asParts[i];
i += 1;
sUnitLower = sUnit.lower();
if sUnitLower in [ 's', 'se', 'sec', 'second', 'seconds' ]:
pass;
elif sUnitLower in [ 'm', 'mi', 'min', 'minute', 'minutes' ]:
iNumber *= 60;
elif sUnitLower in [ 'h', 'ho', 'hou', 'hour', 'hours' ]:
iNumber *= 3600;
elif sUnitLower in [ 'd', 'da', 'day', 'days' ]:
iNumber *= 86400;
elif sUnitLower in [ 'w', 'week', 'weeks' ]:
iNumber *= 7 * 86400;
else:
asErrors.append('Unknown unit "%s".' % (sUnit,));
cSeconds += iNumber;
else:
asErrors.append('Bad number "%s".' % (sNumber,));
return (cSeconds, None if len(asErrors) == 0 else ' '.join(asErrors));
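#
# Illustrative round-trip sketch (not from the original source); the expected
# results assume Python 2 style integer division as used above:
#
#assert formatIntervalSeconds(3725) == '1h 2m 5s';
#assert parseIntervalSeconds('1h 2m 5s') == (3725, None);
#assert parseIntervalSeconds('90') == (90, None);    # bare numbers are taken as seconds.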
def formatIntervalHours(cHours):
""" Format a hours interval into a nice 1w 2d 1h string. """
# Simple special cases.
if cHours < 24:
return '%sh' % (cHours,);
# Generic and a bit slower.
cWeeks = cHours / (7 * 24);
cHours %= 7 * 24;
cDays = cHours / 24;
cHours %= 24;
sRet = '';
if cWeeks > 0:
sRet = '%sw ' % (cWeeks,);
if cDays > 0:
        sRet += '%sd ' % (cDays,);
if cHours > 0:
sRet += '%sh ' % (cHours,);
assert len(sRet) > 0; assert sRet[-1] == ' ';
return sRet[:-1];
def parseIntervalHours(sString):
"""
Reverse of formatIntervalHours.
Returns (cHours, sError), where sError is None on success.
"""
    # We might be given non-strings, just return them without any fuss.
if not isString(sString):
if isinstance(sString, int) or isinstance(sString, long) or sString is None:
return (sString, None);
## @todo time/date objects?
return (int(sString), None);
# Strip it and make sure it's not empty.
sString = sString.strip();
if len(sString) == 0:
return (0, 'Empty interval string.');
#
# Split up the input into a list of 'valueN, unitN, ...'.
#
# Don't want to spend too much time trying to make re.split do exactly what
# I need here, so please forgive the extra pass I'm making here.
#
asRawParts = re.split(r'\s*([0-9]+)\s*([^0-9,;]*)[\s,;]*', sString);
asParts = [];
for sPart in asRawParts:
sPart = sPart.strip();
if len(sPart) > 0:
asParts.append(sPart);
if len(asParts) == 0:
return (0, 'Empty interval string or something?');
#
# Process them one or two at the time.
#
cHours = 0;
asErrors = [];
i = 0;
while i < len(asParts):
sNumber = asParts[i];
i += 1;
if sNumber.isdigit():
iNumber = int(sNumber);
sUnit = 'h';
if i < len(asParts) and not asParts[i].isdigit():
sUnit = asParts[i];
i += 1;
sUnitLower = sUnit.lower();
if sUnitLower in [ 'h', 'ho', 'hou', 'hour', 'hours' ]:
pass;
elif sUnitLower in [ 'd', 'da', 'day', 'days' ]:
iNumber *= 24;
elif sUnitLower in [ 'w', 'week', 'weeks' ]:
iNumber *= 7 * 24;
else:
asErrors.append('Unknown unit "%s".' % (sUnit,));
cHours += iNumber;
else:
asErrors.append('Bad number "%s".' % (sNumber,));
return (cHours, None if len(asErrors) == 0 else ' '.join(asErrors));
#
# Introspection.
#
def getCallerName(oFrame=None, iFrame=2):
"""
Returns the name of the caller's caller.
"""
if oFrame is None:
try:
raise Exception();
except:
oFrame = sys.exc_info()[2].tb_frame.f_back;
while iFrame > 1:
if oFrame is not None:
oFrame = oFrame.f_back;
iFrame = iFrame - 1;
if oFrame is not None:
sName = '%s:%u' % (oFrame.f_code.co_name, oFrame.f_lineno);
return sName;
return "unknown";
def getXcptInfo(cFrames = 1):
"""
Gets text detailing the exception. (Good for logging.)
Returns list of info strings.
"""
#
# Try get exception info.
#
try:
oType, oValue, oTraceback = sys.exc_info();
except:
oType = oValue = oTraceback = None;
if oType is not None:
#
# Try format the info
#
asRet = [];
try:
try:
asRet = asRet + traceback.format_exception_only(oType, oValue);
asTraceBack = traceback.format_tb(oTraceback);
if cFrames is not None and cFrames <= 1:
asRet.append(asTraceBack[-1]);
else:
asRet.append('Traceback:')
for iFrame in range(min(cFrames, len(asTraceBack))):
asRet.append(asTraceBack[-iFrame - 1]);
asRet.append('Stack:')
asRet = asRet + traceback.format_stack(oTraceback.tb_frame.f_back, cFrames);
except:
asRet.append('internal-error: Hit exception #2! %s' % (traceback.format_exc(),));
if len(asRet) == 0:
asRet.append('No exception info...');
except:
asRet.append('internal-error: Hit exception! %s' % (traceback.format_exc(),));
else:
asRet = ['Couldn\'t find exception traceback.'];
return asRet;
#
# TestSuite stuff.
#
def isRunningFromCheckout(cScriptDepth = 1):
"""
Checks if we're running from the SVN checkout or not.
"""
try:
sFile = __file__;
cScriptDepth = 1;
except:
sFile = sys.argv[0];
sDir = os.path.abspath(sFile);
while cScriptDepth >= 0:
sDir = os.path.dirname(sDir);
if os.path.exists(os.path.join(sDir, 'Makefile.kmk')) \
or os.path.exists(os.path.join(sDir, 'Makefile.kup')):
return True;
cScriptDepth -= 1;
return False;
#
# Bourne shell argument fun.
#
def argsSplit(sCmdLine):
"""
Given a bourne shell command line invocation, split it up into arguments
assuming IFS is space.
Returns None on syntax error.
"""
## @todo bourne shell argument parsing!
return sCmdLine.split(' ');
def argsGetFirst(sCmdLine):
"""
    Given a bourne shell command line invocation, return the first argument
assuming IFS is space.
Returns None on invalid syntax, otherwise the parsed and unescaped argv[0] string.
"""
asArgs = argsSplit(sCmdLine);
if asArgs is None or len(asArgs) == 0:
return None;
return asArgs[0];
#
# String helpers.
#
def stricmp(sFirst, sSecond):
"""
    Compares two strings in a case-insensitive fashion.
    Python doesn't seem to have any way of doing this correctly, so this is just
    an approximation using lower().
"""
if sFirst == sSecond:
return 0;
sLower1 = sFirst.lower();
sLower2 = sSecond.lower();
if sLower1 == sLower2:
return 0;
if sLower1 < sLower2:
return -1;
return 1;
#
# Misc.
#
def versionCompare(sVer1, sVer2):
"""
    Compares two version strings in a fashion similar to RTStrVersionCompare.
"""
## @todo implement me!!
if sVer1 == sVer2:
return 0;
if sVer1 < sVer2:
return -1;
return 1;
def formatNumber(lNum, sThousandSep = ' '):
"""
Formats a decimal number with pretty separators.
"""
sNum = str(lNum);
sRet = sNum[-3:];
off = len(sNum) - 3;
while off > 0:
off -= 3;
sRet = sNum[(off if off >= 0 else 0):(off + 3)] + sThousandSep + sRet;
return sRet;
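#
# Examples (illustrative, not part of the original file):
#   formatNumber(1234567)       -> '1 234 567'
#   formatNumber(1234567, ',')  -> '1,234,567'
#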
def formatNumberNbsp(lNum):
"""
    Formats a decimal number with pretty non-breaking space separators.
"""
sRet = formatNumber(lNum);
return unicode(sRet).replace(' ', u'\u00a0');
def isString(oString):
"""
    Checks if the object is a string object, hiding the difference between Python 2 and 3.
Returns True if it's a string of some kind.
Returns False if not.
"""
if sys.version_info[0] >= 3:
return isinstance(oString, str);
return isinstance(oString, basestring);
def hasNonAsciiCharacters(sText):
"""
    Returns True if the specified string has non-ASCII characters.
"""
sTmp = unicode(sText, errors='ignore') if isinstance(sText, str) else sText
return not all(ord(cChar) < 128 for cChar in sTmp)
def chmodPlusX(sFile):
"""
Makes the specified file or directory executable.
Returns success indicator, no exceptions.
Note! Symbolic links are followed and the target will be changed.
"""
try:
oStat = os.stat(sFile);
except:
return False;
try:
os.chmod(sFile, oStat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH);
except:
return False;
return True;
def unpackZipFile(sArchive, sDstDir, fnLog, fnError = None, fnFilter = None):
# type: (string, string, (string) -> None, (string) -> None, (string) -> bool) -> list[string]
"""
Worker for unpackFile that deals with ZIP files, same function signature.
"""
import zipfile
if fnError is None:
fnError = fnLog;
fnLog('Unzipping "%s" to "%s"...' % (sArchive, sDstDir));
# Open it.
try: oZipFile = zipfile.ZipFile(sArchive, 'r')
except Exception as oXcpt:
fnError('Error opening "%s" for unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt,));
return None;
# Extract all members.
asMembers = [];
try:
for sMember in oZipFile.namelist():
if fnFilter is None or fnFilter(sMember) is not False:
if sMember.endswith('/'):
os.makedirs(os.path.join(sDstDir, sMember.replace('/', os.path.sep)), 0x1fd); # octal: 0775 (python 3/2)
else:
oZipFile.extract(sMember, sDstDir);
asMembers.append(os.path.join(sDstDir, sMember.replace('/', os.path.sep)));
except Exception as oXcpt:
fnError('Error unpacking "%s" into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
# close it.
try: oZipFile.close();
except Exception as oXcpt:
fnError('Error closing "%s" after unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
return asMembers;
## Set if we've replaced tarfile.copyfileobj with __mytarfilecopyfileobj already.
g_fTarCopyFileObjOverriddend = False;
def __mytarfilecopyfileobj(src, dst, length = None, exception = OSError):
""" tarfile.copyfileobj with different buffer size (16384 is slow on windows). """
if length is None:
__myshutilcopyfileobj(src, dst, g_cbGoodBufferSize);
elif length > 0:
cFull, cbRemainder = divmod(length, g_cbGoodBufferSize);
for _ in xrange(cFull):
abBuffer = src.read(g_cbGoodBufferSize);
dst.write(abBuffer);
if len(abBuffer) != g_cbGoodBufferSize:
raise exception('unexpected end of source file');
if cbRemainder > 0:
abBuffer = src.read(cbRemainder);
dst.write(abBuffer);
if len(abBuffer) != cbRemainder:
raise exception('unexpected end of source file');
def unpackTarFile(sArchive, sDstDir, fnLog, fnError = None, fnFilter = None):
# type: (string, string, (string) -> None, (string) -> None, (string) -> bool) -> list[string]
"""
Worker for unpackFile that deals with tarballs, same function signature.
"""
import shutil;
import tarfile;
if fnError is None:
fnError = fnLog;
fnLog('Untarring "%s" to "%s"...' % (sArchive, sDstDir));
#
# Default buffer sizes of 16384 bytes is causing too many syscalls on Windows.
# 60%+ speedup for python 2.7 and 50%+ speedup for python 3.5, both on windows with PDBs.
# 20%+ speedup for python 2.7 and 15%+ speedup for python 3.5, both on windows skipping PDBs.
#
if True is True:
__installShUtilHacks(shutil);
global g_fTarCopyFileObjOverriddend;
if g_fTarCopyFileObjOverriddend is False:
g_fTarCopyFileObjOverriddend = True;
tarfile.copyfileobj = __mytarfilecopyfileobj;
#
# Open it.
#
    # Note! We're not using 'r:*' because we cannot allow seeking in compressed files!
# That's how we got a 13 min unpack time for VBoxAll on windows (hardlinked pdb).
#
try: oTarFile = tarfile.open(sArchive, 'r|*', bufsize = g_cbGoodBufferSize);
except Exception as oXcpt:
fnError('Error opening "%s" for unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt,));
return None;
# Extract all members.
asMembers = [];
try:
for oTarInfo in oTarFile:
try:
if fnFilter is None or fnFilter(oTarInfo.name) is not False:
if oTarInfo.islnk():
                        # Links are trouble, especially on Windows. We must avoid anything that ends
                        # up seeking in the compressed tar stream. So, fall back on shutil.copy2 instead.
sLinkFile = os.path.join(sDstDir, oTarInfo.name.rstrip('/').replace('/', os.path.sep));
sLinkTarget = os.path.join(sDstDir, oTarInfo.linkname.rstrip('/').replace('/', os.path.sep));
sParentDir = os.path.dirname(sLinkFile);
try: os.unlink(sLinkFile);
except: pass;
                        if sParentDir != '' and not os.path.exists(sParentDir):
os.makedirs(sParentDir);
try: os.link(sLinkTarget, sLinkFile);
except: shutil.copy2(sLinkTarget, sLinkFile);
else:
if oTarInfo.isdir():
# Just make sure the user (we) got full access to dirs. Don't bother getting it 100% right.
oTarInfo.mode |= 0x1c0; # (octal: 0700)
oTarFile.extract(oTarInfo, sDstDir);
asMembers.append(os.path.join(sDstDir, oTarInfo.name.replace('/', os.path.sep)));
except Exception as oXcpt:
fnError('Error unpacking "%s" member "%s" into "%s": %s' % (sArchive, oTarInfo.name, sDstDir, oXcpt));
for sAttr in [ 'name', 'linkname', 'type', 'mode', 'size', 'mtime', 'uid', 'uname', 'gid', 'gname' ]:
fnError('Info: %8s=%s' % (sAttr, getattr(oTarInfo, sAttr),));
for sFn in [ 'isdir', 'isfile', 'islnk', 'issym' ]:
fnError('Info: %8s=%s' % (sFn, getattr(oTarInfo, sFn)(),));
asMembers = None;
break;
except Exception as oXcpt:
fnError('Error unpacking "%s" into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
#
# Finally, close it.
#
try: oTarFile.close();
except Exception as oXcpt:
fnError('Error closing "%s" after unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
return asMembers;
def unpackFile(sArchive, sDstDir, fnLog, fnError = None, fnFilter = None):
# type: (string, string, (string) -> None, (string) -> None, (string) -> bool) -> list[string]
"""
    Unpacks the given file if it has a known archive extension, otherwise does
    nothing.
fnLog & fnError both take a string parameter.
fnFilter takes a member name (string) and returns True if it's included
and False if excluded.
Returns list of the extracted files (full path) on success.
Returns empty list if not a supported archive format.
Returns None on failure. Raises no exceptions.
"""
sBaseNameLower = os.path.basename(sArchive).lower();
#
# Zip file?
#
if sBaseNameLower.endswith('.zip'):
return unpackZipFile(sArchive, sDstDir, fnLog, fnError, fnFilter);
#
# Tarball?
#
if sBaseNameLower.endswith('.tar') \
or sBaseNameLower.endswith('.tar.gz') \
or sBaseNameLower.endswith('.tgz') \
or sBaseNameLower.endswith('.tar.bz2'):
return unpackTarFile(sArchive, sDstDir, fnLog, fnError, fnFilter);
#
# Cannot classify it from the name, so just return that to the caller.
#
fnLog('Not unpacking "%s".' % (sArchive,));
return [];
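#
# Illustrative usage sketch (not part of the original file; the paths and the
# log callback are made up).  unpackFile() dispatches to unpackZipFile() or
# unpackTarFile() based on the file extension and returns the extracted paths:
#
#def _fnExampleLog(sMsg): print(sMsg);
#asFiles = unpackFile('/tmp/testsuite.tar.gz', '/tmp/unpacked', _fnExampleLog);
#if asFiles is None:   _fnExampleLog('unpacking failed');
#elif not asFiles:     _fnExampleLog('not a recognized archive, nothing done');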
def getDiskUsage(sPath):
"""
    Get the free space of the partition that contains the specified sPath.
    Returns the free space value in MB.
"""
if platform.system() == 'Windows':
oCTypeFreeSpace = ctypes.c_ulonglong(0);
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(sPath), None, None,
ctypes.pointer(oCTypeFreeSpace));
cbFreeSpace = oCTypeFreeSpace.value;
else:
oStats = os.statvfs(sPath); # pylint: disable=E1101
cbFreeSpace = long(oStats.f_frsize) * oStats.f_bfree;
# Convert to MB
cMbFreeSpace = long(cbFreeSpace) / (1024 * 1024);
return cMbFreeSpace;
#
# Unit testing.
#
# pylint: disable=C0111
class BuildCategoryDataTestCase(unittest.TestCase):
def testIntervalSeconds(self):
self.assertEqual(parseIntervalSeconds(formatIntervalSeconds(3600)), (3600, None));
self.assertEqual(parseIntervalSeconds(formatIntervalSeconds(1209438593)), (1209438593, None));
self.assertEqual(parseIntervalSeconds('123'), (123, None));
self.assertEqual(parseIntervalSeconds(123), (123, None));
self.assertEqual(parseIntervalSeconds(99999999999), (99999999999, None));
self.assertEqual(parseIntervalSeconds(''), (0, 'Empty interval string.'));
self.assertEqual(parseIntervalSeconds('1X2'), (3, 'Unknown unit "X".'));
self.assertEqual(parseIntervalSeconds('1 Y3'), (4, 'Unknown unit "Y".'));
self.assertEqual(parseIntervalSeconds('1 Z 4'), (5, 'Unknown unit "Z".'));
self.assertEqual(parseIntervalSeconds('1 hour 2m 5second'), (3725, None));
self.assertEqual(parseIntervalSeconds('1 hour,2m ; 5second'), (3725, None));
if __name__ == '__main__':
unittest.main();
# not reached.
|
gpl-2.0
| -3,673,735,913,334,923,000
| 31.87655
| 124
| 0.55815
| false
| 3.655239
| false
| false
| false
|
bright-sparks/wpull
|
wpull/url.py
|
1
|
20604
|
'''URL parsing based on WHATWG URL living standard.'''
import collections
import fnmatch
import functools
import gettext
import logging
import re
import string
import urllib.parse
import posixpath
from wpull.backport.logging import BraceMessage as __
import wpull.string
_logger = logging.getLogger(__name__)
_ = gettext.gettext
RELATIVE_SCHEME_DEFAULT_PORTS = {
'ftp': 21,
'gopher': 70,
'http': 80,
'https': 443,
'ws': 80,
'wss': 443,
}
DEFAULT_ENCODE_SET = frozenset(b' "#<>?`')
'''Percent encoding set as defined by WHATWG URL living standard.
Does not include U+0000 to U+001F nor U+007F and above.
'''
PASSWORD_ENCODE_SET = DEFAULT_ENCODE_SET | frozenset(b'/@\\')
'''Encoding set for passwords.'''
USERNAME_ENCODE_SET = PASSWORD_ENCODE_SET | frozenset(b':')
'''Encoding set for usernames.'''
QUERY_ENCODE_SET = frozenset(b'"#<>`')
'''Encoding set for query strings.
This set does not include U+0020 (space) so it can be replaced with
U+002B (plus sign) later.
'''
FRAGMENT_ENCODE_SET = frozenset(b' "<>`')
'''Encoding set for fragment.'''
QUERY_VALUE_ENCODE_SET = QUERY_ENCODE_SET | frozenset(b'&+%')
'''Encoding set for a query value.'''
FORBIDDEN_HOSTNAME_CHARS = frozenset('#%/:?@[\\] ')
'''Forbidden hostname characters.
Does not include non-printing characters. Meant for ASCII.
'''
VALID_IPv6_ADDRESS_CHARS = frozenset(string.hexdigits + '.:')
'''Valid IPv6 address characters.'''
class URLInfo(object):
'''Represent parts of a URL.
Attributes:
raw (str): Original string.
scheme (str): Protocol (for example, HTTP, FTP).
authority (str): Raw userinfo and host.
path (str): Location of resource. This value always
begins with a slash (``/``).
query (str): Additional request parameters.
fragment (str): Named anchor of a document.
userinfo (str): Raw username and password.
username (str): Username.
password (str): Password.
host (str): Raw hostname and port.
hostname (str): Hostname or IP address.
port (int): IP address port number.
        resource (str): Raw path, query, and fragment. This value always
begins with a slash (``/``).
query_map (dict): Mapping of the query. Values are lists.
url (str): A normalized URL without userinfo and fragment.
encoding (str): Codec name for IRI support.
If scheme is not something like HTTP or FTP, the remaining attributes
are None.
All attributes are read only.
For more information about how the URL parts are derived, see
https://medialize.github.io/URI.js/about-uris.html
'''
__slots__ = ('raw', 'scheme', 'authority', 'path', 'query', 'fragment',
'userinfo', 'username', 'password',
'host', 'hostname', 'port',
'resource',
'_query_map', '_url', 'encoding',
)
def __init__(self):
self.raw = None
self.scheme = None
self.authority = None
self.path = None
self.query = None
self.fragment = None
self.userinfo = None
self.username = None
self.password = None
self.host = None
self.hostname = None
self.port = None
self.resource = None
self._query_map = None
self._url = None
self.encoding = None
@classmethod
@functools.lru_cache()
def parse(cls, url, default_scheme='http', encoding='utf-8'):
'''Parse a URL and return a URLInfo.'''
url = url.strip()
if not url.isprintable():
raise ValueError('URL is not printable: {}'.format(ascii(url)))
scheme, sep, remaining = url.partition(':')
if not scheme:
raise ValueError('URL missing scheme: {}'.format(ascii(url)))
scheme = scheme.lower()
if not sep and default_scheme:
# Likely something like example.com/mystuff
remaining = url
scheme = default_scheme
elif not sep:
raise ValueError('URI missing colon: {}'.format(ascii(url)))
if default_scheme and '.' in scheme or scheme == 'localhost':
# Maybe something like example.com:8080/mystuff or
# maybe localhost:8080/mystuff
remaining = '{}:{}'.format(scheme, remaining)
scheme = default_scheme
info = URLInfo()
info.encoding = encoding
if scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
info.raw = url
info.scheme = scheme
info.path = remaining
return info
if remaining.startswith('//'):
remaining = remaining[2:]
path_index = remaining.find('/')
query_index = remaining.find('?')
fragment_index = remaining.find('#')
try:
index_tuple = (path_index, query_index, fragment_index)
authority_index = min(num for num in index_tuple if num >= 0)
except ValueError:
authority_index = len(remaining)
authority = remaining[:authority_index]
resource = remaining[authority_index:]
try:
index_tuple = (query_index, fragment_index)
path_index = min(num for num in index_tuple if num >= 0)
except ValueError:
path_index = len(remaining)
path = remaining[authority_index + 1:path_index] or '/'
if fragment_index >= 0:
query_index = fragment_index
else:
query_index = len(remaining)
query = remaining[path_index + 1:query_index]
fragment = remaining[query_index + 1:]
userinfo, host = cls.parse_authority(authority)
hostname, port = cls.parse_host(host)
username, password = cls.parse_userinfo(userinfo)
if not hostname:
raise ValueError('Hostname is empty: {}'.format(ascii(url)))
info.raw = url
info.scheme = scheme
info.authority = authority
info.path = normalize_path(path, encoding=encoding)
info.query = normalize_query(query, encoding=encoding)
info.fragment = normalize_fragment(fragment, encoding=encoding)
info.userinfo = userinfo
info.username = percent_decode(username, encoding=encoding)
info.password = percent_decode(password, encoding=encoding)
info.host = host
info.hostname = hostname
info.port = port or RELATIVE_SCHEME_DEFAULT_PORTS[scheme]
info.resource = resource
return info
@classmethod
def parse_authority(cls, authority):
'''Parse the authority part and return userinfo and host.'''
userinfo, sep, host = authority.partition('@')
if not sep:
return '', userinfo
else:
return userinfo, host
@classmethod
def parse_userinfo(cls, userinfo):
'''Parse the userinfo and return username and password.'''
username, sep, password = userinfo.partition(':')
return username, password
@classmethod
def parse_host(cls, host):
'''Parse the host and return hostname and port.'''
if host.endswith(']'):
return cls.parse_hostname(host), None
else:
hostname, sep, port = host.rpartition(':')
if sep:
port = int(port)
else:
hostname = port
port = None
return cls.parse_hostname(hostname), port
@classmethod
def parse_hostname(cls, hostname):
'''Parse the hostname and normalize.'''
if hostname.startswith('['):
return cls.parse_ipv6_hostname(hostname)
else:
new_hostname = normalize_hostname(hostname)
if any(char in new_hostname for char in FORBIDDEN_HOSTNAME_CHARS):
raise ValueError('Invalid hostname: {}'
.format(ascii(hostname)))
return new_hostname
@classmethod
def parse_ipv6_hostname(cls, hostname):
        '''Parse and normalize an IPv6 address.'''
if not hostname.startswith('[') or not hostname.endswith(']'):
raise ValueError('Invalid IPv6 address: {}'
.format(ascii(hostname)))
hostname = hostname[1:-1]
if any(char not in VALID_IPv6_ADDRESS_CHARS for char in hostname):
raise ValueError('Invalid IPv6 address: {}'
.format(ascii(hostname)))
hostname = normalize_hostname(hostname)
return hostname
@property
def query_map(self):
if self._query_map is None:
self._query_map = query_to_map(self.query)
return self._query_map
@property
def url(self):
if self._url is None:
if self.scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
self._url = self.raw
return self._url
parts = [self.scheme, '://']
if self.username:
parts.append(normalize_username(self.username))
if self.password:
parts.append(':')
parts.append(normalize_password(self.password))
if self.username or self.password:
parts.append('@')
if self.is_ipv6():
parts.append('[{}]'.format(self.hostname))
else:
parts.append(self.hostname)
if RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] != self.port:
parts.append(':{}'.format(self.port))
parts.append(self.path)
if self.query:
parts.append('?')
parts.append(self.query)
self._url = ''.join(parts)
return self._url
def to_dict(self):
'''Return a dict of the attributes.'''
return dict(
raw=self.raw,
scheme=self.scheme,
authority=self.authority,
netloc=self.authority,
path=self.path,
query=self.query,
fragment=self.fragment,
userinfo=self.userinfo,
username=self.username,
password=self.password,
host=self.host,
hostname=self.hostname,
port=self.port,
resource=self.resource,
url=self.url,
encoding=self.encoding,
)
def is_port_default(self):
'''Return whether the URL is using the default port.'''
if self.scheme in RELATIVE_SCHEME_DEFAULT_PORTS:
return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port
def is_ipv6(self):
'''Return whether the URL is IPv6.'''
if self.host:
return self.host.startswith('[')
@property
def hostname_with_port(self):
'''Return the host portion but omit default port if needed.'''
default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)
if not default_port:
return ''
assert '[' not in self.hostname
assert ']' not in self.hostname
if self.is_ipv6():
hostname = '[{}]'.format(self.hostname)
else:
hostname = self.hostname
if default_port != self.port:
return '{}:{}'.format(hostname, self.port)
else:
return hostname
def split_path(self):
'''Return the directory and filename from the path.
The results are not percent-decoded.
'''
return posixpath.split(self.path)
def __repr__(self):
return '<URLInfo at 0x{:x} url={} raw={}>'.format(
id(self), self.url, self.raw)
def __hash__(self):
return hash(self.raw)
def __eq__(self, other):
return self.raw == other.raw
def __ne__(self, other):
return self.raw != other.raw
def parse_url_or_log(url, encoding='utf-8'):
'''Parse and return a URLInfo.
This function logs a warning if the URL cannot be parsed and returns
None.
'''
try:
url_info = URLInfo.parse(url, encoding=encoding)
except ValueError as error:
_logger.warning(__(
_('Unable to parse URL ‘{url}’: {error}.'),
url=wpull.string.printable_str(url), error=error))
else:
return url_info
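# Illustrative usage (not part of the original module); the values shown are
# what the parser above is expected to produce for a typical HTTP URL:
#
# >>> info = URLInfo.parse('HTTP://Example.COM:80/a/../b?q=1#frag')
# >>> info.url
# 'http://example.com/b?q=1'
# >>> (info.hostname, info.port, info.path)
# ('example.com', 80, '/b')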
def normalize(url, **kwargs):
'''Normalize a URL.
This function is a convenience function that is equivalent to::
>>> URLInfo.parse('http://example.com').url
'http://example.com'
:seealso: :func:`URLInfo.parse`.
'''
return URLInfo.parse(url, **kwargs).url
@functools.lru_cache()
def normalize_hostname(hostname):
    '''Normalizes a hostname so that it is ASCII and a valid domain name.'''
new_hostname = hostname.encode('idna').decode('ascii').lower()
if hostname != new_hostname:
# Check for round-trip. May raise UnicodeError
new_hostname.encode('idna')
return new_hostname
def normalize_path(path, encoding='utf-8'):
'''Normalize a path string.
Flattens a path by removing dot parts,
percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
if not path.startswith('/'):
path = '/' + path
path = percent_encode(flatten_path(path, flatten_slashes=True), encoding=encoding)
return uppercase_percent_encoding(path)
def normalize_query(text, encoding='utf-8'):
'''Normalize a query string.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode_plus(text, encoding=encoding)
return uppercase_percent_encoding(path)
def normalize_fragment(text, encoding='utf-8'):
'''Normalize a fragment.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=FRAGMENT_ENCODE_SET)
return uppercase_percent_encoding(path)
def normalize_username(text, encoding='utf-8'):
'''Normalize a username
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=USERNAME_ENCODE_SET)
return uppercase_percent_encoding(path)
def normalize_password(text, encoding='utf-8'):
'''Normalize a password
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=PASSWORD_ENCODE_SET)
return uppercase_percent_encoding(path)
class PercentEncoderMap(collections.defaultdict):
'''Helper map for percent encoding.'''
# This class is based on urllib.parse.Quoter
def __init__(self, encode_set):
super().__init__()
self.encode_set = encode_set
def __missing__(self, char):
if char < 0x20 or char > 0x7E or char in self.encode_set:
result = '%{:02X}'.format(char)
else:
result = chr(char)
self[char] = result
return result
_percent_encoder_map_cache = {}
'''Cache of :class:`PercentEncoderMap`.'''
def percent_encode(text, encode_set=DEFAULT_ENCODE_SET, encoding='utf-8'):
'''Percent encode text.
Unlike Python's ``quote``, this function accepts a blacklist instead of
a whitelist of safe characters.
'''
byte_string = text.encode(encoding)
try:
mapping = _percent_encoder_map_cache[encode_set]
except KeyError:
mapping = _percent_encoder_map_cache[encode_set] = PercentEncoderMap(
encode_set).__getitem__
return ''.join([mapping(char) for char in byte_string])
def percent_encode_plus(text, encode_set=QUERY_ENCODE_SET,
encoding='utf-8'):
'''Percent encode text for query strings.
Unlike Python's ``quote_plus``, this function accepts a blacklist instead
of a whitelist of safe characters.
'''
if ' ' not in text:
return percent_encode(text, encode_set, encoding)
else:
result = percent_encode(text, encode_set, encoding)
return result.replace(' ', '+')
def percent_encode_query_value(text, encoding='utf-8'):
'''Percent encode a query value.'''
result = percent_encode_plus(text, QUERY_VALUE_ENCODE_SET, encoding)
return result
percent_decode = urllib.parse.unquote
percent_decode_plus = urllib.parse.unquote_plus
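# Illustrative behaviour of the percent-encoding helpers above (not part of
# the original module). DEFAULT_ENCODE_SET escapes space, '"', '#', '<', '>',
# '?' and '`'; other printable ASCII passes through unchanged:
#
# >>> percent_encode('a b#c')
# 'a%20b%23c'
# >>> percent_encode_plus('key=a b')
# 'key=a+b'
# >>> percent_decode('a%20b%23c')
# 'a b#c'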
def schemes_similar(scheme1, scheme2):
'''Return whether URL schemes are similar.
This function considers the following schemes to be similar:
* HTTP and HTTPS
'''
if scheme1 == scheme2:
return True
if scheme1 in ('http', 'https') and scheme2 in ('http', 'https'):
return True
return False
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):
    '''Return whether a path is a subpath of another.
Args:
base_path: The base path
test_path: The path which we are testing
trailing_slash: If True, the trailing slash is treated with importance.
For example, ``/images/`` is a directory while ``/images`` is a
file.
wildcards: If True, globbing wildcards are matched against paths
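    Examples (illustrative sketch of the comparison implemented below)::
        >>> is_subdir('/images', '/images/cats/tom.jpg')
        True
        >>> is_subdir('/images', '/imagination')
        False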
'''
if trailing_slash:
base_path = base_path.rsplit('/', 1)[0] + '/'
test_path = test_path.rsplit('/', 1)[0] + '/'
else:
if not base_path.endswith('/'):
base_path += '/'
if not test_path.endswith('/'):
test_path += '/'
if wildcards:
return fnmatch.fnmatchcase(test_path, base_path)
else:
return test_path.startswith(base_path)
def uppercase_percent_encoding(text):
'''Uppercases percent-encoded sequences.'''
if '%' not in text:
return text
return re.sub(
r'%[a-f0-9][a-f0-9]',
lambda match: match.group(0).upper(),
text)
def split_query(qs, keep_blank_values=False):
'''Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
'''
items = []
for pair in qs.split('&'):
name, delim, value = pair.partition('=')
if not delim and keep_blank_values:
value = None
if keep_blank_values or value:
items.append((name, value))
return items
def query_to_map(text):
'''Return a key-values mapping from a query string.
Plus symbols are replaced with spaces.
'''
dict_obj = {}
for key, value in split_query(text, True):
if key not in dict_obj:
dict_obj[key] = []
if value:
dict_obj[key].append(value.replace('+', ' '))
else:
dict_obj[key].append('')
    return dict_obj
@functools.lru_cache()
def urljoin(base_url, url, allow_fragments=True):
'''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.'''
if url.startswith('//') and len(url) > 2:
scheme = base_url.partition(':')[0]
if scheme:
return urllib.parse.urljoin(
base_url,
'{0}:{1}'.format(scheme, url),
allow_fragments=allow_fragments
)
return urllib.parse.urljoin(
base_url, url, allow_fragments=allow_fragments)
def flatten_path(path, flatten_slashes=False):
'''Flatten an absolute URL path by removing the dot segments.
:func:`urllib.parse.urljoin` has some support for removing dot segments,
but it is conservative and only removes them as needed.
Arguments:
path (str): The URL path.
flatten_slashes (bool): If True, consecutive slashes are removed.
The path returned will always have a leading slash.
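    Examples (illustrative sketch of the behaviour implemented below)::
        >>> flatten_path('/a/./b/../c')
        '/a/c'
        >>> flatten_path('//a//b', flatten_slashes=True)
        '/a/b'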
'''
# Based on posixpath.normpath
# Fast path
if not path or path == '/':
return '/'
# Take off leading slash
if path[0] == '/':
path = path[1:]
parts = path.split('/')
new_parts = collections.deque()
for part in parts:
if part == '.' or (flatten_slashes and not part):
continue
elif part != '..':
new_parts.append(part)
elif new_parts:
new_parts.pop()
# If the filename is empty string
if flatten_slashes and path.endswith('/') or not len(new_parts):
new_parts.append('')
# Put back leading slash
new_parts.appendleft('')
return '/'.join(new_parts)
|
gpl-3.0
| 7,461,028,754,398,663,000
| 28.219858
| 86
| 0.597282
| false
| 4.158256
| false
| false
| false
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/roles.py
|
1
|
1150
|
import logging
from awxkit.api.resources import resources
from . import base
from . import page
log = logging.getLogger(__name__)
class Role(base.Base):
NATURAL_KEY = ('name',)
def get_natural_key(self, cache=None):
if cache is None:
cache = page.PageCache()
natural_key = super(Role, self).get_natural_key(cache=cache)
related_objs = [
related for name, related in self.related.items()
if name not in ('users', 'teams')
]
if related_objs:
related_endpoint = cache.get_page(related_objs[0])
if related_endpoint is None:
log.error("Unable to obtain content_object %s for role %s",
related_objs[0], self.endpoint)
return None
natural_key['content_object'] = related_endpoint.get_natural_key(cache=cache)
return natural_key
page.register_page(resources.role, Role)
class Roles(page.PageList, Role):
pass
page.register_page([resources.roles,
resources.related_roles,
resources.related_object_roles], Roles)
|
apache-2.0
| 3,697,434,702,690,880,500
| 24
| 89
| 0.595652
| false
| 3.993056
| false
| false
| false
|
nyergler/pythonslides
|
readthedocs/builds/models.py
|
1
|
11261
|
import re
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
from guardian.shortcuts import assign, get_objects_for_user
from taggit.managers import TaggableManager
from projects.models import Project
from projects import constants
from .constants import BUILD_STATE, BUILD_TYPES, VERSION_TYPES
class VersionManager(models.Manager):
def _filter_queryset(self, user, project, privacy_level, only_active):
if isinstance(privacy_level, basestring):
privacy_level = (privacy_level,)
queryset = Version.objects.filter(privacy_level__in=privacy_level)
# Remove this so we can use public() for all active public projects
#if not user and not project:
#return queryset
if user and user.is_authenticated():
# Add in possible user-specific views
user_queryset = get_objects_for_user(user, 'builds.view_version')
queryset = user_queryset | queryset
elif user:
# Hack around get_objects_for_user not supporting global perms
global_access = user.has_perm('builds.view_version')
if global_access:
queryset = Version.objects.all()
if project:
# Filter by project if requested
queryset = queryset.filter(project=project)
if only_active:
queryset = queryset.filter(active=True)
return queryset
def active(self, user=None, project=None, *args, **kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC, constants.PROTECTED,
constants.PRIVATE),
only_active=True,
)
return queryset.filter(*args, **kwargs)
def public(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
def protected(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC, constants.PROTECTED),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
def private(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PRIVATE),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
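    # Illustrative usage sketch (not part of the original manager; the request
    # and project names are assumed):
    #
    #   # Active versions a given user may browse publicly:
    #   Version.objects.public(user=request.user, project=project)
    #   # Everything the user can see, including protected and private versions:
    #   Version.objects.active(user=request.user, project=project)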
class Version(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='versions')
type = models.CharField(
_('Type'), max_length=20,
choices=VERSION_TYPES, default='unknown',
)
# used by the vcs backend
identifier = models.CharField(_('Identifier'), max_length=255)
verbose_name = models.CharField(_('Verbose Name'), max_length=255)
slug = models.CharField(_('Slug'), max_length=255)
supported = models.BooleanField(_('Supported'), default=True)
active = models.BooleanField(_('Active'), default=False)
built = models.BooleanField(_('Built'), default=False)
uploaded = models.BooleanField(_('Uploaded'), default=False)
privacy_level = models.CharField(
_('Privacy Level'), max_length=20, choices=constants.PRIVACY_CHOICES,
default='public', help_text=_("Level of privacy for this Version.")
)
tags = TaggableManager(blank=True)
objects = VersionManager()
class Meta:
unique_together = [('project', 'slug')]
ordering = ['-verbose_name']
permissions = (
# Translators: Permission around whether a user can view the
# version
('view_version', _('View Version')),
)
def __unicode__(self):
return ugettext(u"Version %(version)s of %(project)s (%(pk)s)" % {
'version': self.verbose_name,
'project': self.project,
'pk': self.pk
})
def get_absolute_url(self):
if not self.built and not self.uploaded:
return ''
return self.project.get_docs_url(version_slug=self.slug)
def save(self, *args, **kwargs):
"""
Add permissions to the Version for all owners on save.
"""
obj = super(Version, self).save(*args, **kwargs)
for owner in self.project.users.all():
assign('view_version', owner, self)
self.project.sync_supported_versions()
return obj
@property
def remote_slug(self):
if self.slug == 'latest':
if self.project.default_branch:
return self.project.default_branch
else:
return self.project.vcs_repo().fallback_branch
else:
return self.slug
def get_subdomain_url(self):
use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
if use_subdomain:
return "/%s/%s/" % (
self.project.language,
self.slug,
)
else:
return reverse('docs_detail', kwargs={
'project_slug': self.project.slug,
'lang_slug': self.project.language,
'version_slug': self.slug,
'filename': ''
})
def get_subproject_url(self):
return "/projects/%s/%s/%s/" % (
self.project.slug,
self.project.language,
self.slug,
)
def get_downloads(self, pretty=False):
project = self.project
data = {}
if pretty:
if project.has_pdf(self.slug):
data['PDF'] = project.get_pdf_url(self.slug)
if project.has_htmlzip(self.slug):
data['HTML'] = project.get_htmlzip_url(self.slug)
if project.has_epub(self.slug):
data['Epub'] = project.get_epub_url(self.slug)
else:
if project.has_pdf(self.slug):
data['pdf_url'] = project.get_pdf_url(self.slug)
if project.has_htmlzip(self.slug):
data['htmlzip_url'] = project.get_htmlzip_url(self.slug)
if project.has_epub(self.slug):
data['epub_url'] = project.get_epub_url(self.slug)
if project.has_manpage(self.slug):
data['manpage_url'] = project.get_manpage_url(self.slug)
if project.has_dash(self.slug):
data['dash_url'] = project.get_dash_url(self.slug)
data['dash_feed_url'] = project.get_dash_feed_url(self.slug)
return data
def get_conf_py_path(self):
# Hack this for now.
return "/docs/"
conf_py_path = self.project.conf_file(self.slug)
conf_py_path = conf_py_path.replace(
self.project.checkout_path(self.slug), '')
return conf_py_path.replace('conf.py', '')
def get_github_url(self, docroot, filename):
GITHUB_REGEXS = [
            re.compile(r'github.com/(.+)/(.+)(?:\.git){1}'),
re.compile('github.com/(.+)/(.+)'),
re.compile('github.com:(.+)/(.+).git'),
]
GITHUB_URL = 'https://github.com/{user}/{repo}/blob/{version}{docroot}{path}.rst'
repo_url = self.project.repo
if 'github' not in repo_url:
return ''
if not docroot:
return ''
for regex in GITHUB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return GITHUB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
)
def get_bitbucket_url(self, docroot, filename):
BB_REGEXS = [
re.compile('bitbucket.org/(.+)/(.+).git'),
re.compile('bitbucket.org/(.+)/(.+)/'),
re.compile('bitbucket.org/(.+)/(.+)'),
]
BB_URL = 'https://bitbucket.org/{user}/{repo}/src/{version}{docroot}{path}.rst'
repo_url = self.project.repo
if 'bitbucket' not in repo_url:
return ''
if not docroot:
return ''
for regex in BB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return BB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
)
class VersionAlias(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='aliases')
from_slug = models.CharField(_('From slug'), max_length=255, default='')
to_slug = models.CharField(_('To slug'), max_length=255, default='',
blank=True)
largest = models.BooleanField(_('Largest'), default=False)
def __unicode__(self):
return ugettext(u"Alias for %(project)s: %(from)s -> %(to)s" % {
'project': self.project,
            'from': self.from_slug,
'to': self.to_slug,
})
class Build(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='builds')
version = models.ForeignKey(Version, verbose_name=_('Version'), null=True,
related_name='builds')
type = models.CharField(_('Type'), max_length=55, choices=BUILD_TYPES,
default='html')
state = models.CharField(_('State'), max_length=55, choices=BUILD_STATE,
default='finished')
date = models.DateTimeField(_('Date'), auto_now_add=True)
success = models.BooleanField(_('Success'))
setup = models.TextField(_('Setup'), null=True, blank=True)
setup_error = models.TextField(_('Setup error'), null=True, blank=True)
output = models.TextField(_('Output'), default='', blank=True)
error = models.TextField(_('Error'), default='', blank=True)
exit_code = models.IntegerField(_('Exit code'), max_length=3, null=True,
blank=True)
class Meta:
ordering = ['-date']
get_latest_by = 'date'
def __unicode__(self):
return ugettext(u"Build %(project)s for %(usernames)s (%(pk)s)" % {
'project': self.project,
'usernames': ' '.join(self.project.users.all()
.values_list('username', flat=True)),
'pk': self.pk,
})
@models.permalink
def get_absolute_url(self):
return ('builds_detail', [self.project.slug, self.pk])
|
mit
| 4,639,291,233,820,865,000
| 34.749206
| 89
| 0.552704
| false
| 4.181582
| false
| false
| false
|
mghpcc-projects/user_level_slurm_reservations
|
commands/hil_slurmctld_prolog.py
|
1
|
14582
|
"""
MassOpenCloud / Hardware Isolation Layer (HIL)
Slurm Control Daemon - HIL Reservation Prolog
May 2017, Tim Donahue tpd001@gmail.com
"""
import argparse
import hostlist
import inspect
import logging
import os
import sys
from datetime import datetime, timedelta
from time import strftime
libdir = os.path.realpath(os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
from hil_slurm_helpers import (get_partition_data, get_job_data, get_object_data,
exec_scontrol_cmd, exec_scontrol_show_cmd,
get_hil_reservation_name, is_hil_reservation,
create_slurm_reservation, delete_slurm_reservation,
log_hil_reservation)
from hil_slurm_constants import (SHOW_OBJ_TIME_FMT, RES_CREATE_TIME_FMT,
SHOW_PARTITION_MAXTIME_HMS_FMT,
RES_CREATE_HIL_FEATURES,
HIL_RESERVE, HIL_RELEASE,
HIL_RESERVATION_COMMANDS,
RES_CREATE_FLAGS)
from hil_slurm_logging import log_init, log_info, log_debug, log_error
from hil_slurm_settings import (HIL_PARTITION_PREFIX,
RES_CHECK_DEFAULT_PARTITION,
RES_CHECK_EXCLUSIVE_PARTITION,
RES_CHECK_SHARED_PARTITION,
RES_CHECK_PARTITION_STATE,
HIL_RESERVATION_DEFAULT_DURATION,
HIL_RESERVATION_GRACE_PERIOD,
HIL_SLURMCTLD_PROLOG_LOGFILE,
HIL_ENDPOINT,
HIL_SLURM_PROJECT)
def _get_prolog_environment():
'''
Returns a job's prolog environment in dictionary form
'''
env_map = {'jobname': 'SLURM_JOB_NAME',
'partition': 'SLURM_JOB_PARTITION',
'username': 'SLURM_JOB_USER',
'job_id': 'SLURM_JOB_ID',
'job_uid': 'SLURM_JOB_UID',
'job_account': 'SLURM_JOB_ACCOUNT',
'nodelist': 'SLURM_JOB_NODELIST'
}
return {env_var: os.environ.get(slurm_env_var) for env_var, slurm_env_var in env_map.iteritems()}
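# Illustrative note (added, not part of the original module): for a job
# submitted as e.g. 'srun -p HIL_partition -J hil_reserve ...', the helper
# above would return a dict along the lines of (all values hypothetical):
#     {'jobname': 'hil_reserve', 'partition': 'HIL_partition',
#      'username': 'alice', 'job_id': '12345', 'job_uid': '1001',
#      'job_account': 'default', 'nodelist': 'node[01-04]'}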
def _check_hil_partition(env_dict, pdata_dict):
'''
Check if the partition exists and, if so, is properly named
Retrieve partition data via 'scontrol show'
'''
status = True
pname = pdata_dict['PartitionName']
if not pname.startswith(HIL_PARTITION_PREFIX):
log_info('Partition name `%s` does not match `%s*`' %
(pname, HIL_PARTITION_PREFIX))
status = False
# Verify the partition state is UP
if RES_CHECK_PARTITION_STATE:
if (pdata_dict['State'] != 'UP'):
log_info('Partition `%s` state (`%s`) is not UP' %
(pname, pdata_dict['State']))
status = False
# Verify the partition is not the default partition
if RES_CHECK_DEFAULT_PARTITION:
if (pdata_dict['Default'] == 'YES'):
log_info('Partition `%s` is the default partition, cannot be used for HIL' % pname)
status = False
# Verify the partition is not shared by checking 'Shared' and
# 'ExclusiveUser' attributes
if RES_CHECK_SHARED_PARTITION:
if (pdata_dict['Shared'] != 'NO'):
log_info('Partition `%s` is shared, cannot be used for HIL' % pname)
status = False
if RES_CHECK_EXCLUSIVE_PARTITION:
if (pdata_dict['ExclusiveUser'] != 'YES'):
log_info('Partition `%s` not exclusive to `%s`, cannot be used for HIL' % (pname, env_dict['username']))
status = False
return status
def _check_hil_command(env_dict):
'''
Get and validate the HIL command specified with srun / sbatch
'''
jobname = env_dict['jobname']
if jobname in HIL_RESERVATION_COMMANDS:
return jobname
else:
log_debug('Jobname `%s` is not a HIL reservation command, nothing to do.' % jobname)
return None
def _get_hil_reservation_times(env_dict, pdata_dict, jobdata_dict):
'''
Calculate the start time and end time of the reservation
Start time:
If the user specified a start time for the job, use that
Otherwise, use the current time
End time:
if the job has an end time, use that and extend it by the HIL
grace period.
If the job does not have an end time (e.g., TimeLimit UNLIMITED),
set the reservation end time to either the partition MaxTime,
if defined, or the HIL default maximum time.
'''
t_job_start_s = jobdata_dict['StartTime']
t_job_end_s = jobdata_dict['EndTime']
# log_debug('Job start %s Job end %s' % (t_job_start_s, t_job_end_s))
t_start_dt = datetime.strptime(t_job_start_s, SHOW_OBJ_TIME_FMT)
if 'Unknown' not in t_job_end_s:
log_debug('Using job end time for reservation')
# Job has a defined end time. Use it.
t_end_dt = datetime.strptime(t_job_end_s, SHOW_OBJ_TIME_FMT)
t_end_dt += timedelta(seconds=HIL_RESERVATION_GRACE_PERIOD)
else:
# Job does not have a defined end time. See if there's a time limit.
if 'UNLIMITED' in jobdata_dict['TimeLimit']:
# Job does not have a time limit. See if the partition has a
# max time. If so, use that. If not, use the HIL default duration.
p_max_time_s = pdata_dict['MaxTime']
log_debug('Partition MaxTime is %s' % p_max_time_s)
if 'UNLIMITED' in p_max_time_s:
# Partition does not have a max time, use HIL default.
log_debug('No job or partition time limit, using HIL default reservation duration')
t_end_dt = (t_start_dt +
timedelta(seconds=HIL_RESERVATION_DEFAULT_DURATION))
else:
# Partition has a max time, parse it. Output format is [days-]H:M:S.
log_debug('Using partition time limit to calculate reservation end time')
d_hms = p_max_time_s.split('-')
if (len(d_hms) == 1):
p_max_hms_dt = datetime.strptime(d_hms[0],
SHOW_PARTITION_MAXTIME_HMS_FMT)
p_max_timedelta = timedelta(hours=p_max_hms_dt.hour,
minutes=p_max_hms_dt.minute,
seconds=p_max_hms_dt.second)
elif (len(d_hms) == 2):
# Days field is present
                p_max_days_timedelta = timedelta(days=int(d_hms[0]))
p_max_hms_dt = datetime.strptime(d_hms[1],
SHOW_PARTITION_MAXTIME_HMS_FMT)
p_max_hms_timedelta = timedelta(hours=p_max_hms_dt.hour,
minutes=p_max_hms_dt.minute,
seconds=p_max_hms_dt.second)
p_max_timedelta = p_max_days_timedelta + p_max_hms_timedelta
log_debug(p_max_timedelta)
t_end_dt = t_start_dt + p_max_timedelta
else:
log_error('Cannot parse partition MaxTime (`%s`)' % p_max_time_s)
else:
# Job has a time limit. Use it.
# $$$ FIX
log_debug('Job has a time limit! Unsupported!')
pass
# We now have a defined reservation t_start and t_end in datetime format.
# Convert to strings and return.
t_start_s = t_start_dt.strftime(RES_CREATE_TIME_FMT)
t_end_s = t_end_dt.strftime(RES_CREATE_TIME_FMT)
# log_debug('Start time %s' % t_start_s)
# log_debug('End time %s' % t_end_s)
return t_start_s, t_end_s
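# Illustrative note (added): a partition MaxTime of '2-12:30:00' splits on '-'
# into ['2', '12:30:00'], so the branch above combines timedelta(days=2) with
# timedelta(hours=12, minutes=30); the reservation would then end 60.5 hours
# after the job start time.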
def _create_hil_reservation(restype_s, t_start_s, t_end_s, env_dict, pdata_dict, jobdata_dict):
'''
Create a HIL reservation
'''
# Generate a HIL reservation name
resname = get_hil_reservation_name(env_dict, restype_s, t_start_s)
# Check if reservation exists. If so, do nothing
resdata_dict_list, stdout_data, stderr_data = exec_scontrol_show_cmd('reservation', resname)
if (stderr_data) and ('not found' not in stderr_data):
log_info('HIL reservation `%s` already exists' % resname)
return resname, stderr_data
log_info('Creating HIL reservation `%s`, ending %s' % (resname, t_end_s))
stdout_data, stderr_data = create_slurm_reservation(resname, env_dict['username'],
t_start_s, t_end_s,
nodes=None, flags=RES_CREATE_FLAGS,
features=RES_CREATE_HIL_FEATURES,
debug=False)
return resname, stderr_data
def _delete_hil_reservation(env_dict, pdata_dict, jobdata_dict, resname):
'''
Delete a HIL reservation after validating HIL name prefix and owner name
The latter restricts 'hil_release' of a reservation to the owner
It is always possible to delete the reservation with 'scontrol delete'.
'''
# Minimally validate the specified reservation
if is_hil_reservation(resname, None):
log_info('Deleting HIL reservation `%s`' % resname)
return delete_slurm_reservation(resname, debug=False)
else:
log_info('Cannot delete HIL reservation, error in name (`%s`)' %
resname)
return None, 'hil_release: error: Invalid reservation name'
def _hil_reserve_cmd(env_dict, pdata_dict, jobdata_dict):
'''
Runs in Slurm control daemon prolog context
Create HIL reserve reservation if it does not already exist.
The HIL monitor will reserve the nodes and create the corresponding Slurm HIL release
reservation.
Reservation start and end times may overlap so long as the MAINT flag is set
'''
t_start_s, t_end_s = _get_hil_reservation_times(env_dict, pdata_dict, jobdata_dict)
resname, stderr_data = _create_hil_reservation(HIL_RESERVE, t_start_s, t_end_s,
env_dict, pdata_dict, jobdata_dict)
log_hil_reservation(resname, stderr_data, t_start_s, t_end_s)
def _hil_release_cmd(env_dict, pdata_dict, jobdata_dict):
'''
Runs in Slurm control daemon epilog context
Delete the reserve reservation in which the release job was run.
- Verify the reservation is a HIL reserve reservation
- Verify the reservation is owned by the user
- Get reserve reservation data via 'scontrol'
- Delete the reserve reservation in which the hil_release command was run
Release reservation will be deleted later by the HIL reservation monitor
'''
reserve_resname = jobdata_dict['Reservation']
if reserve_resname:
if not is_hil_reservation(reserve_resname, HIL_RESERVE):
log_error('Reservation `%s` is not a HIL reserve reservation' %
reserve_resname)
elif env_dict['username'] not in reserve_resname:
log_error('Reservation `%s` not owned by user `%s`' %
(reserve_resname, env_dict['username']))
else:
# Basic validation done
# Get reserve reservation data
reserve_rdata = get_object_data('reservation', reserve_resname)[0]
# Delete the reserve reservation
stdout_data, stderr_data = _delete_hil_reservation(env_dict, pdata_dict,
jobdata_dict, reserve_resname)
if (len(stderr_data) == 0):
log_info('Deleted HIL reserve reservation `%s`' % reserve_resname)
else:
log_error('Error deleting HIL reserve reservation `%s`' % reserve_resname)
log_error(stderr_data)
else:
log_error('No reservation name specified to `%s` command' %
jobdata_dict['JobName'])
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('--hil_prolog', action='store_true', default=False,
help='Function as the HIL prolog')
parser.add_argument('--hil_epilog', action='store_true', default=False,
help='Function as the HIL epilog')
return parser.parse_args()
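# Illustrative usage (added, paths hypothetical): slurmctld would typically be
# configured to run this script as
#     PrologSlurmctld=/<install dir>/hil_slurmctld_prolog.py --hil_prolog
#     EpilogSlurmctld=/<install dir>/hil_slurmctld_prolog.py --hil_epilog
# so that exactly one of the two flags selects prolog or epilog behaviour.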
def main(argv=[]):
args = process_args()
log_init('hil_slurmctld.prolog', HIL_SLURMCTLD_PROLOG_LOGFILE,
logging.DEBUG)
if args.hil_prolog:
pass
elif args.hil_epilog:
pass
else:
log_debug('Must specify one of --hil_prolog or --hil_epilog',
separator=True)
return False
# Collect prolog/epilog environment, job data, and partition data into
# dictionaries, perform basic sanity checks.
# Since data for one partition and one job is expected, select the
# first dict in the list
env_dict = _get_prolog_environment()
if not env_dict['partition']:
log_debug('Missing Slurm control daemon prolog / epilog environment.')
return False
pdata_dict = get_partition_data(env_dict['partition'])[0]
jobdata_dict = get_job_data(env_dict['job_id'])[0]
if not pdata_dict or not jobdata_dict:
log_debug('One of pdata_dict, jobdata_dict, or env_dict is empty')
log_debug('Job data', jobdata_dict)
log_debug('P data', pdata_dict)
return False
if not _check_hil_partition(env_dict, pdata_dict):
return False
# Verify the command is a HIL command. If so, process it.
hil_cmd = _check_hil_command(env_dict)
if not hil_cmd:
return True
status = True
if args.hil_prolog:
if (hil_cmd == 'hil_reserve'):
log_info('HIL Slurmctld Prolog', separator=True)
log_debug('Processing reserve request')
status = _hil_reserve_cmd(env_dict, pdata_dict, jobdata_dict)
elif args.hil_epilog:
if (hil_cmd == 'hil_release'):
log_info('HIL Slurmctld Epilog', separator=True)
log_debug('Processing release request')
status = _hil_release_cmd(env_dict, pdata_dict, jobdata_dict)
return status
if __name__ == '__main__':
main(sys.argv[1:])
exit(0)
# EOF
|
mit
| -5,637,913,810,003,291,000
| 37.885333
| 116
| 0.577904
| false
| 3.81528
| false
| false
| false
|
AlJohri/nucraigslist
|
listings/management/commands/download.py
|
1
|
2466
|
from django.core.management.base import BaseCommand, CommandError
from listings.models import Listing, User, Comment, Group
import os, sys
from django.utils import timezone
from optparse import make_option
from listings.lib import save_obj, get_fb_graph_api, get_word_bank, filter_listing
from dateutil.parser import parse
from socialscraper.facebook.graphapi import get_feed
class Command(BaseCommand):
# args = '<poll_id poll_id ...>'
# help = 'Closes the specified poll for voting'
option_list = BaseCommand.option_list + (
make_option('--recent',
action='store_true',
dest='recent',
default=False,
help='Download most recent posts'
),
make_option('--backfill',
action='store_true',
dest='backfill',
default=False,
help='Backfill database'
),
)
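    # Illustrative usage (added): the command is meant to be invoked through
    # manage.py, e.g. 'python manage.py download --recent' for the latest
    # posts or 'python manage.py download --backfill' to walk older history;
    # handle() below treats the two flags as mutually exclusive.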
def handle(self, *args, **options):
# mutually exclusive
if options['backfill'] and options['recent']: sys.exit()
if not options['backfill'] and not options['recent']: sys.exit()
api = get_fb_graph_api()
word_bank = get_word_bank(dl=False)
for group in Group.objects.all():
print "Downloading posts from %s" % group
if options['recent']:
print "Downloading most recent posts (no pagination)"
feed = api.get_object("%s/feed" % group.id)
for i,obj in enumerate(feed['data']):
listing, listing_created = save_obj(obj)
if listing_created:
filter_listing(listing, word_bank, i)
print ""
elif options['backfill']:
start = parse("01-1-2012")
if Listing.objects.filter(group_id=group.id).count() >= 1:
end = Listing.objects.filter(group_id=group.id).earliest('updated_time').updated_time.replace(tzinfo=None)
else:
end = timezone.now().replace(tzinfo=None)
print "Downloading from ", start, "to", end, "in reverse chronological order (latest first)."
for i,obj in enumerate(get_feed(api, str(group.id), start=start, end=end)):
listing, listing_created = save_obj(obj)
if listing_created:
filter_listing(listing, word_bank, i)
print ""
|
gpl-3.0
| -2,108,560,672,248,644,000
| 35.264706
| 126
| 0.564477
| false
| 4.303665
| false
| false
| false
|
Alymantara/maelstorm
|
tests/test_main.py
|
1
|
1272
|
from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
from maelstorm import metadata
from maelstorm.main import main
class TestMain(object):
@parametrize('helparg', ['-h', '--help'])
def test_help(self, helparg, capsys):
with raises(SystemExit) as exc_info:
main(['progname', helparg])
out, err = capsys.readouterr()
# Should have printed some sort of usage message. We don't
# need to explicitly test the content of the message.
assert 'usage' in out
# Should have used the program name from the argument
# vector.
assert 'progname' in out
# Should exit with zero return code.
assert exc_info.value.code == 0
@parametrize('versionarg', ['-v', '--version'])
def test_version(self, versionarg, capsys):
with raises(SystemExit) as exc_info:
main(['progname', versionarg])
out, err = capsys.readouterr()
# Should print out version.
assert err == '{0} {1}\n'.format(metadata.project, metadata.version)
# Should exit with zero return code.
assert exc_info.value.code == 0
|
mit
| 4,045,476,757,722,540,500
| 34.333333
| 76
| 0.643082
| false
| 4
| true
| false
| false
|
jbenden/ansible
|
lib/ansible/modules/cloud/amazon/efs_facts.py
|
1
|
11177
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- Module searches Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
required: false
default: None
id:
description:
- ID of Amazon EFS.
required: false
default: None
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
required: false
default: None
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# find all existing efs
- efs_facts:
register: result
- efs_facts:
name: myTestNameTag
- efs_facts:
id: fs-1234abcd
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from collections import defaultdict
from time import sleep
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec,
camel_dict_to_snake_dict, HAS_BOTO3)
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
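# Usage sketch (added for illustration, mirroring how EFSConnection calls it):
#     for fs in iterate_all('FileSystems',
#                           connection.describe_file_systems,
#                           CreationToken='my-efs'):
#         process(fs)
# None-valued keyword arguments are dropped before the call, and paging
# continues as long as the response carries a 'NextMarker'.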
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'MountTargetId',
'subnet-': 'SubnetId',
'eni-': 'NetworkInterfaceId',
'sg-': 'SecurityGroups'
}
prefix = first_or_default(filter(
lambda pref: str(attr_id).startswith(pref),
attr_by_prefix.keys()
))
if prefix:
return attr_by_prefix[prefix]
return 'IpAddress'
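# Illustrative examples (added): prefix_to_attr('subnet-1a2b3c4d') returns
# 'SubnetId', prefix_to_attr('sg-4d3c2b1a') returns 'SecurityGroups', and an
# unprefixed value such as '172.31.17.173' falls through to 'IpAddress'.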
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
    Helper method to determine if the requested tags already exist
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
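# Illustrative example (added): has_tags({'Name': 'my-efs', 'env': 'dev'},
# {'env': 'dev'}) is True, whereas requiring {'env': 'prod'} returns False
# because the available value differs.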
def has_targets(available, required):
"""
    Helper method to determine if the requested mount targets already exist
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
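# Illustrative example (added): group_list_of_dict(
#     [{'SubnetId': 'subnet-1', 'SecurityGroups': ['sg-1', 'sg-2']},
#      {'SubnetId': 'subnet-2'}])
# yields {'SubnetId': ['subnet-1', 'subnet-2'],
#         'SecurityGroups': ['sg-1', 'sg-2']} as a defaultdict of lists.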
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(),
name=dict(),
tags=dict(type="dict", default={}),
targets=dict(type="list", default=[])
))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)
if tags:
file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = filter(lambda item:
has_targets(item['MountTargets'], targets), file_systems_info)
file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
if __name__ == '__main__':
main()
|
gpl-3.0
| -2,514,477,651,479,542,000
| 28.568783
| 156
| 0.585756
| false
| 4.039393
| false
| false
| false
|
pavolloffay/jaeger
|
plugin/storage/es/esCleaner.py
|
1
|
4828
|
#!/usr/bin/env python3
import curator
import elasticsearch
import os
import ssl
import sys
TIMEOUT=120
def main():
if len(sys.argv) != 3:
print('USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(sys.argv[0]))
print('NUM_OF_DAYS ... delete indices that are older than the given number of days.')
print('HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from.')
        print('TIMEOUT ... number of seconds to wait for master node response (default {}).'.format(TIMEOUT))
print('INDEX_PREFIX ... specifies index prefix.')
print('ARCHIVE ... specifies whether to remove archive indices (only works for rollover) (default false).')
print('ROLLOVER ... specifies whether to remove indices created by rollover (default false).')
print('ES_USERNAME ... The username required by Elasticsearch.')
print('ES_PASSWORD ... The password required by Elasticsearch.')
print('ES_TLS ... enable TLS (default false).')
print('ES_TLS_CA ... Path to TLS CA file.')
print('ES_TLS_CERT ... Path to TLS certificate file.')
print('ES_TLS_KEY ... Path to TLS key file.')
print('ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server\'s certificate chain and host name verification.')
sys.exit(1)
client = create_client(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"), str2bool(os.getenv("ES_TLS", 'false')), os.getenv("ES_TLS_CA"), os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"), str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false')))
ilo = curator.IndexList(client)
empty_list(ilo, 'Elasticsearch has no indices')
prefix = os.getenv("INDEX_PREFIX", '')
if prefix != '':
prefix += '-'
if str2bool(os.getenv("ARCHIVE", 'false')):
filter_archive_indices_rollover(ilo, prefix)
else:
if str2bool(os.getenv("ROLLOVER", 'false')):
filter_main_indices_rollover(ilo, prefix)
else:
filter_main_indices(ilo, prefix)
empty_list(ilo, 'No indices to delete')
for index in ilo.working_list():
print("Removing", index)
timeout = int(os.getenv("TIMEOUT", TIMEOUT))
delete_indices = curator.DeleteIndices(ilo, master_timeout=timeout)
delete_indices.do_action()
def filter_main_indices(ilo, prefix):
    ilo.filter_by_regex(kind='regex', value=prefix + r"jaeger-(span|service|dependencies)-\d{4}-\d{2}-\d{2}")
empty_list(ilo, "No indices to delete")
# This excludes archive index as we use source='name'
# source `creation_date` would include archive index
ilo.filter_by_age(source='name', direction='older', timestring='%Y-%m-%d', unit='days', unit_count=int(sys.argv[1]))
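# Illustrative note (added): with an empty INDEX_PREFIX the regex above keeps
# daily indices such as 'jaeger-span-2021-03-15' or 'jaeger-service-2021-03-15',
# while rollover-style names like 'jaeger-span-000001' and the archive index
# do not match and are left untouched by this code path.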
def filter_main_indices_rollover(ilo, prefix):
    ilo.filter_by_regex(kind='regex', value=prefix + r"jaeger-(span|service)-\d{6}")
empty_list(ilo, "No indices to delete")
# do not remove active write indices
ilo.filter_by_alias(aliases=[prefix + 'jaeger-span-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_alias(aliases=[prefix + 'jaeger-service-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_age(source='creation_date', direction='older', unit='days', unit_count=int(sys.argv[1]))
def filter_archive_indices_rollover(ilo, prefix):
# Remove only rollover archive indices
# Do not remove active write archive index
    ilo.filter_by_regex(kind='regex', value=prefix + r"jaeger-span-archive-\d{6}")
empty_list(ilo, "No indices to delete")
ilo.filter_by_alias(aliases=[prefix + 'jaeger-span-archive-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_age(source='creation_date', direction='older', unit='days', unit_count=int(sys.argv[1]))
def empty_list(ilo, error_msg):
try:
ilo.empty_list_check()
except curator.NoIndices:
print(error_msg)
sys.exit(0)
def str2bool(v):
return v.lower() in ('true', '1')
def create_client(username, password, tls, ca, cert, key, skipHostVerify):
context = ssl.create_default_context()
if ca is not None:
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca)
elif skipHostVerify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
if username is not None and password is not None:
return elasticsearch.Elasticsearch(sys.argv[2:], http_auth=(username, password), ssl_context=context)
elif tls:
context.load_cert_chain(certfile=cert, keyfile=key)
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
else:
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
if __name__ == "__main__":
main()
|
apache-2.0
| 3,711,925,970,792,277,000
| 42.107143
| 250
| 0.663422
| false
| 3.542186
| false
| false
| false
|
Lamelos/django-allauth-office365
|
allauth_office365/adapter.py
|
1
|
2506
|
from django.contrib import messages
from django.dispatch import receiver
from django.http import HttpResponseForbidden
from allauth.account.signals import user_logged_in
from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter, get_adapter
from allauth.socialaccount.providers import registry
from .provider import Office365Provider
class SocialAccountAdapter(DefaultSocialAccountAdapter):
# based on: https://github.com/thenewguy/django-allauth-adfs/blob/master/allauth_adfs/socialaccount/adapter.py
def pre_social_login(self, request, sociallogin):
# new user logins are handled by populate_user
if sociallogin.is_existing:
changed, user = self.update_user_fields(request, sociallogin)
if changed:
user.save()
def populate_user(self, request, sociallogin, data):
user = super(SocialAccountAdapter, self).populate_user(request, sociallogin, data)
self.update_user_fields(request, sociallogin, user)
return user
def update_user_fields(self, request, sociallogin=None, user=None):
changed = False
if user is None:
user = sociallogin.account.user
office365_provider = registry.by_id(Office365Provider.id, request)
false_keys = ["is_staff", "is_superuser"]
boolean_keys = false_keys + ["is_active"]
copy_keys = boolean_keys + ["first_name", "last_name", "email", "username"]
if sociallogin is not None and sociallogin.account.provider == Office365Provider.id:
data = sociallogin.account.extra_data
values = office365_provider.extract_common_fields(data)
for key in copy_keys:
# it is assumed that values are cleaned and set for all
# fields and if any of the boolean_keys are not provided
# in the raw data they should be set to False by
# the extract_common_fields method
if key in values and getattr(user, key) != values[key]:
setattr(user, key, values[key])
changed = True
else:
for key in false_keys:
if getattr(user, key):
msg = "Staff users must authenticate via the %s provider!" % office365_provider.name
response = HttpResponseForbidden(msg)
raise ImmediateHttpResponse(response)
return changed, user
|
mit
| 1,764,432,020,011,735,000
| 44.563636
| 114
| 0.659218
| false
| 4.328152
| false
| false
| false
|
cyberang3l/sysdata-collector
|
libs/globalvars.py
|
1
|
2230
|
# Copyright (C) 2014 Vangelis Tasoulas <vangelis@tasoulas.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Define default constants
PROGRAM_NAME = 'sysdata-collector'
VERSION = '0.0.1'
AUTHOR = 'Vangelis Tasoulas'
# Default config file location where the program should
# look for a configuration file
CONFIG_FILE_LOCATIONS = [".", "/etc/template"]
# The default config filename which might exist
# in CONFIG_FILE_LOCATIONS
DEFAULT_CONFIG_FILENAME = PROGRAM_NAME + ".conf"
# Console logging level (If you change this to DEBUG)
# text sent to STDOUT will be too much
# CRITICAL = 50
# ERROR = 40
# WARNING = 30
# INFO = 20
# DEBUG = 10
CONSOLE_LOG_LEVEL = 20
class exitCode():
"""
Define static exit Codes
"""
SUCCESS = 0
FAILURE = 1
INCORRECT_USAGE = 2
PRINT_SEPARATOR = "#######################################"
# Define AND set default values for the global variables here
# Default file logging level
# CRITICAL = 50
# ERROR = 40
# WARNING = 30
# INFO = 20
# DEBUG = 10
FileLogLevel = 20
# Default absolute path for the log file
log_file = "{0}/{1}".format(".", PROGRAM_NAME + ".log")
# Conf will be found on runtime (if any)
conf_file = ""
# If your program can run in daemon mode,
# check this variable in runtime if it is true
daemonMode = False
##################################################
list_available_plugins = False
list_active_plugins = False
only_print_samples = False
append_file = False
test_plugin = None
output_file = 'data_collected-%{ts}.csv'
delimiter = ","
active_plugins_dir = "active-plugins"
plugin_directories = []
intervalBetweenSamples = 10
|
gpl-3.0
| -8,446,234,026,580,170,000
| 26.875
| 71
| 0.685202
| false
| 3.667763
| true
| false
| false
|
dcsch/pyif
|
pyif/story.py
|
1
|
3162
|
'''
Created on Nov 21, 2013
@author: david
'''
from thing import Thing, Player
import grammar
import parser
import action
import glk
class Story:
def __init__(self, name, headline, delegate):
self.name = name
self.headline = headline
self.release = 1
self.serial = 81001
self.delegate = delegate
self.root = Thing("root", None)
self.compass = Thing("compass", self.root)
self.north = Thing("north", self.compass)
self.north.nouns = ["north"]
self.east = Thing("east", self.compass)
self.east.nouns = ["east"]
self.south = Thing("south", self.compass)
self.south.nouns = ["south"]
self.west = Thing("west", self.compass)
self.west.nouns = ["west"]
self.northeast = Thing("northeast", self.compass)
self.northeast.nouns = ["northeast"]
self.northwest = Thing("northwest", self.compass)
self.northwest.nouns = ["northwest"]
self.southeast = Thing("southeast", self.compass)
self.southeast.nouns = ["southeast"]
self.southwest = Thing("southwest", self.compass)
self.southwest.nouns = ["southwest"]
self.up_above = Thing("up above", self.compass)
self.up_above.nouns = ["up", "above"]
self.ground = Thing("ground", self.compass)
self.ground.nouns = ["ground"]
self.inside = Thing("inside", self.compass)
self.inside.nouns = ["inside"]
self.outside = Thing("outside", self.compass)
self.outside.nouns = ["outside"]
# Player
self.player = Player("cretin", self.root)
self.player.nouns = ["cretin", "me"]
self.player.description = "As good looking as ever."
self.actor = self.player
self.nouns = []
# State and Parser
self.has_quit = False
self.deadflag = 0
self.keep_silent = False
self.grammar = grammar.Grammar(self)
self.parser = parser.Parser(self, self.grammar)
def run(self):
"The main story loop"
if self.delegate:
self.delegate.initialise()
# The initial text
action.version(self)
glk.put_char("\n")
action.look(self, True)
# while True:
# event = glk.select()
# if event.type == EVTYPE_LINEINPUT:
while self.parser.read_input():
if self.deadflag:
self.handle_deadflag()
def handle_deadflag(self):
"Report the player's end-of-game status"
glk.put_string("\n *** ")
handled = False
if self.delegate and "death_message" in dir(self.delegate):
handled = self.delegate.death_message()
if not handled:
self.death_message()
glk.put_string(" ***\n\n\n")
def death_message(self):
"The text of the death message"
if self.deadflag == 1:
glk.put_string("You have died")
elif self.deadflag == 2:
glk.put_string("You have won")
|
mit
| -1,471,477,336,399,294,700
| 29.114286
| 67
| 0.550285
| false
| 3.663963
| false
| false
| false
|
HomeRad/TorCleaner
|
doc/bfknav.py
|
1
|
7157
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2009 Bastian Kleineidam
"""
General navigation writer reading .nav file info.
"""
import sys
import os
import re
from cStringIO import StringIO
_slashes_ro = re.compile(r"/+")
_thisdir_ro = re.compile(r"^\./")
_samedir_ro = re.compile(r"/\./|/\.$")
_parentdir_ro = re.compile(r"^/(\.\./)+|/(?!\.\./)[^/]+/\.\.(/|$)")
_relparentdir_ro = re.compile(r"^(?!\.\./)[^/]+/\.\.(/|$)")
def collapse_segments(path):
"""
Remove all redundant segments from the given URL path.
Precondition: path is an unquoted url path
"""
# shrink multiple slashes to one slash
path = _slashes_ro.sub("/", path)
# collapse redundant path segments
path = _thisdir_ro.sub("", path)
path = _samedir_ro.sub("/", path)
# collapse parent path segments
# note: here we exploit the fact that the replacements happen
# to be from left to right (see also _parentdir_ro above)
newpath = _parentdir_ro.sub("/", path)
while newpath != path:
path = newpath
newpath = _parentdir_ro.sub("/", path)
# collapse parent path segments of relative paths
# (ie. without leading slash)
newpath = _relparentdir_ro.sub("", path)
while newpath != path:
path = newpath
newpath = _relparentdir_ro.sub("", path)
return path
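# Illustrative examples (added, not in the original module):
#     collapse_segments("a//b/./c") -> "a/b/c"
#     collapse_segments("a/b/../c") -> "a/c"
#     collapse_segments("./a")      -> "a"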
class Node(object):
"""
Node class for use in a navigation tree, with abilities to write
HTML output.
"""
def __init__(self, name, order, filename):
"""Initialize node information"""
self.name = name
self.order = order
self.filename = filename
self.level = 0
self.children = []
self.sibling_right = None
self.active = False
self.parent = None
def get_url(self, level):
"""Get relative URL to this node."""
if self.children:
url = self.children[0].get_url(level)
else:
url = "../"*level + self.filename
return collapse_segments(url)
def addChildren(self, nodes):
"""
Add given nodes as children of this node, setting parent
and level information accordingly.
"""
for node in nodes:
node.parent = self
node.level = self.level + 1
self.children.append(node)
def write_nav(self, fp, active):
"""
Write HTML node navigation.
"""
descend = has_node(active, self.children)
if self.active or descend:
self.write_active(fp)
else:
self.write_inactive(fp, active.level)
if self.sibling_right:
self.sibling_right.write_nav(fp, active)
if descend:
# go to next level
self.write_nextlevel(fp)
self.children[0].write_nav(fp, active)
def write_inactive(self, fp, level):
"""
Write HTML of inactive navigation node.
"""
s = '<a href="%s">%s' % (self.get_url(level), self.name)
if self.children:
s += ' >'
s += "</a>\n"
fp.write(s)
def write_active(self, fp):
"""
Write HTML of active navigation node.
"""
s = "<span>"
#if not self.children:
# s += '> '
s += self.name
if self.children:
s += ' >'
s += "</span>\n"
fp.write(s)
def write_nextlevel(self, fp):
fp.write('</div>\n<div class="navrow" style="padding: 0em 0em 0em %dem;">'% (self.level+2))
def new_node(self):
return Node(self.name, sys.maxint, self.filename)
def __repr__(self):
return "<Node %r>"%self.name
def __lt__(self, other):
return self.order < other.order
def __le__(self, other):
return self.order <= other.order
def __eq__(self, other):
return self.order == other.order
def __ne__(self, other):
return self.order != other.order
def __gt__(self, other):
return self.order > other.order
def __ge__(self, other):
return self.order >= other.order
def parse_navtree(dirname):
"""
Parse a hierarchy of .nav files into a tree structure,
consisting of lists of lists. The list entries are sorted in
navigation order.
"""
nodes = []
files = os.listdir(dirname)
for f in files:
filename = os.path.join(dirname, f)
htmlname = os.path.join(dirname, os.path.splitext(f)[0]+".html")
if os.path.isfile(filename) and os.path.isfile(htmlname) and \
f.endswith('.nav'):
nodes.append(get_nav_node(filename, htmlname))
elif os.path.isdir(filename):
subnodes = parse_navtree(filename)
if subnodes:
if os.path.isfile(filename+".nav"):
node = get_nav_node(filename+".nav", filename)
else:
node = subnodes[0].new_node()
node.addChildren(subnodes)
nodes.append(node)
nodes.sort()
for i,n in enumerate(nodes):
if (i+1)<len(nodes):
n.sibling_right = nodes[i+1]
#print_nodes(nodes)
return nodes
def get_nav_node(navfile, htmlname):
"""
Get a Node() instance with info of given navfile.
"""
flocals = {}
execfile(navfile, {}, flocals)
order = flocals.get('order', sys.maxint)
name = flocals['name']
return Node(name, order, htmlname)
def print_nodes(nodes):
"""
Print a tree structure to stdout.
"""
for node in nodes:
print " "*node.level+node.name
if node.children:
print_nodes(node.children)
def has_node(node, nodes):
"""
Look for node in a tree structure.
@return True if node is found
"""
for n in nodes:
if node.filename == n.filename:
return True
if has_node(node, n.children):
return True
return False
def generate_nav(start, nodes):
"""
Write one navigation tree level into HTML files, with given
start node as root node.
"""
for node in nodes:
print node.filename
if node.children:
generate_nav(start, node.children)
else:
node.active = True
fp = StringIO()
start.write_nav(fp, node)
nav = """<div class="navigation">
<div class="navrow" style="padding: 0em 0em 0em 1em;">
%s
</div>
</div>
""" % fp.getvalue()
node.active = False
write_nav(node.filename, nav)
def write_nav(filename, nav):
"""
Write navigation into filename.
"""
lines = []
skip = False
f = open(filename)
for line in f:
if not skip:
lines.append(line)
if line.startswith("<!-- bfknav -->"):
skip = True
lines.append(nav)
elif line.startswith("<!-- /bfknav -->"):
skip = False
lines.append(line)
f.close()
f = open(filename, 'w')
for line in lines:
f.write(line)
f.close()
if __name__=='__main__':
nodes = parse_navtree(".")
if nodes:
generate_nav(nodes[0], nodes)
|
gpl-2.0
| 5,557,763,311,401,146,000
| 26.526923
| 99
| 0.551069
| false
| 3.794804
| false
| false
| false
|
EKiefer/edge-starter
|
py34env/Scripts/thresholder.py
|
1
|
1845
|
#!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
def __init__(self, master, im, value=128):
Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
self.canvas.pack()
scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale, length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
# uncomment the following line for instant feedback (might
# be too slow on some platforms)
# self.redraw()
def update_scale(self, value):
self.value = eval(value)
self.redraw()
def redraw(self, event=None):
# create overlay (note the explicit conversion to mode "1")
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
# update canvas
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
tags="overlay")
# --------------------------------------------------------------------
# main
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
|
mit
| 5,243,874,531,555,993,000
| 23.932432
| 74
| 0.58916
| false
| 3.534483
| false
| false
| false
|
DimensionDataCBUSydney/plumbery
|
plumbery/infrastructure.py
|
1
|
66146
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import time
from uuid import uuid4
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.common.dimensiondata import DimensionDataFirewallRule
from libcloud.common.dimensiondata import DimensionDataFirewallAddress
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.utils.xml import findtext, findall
from plumbery.terraform import Terraform
from plumbery.exception import PlumberyException
from plumbery.plogging import plogging
__all__ = ['PlumberyInfrastructure']
class PlumberyInfrastructure(object):
"""
Infrastructure as code, for network and security
:param facility: the underlying physical facility
:type facility: :class:`plumbery.PlumberyFacility`
This is an abstraction of a virtual data center. It is a secured
container for multiple nodes.
Example::
from plumbery.infrastructure import PlumberyInfrastructure
infrastructure = PlumberyInfrastructure(facility)
infrastructure.build(blueprint)
In this example an infrastructure is initialised at the given facility, and
then it is asked to create pipes and plumbing described in the
    provided blueprint. This covers solely the network and the security,
not the nodes themselves.
Attributes:
facility (PlumberyFacility):
a handle to the physical facility where network domains
are implemented
"""
# the physical data center
facility = None
def __init__(self, facility=None):
"""A virtual data centre attached to a physical data centre"""
# handle to parent parameters and functions
self.facility = facility
self.region = facility.region
self.plumbery = facility.plumbery
self.network = None
self.domain = None
self.terraform = Terraform(facility.plumbery.working_directory)
self._cache_remote_vlan = []
self._cache_offshore_vlan = []
self._cache_firewall_rules = []
self._cache_balancers = None
self._cache_pools = None
self._network_domains_already_built = []
self._vlans_already_built = []
def get_region_id(self):
return self.facility.get_setting('regionId')
def get_default(self, label, default=None):
"""
Retrieves default value for a given name
"""
value = self.facility.get_setting(label)
if value is not None:
return value
return default
def get_container(self, blueprint):
"""
Retrieves a domain and a network attached to a blueprint
:param blueprint: the various attributes of the target fittings
:type blueprint: ``dict``
:return: the infrastructure associated to the provided blueprint
        :rtype: :class:`plumbery.PlumberyInfrastructure` or ``None``
The returned object has at least a network domain and an Ethernet
network, like in the following example::
>>>container = infrastructure.get_container(blueprint)
>>>print(container.domain.name)
...
>>>print(container.network.name)
...
"""
target = PlumberyInfrastructure(self.facility)
target.blueprint = blueprint
if ('domain' not in blueprint
or type(blueprint['domain']) is not dict):
raise PlumberyException(
"Error: no network domain has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
if ('ethernet' not in blueprint
or type(blueprint['ethernet']) is not dict):
raise PlumberyException(
"Error: no ethernet network has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
domainName = blueprint['domain']['name']
target.domain = self.get_network_domain(domainName)
networkName = blueprint['ethernet']['name']
target.network = self.get_ethernet(networkName)
return target
def get_network_domain(self, name):
"""
Retrieves a network domain by name
:param name: name of the target network domain
:type name: ``str``
"""
if len(self.facility._cache_network_domains) < 1:
plogging.debug("Listing network domains")
self.facility._cache_network_domains = \
self.region.ex_list_network_domains(
self.facility.get_location_id())
plogging.debug("- found {} network domains"
.format(len(self.facility._cache_network_domains)))
for domain in self.facility._cache_network_domains:
if domain.name == name:
return domain
return None
def get_ethernet(self, path):
"""
Retrieves an Ethernet network by name
:param path: the name of the target Ethernet network
:type path: ``str`` or ``list``of ``str``
:return: an instance of an Ethernet network
:rtype: :class:`VLAN` or ``None``
This function searches firstly at the current facility. If the
name is a complete path to a remote network, then plumbery looks
there. If a different region is provided, then authentication is done
against the related endpoint.
For example if ``MyNetwork`` has been defined in a data centre in
Europe::
>>>infrastructure.get_ethernet('MyNetwork')
>>>infrastructure.get_ethernet(['EU6', 'MyNetwork'])
Looking for remote Ethernet network 'EU6::MyNetwork'
- found it
>>>infrastructure.get_ethernet(['dd-eu', 'EU6', 'MyNetwork'])
Looking for offshore Ethernet network 'dd-eu::EU6::MyNetwork'
- found it
"""
if isinstance(path, str):
path = path.split('::')
if len(path) == 2: # force offshore lookup if needed
target_region = self.facility.get_region(path[0])
if target_region != self.facility.get_region():
path.insert(0, target_region)
if len(path) == 1: # local name
if len(self.facility._cache_vlans) < 1:
plogging.debug("Listing Ethernet networks")
self.facility._cache_vlans = self.region.ex_list_vlans(
location=self.facility.get_location_id())
plogging.debug("- found {} Ethernet networks"
.format(len(self.facility._cache_vlans)))
for network in self.facility._cache_vlans:
if network.name == path[0]:
return network
elif len(path) == 2: # different location, same region
if (len(self._cache_remote_vlan) == 3
and self._cache_remote_vlan[0] == path[0]
and self._cache_remote_vlan[1] == path[1]):
return self._cache_remote_vlan[2]
plogging.info("Looking for remote Ethernet network '%s'",
'::'.join(path))
try:
remoteLocation = self.region.ex_get_location_by_id(path[0])
except IndexError:
plogging.info("- '%s' is unknown", path[0])
return None
vlans = self.region.ex_list_vlans(location=remoteLocation)
for network in vlans:
if network.name == path[1]:
self._cache_remote_vlan += path
self._cache_remote_vlan.append(network)
plogging.info("- found it")
return network
plogging.info("- not found")
elif len(path) == 3: # other region
if (len(self._cache_offshore_vlan) == 4
and self._cache_offshore_vlan[0] == path[0]
and self._cache_offshore_vlan[1] == path[1]
and self._cache_offshore_vlan[2] == path[2]):
return self._cache_offshore_vlan[3]
plogging.info("Looking for offshore Ethernet network '{}'"
.format('::'.join(path)))
offshore = self.plumbery.get_compute_driver(region=path[0])
try:
remoteLocation = offshore.ex_get_location_by_id(path[1])
except IndexError:
plogging.info("- '{}' is unknown".format(path[1]))
return None
vlans = offshore.ex_list_vlans(location=remoteLocation)
for network in vlans:
if network.name == path[2]:
self._cache_offshore_vlan += path
self._cache_offshore_vlan.append(network)
plogging.info("- found it")
return network
plogging.info("- not found")
return None
def build(self, blueprint):
"""
Creates the infrastructure for one blueprint
:param blueprint: the various attributes of the target fittings
:type blueprint: ``dict``
:return: ``True`` if the network has been created or is already there,
``False`` otherwise
:rtype: ``bool``
:raises: :class:`plumbery.PlumberyException`
- if some unrecoverable error occurs
This function is looking at all fittings in the blueprint except the
nodes. This is including:
* a network domain
* one Ethernet network
* eventually, several public IPv4 addresses
* address translation rules to private IPv4 addresses
* firewall rules
In safe mode, the function will stop on any missing component since
it is not in a position to add fittings, and return ``False``.
        If all components already exist then the function will return ``True``.
"""
self.blueprint = blueprint
plogging.debug("Building infrastructure of blueprint '{}'".format(
blueprint['target']))
if 'domain' not in blueprint or type(blueprint['domain']) is not dict:
raise PlumberyException(
"Error: no network domain has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
domainName = blueprint['domain']['name']
if 'ethernet' not in blueprint \
or type(blueprint['ethernet']) is not dict:
raise PlumberyException(
"Error: no ethernet network has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
if 'subnet' not in blueprint['ethernet']:
raise PlumberyException(
"Error: no IPv4 subnet "
"(e.g., '10.0.34.0') as been defined for the blueprint '{}'!"
.format(blueprint['target']))
networkName = blueprint['ethernet']['name']
self.domain = self.get_network_domain(domainName)
if self.domain is not None:
plogging.info("Creating network domain '{}'".format(domainName))
plogging.info("- already there")
elif self.plumbery.safeMode:
plogging.info("Creating network domain '{}'".format(domainName))
plogging.info("- skipped - safe mode")
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
plogging.info("- skipped - safe mode")
return False
else:
plogging.info("Creating network domain '{}'".format(domainName))
# the description attribute is a smart way to tag resources
description = '#plumbery'
if 'description' in blueprint['domain']:
description = blueprint['domain']['description']+' #plumbery'
# level of service
service = 'ESSENTIALS'
if 'service' in blueprint['domain']:
service = blueprint['domain']['service'].upper()
while True:
try:
self.domain = self.region.ex_create_network_domain(
location=self.facility.location,
name=domainName,
service_plan=service,
description=description)
plogging.info("- in progress")
# prevent locks in xops
self.region.ex_wait_for_state(
'NORMAL', self.region.ex_get_network_domain,
poll_interval=5, timeout=1200,
network_domain_id=self.domain.id)
self.facility._cache_network_domains.append(self.domain)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'OPERATION_NOT_SUPPORTED' in str(feedback):
plogging.info("- operation not supported")
return False
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return False
else:
plogging.info("- unable to create network domain")
plogging.error(str(feedback))
return False
break
self.network = self.get_ethernet(networkName)
if self.network is not None:
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
plogging.info("- already there")
elif self.plumbery.safeMode:
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
plogging.info("- skipped - safe mode")
return False
else:
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
# the description attribute is a smart way to tag resources
description = '#plumbery'
if 'description' in blueprint['ethernet']:
description = blueprint['ethernet']['description']+' #plumbery'
while True:
try:
self.network = self.region.ex_create_vlan(
network_domain=self.domain,
name=networkName,
private_ipv4_base_address=blueprint['ethernet']['subnet'],
description=description)
plogging.info("- in progress")
# prevent locks in xops
self.region.ex_wait_for_state(
'NORMAL',
self.region.ex_get_vlan,
poll_interval=5, timeout=1200,
vlan_id=self.network.id)
self.facility._cache_vlans.append(self.network)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- not possible "
"- network already exists elsewhere")
elif 'IP_ADDRESS_NOT_UNIQUE' in str(feedback):
plogging.info("- not possible "
"- subnet is used elsewhere")
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return False
else:
plogging.info("- unable to create Ethernet network")
plogging.error(str(feedback))
return False
break
if 'reserved' in blueprint['ethernet']:
for reserved in blueprint['ethernet']['reserved']:
plogging.info("Reserving address '{}'"
.format(reserved))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
continue
while True:
try:
self.ex_reserve_private_ip_addresses(
vlan=self.network,
address=reserved)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
else:
plogging.info("- unable to create Ethernet network")
plogging.error(str(feedback))
return False
break
if 'multicloud' in blueprint \
and isinstance(blueprint['multicloud'], dict):
plogging.info("Starting multicloud deployment")
self.terraform.build(blueprint['multicloud'])
return True
def destroy_blueprint(self, blueprint):
"""
Destroys network and security elements of a blueprint
:param blueprint: the various attributes of the target fittings
:type blueprint: ``dict``
This function looks after following service elements:
* it releases public IPv4 addresses
* it destroys firewall rules
* it destroys the Ethernet network
* it destroys the network domain
The destruction is tentative, meaning that if the Ethernet network or
the network domain have some dependency then they cannot be destroyed.
        This happens quite often since multiple blueprints can share the
same Ethernet network or the same network domain.
"""
self.blueprint = blueprint
if ('domain' not in blueprint
or type(blueprint['domain']) is not dict):
raise PlumberyException(
"Error: no network domain has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
if ('ethernet' not in blueprint
or type(blueprint['ethernet']) is not dict):
raise PlumberyException(
"Error: no ethernet network has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
domainName = blueprint['domain']['name']
networkName = blueprint['ethernet']['name']
domain = self.get_network_domain(domainName)
if domain is None:
plogging.info("Destroying Ethernet network '{}'"
.format(networkName))
plogging.info("- not found")
plogging.info("Destroying network domain '{}'".format(domainName))
plogging.info("- not found")
return
self._destroy_firewall_rules()
self._destroy_balancer()
self._release_ipv4()
plogging.info("Destroying Ethernet network '{}'".format(networkName))
network = self.get_ethernet(networkName)
if network is None:
plogging.info("- not found")
elif ('destroy' in blueprint['ethernet']
and blueprint['ethernet']['destroy'] == 'never'):
plogging.info("- this network can never be destroyed")
elif self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
retry = True
while True:
try:
self.region.ex_delete_vlan(vlan=network)
plogging.info("- in progress")
while True:
try:
time.sleep(10)
self.region.ex_get_vlan(vlan_id=network.id)
except Exception as feedback:
if 'RESOURCE_NOT_FOUND' in str(feedback):
break
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
elif 'HAS_DEPENDENCY' in str(feedback):
# give time to ensure nodes have been deleted
if retry:
retry = False
time.sleep(30)
continue
plogging.info("- not now - stuff on it")
return
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
plogging.info(feedback)
return
else:
plogging.info("- unable to destroy Ethernet network")
plogging.error(str(feedback))
return
break
plogging.info("Destroying network domain '{}'".format(domainName))
if 'multicloud' in blueprint \
and isinstance(blueprint['multicloud'], dict):
plogging.info("Destroying multicloud deployment")
self.terraform.destroy(blueprint['multicloud'], safe=self.plumbery.safeMode)
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
while True:
try:
self.region.ex_delete_network_domain(network_domain=domain)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
elif 'HAS_DEPENDENCY' in str(feedback):
plogging.info("- not now - stuff on it")
return
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return
else:
plogging.info("- unable to destroy Ethernet network")
plogging.error(str(feedback))
return
break
def _build_balancer(self):
"""
Adds load balancing for nodes in the blueprint
Example in the fittings plan::
- web:
domain: *vdc1
ethernet: *data
nodes:
- apache-[10..19]
balancer:
- http:
port: 80
protocol: http
- https:
port: 443
protocol: http
pool:
algorithm: round_robin
In this example, load balancing is configured to accept web traffic
and to distribute the workload across multiple web engines.
One balancer is configured for regular http protocol on port 80. The
other balancer is for secured web protocol, aka, https, on port 443.
The algorithm used by default is ``round_robin``. This parameter
        can take any value among the following:
* ``random``
* ``round_robin``
* ``least_connections``
* ``weighted_round_robin``
* ``weighted_least_connections``
* ``shortest_response``
* ``persistent_ip``
"""
if 'balancers' not in self.blueprint:
return True
domain = self.get_network_domain(self.blueprint['domain']['name'])
network = self.get_ethernet(self.blueprint['ethernet']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
pool = self._get_pool()
if pool is None:
if 'pool' in self.blueprint:
settings = self.blueprint['pool']
if not isinstance(settings, dict):
settings = {}
else:
settings = {}
name = self._name_pool()
if 'algorithm' in settings:
algorithm = settings['algorithm'].lower()
else:
algorithm = 'round_robin'
algorithms = [
'random',
'round_robin',
'least_connections',
'weighted_round_robin',
'weighted_least_connections',
'shortest_response',
'persistent_ip']
if algorithm not in algorithms:
raise PlumberyException(
"Error: unknown algorithm has been defined "
"for the pool '{}'!".format(name))
if 'description' not in settings:
settings['description'] = 'by plumbery'
plogging.info("Creating pool '{}'".format(name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
pool = driver.ex_create_pool(
network_domain_id=domain.id,
name=name,
balancer_method=algorithm,
ex_description=settings['description'],
health_monitors=None,
service_down_action='NONE',
slow_ramp_time=30)
if self._cache_pools is None:
self._cache_pools = []
self._cache_pools.append(pool)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create pool")
plogging.error(str(feedback))
for item in self.blueprint['balancers']:
if isinstance(item, dict):
label = list(item)[0]
settings = item[label]
else:
label = str(item)
settings = {}
name = self.name_balancer(label, settings)
if self._get_balancer(name):
plogging.info("Creating balancer '{}'".format(name))
plogging.info("- already there")
continue
if 'protocol' in settings:
protocol = settings['protocol']
else:
protocol = 'http'
protocols = ['http', 'https', 'tcp', 'udp']
if protocol not in protocols:
raise PlumberyException(
"Error: unknown protocol has been defined "
"for the balancer '{}'!".format(label))
if 'port' in settings:
port = str(settings['port'])
else:
port = '80'
if int(port) < 1 or int(port) > 65535:
raise PlumberyException(
"Error: invalid port has been defined "
"for the balancer '{}'!".format(label))
plogging.info("Creating balancer '{}'".format(name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
continue
try:
if 'address' in settings:
ip = settings['address']
else:
ip = self._get_ipv4()
balancer = driver.ex_create_virtual_listener(
network_domain_id=domain.id,
name=name,
ex_description="#plumbery",
listener_ip_address=ip,
port=port,
pool=pool,
persistence_profile=None,
fallback_persistence_profile=None,
irule=None,
protocol='TCP',
connection_limit=25000,
connection_rate_limit=2000,
source_port_preservation='PRESERVE')
if self._cache_balancers is None:
self._cache_balancers = []
self._cache_balancers.append(balancer)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
elif 'NO_IP_ADDRESS_AVAILABLE' in str(feedback):
plogging.info("- unable to create balancer")
plogging.error("Error: No more ipv4 address available "
"-- assign more")
raise
else:
plogging.info("- unable to create balancer")
plogging.error(str(feedback))
firewall = self.name_firewall_rule('Internet', name, port)
sourceIPv4 = DimensionDataFirewallAddress(
any_ip=True,
ip_address=network.private_ipv4_range_address,
ip_prefix_size=network.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
destinationIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=ip,
ip_prefix_size=None,
port_begin=port,
port_end=None,
address_list_id=None,
port_list_id=None)
rule = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=firewall,
location=network.location,
network_domain=network.network_domain,
status='NORMAL',
ip_version='IPV4',
protocol='TCP',
enabled='true',
source=sourceIPv4,
destination=destinationIPv4)
plogging.info("Creating firewall rule '{}'"
.format(firewall))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
self._ex_create_firewall_rule(
network_domain=domain,
rule=rule,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
return True
def _destroy_balancer(self):
"""
Destroys load balancer
"""
if 'balancers' not in self.blueprint:
return True
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
for item in self.blueprint['balancers']:
if isinstance(item, dict):
label = list(item)[0]
settings = item[label]
else:
label = str(item)
settings = {}
name = self.name_balancer(label, settings)
balancer = self._get_balancer(name)
plogging.info("Destroying balancer '{}'".format(name))
if balancer is None:
plogging.info("- not found")
continue
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
continue
try:
driver.destroy_balancer(balancer)
plogging.info("- in progress")
except Exception as feedback:
if 'NOT_FOUND' in str(feedback):
plogging.info("- not found")
else:
plogging.info("- unable to destroy balancer")
plogging.error(str(feedback))
pool = self._get_pool()
plogging.info("Destroying pool '{}'".format(self._name_pool()))
if pool is None:
plogging.info("- not found")
elif self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
driver.ex_destroy_pool(pool)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to destroy pool")
plogging.error(str(feedback))
plogging.info("Destroying pool nodes")
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
nodes = driver.ex_get_nodes(domain.id)
if len(nodes) > 0:
for node in nodes:
plogging.info("- destroying {}".format(node.name))
                        driver.ex_destroy_node(node.id)
plogging.info("- in progress")
else:
plogging.info("- nothing to do")
except Exception as feedback:
if 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
else:
plogging.info("- unable to destroy node")
plogging.error(str(feedback))
def name_balancer(self, label, settings={}):
return label \
+ '.' + self.blueprint['target'] \
+ '.' + self.facility.get_location_id().lower() \
+ '.balancer'
def _get_balancer(self, name):
"""
Retrieves a balancer attached to this blueprint
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
if driver is None:
return None
if domain is None:
return None
driver.ex_set_current_network_domain(domain.id)
if self._cache_balancers is None:
plogging.info("Listing balancers")
self._cache_balancers = driver.list_balancers()
plogging.info("- found {} balancers"
.format(len(self._cache_balancers)))
for balancer in self._cache_balancers:
if balancer.name.lower() == name.lower():
return balancer
return None
def _name_pool(self):
return self.blueprint['target'] \
+ '.' + self.facility.get_location_id().lower() \
+ '.pool'
def _get_pool(self):
"""
Retrieves the pool attached to this blueprint
"""
if 'pool' not in self.blueprint:
return None
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
name = self._name_pool()
if self._cache_pools is None:
plogging.info("Listing pools")
self._cache_pools = driver.ex_get_pools()
plogging.info("- found {} pools".format(len(self._cache_pools)))
for pool in self._cache_pools:
if pool.name.lower() == name.lower():
return pool
return None
def name_member(self, node):
return node.private_ips[0]
def _add_to_pool(self, node):
"""
Makes a node a new member of the pool
"""
if 'pool' not in self.blueprint:
return
pool = self._get_pool()
if pool is None:
return
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
plogging.info("Adding '{}' to pool '{}'".format(node.name, pool.name))
name = self.name_member(node)
members = driver.ex_get_pool_members(pool.id)
for member in members:
if member.name == name:
plogging.info("- already there")
return
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
try:
member = driver.ex_create_node(
network_domain_id=domain.id,
name=name,
ip=node.private_ips[0],
ex_description='#plumbery')
driver.ex_create_pool_member(
pool=pool,
node=member)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
plogging.error(str(feedback))
else:
plogging.info("- unable to add to pool")
plogging.error(str(feedback))
raise
def _detach_node_from_internet(self, node):
"""
Destroys address translation for one node
:param node: node that was reachable from the internet
:type node: :class:`libcloud.common.Node`
"""
internal_ip = node.private_ips[0]
domain = self.get_network_domain(self.blueprint['domain']['name'])
for rule in self.region.ex_list_nat_rules(domain):
if rule.internal_ip == internal_ip:
plogging.info("Detaching node '{}' from the internet"
.format(node.name))
while True:
try:
self.region.ex_delete_nat_rule(rule)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return
else:
plogging.info("- unable to remove "
"address translation")
plogging.error(str(feedback))
break
for rule in self._list_firewall_rules():
if rule.name.lower().startswith(node.name.lower()):
plogging.info("Destroying firewall rule '{}'"
.format(rule.name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
self.region.ex_delete_firewall_rule(rule)
plogging.info("- in progress")
def _get_ipv4(self):
"""
Provides a free public IPv4 if possible
This function looks at current IPv4 addresses reserved for the
target network domain, and adds more if needed.
Example to reserve 8 IPv4 addresses in the fittings plan::
- redis:
domain:
name: myVDC
ipv4: 8
If the directive `auto` is used, then plumbery does not check the
maximum number of addresses that can be provided.
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
if domain is None:
return None
addresses = self._list_ipv4()
if len(addresses) > 0:
plogging.debug('Pool of public IPv4 addresses:')
            plogging.debug('- {} addresses have been reserved'.format(
len(addresses)))
for reserved in self.ex_list_reserved_public_ip_addresses(domain):
addresses.remove(reserved)
plogging.debug('- {} available'.format(len(addresses)))
if len(addresses) > 0:
plogging.debug('Using address: {}'.format(addresses[0]))
return addresses[0]
actual = len(self._list_ipv4())
if 'ipv4' in self.blueprint['domain']:
count = self.blueprint['domain']['ipv4']
else:
count = self.get_default('ipv4', 2)
if str(count).lower() == 'auto':
count = actual + 2
if count < 2 or count > 128:
plogging.warning("Invalid count of requested IPv4 public addresses")
return None
if actual >= count:
plogging.error("Error: need more IPv4 address than allocated")
return None
plogging.info('Reserving additional public IPv4 addresses')
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return None
count = actual + 2
while actual < count:
try:
block = self.region.ex_add_public_ip_block_to_network_domain(
self.get_network_domain(self.blueprint['domain']['name']))
actual += int(block.size)
plogging.info("- reserved {} addresses"
.format(int(block.size)))
return block.base_ip
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return None
# compensate for bug in Libcloud driver
elif 'RESOURCE_NOT_FOUND' in str(feedback):
actual += 2
continue
else:
plogging.info("- unable to reserve IPv4 public addresses")
plogging.error(str(feedback))
return None
def _list_ipv4(self):
"""
Lists public IPv4 addresses that have been assigned to a domain
:return: the full list of public IPv4 addresses assigned to the domain
:rtype: ``list`` of ``str`` or ``[]``
"""
addresses = []
while True:
try:
blocks = self.region.ex_list_public_ip_blocks(
self.get_network_domain(self.blueprint['domain']['name']))
for block in blocks:
splitted = block.base_ip.split('.')
for ticker in xrange(int(block.size)):
addresses.append('.'.join(splitted))
splitted[3] = str(int(splitted[3])+1)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
else:
plogging.info("Unable to list IPv4 public addresses")
plogging.error(str(feedback))
return []
break
return addresses
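    # --- Added worked example (hedged; not part of the original module) ---
    # Illustration of the address expansion in _list_ipv4() above: a public
    # IP block with base_ip '168.128.2.8' and size 2 expands to the list
    # ['168.128.2.8', '168.128.2.9'], i.e. the block is walked by
    # incrementing the last octet of the base address once per address.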
def _release_ipv4(self):
"""
Releases public IPv4 addresses assigned to the blueprint
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
if len(self.region.ex_list_nat_rules(domain)) > 0:
return
blocks = self.region.ex_list_public_ip_blocks(
self.get_network_domain(self.blueprint['domain']['name']))
if len(blocks) < 1:
return
plogging.info('Releasing public IPv4 addresses')
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
for block in blocks:
while True:
try:
self.region.ex_delete_public_ip_block(block)
plogging.info('- in progress')
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'HAS_DEPENDENCY' in str(feedback):
plogging.info("- not now - stuff at '{}' and beyond"
.format(block.base_ip))
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
else:
plogging.info("- unable to release "
"IPv4 public addresses ")
plogging.error(str(feedback))
break
def _build_firewall_rules(self):
"""
Changes firewall settings to accept incoming traffic
This function adds firewall rules to allow traffic towards given
network. It looks at the ``accept`` settings in the blueprint to
identify all source networks.
Example in the fittings plan::
- web:
domain: *vdc1
ethernet:
name: gigafox.production
accept:
- gigafox.control
- dd-eu::EU6::other.network.there
In this example, the firewall is configured so that any ip traffic
from the Ethernet network ``gigafox.control`` can reach the Ethernet
network ``gigafox.production``. One rule is created for
IPv4 and another rule is created for IPv6.
The second network that is configured is from another data centre
in another region. This is leveraging the private network that
interconnect all MCPs. For networks outside the current domain, only
one rule is added to allow IPv6 traffic. This is because IPv4 routing
is not allowed across multiple network domains.
"""
if 'accept' not in self.blueprint['ethernet']:
return True
destination = self.get_ethernet(self.blueprint['ethernet']['name'])
if destination is None:
return True
destinationIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=destination.private_ipv4_range_address,
ip_prefix_size=destination.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
destinationIPv6 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=destination.ipv6_range_address,
ip_prefix_size=destination.ipv6_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
for item in self.blueprint['ethernet']['accept']:
if isinstance(item, dict):
label = list(item)[0]
else:
label = str(item)
source = self.get_ethernet(label)
if source is None:
plogging.debug("Source network '{}' is unknown".format(label))
continue
# avoid name collisions across local, remote and off-shore networks
tokens = label.split('::')
while len(tokens) > 2:
tokens.pop(0)
source_name = '-'.join(tokens)
ruleIPv4Name = self.name_firewall_rule(
source_name, destination.name, 'IP')
shouldCreateRuleIPv4 = True
if source.location.name != destination.location.name:
shouldCreateRuleIPv4 = False
elif source.network_domain.name != destination.network_domain.name:
shouldCreateRuleIPv4 = False
ruleIPv6Name = self.name_firewall_rule(
source_name, destination.name, 'IPv6')
shouldCreateRuleIPv6 = True
for rule in self._list_firewall_rules():
if (shouldCreateRuleIPv4
and rule.name.lower() == ruleIPv4Name.lower()):
plogging.info("Creating firewall rule '{}'"
.format(rule.name))
plogging.info("- already there")
shouldCreateRuleIPv4 = False
continue
if (shouldCreateRuleIPv6
and rule.name.lower() == ruleIPv6Name.lower()):
plogging.info("Creating firewall rule '{}'"
.format(rule.name))
plogging.info("- already there")
shouldCreateRuleIPv6 = False
continue
if shouldCreateRuleIPv4:
plogging.info("Creating firewall rule '{}'"
.format(ruleIPv4Name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
sourceIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=source.private_ipv4_range_address,
ip_prefix_size=source.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
ruleIPv4 = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=ruleIPv4Name,
location=destination.location,
network_domain=destination.network_domain,
status='NORMAL',
ip_version='IPV4',
protocol='IP',
enabled='true',
source=sourceIPv4,
destination=destinationIPv4)
try:
self._ex_create_firewall_rule(
network_domain=destination.network_domain,
rule=ruleIPv4,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
if shouldCreateRuleIPv6:
plogging.info("Creating firewall rule '{}'"
.format(ruleIPv6Name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
sourceIPv6 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=source.ipv6_range_address,
ip_prefix_size=source.ipv6_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
ruleIPv6 = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=ruleIPv6Name,
location=destination.location,
network_domain=destination.network_domain,
status='NORMAL',
ip_version='IPV6',
protocol='IP',
enabled='true',
source=sourceIPv6,
destination=destinationIPv6)
try:
self._ex_create_firewall_rule(
network_domain=destination.network_domain,
rule=ruleIPv6,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
ruleName = 'CCDEFAULT.DenyExternalInboundIPv6'
for rule in self._list_firewall_rules():
if rule.name.lower() == ruleName.lower():
plogging.info("Disabling firewall rule '{}'".format(ruleName))
try:
if rule.enabled:
self.region.ex_set_firewall_rule_state(rule, False)
plogging.info("- in progress")
else:
plogging.info("- already there")
except Exception as feedback:
plogging.info("- unable to disable firewall rule")
plogging.error(str(feedback))
return True
def _destroy_firewall_rules(self):
"""
Destroys firewall rules
"""
if 'accept' not in self.blueprint['ethernet']:
return True
destinationLabel = self.blueprint['ethernet']['name']
for item in self.blueprint['ethernet']['accept']:
if isinstance(item, dict):
label = list(item)[0]
else:
label = str(item)
sourceLabel = label.split('::').pop()
ruleIPv4Name = self.name_firewall_rule(
sourceLabel, destinationLabel, 'IP')
ruleIPv6Name = self.name_firewall_rule(
sourceLabel, destinationLabel, 'IPv6')
for rule in self._list_firewall_rules():
if rule.name == ruleIPv4Name or rule.name == ruleIPv6Name:
plogging.info("Destroying firewall rule '{}'"
.format(rule.name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
self.region.ex_delete_firewall_rule(rule)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
else:
plogging.info("- unable to destroy "
"firewall rule")
plogging.error(str(feedback))
def name_firewall_rule(self, source, destination, protocol):
"""
Provides a name for a firewall rule
:param source: name of the source network
:type source: ``str``
:param destination: name of the destination network
:type destination: ``str``
:param protocol: the protocol that will flow
:type protocol: ``str``
Use this function to ensure consistent naming across firewall rules.
Example::
>>>source='gigafox.control'
>>>destination='gigafox.production'
>>>protocol='IP'
>>>domain.name_firewall_rule(source, destination, protocol)
'FromGigafoxControlToGigafoxProduction.IP.plumbery'
"""
source = ''.join(e for e in source.title()
if e.isalnum() or e == '_')
destination = ''.join(e for e in destination.title()
if e.isalnum() or e == '_')
if source == 'Internet':
return "{}.{}.plumbery".format(destination, protocol)
else:
return "From{}To{}.{}.plumbery".format(source,
destination,
protocol)
@classmethod
def parse_firewall_port(cls, port):
"""
Parses port definition for a firewall rule
:param port: string definition of a target port
:type port: ``str``
:return: elements of the port definition
This function analyses the provided string and returns
a tuple that can be used for firewall configuration.
Some examples:
>>>container.parse_firewall_port('icmp')
('ICMP', 'any', None, None)
>>>container.parse_firewall_port('tcp:80')
('TCP', '80', '80', None)
>>>container.parse_firewall_port(':80')
('TCP', '80', '80', None)
>>>container.parse_firewall_port('80')
('TCP', '80', '80', None)
>>>container.parse_firewall_port('udp:137..138')
('UDP', '137..138', '137', '138')
>>>container.parse_firewall_port('any')
('TCP', 'any', None, None)
"""
protocols = ('ip', 'icmp', 'tcp', 'udp')
tokens = port.lower().strip(':').split(':')
if len(tokens) > 1: # example: 'TCP:80'
protocol = tokens[0].upper()
port = tokens[1]
elif tokens[0] in protocols: # example: 'icmp'
protocol = tokens[0].upper()
port = 'any'
else: # example: '80'
protocol = 'TCP'
port = tokens[0]
if protocol.lower() not in protocols:
raise ValueError("'{}' is not a valid protocol"
.format(protocol))
tokens = port.split('..')
if len(tokens) == 1:
if tokens[0].lower() == 'any':
port_begin = None
else:
port_begin = tokens[0]
port_end = None
else:
port_begin = tokens[0]
port_end = tokens[1]
return (protocol, port, port_begin, port_end)
def _list_candidate_firewall_rules(self, node, ports=[]):
"""
Lists rules that should apply to one node
:param node: node that has to be reachable from the internet
:type node: :class:`libcloud.common.Node`
:param ports: the ports that have to be opened, or ``any``
:type ports: a ``list`` of ``str``
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
network = self.get_ethernet(self.blueprint['ethernet']['name'])
internal_ip = node.private_ips[0]
external_ip = None
for rule in self.region.ex_list_nat_rules(domain):
if rule.internal_ip == internal_ip:
external_ip = rule.external_ip
if external_ip is None:
return {}
candidates = {}
if len(ports) < 1:
ports = ['any']
for port in ports:
protocol, port, port_begin, port_end = \
self.parse_firewall_port(port)
ruleIPv4Name = self.name_firewall_rule(
'Internet',
node.name, protocol+'v4_'+port)
sourceIPv4 = DimensionDataFirewallAddress(
any_ip=True,
ip_address=network.private_ipv4_range_address,
ip_prefix_size=network.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
destinationIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=external_ip,
ip_prefix_size=None,
port_begin=port_begin,
port_end=port_end,
address_list_id=None,
port_list_id=None)
ruleIPv4 = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=ruleIPv4Name,
location=network.location,
network_domain=network.network_domain,
status='NORMAL',
ip_version='IPV4',
protocol=protocol,
enabled='true',
source=sourceIPv4,
destination=destinationIPv4)
candidates[ruleIPv4Name] = ruleIPv4
return candidates
def _list_firewall_rules(self):
"""
Lists all existing rules for the current domain
"""
if len(self._cache_firewall_rules) < 1:
self._cache_firewall_rules = self.region.ex_list_firewall_rules(
self.get_network_domain(self.blueprint['domain']['name']))
return self._cache_firewall_rules
def _ex_create_firewall_rule(self, network_domain, rule, position):
create_node = ET.Element('createFirewallRule', {'xmlns': TYPES_URN})
ET.SubElement(create_node, "networkDomainId").text = network_domain.id
ET.SubElement(create_node, "name").text = rule.name
ET.SubElement(create_node, "action").text = rule.action
ET.SubElement(create_node, "ipVersion").text = rule.ip_version
ET.SubElement(create_node, "protocol").text = rule.protocol
# Setup source port rule
source = ET.SubElement(create_node, "source")
source_ip = ET.SubElement(source, 'ip')
if rule.source.any_ip:
source_ip.set('address', 'ANY')
else:
source_ip.set('address', rule.source.ip_address)
source_ip.set('prefixSize', str(rule.source.ip_prefix_size))
if rule.source.port_begin is not None:
source_port = ET.SubElement(source, 'port')
source_port.set('begin', rule.source.port_begin)
if rule.source.port_end is not None:
source_port.set('end', rule.source.port_end)
# Setup destination port rule
dest = ET.SubElement(create_node, "destination")
dest_ip = ET.SubElement(dest, 'ip')
if rule.destination.any_ip:
dest_ip.set('address', 'ANY')
else:
dest_ip.set('address', rule.destination.ip_address)
if rule.destination.ip_prefix_size is not None:
dest_ip.set('prefixSize', str(rule.destination.ip_prefix_size))
if rule.destination.port_begin is not None:
dest_port = ET.SubElement(dest, 'port')
dest_port.set('begin', rule.destination.port_begin)
if rule.destination.port_end is not None:
dest_port.set('end', rule.destination.port_end)
ET.SubElement(create_node, "enabled").text = 'true'
placement = ET.SubElement(create_node, "placement")
placement.set('position', position)
response = self.region.connection.request_with_orgId_api_2(
'network/createFirewallRule',
method='POST',
data=ET.tostring(create_node)).object
rule_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'firewallRuleId':
rule_id = info.get('value')
rule.id = rule_id
return rule
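    # --- Added payload sketch (hedged; not part of the original module) ---
    # Shape of the XML document built by _ex_create_firewall_rule() above,
    # shown with placeholder values; the real xmlns attribute carries
    # TYPES_URN and the addresses come from the rule object:
    #
    #   <createFirewallRule xmlns="...">
    #     <networkDomainId>...</networkDomainId>
    #     <name>FromAToB.IP.plumbery</name>
    #     <action>ACCEPT_DECISIVELY</action>
    #     <ipVersion>IPV4</ipVersion>
    #     <protocol>IP</protocol>
    #     <source><ip address="10.0.0.0" prefixSize="24"/></source>
    #     <destination><ip address="10.1.0.0" prefixSize="24"/></destination>
    #     <enabled>true</enabled>
    #     <placement position="LAST"/>
    #   </createFirewallRule>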
def ex_reserve_private_ip_addresses(self, vlan, address):
req = ET.Element('reservePrivateIpv4Address', {'xmlns': TYPES_URN})
ET.SubElement(req, "vlanId").text = vlan.id
ET.SubElement(req, "ipAddress").text = address
result = self.region.connection.request_with_orgId_api_2(
action='network/reservedPrivateIpv4Address',
method='POST',
data=ET.tostring(req)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_reserved_private_ip_addresses(self, vlan):
params = {}
params['vlanId'] = vlan.id
response = self.region.connection \
.request_with_orgId_api_2('network/reservedPrivateIpv4Address',
params=params).object
reserved = []
for element in findall(response, 'ipv4', TYPES_URN):
reserved.append(element.text)
return reserved
def ex_list_reserved_public_ip_addresses(self, network_domain):
params = {}
params['networkDomainId'] = network_domain.id
response = self.region.connection \
.request_with_orgId_api_2('network/reservedPublicIpv4Address',
params=params).object
reserved = []
for element in findall(response, 'ip', TYPES_URN):
reserved.append(element.text)
return reserved
|
apache-2.0
| -8,525,752,551,770,169,000
| 33.397296
| 88
| 0.515647
| false
| 4.863676
| false
| false
| false
|
ioos/catalog-ckan
|
ckanext/ioos_theme/controllers/feedback.py
|
1
|
3831
|
#!/usr/bin/env python
'''
ckanext/ioos_theme/controllers/feedback.py
IOOS Theme Feedback Controller
'''
from ckan.lib.base import BaseController, render, _
from ckan.lib import helpers as h
from ckan.common import request
from ckanext.ioos_theme.lib import feedback
from pylons import config
import logging
import urllib
import urllib2
import json
class FeedbackController(BaseController):
'''
The FeedbackController renders a Feedback Form and accepts an HTTP POST to
/feedback with the Form parameters. On a POST it will flash a notice
thanking the user for their feedback and then redirect to the home page.
'''
def index(self, data=None, errors=None, error_summary=None, package_name=None):
'''
Returns a render for the feedback form.
:param dict data: Unused
:param dict errors: Any validation errors that the user has entered
will be passed to the controller
:param dict error_summary: Summary of any validation errors
'''
name = ""
email = ""
feedback = ""
recaptcha_response = request.params.get('g-captcha-token')
url = 'https://www.google.com/recaptcha/api/siteverify'
values = {
'secret': config.get('feedback.site_secret', ''),
'response': recaptcha_response
}
url_data = urllib.urlencode(values)
req = urllib2.Request(url, url_data)
response = urllib2.urlopen(req)
result = json.load(response)
# If the HTTP request is POST
if request.params:
try:
# Left for reference during refactor to captcha V3
#if request.params['g-recaptcha-response']:
if result['success']:
return self._post_feedback()
else:
name = request.params['name']
email = request.params['email']
feedback = request.params['feedback']
h.flash_notice(_('Please fill out missing fields below.'))
except KeyError:
name = request.params['name']
email = request.params['email']
feedback = request.params['feedback']
h.flash_notice(_('Please fill out missing fields below.'))
data = data or {"name": "", "email": "", "feedback": ""}
data['name'] = name or ""
data['email'] = email or ""
data['feedback'] = feedback or ""
errors = errors or {}
error_summary = error_summary or {}
site_key = config.get('feedback.site_key', '')
token = config.get('feedback.g-captcha-token', '')
if not site_key:
logging.warning('Administrator must setup feedback.site_key')
vars = {
'package_name': package_name,
'data': data,
'errors': errors,
'error_summary': error_summary,
'feedback_site_key': site_key
}
return render('feedback/form.html', extra_vars=vars)
def _post_feedback(self):
'''
Redirects the user to the home page and flashes a message,
acknowledging the feedback.
'''
context = {
'name': request.params['name'],
'email': request.params['email'],
'feedback': request.params['feedback'],
'package_name': request.params.get('package_name'),
'referrer': request.referrer
}
feedback.send_feedback(context)
h.flash_notice(_('Thank you for your feedback'))
if context['package_name'] is None:
h.redirect_to(controller='home', action='index')
else:
h.redirect_to(controller='package', action='read', id=context['package_name'])
return
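# --- Added configuration sketch (hedged; not part of the original file) ---
# The controller above reads its reCAPTCHA settings from the CKAN
# configuration. An illustrative ini fragment, with the keys taken from the
# config.get() calls above and placeholder values:
#
#     feedback.site_key = <recaptcha-site-key>
#     feedback.site_secret = <recaptcha-site-secret>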
|
agpl-3.0
| 8,250,284,174,334,944,000
| 35.141509
| 90
| 0.577656
| false
| 4.408516
| false
| false
| false
|
ned14/Bugs-Everywhere-for-BEurtle
|
libbe/command/severity.py
|
1
|
4051
|
# Copyright (C) 2005-2012 Aaron Bentley <abentley@panoramicfeedback.com>
# Chris Ball <cjb@laptop.org>
# Gianluca Montecchi <gian@grys.it>
# Marien Zwart <marien.zwart@gmail.com>
# Thomas Gerigk <tgerigk@gmx.de>
# Tim Guirgies <lt.infiltrator@gmail.com>
# W. Trevor King <wking@drexel.edu>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import libbe
import libbe.bug
import libbe.command
import libbe.command.util
class Severity (libbe.command.Command):
"""Change a bug's severity level
>>> import sys
>>> import libbe.bugdir
>>> bd = libbe.bugdir.SimpleBugDir(memory=False)
>>> io = libbe.command.StringInputOutput()
>>> io.stdout = sys.stdout
>>> ui = libbe.command.UserInterface(io=io)
>>> ui.storage_callbacks.set_bugdir(bd)
>>> cmd = Severity(ui=ui)
>>> bd.bug_from_uuid('a').severity
'minor'
>>> ret = ui.run(cmd, args=['wishlist', '/a'])
>>> bd.flush_reload()
>>> bd.bug_from_uuid('a').severity
'wishlist'
>>> ret = ui.run(cmd, args=['none', '/a'])
Traceback (most recent call last):
UserError: Invalid severity level: none
>>> ui.cleanup()
>>> bd.cleanup()
"""
name = 'severity'
def __init__(self, *args, **kwargs):
libbe.command.Command.__init__(self, *args, **kwargs)
self.args.extend([
libbe.command.Argument(
name='severity', metavar='SEVERITY', default=None,
completion_callback=libbe.command.util.complete_severity),
libbe.command.Argument(
name='bug-id', metavar='BUG-ID', default=None,
repeatable=True,
completion_callback=libbe.command.util.complete_bug_id),
])
def _run(self, **params):
bugdir = self._get_bugdir()
for bug_id in params['bug-id']:
bug,dummy_comment = \
libbe.command.util.bug_comment_from_user_id(bugdir, bug_id)
if bug.severity != params['severity']:
try:
bug.severity = params['severity']
except ValueError, e:
if e.name != 'severity':
raise e
raise libbe.command.UserError(
'Invalid severity level: %s' % e.value)
return 0
def _long_help(self):
try: # See if there are any per-tree severity configurations
bd = self._get_bugdir()
except NotImplementedError:
pass # No tree, just show the defaults
longest_severity_len = max([len(s) for s in libbe.bug.severity_values])
severity_levels = []
for severity in libbe.bug.severity_values :
description = libbe.bug.severity_description[severity]
s = '%*s : %s' % (longest_severity_len, severity, description)
severity_levels.append(s)
ret = """
Show or change a bug's severity level.
If no severity is specified, the current value is printed. If a severity level
is specified, it will be assigned to the bug.
Severity levels are:
%s
You can override the list of allowed severities on a per-repository
basis. See `be set --help` for details.
""" % ('\n '.join(severity_levels))
return ret
|
gpl-2.0
| 8,860,493,650,325,930,000
| 37.216981
| 79
| 0.596396
| false
| 3.785981
| false
| false
| false
|
rkomartin/user-recs-example
|
util/process_movielens.py
|
1
|
1215
|
import json
import sys
from os.path import join
'''
Read movielens data into Veritable-ready json
'''
def main(input_file, output_dir):
data = {}
columns = set()
with open(input_file) as fd:
for line in fd:
tokens = line.split('\t')
user_id = 'U{}'.format(tokens[0]).decode()
movie_id = 'M{}'.format(tokens[1]).decode()
rating = tokens[2]
if user_id not in data:
data[user_id] = { '_id': user_id }
data[user_id][movie_id] = rating
columns.add(movie_id)
# Add dummy data to ensure that each possible rating is observed at
# least once for each movie
for i in range(5):
user_id = 'FU{}'.format(i)
data[user_id] = dict([(m, str(i+1)) for m in columns])
data[user_id]['_id'] = user_id
rows = data.values()
schema = dict([(c, { 'type': 'categorical' }) for c in columns])
open(join(output_dir, 'movielens_data.json'), 'wb').write(
json.dumps(rows, indent=2))
open(join(output_dir, 'movielens_schema.json'), 'wb').write(
json.dumps(schema, indent=2))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
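# --- Added usage note (hedged; not part of the original script) ---
# Illustrative invocation, assuming a MovieLens-style ratings file where each
# line is tab-separated as "user_id<TAB>movie_id<TAB>rating<TAB>timestamp",
# for example "196\t242\t3\t881250949". Running
#     python process_movielens.py u.data out/
# would then write out/movielens_data.json and out/movielens_schema.json.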
|
mit
| -8,084,242,574,354,765,000
| 29.375
| 72
| 0.549794
| false
| 3.375
| false
| false
| false
|
nosyndicate/pytorchrl
|
pytorchrl/distributions/diagonal_gaussian.py
|
1
|
4283
|
import numpy as np
import torch
from pytorchrl.distributions.base import Distribution
from pytorchrl.misc.tensor_utils import constant
class DiagonalGaussian(Distribution):
"""
    Rather than a single distribution, this represents a collection of
    independent one-dimensional Gaussians, i.e. a diagonal multivariate Gaussian.
"""
def __init__(self, means, log_stds):
"""
Parameters
----------
means (Variable):
log_stds (Variable):
"""
self.means = means
self.log_stds = log_stds
# dim is the dimension of action space
self.dim = self.means.size()[-1]
@classmethod
def from_dict(cls, means, log_stds):
"""
Parameters
----------
means (Variable):
log_std (Variable):
"""
return cls(means=means, log_stds=log_stds)
def entropy(self):
"""
Entropy of gaussian distribution is given by
1/2 * log(2 * \pi * e * sigma^2)
        = log(sqrt(2 * \pi * e) * sigma)
= log(sigma) + log(sqrt(2 * \pi * e))
"""
return np.sum(self.log_stds.data.numpy() + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)
def log_likelihood(self, a):
"""
Compute log likelihood of a.
Parameters
----------
a (Variable):
Returns
-------
logli (Variable)
"""
# First cast into float tensor
a = a.type(torch.FloatTensor)
# Convert into a sample of standard normal
zs = (a - self.means) / (self.log_stds.exp())
# TODO (ewei), I feel this equation is not correct.
# Mainly the first line
# TODO (ewei), still need to understand what is meaning of having
# -1 for axis in sum method, (same for numpy)
logli = - self.log_stds.sum(-1) - \
constant(0.5) * zs.pow(2).sum(-1) - \
constant(0.5) * constant(float(self.dim)) * constant(float(np.log(2 * np.pi)))
return logli
def kl_div(self, other):
"""
Given the distribution parameters of two diagonal multivariate Gaussians,
compute their KL divergence (vectorized)
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Kullback.E2.80.93Leibler_divergence_for_multivariate_normal_distributions
In general, for two n-dimensional distributions, we have
D_KL(N1||N2) =
1/2 ( tr(Σ_2^{-1}Σ_1) + (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) - n + ln(det(Σ_2) / det(Σ_1)) )
Here, Σ_1 and Σ_2 are diagonal. Hence this equation can be simplified.
In terms of the parameters of this method,
determinant of diagonal matrix is product of diagonal, thus
- ln(det(Σ_2) / det(Σ_1)) = sum(2 * (log_stds_2 - log_stds_1), axis=-1)
inverse of diagonal matrix is the diagonal matrix of elements at diagonal inverted, thus
- (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) = sum((means_1 - means_2)^2 / vars_2, axis=-1)
trace is sum of the diagonal elements
- tr(Σ_2^{-1}Σ_1) = sum(vars_1 / vars_2, axis=-1)
Where
- vars_1 = exp(2 * log_stds_1)
- vars_2 = exp(2 * log_stds_2)
Combined together, we have
D_KL(N1||N2)
= 1/2 ( tr(Σ_2^{-1}Σ_1) + (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) - n + ln(det(Σ_2) / det(Σ_1)) )
= sum(1/2 * ((vars_1 - vars_2) / vars_2 + (means_1 - means_2)^2 / vars_2 + 2 * (log_stds_2 - log_stds_1)), axis=-1)
= sum( ((means_1 - means_2)^2 + vars_1 - vars_2) / (2 * vars_2) + (log_stds_2 - log_stds_1)), axis=-1)
Parameters
----------
other (DiagonalGaussian):
Returns
-------
kl_div (Variable):
"""
# Constant should wrap in Variable to multiply with another Variable
# TODO (ewei) kl seems have problem
variance = (constant(2.0) * self.log_stds).exp()
other_variance = (constant(2.0) * other.log_stds).exp()
numerator = (self.means - other.means).pow(2) + \
variance - other_variance
denominator = constant(2.0) * other_variance + constant(1e-8)
# TODO (ewei), -1 for sum has a big impact, need to figure out why
kl_div = (numerator / denominator + other.log_stds - self.log_stds).sum(-1)
return kl_div
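# --- Added usage sketch (hedged; not part of the upstream module) ---
# A minimal sanity check of the formulas above. It assumes an older PyTorch
# release where torch.autograd.Variable wraps tensors, matching the style
# used throughout this module; the values are illustrative only.
if __name__ == '__main__':
    from torch.autograd import Variable

    # Three independent dimensions with sigma = 1 (log_std = 0).
    means = Variable(torch.zeros(1, 3))
    log_stds = Variable(torch.zeros(1, 3))
    dist = DiagonalGaussian(means, log_stds)

    # Per-dimension entropy of a unit Gaussian is log(sqrt(2*pi*e)) ~= 1.4189,
    # so the total over three dimensions should be close to 4.2568.
    print(dist.entropy())

    # The KL divergence of a distribution against itself should be ~0.
    print(dist.kl_div(dist))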
|
mit
| 3,443,545,097,000,983,600
| 32.234375
| 147
| 0.547485
| false
| 3.234981
| false
| false
| false
|
quantumlib/OpenFermion
|
src/openfermion/utils/grid.py
|
1
|
11136
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy
import scipy
import scipy.linalg
# Exceptions.
class OrbitalSpecificationError(Exception):
pass
class Grid:
"""
A multi-dimension grid of points with an assigned length scale.
    This grid acts as a helper class for parallelepiped super cells. It
tracks a mapping from indices to grid points and stores the associated
reciprocal lattice with respect to the original real-space lattice.
This enables calculations with non-trivial unit cells.
Attributes:
        dimensions (int): Number of spatial dimensions the grid occupies
length (tuple of ints): d-length tuple specifying number of points
along each dimension.
shifts (list of ints): Integer shifts in position to center grid.
scale (ndarray): Vectors defining the super cell being simulated,
vectors are stored as columns in the matrix.
        volume (float): Total volume of the supercell parallelepiped.
num_points (int): Total number of points in the grid.
reciprocal_scale (ndarray): Vectors defining the reciprocal lattice.
The vectors are stored as the columns in the matrix.
"""
def __init__(self, dimensions, length, scale):
"""
Args:
dimensions (int): The number of dimensions the grid lives in.
length (int or tuple): The number of points along each grid axis
that will be taken in both reciprocal and real space.
If tuple, it is read for each dimension, otherwise assumed
uniform.
scale (float or ndarray): The total length of each grid dimension.
If a float is passed, the uniform cubic unit cell is assumed.
                For an ndarray, `dimensions` linearly independent vectors of the correct
dimension must be passed. We assume column vectors define
the supercell vectors.
"""
if not isinstance(dimensions, int) or dimensions <= 0:
raise ValueError(
'dimensions must be a positive int but was {} {}'.format(
type(dimensions), repr(dimensions)))
if ((not isinstance(length, int) or length < 0) and
(not isinstance(length, tuple)) and (not isinstance(length, list))):
raise ValueError('length must be a non-negative int or tuple '
'but was {} {}'.format(type(length), repr(length)))
if ((not isinstance(scale, float) or not scale > 0) and
(not isinstance(scale, numpy.ndarray))):
raise ValueError(
'scale must be a positive float or ndarray but was '
'{} {}'.format(type(scale), repr(scale)))
self.dimensions = dimensions
# If single integer, assume uniform
if isinstance(length, int):
self.length = (length,) * dimensions
else:
self.length = length
self.shifts = [self.length[i] // 2 for i in range(dimensions)]
# If single float, construct cubic unit cell
if isinstance(scale, float):
self.scale = numpy.diag([scale] * self.dimensions)
else:
self.scale = scale
# Compute the volume of the super cell
self.volume = numpy.abs(scipy.linalg.det(self.scale))
# Compute total number of points
self.num_points = numpy.prod(self.length)
# Compute the reciprocal lattice basis
self.reciprocal_scale = 2 * numpy.pi * scipy.linalg.inv(self.scale).T
def volume_scale(self):
"""
Returns:
float: The volume of a length-scale hypercube within the grid.
"""
return self.volume
def all_points_indices(self):
"""
Returns:
iterable[tuple[int]]:
The index-coordinate tuple of each point in the grid.
"""
return itertools.product(
*[range(self.length[i]) for i in range(self.dimensions)])
def position_vector(self, position_indices):
"""Given grid point coordinate, return position vector with dimensions.
Args:
position_indices (int|iterable[int]):
List or tuple of integers giving grid point coordinate.
Allowed values are ints in [0, grid_length).
Returns:
position_vector (numpy.ndarray[float])
"""
# Raise exceptions.
if isinstance(position_indices, int):
position_indices = [position_indices]
if not all(0 <= e < self.length[i]
for i, e in enumerate(position_indices)):
raise OrbitalSpecificationError(
'Position indices must be integers in [0, grid_length).')
# Compute position vector
vector = sum([
(float(n - self.shifts[i]) / self.length[i]) * self.scale[:, i]
for i, n in enumerate(position_indices)
])
return vector
def momentum_vector(self, momentum_indices, periodic=True):
"""Given grid point coordinate, return momentum vector with dimensions.
Args:
momentum_indices (list): integers giving momentum
indices. Allowed values are ints in [0, grid_length).
periodic (bool): Wrap the momentum indices according to periodicity
Returns:
momentum_vector: A numpy array giving the momentum vector with
dimensions.
"""
# Raise exceptions.
if isinstance(momentum_indices, int):
momentum_indices = [momentum_indices]
if (not all(0 <= e < self.length[i]
for i, e in enumerate(momentum_indices))):
raise OrbitalSpecificationError(
'Momentum indices must be integers in [0, grid_length).')
# Compute momentum vector.
momentum_ints = self.index_to_momentum_ints(momentum_indices)
vector = self.momentum_ints_to_value(momentum_ints, periodic)
return vector
def index_to_momentum_ints(self, index):
"""
Args:
index (tuple): d-dimensional tuple specifying index in the grid
Returns:
Integer momentum vector
"""
# Set baseline for grid between [-N//2, N//2]
momentum_int = [
index[i] - self.shifts[i] for i in range(self.dimensions)
]
return numpy.array(momentum_int, dtype=int)
def momentum_ints_to_index(self, momentum_ints):
"""
Args:
momentum_ints (tuple): d-dimensional tuple momentum integers
Returns:
d-dimensional tuples of indices
"""
indices = momentum_ints
# Shift to indices
indices = [n + self.shifts[i] for i, n in enumerate(indices)]
# Wrap dimensions
indices = [n % self.length[i] for i, n in enumerate(indices)]
return indices
def momentum_ints_to_value(self, momentum_ints, periodic=True):
"""
Args:
momentum_ints (tuple): d-dimensional tuple momentum integers
periodic (bool): Alias the momentum
Returns:
ndarray containing the momentum vector.
"""
# Alias the higher momentum modes
if periodic:
momentum_ints = self.index_to_momentum_ints(
self.momentum_ints_to_index(momentum_ints))
momentum_vector = sum([
n * self.reciprocal_scale[:, i] for i, n in enumerate(momentum_ints)
])
return momentum_vector
def orbital_id(self, grid_coordinates, spin=None):
"""Return the tensor factor of a orbital
with given coordinates and spin.
Args:
grid_coordinates: List or tuple of ints giving coordinates of grid
element. Acceptable to provide an int(instead of tuple or list)
for 1D case.
spin (bool): 0 means spin down and 1 means spin up.
If None, assume spinless model.
Returns:
tensor_factor (int):
tensor factor associated with provided orbital label.
"""
# Initialize.
if isinstance(grid_coordinates, int):
grid_coordinates = [grid_coordinates]
# Loop through dimensions of coordinate tuple.
tensor_factor = 0
for dimension, grid_coordinate in enumerate(grid_coordinates):
# Make sure coordinate is an integer in the correct bounds.
if (isinstance(grid_coordinate, int) and
grid_coordinate < self.length[dimension]):
tensor_factor += (grid_coordinate *
int(numpy.product(self.length[:dimension])))
else:
# Raise for invalid model.
raise OrbitalSpecificationError(
'Invalid orbital coordinates provided.')
# Account for spin and return.
if spin is None:
return tensor_factor
else:
tensor_factor *= 2
tensor_factor += spin
return tensor_factor
def grid_indices(self, qubit_id, spinless):
"""This function is the inverse of orbital_id.
Args:
qubit_id (int): The tensor factor to map to grid indices.
spinless (bool): Whether to use the spinless model or not.
Returns:
grid_indices (numpy.ndarray[int]):
The location of the qubit on the grid.
"""
if not (numpy.product(self.length) * (2 - spinless) > qubit_id >= 0):
raise OrbitalSpecificationError('Invalid qubit_id provided.')
# Remove spin degree of freedom if it exists.
orbital_id = qubit_id
if not spinless:
orbital_id //= 2
# Get grid indices.
grid_indices = []
for dimension in range(self.dimensions):
remainder = (orbital_id %
int(numpy.product(self.length[:dimension + 1])))
grid_index = (remainder //
int(numpy.product(self.length[:dimension])))
grid_indices += [grid_index]
return grid_indices
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self.dimensions == other.dimensions and
(self.scale == other.scale).all() and
self.length == other.length)
def __ne__(self, other):
return not self == other
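# --- Added usage sketch (hedged; not part of the upstream module) ---
# A minimal round trip between orbital_id() and grid_indices() on a small
# two-dimensional grid; the expected numbers follow directly from the
# mapping implemented above and are shown for illustration only.
if __name__ == '__main__':
    grid = Grid(dimensions=2, length=3, scale=1.0)

    # Spinful orbital at grid coordinate (1, 2) with spin=0 (spin down):
    # tensor factor is (1 * 1 + 2 * 3) * 2 + 0 = 14.
    tensor_factor = grid.orbital_id((1, 2), spin=0)
    print(tensor_factor)                                      # expected: 14

    # Inverting the mapping recovers the grid coordinate.
    print(grid.grid_indices(tensor_factor, spinless=False))   # expected: [1, 2]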
|
apache-2.0
| -896,618,528,948,675,100
| 36.494949
| 80
| 0.592672
| false
| 4.698734
| false
| false
| false
|
abetusk/www.meowcad.com
|
cgi/picModLibSentry.py
|
1
|
1864
|
#!/usr/bin/python
#
import os
import cgi
import cgitb
import sys
import meowaux as mew
import urllib
import Cookie
import json
cgitb.enable();
#print "Content-Type: text/html"
#print
cookie = Cookie.SimpleCookie()
cookie_hash = mew.getCookieHash( os.environ )
g_debug = False
def log_line( l ):
logf = open("/tmp/picmodlibsentry.log", "a")
logf.write( l + "\n")
logf.close()
def error_and_quit():
if g_debug:
log_line("error, quitting")
print "Status: 404 Not Found"
print
print "File not found"
sys.exit(0)
fields = cgi.FieldStorage()
if "data" not in fields:
if g_debug:
log_line("no data")
error_and_quit()
userId = None
sessionId = None
projectId = None
if ("userId" in fields) and ("sessionId" in fields):
if mew.authenticateSession( fields["userId"].value, fields["sessionId"].value ):
userId = fields["userId"].value
sessionId = fields["sessionId"].value
if "projectId" in fields:
projectId = fields["projectId"].value
if ( ("userId" in cookie_hash) and ("sessionId" in cookie_hash) and
( mew.authenticateSession( cookie_hash["userId"], cookie_hash["sessionId"] ) == 1) ):
userId = cookie_hash["userId"]
sessionId = cookie_hash["sessionId"]
if "projectId" in fields:
projectId = fields["projectId"].value
#raw_name = urllib.unquote( fields["data"].value )
raw_name = fields["data"].value
jsfnstr = mew.file_cascade_fn( userId, projectId, raw_name )
jsfn = json.loads( jsfnstr )
if jsfn["type"] != "success":
log_line( jsfnstr )
log_line( "raw_name: " + str(raw_name) )
error_and_quit()
fn = jsfn["filename"]
try:
with open( fn ) as pic_fd:
d = pic_fd.read()
print "Content-Type: image/png"
print
print d
except IOError as e:
if g_debug:
s_e = str(e)
log_line("error opening file (2) " + fileId + ", got '" + s_e + "'")
error_and_quit()
|
agpl-3.0
| -7,365,819,750,410,541,000
| 20.929412
| 90
| 0.65397
| false
| 2.954041
| false
| false
| false
|
alphapigger/igetui
|
igetui/google/protobuf/message_factory.py
|
1
|
4235
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from . import descriptor_database
from . import descriptor_pool
from . import message
from . import reflection
class MessageFactory(object):
"""Factory for creating Proto2 messages from descriptors in a pool."""
def __init__(self):
"""Initializes a new factory."""
self._classes = {}
def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor.full_name not in self._classes:
result_class = reflection.GeneratedProtocolMessageType(
descriptor.name.encode('ascii', 'ignore'),
(message.Message,),
{'DESCRIPTOR': descriptor})
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
return self._classes[descriptor.full_name]
_DB = descriptor_database.DescriptorDatabase()
_POOL = descriptor_pool.DescriptorPool(_DB)
_FACTORY = MessageFactory()
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary containing all the message types in the files mapping the
fully qualified name to a Message subclass for the descriptor.
"""
result = {}
for file_proto in file_protos:
_DB.Add(file_proto)
for file_proto in file_protos:
for desc in _GetAllDescriptors(file_proto.message_type, file_proto.package):
result[desc.full_name] = _FACTORY.GetPrototype(desc)
return result
def _GetAllDescriptors(desc_protos, package):
"""Gets all levels of nested message types as a flattened list of descriptors.
Args:
desc_protos: The descriptor protos to process.
package: The package where the protos are defined.
Yields:
Each message descriptor for each nested type.
"""
for desc_proto in desc_protos:
name = '.'.join((package, desc_proto.name))
yield _POOL.FindMessageTypeByName(name)
for nested_desc in _GetAllDescriptors(desc_proto.nested_type, name):
yield nested_desc
|
mit
| -3,856,056,638,776,542,700
| 35.477876
| 80
| 0.716883
| false
| 4.34359
| false
| false
| false
|
ama-jharrison/agdc
|
agdc/api/source/main/python/datacube/api/tool/retrieve_dataset_stack.py
|
1
|
12976
|
#!/usr/bin/env python
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
__author__ = "Simon Oldfield"
import logging
import os
from datacube.api import dataset_type_arg, writeable_dir, output_format_arg
from datacube.api.model import DatasetType
from datacube.api.tool import CellTool
from datacube.api.utils import get_mask_pqa, get_mask_wofs, get_dataset_data_masked, format_date, OutputFormat, \
get_mask_vector_for_cell
from datacube.api.utils import get_dataset_band_stack_filename
from datacube.api.utils import get_band_name_union, get_band_name_intersection
from datacube.api.utils import get_dataset_ndv, get_dataset_datatype, get_dataset_metadata
from enum import Enum
_log = logging.getLogger()
class BandListType(Enum):
__order__ = "EXPLICIT ALL COMMON"
EXPLICIT = "EXPLICIT"
ALL = "ALL"
COMMON = "COMMON"
class RetrieveDatasetStackTool(CellTool):
def __init__(self, name):
# Call method on super class
# super(self.__class__, self).__init__(name)
CellTool.__init__(self, name)
self.dataset_type = None
self.bands = None
self.output_directory = None
self.overwrite = None
self.list_only = None
self.output_format = None
def setup_arguments(self):
# Call method on super class
# super(self.__class__, self).setup_arguments()
CellTool.setup_arguments(self)
self.parser.add_argument("--dataset-type", help="The type(s) of dataset to retrieve",
action="store",
dest="dataset_type",
type=dataset_type_arg,
choices=self.get_supported_dataset_types(), default=DatasetType.ARG25, required=True,
metavar=" ".join([s.name for s in self.get_supported_dataset_types()]))
group = self.parser.add_mutually_exclusive_group()
# TODO explicit list of bands
# group.add_argument("--bands", help="List of bands to retrieve", action="store")
group.add_argument("--bands-all", help="Retrieve all bands with NULL values where the band is N/A",
action="store_const", dest="bands", const=BandListType.ALL)
group.add_argument("--bands-common", help="Retrieve only bands in common across all satellites",
action="store_const", dest="bands", const=BandListType.COMMON)
self.parser.set_defaults(bands=BandListType.ALL)
self.parser.add_argument("--output-directory", help="Output directory", action="store", dest="output_directory",
type=writeable_dir, required=True)
self.parser.add_argument("--overwrite", help="Over write existing output file", action="store_true",
dest="overwrite", default=False)
self.parser.add_argument("--list-only",
help="List the datasets that would be retrieved rather than retrieving them",
action="store_true", dest="list_only", default=False)
self.parser.add_argument("--output-format", help="The format of the output dataset",
action="store",
dest="output_format",
type=output_format_arg,
choices=OutputFormat, default=OutputFormat.GEOTIFF,
metavar=" ".join([f.name for f in OutputFormat]))
def process_arguments(self, args):
# Call method on super class
# super(self.__class__, self).process_arguments(args)
CellTool.process_arguments(self, args)
self.dataset_type = args.dataset_type
if args.bands == BandListType.ALL:
self.bands = get_band_name_union(self.dataset_type, self.satellites)
else:
self.bands = get_band_name_intersection(self.dataset_type, self.satellites)
self.output_directory = args.output_directory
self.overwrite = args.overwrite
self.list_only = args.list_only
self.output_format = args.output_format
def log_arguments(self):
# Call method on super class
# super(self.__class__, self).log_arguments()
CellTool.log_arguments(self)
_log.info("""
datasets to retrieve = {dataset_type}
bands to retrieve = {bands}
output directory = {output}
over write existing = {overwrite}
list only = {list_only}
output format = {output_format}
""".format(dataset_type=self.dataset_type.name,
bands=self.bands,
output=self.output_directory,
overwrite=self.overwrite,
list_only=self.list_only,
output_format=self.output_format.name))
def get_tiles(self):
return list(self.get_tiles_from_db())
def get_tiles_from_db(self):
from datacube.api.query import list_tiles
x_list = [self.x]
y_list = [self.y]
dataset_types = [self.dataset_type]
if self.mask_pqa_apply and DatasetType.PQ25 not in dataset_types:
dataset_types.append(DatasetType.PQ25)
if self.mask_wofs_apply and DatasetType.WATER not in dataset_types:
dataset_types.append(DatasetType.WATER)
for tile in list_tiles(x=x_list, y=y_list,
acq_min=self.acq_min, acq_max=self.acq_max,
satellites=[satellite for satellite in self.satellites],
dataset_types=dataset_types):
yield tile
def go(self):
        # If we are applying a vector mask then calculate it now (once, as it is the same for all tiles)
mask = None
if self.mask_vector_apply:
mask = get_mask_vector_for_cell(self.x, self.y, self.mask_vector_file, self.mask_vector_layer, self.mask_vector_feature)
        # TODO move the band handling logic into utils?
import gdal
driver = raster = None
metadata = None
data_type = ndv = None
tiles = self.get_tiles()
_log.info("Total tiles found [%d]", len(tiles))
for band_name in self.bands:
_log.info("Creating stack for band [%s]", band_name)
relevant_tiles = []
for tile in tiles:
dataset = self.dataset_type in tile.datasets and tile.datasets[self.dataset_type] or None
if not dataset:
_log.info("No applicable [%s] dataset for [%s]", self.dataset_type.name, tile.end_datetime)
continue
if band_name in [b.name for b in tile.datasets[self.dataset_type].bands]:
relevant_tiles.append(tile)
_log.info("Total tiles for band [%s] is [%d]", band_name, len(relevant_tiles))
for index, tile in enumerate(relevant_tiles, start=1):
dataset = tile.datasets[self.dataset_type]
assert dataset
band = dataset.bands[band_name]
assert band
if self.list_only:
_log.info("Would stack band [%s] from dataset [%s]", band.name, dataset.path)
continue
pqa = (self.mask_pqa_apply and DatasetType.PQ25 in tile.datasets) and tile.datasets[DatasetType.PQ25] or None
wofs = (self.mask_wofs_apply and DatasetType.WATER in tile.datasets) and tile.datasets[DatasetType.WATER] or None
if self.dataset_type not in tile.datasets:
_log.debug("No [%s] dataset present for [%s] - skipping", self.dataset_type.name, tile.end_datetime)
continue
filename = os.path.join(self.output_directory,
get_dataset_band_stack_filename(dataset, band,
output_format=self.output_format,
mask_pqa_apply=self.mask_pqa_apply,
mask_wofs_apply=self.mask_wofs_apply,
mask_vector_apply=self.mask_vector_apply))
if not metadata:
metadata = get_dataset_metadata(dataset)
assert metadata
if not data_type:
data_type = get_dataset_datatype(dataset)
assert data_type
if not ndv:
ndv = get_dataset_ndv(dataset)
assert ndv
if not driver:
if self.output_format == OutputFormat.GEOTIFF:
driver = gdal.GetDriverByName("GTiff")
elif self.output_format == OutputFormat.ENVI:
driver = gdal.GetDriverByName("ENVI")
assert driver
if not raster:
if self.output_format == OutputFormat.GEOTIFF:
raster = driver.Create(filename, metadata.shape[0], metadata.shape[1], len(tiles), data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
elif self.output_format == OutputFormat.ENVI:
raster = driver.Create(filename, metadata.shape[0], metadata.shape[1], len(tiles), data_type, options=["INTERLEAVE=BSQ"])
assert raster
# NOTE: could do this without the metadata!!
raster.SetGeoTransform(metadata.transform)
raster.SetProjection(metadata.projection)
raster.SetMetadata(self.generate_raster_metadata())
# mask = None
if pqa:
mask = get_mask_pqa(pqa, self.mask_pqa_mask, mask=mask)
if wofs:
mask = get_mask_wofs(wofs, self.mask_wofs_mask, mask=mask)
_log.info("Stacking [%s] band data from [%s] with PQA [%s] and PQA mask [%s] and WOFS [%s] and WOFS mask [%s] to [%s]",
band.name, dataset.path,
pqa and pqa.path or "",
pqa and self.mask_pqa_mask or "",
wofs and wofs.path or "", wofs and self.mask_wofs_mask or "",
filename)
data = get_dataset_data_masked(dataset, mask=mask, ndv=ndv)
_log.debug("data is [%s]", data)
stack_band = raster.GetRasterBand(index)
stack_band.SetDescription(os.path.basename(dataset.path))
stack_band.SetNoDataValue(ndv)
stack_band.WriteArray(data[band])
stack_band.ComputeStatistics(True)
stack_band.SetMetadata({"ACQ_DATE": format_date(tile.end_datetime), "SATELLITE": dataset.satellite.name})
stack_band.FlushCache()
del stack_band
if raster:
raster.FlushCache()
del raster
raster = None
def generate_raster_metadata(self):
return {
"X_INDEX": "{x:03d}".format(x=self.x),
"Y_INDEX": "{y:04d}".format(y=self.y),
"DATASET_TYPE": self.dataset_type.name,
"ACQUISITION_DATE": "{acq_min} to {acq_max}".format(acq_min=format_date(self.acq_min), acq_max=format_date(self.acq_max)),
"SATELLITES": " ".join([s.name for s in self.satellites]),
"PIXEL_QUALITY_FILTER": self.mask_pqa_apply and " ".join([mask.name for mask in self.mask_pqa_mask]) or "",
"WATER_FILTER": self.mask_wofs_apply and " ".join([mask.name for mask in self.mask_wofs_mask]) or ""
}
def format_date_time(d):
from datetime import datetime
if d:
return datetime.strftime(d, "%Y-%m-%d %H:%M:%S")
return None
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
RetrieveDatasetStackTool("Retrieve Dataset Stack").run()
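# Illustrative invocation sketch (not part of the original tool). The flags
# defined in this file (--dataset-type, --bands-all/--bands-common,
# --output-directory, --output-format, --overwrite, --list-only) match
# setup_arguments(); the cell and acquisition flags are provided by CellTool,
# so their exact spelling below is an assumption.
#
#   python retrieve_dataset_stack.py \
#       --x 123 --y -25 --acq-min 2014-01 --acq-max 2014-12 \
#       --dataset-type ARG25 --bands-all \
#       --output-directory /tmp/stacks --output-format GEOTIFF --list-only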
|
apache-2.0
| 1,836,849,953,270,979,000
| 38.560976
| 161
| 0.556412
| false
| 4.21162
| false
| false
| false
|
emmanvg/cti-stix-elevator
|
stix2elevator/convert_pattern.py
|
1
|
89308
|
import datetime
import re
import sys
from cybox.objects.account_object import Account
from cybox.objects.address_object import Address
from cybox.objects.archive_file_object import ArchiveFile
from cybox.objects.domain_name_object import DomainName
from cybox.objects.email_message_object import EmailMessage
from cybox.objects.file_object import File
from cybox.objects.http_session_object import HostField, HTTPSession
from cybox.objects.mutex_object import Mutex
from cybox.objects.network_connection_object import NetworkConnection
from cybox.objects.network_packet_object import NetworkPacket
from cybox.objects.network_socket_object import NetworkSocket
from cybox.objects.process_object import Process
from cybox.objects.unix_user_account_object import UnixUserAccount
from cybox.objects.uri_object import URI
from cybox.objects.win_computer_account_object import WinComputerAccount
from cybox.objects.win_executable_file_object import WinExecutableFile
from cybox.objects.win_process_object import WinProcess
from cybox.objects.win_registry_key_object import WinRegistryKey
from cybox.objects.win_service_object import WinService
from six import text_type
import stix2
from stix2.patterns import (BasicObjectPathComponent, ListObjectPathComponent,
ObjectPath, ObservationExpression,
QualifiedObservationExpression,
ReferenceObjectPathComponent, _BooleanExpression,
_ComparisonExpression,
_CompoundObservationExpression, _Constant)
import stixmarx
from stix2elevator.common import ADDRESS_FAMILY_ENUMERATION, SOCKET_OPTIONS
from stix2elevator.convert_cybox import split_into_requests_and_responses
from stix2elevator.ids import (add_object_id_value, exists_object_id_key,
get_id_value, get_object_id_value)
from stix2elevator.options import error, get_option_value, info, warn
from stix2elevator.utils import identifying_info, map_vocabs_to_label
from stix2elevator.vocab_mappings import WINDOWS_PEBINARY
if sys.version_info > (3,):
long = int
KEEP_OBSERVABLE_DATA_USED_IN_PATTERNS = False
KEEP_INDICATORS_USED_IN_COMPOSITE_INDICATOR_EXPRESSION = True
class BasicObjectPathComponentForElevator(BasicObjectPathComponent):
@staticmethod
def create_ObjectPathComponent(component_name):
if component_name.endswith("_ref"):
return ReferenceObjectPathComponentForElevator(component_name)
elif component_name.find("[") != -1:
parse1 = component_name.split("[")
return ListObjectPathComponentForElevator(parse1[0], parse1[1][:-1])
else:
return BasicObjectPathComponentForElevator(component_name, False)
class ListObjectPathComponentForElevator(ListObjectPathComponent):
@staticmethod
def create_ObjectPathComponent(component_name):
if component_name.endswith("_ref"):
return ReferenceObjectPathComponentForElevator(component_name)
elif component_name.find("[") != -1:
parse1 = component_name.split("[")
return ListObjectPathComponentForElevator(parse1[0], parse1[1][:-1])
else:
return BasicObjectPathComponentForElevator(component_name, False)
class ReferenceObjectPathComponentForElevator(ReferenceObjectPathComponent):
@staticmethod
def create_ObjectPathComponent(component_name):
if component_name.endswith("_ref"):
return ReferenceObjectPathComponentForElevator(component_name)
elif component_name.find("[") != -1:
parse1 = component_name.split("[")
return ListObjectPathComponentForElevator(parse1[0], parse1[1][:-1])
else:
return BasicObjectPathComponentForElevator(component_name, False)
class ObjectPathForElevator(ObjectPath):
def toSTIX21(self):
current_cyber_observable_type = self.object_type_name
for x in self.property_path:
if x.property_name == "extensions":
continue
if current_cyber_observable_type == "file":
if (x.property_name == "is_encrypted" or
x.property_name == "encryption_algorithm" or
x.property_name == "decryption_key"):
print(
"Expression contains the property " + x.property_name + ", for a file, which is not in STIX 2.1")
elif x.property_name == "archive-ext" or x.property_name == "raster-image-ext":
current_cyber_observable_type = x.property_name
elif x.property_name == "contains_refs":
current_cyber_observable_type = "file"
elif x.property_name == "parent_directory_ref":
current_cyber_observable_type = "directory"
elif current_cyber_observable_type == "directory":
if x.property_name == "contains_refs":
# TODO - what if it is a directory?
current_cyber_observable_type = "file"
elif current_cyber_observable_type == "archive-ext":
if x.property_name == "version":
print("Expression contains the property version, for a file.archive-ext, which is not in STIX 2.1")
elif current_cyber_observable_type == "raster-image-ext":
if x.property_name == "image_compression_algorithm":
print(
"Expression contains the property image_compression_algorithm, for a file.raster-image-ext, which is not in STIX 2.1")
elif current_cyber_observable_type == "network_traffic":
if x.property_name == "socket-ext":
current_cyber_observable_type = x.property_name
elif current_cyber_observable_type == "socket-ext":
if x.property_name == "protocol_family":
print(
"Expression contains the property protocol_familys, for a network_traffic:socket-ext, which is not in STIX 2.1")
elif current_cyber_observable_type == "process":
if x.property_name == "name" or x.property_name == "arguments":
print(
"Expression contains the property " + x.property_name + ", for a process, which is not in STIX 2.1")
elif x.property_name == "binary_ref":
x.property_name = "image_ref"
elif x.property_name == "opened_connection_refs":
current_cyber_observable_type = "network_traffic"
elif x.property_name == 'creator_user_ref':
current_cyber_observable_type = "user_account"
elif x.property_name == 'binary_ref':
current_cyber_observable_type = "file"
elif x.property_name == 'windows-service-ext':
current_cyber_observable_type = 'windows-service-ext'
elif current_cyber_observable_type == 'windows-service-ext':
if x.property_name == 'service_dll_refs':
current_cyber_observable_type = "file"
elif current_cyber_observable_type == "user_account":
if x.property_name == "password_last_changed":
x.property_name = "credential_last_changed"
return self
class ComparisonExpressionForElevator(_ComparisonExpression):
# overrides, so IdrefPlaceHolder can be handled
def __init__(self, operator, lhs, rhs, negated=False):
self.operator = operator
if operator == "=" and isinstance(rhs, stix2.ListConstant):
warn("apply_condition assumed to be 'ANY' in %s",
721, identifying_info(get_dynamic_variable("current_observable")))
self.operator = "IN"
if isinstance(lhs, stix2.ObjectPath):
self.lhs = lhs
else:
self.lhs = stix2.ObjectPath.make_object_path(lhs)
# rhs might be a reference to another object, which has its own observable pattern
if isinstance(rhs, _Constant) or isinstance(rhs, IdrefPlaceHolder):
self.rhs = rhs
else:
self.rhs = make_constant(rhs)
self.negated = negated
self.root_type = self.lhs.object_type_name
def contains_placeholder(self):
return isinstance(self.rhs, IdrefPlaceHolder)
def collapse_reference(self, prefix):
new_lhs = prefix.merge(self.lhs)
new_lhs.collapsed = True
return ComparisonExpressionForElevator(self.operator, new_lhs, self.rhs)
def replace_placeholder_with_idref_pattern(self, idref):
if isinstance(self.rhs, IdrefPlaceHolder):
change_made, pattern = self.rhs.replace_placeholder_with_idref_pattern(idref)
if change_made:
if hasattr(self.lhs, "collapsed") and self.lhs.collapsed:
return True, ComparisonExpressionForElevator(pattern.operator, self.lhs, pattern.rhs)
else:
return True, pattern.collapse_reference(self.lhs)
return False, self
def partition_according_to_object_path(self):
return self
def contains_unconverted_term(self):
return False
def toSTIX21(self):
self.lhs = self.lhs.toSTIX21()
return self
class EqualityComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(EqualityComparisonExpressionForElevator, self).__init__("=", lhs, rhs, negated)
class MatchesComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(MatchesComparisonExpressionForElevator, self).__init__("MATCHES", lhs, rhs, negated)
class GreaterThanComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(GreaterThanComparisonExpressionForElevator, self).__init__(">", lhs, rhs, negated)
class LessThanComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(LessThanComparisonExpressionForElevator, self).__init__("<", lhs, rhs, negated)
class GreaterThanEqualComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(GreaterThanEqualComparisonExpressionForElevator, self).__init__(">=", lhs, rhs, negated)
class LessThanEqualComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(LessThanEqualComparisonExpressionForElevator, self).__init__("<=", lhs, rhs, negated)
class InComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(InComparisonExpressionForElevator, self).__init__("IN", lhs, rhs, negated)
class LikeComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(LikeComparisonExpressionForElevator, self).__init__("LIKE", lhs, rhs, negated)
class IsSubsetComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(IsSubsetComparisonExpressionForElevator, self).__init__("ISSUBSET", lhs, rhs, negated)
class IsSupersetComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(IsSupersetComparisonExpressionForElevator, self).__init__("ISSUPERSET", lhs, rhs, negated)
class BooleanExpressionForElevator(_BooleanExpression):
def add_operand(self, operand):
self.operands.append(operand)
def contains_placeholder(self):
for args in self.operands:
if args.contains_placeholder():
return True
return False
def replace_placeholder_with_idref_pattern(self, idref):
new_operands = []
change_made = False
for args in self.operands:
change_made_this_time, new_operand = args.replace_placeholder_with_idref_pattern(idref)
if change_made_this_time:
if not hasattr(self, "root_type"):
self.root_type = new_operand.root_type
elif self.root_type and hasattr(new_operand, "root_type") and (self.root_type != new_operand.root_type):
self.root_type = None
change_made = change_made or change_made_this_time
new_operands.append(new_operand)
self.operands = new_operands
return change_made, self
def collapse_reference(self, prefix):
new_operands = []
for operand in self.operands:
new_operands.append(operand.collapse_reference(prefix))
return BooleanExpressionForElevator(self.operator, new_operands)
def partition_according_to_object_path(self):
subexpressions = []
results = []
for term in self.operands:
term_was_appended = False
for sub in subexpressions:
if not hasattr(term, "root_type") and not hasattr(sub[0], "root_type"):
sub.append(term)
term_was_appended = True
break
elif hasattr(term, "root_type") and hasattr(sub[0], "root_type") and term.root_type == sub[0].root_type:
sub.append(term)
term_was_appended = True
break
if not term_was_appended:
subexpressions.append([term])
for x in subexpressions:
if len(x) == 1:
results.append(x[0])
else:
results.append(create_boolean_expression(self.operator, x))
if len(results) == 1:
return results[0]
else:
return CompoundObservationExpressionForElevator(self.operator, results)
def contains_unconverted_term(self):
for args in self.operands:
if args.contains_unconverted_term():
return True
return False
def toSTIX21(self):
for args in self.operands:
args.toSTIX21()
return self
class AndBooleanExpressionForElevator(BooleanExpressionForElevator):
"""'AND' Boolean Pattern Expression. Only use if both operands are of
the same root object.
Args:
operands (list): AND operands
"""
def __init__(self, operands):
super(AndBooleanExpressionForElevator, self).__init__("AND", operands)
class OrBooleanExpressionForElevator(BooleanExpressionForElevator):
"""'OR' Boolean Pattern Expression. Only use if both operands are of the same root object
Args:
operands (list): OR operands
"""
def __init__(self, operands):
super(OrBooleanExpressionForElevator, self).__init__("OR", operands)
class IdrefPlaceHolder(object):
def __init__(self, idref):
self.idref = idref
def __str__(self):
return "PLACEHOLDER:" + self.idref
def contains_placeholder(self):
return True
def replace_placeholder_with_idref_pattern(self, idref):
if idref == self.idref:
return True, get_pattern_from_cache(idref)
elif exists_object_id_key(self.idref) and idref == get_object_id_value(self.idref):
return True, get_pattern_from_cache(idref)
else:
return False, self
def partition_according_to_object_path(self):
error("Placeholder %s should be resolved", 203, self.idref)
return self
def contains_unconverted_term(self):
return False
class UnconvertedTerm(object):
def __init__(self, term_info):
self.term_info = term_info
def __str__(self):
return "unconverted_term:%s" % self.term_info
def contains_placeholder(self):
return False
def replace_placeholder_with_idref_pattern(self, idref):
return False, self
def partition_according_to_object_path(self):
return self
def contains_unconverted_term(self):
return True
class ObservationExpressionForElevator(ObservationExpression):
def toSTIX21(self):
self.operand.toSTIX21()
return self
class CompoundObservationExpressionForElevator(_CompoundObservationExpression):
def __str__(self):
sub_exprs = []
if len(self.operands) == 1:
return "[%s]" % self.operands[0]
for o in self.operands:
if isinstance(o, ObservationExpressionForElevator) or isinstance(o,
CompoundObservationExpressionForElevator):
sub_exprs.append("%s" % o)
else:
sub_exprs.append("[%s]" % o)
return (" " + self.operator + " ").join(sub_exprs)
def contains_placeholder(self):
for args in self.operands:
if args.contains_placeholder():
error("Observable Expressions should not contain placeholders", 202)
def contains_unconverted_term(self):
for args in self.operands:
if args.contains_unconverted_term():
return True
return False
def partition_according_to_object_path(self):
return self
def toSTIX21(self):
for arg in self.operands:
arg.toSTIX21()
return self
class AndObservationExpressionForElevator(CompoundObservationExpressionForElevator):
"""'AND' Compound Observation Pattern Expression
Args:
operands (str): compound observation operands
"""
def __init__(self, operands):
super(AndObservationExpressionForElevator, self).__init__("AND", operands)
class OrObservationExpressionForElevator(CompoundObservationExpressionForElevator):
"""Pattern 'OR' Compound Observation Expression
Args:
operands (str): compound observation operands
"""
def __init__(self, operands):
super(OrObservationExpressionForElevator, self).__init__("OR", operands)
class FollowedByObservationExpressionForElevator(CompoundObservationExpressionForElevator):
"""Pattern 'Followed by' Compound Observation Expression
Args:
operands (str): compound observation operands
"""
def __init__(self, operands):
super(FollowedByObservationExpressionForElevator, self).__init__("FOLLOWEDBY", operands)
class QualifiedObservationExpressionForElevator(QualifiedObservationExpression):
"""Pattern Qualified Observation Expression
Args:
observation_expression (PatternExpression OR _CompoundObservationExpression OR ): pattern expression
qualifier (_ExpressionQualifier): pattern expression qualifier
"""
def __init__(self, observation_expression, qualifier):
super(QualifiedObservationExpressionForElevator, self).__init__(observation_expression, qualifier)
def toSTIX21(self):
self.observation_expression.toSTIX21()
return self
class ParentheticalExpressionForElevator(stix2.ParentheticalExpression):
def contains_placeholder(self):
return self.expression.contains_placeholder()
def contains_unconverted_term(self):
return self.expression.contains_unconverted_term()
def replace_placeholder_with_idref_pattern(self, idref):
change_made, new_expression = self.expression.replace_placeholder_with_idref_pattern(idref)
self.expression = new_expression
if hasattr(new_expression, "root_type"):
self.root_type = new_expression.root_type
return change_made, self
def collapse_reference(self, prefix):
new_expression = self.expression.collapse_reference(prefix)
return ParentheticalExpressionForElevator(new_expression)
def partition_according_to_object_path(self):
self.expression = self.expression.partition_according_to_object_path()
return self
def toSTIX21(self):
self.expression.toSTIX21()
return self
def create_boolean_expression(operator, operands):
if len(operands) == 1:
return operands[0]
exp = BooleanExpressionForElevator(operator, [])
for arg in operands:
if not isinstance(arg, IdrefPlaceHolder) and not isinstance(arg, UnconvertedTerm) and hasattr(arg, "root_type"):
if not hasattr(exp, "root_type"):
exp.root_type = arg.root_type
elif exp.root_type and (exp.root_type != arg.root_type):
exp.root_type = None
exp.add_operand(arg)
return ParentheticalExpressionForElevator(exp)
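# Illustrative sketch (not part of the original module): two comparison terms
# on the same root object ("file") combine into a parenthesised AND whose
# root_type stays "file". The property values are arbitrary.
def _example_create_boolean_expression():
    size_term = create_term("file:size", "GreaterThan", make_constant(1024))
    name_term = create_term("file:name", "Equals", make_constant("cmd.exe"))
    return create_boolean_expression("AND", [size_term, name_term])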
###################
_PATTERN_CACHE = {}
def clear_pattern_cache():
global _PATTERN_CACHE
_PATTERN_CACHE = {}
def add_to_pattern_cache(key, pattern):
global _PATTERN_CACHE
if pattern:
_PATTERN_CACHE[key] = pattern
def id_in_pattern_cache(id_):
return id_ in _PATTERN_CACHE
def get_pattern_from_cache(id_):
return _PATTERN_CACHE[id_]
def get_ids_from_pattern_cache():
return _PATTERN_CACHE.keys()
def get_items_from_pattern_cache():
return _PATTERN_CACHE.items()
def pattern_cache_is_empty():
return _PATTERN_CACHE == {}
###########
_OBSERVABLE_MAPPINGS = {}
def add_to_observable_mappings(obs):
global _OBSERVABLE_MAPPINGS
if obs:
_OBSERVABLE_MAPPINGS[obs.id_] = obs
_OBSERVABLE_MAPPINGS[obs.object_.id_] = obs
def id_in_observable_mappings(id_):
return id_ in _OBSERVABLE_MAPPINGS
def get_obs_from_mapping(id_):
return _OBSERVABLE_MAPPINGS[id_]
def clear_observable_mappings():
global _OBSERVABLE_MAPPINGS
_OBSERVABLE_MAPPINGS = {}
# simulate dynamic variable environment
_DYNAMIC_SCOPING_ENV = {}
def intialize_dynamic_variable(var):
global _DYNAMIC_SCOPING_ENV
if var in _DYNAMIC_SCOPING_ENV:
raise Exception
else:
_DYNAMIC_SCOPING_ENV[var] = []
def set_dynamic_variable(var, value):
global _DYNAMIC_SCOPING_ENV
if var not in _DYNAMIC_SCOPING_ENV:
intialize_dynamic_variable(var)
_DYNAMIC_SCOPING_ENV[var].append(value)
def get_dynamic_variable(var):
if var not in _DYNAMIC_SCOPING_ENV:
raise Exception
else:
return _DYNAMIC_SCOPING_ENV[var][-1]
def pop_dynamic_variable(var):
if var not in _DYNAMIC_SCOPING_ENV or not _DYNAMIC_SCOPING_ENV[var]:
raise Exception
else:
        _DYNAMIC_SCOPING_ENV[var].pop()
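# Illustrative sketch (not part of the original module) of the intended
# set/get/pop discipline for the dynamic-variable environment above; the
# variable name mirrors the "current_observable" usage later in this module,
# and the observable argument is just a stand-in.
def _example_dynamic_scope(observable):
    set_dynamic_variable("current_observable", observable)
    try:
        return get_dynamic_variable("current_observable")
    finally:
        pop_dynamic_variable("current_observable")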
_CLASS_NAME_MAPPING = {"File": "file",
"URI": "uri",
"EmailMessage": "email-message",
"WinRegistryKey": "windows-registry-key",
"Process": "process",
"DomainName": "domain-name",
"Mutex": "mutex",
"WinExecutableFile": "file:extensions.'windows-pebinary-ext'",
"ArchiveFile": "file:extensions.'archive-ext'",
"NetworkConnection": "network-traffic"}
_ADDRESS_NAME_MAPPING = {Address.CAT_IPV4: "ipv4-addr",
Address.CAT_IPV6: "ipv6-addr",
Address.CAT_MAC: "mac-addr",
Address.CAT_EMAIL: "email-addr"}
# address, network_connection
def convert_cybox_class_name_to_object_path_root_name(instance):
class_name = instance.__class__.__name__
if class_name in _CLASS_NAME_MAPPING:
return _CLASS_NAME_MAPPING[class_name]
elif class_name == "Address" and instance.category in _ADDRESS_NAME_MAPPING:
        return _ADDRESS_NAME_MAPPING[instance.category]
else:
error("Cannot convert CybOX 2.x class name %s to an object_path_root_name", 813, class_name)
return None
def need_not(condition):
return condition == "DoesNotContain"
def is_equal_condition(cond):
return cond == "Equals" or cond is None
def add_parens_if_needed(expr):
if expr.find("AND") != -1 or expr.find("OR") != -1:
return "(" + expr + ")"
else:
return expr
_CONDITION_OPERATOR_MAP = {
'Equals': "=",
"DoesNotEqual": "!=",
"Contains": "=",
"DoesNotContain": "!=",
"GreaterThan": ">",
'GreaterThanOrEqual': ">=",
"LessThan": "<",
"LessThanOrEqual": "<="
# StartsWith - handled in create_term_with_regex
# EndsWith - handled in create_term_with_regex
# InclusiveBetween - handled in create_term_with_range
# ExclusiveBetween - handled in create_term_with_range
# FitsPattern
# BitwiseAnd
# BitwiseOr
}
def convert_condition(condition):
if condition is None:
warn("No condition given for %s - assume '='", 714,
identifying_info(get_dynamic_variable("current_observable")))
return "="
for cond, op in _CONDITION_OPERATOR_MAP.items():
if cond.lower() == condition.lower():
if cond != condition:
warn("'%s' allowed in %s - should be '%s'", 630,
condition,
identifying_info(get_dynamic_variable("current_observable")),
cond)
return op
warn("Unknown condition given in %s - marked as 'INVALID_CONDITION'", 628,
identifying_info(get_dynamic_variable("current_observable")))
return "INVALID-CONDITION"
def process_boolean_negation(op, negated):
if not negated:
return op
elif op == "AND":
return "OR"
elif op == "OR":
return "AND"
else:
raise (ValueError("not a legal Boolean op: %s" % op))
def process_comparison_negation(op, negated):
if not negated:
return op
elif op == "=":
return "!="
elif op == "!=":
return "="
elif op == "<":
return ">="
elif op == "<=":
return ">"
elif op == ">":
return "<="
elif op == ">=":
return "<"
else:
raise (ValueError("not a legal Comparison op: %s" % op))
def create_term_with_regex(lhs, condition, rhs, negated):
# TODO: escape characters
if condition == "StartsWith":
rhs.value = "^%s" % rhs.value
elif condition == "EndsWith":
rhs.value = "$%s" % rhs.value
return ComparisonExpressionForElevator("MATCHES", lhs, rhs, negated)
def create_term_with_range(lhs, condition, rhs, negated=False):
# TODO: handle negated
if not isinstance(rhs, stix2.ListConstant) or len(rhs.value) != 2:
error("%s was used, but two values were not provided.", 609, condition)
return "'range term underspecified'"
else:
if condition == "InclusiveBetween":
# return "(" + lhs + " GE " + text_type(rhs[0]) + " AND " + lhs + " LE " + text_type(rhs[1]) + ")"
lower_bound = ComparisonExpressionForElevator(process_comparison_negation(">=", negated), lhs, rhs.value[0])
upper_bound = ComparisonExpressionForElevator(process_comparison_negation("<=", negated), lhs, rhs.value[1])
else: # "ExclusiveBetween"
# return "(" + lhs + " GT " + text_type(rhs[0]) + " AND " + lhs + " LT " + text_type(rhs[1]) + ")"
lower_bound = ComparisonExpressionForElevator(process_comparison_negation(">", negated), lhs, rhs.value[0])
upper_bound = ComparisonExpressionForElevator(process_comparison_negation("<", negated), lhs, rhs.value[1])
return create_boolean_expression(process_boolean_negation("AND", negated), [lower_bound, upper_bound])
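# Illustrative sketch (not part of the original module): an InclusiveBetween
# condition on a hypothetical file:size path becomes ">=" and "<=" terms
# joined with AND. The bounds are arbitrary.
def _example_range_term():
    bounds = stix2.ListConstant([1024, 4096])
    return create_term_with_range("file:size", "InclusiveBetween", bounds)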
def multi_valued_property(object_path):
return object_path and object_path.find("*") != -1
def negate_if_needed(condition, negated):
if negated:
return "NOT " + condition
else:
return condition
def create_term(lhs, condition, rhs, negated=False):
if condition == "StartsWith" or condition == "EndsWith":
return create_term_with_regex(lhs, condition, rhs, negated)
elif condition == "InclusiveBetween" or condition == "ExclusiveBetween":
return create_term_with_range(lhs, condition, rhs, negated)
else:
if condition == "Contains" and not multi_valued_property(lhs):
warn("Used MATCHES operator for %s", 715, condition)
return create_term_with_regex(lhs, condition, rhs, negated)
elif condition == "DoesNotContain":
warn("Used MATCHES operator for %s", 715, condition)
return create_term_with_regex(lhs, condition, rhs, not negated)
# return lhs + " " + negate_if_needed(convert_condition(condition), negated) + " '" + convert_to_text_type(rhs) + "'"
return ComparisonExpressionForElevator(convert_condition(condition), lhs, rhs, negated)
def make_constant(obj):
# TODO: handle other Markable objects?
if isinstance(obj, bool):
return stix2.BooleanConstant(obj)
elif isinstance(obj, int) or isinstance(obj, long):
return stix2.IntegerConstant(obj)
elif isinstance(obj, float):
return stix2.FloatConstant(obj)
elif isinstance(obj, str) or isinstance(obj, stixmarx.api.types.MarkableText):
return stix2.StringConstant(obj.strip())
elif isinstance(obj, list):
return stix2.ListConstant([make_constant(x) for x in obj])
elif isinstance(obj, datetime.datetime) or isinstance(obj, stixmarx.api.types.MarkableDateTime):
return stix2.TimestampConstant(obj.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))
else:
raise ValueError("Can't make a constant from %s" % obj)
def add_comparison_expression(prop, object_path):
if prop is not None and prop.value is not None:
if hasattr(prop, "condition"):
cond = prop.condition
else:
warn("No condition given - assume '='", 714)
cond = None
return create_term(object_path, cond, make_constant(prop.value))
if prop is not None and prop.value is None:
warn("No term was yielded for %s", 622, object_path)
return None
def convert_custom_properties(cps, object_type_name):
expressions = []
for cp in cps.property_:
if not re.match("[a-z0-9_]+", cp.name):
warn("The custom property name %s does not adhere to the specification rules", 617, cp.name)
if " " in cp.name:
warn("The custom property name %s contains whitespace, replacing it with underscores", 624, cp.name)
expressions.append(
create_term(object_type_name + ":x_" + cp.name.replace(" ", "_"), cp.condition, make_constant(cp.value)))
return create_boolean_expression("AND", expressions)
_ACCOUNT_PROPERTIES = [
["full_name", "user-account:display_name"],
["last_login", "user-account:account_last_login"],
["username", "user-account:account_login"],
["creation_time", "user-account:account_created"]
]
def convert_account_to_pattern(account):
expressions = []
if hasattr(account, "disabled") and account.disabled:
expressions.append(create_term("user-account:is_disabled",
"Equals",
stix2.BooleanConstant(account.disabled)))
for prop_spec in _ACCOUNT_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(account, prop_1x) and getattr(account, prop_1x):
term = add_comparison_expression(getattr(account, prop_1x), object_path)
if term:
expressions.append(term)
if account.authentication and get_option_value("spec_version") == "2.1":
if account.authentication.authentication_data:
expressions.append(create_term("user-account:credential",
"Equals",
stix2.StringConstant(account.authentication.authentication_data)))
if isinstance(account, UnixUserAccount):
win_process_expression = convert_unix_user_to_pattern(account)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn("No UnixUserAccount properties found in %s", 615, text_type(account))
elif isinstance(account, WinComputerAccount):
expressions.append(create_term("user-account:account_type",
"Equals",
stix2.StringConstant("windows-domain" if account.domain else "windows-local")))
if expressions:
return create_boolean_expression("AND", expressions)
_UNIX_ACCOUNT_PROPERTIES = [
["group_id", "user-account:extensions.'unix-account-ext'.gid"],
["login_shell", "user-account:extensions.'unix-account-ext'.shell"],
["home_directory", "user-account:extensions.'unix-account-ext'.home_dir"],
]
def convert_unix_user_to_pattern(account):
expressions = []
expressions.append(create_term("user-account:account_type",
"Equals",
stix2.StringConstant("unix")))
if hasattr(account, "user_id") and account.user_id:
expressions.append(create_term("user-account:user_id",
account.user_id.condition,
stix2.StringConstant(text_type(account.user_id.value))))
for prop_spec in _UNIX_ACCOUNT_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(account, prop_1x) and getattr(account, prop_1x):
term = add_comparison_expression(getattr(account, prop_1x), object_path)
if term:
expressions.append(term)
if expressions:
return create_boolean_expression("AND", expressions)
def convert_address_to_pattern(add):
cond = add.address_value.condition
if add.category == add.CAT_IPV4:
return create_term("ipv4-addr:value", cond, make_constant(add.address_value.value.strip()))
elif add.category == add.CAT_IPV6:
return create_term("ipv6-addr:value", cond, make_constant(add.address_value.value.strip()))
elif add.category == add.CAT_MAC:
return create_term("mac-addr:value", cond, make_constant(add.address_value.value.strip()))
elif add.category == add.CAT_EMAIL:
return create_term("email-addr:value", cond, make_constant(add.address_value.value.strip()))
else:
warn("The address type %s is not part of Cybox 3.0", 421, add.category)
def convert_uri_to_pattern(uri):
return create_term("url:value", uri.value.condition, make_constant(uri.value.value.strip()))
# NOTICE: The format of these PROPERTIES is different from the others in this file!
_EMAIL_HEADER_PROPERTIES = [["email-message:subject", ["subject"]],
["email-message:from_ref.value", ["from_", "address_value"]],
["email-message:sender_ref.value", ["sender", "address_value"]],
["email-message:date", ["date"]],
["email-message:content_type", ["content_type"]],
["email-message:to_refs[*].value", ["to*", "address_value"]],
["email-message:cc_refs[*].value", ["cc*", "address_value"]],
["email-message:bcc_refs[*].value", ["bcc*", "address_value"]]]
_EMAIL_ADDITIONAL_HEADERS_PROPERTIES = \
[["email-message:additional_header_fields.Reply-To", ["reply-to*", "address_value"]],
["email-message:additional_header_fields.Message-ID", ["message_id"]],
["email-message:additional_header_fields.In-Reply-To", ["in_reply_to"]],
["email-message:additional_header_fields.Errors-To", ["errors_to"]],
["email-message:additional_header_fields.MIME-Version", ["mime_version"]],
["email-message:additional_header_fields.Precedence", ["precedence"]],
["email-message:additional_header_fields.User-Agent", ["user_agent"]],
["email-message:additional_header_fields.Boundary", ["boundary"]],
["email-message:additional_header_fields.X-Originating-IP", ["x_originating_ip", "address_value"]],
["email-message:additional_header_fields.X-Priority", ["x_priority"]],
["email-message:additional_header_fields.X-Mailer", ["x_mailer"]]]
def cannonicalize_prop_name(name):
if name.find("*") == -1:
return name
else:
return name[:-1]
def create_terms_from_prop_list(prop_list, obj, object_path):
if len(prop_list) == 1:
prop_1x = prop_list[0]
if hasattr(obj, cannonicalize_prop_name(prop_1x)):
if multi_valued_property(prop_1x):
prop_exprs = []
for c in getattr(obj, cannonicalize_prop_name(prop_1x)):
term = add_comparison_expression(c, object_path)
if term:
prop_exprs.append(term)
# return " OR ".join(prop_exprs)
if prop_exprs:
return create_boolean_expression("OR", prop_exprs)
else:
return add_comparison_expression(getattr(obj, cannonicalize_prop_name(prop_1x)), object_path)
else:
prop_1x, rest_of_prop_list = prop_list[0], prop_list[1:]
if hasattr(obj, cannonicalize_prop_name(prop_1x)):
if multi_valued_property(prop_1x):
prop_exprs = []
values = getattr(obj, cannonicalize_prop_name(prop_1x))
if values:
for c in values:
term = create_terms_from_prop_list(rest_of_prop_list, c, object_path)
if term:
prop_exprs.append(term)
# return " OR ".join(prop_exprs)
if prop_exprs:
return create_boolean_expression("OR", prop_exprs)
else:
return create_terms_from_prop_list(rest_of_prop_list,
getattr(obj, cannonicalize_prop_name(prop_1x)),
object_path)
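# Illustrative sketch (not part of the original module): walking the property
# chain ["from_", "address_value"] on a minimal stand-in header object yields
# a single comparison against the given object path. The classes and the
# address value below are hypothetical stand-ins, not real CybOX types.
def _example_prop_list_walk():
    class _Leaf(object):
        value = "alice@example.com"
        condition = "Equals"
    class _Addr(object):
        address_value = _Leaf()
    class _Header(object):
        from_ = _Addr()
    return create_terms_from_prop_list(["from_", "address_value"],
                                       _Header(),
                                       "email-message:from_ref.value")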
def convert_email_header_to_pattern(head, properties):
header_expressions = []
for prop_spec in properties:
object_path = prop_spec[0]
prop_1x_list = prop_spec[1]
if hasattr(head, cannonicalize_prop_name(prop_1x_list[0])):
term = create_terms_from_prop_list(prop_1x_list, head, object_path)
if term:
header_expressions.append(term)
if head.received_lines:
warn("Email received lines not handled yet", 806)
if header_expressions:
return create_boolean_expression("AND", header_expressions)
def convert_attachment_to_ref(attachment):
return IdrefPlaceHolder(attachment.object_reference)
def convert_email_message_to_pattern(mess):
expressions = []
if mess.header is not None:
expressions.append(convert_email_header_to_pattern(mess.header, _EMAIL_HEADER_PROPERTIES))
add_headers = convert_email_header_to_pattern(mess.header, _EMAIL_ADDITIONAL_HEADERS_PROPERTIES)
if add_headers:
expressions.append(add_headers)
if mess.attachments is not None:
for attachment in mess.attachments:
expressions.append(ComparisonExpressionForElevator("=", "email-message:body_multipart[*].body_raw_ref",
convert_attachment_to_ref(attachment)))
if mess.raw_body is not None:
if not mess.raw_body.value:
warn("%s contains no value", 621, "Email raw body")
else:
warn("Email raw body not handled yet", 806)
if mess.links is not None:
warn("Email links not handled yet", 806)
if expressions:
return create_boolean_expression("AND", expressions)
_PE_FILE_HEADER_PROPERTIES = \
[["machine", "file:extensions.'windows-pebinary-ext'.file_header:machine_hex"],
["time_date_stamp", "file:extensions.'windows-pebinary-ext'.file_header.time_date_stamp"],
["number_of_sections", "file:extensions.'windows-pebinary-ext'.file_header.number_of_sections"],
["pointer_to_symbol_table", "file:extensions.'windows-pebinary-ext'.file_header.pointer_to_symbol_table"],
["number_of_symbols", "file:extensions.'windows-pebinary-ext'.file_header.number_of_symbols"],
["size_of_optional_header", "file:extensions.'windows-pebinary-ext'.file_header.size_of_optional_header"],
["characteristics", "file:extensions.'windows-pebinary-ext'.file_header.characteristics_hex"]]
_PE_SECTION_HEADER_PROPERTIES = [["name", "file:extensions.'windows-pebinary-ext'.section[*].name"],
["virtual_size", "file:extensions.'windows-pebinary-ext'.section[*].size"]]
_ARCHIVE_FILE_PROPERTIES_2_0 = [["comment", "file:extensions.'archive-ext'.comment"],
["version", "file:extensions.'archive-ext'.version"]]
_ARCHIVE_FILE_PROPERTIES_2_1 = [["comment", "file:extensions.'archive-ext'.comment"]]
def select_archive_file_properties():
if get_option_value("spec_version") == "2.1":
return _ARCHIVE_FILE_PROPERTIES_2_1
else:
return _ARCHIVE_FILE_PROPERTIES_2_0
def convert_windows_executable_file_to_pattern(f):
expressions = []
if f.headers:
file_header = f.headers.file_header
if file_header:
file_header_expressions = []
for prop_spec in _PE_FILE_HEADER_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(file_header, prop_1x) and getattr(file_header, prop_1x):
term = add_comparison_expression(getattr(file_header, prop_1x), object_path)
if term:
file_header_expressions.append(term)
if file_header.hashes is not None:
hash_expression = convert_hashes_to_pattern(file_header.hashes)
if hash_expression:
file_header_expressions.append(hash_expression)
if file_header_expressions:
expressions.append(create_boolean_expression("AND", file_header_expressions))
if f.headers.optional_header:
warn("file:extensions:'windows-pebinary-ext':optional_header is not implemented yet", 807)
if f.type_:
expressions.append(create_term("file:extensions.'windows-pebinary-ext'.pe_type",
f.type_.condition,
stix2.StringConstant(map_vocabs_to_label(f.type_.value, WINDOWS_PEBINARY))))
sections = f.sections
if sections:
sections_expressions = []
# should order matter in patterns???
for s in sections:
section_expressions = []
if s.section_header:
for prop_spec in _PE_SECTION_HEADER_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(s.section_header, prop_1x) and getattr(s.section_header, prop_1x):
term = add_comparison_expression(getattr(s.section_header, prop_1x), object_path)
if term:
section_expressions.append(term)
if s.entropy:
if s.entropy.min:
warn("Entropy.min is not supported in STIX 2.0", 424)
                if s.entropy.max:
warn("Entropy.max is not supported in STIX 2.0", 424)
if s.entropy.value:
section_expressions.append(create_term("file:extensions.'windows-pebinary-ext'.section[*].entropy",
s.entropy.value.condition,
stix2.FloatConstant(s.entropy.value.value)))
if s.data_hashes:
section_expressions.append(convert_hashes_to_pattern(s.data_hashes))
if s.header_hashes:
section_expressions.append(convert_hashes_to_pattern(s.header_hashes))
if section_expressions:
sections_expressions.append(create_boolean_expression("AND", section_expressions))
if sections_expressions:
expressions.append(create_boolean_expression("AND", sections_expressions))
if f.exports:
warn("The exports property of WinExecutableFileObj is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("WinExecutableFileObj.exports"))
if f.imports:
warn("The imports property of WinExecutableFileObj is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("WinExecutableFileObj.imports"))
if expressions:
return create_boolean_expression("AND", expressions)
def convert_archive_file_to_pattern(f):
and_expressions = []
for prop_spec in select_archive_file_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(f, prop_1x):
term = add_comparison_expression(getattr(f, prop_1x), object_path)
if term:
and_expressions.append(term)
if and_expressions:
return create_boolean_expression("AND", and_expressions)
def convert_hashes_to_pattern(hashes):
hash_expressions = []
for h in hashes:
if getattr(h, "simple_hash_value"):
hash_value = h.simple_hash_value
else:
hash_value = h.fuzzy_hash_value
if text_type(h.type_).startswith("SHA"):
hash_type = "'" + "SHA" + "-" + text_type(h.type_)[3:] + "'"
elif text_type(h.type_) == "SSDEEP":
hash_type = text_type(h.type_).lower()
else:
hash_type = text_type(h.type_)
try:
hc = stix2.HashConstant(hash_value.value, text_type(h.type_))
except ValueError as err:
# don't cause exception if hash value isn't correct
warn(err, 626)
hc = make_constant(hash_value.value)
hash_expressions.append(create_term("file:hashes" + "." + hash_type,
hash_value.condition,
hc))
if hash_expressions:
return create_boolean_expression("OR", hash_expressions)
def convert_file_name_and_file_extension(file_name, file_extension):
if (file_extension and file_extension.value and is_equal_condition(file_name.condition) and
is_equal_condition(file_extension.condition) and file_name.value.endswith(file_extension.value)):
return create_term("file:name", file_name.condition, make_constant(file_name.value))
elif (file_name.condition == "StartsWith" and file_extension and file_extension.value and
is_equal_condition(file_extension.condition)):
return ComparisonExpressionForElevator("MATCHES", "file:name",
make_constant(
"^" + file_name.value + "*." + file_extension.value + "$"))
elif (file_name.condition == "Contains" and file_extension and file_extension.value and
is_equal_condition(file_extension.condition)):
return ComparisonExpressionForElevator("MATCHES", "file:name",
make_constant(
file_name.value + "*." + file_extension.value + "$"))
else:
warn("Unable to create a pattern for file:file_name from a File object", 620)
def convert_file_name_and_path_to_pattern(f):
file_name_path_expressions = []
if f.file_name and f.file_extension and f.file_extension.value:
file_name_path_expressions.append(convert_file_name_and_file_extension(f.file_name, f.file_extension))
elif f.file_name:
file_name_path_expressions.append(create_term("file:name",
f.file_name.condition,
make_constant(f.file_name.value)))
if f.file_path and f.file_path.value:
index = f.file_path.value.rfind("/")
if index == -1:
index = f.file_path.value.rfind("\\")
if index == -1:
warn("Ambiguous file path '%s' was not processed", 816, f.file_path.value)
else:
if not (f.file_path.value.endswith("/") or f.file_path.value.endswith("\\")):
file_name_path_expressions.append(create_term("file:name",
f.file_path.condition,
make_constant(f.file_path.value[index + 1:])))
path_string_constant = make_constant(((f.device_path.value if f.device_path else "") +
f.file_path.value[0: index]))
file_name_path_expressions.append(create_term("file:parent_directory_ref.path",
f.file_path.condition,
path_string_constant))
else:
path_string_constant = make_constant(((f.device_path.value if f.device_path else "") +
f.file_path.value[0: index]))
file_name_path_expressions.append(create_term("directory:path",
f.file_path.condition,
path_string_constant))
if f.full_path:
warn("1.x full file paths are not processed, yet", 802)
if file_name_path_expressions:
return create_boolean_expression("AND", file_name_path_expressions)
_FILE_PROPERTIES_2_0 = [["size_in_bytes", "file:size"],
["magic_number", "file:magic_number_hex"],
["created_time", "file:created"],
["modified_time", "file:modified"],
["accessed_time", "file:accessed"],
["encyption_algorithm", "file:encyption_algorithm"],
["decryption_key", "file:decryption_key"]]
# is_encrypted
_FILE_PROPERTIES_2_1 = [["size_in_bytes", "file:size"],
["magic_number", "file:magic_number_hex"],
["created_time", "file:created"],
["modified_time", "file:modified"],
["accessed_time", "file:accessed"]]
def select_file_properties():
if get_option_value("spec_version") == "2.1":
return _FILE_PROPERTIES_2_1
else:
return _FILE_PROPERTIES_2_0
def convert_file_to_pattern(f):
expressions = []
if f.hashes is not None:
hash_expression = convert_hashes_to_pattern(f.hashes)
if hash_expression:
expressions.append(hash_expression)
file_name_and_path_expression = convert_file_name_and_path_to_pattern(f)
if file_name_and_path_expression:
expressions.append(file_name_and_path_expression)
properties_expressions = []
for prop_spec in select_file_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(f, prop_1x) and getattr(f, prop_1x):
term = add_comparison_expression(getattr(f, prop_1x), object_path)
if term:
properties_expressions.append(term)
if properties_expressions:
expressions.extend(properties_expressions)
if isinstance(f, WinExecutableFile):
windows_executable_file_expression = convert_windows_executable_file_to_pattern(f)
if windows_executable_file_expression:
expressions.append(windows_executable_file_expression)
else:
warn("No WinExecutableFile properties found in %s", 613, text_type(f))
if isinstance(f, ArchiveFile):
archive_file_expressions = convert_archive_file_to_pattern(f)
if archive_file_expressions:
expressions.append(archive_file_expressions)
else:
warn("No ArchiveFile properties found in %s", 614, text_type(f))
if expressions:
return create_boolean_expression("AND", expressions)
_REGISTRY_KEY_VALUES_PROPERTIES = [["data", "windows-registry-key:values[*].data"],
["name", "windows-registry-key:values[*].name"],
["datatype", "windows-registry-key:values[*].data_type"]]
def convert_registry_key_to_pattern(reg_key):
expressions = []
if reg_key.key:
key_value_term = ""
if reg_key.hive:
if reg_key.hive.condition is None or is_equal_condition(reg_key.hive.condition):
key_value_term += reg_key.hive.value + "\\"
else:
warn("Condition %s on a hive property not handled", 812, reg_key.hive.condition)
if reg_key.key.value.startswith(reg_key.hive.value):
warn("Hive property, %s, is already a prefix of the key property, %s", 623, reg_key.hive.value,
reg_key.key.value)
key_value_term = reg_key.key.value
else:
key_value_term += reg_key.key.value
else:
key_value_term = reg_key.key.value
expressions.append(create_term("windows-registry-key:key",
reg_key.key.condition,
make_constant(key_value_term)))
if reg_key.values:
values_expressions = []
for v in reg_key.values:
value_expressions = []
for prop_spec in _REGISTRY_KEY_VALUES_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(v, prop_1x) and getattr(v, prop_1x):
term = add_comparison_expression(getattr(v, prop_1x), object_path)
if term:
value_expressions.append(term)
if value_expressions:
values_expressions.append(create_boolean_expression("OR", value_expressions))
expressions.extend(values_expressions)
if expressions:
return create_boolean_expression("AND", expressions)
def convert_image_info_to_pattern(image_info):
expressions = []
if image_info.command_line:
expressions.append(add_comparison_expression(image_info.command_line, "process:command_line"))
if image_info.current_directory:
expressions.append(add_comparison_expression(image_info.current_directory, "process:cwd"))
if expressions:
return create_boolean_expression("AND", expressions)
_PROCESS_PROPERTIES_2_0 = [
["is_hidden", "process:is_hidden"],
["pid", "process:pid"],
["name", "process:name"],
["parent_pid", "process:parent_ref.pid"],
["username", "process:creator_user_ref.user_id"],
["creation_time", "process:created"]
]
_PROCESS_PROPERTIES_2_1 = [
["is_hidden", "process:is_hidden"],
["pid", "process:pid"],
["parent_pid", "process:parent_ref.pid"],
["username", "process:creator_user_ref.user_id"],
["creation_time", "process:created"]
]
def select_process_properties():
if get_option_value("spec_version") == "2.1":
return _PROCESS_PROPERTIES_2_1
else:
return _PROCESS_PROPERTIES_2_0
def convert_process_to_pattern(process):
expressions = []
for prop_spec in select_process_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(process, prop_1x) and getattr(process, prop_1x):
term = add_comparison_expression(getattr(process, prop_1x), object_path)
if term:
expressions.append(term)
if process.image_info:
process_info = convert_image_info_to_pattern(process.image_info)
if process_info:
expressions.append(process_info)
if hasattr(process, "argument_list") and process.argument_list:
if get_option_value("spec_version") == "2.0":
argument_expressions = []
for a in process.argument_list:
argument_expressions.append(create_term("process:arguments[*]",
a.condition,
stix2.StringConstant(a.value)))
if argument_expressions:
expressions.append(create_boolean_expression("AND", argument_expressions))
else:
warn("The argument_list property of ProcessObj is not part of STIX 2.1", 418)
expressions.append(UnconvertedTerm("ProcessObj.argument_list"))
if hasattr(process, "environment_variable_list") and process.environment_variable_list:
ev_expressions = []
for ev in process.environment_variable_list:
# TODO: handle variable names with '-'
ev_expressions.append(create_term("process:environment_variables[*]." + str(ev.name),
ev.value.condition,
stix2.StringConstant(str(ev.value))))
if ev_expressions:
expressions.append(create_boolean_expression("AND", ev_expressions))
if hasattr(process, "child_pid_list") and process.child_pid_list:
child_pids_expressions = []
for cp in process.child_pid_list:
child_pids_expressions.append(create_term("process:child_refs[*].pid",
cp.condition,
stix2.IntegerConstant(cp.value)))
if child_pids_expressions:
expressions.append(create_boolean_expression("AND", child_pids_expressions))
if hasattr(process, "network_connection_list") and process.network_connection_list:
network_connection_expressions = []
for nc in process.network_connection_list:
new_pattern = convert_network_connection_to_pattern(nc)
network_connection_expressions.append(
new_pattern.collapse_reference(stix2.ObjectPath.make_object_path("process:opened_connection_refs[*]")))
if network_connection_expressions:
expressions.append(create_boolean_expression("AND", network_connection_expressions))
if isinstance(process, WinProcess):
win_process_expression = convert_windows_process_to_pattern(process)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn("No WinProcess properties found in %s", 615, text_type(process))
if isinstance(process, WinService):
service_expression = convert_windows_service_to_pattern(process)
if service_expression:
expressions.append(service_expression)
else:
warn("No WinService properties found in %s", 616, text_type(process))
if expressions:
return create_boolean_expression("AND", expressions)
_WINDOWS_PROCESS_PROPERTIES = [
["aslr_enabled", "process:extensions.'windows-process-ext'.aslr_enabled"],
["dep_enabled", "process:extensions.'windows-process-ext'.dep_enabled"],
["priority", "process:extensions.'windows-process-ext'.priority"],
["security_id", "process:extensions.'windows-process-ext'.owner_sid"],
["window_title", "process:extensions.'windows-process-ext'.window_title"]
]
def convert_windows_process_to_pattern(process):
expressions = []
for prop_spec in _WINDOWS_PROCESS_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(process, prop_1x) and getattr(process, prop_1x):
term = add_comparison_expression(getattr(process, prop_1x), object_path)
if term:
expressions.append(term)
if process.handle_list:
for h in process.handle_list:
warn("Windows Handles are not a part of STIX 2.0", 420)
if process.startup_info:
warn("The startup_info property of ProcessObj is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("ProcessObj.startup_info"))
if expressions:
return create_boolean_expression("AND", expressions)
_WINDOWS_SERVICE_PROPERTIES = \
[["service_name", "process:extensions.'windows-service-ext'.service_name"],
["display_name", "process:extensions.'windows-service-ext'.display_name"],
["startup_command_line", "process:extensions.'windows-service-ext'.startup_command_line"],
["start_type", "process:extensions.'windows-service-ext'.start_type"],
["service_type", "process:extensions.'windows-service-ext'.service_type"],
["service_status", "process:extensions.'windows-service-ext'.service_status"]]
def convert_windows_service_to_pattern(service):
expressions = []
for prop_spec in _WINDOWS_SERVICE_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(service, prop_1x) and getattr(service, prop_1x):
term = add_comparison_expression(getattr(service, prop_1x), object_path)
if term:
expressions.append(term)
if hasattr(service, "description_list") and service.description_list:
description_expressions = []
for d in service.description_list:
description_expressions.append(create_term("process:extensions.'windows-service-ext'.descriptions[*]",
d.condition,
make_constant(d.value)))
if description_expressions:
expressions.append(create_boolean_expression("OR", description_expressions))
if hasattr(service, "service_dll") and service.service_dll:
warn("The service_dll property of WinServiceObject is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("WinServiceObject.service_dll"))
if expressions:
return create_boolean_expression("AND", expressions)
def convert_related_object_to_pattern(ro):
if ro.id_:
new_pattern = convert_object_to_pattern(ro, ro.id_)
if new_pattern:
add_to_pattern_cache(ro.id_, new_pattern)
return new_pattern
elif ro.idref:
if id_in_pattern_cache(ro.idref):
return get_pattern_from_cache(ro.idref)
else:
if id_in_observable_mappings(ro.idref):
return convert_observable_to_pattern(get_obs_from_mapping(ro.idref))
return IdrefPlaceHolder(ro.idref)
def convert_domain_name_to_pattern(domain_name, related_objects):
pattern = [
create_term("domain-name:value", domain_name.value.condition, make_constant(domain_name.value.value))]
if related_objects:
for ro in related_objects:
if ro.relationship == "Resolved_To":
new_pattern = convert_related_object_to_pattern(ro)
if new_pattern:
if isinstance(new_pattern, IdrefPlaceHolder):
pattern.append(ComparisonExpressionForElevator("=",
"domain-name:resolves_to_refs[*]",
new_pattern))
else:
pattern.append(new_pattern.collapse_reference(
stix2.ObjectPath.make_object_path("domain-name:resolves_to_refs[*]")))
else:
warn("The %s relationship involving %s is not supported in STIX 2.0", 427, ro.relationship,
identifying_info(ro))
return create_boolean_expression("AND", pattern)
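# Illustrative example (hypothetical values, not from the original source): a CybOX
# DomainName of "example.com" with an equality condition and no related objects would
# come out of the function above rendering roughly as
#   domain-name:value = 'example.com'
# while a "Resolved_To" related object adds a comparison against
# domain-name:resolves_to_refs[*] (or collapses the related pattern onto that path).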
def convert_mutex_to_pattern(mutex):
if mutex.name:
return create_term("mutex:name", mutex.name.condition, make_constant(mutex.name.value))
else:
return None
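# Illustrative example (hypothetical value): a Mutex named "MyMutex" with condition
# "Equals" becomes the single term mutex:name = 'MyMutex'; an unnamed Mutex yields
# None and contributes nothing to the pattern.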
def convert_network_connection_to_pattern(conn):
expressions = []
if conn.layer3_protocol is not None:
expressions.append(create_term("network-traffic:protocols[*]",
conn.layer3_protocol.condition,
make_constant(conn.layer3_protocol.value.lower())))
if conn.layer4_protocol is not None:
expressions.append(create_term("network-traffic:protocols[*]",
conn.layer4_protocol.condition,
make_constant(conn.layer4_protocol.value.lower())))
if conn.layer7_protocol is not None:
expressions.append(create_term("network-traffic:protocols[*]",
conn.layer7_protocol.condition,
make_constant(conn.layer7_protocol.value.lower())))
if conn.source_socket_address is not None:
if conn.source_socket_address.port is not None:
if conn.source_socket_address.port.port_value is not None:
expressions.append(create_term("network-traffic:src_port",
conn.source_socket_address.port.port_value.condition,
stix2.IntegerConstant(int(conn.source_socket_address.port.port_value))))
if conn.source_socket_address.port.layer4_protocol is not None:
expressions.append(
create_term("network-traffic:protocols[*]",
conn.source_socket_address.port.layer4_protocol.condition,
make_constant(conn.source_socket_address.port.layer4_protocol.value.lower())))
if conn.source_socket_address.ip_address is not None:
expressions.append(
create_term("network-traffic:src_ref.value",
conn.source_socket_address.ip_address.address_value.condition,
make_constant(conn.source_socket_address.ip_address.address_value.value)))
elif conn.source_socket_address.hostname is not None:
if conn.source_socket_address.hostname.is_domain_name and conn.source_socket_address.hostname.hostname_value is not None:
expressions.append(
create_term("network-traffic:src_ref.value",
conn.source_socket_address.hostname.condition,
make_constant(conn.source_socket_address.hostname.hostname_value)))
elif (conn.source_socket_address.hostname.naming_system is not None and
any(x.value == "DNS" for x in conn.source_socket_address.hostname.naming_system)):
expressions.append(
create_term("network-traffic:src_ref.value",
conn.source_socket_address.hostname.condition,
make_constant(conn.source_socket_address.hostname.hostname_value)))
if conn.destination_socket_address is not None:
if conn.destination_socket_address.port is not None:
if conn.destination_socket_address.port.port_value is not None:
expressions.append(
create_term("network-traffic:dst_port",
conn.destination_socket_address.port.port_value.condition,
stix2.IntegerConstant(int(conn.destination_socket_address.port.port_value))))
if conn.destination_socket_address.port.layer4_protocol is not None:
expressions.append(
create_term("network-traffic:protocols[*]",
conn.destination_socket_address.port.layer4_protocol.condition,
make_constant(
conn.destination_socket_address.port.layer4_protocol.value.lower())))
if conn.destination_socket_address.ip_address is not None:
expressions.append(
create_term("network-traffic:dst_ref.value",
conn.destination_socket_address.ip_address.address_value.condition,
make_constant(conn.destination_socket_address.ip_address.address_value.value)))
elif conn.destination_socket_address.hostname is not None:
hostname = conn.destination_socket_address.hostname
if hostname.is_domain_name and hostname.hostname_value is not None:
expressions.append(
create_term("network-traffic:dst_ref.value",
conn.destination_socket_address.hostname.condition,
make_constant(conn.destination_socket_address.hostname.hostname_value)))
elif (conn.destination_socket_address.hostname.naming_system is not None and
any(x.value == "DNS" for x in conn.destination_socket_address.hostname.naming_system)):
expressions.append(
create_term("network-traffic:dst_ref.value",
conn.destination_socket_address.hostname.condition,
make_constant(conn.destination_socket_address.hostname.hostname_value)))
if conn.layer7_connections is not None:
if conn.layer7_connections.http_session is not None:
extension_expressions = convert_http_session_to_pattern(conn.layer7_connections.http_session)
if extension_expressions:
expressions.append(extension_expressions)
return create_boolean_expression("AND", expressions)
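# Illustrative sketch (hypothetical values, not part of the original source): a
# NetworkConnection with layer4_protocol "TCP", destination port 443 and destination
# IP 198.51.100.7 would be converted by the function above into an AND expression
# rendering roughly as
#   network-traffic:protocols[*] = 'tcp' AND network-traffic:dst_port = 443 AND
#   network-traffic:dst_ref.value = '198.51.100.7'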
def convert_http_client_request_to_pattern(http_request):
expressions = []
if http_request.http_request_line is not None:
if http_request.http_request_line.http_method is not None:
term = add_comparison_expression(http_request.http_request_line.http_method,
"network-traffic:extensions.'http-request-ext'.request_method")
if term:
expressions.append(term)
if http_request.http_request_line.version is not None:
term = add_comparison_expression(http_request.http_request_line.version,
"network-traffic:extensions.'http-request-ext'.request_version")
if term:
expressions.append(term)
if http_request.http_request_header is not None:
if http_request.http_request_header.parsed_header is not None:
header = http_request.http_request_header.parsed_header
for prop_spec in _NETWORK_CONNECTION_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(header, prop_1x) and getattr(header, prop_1x):
value = getattr(header, prop_1x)
# handle non-String properties
if isinstance(value, Address):
value = getattr(value, "address_value")
elif isinstance(value, HostField):
value = getattr(value, "domain_name").value
elif isinstance(value, URI):
value = value.value
term = add_comparison_expression(value, object_path)
if term:
expressions.append(term)
return create_boolean_expression("AND", expressions)
def convert_http_network_connection_extension(http):
if http.http_client_request is not None:
return convert_http_client_request_to_pattern(http.http_client_request)
_NETWORK_CONNECTION_PROPERTIES = [
["accept", "network-traffic:extensions.'http-request-ext'.request_header.Accept"],
["accept_charset", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Charset'"],
["accept_language", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Language'"],
["accept_datetime", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Datetime'"],
["accept_encoding", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Encoding'"],
["authorization", "network-traffic:extensions.'http-request-ext'.request_header.Authorization"],
["cache_control", "network-traffic:extensions.'http-request-ext'.request_header.'Cache-Control'"],
["connection", "network-traffic:extensions.'http-request-ext'.request_header.Connection"],
["cookie", "network-traffic:extensions.'http-request-ext'.request_header.Cookie"],
["content_length", "network-traffic:extensions.'http-request-ext'.request_header.'Content-Length'"],
["content_md5", "network-traffic:extensions.'http-request-ext'.request_header.'Content-MD5'"],
["content_type", "network-traffic:extensions.'http-request-ext'.request_header.'Content-Type'"],
["date", "network-traffic:extensions.'http-request-ext'.request_header.Date"],
["expect", "network-traffic:extensions.'http-request-ext'.request_header.Expect"],
["from_", "network-traffic:extensions.'http-request-ext'.request_header.From"],
["host", "network-traffic:extensions.'http-request-ext'.request_header.Host"],
["if_match", "network-traffic:extensions.'http-request-ext'.request_header.'If-Match'"],
["if_modified_since", "network-traffic:extensions.'http-request-ext'.request_header.'If-Modified-Since'"],
["if_none_match", "network-traffic:extensions.'http-request-ext'.request_header.'If-None-Match'"],
["if_range", "network-traffic:extensions.'http-request-ext'.request_header.'If-Range'"],
["if_unmodified_since", "network-traffic:extensions.'http-request-ext'.request_header.'If-Unmodified-Since'"],
["max_forwards", "network-traffic:extensions.'http-request-ext'.request_header.'Max-Forwards'"],
["pragma", "network-traffic:extensions.'http-request-ext'.request_header.Pragma"],
["proxy_authorization", "network-traffic:extensions.'http-request-ext'.request_header.'Proxy-Authorization'"],
["range", "network-traffic:extensions.'http-request-ext'.request_header.Range"],
["referer", "network-traffic:extensions.'http-request-ext'.request_header.Referer"],
["te", "network-traffic:extensions.'http-request-ext'.request_header.TE"],
["user_agent", "network-traffic:extensions.'http-request-ext'.request_header.'User-Agent'"],
["via", "network-traffic:extensions.'http-request-ext'.request_header.Via"],
["warning", "network-traffic:extensions.'http-request-ext'.request_header.Warning"],
["dnt", "network-traffic:extensions.'http-request-ext'.request_header.DNT"],
["x_requested_with", "network-traffic:extensions.'http-request-ext'.request_header.'X-Requested-With'"],
["x_forwarded_for", "network-traffic:extensions.'http-request-ext'.request_header.'X-Forwarded-For'"],
["x_att_deviceid", "network-traffic:extensions.'http-request-ext'.request_header.'X-ATT-DeviceId'"],
["x_wap_profile", "network-traffic:extensions.'http-request-ext'.request_header.'X-Wap-Profile'"],
]
def convert_network_packet_to_pattern(packet):
if packet.internet_layer:
internet_layer = packet.internet_layer
if internet_layer.ipv4 or internet_layer.ipv6:
warn("Internet_Layer/IP_Packet content not supported in STIX 2.0", 424)
else:
if internet_layer.icmpv4:
icmp_header = internet_layer.icmpv4.icmpv4_header
elif internet_layer.icmpv6:
icmp_header = internet_layer.icmpv6.icmpv6_header
else:
return None
expressions = []
if icmp_header.type_:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_hex",
icmp_header.type_.condition,
stix2.HexConstant(str(icmp_header.type_))))
if icmp_header.code:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_code",
icmp_header.code.condition,
stix2.HexConstant(str(icmp_header.code))))
return create_boolean_expression("AND", expressions)
def convert_http_session_to_pattern(session):
if session.http_request_response:
requests, responses = split_into_requests_and_responses(session.http_request_response)
if len(responses) != 0:
warn("HTTPServerResponse type is not supported in STIX 2.0", 429)
if len(requests) >= 1:
expression = convert_http_client_request_to_pattern(requests[0])
if len(requests) > 1:
warn("Only HTTP_Request_Response used for http-request-ext, using first value", 512)
return expression
def convert_socket_options_to_pattern(options):
expressions = []
for prop_name in SOCKET_OPTIONS:
prop = getattr(options, prop_name)
if prop:
expressions.append(create_term("network-traffic:extensions.'socket-ext'.options." + prop_name.upper(),
"Equals",
prop))
return create_boolean_expression("AND", expressions)
_SOCKET_MAP = {
"is_blocking": "network-traffic:extensions.'socket-ext'.is_blocking",
"is_listening": "network-traffic:extensions.'socket-ext'.is_listening",
"type_": "network-traffic:extensions.'socket-ext'.socket_type",
"domain": "network-traffic:extensions.'socket-ext'.socket_type",
"socket_descriptor": "network-traffic:extensions.'socket-ext'.socket_descriptor"
}
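# Illustrative note (hypothetical value, not in the original source): with the mapping
# above, a NetworkSocket whose socket_descriptor is 3 would contribute roughly the term
#   network-traffic:extensions.'socket-ext'.socket_descriptor = 3
# via convert_network_socket_to_pattern() below.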
def convert_network_socket_to_pattern(socket):
expressions = []
for prop_1x, object_path in _SOCKET_MAP.items():
if hasattr(socket, prop_1x) and getattr(socket, prop_1x):
term = add_comparison_expression(getattr(socket, prop_1x), object_path)
if term:
expressions.append(term)
if socket.address_family:
if socket.address_family in ADDRESS_FAMILY_ENUMERATION:
expressions.append(add_comparison_expression(socket.address_family,
"network-traffic:extensions.'socket-ext'.address_family"))
else:
warn("%s in is not a member of the %s enumeration", 627, socket.address_family, "address family")
if socket.options:
expressions.append(convert_socket_options_to_pattern(socket.options))
if socket.local_address:
warn("Network_Socket.local_address content not supported in STIX 2.0", 424)
if socket.remote_address:
warn("Network_Socket.remote_address content not supported in STIX 2.0", 424)
if socket.protocol:
expressions.append(add_comparison_expression(socket.protocol,
"network-traffic:protocols[*]"))
return create_boolean_expression("AND", expressions)
####################################################################################################################
def convert_observable_composition_to_pattern(obs_comp):
expressions = []
for obs in obs_comp.observables:
term = convert_observable_to_pattern(obs)
if term:
expressions.append(term)
if expressions:
return create_boolean_expression(obs_comp.operator, expressions)
else:
return ""
def convert_object_to_pattern(obj, obs_id):
related_objects = obj.related_objects
prop = obj.properties
expression = None
if prop:
if isinstance(prop, Address):
expression = convert_address_to_pattern(prop)
elif isinstance(prop, URI):
expression = convert_uri_to_pattern(prop)
elif isinstance(prop, EmailMessage):
expression = convert_email_message_to_pattern(prop)
elif isinstance(prop, File):
expression = convert_file_to_pattern(prop)
elif isinstance(prop, WinRegistryKey):
expression = convert_registry_key_to_pattern(prop)
elif isinstance(prop, Process):
expression = convert_process_to_pattern(prop)
elif isinstance(prop, DomainName):
expression = convert_domain_name_to_pattern(prop, related_objects)
elif isinstance(prop, Mutex):
expression = convert_mutex_to_pattern(prop)
elif isinstance(prop, NetworkConnection):
expression = convert_network_connection_to_pattern(prop)
elif isinstance(prop, Account):
expression = convert_account_to_pattern(prop)
elif isinstance(prop, HTTPSession):
expression = convert_http_session_to_pattern(prop)
elif isinstance(prop, NetworkPacket):
expression = convert_network_packet_to_pattern(prop)
elif isinstance(prop, NetworkSocket):
expression = convert_network_socket_to_pattern(prop)
else:
warn("%s found in %s cannot be converted to a pattern, yet.", 808, text_type(obj.properties), obs_id)
expression = UnconvertedTerm(obs_id)
if prop.custom_properties is not None:
object_path_root = convert_cybox_class_name_to_object_path_root_name(prop)
if object_path_root:
if expression:
expression = create_boolean_expression("AND", [expression,
convert_custom_properties(prop.custom_properties,
object_path_root)])
else:
expression = convert_custom_properties(prop.custom_properties, object_path_root)
if not expression:
warn("No pattern term was created from %s", 422, obs_id)
expression = UnconvertedTerm(obs_id)
elif obj.id_:
add_object_id_value(obj.id_, obs_id)
return expression
def match_1x_id_with_20_id(id_1x, id_20):
id_1x_split = id_1x.split("-", 1)
id_20_split = id_20.split("--")
return id_1x_split[1] == id_20_split[1]
def find_definition(idref, sdos):
for obs in sdos:
if match_1x_id_with_20_id(idref, obs["id"]):
info("Found definition for %s", 204, idref)
return obs
# warn (idref + " cannot be resolved")
return None
def negate_expression(obs):
return hasattr(obs, "negate") and obs.negate
def convert_observable_to_pattern(obs):
try:
set_dynamic_variable("current_observable", obs)
if negate_expression(obs):
warn("Negation of %s is not handled yet", 810, obs.id_)
return convert_observable_to_pattern_without_negate(obs)
finally:
pop_dynamic_variable("current_observable")
def convert_observable_to_pattern_without_negate(obs):
if obs.observable_composition is not None:
pattern = convert_observable_composition_to_pattern(obs.observable_composition)
if pattern and obs.id_:
add_to_pattern_cache(obs.id_, pattern)
return pattern
elif obs.object_ is not None:
pattern = convert_object_to_pattern(obs.object_, obs.id_)
if pattern:
add_to_pattern_cache(obs.id_, pattern)
if obs.object_.related_objects:
related_patterns = []
for o in obs.object_.related_objects:
# save pattern for later use
if o.id_ and not id_in_pattern_cache(o.id_):
new_pattern = convert_object_to_pattern(o, o.id_)
if new_pattern:
related_patterns.append(new_pattern)
add_to_pattern_cache(o.id_, new_pattern)
if pattern:
related_patterns.append(pattern)
return create_boolean_expression("AND", related_patterns)
else:
return pattern
elif obs.idref is not None:
if id_in_pattern_cache(obs.idref):
return get_pattern_from_cache(obs.idref)
else:
# resolve now if possible, and remove from observed_data
if id_in_observable_mappings(obs.idref):
return convert_observable_to_pattern(get_obs_from_mapping(obs.idref))
return IdrefPlaceHolder(obs.idref)
# patterns can contain idrefs which might need to be resolved, because the order in which the ids and idrefs appear cannot be guaranteed
def interatively_resolve_placeholder_refs():
if pattern_cache_is_empty():
return
done = False
while not done:
# collect all of the fully resolved idrefs
fully_resolved_idrefs = []
for idref, expr in get_items_from_pattern_cache():
if expr and not expr.contains_placeholder():
# no PLACEHOLDER idrefs found in the expr, means this idref is fully resolved
fully_resolved_idrefs.append(idref)
# replace only fully resolved idrefs
change_made = False
for fr_idref in fully_resolved_idrefs:
for idref, expr in get_items_from_pattern_cache():
if expr:
change_made, expr = expr.replace_placeholder_with_idref_pattern(fr_idref)
# a change will be made, which could introduce a new placeholder id into the expr
if change_made:
add_to_pattern_cache(idref, expr) # PATTERN_CACHE[idref] = expr
done = not change_made
def is_placeholder(thing):
return thing.find("PLACEHOLDER") != -1
def fix_pattern(pattern):
if not pattern_cache_is_empty():
# info(text_type(PATTERN_CACHE))
# info("pattern is: " + pattern)
if pattern and pattern.contains_placeholder():
for idref in get_ids_from_pattern_cache():
pattern.replace_placeholder_with_idref_pattern(idref)
return pattern
def convert_indicator_to_pattern(ind):
try:
set_dynamic_variable("current_indicator", ind)
if ind.negate:
warn("Negation of %s is not handled yet", 810, ind.id_)
return convert_indicator_to_pattern_without_negate(ind)
finally:
pop_dynamic_variable("current_indicator")
def convert_indicator_to_pattern_without_negate(ind):
if ind.composite_indicator_expression is not None:
pattern = convert_indicator_composition_to_pattern(ind.composite_indicator_expression)
if pattern and ind.id_:
add_to_pattern_cache(ind.id_, pattern)
return pattern
elif ind.observable is not None:
pattern = convert_observable_to_pattern(ind.observable)
if pattern:
add_to_pattern_cache(ind.id_, pattern)
return pattern
elif ind.idref is not None:
if id_in_pattern_cache(ind.idref):
return get_pattern_from_cache(ind.idref)
else:
# resolve now if possible, and remove from observed_data
if id_in_observable_mappings(ind.idref):
return convert_observable_to_pattern(get_obs_from_mapping(ind.idref))
return IdrefPlaceHolder(ind.idref)
def convert_indicator_composition_to_pattern(ind_comp):
expressions = []
for ind in ind_comp.indicators:
term = convert_indicator_to_pattern(ind)
if term:
expressions.append(term)
else:
warn("No term was yielded for %s", 422, ind.id_ or ind.idref)
if expressions:
return create_boolean_expression(ind_comp.operator, expressions)
else:
return ""
def remove_pattern_objects(bundle_instance):
all_new_ids_with_patterns = []
for old_id in get_ids_from_pattern_cache():
new_id = get_id_value(old_id)
if new_id and len(new_id) == 1:
all_new_ids_with_patterns.append(new_id[0])
if not KEEP_OBSERVABLE_DATA_USED_IN_PATTERNS:
remaining_objects = []
for obj in bundle_instance["objects"]:
if obj["type"] != "observed-data" or obj["id"] not in all_new_ids_with_patterns:
remaining_objects.append(obj)
else:
warn("%s is used as a pattern, therefore it is not included as an observed_data instance", 423,
obj["id"])
bundle_instance["objects"] = remaining_objects
if not KEEP_OBSERVABLE_DATA_USED_IN_PATTERNS:
for obj in bundle_instance["objects"]:
if obj["type"] == "report":
remaining_object_refs = []
if "object_refs" in obj:
for ident in obj["object_refs"]:
if not ident.startswith("observed-data") or ident not in all_new_ids_with_patterns:
remaining_object_refs.append(ident)
obj["object_refs"] = remaining_object_refs
# TODO: only remove indicators that were involved ONLY as sub-indicators within composite indicator expressions
# if not KEEP_INDICATORS_USED_IN_COMPOSITE_INDICATOR_EXPRESSION and "indicators" in bundle_instance:
# remaining_indicators = []
# for ind in bundle_instance["indicators"]:
# if ind["id"] not in all_new_ids_with_patterns:
# remaining_indicators.append(ind)
# bundle_instance["indicators"] = remaining_indicators
|
bsd-3-clause
| 1,290,568,712,240,709,000
| 43.676338
| 142
| 0.613797
| false
| 4.045113
| false
| false
| false
|
Osthanes/appscan_static_analyzer
|
appscan_check.py
|
1
|
36174
|
#!/usr/bin/python
#***************************************************************************
# Copyright 2015 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#***************************************************************************
import json
import logging
import logging.handlers
import os
import os.path
import sys
import time
import timeit
from datetime import datetime
from subprocess import call, Popen, PIPE
import python_utils
APP_SECURITY_SERVICE='Application Security on Cloud'
DEFAULT_SERVICE=APP_SECURITY_SERVICE
DEFAULT_SERVICE_PLAN="free"
DEFAULT_SERVICE_NAME=DEFAULT_SERVICE
DEFAULT_SCANNAME="staticscan"
DEFAULT_OLD_SCANS_TO_KEEP="5"
DEFAULT_OLD_SCANS_TO_KEEP_INT=5
# time to sleep between checks when waiting on pending jobs, in seconds
SLEEP_TIME=15
# check cli args, set globals appropriately
def parse_args ():
parsed_args = {}
parsed_args['loginonly'] = False
parsed_args['forcecleanup'] = False
parsed_args['checkstate'] = False
parsed_args['debug'] = False
parsed_args['help'] = False
for arg in sys.argv:
if arg == "--loginonly":
# only login, no scanning or submission
parsed_args['loginonly'] = True
if arg == "--forcecleanup":
# cleanup/cancel all complete jobs, and delete irx files
parsed_args['forcecleanup'] = True
if arg == "--checkstate":
# just check state of existing jobs, don't scan or submit
# any new ones
parsed_args['checkstate'] = True
if arg == "--debug":
# enable debug mode, can also be done with python_utils.DEBUG env var
parsed_args['debug'] = True
python_utils.DEBUG = "1"
if arg == "--help":
# just print help and return
parsed_args['help'] = True
return parsed_args
# print a quick usage/help statement
def print_help ():
print "usage: appscan_check.py [options]"
print
print "\toptions:"
print "\t --loginonly : get credentials and login to appscan only"
print "\t --forcecleanup : on exit, force removal of pending jobs from this run"
print "\t --checkstate : check state of existing job(s), no new submission"
print "\t --debug : get additional debug output"
print "\t --help : print this help message and exit"
print
# create a template for a current scan. this will be in the format
# "<scanname>-<version>-" where scanname comes from env var
# 'SUBMISSION_NAME', and version comes from env var 'APPLICATION_VERSION'
def get_scanname_template (include_version=True):
# check the env for name of the scan, else use default
if os.environ.get('SUBMISSION_NAME'):
scanname=os.environ.get('SUBMISSION_NAME')
elif os.environ.get('IDS_PROJECT_NAME'):
scanname=os.environ.get('IDS_PROJECT_NAME').replace(" | ", "-")
else:
scanname=DEFAULT_SCANNAME
if include_version:
# if we have an application version, append it to the scanname
if os.environ.get('APPLICATION_VERSION'):
scanname = scanname + "-" + os.environ.get('APPLICATION_VERSION')
scanname = scanname + "-"
return scanname
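# Illustrative example (hypothetical values): with SUBMISSION_NAME=myapp and
# APPLICATION_VERSION=1.2.0 the template returned above is "myapp-1.2.0-", so the
# first scan submitted by appscan_submit() below gets the name "myapp-1.2.0-0".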
# given userid and password, attempt to authenticate to appscan for
# future calls
def appscan_login (userid, password):
proc = Popen(["appscan.sh login -u " + userid + " -P " + password + ""],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if not "Authenticated successfully." in out:
raise Exception("Unable to login to Static Analysis service")
# callout to appscan to prepare a current irx file, return a set of
# the files created by the prepare
def appscan_prepare ():
# sadly, prepare doesn't tell us what file it created, so find
# out by a list compare before/after
oldIrxFiles = []
for file in os.listdir("."):
if file.endswith(".irx"):
oldIrxFiles.append(file)
# clean up the appscan client log so we can dump it on error if needed
# and only see the error from this call
logfileName = None
appscanDir = os.environ.get('APPSCAN_INSTALL_DIR')
if appscanDir:
logfileName = appscanDir+"/logs/client.log"
if os.path.isfile( logfileName ):
os.remove( logfileName )
proc = Popen(["appscan.sh prepare"],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if not "IRX file generation successful" in out:
if "An IRX file was created, but it may be incomplete" in err:
# some jar/war/ear files were not scannable, but some were.
# attempt the submission
python_utils.LOGGER.warning("Not all files could be scanned, but the scan has been submitted for those which were")
else:
if python_utils.DEBUG:
call(["grep -H \".*\" logs/*.log"], shell=True, cwd=appscanDir)
raise Exception("Unable to prepare code for analysis by Static Analysis service: " +
err)
# what files are there now?
newIrxFiles = []
for file in os.listdir("."):
if file.endswith(".irx"):
newIrxFiles.append(file)
# which files are new?
newIrxFiles = set(newIrxFiles).difference(oldIrxFiles)
logMessage = "Generated scans as file(s):"
for file in newIrxFiles:
logMessage = logMessage + "\n\t" + file
python_utils.LOGGER.info(logMessage)
return newIrxFiles
# submit a created irx file to appscan for analysis
def appscan_submit (filelist):
if filelist==None:
raise Exception("No files to analyze")
scanlist = []
index = 0
for filename in filelist:
submit_scanname = get_scanname_template() + str(index)
proc = Popen(["appscan.sh queue_analysis -f " + filename +
" -n " + submit_scanname],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
transf_found = False
for line in out.splitlines() :
python_utils.LOGGER.debug("Submit response line: " + line)
if "100% transferred" in line:
# done transferring
transf_found = True
elif not transf_found:
# not done transferring yet
continue
elif line:
# done, if line isn't empty, is an id
scanlist.append(line)
python_utils.LOGGER.info("Job for file " + filename + " was submitted as scan " + submit_scanname + " and assigned id " + line)
else:
# empty line, skip it
continue
if err:
python_utils.LOGGER.warning("Submit error response: " + str(err))
index = index + 1
return scanlist, err
# get appscan list of current jobs
def appscan_list ():
proc = Popen(["appscan.sh list"],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
scanlist = []
for line in out.splitlines() :
if "No analysis jobs" in line:
# no jobs, return empty list
python_utils.LOGGER.debug("No analysis jobs found")
return []
elif line:
# done, if line isn't empty, is an id
scanlist.append(line)
else:
# empty line, skip it
continue
python_utils.LOGGER.debug("Analysis jobs found: " + str(scanlist))
return scanlist
# translate a job state to a pretty name
# CLI now returns the string, keeping in case needed later and to have a list of possible stages
def get_state_name (state):
return {
0 : "Pending",
1 : "Starting",
2 : "Running",
3 : "FinishedRunning",
4 : "FinishedRunningWithErrors",
5 : "PendingSupport",
6 : "Ready",
7 : "ReadyIncomplete",
8 : "FailedToScan",
9 : "ManuallyStopped",
10 : "None",
11 : "Initiating",
12 : "MissingConfiguration",
13 : "PossibleMissingConfiguration"
}.get(state, "Unknown")
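# Illustrative usage (not in the original script): get_state_name(6) returns "Ready"
# and any value outside the table, e.g. 42, falls through to "Unknown".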
# translate a job state from a name to a number
def get_state_num (state):
val = {
"pending" : 0,
"starting" : 1,
"running" : 2,
"finishedrunning" : 3,
"finishedrunningwitherrors" : 4,
"pendingsupport" : 5,
"ready" : 6,
"readyincomplete" : 7,
"failedtoscan" : 8,
"manuallystopped" : 9,
"none" : 10,
"initiating" : 11,
"missingconfiguration" : 12,
"possiblemissingconfiguration" : 13
}.get(state.lower().strip(), 14)
python_utils.LOGGER.debug("Getting number for state: \""+str(state)+"\" ("+str(val)+")")
return val
# given a state, is the job completed
def get_state_completed (state):
return {
0 : False,
1 : False,
2 : False,
3 : True,
4 : True,
5 : False,
6 : True,
7 : True,
8 : True,
9 : True,
10 : True,
11 : False,
12 : True,
13 : True
}.get(get_state_num(state), True)
# given a state, was it completed successfully
def get_state_successful (state):
return {
0 : False,
1 : False,
2 : False,
3 : True,
4 : False,
5 : False,
6 : True,
7 : False,
8 : False,
9 : False,
10 : False,
11 : False,
12 : False,
13 : False
}.get(get_state_num(state), False)
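# Illustrative usage (not in the original script): for the CLI string "Ready",
# get_state_num() yields 6, so get_state_completed() and get_state_successful() are
# both True; for "FailedToScan" (8) the job counts as completed but not successful,
# which is how wait_for_scans() below tells the two outcomes apart.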
# get status of a given job
def appscan_status (jobid):
if jobid == None:
raise Exception("No jobid to check status")
proc = Popen(["appscan.sh status -i " + str(jobid)],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if "request is invalid" in err:
if python_utils.DEBUG:
python_utils.LOGGER.debug("error getting status: " + str(err))
raise Exception("Invalid jobid")
retval = str(out)
return retval
# cancel an appscan job
def appscan_cancel (jobid):
if jobid == None:
return
proc = Popen(["appscan.sh cancel -i " + str(jobid)],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
# parse a key=value line, return value
def parse_key_eq_val (line):
if line == None:
return None
eqIndex = line.find("=");
if eqIndex != -1:
return line[eqIndex+1:]
else:
return None
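# Illustrative usage (not in the original script):
#   parse_key_eq_val("JobStatus=6")    returns "6"
#   parse_key_eq_val("no equals here") returns None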
# extended info on a current appscan job. this comes back in a form
# similar to:
#NLowIssues=0
#ReadStatus=2
#NHighIssues=0
#Name=appscan.zip
#ScanEndTime=2014-11-20T13:56:04.497Z
#Progress=0
#RemainingFreeRescanMinutes=0
#ParentJobId=00000000-0000-0000-0000-000000000000
#EnableMailNotifications=false
#JobStatus=6
#NInfoIssues=0
#JobId=9b344fc7-bc70-e411-b922-005056924f9b
#NIssuesFound=0
#CreatedAt=2014-11-20T13:54:49.597Z
#UserMessage=Scan completed successfully. The report is ready.
#NMediumIssues=0
#Result=1
#
# parse it and return useful parts. in particular, returns
# a dict containing fields for "NLowIssues", "ReadStatus", et al
# per the list above
def appscan_info (jobid):
# setup default (empty) return
return_info = {}
return_info['NLowIssues'] = 0
return_info['ReadStatus'] = 0
return_info['NHighIssues'] = 0
return_info['Name'] = ""
return_info['ScanEndTime'] = None
return_info['Progress'] = 0
return_info['RemainingFreeRescanMinutes'] = 0
return_info['ParentJobId'] = ""
return_info['EnableMailNotifications'] = False
return_info['JobStatus'] = 0
return_info['NInfoIssues'] = 0
return_info['JobId'] = ""
return_info['NIssuesFound'] = 0
return_info['CreatedAt'] = None
return_info['UserMessage'] = ""
return_info['NMediumIssues'] = 0
return_info['Result'] = 0
if jobid == None:
return return_info
command = "appscan.sh info -i " + str(jobid)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
for line in out.splitlines() :
if "NLowIssues=" in line:
# number of low severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NLowIssues'] = int(tmpstr)
except ValueError:
return_info['NLowIssues']= 0
elif "NMediumIssues=" in line:
# number of medium severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NMediumIssues'] = int(tmpstr)
except ValueError:
return_info['NMediumIssues'] = 0
elif "NHighIssues=" in line:
# number of high severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NHighIssues'] = int(tmpstr)
except ValueError:
return_info['NHighIssues'] = 0
elif "NInfoIssues=" in line:
# number of info severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NInfoIssues'] = int(tmpstr)
except ValueError:
return_info['NInfoIssues'] = 0
elif "NIssuesFound=" in line:
# total number of issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NIssuesFound'] = int(tmpstr)
except ValueError:
return_info['NIssuesFound'] = 0
elif "Progress=" in line:
# current scan progress (0-100)
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['Progress'] = int(tmpstr)
except ValueError:
return_info['Progress'] = 0
elif "RemainingFreeRescanMinutes=" in line:
# what the name says
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['RemainingFreeRescanMinutes'] = int(tmpstr)
except ValueError:
return_info['RemainingFreeRescanMinutes'] = 0
elif "JobStatus=" in line:
# current job status
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['JobStatus'] = int(tmpstr)
except ValueError:
return_info['JobStatus'] = 0
elif "ReadStatus=" in line:
# not sure what this is
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['ReadStatus'] = int(tmpstr)
except ValueError:
return_info['ReadStatus'] = 0
elif "Result=" in line:
# final return code
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['Result'] = int(tmpstr)
except ValueError:
return_info['Result'] = 0
elif "ScanEndTime=" in line:
# timestamp when this scan completed
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['ScanEndTime'] = datetime.strptime(tmpstr, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return_info['ScanEndTime'] = None
elif "CreatedAt=" in line:
# timestamp when this job was created
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['CreatedAt'] = datetime.strptime(tmpstr, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return_info['CreatedAt'] = None
elif "Name=" in line:
# job name
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['Name'] = tmpstr
elif "JobId=" in line:
# job ID
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['JobId'] = tmpstr
elif "ParentJobId=" in line:
# parent job ID
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['ParentJobId'] = tmpstr
elif "UserMessage=" in line:
# user displayable message, current job state
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['UserMessage'] = tmpstr
elif "EnableMailNotifications=" in line:
# are email notifications setup (doesn't matter, we don't use it)
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
if tmpstr.lower() in ("yes", "true"):
return_info['EnableMailNotifications'] = True
else:
return_info['EnableMailNotifications'] = False
return return_info
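# Illustrative usage (based on the sample output in the comment above): calling
# appscan_info("9b344fc7-bc70-e411-b922-005056924f9b") would return a dict with,
# e.g., return_info['JobStatus'] == 6 and return_info['NHighIssues'] == 0, with any
# missing or unparsable fields left at the defaults set at the top of the function.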
# get the result file for a given job
def appscan_get_result (jobid, scan_name):
if jobid == None:
raise Exception("No jobid to get results")
# App name might have a space.
scan_name = scan_name.replace(" ", "-");
# Get the appscan zip file
proc = Popen(["appscan.sh get_result -i " + str(jobid) + " -d appscan-" + str(scan_name) + ".zip -t zip"],
shell=True, stdout=PIPE, stderr=PIPE, cwd=os.environ.get('EXT_DIR'))
out, err = proc.communicate();
print "Out = " + out
print "Err = " + err
# get the result file for a given job
def save_job_result (scan_name, job_result):
# App name might have a space.
scan_name = scan_name.replace(" ", "-");
# Store the job result summary
with open(os.environ.get('EXT_DIR') + "/appscan-" + str(scan_name) + ".json", 'w') as outfile:
json.dump(job_result, outfile, sort_keys = True)
# get the result file for a given job
def upload_results_to_dra ():
proc = Popen(["dra.sh"],
shell=True, stdout=PIPE, stderr=PIPE, cwd=os.environ.get('EXT_DIR'))
out, err = proc.communicate();
print "Out = " + out
print "Err = " + err
# if the job we would run is already up (and either pending or complete),
# we just want to get state (and wait for it if needed), not create a whole
# new submission. for the key, we use the job name, compared to the
# name template as per get_scanname_template()
def check_for_existing_job ( ignore_older_jobs = True):
alljobs = appscan_list()
if alljobs == None:
# no jobs, ours can't be there
return None
# get the name we're looking for
job_name = get_scanname_template( include_version = ignore_older_jobs )
joblist = []
found = False
for jobid in alljobs:
results = appscan_info(jobid)
python_utils.LOGGER.debug("Results for "+jobid+": "+ str(results))
if results["Name"].startswith(job_name):
joblist.append(jobid)
found = True
if found:
return joblist
else:
return None
# don't want to have too many old copies of the job hanging out, it
# makes a mess and is hard to read. prune old copies here
def cleanup_old_jobs ():
# see how many copies we're going to keep
try:
count_to_keep = int(os.getenv('OLD_SCANS_TO_KEEP', DEFAULT_OLD_SCANS_TO_KEEP))
except ValueError:
count_to_keep = DEFAULT_OLD_SCANS_TO_KEEP_INT
# if the count to keep is 0 or negative, keep all copies
if count_to_keep < 1:
return
joblist = check_for_existing_job( ignore_older_jobs = False )
if joblist == None or len(joblist) <= count_to_keep:
# related job count <= number of jobs to keep, do nothing
return
# too many jobs! remove the oldest ones (cancel if necessary)
if python_utils.DEBUG:
python_utils.LOGGER.debug("Found " + str(len(joblist)) + " jobs pending with limit " + str(count_to_keep))
# make a sorted list of these jobs (yes, this is O(n**2) algorithm, but
# this should always be a fairly short list of scans)
s_jobs = []
for job in joblist:
results = appscan_info(job)
# if no results or time, this is not a valid job, skip it
if (results['CreatedAt'] == None):
continue
# put it in the right spot in the list
i = 0
while i < len(s_jobs):
if results['CreatedAt'] > s_jobs[i]['CreatedAt']:
# found right place
if python_utils.DEBUG:
python_utils.LOGGER.debug("Insert job " + str(results['Name']) + " at index " + str(i) + " for timestamp " + str(results['CreatedAt']))
s_jobs.insert(i, results)
break
i += 1
if i==len(s_jobs):
# right place is the end
if python_utils.DEBUG:
python_utils.LOGGER.debug("Append job " + str(results['Name']) + " at index " + str(i) + " for timestamp " + str(results['CreatedAt']))
s_jobs.append(results)
# now cleanup all jobs after the 'n' we're supposed to keep
for index, res in enumerate(s_jobs):
if index<count_to_keep:
if python_utils.DEBUG:
python_utils.LOGGER.debug("keeping: " + str(index) + " \"" + res['Name'] + "\" : " + str(res['JobId']))
else:
if python_utils.DEBUG:
python_utils.LOGGER.debug("cleaning: " + str(index) + " \"" + res['Name'] + "\" : " + str(res['JobId']))
appscan_cancel(res['JobId'])
# and we're done
# wait for a given set of scans to complete and, if successful,
# download the results
def wait_for_scans (joblist):
# create array of the job results in json format
jobResults = []
# were all jobs completed on return
all_jobs_complete = True
# number of high sev issues in completed jobs
high_issue_count = 0
med_issue_count=0
python_utils.LOGGER.debug("Waiting for joblist: "+str(joblist))
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
for jobid in joblist:
try:
while True:
state = appscan_status(jobid)
python_utils.LOGGER.info("Job " + str(jobid) + " in state " + state)
if get_state_completed(state):
results = appscan_info(jobid)
if get_state_successful(state):
high_issue_count += results["NHighIssues"]
med_issue_count += results["NMediumIssues"]
python_utils.LOGGER.info("Analysis successful (" + results["Name"] + ")")
#print "\tOther Message : " + msg
job_result = { 'job_name': results["Name"],
'job_id': jobid,
'status': "successful",
'high_severity_issues': int(str(results["NHighIssues"])),
'medium_severity_issues': int(str(results["NMediumIssues"])),
'low_severity_issues': int(str(results["NLowIssues"])),
'info_severity_issues': int(str(results["NInfoIssues"])),
'url': dash}
# Search for file name results["Name"] + "*.zip"
if os.environ.get('DRA_IS_PRESENT') == "1":
appscan_get_result(jobid, results["Name"]);
save_job_result(results["Name"], job_result);
#appscan_get_result(jobid)
print python_utils.LABEL_GREEN + python_utils.STARS
print "Analysis successful for job \"" + results["Name"] + "\""
print "\tHigh Severity Issues : " + str(results["NHighIssues"])
print "\tMedium Severity Issues : " + str(results["NMediumIssues"])
print "\tLow Severity Issues : " + str(results["NLowIssues"])
print "\tInfo Severity Issues : " + str(results["NInfoIssues"])
if dash != None:
print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
f = open("result_url","w")
f.write(dash)
f.close()
print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR
# append results to the jobResults for the json format
jobResults.append(job_result)
else:
python_utils.LOGGER.info("Analysis unsuccessful (" + results["Name"] + ") with message \"" + results["UserMessage"] + "\"")
# append results to the jobResults for the json format
jobResults.append({'job_name': results["Name"],
'job_id': jobid,
'status': "unsuccessful"})
break
else:
time_left = python_utils.get_remaining_wait_time()
if (time_left > SLEEP_TIME):
time.sleep(SLEEP_TIME)
else:
# ran out of time, flag that at least one job didn't complete
all_jobs_complete = False
# get what info we can on this job
results = appscan_info(jobid)
# notify the user
print python_utils.LABEL_RED + python_utils.STARS
print "Analysis incomplete for job \"" + results["Name"] + "\""
print "\t" + str(results["Progress"]) + "% complete"
if dash != None:
print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
f = open("result_url","w")
f.write(dash)
f.close()
print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
print python_utils.STARS + python_utils.LABEL_NO_COLOR
# append results to the jobResults for the json format
jobResults.append({'job_name': results["Name"],
'job_id': jobid,
'status': "incomplete",
'percentage_complete': int(str(results["Progress"]))})
# and continue to get state for other jobs
break
except Exception, e:
# bad id, skip it
if python_utils.DEBUG:
python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))
# generate appscan-result.json file
appscan_result = {'all_jobs_complete': all_jobs_complete,
'high_issue_count': high_issue_count,
'medium_issue_count': med_issue_count,
'job_results': jobResults}
appscan_result_file = './appscan-result.json'
with open(appscan_result_file, 'w') as outfile:
json.dump(appscan_result, outfile, sort_keys = True)
if os.environ.get('DRA_IS_PRESENT') == "1":
upload_results_to_dra()
return all_jobs_complete, high_issue_count, med_issue_count
# begin main execution sequence
try:
parsed_args = parse_args()
if parsed_args['help']:
print_help()
sys.exit(0)
python_utils.LOGGER = python_utils.setup_logging()
# send slack notification
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
command='{path}/utilities/sendMessage.sh -l info -m \"Starting static security scan\"'.format(path=python_utils.EXT_DIR)
if python_utils.DEBUG:
print "running command " + command
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
else:
if python_utils.DEBUG:
print "sendMessage.sh not found, notifications not attempted"
python_utils.WAIT_TIME = python_utils.get_remaining_wait_time(first = True)
python_utils.LOGGER.info("Getting credentials for Static Analysis service")
creds = python_utils.get_credentials_for_non_binding_service(service=APP_SECURITY_SERVICE)
python_utils.LOGGER.info("Connecting to Static Analysis service")
appscan_login(creds['bindingid'],creds['password'])
# allow testing connection without full job scan and submission
if parsed_args['loginonly']:
python_utils.LOGGER.info("LoginOnly set, login complete, exiting")
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(0)
# if checkstate, don't really do a scan, just check state of current outstanding ones
if parsed_args['checkstate']:
# for checkstate, don't wait, just check current
python_utils.WAIT_TIME = 0
# see if we have related jobs
joblist = check_for_existing_job()
if joblist == None:
# no related jobs, get whole list
joblist = appscan_list()
else:
# if the job we would run is already up (and either pending or complete),
# we just want to get state (and wait for it if needed), not create a whole
# new submission
joblist = check_for_existing_job()
if joblist == None:
python_utils.LOGGER.info("Scanning for code submission")
files_to_submit = appscan_prepare()
python_utils.LOGGER.info("Submitting scans for analysis")
joblist, errMsg = appscan_submit(files_to_submit)
if (not joblist) or len(joblist) < len(files_to_submit):
if (not errMsg):
errMsg = "Check status of existing scans."
#Error, we didn't return as many jobs as we should have
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
command='{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> could not successfully submit scan. {errMsg}\"'.format(path=python_utils.EXT_DIR,url=dash,errMsg=errMsg)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
python_utils.LOGGER.error('ERROR: could not successfully submit scan. {errMsg} {url}'.format(url=dash,errMsg=errMsg))
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(4)
python_utils.LOGGER.info("Waiting for analysis to complete")
else:
python_utils.LOGGER.info("Existing job found, connecting")
# check on pending jobs, waiting if appropriate
all_jobs_complete, high_issue_count, med_issue_count = wait_for_scans(joblist)
# force cleanup of all?
if parsed_args['forcecleanup']:
# cleanup the jobs we launched (since they're complete)
print "Cleaning up"
for job in joblist:
appscan_cancel(job)
# and cleanup the submitted irx files
for file in files_to_submit:
if os.path.isfile(file):
os.remove(file)
if os.path.isfile(file+".log"):
os.remove(file+".log")
else:
# cleanup old copies of this job
cleanup_old_jobs()
# if we didn't successfully complete jobs, return that we timed out
if not all_jobs_complete:
# send slack notification
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> did not complete within {wait} minutes. Stage will need to be re-run after the scan completes.\"'.format(path=python_utils.EXT_DIR,url=dash,wait=python_utils.FULL_WAIT_TIME)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(2)
else:
if high_issue_count > 0:
# send slack notification
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> completed with {issues} high issues detected in the application.\"'.format(path=python_utils.EXT_DIR,url=dash, issues=high_issue_count)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(3)
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
if med_issue_count > 0:
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='SLACK_COLOR=\"warning\" {path}/utilities/sendMessage.sh -l good -m \"<{url}|Static security scan> completed with no major issues.\"'.format(path=python_utils.EXT_DIR,url=dash)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
else:
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='{path}/utilities/sendMessage.sh -l good -m \"<{url}|Static security scan> completed with no major issues.\"'.format(path=python_utils.EXT_DIR,url=dash)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(0)
except Exception, e:
python_utils.LOGGER.warning("Exception received", exc_info=e)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(1)
|
apache-2.0
| -3,397,173,683,578,174,000
| 38.927152
| 268
| 0.565931
| false
| 4.027837
| false
| false
| false
|
braams/shtoom
|
shtoom/ui/tkui/popups.py
|
1
|
7401
|
from Tkinter import Toplevel, Tk
if __name__ == "__main__":
_ = lambda x:x
class Popup(Toplevel):
deferred = None
parent = None
def __init__(self, parent, addnl=None):
Toplevel.__init__(self)
self.initial_focus = self
self.parent = parent
self.addnl = addnl
self.body()
self.title('popup window')
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.showWindow()
def body(self):
pass
def cancel(self):
self.hideWindow()
if self.deferred:
d, self.deferred = self.deferred, None
if self.addnl is None:
d.callback(None)
else:
d.callback((None,self.addnl))
self.addnl = None
def getResult(self):
return None
def selected(self, option=None):
if option is None:
option = self.getResult()
self.hideWindow()
if self.deferred:
d, self.deferred = self.deferred, None
if self.addnl is None:
d.callback(option)
else:
d.callback((option,self.addnl))
self.addnl = None
def showWindow(self):
self.transient(self.parent)
self.geometry("+%d+%d" % (self.parent.winfo_rootx()+50,
self.parent.winfo_rooty()+50))
def hideWindow(self):
Toplevel.destroy(self)
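# Illustrative usage (hypothetical, mirroring the __main__ demo at the bottom of this
# file): a caller creates a twisted Deferred, passes it to a Popup subclass such as
# Dialog(parent, d, 'message', ('OK', 'Cancel')), and d fires with the clicked button
# label (or None on cancel; a (result, addnl) tuple when addnl is supplied).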
class Dialog(Popup):
def __init__(self, parent, deferred, message, buttons, addnl=None):
self.message = message
self.buttons = buttons
self.deferred = deferred
Popup.__init__(self, parent, addnl)
def body(self):
from Tkinter import NW, E, Frame, Label, Button
self.top = Frame(self)
self.top.grid(row=1,column=1,sticky=E)
self.label = Label(self.top, text=self.message, justify='center')
self.label.grid(row=1, column=1, padx=5, pady=5,
columnspan=len(self.buttons),sticky=NW)
for n, b in enumerate(self.buttons):
b = Button(self.top, text=b, command=lambda b=b: self.selected(b))
b.grid(row=2, column=n, sticky=NW, pady=5, padx=5)
if self.initial_focus == self:
self.initial_focus = b
b.focus_set()
class AuthDialog(Popup):
message = _('Enter username and password\nfor "%(method)s" at "%(realm)s"')
def __init__(self, parent, deferred, method, realm, addnl=None):
self.deferred = deferred
self.method = method
self.realm = realm
self._saveOK = False
Popup.__init__(self, parent, addnl)
def _saveBoolean(self, *value):
self._saveOK = not self._saveOK
def getResult(self):
return (self.uentry.get(), self.pentry.get(), self._saveOK)
def body(self):
print "auth body"
from Tkinter import NW, E, W, Frame, Label, Button, Entry, Checkbutton
defargs = { 'padx':5, 'pady':5, 'sticky':W }
self.top = Frame(self)
self.top.grid(row=1,column=1,sticky=NW)
msg = self.message % { 'realm':self.realm, 'method':self.method }
self.label = Label(self.top, text=msg, justify='center')
self.label.grid(row=1, column=1, columnspan=4, **defargs)
self.ulabel = Label(self.top, text=_('User Name')+':', justify='left')
self.ulabel.grid(row=2, column=1, columnspan=2, **defargs)
self.uentry = Entry(self.top)
self.uentry.grid(row=2, column=3, columnspan=2, **defargs)
self.uentry.focus_set()
self.plabel = Label(self.top, text=_('Password')+':', justify='left')
self.plabel.grid(row=3, column=1, columnspan=2, **defargs)
self.pentry = Entry(self.top, show="*")
self.pentry.grid(row=3, column=3, columnspan=2, **defargs)
self._saveOK = False
self.saveCheck = Checkbutton(self.top, command=self._saveBoolean)
self.saveCheck.grid(row=4, column=1, columnspan=1, **defargs)
self.savelabel = Label(self.top,
text=_('Save this username and password'))
self.savelabel.grid(row=4, column=2, columnspan=3, **defargs)
defargs['sticky'] = W
self.cancelb = Button(self.top, text=_('Cancel'), command=self.cancel)
self.cancelb.grid(row=5, column=3, columnspan=1, **defargs)
self.okb = Button(self.top, text=_('OK'), command=self.selected)
self.okb.grid(row=5, column=4, columnspan=1, **defargs)
class MovingDialog(Dialog):
"A Dialog that slides in on the bottom right"
# XXX Tk doesn't seem to want to allow the geometry to go off-screen :-(
finalOffset = 10
def showWindow(self):
# Make this an override-redirect
self.overrideredirect(1)
self._x, self._y = self.winfo_width(), self.winfo_height()
if self._x == 1 or self._y == 1:
# sometimes we're called before being laid out, argh
self._x = self._y = None
# screen size
self._sx = self.parent.winfo_screenwidth()
self._sy = self.parent.winfo_screenheight()
# final positions
if self._x is not None:
self._fx = self._sx - self._x - self.finalOffset
self._fy = self._sy - self._y - self.finalOffset
            # start off-screen to the right at the final y, then slide in
            self.geometry("+%d+%d" % (self._sx, self._fy))
else:
# Not laid out yet.
self.geometry("+%d+%d" % (self._sx, self._sy))
reactor.callLater(0.01, self._moveWindow)
def _moveWindow(self):
if self._x is None:
x, y = self.winfo_rootx(), self.winfo_rooty()
self._x, self._y = self.winfo_width(), self.winfo_height()
self._fx = self._sx - self._x - self.finalOffset
self._fy = self._sy - self._y - self.finalOffset
print "final",(self._fx, self._fy)
newx = self._sx
newy = self._fy
else:
x, y = self.winfo_rootx(), self.winfo_rooty()
newx, newy = x - 2, y
print "window/geom", (self._x, self._y),(x,y)
if newx < self._fx:
newx = self._fx
self.geometry("+%d+%d" % (newx, newy))
if newx > self._fx:
print "move",(newx, newy), (self._fx, self._fy)
reactor.callLater(0.02, self._moveWindow)
def hideWindow(self):
Toplevel.destroy(self)
if __name__ == "__main__":
from twisted.internet.task import LoopingCall
from twisted.internet import defer
from twisted.internet import tksupport, reactor
def mainWindow():
global main
main = Tk(className='shtoom')
tksupport.install(main)
def optionClicked(option):
print "got option", option
reactor.stop()
def popupWindow():
global main
d = defer.Deferred()
popup = MovingDialog(main, d, 'hello world', ('OK', 'Cancel'))
d.addCallback(optionClicked)
def oops(failure):
print "arg", failure
def popupAuth():
print "popup"
d = defer.Deferred()
popup = AuthDialog(main, d, 'INVITE', 'fwd.pulver.com')
d.addCallback(optionClicked)
d.addErrback(oops)
def ping():
print "ping"
p = LoopingCall(ping)
p.start(0.5)
reactor.callLater(0, mainWindow)
reactor.callLater(1, popupAuth)
reactor.run()
license: lgpl-2.1 | hash: 8,494,601,776,254,829,000 | line_mean: 31.460526 | line_max: 79 | alpha_frac: 0.563437 | autogenerated: false | ratio: 3.556463 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: shiaki/iterative-modelling | path: src/pcs_snap.py | copies: 1 | size: 3204 | content:
#!/usr/bin/python
work_dir = ''
import numpy as np
from scipy.io import FortranFile as ufmt
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# See GALAXY 14.50 Manual, Sec. 9.2, P54
header_dtype = [('n1', '<i4'), ('n2', '<i4'), ('n3', '<i4'),
('ncoor', '<i4'), ('np', '<i4'), ('time', '<f4'),
('pm', '<f4'), ('pertbn', '<i4')]
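# Field meanings, as inferred from how the fields are used below and the
# manual section cited above: n1/n2/n3 are particle counts for components
# 1-3, ncoor is the number of phase-space coordinates per particle
# (6: x, y, z, vx, vy, vz), np is the record (chunk) length in particles,
# time is the snapshot time, pm the particle mass, pertbn a perturbation flag.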
def save_snap_galaxy_pcs(filename, snap):
# unpack snapshot
cps = snap['cps']
# Get ptcl number
n1, n2, n3 = 0, 0, 0
if(cps.has_key('C1')): n1 = cps['C1']['N_pcs']
if(cps.has_key('C2')): n2 = cps['C2']['N_pcs']
if(cps.has_key('C3')): n3 = cps['C3']['N_pcs']
N_pcs = n1 + n2 + n3
# Make array
pcs = np.empty(shape = (N_pcs, 6), dtype = 'f4')
if n1 != 0: pcs[:n1] = cps['C1']['pcs']
if n2 != 0: pcs[n1: n1 + n2] = cps['C2']['pcs']
    if n3 != 0: pcs[n1 + n2: n1 + n2 + n3] = cps['C3']['pcs']
# prepare header,
header = np.empty(1, dtype = header_dtype)
header[0]['n1'] = n1
header[0]['n2'] = n2
header[0]['n3'] = n3
header[0]['ncoor'] = 6
header[0]['np'] = 5000
header[0]['time'] = snap['time']
header[0]['pm'] = snap['pm']
header[0]['pertbn'] = 0
# open a file, write the header
pcs_fs = ufmt(filename, 'w')
pcs_fs.write_record(header)
# write pcs array in batches of 5k ptcls
N_put, chunk_size = 0, 5000 * 6
pcs = pcs.reshape((-1,)) # into 1d array
while N_put < N_pcs * 6:
chunk_t = pcs[N_put: N_put + chunk_size]
pcs_fs.write_record(chunk_t)
N_put += chunk_t.size
pcs_fs.close()
return 0
def read_snap_galaxy_pcs(filename):
pcs_ds = ufmt(filename, 'r')
header = pcs_ds.read_record(dtype = header_dtype)[0]
# read header info / GALAXY 14.50 Manual, 9.2
n1, n2, n3 = header['n1'], header['n2'], header['n3']
N_pcs = n1 + n2 + n3
chunk_size = header['ncoor'] * header['np']
# assume 3D problem with equal-mass particles for each component
assert header['ncoor'] == 6
# read ptcls in batches
N_get = 0
pcs = np.empty(N_pcs * 6, dtype = 'f4')
while N_get < N_pcs * 6:
chunk_t = pcs_ds.read_reals(dtype = 'f4')
pcs[N_get: N_get + chunk_size] = chunk_t
N_get += chunk_t.size
pcs = pcs.reshape((-1, 6))
pcs_ds.close()
# Make them into components
snap = {'cps' : {},
'pm' : header['pm'],
'time': header['time']}
    if n1 != 0: # component 1 has ptcls
snap['cps']['C1'] = {'N_pcs': n1,
'pm' : header['pm'],
'pcs' : pcs[:n1]}
if n2 != 0: # component 2 has ptcls
snap['cps']['C2'] = {'N_pcs': n2,
'pm' : header['pm'],
'pcs' : pcs[n1: n1 + n2]}
if n3 != 0: # component 3 has ptcls
snap['cps']['C3'] = {'N_pcs': n3,
'pm' : header['pm'],
'pcs' : pcs[n1 + n2: n1 + n2 + n3]}
return snap
# diff test
if False:
import os # for diff
dic = read_snap_galaxy_pcs('run999.pcs0')
save_snap_galaxy_pcs('test.pcs0', dic)
df = os.system('diff run999.pcs0 test.pcs0')
if(df): print "diff test failed."
else: print "diff test passed."
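# Hypothetical quick-look helper (not part of the original module): it only
# uses read_snap_galaxy_pcs() plus the matplotlib imports made under __main__
# above, so it is intended to be called when the file is run as a script.
# The file name and component key here are assumptions.
def quicklook_xy(filename, comp = 'C1'):
    snap = read_snap_galaxy_pcs(filename)
    xy = snap['cps'][comp]['pcs'][:, :2]   # x, y positions of the component
    plt.hist2d(xy[:, 0], xy[:, 1], bins = 256, norm = LogNorm())
    plt.gca().set_aspect('equal')
    plt.title('t = %g' % snap['time'])
    plt.show()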
license: bsd-3-clause | hash: -3,170,242,807,777,087,500 | line_mean: 25.92437 | line_max: 66 | alpha_frac: 0.522784 | autogenerated: false | ratio: 2.577635 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: ParticulateSolutions/django-sofortueberweisung | path: setup.py | copies: 1 | size: 2687 | content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if 'tests' not in dirnames and not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
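# Illustrative example (hypothetical file layout, not part of the original
# setup script): for a tree such as
#   django_sofortueberweisung/__init__.py
#   django_sofortueberweisung/templates/sofort/payment_form.html
# get_packages('django_sofortueberweisung') would return
#   ['django_sofortueberweisung']
# and get_package_data('django_sofortueberweisung') would return
#   {'django_sofortueberweisung': ['templates/sofort/payment_form.html']}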
REQUIREMENTS = [
'Django>=1.8',
'xmltodict>=0.9.2',
'six>=1.10.0'
]
version = get_version('django_sofortueberweisung')
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='django-sofortueberweisung',
author='Particulate Solutions GmbH',
author_email='tech@particulate.me',
description=u'Django integration of Sofort.com',
version=version,
url='https://github.com/ParticulateSolutions/django-sofortueberweisung',
packages=get_packages('django_sofortueberweisung'),
package_data=get_package_data('django_sofortueberweisung'),
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'],
install_requires=REQUIREMENTS,
zip_safe=False)
license: mit | hash: -1,253,928,873,854,482,700 | line_mean: 30.768293 | line_max: 100 | alpha_frac: 0.605136 | autogenerated: false | ratio: 3.833096 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: olynch/dicebag | path: dicebag.py | copies: 1 | size: 3627 | content:
#!/usr/bin/env python3
import random, pickle, sys, cmd
class Dice:
""" Contains x dice with n sides, or a plain modifier """
def __init__(self, dice):
""" Either takes in a string with a modifier, such as +4, or a dice description, such as 2d8 """
if dice[0] in ("+", "-"):
self.mod = int(dice)
self.num, self.sides = None, None
else:
self.num, self.sides = map(int, dice.split("d"))
self.mod = None
def roll(self):
""" rolls the dice, or just returns the modifier """
if self.mod != None:
return self.mod
else:
return sum([random.randrange(1, self.sides + 1) for x in range(self.num)])
def __str__(self):
if self.mod != None:
			if self.mod < 0:
				# a negative modifier already carries its minus sign
				return str(self.mod)
else:
return "+" + str(self.mod)
return "+" + str(self.num) + "d" + str(self.sides)
class Roll:
""" Contains a set of dice and modifiers, provides a roll method to roll all its dice """
def __init__(self, desc_str):
desc = desc_str.split(" ")
self.dice_list = list(map(Dice, desc))
def roll(self):
return sum([x.roll() for x in self.dice_list])
def __str__(self):
return "".join(list(map(str, self.dice_list)))
def parse(args):
return args.split(" ")
class DiceInterpreter(cmd.Cmd):
""" The command line interface to the Roll class
Provides a dictionary that users can set and delete keys in, each key is a Roll
that users can roll. Users can also just specify a roll description on the command line, like 2d6 +10
Also provides a facility for saving the dictionary and opening it up again."""
prompt = "dice> "
DICE_PREFIX = ("+", "-", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
def preloop(self):
""" Initializes the rolls dictionary, possibly with a file passed as an argument """
self.rolls = {}
if len(sys.argv) > 1:
self.pickled_rolls = sys.argv[1]
self.rolls = pickle.load(open(self.pickled_rolls, 'rb'))
self.prompt = self.pickled_rolls + "> "
else:
self.pickled_rolls = None
def do_open(self, args):
""" Read a file into the rolls dictionary """
args = parse(args)
self.rolls = pickle.load(open(args[0], 'rb'))
def do_list(self, args):
""" List the contents of the rolls dictionary """
args = parse(args)
print(self.rolls)
def do_new(self, args):
""" Add a new Roll to the dictionary
The first argument is the name of the roll, the rest are the specifications. """
args = parse(args)
self.rolls[args[0]] = Roll(" ".join(args[1:]))
def do_del(self, args):
""" Deletes a roll from the dictionary
The first argument is the name of the roll """
args = parse(args)
del self.rolls[args[0]]
def default(self, line):
self.do_roll(line)
def do_roll(self, args):
""" Roll the specified rolls """
args = parse(args)
acc = 0
acc_str = ""
for dice in args:
if dice in self.rolls.keys():
acc_str += " " + str(self.rolls[dice])
acc += self.rolls[dice].roll()
elif dice[0] in self.DICE_PREFIX:
temp_dice = Dice(dice)
acc_str += " " + str(temp_dice)
acc += temp_dice.roll()
else:
print("A Roll of that name could not be found")
return
print(acc_str)
print(acc)
def do_exit(self, args):
""" Save the rolls dictionary, if desired, and then exit. """
return True
def postloop(self):
if self.pickled_rolls == None:
will_save = input("Do you wish to save (y/n): ")
if will_save != "n":
self.pickled_rolls = input("Where do you wish to save to: ")
pickle.dump(self.rolls, open(str(self.pickled_rolls), 'wb'))
else:
pickle.dump(self.rolls, open(str(self.pickled_rolls), 'wb'))
if __name__ == "__main__":
DiceInterpreter().cmdloop()
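# Illustrative session (not captured from a real run; the rolled totals are
# random and the exact spacing of the echoed roll may differ):
#   dice> new attack 1d20 +5
#   dice> attack
#   +1d20+5
#   18
#   dice> del attack
#   dice> exit
#   Do you wish to save (y/n): n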
license: mit | hash: 1,283,960,359,057,703,400 | line_mean: 29.225 | line_max: 102 | alpha_frac: 0.632479 | autogenerated: false | ratio: 2.844706 | config_test: false | has_no_keywords: false | few_assignments: false