code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 09:29:56 2015
@author: monteiro
"""
from thermopy.iapws import Water
from thermopy.units import Pressure, Temperature
def test_iapws():
    """Check the Water class against the IAPWS-IF97 verification tables.

    Every numeric constant below comes from the verification tables in the
    IAPWS-IF97 release document; see the thermopy references for details.
    Verification of some properties (specific volume/cv for regions 2, 3
    and 5, among others) was never enabled and remains TODO.
    """
    # saturation temperature for a given pressure
    assert round(Water(1e5, 373.15).temperature_saturation(0.1e6), 6) == 372.755919
    assert round(Water(1e5, 373.15).temperature_saturation(1e6), 6) == 453.035632
    assert round(Water(1e5, 373.15).temperature_saturation(10e6), 6) == 584.149488

    # saturation pressure for a given temperature
    assert round(Water(1e5, 373.15).pressure_saturation(300).MPa, 11) == 0.00353658941
    assert round(Water(1e5, 373.15).pressure_saturation(500).MPa, 8) == 2.63889776
    assert round(Water(1e5, 373.15).pressure_saturation(600).MPa, 7) == 12.3443146

    # region classification for arbitrary points inside each region;
    # region 4 does not exist as a region, it is the saturation line
    point_in_region1 = (Pressure(20e6), Temperature(300))
    point_in_region2 = (Pressure(1e5), Temperature(373.15))
    point_in_region3 = (Pressure(40e6), Temperature(700))
    point_in_region5 = (Pressure(20e6), Temperature(1500))
    assert Water(*point_in_region1)._is_in_region() == 1
    assert Water(*point_in_region2)._is_in_region() == 2
    assert Water(*point_in_region3)._is_in_region() == 3
    assert Water(*point_in_region5)._is_in_region() == 5

    # ------------------------------------------------------------ region 1
    # specific volume
    assert round(Water(3e6, 300, massic_basis=True).specific_volume(), 11) == 0.00100215168
    assert round(Water(80e6, 300, massic_basis=True).specific_volume(), 12) == 0.000971180894
    assert round(Water(3e6, 500, massic_basis=True).specific_volume(), 11) == 0.00120241800
    # internal energy
    assert round(Water(3e6, 300, massic_basis=True).internal_energy(), 6) == 112.324818
    assert round(Water(80e6, 300, massic_basis=True).internal_energy(), 6) == 106.448356
    assert round(Water(3e6, 500, massic_basis=True).internal_energy(), 6) == 971.934985
    # entropy
    assert round(Water(3e6, 300, massic_basis=True).entropy(), 9) == 0.392294792
    assert round(Water(80e6, 300, massic_basis=True).entropy(), 9) == 0.368563852
    assert round(Water(3e6, 500, massic_basis=True).entropy(), 8) == 2.58041912
    # enthalpy
    assert round(Water(3e6, 300, massic_basis=True).enthalpy(), 6) == 115.331273
    assert round(Water(80e6, 300, massic_basis=True).enthalpy(), 6) == 184.142828
    assert round(Water(3e6, 500, massic_basis=True).enthalpy(), 6) == 975.542239
    # isobaric heat capacity (cp); cv checks are still TODO
    assert round(Water(3e6, 300, massic_basis=True).heat_capacity(), 8) == 4.17301218
    assert round(Water(80e6, 300, massic_basis=True).heat_capacity(), 8) == 4.01008987
    assert round(Water(3e6, 500, massic_basis=True).heat_capacity(), 8) == 4.65580682
    # speed of sound
    assert round(Water(3e6, 300, massic_basis=True).speed_of_sound(), 5) == 1507.73921
    assert round(Water(80e6, 300, massic_basis=True).speed_of_sound(), 5) == 1634.69054
    assert round(Water(3e6, 500, massic_basis=True).speed_of_sound(), 5) == 1240.71337

    # ------------------------------------------------------------ region 2
    # specific volume
    assert round(Water(3500, 300, massic_basis=True).specific_volume(), 7) == 39.4913866
    assert round(Water(3500, 700, massic_basis=True).specific_volume(), 7) == 92.3015898
    assert round(Water(30e6, 700, massic_basis=True).specific_volume(), 11) == 0.00542946619
    # internal energy
    assert round(Water(3500, 300, massic_basis=True).internal_energy(), 5) == 2411.69160
    assert round(Water(3500, 700, massic_basis=True).internal_energy(), 5) == 3012.62819
    assert round(Water(30e6, 700, massic_basis=True).internal_energy(), 5) == 2468.61076
    # entropy
    assert round(Water(3500, 300, massic_basis=True).entropy(), 8) == 8.52238967
    assert round(Water(3500, 700, massic_basis=True).entropy(), 7) == 10.1749996
    assert round(Water(30e6, 700, massic_basis=True).entropy(), 8) == 5.17540298
    # enthalpy
    assert round(Water(3500, 300, massic_basis=True).enthalpy(), 5) == 2549.91145
    assert round(Water(3500, 700, massic_basis=True).enthalpy(), 5) == 3335.68375
    assert round(Water(30e6, 700, massic_basis=True).enthalpy(), 5) == 2631.49474

    # ------------------------------------------------------------ region 3
    # enthalpy; the middle reference value is only reproduced to 3 decimal
    # places, so both sides are rounded at the SAME precision -- the
    # original compared round(x, 5) against round(2375.12401, 3), which
    # can essentially never be equal (bug fix)
    assert round(Water(25.5837018e6, 650, massic_basis=True).enthalpy(), 5) == 1863.43019
    assert round(Water(22.2930643e6, 650, massic_basis=True).enthalpy(), 3) == round(2375.12401, 3)
    assert round(Water(78.3095639e6, 750, massic_basis=True).enthalpy(), 5) == 2258.68845

    # region 4 has no property checks: it is the saturation line, already
    # covered by the temperature/pressure saturation asserts above

    # ------------------------------------------------------------ region 5
    # enthalpy
    assert round(Water(0.5e6, 1500, massic_basis=True).enthalpy(), 5) == 5219.76855
    assert round(Water(30e6, 1500, massic_basis=True).enthalpy(), 5) == 5167.23514
    assert round(Water(30e6, 2000, massic_basis=True).enthalpy(), 5) == 6571.22604
# other tests
def triple_point_test():
    """At the triple point of water (611.657 Pa, 273.16 K) the IAPWS
    formulation anchors internal energy and entropy at zero; allow 1e-5
    numerical slack.

    NOTE(review): the name does not start with ``test_`` so pytest will not
    collect it automatically -- confirm whether that is intentional.
    """
    water_at_triple_point = Water(611.657, 273.16)
    for property_fn in (water_at_triple_point.internal_energy,
                        water_at_triple_point.entropy):
        assert property_fn() < 1e-5
| guillemborrell/Thermopy | test/test_iapws.py | Python | bsd-3-clause | 9,460 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mysql.connector
#from token find userId
#return 0 for error
def findUser(userToken, cnx):
    """Look up the user_id that owns *userToken*.

    Returns the row tuple fetched from ``user_token`` (``None`` when the
    token is unknown), or the string '0' on a database error -- matching
    the error convention of the other helpers in this module.
    """
    userQuery = 'SELECT user_id FROM user_token WHERE user_token = %s'
    # Bug fix: the cursor is created BEFORE the try block.  In the original
    # it was created inside it, so a failure in cnx.cursor() left
    # userCursor unbound and the finally clause raised NameError.
    userCursor = cnx.cursor()
    try:
        userCursor.execute(userQuery, (userToken, ))
        return userCursor.fetchone()
    except mysql.connector.Error as err:
        #return 0 for db error
        print('Something went wrong: {}'.format(err))
        return '0'
    finally:
        userCursor.close()
#create new token
#return 1 for success
#return 0 for error
def addToken(userId, userToken, cnx):
    """Insert or refresh the token for *userId* (upsert on user_id).

    Returns '1' on success; on a database error rolls the transaction back
    and returns '0'.
    """
    addQuery = 'INSERT INTO user_token (user_id, user_token) VALUES (%s, %s) ON DUPLICATE KEY UPDATE user_token = %s'
    # Bug fix: cursor created before the try block so the finally clause can
    # never hit an unbound addCursor (NameError) if cnx.cursor() fails.
    addCursor = cnx.cursor()
    try:
        # userToken is bound twice: once for INSERT, once for the UPDATE arm
        addCursor.execute(addQuery, (userId, userToken, userToken))
        cnx.commit()
        return '1'
    except mysql.connector.Error as err:
        print('Something went wrong: {}'.format(err))
        cnx.rollback()
        return '0'
    finally:
        addCursor.close()
#delete token
#return 1 for success
#return 0 for fail
def deleteToken(userId, cnx):
    """Delete the stored token row(s) for *userId*.

    Returns '1' on success; on a database error rolls the transaction back
    and returns '0'.
    """
    cleanQuery = 'DELETE FROM user_token WHERE user_id = %s'
    # Bug fix: cursor created before the try block; in the original a
    # failure in cnx.cursor() left cleanCursor unbound and the finally
    # clause raised NameError instead of reaching the error path.
    cleanCursor = cnx.cursor()
    try:
        cleanCursor.execute(cleanQuery, (userId, ))
        cnx.commit()
        return '1'
    except mysql.connector.Error as err:
        cnx.rollback()
        print('Something went wrong: {}'.format(err))
        return '0'
    finally:
        cleanCursor.close()
| byn9826/Thousand-Day | handlers/token.py | Python | bsd-3-clause | 1,530 |
#!/usr/bin/python
#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#README:
#Transforms a NxN matrix volume (N^2 volumes in 4th dimension) into other measure maps.
#You can make a list of measures and they will be applied in order.
#A list of the implemented measures are listed below.
#Geodesic anisotropy equation was extracted from
#P. G. Batchelor et al. - A Rigorous Framework for Diffusion Tensor Calculus - Magnetic Resonance in Medicine 53:221-225 (2005)
# What is tensor denoising?
#Log-Euclidean tensor denoising was used to eliminate singular, negative definite, or rank-deficient tensors
#-------------------------------------------------------------------------------
#from IPython.core.debugger import Tracer; debug_here = Tracer()
import argparse, os, sys
from time import clock
import nibabel as nib
import numpy as np
from scipy.linalg import logm
from scipy.linalg.matfuncs import sqrtm
from numpy.linalg import det
from numpy.linalg import eigvals
from numpy.linalg import eigvalsh
#-------------------------------------------------------------------------------
#definining measure functions
def mylogm (v):
    """Matrix logarithm of the NxN matrix packed in *v*, as a 1 x N*N row."""
    square = v.reshape(N, N)
    return logm(square).reshape([1, N * N])
#-------------------------------------------------------------------------------
def mydet (v):
    """Determinant of the NxN matrix packed in the row vector *v*."""
    return det(v.reshape((N, N)))
#-------------------------------------------------------------------------------
def mytrace (v):
    """Trace of the NxN matrix packed in the row vector *v*."""
    as_matrix = v.reshape(N, N)
    return np.trace(as_matrix)
#-------------------------------------------------------------------------------
def myeigvals (v):
    """Eigenvalues (general, possibly complex) of the NxN matrix in *v*."""
    matrix = v.reshape(N, N)
    return eigvals(matrix).flatten()
#-------------------------------------------------------------------------------
def mymaxeigvals (v):
    """Largest eigenvalue of the NxN (general) matrix packed in *v*.

    Same computation as ``max(myeigvals(v))`` with the helper inlined.
    """
    return max(eigvals(v.reshape(N, N)).flatten())
#-------------------------------------------------------------------------------
def myeigvalsh (v):
    """Eigenvalues of the Hermitian/real-symmetric NxN matrix in *v*."""
    matrix = v.reshape(N, N)
    return eigvalsh(matrix).flatten()
#-------------------------------------------------------------------------------
def mymaxeigvalsh (v):
    """Largest eigenvalue of the Hermitian/real-symmetric NxN matrix in *v*.

    Same computation as ``max(myeigvalsh(v))`` with the helper inlined.
    """
    return max(eigvalsh(v.reshape(N, N)).flatten())
#-------------------------------------------------------------------------------
def mydeftensor (v):
    """Right stretch (deformation) tensor S = sqrtm(J^T . J) of the NxN
    Jacobian packed in *v*, returned as a 1 x N*N row vector.

    Bug fixes vs. the original: the result was assigned to ``s`` but
    returned as ``S`` (NameError on every call), and ``j.transpose()*j``
    was an element-wise ndarray product where the documented
    ``S=sqrtm(J`*J)`` requires the matrix product.
    """
    j = v.reshape([N, N])
    s = sqrtm(j.transpose().dot(j))  # matrix product, not element-wise
    return s.reshape([1, N * N])
#-------------------------------------------------------------------------------
def mygeodan (v):
    """Geodesic anisotropy of the NxN tensor packed in *v* (Batchelor et
    al., MRM 53:221-225, 2005): Frobenius-style distance of log(S) from
    its isotropic part."""
    log_tensor = logm(v.reshape(N, N))
    isotropic_part = (np.trace(log_tensor) / N) * np.eye(N)
    deviation = log_tensor - isotropic_part
    return np.sqrt(np.trace(np.square(deviation)))
#-------------------------------------------------------------------------------
def calculate_measures (funcs, data, odims):
    """Chain the selected measure functions over *data*, in order.

    *funcs* maps position -> callable; the output of each measure is fed
    to the next, and the final result is returned.  *odims* is indexed in
    lockstep (its entries are read but not otherwise used here).
    """
    n_measures = len(funcs)
    for position in range(n_measures):
        fn, _odim = funcs[position], odims[position]
        data = fn(data)
    return data
#-------------------------------------------------------------------------------
def set_parser():
    """Build the command-line parser for the matrix-transformation tool.

    Each measure flag appends its constant to ``args.funcs`` via
    ``append_const``, so the order given on the command line is the order
    in which the measures are applied.
    """
    parser = argparse.ArgumentParser(description='Transforms a NxN matrix volume (N^2 volumes in 4th dimension) into other measure maps. \n You can make a list of measures and they will be applied in order. \n A list of the implemented measures are listed below.', prefix_chars='-')
    parser.add_argument('-i', '--in', dest='infile', required=True,
                        help='Jacobian matrix volume (4DVolume with 9 volumes)')
    parser.add_argument('-m', '--mask', dest='maskfile', required=False,
                        help='Mask file')
    parser.add_argument('-o', '--out', dest='outfile', required=True,
                        help='Output file name')
    parser.add_argument('-N', '--dims', dest='dims', required=False,
                        default=3, type=int,
                        help='Order of the matrices in the volume')
    # table-driven registration of the measure flags: (flag, const, help)
    measure_flags = [
        ('--matlog', 'matlog', 'Matrix logarithm'),
        ('--deftensor', 'deftensor', 'Deformation tensor S=sqrtm(J`*J)'),
        ('--det', 'det', 'Determinant'),
        ('--trace', 'trace', 'Trace'),
        ('--eigvals', 'eigvals', 'Eigenvalues of a general matrix'),
        ('--maxeigvals', 'maxeigvals', 'Maximum eigenvalue of a general matrix'),
        ('--eigvalsh', 'eigvalsh', 'Eigenvalues of a Hermitian or real symmetric matrix'),
        ('--maxeigvalsh', 'maxeigvalsh', 'Maximum eigenvalue of a Hermitian or real symmetric matrix'),
        ('--geodan', 'geodan', 'Geodesic anisotropy: sqrt(trace(matlog(S) - (trace(matlog(S))/N)*eye(N))^2, where N==3 '),
    ]
    for flag, const, help_text in measure_flags:
        parser.add_argument(flag, dest='funcs', action='append_const',
                            const=const, help=help_text)
    return parser
#Geodesic anisotropy from:
#COMPARISON OF FRACTIONAL AND GEODESIC ANISOTROPY IN DIFFUSION TENSOR IMAGES OF 90 MONOZYGOTIC AND DIZYGOTIC TWINS
#Agatha D. Lee1, Natasha Lepore1, Marina Barysheva1, Yi-Yu Chou1, Caroline Brun1, Sarah K. Madsen1, Katie L. McMahon2, 1 Greig I. de Zubicaray2, Matthew Meredith2, Margaret J. Wright3, Arthur W. Toga1, Paul M. Thompson
#http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.142.3274
#-------------------------------------------------------------------------------
## START MATRIX TRANSFORMATIONS
#-------------------------------------------------------------------------------
def main():
    """Command-line driver: load a 4D NxN-matrix volume, apply the selected
    measures voxel-wise inside the mask, and save the result as NIfTI.

    Returns -1 on any error (argument parsing, bad input shape, processing
    failure); falls through on success so the exit status is 0.
    """
    parser = set_parser()
    # parse arguments
    try:
        args = parser.parse_args()
    except argparse.ArgumentError as exc:
        # bug fix: the original printed py2-only exc.message and then
        # called parser.error(str(msg)) with `msg` undefined (NameError)
        parser.error(str(exc))
        return -1

    ifile = args.infile.strip()
    ofile = args.outfile.strip()
    # NOTE(review): --mask is declared optional in set_parser() but is
    # required here (None.strip() would raise) -- confirm intent
    maskf = args.maskfile.strip()
    funcs = args.funcs

    # global variable holding the matrix order, read by the measure functions
    global N
    N = args.dims

    # load the input volume and the mask
    iinfo = nib.load(ifile)
    affine = iinfo.get_affine()
    minfo = nib.load(maskf)
    if len(iinfo.shape) != 4:
        err = 'File ' + ifile + ' should be a 4D volume'
        print(err)
        return -1

    # the 4th dimension must hold a perfect square (N*N) of volumes
    N = np.sqrt(iinfo.shape[3])
    if not N % 1 == 0:
        err = 'File ' + ifile + ' should have N volumes along its 4th dimension, where N is an exponent of 2.'
        print(err)
        return -1
    N = int(N)  # bug fix: np.sqrt returns a float; reshape needs an int

    try:
        # map each requested measure name to its function and record how
        # many values it contributes to the output's 4th dimension
        myfuncs = {}
        odims = np.empty(len(funcs), dtype=int)
        for i in range(len(funcs)):
            if funcs[i] == 'matlog':
                myfuncs[i] = mylogm
                odims[i] = N
            elif funcs[i] == 'det':
                myfuncs[i] = mydet
                odims[i] = 1
            elif funcs[i] == 'trace':
                myfuncs[i] = mytrace
                odims[i] = 1
            elif funcs[i] == 'deftensor':
                myfuncs[i] = mydeftensor
                odims[i] = N
            elif funcs[i] == 'eigvalsh':
                myfuncs[i] = myeigvalsh
                odims[i] = N  # bug fix: was hard-coded 3 (only valid for N == 3)
            elif funcs[i] == 'eigvals':
                myfuncs[i] = myeigvals
                odims[i] = N  # bug fix: was hard-coded 3
            elif funcs[i] == 'maxeigvalsh':
                # bug fix: was mapped to myeigvalsh, which returns N values
                # while odims says 1 (shape mismatch on assignment below)
                myfuncs[i] = mymaxeigvalsh
                odims[i] = 1
            elif funcs[i] == 'maxeigvals':
                myfuncs[i] = mymaxeigvals  # bug fix: was myeigvals
                odims[i] = 1
            elif funcs[i] == 'geodan':
                myfuncs[i] = mygeodan
                odims[i] = 1

        # read image data and flatten the spatial dimensions
        img = iinfo.get_data()
        mask = minfo.get_data()
        sx = img.shape[0]
        sy = img.shape[1]
        sz = img.shape[2]
        nvox = sx * sy * sz
        im = img.reshape(nvox, N * N)  # bug fix: was hard-coded 9
        msk = mask.flatten()
        idx = np.where(msk > 0)[0]

        tic = clock()  # NOTE(review): time.clock was removed in Python 3.8
        # apply the measure chain only to voxels inside the mask
        lm = np.zeros([nvox, odims[-1]])
        for i in idx:
            lm[i, :] = calculate_measures(myfuncs, im[i, :], odims)
        toc = clock() - tic
        print('Time spent: ' + str(toc))

        # reshape back to a spatial volume and save
        lm = lm.reshape([sx, sy, sz, odims[-1]])
        lm = lm.squeeze()
        new_image = nib.Nifti1Image(lm, affine)
        nib.save(new_image, ofile)
    except Exception:
        # bug fix: narrowed the bare except and converted the Python 2
        # print statement to the function form
        print('Ooops! Error processing file ' + ifile)
        print('Unexpected error: ' + str(sys.exc_info()))
        return -1
if __name__ == "__main__":
    # script entry point: propagate main()'s return value as the exit
    # status (-1 on error, 0 on success)
    sys.exit(main())
#Testing multiprocessing. Not implemented. Leaving for patience to solve.
#for i in range(len(im)/7000):
# p.apply_async(mylogm, args=(im[i,:],i))
##determining multiprocessing stuff
#if nthreads > 1:
# from multiprocessing.pool import Pool
#ncpus = multiprocessing.cpu_count()
#if nthreads > ncpus:
# nthreads = ncpus - 1
#if nthreads > 1:
# p = ThreadPool(nthreads)
# print ('Using ' + nthreads + ' threads for execution')
#import nibabel as nib
#import numpy as np
#from time import clock
#from multiprocessing import Pool
#ifile='patient.M.90..5.OAS1_0247_MR1_mpr_n4_anon_111_t88_masked_gfc_spline_jacmat.nii.gz'
#meta = nib.load(ifile)
#img = meta.get_data()
#sx = img.shape[0]
#sy = img.shape[1]
#sz = img.shape[2]
#im = img.reshape(sx*sy*sz,9)
#p = Pool(4)
#p.map(mylogm, im)
#from time import clock
#for l in range(3):
# ti = clock();
# p = ThreadPool(4)
# lm = im[np.arange(len(im)/500),:]
# lm = np.zeros(lm.shape)
# lm = p.map(mylogm, im)
## for i in range(len(im)/500):
## v = p.apply_async(mylogm, args=(im[i,:]))
## p.close()
## p.join()
# tf = clock()-ti
# print tf
#for l in range(3):
# lm = np.empty(im.shape)
# ti = clock();
# for i in range(len(im)/500):
# lm[i,:] = mylogm(im[i,:])
# tf = clock()-ti
# print tf
| alexsavio/aizkolari | matrans.py | Python | bsd-3-clause | 10,313 |
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
from airflow.operators import PythonOperator
from airflow.hooks import RedisHook
from airflow.models import Variable
from airflow.hooks import MemcacheHook
from etl_tasks_functions import get_time
from etl_tasks_functions import subtract_time
from subdags.utilization_utility import calculate_wimax_utilization
from subdags.utilization_utility import calculate_cambium_ss_utilization
from subdags.utilization_utility import calculate_radwin5k_ss_utilization
from subdags.utilization_utility import calculate_radwin5k_bs_utilization
from subdags.utilization_utility import calculate_radwin5kjet_ss_utilization
from subdags.utilization_utility import calculate_radwin5kjet_bs_utilization
from subdags.utilization_utility import calculate_radwin5k_bs_and_ss_dyn_tl_kpi
from subdags.utilization_utility import calculate_backhaul_utilization
from subdags.utilization_utility import calculate_ptp_utilization
from subdags.utilization_utility import calculate_mrotek_utilization
from subdags.utilization_utility import backtrack_x_min
from subdags.utilization_utility import get_severity_values
from subdags.utilization_utility import calculate_age
from subdags.utilization_utility import calculate_severity
from airflow.operators import MySqlLoaderOperator
import logging
import itertools
import socket
import random
import traceback
import time
from pprint import pprint
# Default task arguments shared by every task spawned in this module's DAGs.
default_args = {
    'owner': 'wireless',
    'depends_on_past': False,
    'start_date': datetime.now() - timedelta(minutes=2),
    'email': ['vipulsharma144@gmail.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=1),
    'provide_context': True,
    'catchup': False,
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}

# Connection hooks: one Redis hook for intermediate KPI data, one static
# Redis hook for down-device state, and per-cluster memcache hooks (the
# vrfprv and pub site families each get their own memcache connection).
redis_hook_util_10 = RedisHook(redis_conn_id="redis_hook_util_10")
memc_con_cluster = MemcacheHook(memc_cnx_id = 'memc_cnx')
vrfprv_memc_con = MemcacheHook(memc_cnx_id = 'vrfprv_memc_cnx')
pub_memc_con = MemcacheHook(memc_cnx_id = 'pub_memc_cnx')
redis_hook_static_5 = RedisHook(redis_conn_id="redis_hook_5")

# SQL templates: the %s in each header is later filled with the per-machine
# database name ("nocout_<machine>"); the tails carry pyformat placeholders
# bound at execution time.  The UPDATE variant is an upsert (ON DUPLICATE
# KEY UPDATE) against the status table.
INSERT_HEADER = "INSERT INTO %s.performance_utilization"
INSERT_TAIL = """
(machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer )
values
(%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s)
"""
UPDATE_HEADER = "INSERT INTO %s.performance_utilizationstatus"
UPDATE_TAIL = """
(machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer )
values
(%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s)
ON DUPLICATE KEY UPDATE machine_name = VALUES(machine_name),current_value = VALUES(current_value),age=VALUES(age),site_name=VALUES(site_name),critical_threshold=VALUES(critical_threshold),severity=VALUES(severity),sys_timestamp=VALUES(sys_timestamp),ip_address=VALUES(ip_address),warning_threshold=VALUES(warning_threshold),check_timestamp=VALUES(check_timestamp),refer=VALUES(refer)
"""

# Human-readable explanations for numeric device error codes; codes listed
# in ERROR_FOR_DEVICE_OMITTED cause the device to be skipped entirely.
ERROR_DICT ={404:'Device not found yet',405:'No SS Connected to BS-BS is not skipped'}
ERROR_FOR_DEVICE_OMITTED = [404]

# HACK: kpi_rules is stored as a Python literal in an Airflow Variable and
# eval'd here -- eval on externally stored data is unsafe; consider
# ast.literal_eval if the value is a plain literal.
kpi_rules = eval(Variable.get("kpi_rules"))
DEBUG = False
# service name -> {data_source, sector_type}; filled in by the subdag factory
sv_to_ds_mapping = {}
#O7_CALC_Q = "calculation_q"
O7_CALC_Q = "poller_queue"
# HACK: same eval-on-stored-string pattern as kpi_rules above.
down_and_unresponsive_devices = eval(redis_hook_static_5.get("current_down_devices_all"))
def process_utilization_kpi(
parent_dag_name,
child_dag_name,
start_date,
schedule_interval,
celery_queue,
ss_tech_sites,
hostnames_ss_per_site,
ss_name,
utilization_attributes,
config_sites): #here config site is list of all sites in system_config var
utilization_kpi_subdag_dag = DAG(
dag_id="%s.%s"%(parent_dag_name, child_dag_name),
schedule_interval=schedule_interval,
start_date=start_date,
)
for service in utilization_attributes:
sv_to_ds_mapping [service.get("service_name")] ={"data_source":service.get("data_source"),"sector_type":service.get("sector_type")}
def get_calculated_ss_data():
ss_data = redis_hook_util_10.rget("calculated_ss_utilization_kpi")
combined_site_data = {}
for site_data in ss_data:
site_data = eval(site_data)
combined_site_data.update(site_data)
return combined_site_data
#To create SS dict
def format_data(**kwargs):
device_type = kwargs.get("params").get("technology")
utilization_attributes = kwargs.get("params").get("attributes")
machine_name = kwargs.get("params").get("machine_name")
ss_kpi_dict = {
'site_name': 'unknown' ,
'device_name': 'unknown',
'service_name': 'unknown',
'ip_address': 'unknown',
'severity': 'unknown',
'age': 'unknown',
'data_source': 'unknown',
'current_value': 'unknown',
'warning_threshold': 'unknown',
'critical_threshold': 'unknown',
'check_timestamp': 'unknown',
'sys_timestamp': 'unknown' ,
'refer':'unknown',
'min_value':'unknown',
'max_value':'unknown',
'avg_value':'unknown',
'machine_name':'unknown'
}
ss_data =redis_hook_util_10.rget("calculated_utilization_%s_%s"%(device_type,machine_name))
cur_processing_time = backtrack_x_min(time.time(),300) + 120 # this is used to rewind the time to previous multiple of 5 value so that kpi can be shown accordingly
ss_devices_list = []
for ss_device in ss_data:
ss_device = eval(ss_device)
hostname = ss_device.get('hostname')
for service in ss_device.get('services'):
data_source = sv_to_ds_mapping.get(service).get("data_source")
pmp_type = sv_to_ds_mapping.get(service).get("sector_type")
thresholds = get_severity_values(service)
ss_kpi_dict['critical_threshold']=thresholds[0]
ss_kpi_dict['data_source']=data_source
ss_kpi_dict['site_name']=ss_device.get('site')
#TODO: ok and unknown are only 2 sev for ss we can incluudethis in rules later
ss_kpi_dict['service_name']= service
ss_kpi_dict['machine_name']= machine_name
ss_kpi_dict['check_timestamp']=cur_processing_time
ss_kpi_dict['device_name']=ss_device.get('hostname')
ss_kpi_dict['sys_timestamp']=cur_processing_time
ss_kpi_dict['refer']=ss_device.get("%s_sector"%(pmp_type))
ss_kpi_dict['ip_address']=ss_device.get('ipaddress')
ss_kpi_dict['warning_threshold']= thresholds[1]
if not isinstance(ss_device.get(service),dict):
#handling cur_value if it is greater than 100
cur_value=ss_device.get(service)
if ss_device.get(service) and ss_device.get(service) != None:
cur_value=ss_device.get(service)
try:
if isinstance(curr_value,float) and cur_value and cur_value > 100.00:
cur_value = 100
except Exception:
logging.error("Exception while handling above 100 entries")
ss_kpi_dict['severity']= calculate_severity(service,ss_device.get(service))
ss_kpi_dict['age']= calculate_age(hostname,ss_kpi_dict['severity'],ss_device.get('device_type'),cur_processing_time,service)
ss_kpi_dict['current_value']=cur_value
ss_kpi_dict['avg_value']=cur_value
ss_kpi_dict['min_value']=cur_value
ss_kpi_dict['max_value']=cur_value
if ss_kpi_dict['current_value'] != None:
ss_devices_list.append(ss_kpi_dict.copy())
else:
for data_source in ss_device.get(service):
ds_values = ss_device.get(service)
curr_value= ss_device.get(service).get(data_source)
if isinstance(curr_value,str):
try:
curr_value=float(curr_value)
if isinstance(curr_value,float):
if curr_value > 100.00:
curr_value=100
except Exception:
logging.error("Unable to convert to float")
else:
if curr_value > 100.00:
curr_value=100
ss_kpi_dict['data_source']=data_source
ss_kpi_dict['severity']= calculate_severity(service,ds_values.get(data_source))
ss_kpi_dict['age']= calculate_age(hostname,ss_kpi_dict['severity'],ss_device.get('device_type'),cur_processing_time,service)
ss_kpi_dict['current_value'] = curr_value
ss_kpi_dict['avg_value']=curr_value
ss_kpi_dict['min_value']=curr_value
ss_kpi_dict['max_value']=curr_value
if ss_kpi_dict['current_value'] != None:
ss_devices_list.append(ss_kpi_dict.copy())
try:
if len(ss_devices_list) > 0:
redis_hook_util_10.rpush("formatted_util_%s_%s"%(device_type,machine_name),ss_devices_list)
else:
logging.info("No %s device found in %s after formatting "%(device_type,machine_name))
except Exception:
logging.error("Unable to push formatted SS data to redis")
def get_required_data_ss(**kwargs):
site_name = kwargs.get("params").get("site_name")
device_type = kwargs.get("params").get("technology")
utilization_attributes = kwargs.get("params").get("attributes")
if "vrfprv" in site_name:
memc_con = vrfprv_memc_con
elif "pub" in site_name:
memc_con = pub_memc_con
else:
memc_con = memc_con_cluster
ss_data_dict = {}
all_ss_data = []
if site_name not in hostnames_ss_per_site.keys():
logging.warning("No SS devices found for %s"%(site_name))
return 1
for hostnames_dict in hostnames_ss_per_site.get(site_name):
host_name = hostnames_dict.get("hostname")
ip_address = hostnames_dict.get("ip_address")
ss_data_dict['hostname'] = host_name
ss_data_dict['ipaddress'] = ip_address
ss_data_dict['site_name'] = site_name
if host_name not in down_and_unresponsive_devices:
for service in utilization_attributes:
ss_data_dict[service.get('service_name')] = memc_con.get(service.get('utilization_key')%(host_name))
all_ss_data.append(ss_data_dict.copy())
if len(all_ss_data) == 0:
logging.info("No data Fetched ! Aborting Successfully")
return 0
try:
#redis_hook_util_10.rpush("%s_%s"%(device_type,site_name),all_ss_data)
print "++++++++++++"
print site_name.split("_")[0]
redis_hook_util_10.rpush("%s_%s"%(device_type,site_name.split("_")[0]),all_ss_data)
except Exception:
logging.warning("Unable to insert ss data into redis")
#pprint(all_ss_data)
def calculate_utilization_data_ss(**kwargs):
machine_name = kwargs.get("params").get("machine_name")
device_type = kwargs.get("params").get("technology")
utilization_attributes = kwargs.get("params").get("attributes")
devices_data_dict = redis_hook_util_10.rget("%s_%s"%(device_type,machine_name))
if len(devices_data_dict) == 0:
logging.info("No Data found for ss %s "%(machine_name))
return 1
ss_data = []
for devices in devices_data_dict:
devices = eval(devices)
site_name = devices.get("site_name")
devices['site'] = site_name
devices['device_type'] = device_type
for service_attributes in utilization_attributes: #loop for the all the configured services
service = service_attributes.get('service_name')
if service_attributes.get('isKpi'):
if 'services' in devices.keys() and devices.get('services') != None:
devices.get('services').append(service)
elif service and devices.get('services') == None:
devices['services'] = [service]
else:
devices['services'] = []
if service_attributes.get('isKpi'):
utilization_type = service_attributes.get("utilization_type")
capacity = None
if "capacity" in service_attributes.keys():
capacity = service_attributes.get("capacity")
try:
formula = kpi_rules.get(service).get('formula')
devices[service] = eval(formula)
except Exception:
print "Exception in calculating data"
pass
else:
continue
#ip_ul_mapper[devices.get('ipaddress')] = devices
ss_data.append(devices.copy())
#ss_utilization_list.append(ip_ul_mapper.copy())
key="calculated_utilization_%s_%s"%(device_type,machine_name)
redis_hook_util_10.rpush(key,ss_data)
print "Setting ....."
print "calculated_utilization_%s_%s"%(device_type,machine_name)
#redis_hook_util_10.rpush("calculated_ss_utilization_kpi",ss_utilization_list)
def aggregate_utilization_data(*args,**kwargs):
print "Aggregating Data"
machine_name = kwargs.get("params").get("machine_name")
device_type = kwargs.get("params").get("technology")
#device_type = kwargs.get("params").get("device_type")
formatted_data=redis_hook_util_10.rget("formatted_util_%s_%s"%(device_type,machine_name))
machine_data = []
for site_data in formatted_data:
machine_data.append(eval(site_data))
redis_hook_util_10.set("aggregated_utilization_%s_%s"%(machine_name,device_type),str(machine_data))
machine_names = set([site.split("_")[0] for site in ss_tech_sites])
config_machines = set([site.split("_")[0] for site in config_sites])
aggregate_dependency_ss = {}
aggregate_dependency_bs = {}
calculate_task_list={}
format_task_list={}
#TODo Remove this if ss >> bs task
# calculate_utilization_lost_ss_bs_task = PythonOperator(
# task_id = "calculate_bs_utilization_lost_ss",
# provide_context=True,
# python_callable=calculate_utilization_data_bs,
# params={"lost_n_found":True},
# dag=utilization_kpi_subdag_dag
# )
for each_machine_name in machine_names:
if each_machine_name in config_machines:
aggregate_utilization_data_ss_task = PythonOperator(
task_id = "aggregate_utilization_ss_%s"%each_machine_name,
provide_context=True,
python_callable=aggregate_utilization_data,
params={"machine_name":each_machine_name,"technology":ss_name},
dag=utilization_kpi_subdag_dag,
queue = O7_CALC_Q,
trigger_rule = 'all_done'
)
aggregate_dependency_ss[each_machine_name] = aggregate_utilization_data_ss_task
calculate_utilization_data_ss_task = PythonOperator(
task_id = "calculate_ss_utilization_kpi_of_%s"%each_machine_name,
provide_context=True,
trigger_rule = 'all_done',
python_callable=calculate_utilization_data_ss,
params={"machine_name":each_machine_name,"technology":ss_name,'attributes':utilization_attributes},
dag=utilization_kpi_subdag_dag,
queue = O7_CALC_Q,
)
format_data_ss_task = PythonOperator(
task_id = "format_data_of_ss_%s"%each_machine_name,
provide_context=True,
python_callable=format_data,
trigger_rule = 'all_done',
params={"machine_name":each_machine_name,"technology":ss_name,'attributes':utilization_attributes},
dag=utilization_kpi_subdag_dag,
queue = celery_queue,
)
calculate_task_list[each_machine_name] = calculate_utilization_data_ss_task
calculate_utilization_data_ss_task >> format_data_ss_task
format_data_ss_task >> aggregate_utilization_data_ss_task
#we gotta create teh crazy queries WTF this is so unsafe
INSERT_QUERY = INSERT_HEADER%("nocout_"+each_machine_name) + INSERT_TAIL
UPDATE_QUERY = UPDATE_HEADER%("nocout_"+each_machine_name) + UPDATE_TAIL
INSERT_QUERY = INSERT_QUERY.replace('\n','')
UPDATE_QUERY = UPDATE_QUERY.replace('\n','')
#ss_name == Device_type
if not DEBUG:
insert_data_in_mysql = MySqlLoaderOperator(
task_id ="upload_data_%s"%(each_machine_name),
dag=utilization_kpi_subdag_dag,
query=INSERT_QUERY,
#data="",
redis_key="aggregated_utilization_%s_%s"%(each_machine_name,ss_name),
redis_conn_id = "redis_hook_util_10",
mysql_conn_id='mysql_uat',
queue = O7_CALC_Q,
trigger_rule = 'all_done'
)
update_data_in_mysql = MySqlLoaderOperator(
task_id ="update_data_%s"%(each_machine_name),
query=UPDATE_QUERY ,
#data="",
redis_key="aggregated_utilization_%s_%s"%(each_machine_name,ss_name),
redis_conn_id = "redis_hook_util_10",
mysql_conn_id='mysql_uat',
dag=utilization_kpi_subdag_dag,
queue = O7_CALC_Q,
trigger_rule = 'all_done'
)
update_data_in_mysql << aggregate_utilization_data_ss_task
insert_data_in_mysql << aggregate_utilization_data_ss_task
db_list=[]
for each_site_name in ss_tech_sites:
if each_site_name in config_sites:
machine = each_site_name.split("_")[0]
get_required_data_ss_task = PythonOperator(
task_id = "get_utilization_data_of_ss_%s"%each_site_name,
provide_context=True,
trigger_rule = 'all_done',
python_callable=get_required_data_ss,
params={"site_name":each_site_name,"technology":ss_name,'attributes':utilization_attributes},
dag=utilization_kpi_subdag_dag,
queue = celery_queue
)
get_required_data_ss_task >> calculate_task_list.get(machine)
#calculate_utilization_data_ss_task >> format_data_ss_task
#calculate_utilization_data_ss_task >> calculate_utilization_data_bs_task
# try:
# aggregate_dependency_ss[machine_name] << format_data_ss_task
# except:
# logging.info("Site Not Found %s"%(machine_name))
# pass
else:
logging.info("Skipping %s"%(each_site_name))
return utilization_kpi_subdag_dag
| vipul-tm/DAG | dags-ttpl/subdags/utilization_kpi_subdag.py | Python | bsd-3-clause | 17,641 |
# coding: utf-8
from django import VERSION
from django.core.management import call_command
from ._compat import patch
CELERYD_COMMAND = 'djcelery.management.commands.celeryd.Command.handle'
def test_celeryd_command():
    """The ``celeryd`` management command forwards the full default option set."""
    # Django >= 1.10 sends traceback=False by default; older versions send None.
    expected_traceback = False if VERSION >= (1, 10) else None
    with patch(CELERYD_COMMAND) as handle:
        call_command('celeryd')
    handle.assert_called_with(
        autoreload=None, autoscale=None, beat=None, broker=None,
        concurrency=0, detach=None, exclude_queues=[], executable=None,
        gid=None, heartbeat_interval=None, hostname=None, include=[],
        logfile=None, loglevel='WARN', max_tasks_per_child=None,
        no_color=False, no_execv=False, optimization=None, pidfile=None,
        pool_cls='prefork', purge=False, pythonpath=None, queues=[],
        quiet=None, schedule_filename='celerybeat-schedule',
        scheduler_cls=None, send_events=False, settings=None,
        skip_checks=True, state_db=None, task_soft_time_limit=None,
        task_time_limit=None, traceback=expected_traceback, uid=None, umask=None,
        verbosity=1, without_gossip=False, without_heartbeat=False,
        without_mingle=False, working_directory=None
    )
| kanemra/django-celery | djcelery/tests/test_commands.py | Python | bsd-3-clause | 1,278 |
from sqlalchemy.orm import sessionmaker as sqla_sessionmaker
from sqlalchemy import engine_from_config
import zope.sqlalchemy
import transaction
from .meta import Base
from .user import (
User,
UserTickets,
)
from .domain import Domain
from .record import Record
from .types import (
type_to_value,
value_to_type,
)
def includeme(config):
    """Pyramid hook: wire a per-request SQLAlchemy session plus pyramid_tm."""
    settings = config.get_settings()
    session_factory = build_sessionmaker(settings)

    def request_dbsession(request):
        return get_dbsession(request, session_factory)

    config.add_request_method(request_dbsession, 'dbsession', reify=True)

    # Install an explicit per-request transaction manager unless one is
    # already configured, then activate pyramid_tm.
    if 'tm.manager_hook' not in settings:
        config.add_settings({
            'tm.manager_hook': lambda request: transaction.TransactionManager(),
        })
    config.include('pyramid_tm')
def get_dbsession(request, sessionmaker):
    """Create a session from *sessionmaker* joined to the request's txn manager."""
    session = sessionmaker()
    zope.sqlalchemy.register(session, transaction_manager=request.tm)
    return session
def build_sessionmaker(settings, prefix='sqlalchemy.'):
    """Return a sessionmaker bound to an engine built from *settings*.

    Engine options are read from keys starting with *prefix*.
    """
    factory = sqla_sessionmaker()
    factory.configure(bind=engine_from_config(settings, prefix))
    return factory
| bertjwregeer/alexandria | alexandria/models/__init__.py | Python | isc | 1,257 |
"""
None of the functions/objects in this module need be passed `db`.
Naming convention: a `pub` is either a pubkey or a pubkeyhash
"""
import hashlib
import bitcoin as bitcoinlib
import binascii
from bitcoin.core.key import CPubKey
from counterpartylib.lib import util
from counterpartylib.lib import config
from counterpartylib.lib import exceptions
b58_digits = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
class InputError (Exception):
    """Malformed multi-signature input data (e.g. pubkey count mismatch)."""
    pass
class AddressError(Exception):
    """Base class for all address validation/decoding failures."""
    pass
class MultiSigAddressError(AddressError):
    """A multi-signature address is structurally invalid."""
    pass
class VersionByteError (AddressError):
    """A decoded address carries an unexpected version byte."""
    pass
class Base58Error (AddressError):
    """Base class for Base58 decoding problems."""
    pass
class Base58ChecksumError (Base58Error):
    """The 4-byte Base58Check checksum does not match."""
    pass
def validate(address):
    """Raise an `AddressError` subclass if *address* is not valid.

    Handles both plain addresses and multi-signature addresses.
    """
    # Multi-sig addresses contribute one pubkeyhash per component.
    hashes = pubkeyhash_array(address) if is_multisig(address) else [address]
    # Decoding performs the actual validation; failures raise.
    for candidate in hashes:
        base58_check_decode(candidate, config.ADDRESSVERSION)
def base58_encode(binary):
    """Encode a byte string in base58 (no checksum, no leading-zero padding)."""
    number = int.from_bytes(binary, 'big')
    digits = []
    # Repeated division by 58 yields digits least-significant first.
    while number > 0:
        number, remainder = divmod(number, 58)
        digits.append(b58_digits[remainder])
    digits.reverse()
    return ''.join(digits)
def base58_check_encode(original, version):
    """Base58Check-encode the hex string *original* under *version*.

    *original* is a hex-encoded payload (e.g. a pubkeyhash); *version* is
    the version byte (bytes).  The result is round-trip verified against
    base58_check_decode() and an AddressError is raised on mismatch.
    """
    b = binascii.unhexlify(bytes(original, 'utf-8'))
    d = version + b
    # Payload plus the first four bytes of the double-SHA256 checksum.
    binary = d + util.dhash(d)[:4]
    res = base58_encode(binary)
    # Encode leading zeros as base58 zeros
    czero = 0
    pad = 0
    for c in d:
        # Iterating bytes yields ints (Python 3), so compare against 0.
        if c == czero:
            pad += 1
        else:
            break
    address = b58_digits[0] * pad + res
    if original != util.hexlify(base58_check_decode(address, version)):
        raise AddressError('encoded address does not decode properly')
    return address
def base58_check_decode(s, version):
    """Decode the Base58Check string *s*, verifying *version* and checksum.

    Returns the payload bytes (without the version byte and checksum).
    Raises Base58Error for bad characters, VersionByteError for a wrong
    version byte, Base58ChecksumError for a checksum mismatch.
    """
    # Convert the string to an integer
    n = 0
    for c in s:
        n *= 58
        # Single scan via find() instead of `in`-check plus .index().
        digit = b58_digits.find(c)
        if digit == -1:
            raise Base58Error('Not a valid Base58 character: ‘{}’'.format(c))
        n += digit
    # Convert the integer to bytes
    h = '%x' % n
    if len(h) % 2:
        h = '0' + h
    res = binascii.unhexlify(h.encode('utf8'))
    # Restore leading zero bytes, which base58 encodes as leading '1's.
    pad = 0
    for c in s[:-1]:
        if c == b58_digits[0]:
            pad += 1
        else:
            break
    k = version * pad + res
    addrbyte, data, chk0 = k[0:1], k[1:-4], k[-4:]
    if addrbyte != version:
        raise VersionByteError('incorrect version byte')
    # Checksum is the first four bytes of double-SHA256 of version+payload.
    chk1 = util.dhash(addrbyte + data)[:4]
    if chk0 != chk1:
        raise Base58ChecksumError('Checksum mismatch: 0x{} ≠ 0x{}'.format(util.hexlify(chk0), util.hexlify(chk1)))
    return data
def is_multisig(address):
    """Return True when *address* is in multi-signature 'm_pub..._n' form."""
    # Any underscore means splitting on '_' yields more than one part.
    return '_' in address
def is_fully_valid(pubkey_bin):
    """Check if the public key is valid."""
    # CPubKey parses the SEC-encoded key; is_fullyvalid is False for
    # malformed or otherwise invalid keys.
    cpubkey = CPubKey(pubkey_bin)
    return cpubkey.is_fullyvalid
def make_canonical(address):
    """Return the canonical form of *address*.

    Multi-signature addresses are rebuilt from sorted components; plain
    addresses pass through unchanged.  Components must be pubkeyhashes.
    """
    if not is_multisig(address):
        return address
    signatures_required, pubkeyhashes, signatures_possible = extract_array(address)
    try:
        for pubkeyhash in pubkeyhashes:
            base58_check_decode(pubkeyhash, config.ADDRESSVERSION)
    except Base58Error:
        raise MultiSigAddressError('Multi‐signature address must use PubKeyHashes, not public keys.')
    return construct_array(signatures_required, pubkeyhashes, signatures_possible)
def test_array(signatures_required, pubs, signatures_possible):
    """Validate an (m, pubs, n) multi-signature triple; raise on any problem."""
    try:
        required = int(signatures_required)
        possible = int(signatures_possible)
    except (ValueError, TypeError):
        raise MultiSigAddressError('Signature values not integers.')
    # m must be 1..3, n must be 2..3 (standard bare multisig bounds here).
    if not 1 <= required <= 3:
        raise MultiSigAddressError('Invalid signatures_required.')
    if not 2 <= possible <= 3:
        raise MultiSigAddressError('Invalid signatures_possible.')
    # Underscore is the address separator, so it may not occur in components.
    if any('_' in pubkey for pubkey in pubs):
        raise MultiSigAddressError('Invalid characters in pubkeys/pubkeyhashes.')
    if possible != len(pubs):
        raise InputError('Incorrect number of pubkeys/pubkeyhashes in multi‐signature address.')
def construct_array(signatures_required, pubs, signatures_possible):
    """Build the canonical 'm_pub..._n' multi-signature address string."""
    test_array(signatures_required, pubs, signatures_possible)
    parts = [str(signatures_required)] + sorted(pubs) + [str(signatures_possible)]
    return '_'.join(parts)
def extract_array(address):
    """Split an 'm_pub..._n' address into (m, sorted pubs, n), validating it."""
    assert is_multisig(address)
    parts = address.split('_')
    required, possible = parts[0], parts[-1]
    pubs = sorted(parts[1:-1])
    test_array(required, pubs, possible)
    return int(required), pubs, int(possible)
def pubkeyhash_array(address):
    """Return the PubKeyHash components of a multi-signature *address*.

    Raises MultiSigAddressError when any component is a public key rather
    than a pubkeyhash.
    """
    _, pubs, _ = extract_array(address)
    if not all(is_pubkeyhash(pub) for pub in pubs):
        raise MultiSigAddressError('Invalid PubKeyHashes. Multi‐signature address must use PubKeyHashes, not public keys.')
    return pubs
def hash160(x):
    """Return RIPEMD160(SHA256(x)), the standard 'hash-160' of Bitcoin."""
    sha256_digest = hashlib.sha256(x).digest()
    return hashlib.new('ripemd160', sha256_digest).digest()
def pubkey_to_pubkeyhash(pubkey):
    """Convert a binary public key to its Base58Check pubkeyhash string."""
    digest = hash160(pubkey)
    hex_digest = binascii.hexlify(digest).decode('utf-8')
    return base58_check_encode(hex_digest, config.ADDRESSVERSION)
def get_asm(scriptpubkey):
    """Return the script as a list: CScriptOp entries as str, pushdata as-is.

    Raises DecodeError on truncated pushdata or an empty script.
    """
    # TODO: When is an exception thrown here? Can this `try` block be tighter? Can it be replaced by a conditional?
    try:
        asm = []
        # TODO: This should be `for element in scriptpubkey`.
        for op in scriptpubkey:
            if type(op) == bitcoinlib.core.script.CScriptOp:
                # TODO: `op = element`
                asm.append(str(op))
            else:
                # TODO: `data = element` (?)
                asm.append(op)
    except bitcoinlib.core.script.CScriptTruncatedPushDataError:
        raise exceptions.DecodeError('invalid pushdata due to truncation')
    if not asm:
        raise exceptions.DecodeError('empty output')
    return asm
def get_checksig(asm):
    """Extract the pubkeyhash from a standard P2PKH script, else DecodeError."""
    is_p2pkh = (len(asm) == 5
                and asm[0] == 'OP_DUP'
                and asm[1] == 'OP_HASH160'
                and asm[3] == 'OP_EQUALVERIFY'
                and asm[4] == 'OP_CHECKSIG')
    if is_p2pkh:
        pubkeyhash = asm[2]
        if type(pubkeyhash) == bytes:
            return pubkeyhash
    raise exceptions.DecodeError('invalid OP_CHECKSIG')
def get_checkmultisig(asm):
    """Extract (pubkeys, m) from a bare m-of-2 or m-of-3 CHECKMULTISIG script."""
    # m-of-2: [m, pub1, pub2, 2, OP_CHECKMULTISIG]
    if len(asm) == 5 and asm[3] == 2 and asm[4] == 'OP_CHECKMULTISIG':
        pubkeys = asm[1:3]
        if all(type(pubkey) == bytes for pubkey in pubkeys):
            return pubkeys, asm[0]
    # m-of-3: [m, pub1, pub2, pub3, 3, OP_CHECKMULTISIG]
    if len(asm) == 6 and asm[4] == 3 and asm[5] == 'OP_CHECKMULTISIG':
        pubkeys = asm[1:4]
        if all(type(pubkey) == bytes for pubkey in pubkeys):
            return pubkeys, asm[0]
    raise exceptions.DecodeError('invalid OP_CHECKMULTISIG')
def scriptpubkey_to_address(scriptpubkey):
    """Derive an address (plain or multisig) from a scriptpubkey, or None."""
    asm = get_asm(scriptpubkey)
    last_op = asm[-1]
    if last_op == 'OP_CHECKSIG':
        try:
            pubkeyhash = get_checksig(asm)
        except exceptions.DecodeError:
            # Coinbase / non-standard outputs have no extractable destination.
            return None
        return base58_check_encode(binascii.hexlify(pubkeyhash).decode('utf-8'), config.ADDRESSVERSION)
    if last_op == 'OP_CHECKMULTISIG':
        pubkeys, signatures_required = get_checkmultisig(asm)
        pubkeyhashes = [pubkey_to_pubkeyhash(pubkey) for pubkey in pubkeys]
        return construct_array(signatures_required, pubkeyhashes, len(pubkeyhashes))
    return None
# TODO: Use `python-bitcointools` instead. (Get rid of `pycoin` dependency.)
from pycoin.encoding import wif_to_tuple_of_secret_exponent_compressed, public_pair_to_sec, EncodingError
from pycoin.ecdsa import generator_secp256k1, public_pair_for_secret_exponent
class AltcoinSupportError (Exception): pass  # raised when pycoin rejects a WIF prefix
def private_key_to_public_key(private_key_wif):
    """Derive the hex-encoded public key from a WIF-encoded private key.

    Uses pycoin for the WIF decode and secp256k1 point multiplication.
    Raises AltcoinSupportError for WIF prefixes pycoin does not accept.
    """
    # Only the configured network's WIF prefix is accepted.
    if config.TESTNET:
        allowable_wif_prefixes = [config.PRIVATEKEY_VERSION_TESTNET]
    else:
        allowable_wif_prefixes = [config.PRIVATEKEY_VERSION_MAINNET]
    try:
        secret_exponent, compressed = wif_to_tuple_of_secret_exponent_compressed(
            private_key_wif, allowable_wif_prefixes=allowable_wif_prefixes)
    except EncodingError:
        raise AltcoinSupportError('pycoin: unsupported WIF prefix')
    # Public key = secret_exponent * G on secp256k1; keep the WIF's
    # compressed/uncompressed flag for the SEC serialization.
    public_pair = public_pair_for_secret_exponent(generator_secp256k1, secret_exponent)
    public_key = public_pair_to_sec(public_pair, compressed=compressed)
    public_key_hex = binascii.hexlify(public_key).decode('utf-8')
    return public_key_hex
def is_pubkeyhash(monosig_address):
    """True when *monosig_address* Base58Check-decodes under our version byte."""
    assert not is_multisig(monosig_address)
    try:
        base58_check_decode(monosig_address, config.ADDRESSVERSION)
    except (Base58Error, VersionByteError):
        return False
    return True
def make_pubkeyhash(address):
    """Return *address* with any public keys replaced by PubKeyHashes."""
    def to_pubkeyhash(candidate):
        # Leave pubkeyhashes alone; convert hex-encoded public keys.
        if is_pubkeyhash(candidate):
            return candidate
        return pubkey_to_pubkeyhash(binascii.unhexlify(bytes(candidate, 'utf-8')))

    if is_multisig(address):
        signatures_required, pubs, signatures_possible = extract_array(address)
        pubkeyhashes = [to_pubkeyhash(pub) for pub in pubs]
        return construct_array(signatures_required, pubkeyhashes, signatures_possible)
    return to_pubkeyhash(address)
def extract_pubkeys(pub):
    """Return the components of *pub* that look like public keys.

    Anything that is not a pubkeyhash is assumed to be a public key;
    validity is checked later by the caller.
    """
    # Fix: the old loop variable shadowed the `pub` parameter, and the
    # pubkeyhash test was duplicated across the two branches.
    if is_multisig(pub):
        _, candidates, _ = extract_array(pub)
    else:
        candidates = [pub]
    return [candidate for candidate in candidates if not is_pubkeyhash(candidate)]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| tokenly/counterparty-lib | counterpartylib/lib/script.py | Python | mit | 10,947 |
from bank_CI import BankCI
from bank_controller import BankController
from settings import DB_NAME, CREATE_TABLES, DROP_DATABASE
from sql_manager import BankDatabaseManager
def main():
    """Wire up the bank app: database manager -> controller -> CLI main menu."""
    # create_if_exists=False: presumably reuses an existing database file
    # rather than recreating it -- confirm against BankDatabaseManager.
    manager = BankDatabaseManager.create_from_db_and_sql(DB_NAME, CREATE_TABLES, DROP_DATABASE, create_if_exists=False)
    controller = BankController(manager)
    command_interface = BankCI(controller)
    command_interface.main_menu()
if __name__ == '__main__':
    main()
| pepincho/Python101-and-Algo1-Courses | Programming-101-v3/week9/1-Money-In-The-Bank/start.py | Python | mit | 465 |
from rpython.flowspace.model import Variable
from rpython.rtyper.lltypesystem import lltype
from rpython.translator.simplify import get_graph
from rpython.tool.uid import uid
class CreationPoint(object):
    """Records where a heap object is created and whether it escapes/returns."""

    def __init__(self, creation_method, TYPE, op=None):
        # Constants are globally reachable, so they escape by construction.
        self.escapes = (creation_method == "constant")
        self.returns = False
        self.creation_method = creation_method
        self.TYPE = TYPE
        self.op = op

    def __repr__(self):
        return ("CreationPoint(<0x%x>, %r, %s, esc=%s)" %
                (uid(self), self.TYPE, self.creation_method, self.escapes))
class VarState(object):
    """The set of possible creation points an abstract variable refers to."""

    def __init__(self, *creps):
        # The variable may point to an object made at any of these points.
        self.creation_points = set(creps)

    def contains(self, other):
        """True when every creation point of *other* is tracked here too."""
        return other.creation_points.issubset(self.creation_points)

    def merge(self, other):
        """Return a fresh state covering both states' creation points."""
        return VarState(*self.creation_points.union(other.creation_points))

    def setescapes(self):
        """Mark all creation points as escaping; return the newly changed ones."""
        changed = [crep for crep in self.creation_points if not crep.escapes]
        for crep in changed:
            crep.escapes = True
        return changed

    def setreturns(self):
        """Mark all creation points as returned; return the newly changed ones."""
        changed = [crep for crep in self.creation_points if not crep.returns]
        for crep in changed:
            crep.returns = True
        return changed

    def does_escape(self):
        return any(crep.escapes for crep in self.creation_points)

    def does_return(self):
        return any(crep.returns for crep in self.creation_points)

    def __repr__(self):
        return "<VarState %s>" % (self.creation_points, )
class AbstractDataFlowInterpreter(object):
    """Escape analysis over RPython flow graphs by abstract interpretation.

    Each variable maps to a VarState (a set of CreationPoints).  Blocks are
    scheduled and re-flowed until the states stop changing (a fixpoint);
    the dependency maps re-schedule blocks whenever a creation point newly
    escapes or is returned.
    """
    def __init__(self, translation_context):
        self.translation_context = translation_context
        self.scheduled = {} # block: graph containing it
        self.varstates = {} # var-or-const: state
        self.creationpoints = {} # var: creationpoint
        self.constant_cps = {} # const: creationpoint
        self.dependencies = {} # creationpoint: {block: graph containing it}
        self.functionargs = {} # graph: list of state of args
        self.flown_blocks = {} # block: True
    def seen_graphs(self):
        # All graphs whose arguments were set up via schedule_function().
        return self.functionargs.keys()
    def getstate(self, var_or_const):
        """Return (lazily creating) the VarState; None for non-heap values."""
        if not isonheap(var_or_const):
            return None
        if var_or_const in self.varstates:
            return self.varstates[var_or_const]
        if isinstance(var_or_const, Variable):
            varstate = VarState()
        else:
            # Constants share one "constant" creation point per object,
            # which is marked as escaped from the start.
            if var_or_const not in self.constant_cps:
                crep = CreationPoint("constant", var_or_const.concretetype)
                self.constant_cps[var_or_const] = crep
            else:
                crep = self.constant_cps[var_or_const]
            varstate = VarState(crep)
        self.varstates[var_or_const] = varstate
        return varstate
    def getstates(self, varorconstlist):
        return [self.getstate(var) for var in varorconstlist]
    def setstate(self, var, state):
        self.varstates[var] = state
    def get_creationpoint(self, var, method="?", op=None):
        # One cached CreationPoint per result variable.
        if var in self.creationpoints:
            return self.creationpoints[var]
        crep = CreationPoint(method, var.concretetype, op)
        self.creationpoints[var] = crep
        return crep
    def schedule_function(self, graph):
        """Queue *graph*'s start block; return (result state, arg states)."""
        startblock = graph.startblock
        if graph in self.functionargs:
            args = self.functionargs[graph]
        else:
            # First encounter: give each heap argument its own "arg"
            # creation point and schedule the start block.
            args = []
            for var in startblock.inputargs:
                if not isonheap(var):
                    varstate = None
                else:
                    crep = self.get_creationpoint(var, "arg")
                    varstate = VarState(crep)
                    self.setstate(var, varstate)
                args.append(varstate)
            self.scheduled[startblock] = graph
            self.functionargs[graph] = args
        resultstate = self.getstate(graph.returnblock.inputargs[0])
        return resultstate, args
    def flow_block(self, block, graph):
        """Interpret one block: special-case return/except, then propagate
        states through the operations and along the exits."""
        self.flown_blocks[block] = True
        if block is graph.returnblock:
            if isonheap(block.inputargs[0]):
                self.returns(self.getstate(block.inputargs[0]))
            return
        if block is graph.exceptblock:
            # Both the exception type and value escape the analysis.
            if isonheap(block.inputargs[0]):
                self.escapes(self.getstate(block.inputargs[0]))
            if isonheap(block.inputargs[1]):
                self.escapes(self.getstate(block.inputargs[1]))
            return
        self.curr_block = block
        self.curr_graph = graph
        for op in block.operations:
            self.flow_operation(op)
        for exit in block.exits:
            args = self.getstates(exit.args)
            targetargs = self.getstates(exit.target.inputargs)
            # flow every block at least once
            if (multicontains(targetargs, args) and
                exit.target in self.flown_blocks):
                continue
            # Merge the passed states into the target's input states and
            # re-schedule the target.
            for prevstate, origstate, var in zip(args, targetargs,
                                                 exit.target.inputargs):
                if not isonheap(var):
                    continue
                newstate = prevstate.merge(origstate)
                self.setstate(var, newstate)
            self.scheduled[exit.target] = graph
    def flow_operation(self, op):
        """Dispatch to op_<opname>; conservatively escape args otherwise."""
        args = self.getstates(op.args)
        opimpl = getattr(self, 'op_' + op.opname, None)
        if opimpl is not None:
            res = opimpl(op, *args)
            if res is not NotImplemented:
                self.setstate(op.result, res)
                return
        # No (applicable) implementation: assume the operation can leak
        # every heap argument.
        if isonheap(op.result) or filter(None, args):
            for arg in args:
                if arg is not None:
                    self.escapes(arg)
    def complete(self):
        """Run the fixpoint loop until no block remains scheduled."""
        while self.scheduled:
            block, graph = self.scheduled.popitem()
            self.flow_block(block, graph)
    def escapes(self, arg):
        changed = arg.setescapes()
        self.handle_changed(changed)
    def returns(self, arg):
        changed = arg.setreturns()
        self.handle_changed(changed)
    def handle_changed(self, changed):
        # Re-schedule every block that depends on a changed creation point.
        for crep in changed:
            if crep not in self.dependencies:
                continue
            self.scheduled.update(self.dependencies[crep])
    def register_block_dependency(self, state, block=None, graph=None):
        if block is None:
            block = self.curr_block
            graph = self.curr_graph
        for crep in state.creation_points:
            self.dependencies.setdefault(crep, {})[block] = graph
    def register_state_dependency(self, state1, state2):
        "state1 depends on state2: if state2 does escape/change, so does state1"
        # change state1 according to how state2 is now
        if state2.does_escape():
            self.escapes(state1)
        if state2.does_return():
            self.returns(state1)
        # register a dependency of the current block on state2:
        # that means that if state2 changes the current block will be reflown
        # triggering this function again and thus updating state1
        self.register_block_dependency(state2)
    # _____________________________________________________________________
    # operation implementations
    def op_malloc(self, op, typestate, flagsstate):
        assert flagsstate is None
        flags = op.args[1].value
        # Only GC-managed allocations are tracked.
        if flags != {'flavor': 'gc'}:
            return NotImplemented
        return VarState(self.get_creationpoint(op.result, "malloc", op))
    def op_malloc_varsize(self, op, typestate, flagsstate, lengthstate):
        assert flagsstate is None
        flags = op.args[1].value
        if flags != {'flavor': 'gc'}:
            return NotImplemented
        return VarState(self.get_creationpoint(op.result, "malloc_varsize", op))
    def op_cast_pointer(self, op, state):
        # A cast aliases the same object; the state carries over unchanged.
        return state
    def op_setfield(self, op, objstate, fieldname, valuestate):
        if valuestate is not None:
            # be pessimistic for now:
            # everything that gets stored into a structure escapes
            self.escapes(valuestate)
        return None
    def op_setarrayitem(self, op, objstate, indexstate, valuestate):
        if valuestate is not None:
            # everything that gets stored into a structure escapes
            self.escapes(valuestate)
        return None
    def op_getarrayitem(self, op, objstate, indexstate):
        if isonheap(op.result):
            return VarState(self.get_creationpoint(op.result, "getarrayitem", op))
    def op_getfield(self, op, objstate, fieldname):
        if isonheap(op.result):
            # assume that getfield creates a new value
            return VarState(self.get_creationpoint(op.result, "getfield", op))
    def op_getarraysize(self, op, arraystate):
        pass
    def op_direct_call(self, op, function, *args):
        graph = get_graph(op.args[0], self.translation_context)
        if graph is None:
            for arg in args:
                if arg is None:
                    continue
                # an external function can escape every parameter:
                self.escapes(arg)
            funcargs = [None] * len(args)
        else:
            result, funcargs = self.schedule_function(graph)
        assert len(args) == len(funcargs)
        # Tie each local argument state to the callee's parameter state.
        for localarg, funcarg in zip(args, funcargs):
            if localarg is None:
                assert funcarg is None
                continue
            if funcarg is not None:
                self.register_state_dependency(localarg, funcarg)
        if isonheap(op.result):
            # assume that a call creates a new value
            return VarState(self.get_creationpoint(op.result, "direct_call", op))
    def op_indirect_call(self, op, function, *args):
        graphs = op.args[-1].value
        args = args[:-1]
        if graphs is None:
            # Unknown callees: every heap argument may escape.
            for localarg in args:
                if localarg is None:
                    continue
                self.escapes(localarg)
        else:
            for graph in graphs:
                result, funcargs = self.schedule_function(graph)
                assert len(args) == len(funcargs)
                for localarg, funcarg in zip(args, funcargs):
                    if localarg is None:
                        assert funcarg is None
                        continue
                    self.register_state_dependency(localarg, funcarg)
        if isonheap(op.result):
            # assume that a call creates a new value
            return VarState(self.get_creationpoint(op.result, "indirect_call", op))
    def op_ptr_iszero(self, op, ptrstate):
        return None
    op_cast_ptr_to_int = op_keepalive = op_ptr_nonzero = op_ptr_iszero
    def op_ptr_eq(self, op, ptr1state, ptr2state):
        return None
    op_ptr_ne = op_ptr_eq
    def op_same_as(self, op, objstate):
        return objstate
def isonheap(var_or_const):
    """A value lives on the heap iff its low-level type is a pointer."""
    concrete_type = var_or_const.concretetype
    return isinstance(concrete_type, lltype.Ptr)
def multicontains(l1, l2):
    """Pairwise containment over two equal-length lists of states.

    None entries must be paired with None; otherwise every element of *l1*
    must .contains() its counterpart in *l2*.
    """
    assert len(l1) == len(l2)
    for state, other in zip(l1, l2):
        if state is None:
            assert other is None
            continue
        if not state.contains(other):
            return False
    return True
def is_malloc_like(adi, graph, seen):
    """Return True if *graph* merely allocates and returns a fresh object.

    True when the return value stems from a single non-escaping creation
    point that is either a direct malloc or a direct_call to another
    malloc-like graph.  *seen* memoizes per-graph results.
    """
    if graph in seen:
        return seen[graph]
    return_state = adi.getstate(graph.getreturnvar())
    # A precise answer needs exactly one possible creation point.
    if return_state is None or len(return_state.creation_points) != 1:
        seen[graph] = False
        return False
    crep, = return_state.creation_points
    if crep.escapes:
        seen[graph] = False
        return False
    if crep.creation_method in ["malloc", "malloc_varsize"]:
        assert crep.returns
        seen[graph] = True
        return True
    if crep.creation_method == "direct_call":
        # Follow the callee: a wrapper around a malloc-like graph is
        # itself malloc-like.
        subgraph = get_graph(crep.op.args[0], adi.translation_context)
        if subgraph is None:
            seen[graph] = False
            return False
        res = is_malloc_like(adi, subgraph, seen)
        seen[graph] = res
        return res
    seen[graph] = False
    return False
def malloc_like_graphs(adi):
    """Return every analyzed graph that behaves like a malloc wrapper."""
    cache = {}
    result = []
    # The cache is shared across calls so recursive wrappers are resolved once.
    for graph in adi.seen_graphs():
        if is_malloc_like(adi, graph, cache):
            result.append(graph)
    return result
| oblique-labs/pyVM | rpython/translator/backendopt/escape.py | Python | mit | 12,552 |
import sys
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError
from workshops.models import Award, Badge, Person
SKIP_DIRS = ['class']
class Command(BaseCommand):
args = '/path/to/site'
help = 'Report inconsistencies in badges.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('Usage: check_badges /path/to/site')
path_to_site = args[0]
badge_dir = os.path.join(path_to_site, 'badges')
for entry in os.listdir(badge_dir):
entry_path = os.path.join(badge_dir, entry)
if os.path.isdir(entry_path) and entry not in SKIP_DIRS:
self.check_badges(entry, entry_path)
def check_badges(self, badge_name, badge_path):
try:
badge = Badge.objects.get(name=badge_name)
db_awards = set([a.person.username for a in Award.objects.filter(badge=badge)])
path_awards = set([os.path.splitext(p)[0] for p in os.listdir(badge_path) if p.endswith('.json')])
self.report_missing('in database but not site', badge_name, db_awards - path_awards)
self.report_missing('in site but not database', badge_name, path_awards - db_awards)
except ObjectDoesNotExist:
print('badge "{0}" not known'.format(badge_name, file=sys.stderr))
def report_missing(self, title, badge_name, items):
if items:
print('{0} {1}'.format(badge_name, title))
for i in sorted(list(items)):
try:
p = Person.objects.get(username=i)
print(' {0}: {1}'.format(i, p))
except ObjectDoesNotExist:
print(' {0}'.format(i))
| shapiromatron/amy | workshops/management/commands/check_badges.py | Python | mit | 1,785 |
# django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
# from django.test import TestCase
| django-salesforce/django-salesforce | salesforce/testrunner/example/tests.py | Python | mit | 363 |
def is_integer(nr):
    """Return True if *nr* can be converted to an int, False otherwise.

    Accepts anything int() accepts (numeric strings, floats, bools, ...).
    Fix: int() raises TypeError (not ValueError) for values such as None
    or lists, which previously escaped the handler and crashed the caller;
    both exception types now yield False.
    """
    try:
        int(nr)
        return True
    except (ValueError, TypeError):
        return False
| leyyin/university-SE | school/util.py | Python | mit | 110 |
#!/usr/bin/env python
import os
import sys
import argparse
import traceback
sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir))
from toollib.group import Group,UnsortedInputGrouper
import scipy.stats as ss
class KSGroup(Group):
    """Accumulates one column of float samples per group; on completion runs
    a one-sample Kolmogorov-Smirnov test against the configured distribution
    and writes the (statistic, pvalue) pair to the output file.

    Relies on the module-level `args` namespace parsed in __main__.
    """
    def __init__(self, tup):
        super(KSGroup, self).__init__(tup)
        self.samples = []  # float samples collected for this group
    def add(self, chunks):
        # Pull the configured column out of the row and keep it as a float.
        self.samples.append(float(chunks[args.column]))
    def done(self):
        jdelim = args.delimiter if args.delimiter != None else ' '
        # Prefix the output with the group key, if any.
        if len(self.tup) > 0:
            args.outfile.write(jdelim.join(self.tup) + jdelim)
        # ss.kstest returns (statistic, pvalue).
        args.outfile.write(jdelim.join(map(str, ss.kstest(self.samples, args.distf, args=args.params))) + '\n')
if __name__ == "__main__":
    # set up command line args
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     description='Compare the request distributions of all clients')
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument('-s', '--source', default='scipy.stats', choices=['scipy.stats', 'lambda'], help='source of the distribution to fit')
    parser.add_argument('-i', '--dist', default='paretoLomax')
    parser.add_argument('-p', '--params', default='', help='initial parameters')
    parser.add_argument('-c', '--column', type=int, default=0)
    parser.add_argument('-g', '--group', nargs='+', type=int, default=[])
    parser.add_argument('-d', '--delimiter', default=None)
    args = parser.parse_args()
    # Parse the initial distribution parameters into floats.
    args.params = map(float, args.params.split(args.delimiter))
    if args.source == 'scipy.stats':
        args.source = ss
    else:
        args.source = None
    if args.source:
        # Walk the dotted attribute path inside scipy.stats (e.g. 'norm').
        mod = args.source
        for c in args.dist.split('.'):
            mod = getattr(mod, c)
        args.distf = mod
    else:
        # HACK: evaluates arbitrary user-supplied code; unsafe if the
        # command line can come from an untrusted source.
        args.distf = eval(args.dist)
    grouper = UnsortedInputGrouper(args.infile, KSGroup, args.group, args.delimiter)
    grouper.group()
| scoky/pytools | curve/ks_test.py | Python | mit | 2,158 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Score.created_at'
db.add_column(u'core_score', 'created_at',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
keep_default=False)
# Adding field 'Score.updated_at'
db.add_column(u'core_score', 'updated_at',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
keep_default=False)
# Adding field 'Score.changed_by'
db.add_column(u'core_score', 'changed_by',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'core_score_related', null=True, to=orm['auth.User']),
keep_default=False)
    def backwards(self, orm):
        """Reverse forwards(): drop the audit columns from core_score."""
        # Deleting field 'Score.created_at'
        db.delete_column(u'core_score', 'created_at')
        # Deleting field 'Score.updated_at'
        db.delete_column(u'core_score', 'updated_at')
        # Deleting field 'Score.changed_by'
        # The FK is stored in the DB as 'changed_by_id', hence the column name
        # differs from the field name used in forwards().
        db.delete_column(u'core_score', 'changed_by_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.image': {
'Meta': {'object_name': 'Image'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_image_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.indicator': {
'Meta': {'object_name': 'Indicator'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_indicator_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['forms.Form']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'form_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maximum_monthly_records': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'passing_percentage': ('django.db.models.fields.FloatField', [], {'default': '85'}),
'title': ('django.db.models.fields.TextField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.location': {
'Meta': {'object_name': 'Location'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_location_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Image']", 'null': 'True', 'blank': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Indicator']", 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42'}),
'title': ('django.db.models.fields.TextField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.score': {
'Meta': {'object_name': 'Score'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_score_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'entry_count': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Indicator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Location']"}),
'month': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'passing': ('django.db.models.fields.BooleanField', [], {}),
'passing_entry_count': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.FloatField', [], {'default': '85'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'forms.form': {
'Meta': {'object_name': 'Form'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['core'] | SM2015/orchid | core/migrations/0002_auto__add_field_score_created_at__add_field_score_updated_at__add_fiel.py | Python | mit | 11,751 |
class Solution(object):
    def multiply(self, num1, num2):
        """Return the product of two decimal integers given as strings.

        :type num1: str
        :type num2: str
        :rtype: str
        """
        first = int(num1)
        second = int(num2)
        product = first * second
        return str(product)
| SF-Zhou/LeetCode.Solutions | solutions/multiply_strings.py | Python | mit | 194 |
from transmute_core import *
from .route import route
from .swagger import add_swagger
| toumorokoshi/web-transmute | transmute_core/frameworks/flask/__init__.py | Python | mit | 87 |
from voluptuous import Invalid
import re
def Url(msg=None):
    """Build a voluptuous validator accepting HTTP/FTP(S) URLs.

    Returns a callable that yields str(v) for a valid URL and raises
    Invalid (with *msg* or a default message) otherwise.
    """
    # Compile once at factory time; previously the pattern was recompiled on
    # every single validation call.
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https:// (or ftp/ftps)
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    def f(v):
        if regex.match(str(v)):
            return str(v)
        else:
            raise Invalid(msg or "value is not correct Uniform Resource Locator")
    return f
def TrackableCid(msg=None):
    """Build a voluptuous validator for trackable CIDs.

    A valid CID looks like 'XX-YY-123' (two letter pairs, then a number with
    no leading zero); matching is case-insensitive. Returns str(v) on success,
    raises Invalid otherwise.
    """
    # Compile once at factory time; previously the pattern was recompiled on
    # every single validation call.
    regex = re.compile(r'^[A-Z]{2}-[A-Z]{2}-[1-9]{1}[0-9]*$', re.IGNORECASE)

    def f(v):
        if regex.match(str(v)):
            return str(v)
        else:
            raise Invalid(msg or "value is not correct trackable CID")
    return f
| sparwelt/bitool | sparweltbitool/validation.py | Python | mit | 914 |
"""Support for vacuum cleaner robots (botvacs)."""
from dataclasses import dataclass
from datetime import timedelta
from functools import partial
import logging
from typing import final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ( # noqa: F401 # STATE_PAUSED/IDLE are API
ATTR_BATTERY_LEVEL,
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_ON,
STATE_PAUSED,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import (
Entity,
EntityDescription,
ToggleEntity,
ToggleEntityDescription,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vacuum"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=20)
# Extra state attribute keys exposed by vacuum entities.
ATTR_BATTERY_ICON = "battery_icon"
ATTR_CLEANED_AREA = "cleaned_area"
ATTR_FAN_SPEED = "fan_speed"
ATTR_FAN_SPEED_LIST = "fan_speed_list"
ATTR_PARAMS = "params"
ATTR_STATUS = "status"
# Service names registered by async_setup().
SERVICE_CLEAN_SPOT = "clean_spot"
SERVICE_LOCATE = "locate"
SERVICE_RETURN_TO_BASE = "return_to_base"
SERVICE_SEND_COMMAND = "send_command"
SERVICE_SET_FAN_SPEED = "set_fan_speed"
SERVICE_START_PAUSE = "start_pause"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_STOP = "stop"
# States reported by StateVacuumEntity (besides the imported IDLE/PAUSED).
STATE_CLEANING = "cleaning"
STATE_DOCKED = "docked"
STATE_RETURNING = "returning"
STATE_ERROR = "error"
STATES = [STATE_CLEANING, STATE_DOCKED, STATE_RETURNING, STATE_ERROR]
DEFAULT_NAME = "Vacuum cleaner robot"
# Bit flags OR-ed together into an entity's supported_features bitmask.
SUPPORT_TURN_ON = 1
SUPPORT_TURN_OFF = 2
SUPPORT_PAUSE = 4
SUPPORT_STOP = 8
SUPPORT_RETURN_HOME = 16
SUPPORT_FAN_SPEED = 32
SUPPORT_BATTERY = 64
SUPPORT_STATUS = 128
SUPPORT_SEND_COMMAND = 256
SUPPORT_LOCATE = 512
SUPPORT_CLEAN_SPOT = 1024
SUPPORT_MAP = 2048
SUPPORT_STATE = 4096
SUPPORT_START = 8192
@bind_hass
def is_on(hass, entity_id):
    """Return if the vacuum is on based on the statemachine.

    hass: the HomeAssistant instance whose state machine is queried.
    entity_id: full entity id (e.g. "vacuum.kitchen") to check.
    """
    return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the vacuum component and register its entity services."""
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL
    )
    await component.async_setup(config)
    # Services with no payload map 1:1 onto entity methods; register them in
    # bulk (order matches the previous explicit registration calls).
    for service, handler in (
        (SERVICE_TURN_ON, "async_turn_on"),
        (SERVICE_TURN_OFF, "async_turn_off"),
        (SERVICE_TOGGLE, "async_toggle"),
        (SERVICE_START_PAUSE, "async_start_pause"),
        (SERVICE_START, "async_start"),
        (SERVICE_PAUSE, "async_pause"),
        (SERVICE_RETURN_TO_BASE, "async_return_to_base"),
        (SERVICE_CLEAN_SPOT, "async_clean_spot"),
        (SERVICE_LOCATE, "async_locate"),
        (SERVICE_STOP, "async_stop"),
    ):
        component.async_register_entity_service(service, {}, handler)
    # Services carrying payloads keep explicit validation schemas.
    component.async_register_entity_service(
        SERVICE_SET_FAN_SPEED,
        {vol.Required(ATTR_FAN_SPEED): cv.string},
        "async_set_fan_speed",
    )
    component.async_register_entity_service(
        SERVICE_SEND_COMMAND,
        {
            vol.Required(ATTR_COMMAND): cv.string,
            vol.Optional(ATTR_PARAMS): vol.Any(dict, cv.ensure_list),
        },
        "async_send_command",
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a config entry."""
    # Delegate to the EntityComponent created by async_setup().
    component: EntityComponent = hass.data[DOMAIN]
    return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Delegate to the EntityComponent created by async_setup().
    component: EntityComponent = hass.data[DOMAIN]
    return await component.async_unload_entry(entry)
class _BaseVacuum(Entity):
    """Representation of a base vacuum.
    Contains common properties and functions for all vacuum devices.
    Platforms implement the sync methods; the async_* wrappers offload them
    to the executor so blocking implementations never stall the event loop.
    """
    @property
    def supported_features(self):
        """Flag vacuum cleaner features that are supported.

        Bitwise OR of the module-level SUPPORT_* constants.
        """
        raise NotImplementedError()
    @property
    def battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        return None
    @property
    def battery_icon(self):
        """Return the battery icon for the vacuum cleaner."""
        raise NotImplementedError()
    @property
    def fan_speed(self):
        """Return the fan speed of the vacuum cleaner."""
        return None
    @property
    def fan_speed_list(self):
        """Get the list of available fan speed steps of the vacuum cleaner."""
        raise NotImplementedError()
    @property
    def capability_attributes(self):
        """Return capability attributes."""
        # Implicitly returns None when fan speed is unsupported.
        if self.supported_features & SUPPORT_FAN_SPEED:
            return {ATTR_FAN_SPEED_LIST: self.fan_speed_list}
    @property
    def state_attributes(self):
        """Return the state attributes of the vacuum cleaner."""
        data = {}
        if self.supported_features & SUPPORT_BATTERY:
            data[ATTR_BATTERY_LEVEL] = self.battery_level
            data[ATTR_BATTERY_ICON] = self.battery_icon
        if self.supported_features & SUPPORT_FAN_SPEED:
            data[ATTR_FAN_SPEED] = self.fan_speed
        return data
    def stop(self, **kwargs):
        """Stop the vacuum cleaner."""
        raise NotImplementedError()
    async def async_stop(self, **kwargs):
        """Stop the vacuum cleaner.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.stop, **kwargs))
    def return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock."""
        raise NotImplementedError()
    async def async_return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.return_to_base, **kwargs))
    def clean_spot(self, **kwargs):
        """Perform a spot clean-up."""
        raise NotImplementedError()
    async def async_clean_spot(self, **kwargs):
        """Perform a spot clean-up.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.clean_spot, **kwargs))
    def locate(self, **kwargs):
        """Locate the vacuum cleaner."""
        raise NotImplementedError()
    async def async_locate(self, **kwargs):
        """Locate the vacuum cleaner.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.locate, **kwargs))
    def set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed."""
        raise NotImplementedError()
    async def async_set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(
            partial(self.set_fan_speed, fan_speed, **kwargs)
        )
    def send_command(self, command, params=None, **kwargs):
        """Send a command to a vacuum cleaner."""
        raise NotImplementedError()
    async def async_send_command(self, command, params=None, **kwargs):
        """Send a command to a vacuum cleaner.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(
            partial(self.send_command, command, params=params, **kwargs)
        )
@dataclass
class VacuumEntityDescription(ToggleEntityDescription):
    """A class that describes vacuum entities."""
    # Intentionally empty: exists so platforms can type their descriptions.
class VacuumEntity(_BaseVacuum, ToggleEntity):
    """Representation of a vacuum cleaner robot.

    Legacy on/off style vacuum; newer integrations should use
    StateVacuumEntity instead.
    """
    entity_description: VacuumEntityDescription
    @property
    def status(self):
        """Return the status of the vacuum cleaner."""
        return None
    @property
    def battery_icon(self):
        """Return the battery icon for the vacuum cleaner."""
        charging = False
        if self.status is not None:
            # NOTE(review): substring heuristic -- any status text containing
            # "charg" (including e.g. "discharging") is treated as charging.
            charging = "charg" in self.status.lower()
        return icon_for_battery_level(
            battery_level=self.battery_level, charging=charging
        )
    @final
    @property
    def state_attributes(self):
        """Return the state attributes of the vacuum cleaner."""
        data = super().state_attributes
        if self.supported_features & SUPPORT_STATUS:
            data[ATTR_STATUS] = self.status
        return data
    def turn_on(self, **kwargs):
        """Turn the vacuum on and start cleaning."""
        raise NotImplementedError()
    async def async_turn_on(self, **kwargs):
        """Turn the vacuum on and start cleaning.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.turn_on, **kwargs))
    def turn_off(self, **kwargs):
        """Turn the vacuum off stopping the cleaning and returning home."""
        raise NotImplementedError()
    async def async_turn_off(self, **kwargs):
        """Turn the vacuum off stopping the cleaning and returning home.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.turn_off, **kwargs))
    def start_pause(self, **kwargs):
        """Start, pause or resume the cleaning task."""
        raise NotImplementedError()
    async def async_start_pause(self, **kwargs):
        """Start, pause or resume the cleaning task.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(partial(self.start_pause, **kwargs))
    async def async_pause(self):
        """Not supported."""
    async def async_start(self):
        """Not supported."""
@dataclass
class StateVacuumEntityDescription(EntityDescription):
    """A class that describes vacuum entities."""
    # Intentionally empty: exists so platforms can type their descriptions.
class StateVacuumEntity(_BaseVacuum):
    """Representation of a vacuum cleaner robot that supports states.

    Reports one of the module-level STATES (plus imported IDLE/PAUSED)
    instead of the legacy on/off model.
    """
    entity_description: StateVacuumEntityDescription
    @property
    def state(self):
        """Return the state of the vacuum cleaner."""
        return None
    @property
    def battery_icon(self):
        """Return the battery icon for the vacuum cleaner."""
        # A docked vacuum is assumed to be charging.
        charging = bool(self.state == STATE_DOCKED)
        return icon_for_battery_level(
            battery_level=self.battery_level, charging=charging
        )
    def start(self):
        """Start or resume the cleaning task."""
        raise NotImplementedError()
    async def async_start(self):
        """Start or resume the cleaning task.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(self.start)
    def pause(self):
        """Pause the cleaning task."""
        raise NotImplementedError()
    async def async_pause(self):
        """Pause the cleaning task.
        This method must be run in the event loop.
        """
        await self.hass.async_add_executor_job(self.pause)
    async def async_turn_on(self, **kwargs):
        """Not supported."""
    async def async_turn_off(self, **kwargs):
        """Not supported."""
    async def async_toggle(self, **kwargs):
        """Not supported."""
| rohitranjan1991/home-assistant | homeassistant/components/vacuum/__init__.py | Python | mit | 11,915 |
# -*- coding: UTF-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
from pyxer.base import *
@controller
def index():
return "/index" | holtwick/pyxer | tests/public/__init__.py | Python | mit | 279 |
import sys
import requests
from urllib.parse import urlparse
docs_repos = [
"frappe_docs",
"erpnext_documentation",
"erpnext_com",
"frappe_io",
]
def uri_validator(x):
    """Return True when *x* parses as a URL with a scheme, host and path."""
    parts = urlparse(x)
    return bool(parts.scheme and parts.netloc and parts.path)
def docs_link_exists(body):
    """Return True when *body* contains a link into one of the docs repos.

    A qualifying link is a github.com URL whose path has exactly five
    '/'-separated segments ('', 'frappe', <docs repo>, <kind>, <id>).
    """
    # str.split() with no argument tokenizes on all whitespace, including
    # newlines, so this visits the same tokens as a per-line split.
    for token in body.split():
        if not (token.startswith('http') and uri_validator(token)):
            continue
        url = urlparse(token)
        if url.netloc != "github.com":
            continue
        segments = url.path.split('/')
        if len(segments) == 5 and segments[1] == "frappe" and segments[2] in docs_repos:
            return True
if __name__ == "__main__":
    # The PR number is passed as the only CLI argument by the CI workflow.
    pr = sys.argv[1]
    response = requests.get("https://api.github.com/repos/frappe/frappe/pulls/{}".format(pr))
    if response.ok:
        payload = response.json()
        title = payload.get("title", "").lower()
        head_sha = payload.get("head", {}).get("sha")
        # NOTE(review): GitHub returns JSON null for an empty PR description,
        # in which case payload.get("body", "") is None and .lower() raises --
        # confirm whether that case can occur here.
        body = payload.get("body", "").lower()
        # Only feature PRs ("feat...") need a docs link; authors may opt out
        # by putting "no-docs" in the PR description.
        if title.startswith("feat") and head_sha and "no-docs" not in body:
            if docs_link_exists(body):
                print("Documentation Link Found. You're Awesome! 🎉")
            else:
                print("Documentation Link Not Found! ⚠️")
                sys.exit(1)
        else:
            print("Skipping documentation checks... 🏃")
| mhbu50/frappe | .github/helper/documentation.py | Python | mit | 1,220 |
from django.template import Library
register = Library()
@register.inclusion_tag("admin/filer/actions.html", takes_context=True)
def filer_actions(context):
    """
    Track the number of times the action field has been rendered on the page,
    so we know which value to use.
    """
    # First render yields index 0 (-1 + 1); each later render increments it.
    context['action_index'] = context.get('action_index', -1) + 1
    return context
| croepha/django-filer | filer/templatetags/filer_admin_tags.py | Python | mit | 403 |
#!/usr/bin/env python3.2
import argparse
import os
import re
import sqlite3
from collections import OrderedDict
from biocode.utils import read_list_file
def main():
    """Command-line driver: read BLAST result files, map each hit to the NCBI
    taxonomy via the SQLite db, group scores at the requested rank and write
    a tab-separated profile (tax_id, score, scientific name)."""
    parser = argparse.ArgumentParser( description='Reads a BLAST m8 file and taxonomy DB to produce a taxonomic profile at any user-specified ranking level.')
    ## input formats: btab, blast_m8
    parser.add_argument('-f', '--input_format', type=str, required=True, help='Blast format: current options are btab or blast_m8' )
    ## The SQLite3 file that will be read for taxonomy information
    parser.add_argument('-t', '--taxonomy_db', type=str, required=True, help='Path to a taxonomy.db file created by "create_taxonomy_db.py"' )
    ## BLAST list file
    parser.add_argument('-b', '--blast_list_file', type=str, required=True, help='List of BLAST files (m8 format)' )
    ## output file to be written
    parser.add_argument('-o', '--output_file', type=str, required=True, help='Path where the result file should written' )
    ## E-value cutoff to use
    parser.add_argument('-e', '--eval_cutoff', type=float, required=False, help='Optional E-value cutoff to use.' )
    ## Top N hits per query to score. Only counts those where the taxon could be looked up in the indexes
    parser.add_argument('-n', '--top_n', type=int, required=False, default=1, help=' Top N hits per query to score. Only counts unique taxon matches which could be looked up in the indexes' )
    ## rank on which matches will be grouped and reported. values like: species, genus, order, family, etc.
    parser.add_argument('-r', '--rank', type=str, required=True, help='Taxonomy rank on which to group all matches, such as: species, genus, order, family, etc.' )
    args = parser.parse_args()
    conn = sqlite3.connect( args.taxonomy_db )
    c = conn.cursor()
    blast_files = read_list_file( args.blast_list_file )
    taxon_counts = {}
    processed_file_count = 0
    # Running tallies of GI->taxon lookup success rates, reported at the end.
    stats = {}
    stats['gi_lookup_success_count'] = 0
    stats['gi_lookup_fail_count'] = 0
    stats['taxon_lookup_success_count'] = 0
    stats['taxon_lookup_failure_count'] = 0
    for file in blast_files:
        print("Processing file: ", file)
        if args.input_format == 'blast_m8' or args.input_format == 'btab':
            parse_blast_file( file, c, taxon_counts, args.eval_cutoff, args.input_format, stats, args.top_n )
        else:
            raise Exception("Unsupported input format passed: {0}".format(args.input_format) )
        processed_file_count += 1
        #if processed_file_count == 50:
        #break
    ## process the taxon counts, conforming them to the user-specified rank
    result_table = group_taxa_by_rank( args.rank, taxon_counts, c )
    node_names = get_selected_node_names( result_table, c )
    c.close()
    fout = open(args.output_file, mode='w')
    ## write the results to the output file in order of most-found clade first
    for tax_id in OrderedDict(sorted(result_table.items(), reverse=True, key=lambda t: t[1])):
        sci_name = ''
        if tax_id in node_names:
            sci_name = node_names[tax_id]
        fout.write( "{0}\t{1}\t{2}\n".format(tax_id, int(result_table[tax_id]), sci_name ) )
    fout.close()
    print("INFO: successful GI lookups: {0}/{1}".format(stats['gi_lookup_success_count'], \
          (stats['gi_lookup_fail_count'] + stats['gi_lookup_success_count'])) )
    print("INFO: successful taxon lookups: {0}/{1}".format( stats['taxon_lookup_success_count'], \
          (stats['taxon_lookup_success_count'] + stats['taxon_lookup_failure_count']) ) )
def get_selected_node_names( res_table, cursor):
    """Map each tax_id key of *res_table* to its scientific name.

    Ids missing from the orgs table are warned about and omitted.
    """
    names = dict()
    for taxon_id in res_table:
        cursor.execute('''SELECT scientific_name FROM orgs WHERE tax_id=?''', (taxon_id,) )
        hit = cursor.fetchone()
        if hit is None:
            print("WARN: failed to get scientific name for tax_id:", taxon_id)
        else:
            names[taxon_id] = hit[0]
    return names
def get_ranked_taxon( tax_id, c, rank, rec_depth ):
    """Walk up the taxonomy tree from *tax_id* until a node of *rank* is hit.

    Returns the matching tax_id, or None when the node is missing or more
    than 20 parent hops were taken (cycle / runaway-lineage guard).
    """
    c.execute("""SELECT parent_tax_id, rank FROM nodes WHERE tax_id = ?""", (tax_id,) )
    node = c.fetchone()
    if rec_depth > 20:
        print("WARN: deep recursion detected for tax ID:", tax_id)
        return None
    if node is None:
        print("WARN: unable to find ranked taxon for tax_id:", tax_id)
        return None
    parent_id, node_rank = node
    if node_rank == rank:
        return tax_id
    return get_ranked_taxon( parent_id, c, rank, rec_depth + 1 )
def group_taxa_by_rank(rank, counts, cursor):
    """Given a taxonomic rank, the input count table is regrouped by walking
    up the taxonomy tree until all nodes are at the level of the passed
    rank
    """
    ranked_counts = {}
    unranked_taxon_count = 0
    for taxon_id, entry in counts.items():
        ranked_id = get_ranked_taxon(taxon_id, cursor, rank, 0)
        if not ranked_id:
            # No ancestor of the requested rank could be resolved.
            unranked_taxon_count += 1
            continue
        ranked_counts[ranked_id] = ranked_counts.get(ranked_id, 0) + entry['n']
    return ranked_counts
def parse_blast_file( file, cursor, tax, eval_cutoff, format, stats, hits_per_query ):
    """ For each query sequence score up to *hits_per_query* matches above the
    E-value cutoff (if any) which have an NCBI taxonomy assignment, folding a
    bits-per-aligned-position score into the *tax* tally via add_taxon_match().
    Lookup successes/failures are tallied into *stats*.
    """
    if ( not os.path.isfile(file) ):
        raise Exception("Couldn't find file: " + file)
    ## presets are for ncbi_m8, for which lines should have 12 columns. they are
    #  1: Query - The query sequence id
    #  2: Subject - The matching subject sequence id
    #  3: Percent identity
    #  4: alignment length
    #  5: mismatches
    #  6: gap openings
    #  7: q.start
    #  8: q.end
    #  9: s.start
    # 10: s.end
    # 11: e-value
    # 12: bit score
    ID_COLUMN_NUM = 0
    SUBJECT_LABEL_COLUMN_NUM = 1
    EVAL_COLUMN_NUM = 10
    ALIGN_LEN_COLUMN_NUM = 3
    BIT_SCORE_COLUMN_NUM = 11
    if format == 'btab':
        ID_COLUMN_NUM = 0
        SUBJECT_LABEL_COLUMN_NUM = 5
        EVAL_COLUMN_NUM = 19
    current_id = ""
    current_id_match_count = 0
    current_match_ids = dict()
    for line in open(file, "r"):
        cols = line.split("\t")
        if len(cols) < 10:
            continue
        this_id = cols[ID_COLUMN_NUM]
        if this_id != current_id:
            # BUGFIX: the reset previously assigned a misspelled variable
            # (current_id_match_ids), so taxa seen for earlier queries were
            # never cleared and wrongly suppressed matches of later queries.
            current_match_ids = dict()
            current_id_match_count = 0
            current_id = this_id
        if current_id_match_count >= hits_per_query:
            continue
        if eval_cutoff is not None and eval_cutoff < float(cols[EVAL_COLUMN_NUM]):
            continue
        gi = parse_gi(cols[SUBJECT_LABEL_COLUMN_NUM], cursor)
        if not gi:
            stats['gi_lookup_fail_count'] += 1
            continue
        stats['gi_lookup_success_count'] += 1
        taxon_id = get_taxon_id_by_gi(gi, cursor)
        if not taxon_id:
            stats['taxon_lookup_failure_count'] += 1
            print("WARN: failed to find a taxon_id for gi: {0}".format(gi))
            continue
        stats['taxon_lookup_success_count'] += 1
        if taxon_id not in current_match_ids:
            # Score each taxon at most once per query: bits per aligned position.
            # NOTE(review): int() on the bit-score column fails for fractional
            # scores like "36.2" -- confirm the inputs only carry integers.
            match_score = int(cols[BIT_SCORE_COLUMN_NUM])/int(cols[ALIGN_LEN_COLUMN_NUM])
            add_taxon_match( taxon_id, tax, cursor, match_score )
            current_id_match_count += 1
            current_match_ids[taxon_id] = True
def add_taxon_match( id, tax, c, score ):
    """Accumulate *score* for taxon *id* in the running tally *tax*.

    On first sighting the clade label is fetched and cached alongside the
    score under keys 'n' (score) and 'l' (label).
    """
    entry = tax.get(id)
    if entry is None:
        tax[id] = {'n': score, 'l': get_clade_name_by_taxon_id( c, id )}
    else:
        entry['n'] += score
def get_clade_name_by_taxon_id( cursor, taxon_id ):
    """Return the scientific name for *taxon_id*, or None when unknown."""
    cursor.execute("""SELECT scientific_name FROM orgs WHERE tax_id = ?""", (taxon_id,) )
    hit = cursor.fetchone()
    return hit[0] if hit else None
def get_taxon_id_by_gi( gi, cursor ):
    """Attempts to fetch a taxon ID using the GI from first the nucl_acc and then prot_acc tables"""
    for query in ('SELECT tax_id FROM nucl_acc WHERE gi = ?',
                  'SELECT tax_id FROM prot_acc WHERE gi = ?'):
        cursor.execute(query, (gi,))
        row = cursor.fetchone()
        if row:
            return row[0]
    return None
def get_gi_by_accession( acc, cursor ):
    """Resolve an accession.version string to a GI number (None when absent).

    Protein accessions (three letters followed by five digits) are looked up
    in prot_acc; everything else goes to nucl_acc.
    """
    is_protein = re.match( r'^[A-Z]{3}[0-9]{5}\.\d', acc ) is not None
    ## CURRENT HACK, because of a db index bug the version number was indexed
    ## with the period character removed -- strip it before querying.
    acc = acc.replace('.', '')
    if is_protein:
        cursor.execute("""SELECT gi FROM prot_acc WHERE version = ?""", (acc,) )
    else:
        cursor.execute("""SELECT gi FROM nucl_acc WHERE version = ?""", (acc,) )
    row = cursor.fetchone()
    return row[0] if row else None
def parse_gi( match_header, cursor ):
    """Parses the GI number out of a NCBI-formatted BLAST match header string. If the
    header has an accession instead of a GI, it performs a lookup
    """
    direct = re.match( 'gi\|(\d+)', match_header )
    if direct:
        return direct.group(1)
    ## no embedded GI: fall back to an accession-shaped token and resolve it
    acc_match = re.match( '[a-z]+\|([A-Z]{2,}\d{5,}\.\d)', match_header )
    if acc_match:
        return get_gi_by_accession( acc_match.group(1), cursor )
    return None
# Standard script entry point: delegate to main() (defined earlier in the
# full file, outside this excerpt).
if __name__ == '__main__':
    main()
| jorvis/biocode | taxonomy/create_taxonomic_profile_from_blast.py | Python | mit | 11,261 |
"""
Outlier Detection using Tukey's Filter Class
"""
import sys
import itertools
from time import time
from lib.modules.base_task import BaseTask
from lib.modules.helper import extract_service_name, get_closest_datapoint
from lib.modules.models import TimeSeriesTuple
class TukeysFilter(BaseTask):
    """Flag metrics as outliers using Tukey's fences.

    read()    pulls the quantile-25/75 series and the per-instance metrics
              from the metric sink and reduces them to current values.
    process() classifies each instance against the interquartile fences.
    write()   emits per-instance states plus summary counters.
    """

    def __init__(self, config, logger, options):
        super(TukeysFilter, self).__init__(config, logger, resource={'metric_sink': 'RedisSink',
                                                                     'output_sink': 'GraphiteSink'})
        self.namespace = 'TukeysFilter'
        self.service = options['service']
        self.params = options['params']

    def read(self):
        """Collect current quantile values and the metric distribution.

        :return: (quantile_25, quantile_75, {metric_name: value}) or None if
            any required datapoint is missing, stale, or inconsistent.
        """
        quantile_25 = self.params['quantile_25']
        quantile_75 = self.params['quantile_75']
        metrics = self.params['metrics']
        delay = self.params.get('offset', 0)
        maximum_delay = self.params.get('maximum_delay', 600)

        # read metrics from metric_sink
        quantile_25 = [i for i in self.metric_sink.iread(quantile_25)]
        quantile_75 = [i for i in self.metric_sink.iread(quantile_75)]
        metrics = [i for i in self.metric_sink.iread(metrics)]
        if not (len(quantile_25) * len(quantile_75) * len(metrics)):
            self.logger.error(
                'No data found for quantile/to be checked metrics. Exiting')
            return None

        # sort TimeSeriesTuples by timestamp
        quantile_25 = sorted(quantile_25, key=lambda tup: tup.timestamp)
        quantile_75 = sorted(quantile_75, key=lambda tup: tup.timestamp)
        metrics = sorted(metrics, key=lambda tup: (tup.name, tup.timestamp))

        # find closest datapoint to now() (corrected by delay) if not too old
        time_now = time() - delay
        quantile_25 = get_closest_datapoint(quantile_25, time_now)
        if time_now - quantile_25.timestamp > maximum_delay:
            self.logger.error('Quantile25 Value is too old (Timestamp: %d) of: %s. Exiting' % (
                quantile_25.timestamp, quantile_25.name))
            return None
        quantile_25 = quantile_25.value
        quantile_75 = get_closest_datapoint(quantile_75, time_now)
        if time_now - quantile_75.timestamp > maximum_delay:
            self.logger.error('Quantile75 Value is too old (Timestamp: %d) of: %s. Exiting' % (
                quantile_75.timestamp, quantile_75.name))
            return None
        quantile_75 = quantile_75.value
        if quantile_25 > quantile_75:
            self.logger.error('Inconsistent Quantile Values (Q25: %f, Q75: %f). Exiting' % (
                quantile_25, quantile_75))
            return None

        # group by metric (e.g. instance) first and find then closest datapoint
        distribution = {}
        grouped = itertools.groupby(metrics, key=lambda tup: tup.name)
        for key, metrics in grouped:
            closest_datapoint = get_closest_datapoint(
                [metric for metric in metrics], time_now)
            if time_now - closest_datapoint.timestamp < maximum_delay:
                distribution[key] = closest_datapoint.value
        if len(distribution) == 0:
            self.logger.error('No Distribution Values. Exiting')
            return None

        return quantile_25, quantile_75, distribution

    def process(self, data):
        """Classify every metric: 1.0 above, -1.0 below, 0.0 inside the fences."""
        quantile_25, quantile_75, distribution = data
        iqr_scaling = self.params.get('iqr_scaling', 1.5)

        iqr = quantile_75 - quantile_25
        lower_limit = quantile_25 - iqr_scaling * iqr
        upper_limit = quantile_75 + iqr_scaling * iqr
        # optional hard floors/ceilings tighten the computed fences
        if 'static_lower_threshold' in self.params:
            lower_limit = max(
                lower_limit, self.params['static_lower_threshold'])
        if 'static_upper_threshold' in self.params:
            upper_limit = min(
                upper_limit, self.params['static_upper_threshold'])

        states = {}
        # items() instead of the Python-2-only iteritems() keeps this code
        # working on both Python 2 and Python 3
        for metric, value in distribution.items():
            if value > upper_limit:
                states[metric] = 1.0
            elif value < lower_limit:
                states[metric] = -1.0
            else:
                states[metric] = 0.0

        return quantile_25, quantile_75, states

    def write(self, data):
        """Write per-metric states and the quantile/count/invalid summary."""
        quantile_25, quantile_75, states = data
        prefix = '%s.%s' % (self.namespace, self.service)
        count = len(states)
        invalid = 0
        now = int(time())
        tuples = []
        for name, state in states.items():  # py2/py3-compatible iteration
            if state:
                invalid += 1
            name = extract_service_name(name)
            tuples.append(TimeSeriesTuple('%s.%s' % (prefix, name), now, state))
        tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'quantile_25'), now, quantile_25))
        tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'quantile_75'), now, quantile_75))
        tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'count'), now, count))
        tuples.append(TimeSeriesTuple('%s.%s' % (prefix, 'invalid'), now, invalid))
        self.output_sink.write(tuples)

    def run(self):
        """Execute the read -> process -> write pipeline once."""
        data = self.read()
        if data:
            state = self.process(data)
            self.write(state)
            return True
        else:
            return None
| trademob/anna-molly | lib/plugins/tukeys_filter.py | Python | mit | 5,255 |
"""
Mapzen geocoder, contributed by Michal Migurski of Mapzen.
"""
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT
)
from geopy.compat import urlencode
from geopy.location import Location
from geopy.util import logger
__all__ = ("Mapzen", )
class Mapzen(Geocoder):
    """
    Geocoder backed by Mapzen Search. Documentation at:
        https://mapzen.com/documentation/search/
    """

    def __init__(self, api_key, format_string=DEFAULT_FORMAT_STRING,
                 boundary_rect=None, country_bias=None,
                 timeout=DEFAULT_TIMEOUT, proxies=None,
                 user_agent=None):  # pylint: disable=R0913
        """
        :param string api_key: Mapzen Search API key.

        :param string format_string: String containing '%s' where the
            string to geocode should be interpolated before querying the
            geocoder. For example: '%s, Mountain View, CA'. The default
            is just '%s'.

        :param tuple boundary_rect: Coordinates to restrict search within,
            given as (west, south, east, north) coordinate tuple.

        :param string country_bias: Bias results to this country (ISO alpha-3).

        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.

            .. versionadded:: 0.96
        """
        super(Mapzen, self).__init__(
            format_string, 'https', timeout, proxies, user_agent=user_agent
        )
        self.country_bias = country_bias
        self.format_string = format_string
        self.boundary_rect = boundary_rect
        self.api_key = api_key

        self.geocode_api = 'https://search.mapzen.com/v1/search'
        self.reverse_api = 'https://search.mapzen.com/v1/reverse'

    def geocode(self, query, exactly_one=True, timeout=None):  # pylint: disable=R0913,W0221
        """
        Geocode a location query.

        :param query: The address, query or structured query to geocode
            you wish to geocode.

        :type query: string

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

            .. versionadded:: 0.97
        """
        params = {
            'text': self.format_string % query,
            'api_key': self.api_key,
        }
        if self.boundary_rect:
            params['boundary.rect.min_lon'] = self.boundary_rect[0]
            params['boundary.rect.min_lat'] = self.boundary_rect[1]
            params['boundary.rect.max_lon'] = self.boundary_rect[2]
            params['boundary.rect.max_lat'] = self.boundary_rect[3]
        if self.country_bias:
            params['boundary.country'] = self.country_bias

        url = "?".join((self.geocode_api, urlencode(params)))
        logger.debug("%s.geocode_api: %s", self.__class__.__name__, url)
        response = self._call_geocoder(url, timeout=timeout)
        return self._parse_json(response, exactly_one)

    def reverse(self, query, exactly_one=True, timeout=None):  # pylint: disable=W0221
        """
        Returns a reverse geocoded location.

        :param query: The coordinates for which you wish to obtain the
            closest human-readable addresses.

        :type query: :class:`geopy.point.Point`, list or tuple of (latitude,
            longitude), or string as "%(latitude)s, %(longitude)s"

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.

            .. versionadded:: 0.97
        """
        try:
            lat, lon = [
                part.strip() for part in
                self._coerce_point_to_string(query).split(',')
            ]
        except ValueError:
            raise ValueError("Must be a coordinate pair or Point")
        params = {
            'point.lat': lat,
            'point.lon': lon,
            'api_key': self.api_key,
        }
        url = "?".join((self.reverse_api, urlencode(params)))
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        response = self._call_geocoder(url, timeout=timeout)
        return self._parse_json(response, exactly_one)

    @staticmethod
    def parse_code(feature):
        """
        Parse a single GeoJSON feature into a geopy Location.
        """
        coordinates = feature.get('geometry', {}).get('coordinates', [])
        longitude, latitude = coordinates[0], coordinates[1]
        placename = feature.get('properties', {}).get('name')
        return Location(placename, (latitude, longitude), feature)

    def _parse_json(self, response, exactly_one):
        # No response or an empty feature set yields None.
        if response is None:
            return None
        features = response['features']
        if not len(features):
            return None
        if exactly_one is True:
            return self.parse_code(features[0])
        return [self.parse_code(feature) for feature in features]
| mthh/geopy | geopy/geocoders/mapzen.py | Python | mit | 5,724 |
import sys
import os

os.environ['DJANGO_SETTINGS_MODULE'] = 'gae.settings.settings'

# ===========
# Add any python 3rd party module that doesn't exist at
# https://developers.google.com/appengine/docs/python/tools/libraries27
# ===========
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
# ===========

# Force Django to reload its settings.
from django.conf import settings
settings._target = None  # NOTE(review): relies on a private Django attribute

import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch

# Log errors.
import logging

def log_exception(*args, **kwargs):
    """Signal receiver for got_request_exception: log the active exception."""
    logging.exception('Exception in request:')

# Connect the receiver on the signal instance rather than calling the
# unbound Signal.connect with the instance as the first argument -- same
# effect, clearer, and valid on Python 3 where unbound methods no longer
# exist.
django.core.signals.got_request_exception.connect(log_exception)

# Unregister the rollback event handler.
#django.dispatch.Signal.disconnect(
#    django.core.signals.got_request_exception,
#    django.db._rollback_on_exception)

# WSGI entry point used by the App Engine runtime.
app = django.core.handlers.wsgi.WSGIHandler()
| paulormart/gae-project-skeleton-100 | gae/main.py | Python | mit | 936 |
#-------------------------------------------------------------------------------
# Uppod decoder
#-------------------------------------------------------------------------------
import urllib2
def decode(param):
    """Decode an Uppod-obfuscated string.

    The input is base64 text whose alphabet has had pairs of characters
    swapped (hash1[i] <-> hash2[i]).  The swap is undone first, then the
    result is base64-decoded manually.  Returns the decoded text, or ''
    on any error (matching the original best-effort behaviour).
    """
    try:
        # chr/unichr compatibility: unichr only exists on Python 2
        try:
            _chr = unichr
        except NameError:
            _chr = chr

        #-- define variables
        loc_3 = [0, 0, 0, 0]
        loc_4 = [0, 0, 0]
        loc_2 = ''

        #-- define hash parameters for decoding
        dec = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
        hash1 = ["0", "5", "u", "w", "6", "n", "H", "o", "B", "p", "N", "M", "D", "R", "z", "G", "V", "e", "i", "3", "m", "W", "U", "7", "g", "="]
        hash2 = ["c", "T", "I", "4", "Q", "Z", "v", "Y", "y", "X", "k", "b", "8", "a", "J", "d", "1", "x", "L", "t", "l", "2", "f", "s", "9", "h"]

        #-- undo the pairwise character swap (via a temporary placeholder)
        for i in range(0, len(hash1)):
            re1 = hash1[i]
            re2 = hash2[i]
            param = param.replace(re1, '___')
            param = param.replace(re2, re1)
            param = param.replace('___', re2)

        #-- manual base64 decode: 4 input chars -> up to 3 output bytes
        i = 0
        while i < len(param):
            j = 0
            while j < 4 and i + j < len(param):
                loc_3[j] = dec.find(param[i + j])
                j = j + 1

            loc_4[0] = (loc_3[0] << 2) + ((loc_3[1] & 48) >> 4)
            loc_4[1] = ((loc_3[1] & 15) << 4) + ((loc_3[2] & 60) >> 2)
            loc_4[2] = ((loc_3[2] & 3) << 6) + loc_3[3]

            j = 0
            while j < 3:
                # stop at padding ('=' maps to index 64) or a NUL byte
                if loc_3[j + 1] == 64 or loc_4[j] == 0:
                    break
                loc_2 += _chr(loc_4[j])
                j = j + 1

            i = i + 4
    except Exception:
        # narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit
        loc_2 = ''

    return loc_2
def decodeSourceURL(uhash):
    """Decode an Uppod source-URL hash into the real URL string."""
    # print() call form behaves identically on Python 2 for a single
    # argument and is valid Python 3 syntax, unlike the old print statement
    print("*** [uppod.py-decodeSourceURL] Got uppod uhash \n%s" % uhash)
    return decode(uhash)
def getDecodedHashFromSourceURL(url, referer):
    """Fetch the decoded source URL, sending the given Referer header.

    Returns the raw response body.
    """
    # print() call form is valid on both Python 2 and Python 3
    print("*** [uppod.py-getDecodedHashFromSourceURL] Decoded source URL \n%s" % url)
    request = urllib2.Request(url, None)
    request.add_header('Referer', referer)
    return urllib2.urlopen(request).read()
| mrstealth/kodi-isengard | plugin.video.mrstealth.fepcom.net/uppod.py | Python | mit | 2,232 |
"""
This module implement a filesystem storage adapter.
"""
from __future__ import unicode_literals
import errno
import logging
import os
from flask import current_app
from .interface import ImagineAdapterInterface
from PIL import Image
LOGGER = logging.getLogger(__name__)
class ImagineFilesystemAdapter(ImagineAdapterInterface):
    """
    Filesystem storage adapter

    Source images live under the Flask application's static folder
    (optionally inside ``source_folder``); processed copies are stored
    under ``cache_folder``.
    """
    source_folder = None  # subfolder of the static dir holding originals
    cache_folder = None   # subfolder of the static dir holding cached copies

    def __init__(self, **kwargs):
        """
        Init _adapter
        :param kwargs: parameters (source_folder, cache_folder)
        :return:
        """
        self.source_folder = kwargs.get('source_folder', '').strip('/')
        self.cache_folder = kwargs.get('cache_folder', 'cache').strip('/')

    def _source_path(self, path):
        """Absolute filesystem path of a source item."""
        if self.source_folder:
            return '%s/%s/%s' % (
                current_app.static_folder,
                self.source_folder,
                path.strip('/')
            )
        return '%s/%s' % (
            current_app.static_folder,
            path.strip('/')
        )

    def _cache_path(self, path):
        """Absolute filesystem path of a cached item."""
        return '%s/%s/%s' % (
            current_app.static_folder,
            self.cache_folder,
            path.strip('/')
        )

    def _cache_url(self, path):
        """Public URL of a cached item."""
        return '%s/%s/%s' % (current_app.static_url_path, self.cache_folder, path.strip('/'))

    def get_item(self, path):
        """
        Get resource item
        :param path: string
        :return: PIL.Image on success, False otherwise
        """
        item_path = self._source_path(path)
        if not os.path.isfile(item_path):
            return False
        try:
            return Image.open(item_path)
        except IOError as err:
            LOGGER.warning('File not found on path "%s" with error: %s' % (item_path, str(err)))
            return False

    def create_cached_item(self, path, content):
        """
        Create cached resource item
        :param path: str
        :param content: Image
        :return: public URL string on success, False otherwise
        """
        if not isinstance(content, Image.Image):
            return False
        item_path = self._cache_path(path)
        self.make_dirs(item_path)
        content.save(item_path)
        if os.path.isfile(item_path):
            return self._cache_url(path)
        else:  # pragma: no cover
            LOGGER.warning('File is not created on path: %s' % item_path)
            return False

    def get_cached_item(self, path):
        """
        Get cached resource item
        :param path: str
        :return: PIL.Image on success, False otherwise
        """
        item_path = self._cache_path(path)
        if not os.path.isfile(item_path):
            return False
        try:
            return Image.open(item_path)
        except IOError as err:  # pragma: no cover
            LOGGER.warning('Cached file not found on path "%s" with error: %s' % (item_path, str(err)))
            return False

    def check_cached_item(self, path):
        """
        Check for cached resource item exists
        :param path: str
        :return: public URL string if present, False otherwise
        """
        if os.path.isfile(self._cache_path(path)):
            return self._cache_url(path)
        return False

    def remove_cached_item(self, path):
        """
        Remove cached resource item
        :param path: str
        :return: True if a file was removed; None otherwise (implicit, kept
            for backward compatibility with the original behaviour)
        """
        item_path = self._cache_path(path)
        if os.path.isfile(item_path):
            os.remove(item_path)
            return True

    @staticmethod
    def make_dirs(path):
        """
        Create directories if not exist
        :param path: string
        :return:
        """
        try:
            os.makedirs(os.path.dirname(path))
        except OSError as err:
            # EEXIST just means another request created it first
            if err.errno != errno.EEXIST:
                LOGGER.error('Failed to create directory %s with error: %s' % (path, str(err)))
                raise
| FlaskGuys/Flask-Imagine | flask_imagine/adapters/filesystem.py | Python | mit | 4,244 |
# Django settings for opendata project.
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
from django.core.urlresolvers import reverse_lazy
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PROJECT_ROOT = os.path.abspath(os.path.join(PROJECT_PATH, os.pardir))
# NOTE(review): DEBUG is on in this base module -- presumably disabled by a
# deployment-specific settings module that extends it; confirm.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Victor Rocha', 'vrocha@caktusgroup.com'),
)
MANAGERS = ADMINS
# NOTE(review): empty USER/PASSWORD/HOST appear to rely on local peer
# authentication or per-environment overrides; confirm before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'opendata',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
# Extend (not replace) Django's default context processors.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'pagination.middleware.PaginationMiddleware',
    'django_sorting.middleware.SortingMiddleware',
    'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'opendata.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'opendata.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, 'templates'),
)
FIXTURE_DIRS = (
    os.path.join(PROJECT_PATH, 'fixtures'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.comments',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.humanize',
    'django.contrib.sitemaps',
    # Internal apps
    'opendata.catalog',
    'opendata.requests',
    'opendata.search',
    'opendata.comments',
    'opendata.suggestions',
    # External apps
    'south',
    'compressor',
    'captcha',
    'scribbler',
    'widget_tweaks',
    'haystack',
    'selectable',
    'djcelery',
    'djangoratings',
    'pagination',
    'django_sorting',
    'registration',
    'sorl.thumbnail',
    'secure_input',
)
# Comments app
COMMENTS_APP = 'opendata.comments'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console':{
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # '': {
        #     'handlers': ['console'],
        #     'level': 'DEBUG',
        # },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
LOGIN_REDIRECT_URL = reverse_lazy('home')
# Application settings
SKIP_SOUTH_TESTS = True
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
# Celery setup
import djcelery
djcelery.setup_loader()
# Haystack conf
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
        'URL': 'http://127.0.0.1:8983/solr'
    },
}
HAYSTACK_SIGNAL_PROCESSOR = 'opendata.search.index_processors.M2MRealtimeSignalProcessor'
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 5
ACCOUNT_ACTIVATION_DAYS = 7
DEFAULT_FROM_EMAIL = "Open NC <info@open-nc.org>"
# DATEFORMAT
DATE_INPUT_FORMATS = (
    '%m/%d/%Y', '%m/%d/%y', '%Y-%m-%d', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# NOTE(review): only the public (site) reCAPTCHA key is set here; the
# private key is presumably supplied by an environment-specific module.
RECAPTCHA_PUBLIC_KEY = '6LcyhuoSAAAAAPHpTGWpqvIZvO5rBttlZisQl2q3'
RECAPTCHA_USE_SSL = True
# WYSIWYG Editor
ALLOWED_TAGS = ('p', 'ol', 'ul', 'li', 'br')
| openrural/open-data-nc | opendata/settings/base.py | Python | mit | 7,040 |
"""
Data and methods to retrieve app specific configuration
"""
import json
import requests
# Receiver application IDs of well-known Cast apps. These are the opaque
# IDs the Cast platform uses to launch a receiver application.
APP_BACKDROP = "E8C28D3C"
APP_YOUTUBE = "233637DE"
APP_MEDIA_RECEIVER = "CC1AD845"
APP_PLEX = "06ee44ee-e7e3-4249-83b6-f5d0b6f07f34_1"
APP_DASHCAST = "84912283"
APP_SPOTIFY = "CC32E753"
APP_HOME_ASSISTANT = "B12CE3CA"
APP_SUPLA = "A41B766D"
APP_YLEAREENA = "A9BCCB7C"
def get_possible_app_ids():
    """ Returns all possible app ids, or [] if the service is unavailable. """
    try:
        req = requests.get(
            "https://clients3.google.com/cast/chromecast/device/baseconfig"
        )
        # Bail out on a non-200 response instead of trying to parse an
        # error body -- mirrors the status check in get_app_config().
        if req.status_code != 200:
            return []
        # The response is JSON prefixed with 4 junk characters.
        data = json.loads(req.text[4:])
        return [app["app_id"] for app in data["applications"]] + data["enabled_app_ids"]
    except ValueError:
        # If json fails to parse
        return []
def get_app_config(app_id):
    """ Get specific configuration for 'app_id', or {} on failure. """
    url = "https://clients3.google.com/cast/chromecast/device/app?a={}".format(app_id)
    try:
        response = requests.get(url)
        if response.status_code != 200:
            return {}
        # The response is JSON prefixed with 4 junk characters.
        return json.loads(response.text[4:])
    except ValueError:
        # If json fails to parse
        return {}
| dominikkarall/pychromecast | pychromecast/config.py | Python | mit | 1,167 |
from collections import OrderedDict
from datetime import date
from nose.tools import eq_, assert_true, assert_false
from mosql.compat import binary_type, text_type
from mosql.util import (
autoparam, build_set, build_where, param, ___,
_is_iterable_not_str,
)
def test_is_iterable_not_str():
    # Containers of any kind count as iterable.
    for iterable in ([], tuple(), {}, set()):
        assert_true(_is_iterable_not_str(iterable))
    # Strings are iterable, but not included.
    for string in ('', b'', u'', binary_type(), text_type()):
        assert_false(_is_iterable_not_str(string))
def test_build_where():
    conditions = OrderedDict([
        ('detail_id', 1),
        ('age >= ', 20),
        ('created', date(2013, 4, 16)),
    ])
    expected = '"detail_id" = 1 AND "age" >= 20 AND "created" = \'2013-04-16\''
    eq_(build_where(conditions), expected)
def test_build_where_operator():
    # Operator supplied as part of a (column, op) tuple key.
    conditions = OrderedDict([
        ('detail_id', 1),
        (('age', '>='), 20),
        ('created', date(2013, 4, 16)),
    ])
    expected = '"detail_id" = 1 AND "age" >= 20 AND "created" = \'2013-04-16\''
    eq_(build_where(conditions), expected)
def test_build_where_prepared():
    conditions = OrderedDict([
        ('custom_param', param('my_param')),
        ('auto_param', autoparam),
        ('using_alias', ___),
    ])
    expected = ('"custom_param" = %(my_param)s AND "auto_param" = %(auto_param)s '
                'AND "using_alias" = %(using_alias)s')
    eq_(build_where(conditions), expected)
def test_build_set():
    assignments = OrderedDict([('a', 1), ('b', True), ('c', date(2013, 4, 16))])
    eq_(build_set(assignments), '"a"=1, "b"=TRUE, "c"=\'2013-04-16\'')
def test_build_set_prepared():
    assignments = OrderedDict([
        ('custom_param', param('myparam')),
        ('auto_param', autoparam),
    ])
    eq_(build_set(assignments), '"custom_param"=%(myparam)s, "auto_param"=%(auto_param)s')
| moskytw/mosql | tests/test_util.py | Python | mit | 1,969 |
"""
Virtual environment (venv) package for Python. Based on PEP 405.
Copyright (C) 2011-2014 Vinay Sajip.
Licensed to the PSF under a contributor agreement.
"""
import logging
import os
import shutil
import subprocess
import sys
import types
logger = logging.getLogger(__name__)
class EnvBuilder:
"""
This class exists to allow virtual environment creation to be
customized. The constructor parameters determine the builder's
behaviour when called upon to create a virtual environment.
By default, the builder makes the system (global) site-packages dir
*un*available to the created environment.
If invoked using the Python -m option, the default is to use copying
on Windows platforms but symlinks elsewhere. If instantiated some
other way, the default is to *not* use symlinks.
:param system_site_packages: If True, the system (global) site-packages
dir is available to created environments.
:param clear: If True, delete the contents of the environment directory if
it already exists, before environment creation.
:param symlinks: If True, attempt to symlink rather than copy files into
virtual environment.
:param upgrade: If True, upgrade an existing virtual environment.
:param with_pip: If True, ensure pip is installed in the virtual
environment
:param prompt: Alternative terminal prefix for the environment.
"""
    def __init__(self, system_site_packages=False, clear=False,
                 symlinks=False, upgrade=False, with_pip=False, prompt=None):
        # Simply record the creation options; they are consulted later by
        # create() and the setup_* helpers.
        self.system_site_packages = system_site_packages
        self.clear = clear
        self.symlinks = symlinks
        self.upgrade = upgrade
        self.with_pip = with_pip
        self.prompt = prompt
def create(self, env_dir):
"""
Create a virtual environment in a directory.
:param env_dir: The target directory to create an environment in.
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
self.create_configuration(context)
self.setup_python(context)
if self.with_pip:
self._setup_pip(context)
if not self.upgrade:
self.setup_scripts(context)
self.post_setup(context)
def clear_directory(self, path):
for fn in os.listdir(path):
fn = os.path.join(path, fn)
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
    def ensure_directories(self, env_dir):
        """
        Create the directories for the environment.

        Returns a context object which holds paths in the environment,
        for use by subsequent logic.
        """

        # Helper: create d unless it already exists as a real directory.
        def create_if_needed(d):
            if not os.path.exists(d):
                os.makedirs(d)
            elif os.path.islink(d) or os.path.isfile(d):
                raise ValueError('Unable to create directory %r' % d)

        if os.path.exists(env_dir) and self.clear:
            self.clear_directory(env_dir)
        context = types.SimpleNamespace()
        context.env_dir = env_dir
        context.env_name = os.path.split(env_dir)[1]
        prompt = self.prompt if self.prompt is not None else context.env_name
        context.prompt = '(%s) ' % prompt
        create_if_needed(env_dir)
        env = os.environ
        # On macOS the framework launcher exports the real interpreter path
        # via __PYVENV_LAUNCHER__; prefer it over sys.executable there.
        if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
            executable = os.environ['__PYVENV_LAUNCHER__']
        else:
            executable = sys.executable
        dirname, exename = os.path.split(os.path.abspath(executable))
        context.executable = executable
        context.python_dir = dirname
        context.python_exe = exename
        # Platform-specific layout: Scripts/Include/Lib on Windows,
        # bin/include/lib/pythonX.Y elsewhere.
        if sys.platform == 'win32':
            binname = 'Scripts'
            incpath = 'Include'
            libpath = os.path.join(env_dir, 'Lib', 'site-packages')
        else:
            binname = 'bin'
            incpath = 'include'
            libpath = os.path.join(env_dir, 'lib',
                                   'python%d.%d' % sys.version_info[:2],
                                   'site-packages')
        context.inc_path = path = os.path.join(env_dir, incpath)
        create_if_needed(path)
        create_if_needed(libpath)
        # Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX
        if ((sys.maxsize > 2**32) and (os.name == 'posix') and
            (sys.platform != 'darwin')):
            link_path = os.path.join(env_dir, 'lib64')
            if not os.path.exists(link_path):   # Issue #21643
                os.symlink('lib', link_path)
        context.bin_path = binpath = os.path.join(env_dir, binname)
        context.bin_name = binname
        context.env_exe = os.path.join(binpath, exename)
        create_if_needed(binpath)
        return context
def create_configuration(self, context):
"""
Create a configuration file indicating where the environment's Python
was copied from, and whether the system site-packages should be made
available in the environment.
:param context: The information for the environment creation request
being processed.
"""
context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg')
with open(path, 'w', encoding='utf-8') as f:
f.write('home = %s\n' % context.python_dir)
if self.system_site_packages:
incl = 'true'
else:
incl = 'false'
f.write('include-system-site-packages = %s\n' % incl)
f.write('version = %d.%d.%d\n' % sys.version_info[:3])
if os.name == 'nt':
def include_binary(self, f):
if f.endswith(('.pyd', '.dll')):
result = True
else:
result = f.startswith('python') and f.endswith('.exe')
return result
def symlink_or_copy(self, src, dst, relative_symlinks_ok=False):
"""
Try symlinking a file, and if that fails, fall back to copying.
"""
force_copy = not self.symlinks
if not force_copy:
try:
if not os.path.islink(dst): # can't link to itself!
if relative_symlinks_ok:
assert os.path.dirname(src) == os.path.dirname(dst)
os.symlink(os.path.basename(src), dst)
else:
os.symlink(src, dst)
except Exception: # may need to use a more specific exception
logger.warning('Unable to symlink %r to %r', src, dst)
force_copy = True
if force_copy:
shutil.copyfile(src, dst)
    def setup_python(self, context):
        """
        Set up a Python executable in the environment.

        :param context: The information for the environment creation request
                        being processed.
        """
        binpath = context.bin_path
        path = context.env_exe
        copier = self.symlink_or_copy
        # the environment's main interpreter (symlink or copy of the source)
        copier(context.executable, path)
        dirname = context.python_dir
        if os.name != 'nt':
            if not os.path.islink(path):
                os.chmod(path, 0o755)
            # provide the conventional 'python'/'python3' aliases
            for suffix in ('python', 'python3'):
                path = os.path.join(binpath, suffix)
                if not os.path.exists(path):
                    # Issue 18807: make copies if
                    # symlinks are not wanted
                    copier(context.env_exe, path, relative_symlinks_ok=True)
                    if not os.path.islink(path):
                        os.chmod(path, 0o755)
        else:
            # Windows: copy the interpreter plus its binary dependencies
            # (DLLs / extension modules) next to it.
            subdir = 'DLLs'
            include = self.include_binary
            files = [f for f in os.listdir(dirname) if include(f)]
            for f in files:
                src = os.path.join(dirname, f)
                dst = os.path.join(binpath, f)
                if dst != context.env_exe:  # already done, above
                    copier(src, dst)
            dirname = os.path.join(dirname, subdir)
            if os.path.isdir(dirname):
                files = [f for f in os.listdir(dirname) if include(f)]
                for f in files:
                    src = os.path.join(dirname, f)
                    dst = os.path.join(binpath, f)
                    copier(src, dst)
            # copy init.tcl over
            for root, dirs, files in os.walk(context.python_dir):
                if 'init.tcl' in files:
                    tcldir = os.path.basename(root)
                    tcldir = os.path.join(context.env_dir, 'Lib', tcldir)
                    if not os.path.exists(tcldir):
                        os.makedirs(tcldir)
                    src = os.path.join(root, 'init.tcl')
                    dst = os.path.join(tcldir, 'init.tcl')
                    shutil.copyfile(src, dst)
                    break
def _setup_pip(self, context):
"""Installs or upgrades pip in a virtual environment"""
# We run ensurepip in isolated mode to avoid side effects from
# environment vars, the current directory and anything else
# intended for the global Python environment
cmd = [context.env_exe, '-Im', 'ensurepip', '--upgrade',
'--default-pip']
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def setup_scripts(self, context):
"""
Set up scripts into the created environment from a directory.
This method installs the default scripts into the environment
being created. You can prevent the default installation by overriding
this method if you really need to, or if you need to specify
a different location for the scripts to install. By default, the
'scripts' directory in the venv package is used as the source of
scripts to install.
"""
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, 'scripts')
self.install_scripts(context, path)
def post_setup(self, context):
    """
    Hook for post-setup modification of the venv. Subclasses may install
    additional packages or scripts here, add activation shell scripts, etc.

    :param context: The information for the environment creation request
                    being processed.
    """
    # Intentionally a no-op in the base implementation.
    pass
def replace_variables(self, text, context):
    """
    Replace variable placeholders in script text with context-specific
    values and return the resulting text.

    :param text: The text in which to replace placeholder variables.
    :param context: The information for the environment creation request
                    being processed.
    """
    # Ordered table of placeholder -> replacement; the order matches the
    # original implementation (none of the placeholders is a substring of
    # a later one, so sequential replacement is safe).
    replacements = (
        ('__VENV_DIR__', context.env_dir),
        ('__VENV_NAME__', context.env_name),
        ('__VENV_PROMPT__', context.prompt),
        ('__VENV_BIN_NAME__', context.bin_name),
        ('__VENV_PYTHON__', context.env_exe),
    )
    for placeholder, value in replacements:
        text = text.replace(placeholder, value)
    return text
def install_scripts(self, context, path):
    """
    Install scripts into the created environment from a directory.

    :param context: The information for the environment creation request
                    being processed.
    :param path: Absolute pathname of a directory containing script.
                 Scripts in the 'common' subdirectory of this directory,
                 and those in the directory named for the platform
                 being run on, are installed in the created environment.
                 Placeholder variables are replaced with environment-
                 specific values.
    """
    binpath = context.bin_path
    plen = len(path)
    for root, dirs, files in os.walk(path):
        if root == path: # at top-level, remove irrelevant dirs
            for d in dirs[:]:
                if d not in ('common', os.name):
                    dirs.remove(d)
            continue # ignore files in top level
        for f in files:
            srcfile = os.path.join(root, f)
            # root[plen:] starts with os.sep, so splitting yields
            # ['', '<common-or-platform>', ...]; [2:] keeps only the
            # sub-path below that directory (files directly under
            # 'common'/<os.name> land in binpath itself).
            suffix = root[plen:].split(os.sep)[2:]
            if not suffix:
                dstdir = binpath
            else:
                dstdir = os.path.join(binpath, *suffix)
            if not os.path.exists(dstdir):
                os.makedirs(dstdir)
            dstfile = os.path.join(dstdir, f)
            # NOTE: the loop variable f is deliberately reused here as the
            # file handle; the original name is not needed past this point.
            with open(srcfile, 'rb') as f:
                data = f.read()
            if srcfile.endswith('.exe'):
                # binary launcher: copy bytes through untouched
                mode = 'wb'
            else:
                mode = 'w'
                try:
                    data = data.decode('utf-8')
                    data = self.replace_variables(data, context)
                except UnicodeDecodeError as e:
                    # not valid UTF-8: treat as binary we cannot rewrite,
                    # skip it rather than corrupting the file
                    data = None
                    logger.warning('unable to copy script %r, '
                                   'may be binary: %s', srcfile, e)
            if data is not None:
                with open(dstfile, mode) as f:
                    f.write(data)
                # preserve the executable bit etc. of the source script
                shutil.copymode(srcfile, dstfile)
def create(env_dir, system_site_packages=False, clear=False,
          symlinks=False, with_pip=False, prompt=None):
    """Create a virtual environment in a directory."""
    # Thin convenience wrapper: configure a builder and run it once.
    builder = EnvBuilder(system_site_packages=system_site_packages,
                         clear=clear,
                         symlinks=symlinks,
                         with_pip=with_pip,
                         prompt=prompt)
    builder.create(env_dir)
def main(args=None):
    """Command-line entry point: parse options and create the requested
    virtual environment(s).

    :param args: argument list to parse instead of sys.argv[1:] (mainly
                 for testing).
    """
    compatible = True
    if sys.version_info < (3, 3):
        compatible = False
    elif not hasattr(sys, 'base_prefix'):
        # sys.base_prefix only exists on venv-capable interpreters (PEP 405)
        compatible = False
    if not compatible:
        raise ValueError('This script is only for use with Python >= 3.3')
    else:
        import argparse
        parser = argparse.ArgumentParser(prog=__name__,
                                         description='Creates virtual Python '
                                                     'environments in one or '
                                                     'more target '
                                                     'directories.',
                                         epilog='Once an environment has been '
                                                'created, you may wish to '
                                                'activate it, e.g. by '
                                                'sourcing an activate script '
                                                'in its bin directory.')
        parser.add_argument('dirs', metavar='ENV_DIR', nargs='+',
                            help='A directory to create the environment in.')
        parser.add_argument('--system-site-packages', default=False,
                            action='store_true', dest='system_site',
                            help='Give the virtual environment access to the '
                                 'system site-packages dir.')
        # Symlinking the interpreter is the default everywhere but Windows,
        # where creating symlinks may require special privileges.
        if os.name == 'nt':
            use_symlinks = False
        else:
            use_symlinks = True
        group = parser.add_mutually_exclusive_group()
        group.add_argument('--symlinks', default=use_symlinks,
                           action='store_true', dest='symlinks',
                           help='Try to use symlinks rather than copies, '
                                'when symlinks are not the default for '
                                'the platform.')
        group.add_argument('--copies', default=not use_symlinks,
                           action='store_false', dest='symlinks',
                           help='Try to use copies rather than symlinks, '
                                'even when symlinks are the default for '
                                'the platform.')
        parser.add_argument('--clear', default=False, action='store_true',
                            dest='clear', help='Delete the contents of the '
                                               'environment directory if it '
                                               'already exists, before '
                                               'environment creation.')
        parser.add_argument('--upgrade', default=False, action='store_true',
                            dest='upgrade', help='Upgrade the environment '
                                                 'directory to use this version '
                                                 'of Python, assuming Python '
                                                 'has been upgraded in-place.')
        parser.add_argument('--without-pip', dest='with_pip',
                            default=True, action='store_false',
                            help='Skips installing or upgrading pip in the '
                                 'virtual environment (pip is bootstrapped '
                                 'by default)')
        parser.add_argument('--prompt',
                            help='Provides an alternative prompt prefix for '
                                 'this environment.')
        options = parser.parse_args(args)
        # --upgrade reuses the directory contents while --clear wipes them:
        # the combination is contradictory, so reject it explicitly.
        if options.upgrade and options.clear:
            raise ValueError('you cannot supply --upgrade and --clear together.')
        builder = EnvBuilder(system_site_packages=options.system_site,
                             clear=options.clear,
                             symlinks=options.symlinks,
                             upgrade=options.upgrade,
                             with_pip=options.with_pip,
                             prompt=options.prompt)
        for d in options.dirs:
            builder.create(d)
if __name__ == '__main__':
    # Exit with 0 only when main() completes; report any failure on stderr.
    rc = 1
    try:
        main()
    except Exception as e:
        print('Error: %s' % e, file=sys.stderr)
    else:
        rc = 0
    sys.exit(rc)
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.6.0/Lib/venv/__init__.py | Python | mit | 18,240 |
# Public names exported by the fitter package.
__all__ = ["Initiator","Simulator","Evaluator","Modelator","Validator"] | joebowen/ChannelWorm | channelworm/fitter/__init__.py | Python | mit | 71 |
from StringIO import StringIO
import textwrap
import importer
def test_import_csv():
    """save_csv should merge mapped new rows into the current ledger CSV,
    normalise dates, count duplicate incoming rows in 'qty', and keep
    already-present rows untouched."""
    existing = StringIO(textwrap.dedent('''\
        status,qty,type,transaction_date,posting_date,description,amount
        A,,,2016/11/02,,This is a test,$4.53
        '''))
    incoming = StringIO(textwrap.dedent('''\
        "Trans Date", "Summary", "Amount"
        5/2/2007, Regal Theaters, $15.99
        11/2/2016, This is a test , $4.53
        5/2/2007, Regal Theaters, $15.99
        '''))
    column_map = {
        'Trans Date': 'transaction_date',
        'Summary': 'description',
        'Amount': 'amount',
    }
    importer.save_csv(existing, incoming, column_map, '%m/%d/%Y')
    rows = existing.getvalue().splitlines()
    assert rows[0].rstrip() == 'status,qty,type,transaction_date,posting_date,description,amount'
    assert rows[1].rstrip() == 'N,2,,2007/05/02,,Regal Theaters,$15.99'
    assert rows[2].rstrip() == 'A,,,2016/11/02,,This is a test,$4.53'
    assert len(rows) == 3
| kalafut/go-ledger | importer_test.py | Python | mit | 1,021 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    # Autogenerated template factory for the decorative radio tangible;
    # hand edits outside the MODIFICATIONS block may be lost on regeneration.
    result = Tangible()
    result.template = "object/tangible/furniture/decorative/shared_radio.iff"
    result.attribute_template_id = 6
    result.stfName("frn_n","radio")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result | anhstudios/swganh | data/scripts/templates/object/tangible/furniture/decorative/shared_radio.py | Python | mit | 440 |
#
# Code by Alexander Pruss and under the MIT license
#
#
# pysanka.py [filename [height [oval|N]]]
# oval: wrap an oval image onto an egg
# N: wrap a rectangular image onto an egg N times (N is an integer)
#
# Yeah, these arguments are a mess!
from mine import *
import colors
import sys
import os
from PIL import Image
from random import uniform
def egg(block=block.GOLD_BLOCK, h=40, a=2.5, b=1, c=0.1, sphere=False):
    """Yield (x, y, z, block, theta) tuples forming a hollow egg (or, with
    sphere=True, a sphere) of height h centred on the vertical axis.
    theta is the angle of the block around that axis, in [0, 2*pi).

    Math helpers (sqrt, exp, pi, atan2) come from the star import of
    `mine` at the top of the file -- presumably math re-exports; confirm.
    """
    def radius(y):
        # Horizontal radius of the solid at height y; zero outside [0, h).
        if y < 0 or y >= h:
            return 0
        if sphere:
            return sqrt((h/2.)**2 - (y-h/2.)**2)
        l = y / float(h-1)
        # Formula from: http://www.jolyon.co.uk/myresearch/image-analysis/egg-shape-modelling/
        return h*a*exp((-0.5*l*l+c*l-.5*c*c)/(b*b))*sqrt(1-l)*sqrt(l)/(pi*b)
    for y in range(0,h):
        r = radius(y)
        # Keep only a shell roughly 2 blocks thick: anything strictly inside
        # the smallest neighbouring radius is hollowed out.
        minimumr = min(r-2,radius(y-1),radius(y+1))
        for x in range(-h,h+1):
            for z in range(-h,h+1):
                myr = sqrt(x*x + z*z)
                if myr <= r and minimumr <= myr:
                    if x==0 and z==0:
                        # undefined angle on the axis; pick 0
                        theta = 0
                    else:
                        theta = atan2(z,x)+pi/2
                    yield (x,y,z,block,theta % (2*pi))
def getPixel(image, x, y, dither=None):
    """Sample the image at fractional coordinates (x, y) in [0, 1) -- measured
    from the bottom-right corner -- and return the matching block id from the
    colors palette, optionally jittering the RGB channels by +/-dither."""
    w, h = image.size
    px = image.getpixel((w - 1 - floor(x * w), h - 1 - floor(y * h)))
    if dither is None:
        return colors.rgbToBlock(px)[0]
    jittered = (px[0] + uniform(-dither, dither),
                px[1] + uniform(-dither, dither),
                px[2] + uniform(-dither, dither))
    return colors.rgbToBlock(jittered)[0]
if __name__ == '__main__':
    mc = Minecraft()
    # Image file: first CLI argument, resolved relative to the script's
    # directory if not found as given; defaults to pysanka.jpg.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        if not os.path.isfile(filename):
            filename = os.path.dirname(os.path.realpath(sys.argv[0])) + "/" + filename
    else:
        filename = os.path.dirname(os.path.realpath(sys.argv[0])) + "/" + "pysanka.jpg"
    # Egg height in blocks (second CLI argument).
    if len(sys.argv) > 2:
        height = int(sys.argv[2])
    else:
        height = 100
    # Third argument: 'oval' wraps an oval image onto the egg, 'sphere'
    # requests sphere wrapping, any integer N wraps a rectangular image N
    # times around the egg.
    oval = False
    sphereWrap = False
    if len(sys.argv) > 3:
        if sys.argv[3] == "oval":
            oval = True
        elif sys.argv[3] == "sphere":
            # NOTE(review): sphereWrap is set but never consulted below, and
            # egg() is always called without sphere=True -- confirm whether
            # sphere mode was ever wired up.
            sphereWrap = True
        else:
            repeat = int(sys.argv[3])
    else:
        repeat = 2
    pos = mc.player.getPos()
    if oval:
        # Oval mode: use the alpha channel to find, for each image row, the
        # first and last fully-opaque pixels; egg coordinates are then mapped
        # into that opaque band.
        image = Image.open(filename).convert('RGBA')
        first = None
        last = None
        start = [None] * image.size[1]
        stop = [None] * image.size[1]
        for y in range(image.size[1]):
            for x in range(image.size[0]):
                _,_,_,alpha = image.getpixel((x,y))
                if alpha == 255:
                    start[y] = x
                    break
            for x in range(image.size[0]-1,-1,-1):
                _,_,_,alpha = image.getpixel((x,y))
                if alpha == 255:
                    stop[y] = x
                    break
            if start[y] is not None:
                # track the first and last rows containing opaque pixels
                if first is None:
                    first = y
                last = y
        assert first is not None
        for (x,y,z,block,theta) in egg(h=height,block=None):
            # Map egg height onto an image row inside the opaque band,
            # clamping to the band's bounds.
            imageY = first + int(float(height-1-y)/height*(last-first+1))
            if imageY < first:
                imageY = first
            if imageY > last:
                imageY = last
            # Map the angle around the egg onto a column between the row's
            # opaque start/stop pixels, clamped likewise.
            imageX = start[imageY]+ int((0.5 - 0.5 * sin(theta)) * (stop[imageY]-start[imageY]))
            if imageX < start[imageY]:
                imageX = start[imageY]
            if imageX > stop[imageY]:
                imageX = stop[imageY]
            mc.setBlock(x+pos.x,y+pos.y,z+pos.z, getPixel(image, imageX, imageY))
    else:
        # Rectangular mode: wrap the image `repeat` times around the egg,
        # dithering the sampled colours slightly to break up banding.
        image = Image.open(filename).convert('RGB')
        for (x,y,z,block,theta) in egg(h=height,block=None):
            mc.setBlock(x+pos.x,y+pos.y,z+pos.z,getPixel(image, (theta * repeat / (2*pi)) % 1, y / float(height), dither=20)) | arpruss/raspberryjam-pe | p2/scripts3/pysanka.py | Python | mit | 4,144 |
"""
DGL-based PAGTN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class Pagtn(nn.Module):
    """Model for Graph Property Prediction
    This model proceeds as follows:
    * Update node representations in graphs with a variant of GAT, where a
    linear additive form of attention is applied. Attention Weights are derived
    by concatenating the node and edge features for each bond.
    * Update node representations with multiple rounds of message passing.
    * For each layer has, residual connections with its previous layer.
    * The final molecular representation is computed by combining the representations
    of all nodes in the molecule.
    * Perform the final prediction using a linear layer
    Examples
    --------
    >>> import deepchem as dc
    >>> import dgl
    >>> from deepchem.models import Pagtn
    >>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
    >>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
    >>> graphs = featurizer.featurize(smiles)
    >>> print(type(graphs[0]))
    <class 'deepchem.feat.graph_data.GraphData'>
    >>> dgl_graphs = [graphs[i].to_dgl_graph() for i in range(len(graphs))]
    >>> batch_dgl_graph = dgl.batch(dgl_graphs)
    >>> model = Pagtn(n_tasks=1, mode='regression')
    >>> preds = model(batch_dgl_graph)
    >>> print(type(preds))
    <class 'torch.Tensor'>
    >>> preds.shape == (2, 1)
    True
    References
    ----------
    .. [1] Benson Chen, Regina Barzilay, Tommi Jaakkola. "Path-Augmented
    Graph Transformer Network." arXiv:1905.12712
    Notes
    -----
    This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
    (https://github.com/awslabs/dgl-lifesci) to be installed.
    """
    def __init__(self,
                 n_tasks: int,
                 number_atom_features: int = 94,
                 number_bond_features: int = 42,
                 mode: str = 'regression',
                 n_classes: int = 2,
                 output_node_features: int = 256,
                 hidden_features: int = 32,
                 num_layers: int = 5,
                 num_heads: int = 1,
                 dropout: float = 0.1,
                 nfeat_name: str = 'x',
                 efeat_name: str = 'edge_attr',
                 pool_mode: str = 'sum'):
        """
        Parameters
        ----------
        n_tasks: int
            Number of tasks.
        number_atom_features : int
            Size for the input node features. Default to 94.
        number_bond_features : int
            Size for the input edge features. Default to 42.
        mode: str
            The model type, 'classification' or 'regression'. Default to 'regression'.
        n_classes: int
            The number of classes to predict per task
            (only used when ``mode`` is 'classification'). Default to 2.
        output_node_features : int
            Size for the output node features in PAGTN layers. Default to 256.
        hidden_features : int
            Size for the hidden node features in PAGTN layers. Default to 32.
        num_layers : int
            Number of PAGTN layers to be applied. Default to 5.
        num_heads : int
            Number of attention heads. Default to 1.
        dropout : float
            The probability for performing dropout. Default to 0.1
        nfeat_name: str
            For an input graph ``g``, the model assumes that it stores node features in
            ``g.ndata[nfeat_name]`` and will retrieve input node features from that.
            Default to 'x'.
        efeat_name: str
            For an input graph ``g``, the model assumes that it stores edge features in
            ``g.edata[efeat_name]`` and will retrieve input edge features from that.
            Default to 'edge_attr'.
        pool_mode : 'max' or 'mean' or 'sum'
            Whether to compute elementwise maximum, mean or sum of the node representations.

        Raises
        ------
        ImportError
            If dgl or dgllife is not installed.
        ValueError
            If ``mode`` is not 'classification' or 'regression'.
        """
        # Narrow the import guards to ImportError: a bare ``except:`` would
        # also swallow KeyboardInterrupt/SystemExit and mask unrelated errors
        # raised while importing the dependency.
        try:
            import dgl  # noqa: F401
        except ImportError as e:
            raise ImportError('This class requires dgl.') from e
        try:
            import dgllife  # noqa: F401
        except ImportError as e:
            raise ImportError('This class requires dgllife.') from e
        if mode not in ['classification', 'regression']:
            raise ValueError("mode must be either 'classification' or 'regression'")
        super(Pagtn, self).__init__()
        self.n_tasks = n_tasks
        self.mode = mode
        self.n_classes = n_classes
        self.nfeat_name = nfeat_name
        self.efeat_name = efeat_name
        # In classification the underlying predictor emits one logit per
        # (task, class) pair; in regression one value per task.
        if mode == 'classification':
            out_size = n_tasks * n_classes
        else:
            out_size = n_tasks
        from dgllife.model import PAGTNPredictor as DGLPAGTNPredictor
        self.model = DGLPAGTNPredictor(
            node_in_feats=number_atom_features,
            node_out_feats=output_node_features,
            node_hid_feats=hidden_features,
            edge_feats=number_bond_features,
            depth=num_layers,
            nheads=num_heads,
            dropout=dropout,
            n_tasks=out_size,
            mode=pool_mode)
    def forward(self, g):
        """Predict graph labels
        Parameters
        ----------
        g: DGLGraph
            A DGLGraph for a batch of graphs. It stores the node features in
            ``dgl_graph.ndata[self.nfeat_name]`` and edge features in
            ``dgl_graph.edata[self.efeat_name]``.
        Returns
        -------
        torch.Tensor
            The model output.
            * When self.mode = 'regression',
              its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
            * When self.mode = 'classification', the output consists of probabilities
              for classes. Its shape will be
              ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)`` if self.n_tasks > 1;
              its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if self.n_tasks is 1.
        torch.Tensor, optional
            This is only returned when self.mode = 'classification', the output consists of the
            logits for classes before softmax.
        """
        node_feats = g.ndata[self.nfeat_name]
        edge_feats = g.edata[self.efeat_name]
        out = self.model(g, node_feats, edge_feats)
        if self.mode == 'classification':
            # Reshape flat (task*class) logits and soft-max over the class axis.
            if self.n_tasks == 1:
                logits = out.view(-1, self.n_classes)
                softmax_dim = 1
            else:
                logits = out.view(-1, self.n_tasks, self.n_classes)
                softmax_dim = 2
            proba = F.softmax(logits, dim=softmax_dim)
            return proba, logits
        else:
            return out
class PagtnModel(TorchModel):
"""Model for Graph Property Prediction.
This model proceeds as follows:
* Update node representations in graphs with a variant of GAT, where a
linear additive form of attention is applied. Attention Weights are derived
by concatenating the node and edge features for each bond.
* Update node representations with multiple rounds of message passing.
* For each layer has, residual connections with its previous layer.
* The final molecular representation is computed by combining the representations
of all nodes in the molecule.
* Perform the final prediction using a linear layer
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models import PagtnModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "CCC"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.PagtnMolGraphFeaturizer(max_length=5)
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = PagtnModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
References
----------
.. [1] Benson Chen, Regina Barzilay, Tommi Jaakkola. "Path-Augmented
Graph Transformer Network." arXiv:1905.12712
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
"""
def __init__(self,
n_tasks: int,
number_atom_features: int = 94,
number_bond_features: int = 42,
mode: str = 'regression',
n_classes: int = 2,
output_node_features: int = 256,
hidden_features: int = 32,
num_layers: int = 5,
num_heads: int = 1,
dropout: float = 0.1,
pool_mode: str = 'sum',
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
number_atom_features : int
Size for the input node features. Default to 94.
number_bond_features : int
Size for the input edge features. Default to 42.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
output_node_features : int
Size for the output node features in PAGTN layers. Default to 256.
hidden_features : int
Size for the hidden node features in PAGTN layers. Default to 32.
num_layers: int
Number of graph neural network layers, i.e. number of rounds of message passing.
Default to 2.
num_heads : int
Number of attention heads. Default to 1.
dropout: float
Dropout probability. Default to 0.1
pool_mode : 'max' or 'mean' or 'sum'
Whether to compute elementwise maximum, mean or sum of the node representations.
kwargs
This can include any keyword argument of TorchModel.
"""
model = Pagtn(
n_tasks=n_tasks,
number_atom_features=number_atom_features,
number_bond_features=number_bond_features,
mode=mode,
n_classes=n_classes,
output_node_features=output_node_features,
hidden_features=hidden_features,
num_layers=num_layers,
num_heads=num_heads,
dropout=dropout,
pool_mode=pool_mode)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(PagtnModel, self).__init__(
model, loss=loss, output_types=output_types, **kwargs)
def _prepare_batch(self, batch):
"""Create batch data for Pagtn.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
try:
import dgl
except:
raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [graph.to_dgl_graph() for graph in inputs[0]]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(PagtnModel, self)._prepare_batch(([], labels,
weights))
return inputs, labels, weights
| deepchem/deepchem | deepchem/models/torch_models/pagtn.py | Python | mit | 10,872 |
#!/usr/bin/env python
import json
import dateutil.parser
import datetime
import numpy as np
import calendar
import itertools
from flask import Flask, request, Response, render_template, redirect, url_for
import Uber
app = Flask(__name__)
'''
The index page has links to the from_file API and the from_stream API.
'''
@app.route('/')
def index():
    """Landing page linking to the from_file and from_stream demo APIs."""
    links = {
        'from_file': url_for('from_file', data_file='uber_demand_prediction_challenge.json'),
        'from_stream': url_for('from_stream'),
    }
    return render_template('index.html', links=links)
'''
The from_file API. Accepts a get parameter 'data_file' that points at a data file
containing the login data.
'''
@app.route('/from_file', methods=['GET'])
def from_file():
    """Build hourly login-demand forecasts for 2012-05-01..2012-05-15 from a
    JSON file of login timestamps named by the 'data_file' query parameter,
    and render them grouped by day."""
    if request.method == 'GET':
        # NOTE(review): data_file comes straight from the query string and is
        # opened as a local path -- validate/whitelist it before deploying
        # this endpoint anywhere untrusted.
        data_file = request.args.get('data_file', '')
        dp = Uber.DemandPredictor()
        # Use a context manager so the file handle cannot leak if JSON
        # parsing fails (original code left the file open on error).
        with open(data_file, 'r') as f:
            logins = json.loads(f.read())
        logins_np = np.array([dateutil.parser.parse(x) for x in logins], dtype=datetime.datetime)
        for login in logins_np:
            dp.addLogin(login)
        days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        forecast = []
        start_date = datetime.datetime(2012, 5, 1, hour = 0, minute = 0, second = 0)
        end_date = datetime.datetime(2012, 5, 15, hour = 23, minute = 59, second = 59)
        # Sentinel date far outside the range so the first iteration always
        # starts a new day entry.
        current_date = datetime.datetime(1972, 11, 16, hour = 0, minute = 0, second = 0)
        day_index = -1
        for single_date in Uber.daterange(start_date, end_date, increment='hours'):
            if single_date.date() != current_date.date():
                # First hour of a new day: open a new forecast bucket.
                forecast.append(
                    {
                        'display_date': '%s, %s %i'%(days[single_date.weekday()], calendar.month_name[single_date.month], single_date.day),
                        'forecasts': [dp.forecast(single_date.weekday(), single_date.hour)]
                    }
                )
                current_date = single_date
                day_index += 1
            else:
                forecast[day_index]['forecasts'].append(dp.forecast(single_date.weekday(), single_date.hour));
        return render_template('from_file.html', forecast=json.dumps(forecast))
'''
The from_stream API.
'''
@app.route('/from_stream')
def from_stream():
    """Server-sent-events endpoint: replay the sample login file as an
    endless fake stream and emit an updated forecast after each login."""
    dp = Uber.DemandPredictor()
    '''
    This is a fake stream of data. It loops over the provided JSON file.
    '''
    def login_stream(logins):
        # Cycle forever over the sample logins, feeding each into the shared
        # DemandPredictor and yielding one SSE message per login.
        for login in itertools.cycle(logins):
            parsed_login = dateutil.parser.parse(login)
            dp.addLogin(parsed_login)
            day = parsed_login.weekday()
            hour = parsed_login.hour
            forecast = dp.forecast(day, hour)
            ret = {'day':day, 'hour':hour, 'forecast':forecast}
            yield "data: %s\n\n" % (json.dumps(ret))
    data_file = 'uber_demand_prediction_challenge.json'
    # Use a context manager so the file handle cannot leak if JSON parsing
    # fails (original code left the file open on error).
    with open(data_file, 'r') as f:
        logins = json.loads(f.read())
    if request.headers.get('accept') == 'text/event-stream':
        return Response(login_stream(logins), content_type='text/event-stream')
    return redirect(url_for('static', filename='from_stream.html'))
if __name__ == '__main__':
    # threaded=True lets the blocking SSE generator served by /from_stream
    # run alongside other requests in the development server.
    app.run(debug=True, threaded=True)
| mksachs/UberCC | uber_API.py | Python | mit | 3,244 |
import pandas as pd
from datetime import date, timedelta
import time
import numpy as np
import re
import psycopg2
import ConfigParser
import argparse
from sqlalchemy import create_engine
import random
import sql
# Command-line interface for the feature-generation script.
parser = argparse.ArgumentParser()
parser.add_argument('-cf','--contract_file',help='Contract data file')
parser.add_argument('-if','--invest_file',help='Labelled data file')
parser.add_argument('-a','--amounts',action='store_true',default=False,help='Calculate aggregated amount features')
# BUG FIX: these two options previously repeated the same short flag
# ('-dist','-dist' and '-dom','-dom'), which argparse rejects as a
# conflicting option string; the long forms were clearly intended.
parser.add_argument('-dist','--dist',action='store_true',default=True,help='Calculate distribution features')
parser.add_argument('-dom','--dom',action='store_true',default=False,help='Calculate dominance features')
# type=int so downstream arithmetic/comparisons see numbers, not the raw
# CLI strings (e.g. years*365 and the row-count limit comparison).
parser.add_argument('-y','--num_years',type=int,default=0,help='Time periods in years')
parser.add_argument('-cat','--categ',default=['major_sector'],nargs='*',help='Categoricals to use')
parser.add_argument('-id','--table_id',default=time.strftime("%Y%m%d"),help='ID for SQL tables')
parser.add_argument('-lim','--contract_num_lim',type=int,default=5000,help='Maximum number of rows to use')
args = parser.parse_args()
def connect():
    """Connect to the local world_bank Postgres database.

    Reads the 'SQL' password from the local ``config`` file and returns an
    open psycopg2 connection. (The original read the config file twice;
    the first read was dead code and has been removed.)
    """
    config = ConfigParser.RawConfigParser()
    config.read('config')
    password = config.get('SQL','password')
    con = psycopg2.connect(host="localhost",user='dssg',password=password,dbname="world_bank")
    return con
def snake_case(name):
    """Normalise an entity name to a snake_case key.

    Lowercases, strips punctuation, removes common legal suffixes
    (llc, ltd, co, ...) and converts word separators to underscores.
    Non-string input (e.g. NaN/None from pandas) yields ''.
    """
    # Deduplicated list (originally 'llc' and 'ltd' appeared twice, which
    # added nothing to the alternation).
    remove_list = ['llc','ltd','co','corporation','srl','nv','limited','pvtltd']
    remove = '|'.join(remove_list)
    regex = re.compile(r'\b('+remove+r')\b', flags=re.IGNORECASE)
    try:
        s1 = name.lower()
        s1 = s1.replace('.','')
        s1 = regex.sub("", s1)
        s1 = s1.strip()
        s1 = re.sub(' +','_',s1)
        s1 = re.sub('-','_',s1)
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s1)
        s1 = s1.replace('*','')
        s1 = s1.replace('(','')
        s1 = s1.replace(')','')
        s1 = s1.replace('"','')
        s1 = s1.replace(',','')
        s1 = s1.replace('#','')
        s1 = s1.replace(':','_')
        s1 = s1.replace('&','_')
        s1 = s1.replace('\'','')
        s1 = s1.replace('/','_')
        s1 = re.sub('_+','_',s1)
    except AttributeError:
        # name has no .lower() -> not a string (NaN, None, numbers); a bare
        # ``except:`` previously hid every other failure mode as well.
        s1 = ''
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def reformat(data,column,inplace=False,shorten=False):
    # Apply snake_case cleaning to `column`, writing either back into the
    # column (inplace=True) or into a new '<column>_reformat' column.
    # With shorten=True, additionally drop 'and' and all vowels to produce
    # compact identifiers.
    if inplace:
        data[column] = data[column].map(lambda x: snake_case(x))
    else:
        data[column + '_reformat'] = data[column].map(lambda x: snake_case(x))
    if shorten:
        # NOTE(review): shortening always rewrites the original `column`,
        # even when inplace=False wrote the cleaned values to
        # '<column>_reformat' -- confirm this asymmetry is intended.
        data[column] = [re.sub(r'and', '', x).replace('__','_') for x in data[column]]
        data[column] = [re.sub(r'[aeiou]', '', x) for x in data[column]]
    return data
def binarize(data, fields):
    """One-hot encode the categorical column `fields`, appending the
    indicator columns (named 'is_<fields>_<value>_ct') to `data`."""
    indicator = pd.get_dummies(data[fields]).astype('int64')
    indicator.columns = ['is_%s_%s_ct' % (fields, value) for value in indicator.columns]
    return data.merge(indicator, left_index=True, right_index=True, how='left')
def conditional_amounts(data):
    """For every plain indicator column ('is' in the name, excluding
    totals/cumulative/percent/dominance variants), add an '_amt' column
    holding the indicator times 'amount_standardized'. Mutates and
    returns `data`."""
    blocked = ('total', 'cum', 'percent', 'dominance')
    targets = [col for col in data.columns
               if 'is' in col and not any(tag in col for tag in blocked)]
    for col in targets:
        data[re.sub('_ct$', '', col) + '_amt'] = data[col] * data['amount_standardized']
    return data
def distribution(data, field, amount=False):
    """Row-wise percentage distribution over the cumulative indicator
    columns for `field` (count columns by default, '_amt' columns when
    amount=True). Returns a new frame with '_percent'-suffixed columns."""
    blocked = ('total', 'percent', 'dominance')
    selected = []
    for col in data.columns:
        if 'is' not in col or 'cum' not in col or field not in col:
            continue
        if any(tag in col for tag in blocked):
            continue
        # keep amount columns iff amount=True, count columns otherwise
        if ('amt' in col) == bool(amount):
            selected.append(col)
    dist = data[selected].apply(lambda row: 100.0 * row / row.sum(), axis=1)
    dist.columns = [col + '_percent' for col in dist.columns]
    return dist
def count_previous_contracts(data,days=0,amount = True, count = False):
    """Count number of data entries in the past n days from each entry"""
    # For each plain indicator column, compute a running sum over all
    # contracts signed up to (and including) each signing date, optionally
    # restricted to a trailing window of `days` days (days == 0 means
    # "all history"). With amount=True the sum is of contract amounts where
    # the indicator fires; otherwise it is a count.
    # NOTE(review): the `count` parameter is immediately shadowed by the
    # local `count = 0` below, so passing it has no effect -- confirm intent.
    def sum_func(column):
        def inner_func(t):
            if days == 0:
                min_date_lim = 0
            else:
                min_date_lim = t - timedelta(days)
            # rows whose signing date falls in (min_date_lim, t]
            # NOTE(review): DataFrame.ix is a long-removed pandas API.
            total = data.ix[(min_date_lim < data['contract_signing_date']) & (data['contract_signing_date'] <= t),[column,'amount_standardized']]
            if amount:
                total_sum = ((total[column] != 0)*total['amount_standardized']).cumsum()
            else:
                total_sum = total[column].cumsum()
            return total_sum
        return inner_func
    # NOTE(review): DataFrame.sort was removed in pandas 0.20 (sort_values).
    data = data.sort('contract_signing_date')
    count = 0
    for col in data.columns:
        if 'is' in col and 'total' not in col and 'cum' not in col and 'full' not in col and 'year' not in col:
            func = sum_func(col)
            result_temp = data[['contract_signing_date']].apply(func)
            result_temp = pd.DataFrame(result_temp)
            result_temp.columns = [col + '_cum']
            # first matching column seeds the result frame; later ones merge in
            if count == 0:
                result = result_temp
            else:
                result = result.merge(result_temp,left_index=True,right_index=True,how='left')
            count += 1
    data = data.merge(result,left_index=True,right_index=True,how='left')
    return data
def dominance(data, field, not_field=None):
    """For each cumulative indicator column of `field`, add a '_dominance'
    column: the entity's cumulative value divided by the matching
    '<col>_total' column. Divide-by-zero results become 0. Mutates and
    returns `data`.

    `not_field` was never used; its mutable default ([]) has been replaced
    by None for interface hygiene.
    """
    for col in data.columns:
        if ('is' in col and 'cum' in col and field in col and 'total' not in col
                and 'percent' not in col and 'dominance' not in col):
            data[col + '_dominance'] = data[col] / data[col + '_total']
            # x/0 yields +/-inf; 0/0 yields NaN -- normalise both to 0
            data.replace([np.inf, -np.inf], np.nan, inplace=True)
            data[col + '_dominance'] = data[col + '_dominance'].fillna(0)
    return data
def rank(data, col_base, no=None):
    """Rank the values in a set of fields to create anonymous ranking fields
    e.g. first_major_sector_percent, second_major_sector_percent, ...

    Selects every column whose name contains all substrings in `col_base`,
    sorts each row's values in descending order, and returns them as new
    columns named '<col_base joined>_1', '_2', ...

    `no` was never used; its mutable default ([]) has been replaced by None
    for interface hygiene.
    """
    matching = [col for col in data.columns
                if all(base in col for base in col_base)]
    values = np.array(data[matching])
    values.sort(axis=1)          # ascending in place ...
    values = np.fliplr(values)   # ... then reversed -> descending per row
    names = ['_'.join(('_'.join(col_base), str(i + 1))) for i in range(len(matching))]
    return pd.DataFrame(values, index=data.index, columns=names)
def get_engine():
    """Build a SQLAlchemy engine for the local world_bank database, reading
    the 'SQL' password from the local ``config`` file."""
    cfg = ConfigParser.RawConfigParser()
    cfg.read('config')
    pwd = cfg.get('SQL', 'password')
    return create_engine(r'postgresql://dssg:' + pwd + '@localhost/world_bank')
def write_sql_query(fields, table_name, years=0, amount=False, total=False, table_name2=''):
    """Build a self-join SQL query that, for each contract row, sums the
    given indicator `fields` over all earlier contracts (optionally within a
    `years`-year window, optionally restricted to the same supplier unless
    `total` is set, in which case overall totals are produced).

    NOTE(review): table names and field names are interpolated directly into
    the SQL text; only use with trusted identifiers. The `amount` parameter
    is accepted but unused (kept for interface compatibility).
    """
    if table_name2 == '':
        table_name2 = table_name
    suffix = '_cum_total' if total else '_cum'
    parts = ['SELECT st1.supplier_reformat,st1.contract_signing_date, st1.amount_standardized,st1.unique_id']
    for field in fields:
        parts.append(',\nSUM(st2."%s") AS "%s%s"' % (field, field, suffix))
    parts.append('\nFROM\n%s AS st1\n' % table_name)
    parts.append('INNER JOIN\n%s AS st2\n' % table_name2)
    parts.append('ON\nst2.contract_signing_date <= st1.contract_signing_date')
    if years != 0:
        parts.append(' AND\n st2.contract_signing_date >= st1.contract_signing_date::date - %s' % str(years * 365))
    if not total:
        parts.append(' AND\n st2.supplier_reformat = st1.supplier_reformat')
    parts.append('\nGROUP BY st1.contract_signing_date, st1.amount_standardized, st1.supplier_reformat, st1.unique_id\n')
    parts.append('ORDER BY st1.contract_signing_date;')
    return ''.join(parts)
def fix_duplicate_columns(data):
    """Resolve pandas merge-suffix column pairs: drop every column whose
    name ends in '_y' and strip the '_x' suffix from its partner. Mutates
    and returns `data`.

    BUG FIX: the original matched with re.match('.*_y', col), which (without
    a '$' anchor) also matched '_y' occurring mid-name -- e.g. 'fiscal_year'
    was silently dropped. endswith() implements the actual intent.
    """
    kept = []
    for col in data.columns:
        if col.endswith('_y'):
            # right-hand duplicate from a merge: discard
            data.drop(col, axis=1, inplace=True)
        elif col.endswith('_x'):
            kept.append(col[:-2])
        else:
            kept.append(col)
    data.columns = kept
    return data
def setup_binary_fields(contracts,amounts,categories):
    # Create one-hot indicator columns for each categorical in `categories`
    # (plus, when exactly two categories are supplied, indicators for their
    # cross product), optionally with matching conditional-amount columns.
    # Returns the augmented frame and the list of generated field names.
    print 'Generating binary fields...'
    start = time.time()
    boolean_fields = []
    for field in categories:
        # boolean_fields.append([])
        print ' ' + field + '...',
        contracts = binarize(contracts,field)
        for col in contracts.columns:
            # per-category indicators are only recorded when we are not in
            # the two-category (cross-product) mode handled below
            if 'is' in col and field in col and len(categories) != 2:
                if not amounts:
                    boolean_fields.append(col)
                else:
                    boolean_fields.append(re.sub('_ct$','',col) + '_amt')
        print time.time() - start, 's elapsed'
    if len(categories) == 2:
        print 'Generating combined binary fields...'
        start = time.time()
        # boolean_fields.append([])
        for cat1 in contracts[categories[0]].unique():
            for cat2 in contracts[categories[1]].unique():
                # only materialise combinations that actually occur
                if ( (contracts[categories[0]] == cat1) & (contracts[categories[1]] == cat2)).sum() > 0:
                    col_name = '_'.join(('is',categories[0],categories[1],cat1,cat2 ,'ct'))
                    contracts[col_name] = (contracts[categories[0]] == cat1) & (contracts[categories[1]] == cat2)
                    contracts[col_name] = contracts[col_name].astype('int64')
                    if not amounts:
                        boolean_fields.append(col_name)
                    if amounts:
                        boolean_fields.append(re.sub('_ct$','',col_name) + '_amt')
        print time.time() - start, 's elapsed'
    print 'Boolean fields: ',len(boolean_fields)
    print 'Conditional amounts...'
    if amounts:
        contracts = conditional_amounts(contracts)
    print time.time() - start, 's elapsed'
    return contracts,boolean_fields
def drop_duplicate_cols(contracts):
cols_fixed = []
for col in contracts.columns:
pattern_y = re.compile('.*_y')
pattern_x = re.compile('.*_x')
if pattern_y.match(col):
print 'dropping ' + col
contracts.drop(col,axis=1,inplace=True)
elif pattern_x.match(col):
print 'keeping ' + col,col[:-2]
cols_fixed.append(col[:-2])
else:
cols_fixed.append(col)
contracts.columns = cols_fixed
col_list = []
for i,col in enumerate(contracts.columns):
if col not in col_list:
col_list.append(col)
else:
col_list.append(col + '2')
contracts.columns = col_list
return contracts
def cleaning(contracts,categories):
    """Drop duplicate column names, normalise supplier/country/region/sector
    names, collapse unassigned sectors into 'Other', keep the columns of
    interest and coerce amounts/dates to proper types."""
    drop_duplicate_cols(contracts)
    contracts = reformat(contracts,'supplier')
    contracts = reformat(contracts,'country',inplace=True)
    contracts = reformat(contracts,'region',inplace=True,shorten=True)
    # NOTE(review): these three chained assignments (df[col][mask] = value)
    # rely on chained indexing; on newer pandas this can silently write to a
    # copy -- consider .loc[mask, col] if this script is ever modernised.
    contracts['major_sector'][contracts['major_sector'].str.contains("\(H\)")] = 'Other'
    contracts['major_sector'][contracts['major_sector'].str.contains("X")] = 'Other'
    contracts['major_sector'][contracts['major_sector'].str.contains("Not assigned")] = 'Other'
    contracts['prc_ctg'] = contracts['procurement_category']
    contracts['prc_typ'] = contracts['procurement_type']
    contracts = reformat(contracts,'major_sector',inplace=True,shorten=True)
    contracts = reformat(contracts,'prc_ctg',inplace=True,shorten=True)
    contracts = reformat(contracts,'prc_typ',inplace=True,shorten=True)
    # short aliases used as extra categoricals downstream
    contracts['ctry'] = contracts['country']
    contracts['rgn'] = contracts['region']
    contracts['sect'] = contracts['major_sector']
    #interesting columns
    contracts = contracts[['supplier_reformat','supplier','contract_signing_date',
                           'amount_standardized','wb_contract_number','unique_id'] + categories]
    contracts = contracts[contracts['amount_standardized'].notnull()]
    contracts['amount_standardized'] = contracts['amount_standardized'].astype('int64')
    #convert date to datetime
    contracts['contract_signing_date'] = pd.to_datetime(contracts['contract_signing_date'])
    return contracts
def main():
    """End-to-end supplier feature generation.

    Reads the contract and labelled-investigation CSVs (paths from the
    module-level ``args`` namespace), builds boolean category fields,
    pushes them to the database, aggregates them with a generated SQL
    query, and writes per-supplier feature tables back to the database.

    Depends on module-level helpers defined elsewhere in this file:
    get_engine, drop_duplicate_cols, cleaning, setup_binary_fields,
    conditional_amounts, write_sql_query, distribution, dominance, rank.
    """
    print 'Connecting to database...',
    start = time.time()
    engine = get_engine()
    con = engine.connect()
    print time.time() - start,'s elapsed'
    print 'Reading data...',
    start = time.time()
    contracts = pd.read_csv(args.contract_file)
    # contracts = pd.read_csv('/mnt/data/world-bank/joinedcontracts_features_phase4_resolved.csv')
    # labelled_contracts = pd.read_csv('/mnt/data/world-bank/joinedcontracts_features_phase4_supplier_features_labelled_resolved.csv')
    labelled_contracts = pd.read_csv(args.invest_file)
    print time.time() - start, 's elapsed'
    print labelled_contracts.shape
    # Cap the labelled set at the N earliest-signed contracts.
    # NOTE(review): DataFrame.sort was removed in pandas 0.20; modern
    # pandas needs sort_values -- confirm the pinned pandas version.
    if len(labelled_contracts.index) > args.contract_num_lim:
        labelled_contracts.sort(['contract_signing_date'],inplace=True)
        labelled_contracts = labelled_contracts.head(args.contract_num_lim)
        print labelled_contracts.shape
    contracts['unique_id'] = contracts.index
    labelled_contracts['unique_id'] = labelled_contracts.index
    # Snapshot the labelled set to SQL, table named after the input file.
    labelled_contracts.to_sql(args.invest_file.split('/')[-1].split('.')[0] + '_' + args.table_id,engine,if_exists='replace')
    #drop duplicate column names
    contracts = drop_duplicate_cols(contracts)
    labelled_contracts = drop_duplicate_cols(labelled_contracts)
    #make sure labelled contracts are included in contracts (Should be true anyway)
    contracts = pd.concat([contracts,labelled_contracts[contracts.columns]])
    # NOTE(review): drop_duplicates(cols=...) is the pre-0.17 spelling of
    # subset=... -- confirm against the pinned pandas version.
    contracts.drop_duplicates(inplace=True,cols=['supplier','wb_contract_number','major_sector','amount_standardized'])
    amounts = args.amounts
    dist_bool = args.dist
    dom_bool = args.dom
    categories = args.categ
    dt = args.num_years
    supplier_list = labelled_contracts['supplier'].unique()
    if dist_bool:
        #we don't care about the overall distribution so limit ourselves to labelled suppliers
        print len(contracts.index)
        contracts = contracts[contracts['supplier'].isin(supplier_list)]
        print len(contracts.index)
    if dom_bool:
        #only need total counts for fields present in labelled data
        for categ in categories:
            print len(contracts.index)
            categ_list = labelled_contracts[categ].unique()
            contracts = contracts[contracts[categ].isin(categ_list)]
            print len(contracts.index)
    # Map long category names onto the short aliases created by cleaning().
    categs_temp = []
    for categ in categories:
        if categ == 'major_sector':
            categ = 'sect'
        if categ == 'country':
            categ = 'ctry'
        if categ == 'region':
            categ = 'rgn'
        if categ == 'procurement_category':
            categ = 'prc_ctg'
        if categ == 'procurement_type':
            categ = 'prc_typ'
        categs_temp.append(categ)
    categories = categs_temp
    #clean data and create dummy boolean fields
    contracts = cleaning(contracts,categories)
    labelled_contracts = cleaning(labelled_contracts,categories)
    contracts,boolean_fields = setup_binary_fields(contracts,amounts,categories)
    labelled_contracts,boolean_fields_labelled = setup_binary_fields(labelled_contracts,amounts,categories)
    start_cols = labelled_contracts.columns
    print 'Num years: ', dt
    field = '_'.join(categories)
    field_list = boolean_fields
    field_list_labelled = field_list_labelled = boolean_fields_labelled
    field_list = [val for val in boolean_fields_labelled if val in set(boolean_fields)]
    if True:
#    for field_list,field_list_labelled in zip(boolean_fields,boolean_fields_labelled):
        table_name = 'contracts_w_booleans_' + args.table_id
        if amounts:
            table_name = '_'.join((table_name,'amt',field))
        else:
            table_name = '_'.join((table_name,field))
        result = con.execute("SELECT table_name FROM information_schema.tables ORDER BY table_name;")
        result = list(result.fetchall())
        tables = [r[0] for r in result]
        if True:
            print 'Running full table'
            print 'Writing to database...'
            start = time.time()
            contracts_boolean_fields = contracts[['supplier_reformat','contract_signing_date',
                                                  'amount_standardized','unique_id'] + field_list]
            con.execute('DROP TABLE IF EXISTS ' + table_name + ';')
            print len(contracts_boolean_fields.index)
            # Write in 5000-row chunks to keep the inserts manageable.
            # NOTE(review): Python-2 integer division; under Python 3 this
            # needs // to stay an int.
            for q in range((len(contracts_boolean_fields.index) / 5000) + 1):
                subset = contracts_boolean_fields.iloc[q*5000:min((q+1)*5000,len(contracts_boolean_fields.index))]
                print q, subset.shape
                if (q==0):
                    subset.to_sql(table_name,engine,if_exists='replace')
                else:
                    subset.to_sql(table_name,engine,if_exists='append')
        print 'Writing to database...',
        table_name2 = 'contracts_w_booleans_lab_' + args.table_id
        if amounts:
            table_name2 = '_'.join((table_name2,'amt',field))
        else:
            table_name2 = '_'.join((table_name2,field))
        start = time.time()
        contracts_boolean_fields_labelled = labelled_contracts[['supplier_reformat','contract_signing_date',
                                                                'amount_standardized','unique_id']
                                                               + field_list]
        con.execute('DROP TABLE IF EXISTS ' + table_name2 + ';')
        contracts_boolean_fields_labelled.to_sql(table_name2, engine)
        print time.time() - start,'s elapsed'
        # Run the aggregation once without and (if dominance is on) once
        # with total aggregation.
        total_agg = [False]
        if dom_bool:
            total_agg.append(True)
        for tagg in total_agg:
            print 'Running SQL statement...',tagg,
            start = time.time()
            sql_statement = write_sql_query(field_list,
                                            table_name2,
                                            total=tagg,
                                            table_name2=table_name)
            result = con.execute(sql_statement)
            print result
            sql_results = pd.DataFrame(result.fetchall())
            sql_results.columns = result.keys()
            # Cumulative count/amount columns come back as Decimal/str;
            # cast so later numeric ops work.
            for col in sql_results.columns:
                if 'ct_cum' in col or 'amt_cum' in col:
                    sql_results[col] = sql_results[col].astype(float)
            print labelled_contracts.shape
            labelled_contracts = labelled_contracts.merge(sql_results,
                                                          on=['supplier_reformat',
                                                              'contract_signing_date',
                                                              'amount_standardized',
                                                              'unique_id'],
                                                          how='left')
            print labelled_contracts.shape
            print time.time() - start,'s elapsed'
    print 'Generating supplier specific counts...'
    start = time.time()
    print '    ' + field + '...'
    labelled_contracts = labelled_contracts.sort(['supplier','contract_signing_date'])
    if dist_bool:
        print '    distribution...',
        start = time.time()
        dist = distribution(labelled_contracts,field,amount=amounts)
        labelled_contracts = labelled_contracts.merge(dist,left_index=True,right_index=True,how='left')
        print time.time() - start, 's elapsed'
    if dom_bool:
        print '    dominance...',
        start = time.time()
        labelled_contracts = dominance(labelled_contracts,field)
        print time.time() - start, 's elapsed'
    #drop temporary fields
    for col in labelled_contracts.columns:
        if '_total' in col:
            labelled_contracts.drop(col,axis=1,inplace=True)
    print 'Creating anonymous ranking features...'
    start = time.time()
    if dist_bool:
        if not amounts:
            print field
            anonymous_dist = rank(labelled_contracts,col_base=[field,'percent','ct'])
        else:
            anonymous_dist = rank(labelled_contracts,col_base=[field,'percent','amt'])
        labelled_contracts = labelled_contracts.merge(anonymous_dist,left_index=True,right_index=True)
    print time.time() - start, 's elapsed'
    # Suffix every newly created feature column with the time window.
    cols_added = labelled_contracts.columns.difference(start_cols).tolist()
    dt_name = 'full'
    if int(dt) != 0:
        dt_name = str(dt) + 'years'
    cols_renamed = []
    for col in cols_added:
        cols_renamed.append(col + '_' + dt_name)
    dictionary = dict(zip(cols_added, cols_renamed))
    labelled_contracts.rename(columns=dictionary,inplace=True)
    labelled_contracts = labelled_contracts.sort(['supplier','contract_signing_date'])
    # NOTE(review): boolean_fields_labelled is a flat list of strings, so
    # this comprehension flattens it into single CHARACTERS -- it looks
    # like a leftover from when it was a list of lists; confirm intent.
    booleans = [inner for outer in boolean_fields_labelled for inner in outer]
    # NOTE(review): Index - list set-difference was deprecated/removed in
    # modern pandas (use Index.difference) -- confirm pinned version.
    contracts_to_write = labelled_contracts[labelled_contracts.columns - booleans]
    # Shorten column names so they fit database identifier limits.
    contracts_to_write.columns = [col.replace('country','cty') for col in contracts_to_write.columns]
    contracts_to_write.columns = [col.replace('percent','pct') for col in contracts_to_write.columns]
    contracts_to_write.columns = [col.replace('major_sector','sect') for col in contracts_to_write.columns]
    contracts_to_write.columns = [col.replace('dominance','dom') for col in contracts_to_write.columns]
    contracts_to_write.columns = [col.replace('amount','amt') for col in contracts_to_write.columns]
    contracts_to_write.columns = [col.replace('years','yr') for col in contracts_to_write.columns]
    contracts_to_write.columns = [col.lower() for col in contracts_to_write.columns]
    contracts_to_write = contracts_to_write.fillna(0)
    # Drop all-zero columns: they carry no signal.
    zero_cols = contracts_to_write.apply(lambda x: np.all(x==0))
    for col,value in zip(zero_cols.index,zero_cols):
        if value:
            contracts_to_write.drop(col,axis=1,inplace=True)
    if amounts:
        agg_types = ['amt_cum_pct','pct_amt']
    else:
        agg_types = ['ct_cum_pct','pct_ct']
    already_used = ['unique_id','supplier_reformat','supplier',
                    'wb_contract_number','sect','region','ctry',
                    'contract_signing_date','amt_standardized']
    # One output table per aggregation type, each with its own feature set.
    for agg_type in agg_types:
        final_cols = ['unique_id','supplier_reformat','supplier',
                      'wb_contract_number','contract_signing_date',
                      'amt_standardized'] + categories
        for col in contracts_to_write.columns:
            if agg_type in col and col not in already_used:
                already_used.append(col)
                final_cols.append(col)
        to_write_subset = contracts_to_write[final_cols]
        output_name = '_'.join(('cntrcts_splr_ftr_set_' + args.table_id,field,agg_type))
        if dist_bool:
            output_name += '_dist'
        if dom_bool:
            output_name += '_dominance'
        output_name += '_' + dt_name
#        output_name += '_test2'
        con.execute('DROP TABLE IF EXISTS ' + output_name + ';')
        to_write_subset.to_sql(output_name,engine)
    print labelled_contracts.shape
    print contracts.shape
if __name__ == "__main__":
    main()
| eredmiles/GeneralDataScienceToolsDSSG2015 | WorldBank2015/Code/data_pipeline_src/supplier_feature_gen.py | Python | mit | 24,730 |
__author__ = 'Xsank'
from handlers import IndexHandler
from handlers import WSHandler
# URL routing table: "/" serves the index page; "/ws" is the websocket
# endpoint.
handlers=[
(r"/",IndexHandler),
(r"/ws",WSHandler),
] | hanmichael/Pyrumpetroll | urls.py | Python | mit | 153 |
import numpy as N
class Binner(object):
    """Class to perform averages of arbitrary variables over some bin
    (i.e. range) of a coordinate.

    An example:

    r, u = calculateVelocitiesAtPositions()
    # r is nx X ny X nz X 3 array of positions
    # u is the same but for velocities at the corresponding position.

    # Distance from origin
    rNorm = N.sqrt(N.sum(r**2, axis=-1))
    # velocity magnitude
    uNorm = N.sqrt(N.sum(u**2, axis=-1))

    b = Binner(rNorm.ravel(), bins=100)
    uBin = b.mean(uNorm.ravel())

    g = Gnuplot.Gnuplot()
    g('set datafile missing "nan"')
    g('set logscale')
    g.plot(Gnuplot.Data(b.means,
                        b.mean(uNorm.ravel()),
                        b.map(N.std, rNorm.ravel()),
                        b.map(N.std, uNorm.ravel()),
                        **{'with': 'xyerrorlines'}))
    """

    def __init__(self, coord, **kwargs):
        """coord -- the coordinate on which we're constructing the bins

        The following keyword arguments are as for numpy.histogram:

        bins : int or sequence of scalars, optional
            If an int, the number of equal-width bins in the given range
            (10 by default); if a sequence, the bin edges, including the
            rightmost edge.
        range : (float, float), optional
            The lower and upper range of the bins; defaults to
            (coord.min(), coord.max()).
        weights : array_like, optional
            An array of weights, of the same shape as coord.
        """
        self.coordShape = coord.shape
        # The 'new' and 'normed' keywords were deprecated and then removed
        # from numpy.histogram; the modern call returns (counts, edges)
        # exactly as the old new=True, normed=False form did.
        self.counts, self.edges = N.histogram(coord, **kwargs)
        self.centres = 0.5*(self.edges[:-1]+self.edges[1:])
        self.widths = self.edges[1:]-self.edges[:-1]
        self.nBins = len(self.counts)

        # binmap[i] is the index of the highest bin whose lower edge the
        # coordinate exceeds; values at or below the first edge land in
        # bin 0.  (N.int was removed in numpy 1.24 -- plain int is the
        # equivalent dtype.)
        self.binmap = N.zeros(self.coordShape, dtype=int)
        for i in range(self.nBins):
            self.binmap[N.where(coord>self.edges[i])] = i
            continue
        # One mask per bin: True everywhere EXCEPT that bin's members, so
        # a masked array built with it exposes only the bin's elements.
        self.masks = [N.ma.masked_not_equal(self.binmap, i, copy=False).mask
                      for i in range(self.nBins)]
        self.maskedCoords = [N.ma.array(coord, mask=msk)
                             for msk in self.masks]
        # Mean coordinate of each bin's members.
        self.means = N.array([mc.mean() for mc in self.maskedCoords])
        return

    def map(self, func, data):
        """For each bin of the coordinate specified in the
        constructor, take an array of the corresponding elements from
        "data" and apply "func" to it, returning a numpy array of the
        result.

        func -- callable
        data -- numpy array of data, same shape as coord
        """
        return N.array([func(N.ma.array(data, mask=msk))
                        for msk in self.masks])

    def mean(self, data):
        """For each bin of the coordinate specified in the
        constructor, take an array of the corresponding elements from
        "data" and calculate its mean, returning a numpy array of the
        results.

        data -- numpy array of data, same shape as coord
        """
        return self.map(N.mean, data)
if __name__ == '__main__':
    # Demo/driver: bin the velocity magnitude of a pickled 3-D velocity
    # field by distance from the grid centre and plot the profile.
    # Python 2 only (cPickle) and depends on a site-specific data path
    # plus the external Gnuplot package.
    import cPickle
    import Gnuplot
    u = cPickle.load(file('/Disk/radio3data1/s0567077/pd_64/u.000122880'))
    uNorm = N.sqrt(N.sum(u**2, axis=-1))
    # Cell-centred coordinates relative to the centre of the grid,
    # shaped nx X ny X nz X 3.
    r = N.mgrid[(1.-u.shape[0])/2.:(1.+u.shape[0])/2.,
                (1.-u.shape[1])/2.:(1.+u.shape[1])/2.,
                (1.-u.shape[2])/2.:(1.+u.shape[2])/2.,].transpose((1,2,3,0))
    rNorm = N.sqrt(N.sum(r**2, axis=-1))
    b = Binner(rNorm.ravel(), bins=100, range=(0, u.shape[0]/2.))
    uBin = b.mean(uNorm.ravel())
    g = Gnuplot.Gnuplot()
    g('set datafile missing "nan"')
    g('set logscale')
    # Plot mean velocity vs. mean radius with std-dev error bars, plus a
    # reference power law.
    g.plot(Gnuplot.Data(b.means,
                        b.mean(uNorm.ravel()),
                        b.map(N.std, rNorm.ravel()),
                        b.map(N.std, uNorm.ravel()),
                        **{'with': 'xyerrorlines'}),
           '2e-4*x**-3')
| rupertnash/subgrid | python/dqTools/binner.py | Python | mit | 4,769 |
from __future__ import division, absolute_import, print_function,\
unicode_literals
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup, Extension
from distutils.core import Extension
from distutils.errors import DistutilsError
from distutils.command.build_ext import build_ext
# Execute nanomsg/version.py to pull __version__ into this namespace
# without importing the (possibly not-yet-built) package.
with open(os.path.join('nanomsg','version.py')) as f:
    exec(f.read())
class skippable_build_ext(build_ext):
    """build_ext variant that degrades gracefully: if compiling the
    CPython extension fails (no compiler, missing headers/libraries),
    print a warning and continue so the pure-Python/ctypes backend can
    still be installed."""
    def run(self):
        try:
            build_ext.run(self)
        except Exception as e:
            # Deliberately swallow the build failure -- the package is
            # usable without the compiled _nanomsg_cpy extension.
            print()
            print("=" * 79)
            print("WARNING : CPython API extension could not be built.")
            print()
            print("Exception was : %r" % (e,))
            print()
            print(
                "If you need the extensions (they may be faster than "
                "alternative on some"
            )
            print(" platforms) check you have a compiler configured with all"
                  " the necessary")
            print(" headers and libraries.")
            print("=" * 79)
            print()
# Probe for the optional nanoconfig shared library; when it is present
# the extension is compiled with WITH_NANOCONFIG and linked against it.
try:
    import ctypes
    if sys.platform in ('win32', 'cygwin'):
        _lib = ctypes.windll.nanoconfig
    else:
        _lib = ctypes.cdll.LoadLibrary('libnanoconfig.so')
except OSError:
    # Building without nanoconfig
    cpy_extension = Extension(str('_nanomsg_cpy'),
                              sources=[str('_nanomsg_cpy/wrapper.c')],
                              libraries=[str('nanomsg')],
                              )
else:
    # Building with nanoconfig
    cpy_extension = Extension(str('_nanomsg_cpy'),
                              define_macros=[('WITH_NANOCONFIG', '1')],
                              sources=[str('_nanomsg_cpy/wrapper.c')],
                              libraries=[str('nanomsg'), str('nanoconfig')],
                              )
install_requires = []
try:
    import importlib
except ImportError:
    # Python < 2.7 lacks importlib in the stdlib; pull it from PyPI.
    install_requires.append('importlib')
setup(
    name='nanomsg',
    version=__version__,
    packages=[str('nanomsg'), str('_nanomsg_ctypes'), str('nanomsg_wrappers')],
    ext_modules=[cpy_extension],
    # Custom build_ext so a failed compile does not abort the install.
    cmdclass = {'build_ext': skippable_build_ext},
    install_requires=install_requires,
    description='Python library for nanomsg.',
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
    ],
    author='Tony Simpson',
    author_email='agjasimpson@gmail.com',
    url='https://github.com/tonysimpson/nanomsg-python',
    keywords=['nanomsg', 'driver'],
    license='MIT',
    test_suite="tests",
)
| romanoved/nanomsg-python | setup.py | Python | mit | 2,912 |
"""Unit tests for the copy module."""
import sys
import copy
import copy_reg
import unittest
from test import test_support
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assert_(copy.Error is copy.error)
self.assert_(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.copy(x) is x, repr(x))
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0] is y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C:
__metaclass__ = Meta
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.deepcopy(x) is x, repr(x))
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is y)
self.assertEqual(len(y), 1)
def test_deepcopy_tuple(self):
x = ([1, 2], 3)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_tuple(self):
x = ([],)
x[0].append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0][0] is y)
def test_deepcopy_dict(self):
x = {"foo": [1, 2], "bar": 3}
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
def test_deepcopy_reflexive_dict(self):
x = {}
x['foo'] = x
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y['foo'] is y)
self.assertEqual(len(y), 1)
def test_deepcopy_keepalive(self):
memo = {}
x = 42
y = copy.deepcopy(x, memo)
self.assert_(memo[id(x)] is x)
def test_deepcopy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_deepcopy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo):
return C(copy.deepcopy(self.foo, memo))
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_reflexive_inst(self):
class C:
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assert_(y is not x)
self.assert_(y.foo is y)
# _reconstruct()
def test_reconstruct_string(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
y = copy.deepcopy(x)
self.assert_(y is x)
def test_reconstruct_nostate(self):
class C(object):
def __reduce__(self):
return (C, ())
x = C()
x.foo = 42
y = copy.copy(x)
self.assert_(y.__class__ is x.__class__)
y = copy.deepcopy(x)
self.assert_(y.__class__ is x.__class__)
def test_reconstruct_state(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_reconstruct_state_setstate(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_reconstruct_reflexive(self):
class C(object):
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assert_(y is not x)
self.assert_(y.foo is y)
# Additions for Python 2.3 and pickle protocol 2
def test_reduce_4tuple(self):
class C(list):
def __reduce__(self):
return (C, (), self.__dict__, iter(self))
def __cmp__(self, other):
return (cmp(list(self), list(other)) or
cmp(self.__dict__, other.__dict__))
x = C([[1, 2], 3])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x[0] is y[0])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.iteritems())
def __cmp__(self, other):
return (cmp(dict(self), list(dict)) or
cmp(self.__dict__, other.__dict__))
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
def test_copy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.copy(x)
self.assert_(x.foo is y.foo)
def test_deepcopy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.deepcopy(x)
self.assertEqual(x.foo, y.foo)
self.assert_(x.foo is not y.foo)
def test_copy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.copy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assert_(x[0] is y[0])
self.assert_(x.foo is y.foo)
def test_deepcopy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.deepcopy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assert_(x[0] is not y[0])
self.assert_(x.foo is not y.foo)
def test_copy_tuple_subclass(self):
class C(tuple):
pass
x = C([1, 2, 3])
self.assertEqual(tuple(x), (1, 2, 3))
y = copy.copy(x)
self.assertEqual(tuple(y), (1, 2, 3))
def test_deepcopy_tuple_subclass(self):
class C(tuple):
pass
x = C([[1, 2], 3])
self.assertEqual(tuple(x), ([1, 2], 3))
y = copy.deepcopy(x)
self.assertEqual(tuple(y), ([1, 2], 3))
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_getstate_exc(self):
class EvilState(object):
def __getstate__(self):
raise ValueError, "ain't got no stickin' state"
self.assertRaises(ValueError, copy.copy, EvilState())
def test_main():
    # regrtest entry point: run the whole TestCopy suite via test_support.
    test_support.run_unittest(TestCopy)
if __name__ == "__main__":
    test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.4/Lib/test/test_copy.py | Python | mit | 17,174 |
import logging
from django.core.management.base import BaseCommand
from waldur_rancher.utils import SyncUser
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = """Sync users from Waldur to Rancher."""

    def handle(self, *args, **options):
        """Run SyncUser and report per-action counts to stdout."""

        def print_message(count, action, name='user'):
            # Pluralize the noun when more than one object was affected.
            if count == 1:
                self.stdout.write(
                    self.style.SUCCESS('%s %s has been %s.' % (count, name, action))
                )
            else:
                self.stdout.write(
                    self.style.SUCCESS('%s %ss have been %s.' % (count, name, action))
                )

        result = SyncUser.run()
        for action in ['blocked', 'created', 'activated', 'updated']:
            print_message(result.get(action, 0), action)
        print_message(result.get('project roles deleted', 0), 'deleted', 'project role')
        # Bug fix: this line called the result dict -- result(...) raises
        # TypeError -- instead of result.get(...) like the line above.
        print_message(result.get('project roles created', 0), 'created', 'project role')
| opennode/nodeconductor-assembly-waldur | src/waldur_rancher/management/commands/sync_users.py | Python | mit | 986 |
# -*- coding: utf-8 -*-
#
# romeo documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 19 15:51:40 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
import alabaster
import subprocess

# On Read the Docs there is no Makefile step, so run doxygen here while
# the docs build to produce the XML that breathe consumes.
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
# Bug fix: `print os.getcwd()` is Python-2-only syntax and is a
# SyntaxError under the Python 3 interpreters Sphinx/RTD run; the call
# form behaves identically on both.
print(os.getcwd())
if read_the_docs_build:
    subprocess.call('doxygen doxygen.cfg', shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
breathe_projects = {
"romeo":"xml/",
}
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["breathe", "alabaster"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'romeo'
copyright = u'2015, erik'
author = u'erik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'romeodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'romeo.tex', u'romeo Documentation',
u'erik', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'romeo', u'romeo Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'romeo', u'romeo Documentation',
author, 'romeo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| eerimoq/robomower | romeo/romeo/doc/conf.py | Python | mit | 9,686 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build and return the Ship object for this autogenerated SWG template.

	`kernel` is the engine handle passed to every template factory; it is
	not used by this particular template.
	"""
	result = Ship()
	result.template = "object/ship/shared_hutt_heavy_s02_tier4.iff"
	# -1: no attribute template associated with this ship.
	result.attribute_template_id = -1
	result.stfName("","")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result | anhstudios/swganh | data/scripts/templates/object/ship/shared_hutt_heavy_s02_tier4.py | Python | mit | 417 |
from __future__ import absolute_import
import pytest
from qtpy import PYQT5, PYSIDE2
@pytest.mark.skipif(not (PYQT5 or PYSIDE2), reason="Only available in Qt5 bindings")
def test_qt3dinput():
    """Check that every expected class is exposed by the qtpy.Qt3DInput namespace."""
    Qt3DInput = pytest.importorskip("qtpy.Qt3DInput")
    # Same attributes, same order as before -- just driven by a loop.
    expected_classes = (
        "QAxisAccumulator",
        "QInputSettings",
        "QAnalogAxisInput",
        "QAbstractAxisInput",
        "QMouseHandler",
        "QButtonAxisInput",
        "QInputSequence",
        "QWheelEvent",
        "QActionInput",
        "QKeyboardDevice",
        "QMouseDevice",
        "QAxis",
        "QInputChord",
        "QMouseEvent",
        "QKeyboardHandler",
        "QKeyEvent",
        "QAbstractActionInput",
        "QInputAspect",
        "QLogicalDevice",
        "QAction",
        "QAbstractPhysicalDevice",
        "QAxisSetting",
    )
    for class_name in expected_classes:
        assert getattr(Qt3DInput, class_name) is not None
| sserrot/champion_relationships | venv/Lib/site-packages/qtpy/tests/test_qt3dinput.py | Python | mit | 1,343 |
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from django.views import generic
from django.views.generic.edit import FormView, CreateView
from django.core.urlresolvers import reverse_lazy
from django.core.mail import send_mail
from hacks.models import Hackathon, CodeMania, SendRSVP, RSVPConfirmation
from hacks.forms import HackathonForm
from .forms import HackathonForm, CodeManiaForm
import json
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseForbidden
# Create your views here.
class HackathonView(generic.View):
    """Serve the Hackathon landing page.

    Registration handling (the former ``post`` method, previously kept as a
    large commented-out block) has been disabled; this view now only renders
    the static index template. Removed the dead commented-out code and the
    unused local ``form = HackathonForm`` that was never passed to ``render``.
    """

    template_name = 'index.html'

    def get(self, request):
        """Render the hackathon landing page (no form context, as before)."""
        return render(request, self.template_name)
class CodeManiaView(generic.View):
    """Serve the CodeMania registration page.

    Registration handling (the former ``post`` method, previously kept as a
    large commented-out block) has been disabled; this view now only renders
    the page with an unbound registration form.
    """

    template_name = 'codemania.html'

    def get(self, request):
        """Render the CodeMania page, exposing the form class as ``form``."""
        return render(request, self.template_name, {'form': CodeManiaForm})
def handler404(request):
    """Render the custom 404 template and tag it with HTTP status 404."""
    context = RequestContext(request)
    not_found = render_to_response('404.html', {}, context_instance=context)
    not_found.status_code = 404
    return not_found
def handler500(request):
    """Render the custom 500 template and tag it with HTTP status 500."""
    context = RequestContext(request)
    server_error = render_to_response('500.html', {}, context_instance=context)
    server_error.status_code = 500
    return server_error
# Static page listing the hackathon problem statements.
def problems(request):
    return render_to_response('problems.html')
from django.contrib.auth.decorators import login_required
class LoginRequiredMixin(object):
    """Mixin that wraps the class-based view callable in ``login_required``."""

    @classmethod
    def as_view(cls, **initkwargs):
        """Return the usual view callable, gated behind authentication."""
        return login_required(super(LoginRequiredMixin, cls).as_view(**initkwargs))
class sendRSVP(LoginRequiredMixin, generic.View):
    """Send RSVP-confirmation emails to a newline-separated list of addresses.

    GET renders the form used to paste the recipient list; POST creates a
    ``SendRSVP`` record per address and mails each one a unique confirmation
    link.
    """

    def get(self, request):
        template_name = 'send_rsvp.html'
        return render(request, template_name, )

    def post(self, request):
        import uuid
        emails = request.POST.get('emails', None)
        emails = emails.replace('\r', '')
        emails = emails.split('\n')
        plaintext = get_template('rsvp_confirmation_mail.txt')
        htmly = get_template('rsvp_confirmation_mail.html')
        for email in emails:
            email = email.strip()
            # BUG FIX: the original guard `email is not None or email is not ""`
            # was always true (identity comparison against a literal, joined by
            # `or`), so blank lines in the textarea created SendRSVP rows and
            # triggered mail sends to "". Only process non-empty addresses.
            if email:
                uid = uuid.uuid4()
                SendRSVP.objects.create(email=email, uid=uid)
                d = Context({'uid': uid})
                subject, from_email, to = 'Confirmation: Hackathon-2015 ', 'Hackathon 2015 <mmil@jssaten.ac.in>', email
                text_content = plaintext.render(d)
                html_content = htmly.render(d)
                msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
                msg.attach_alternative(html_content, "text/html")
                msg.send()
        return HttpResponse(json.dumps({"Event": "Email Sent to selected list of participants"}), content_type="application/json")
class RSVP(generic.View):
    """Handle RSVP confirmation links, identified by the trailing URL segment."""

    def get(self, request, uid=None):
        """Show the confirmation form, or a notice if already confirmed."""
        template_name = 'rsvp_view.html'
        uid = str(request.get_full_path()).split('/')[-1]
        already_confirmed = RSVPConfirmation.objects.filter(sent_rsvp__uid=uid)
        if already_confirmed:
            return render(request, 'already_confirmed.html', )
        return render(request, template_name, {'uid': uid})

    def post(self, request, uid):
        """Record the confirmation once; subsequent attempts are forbidden."""
        uid = str(request.get_full_path()).split('/')[-1]
        if RSVPConfirmation.objects.filter(sent_rsvp__uid=uid).count() == 1:
            return HttpResponseForbidden()
        college = request.POST.get('college', '')
        status = request.POST.get('status', False)
        rsvp_obj = SendRSVP.objects.filter(uid=str(uid))[0]
        RSVPConfirmation.objects.create(status=status, college=college, sent_rsvp=rsvp_obj)
        return HttpResponse(json.dumps({"event": 1}), content_type="application/json")
| jsshack15/hackathon-2015 | hacks/views.py | Python | mit | 6,441 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build and return the Tangible object for this reactor-component template.

	`kernel` is the engine handle passed to every template factory; it is
	not used by this particular template.
	"""
	result = Tangible()
	result.template = "object/tangible/ship/components/reactor/shared_rct_sds_imperial_1.iff"
	# Attribute template 8: ship-component attribute set.
	result.attribute_template_id = 8
	result.stfName("space/space_item","rct_sds_imperial_1_n")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result | anhstudios/swganh | data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_sds_imperial_1.py | Python | mit | 482 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import assert_equal, wait_until, connect_nodes_bi
class NotificationsTest(StarwelsTestFramework):
    """Functional test for the -alertnotify/-blocknotify/-walletnotify hooks.

    Each hook is configured to `echo` its argument into a per-hook file in
    the test's tmpdir; the test then asserts on those files' contents.
    """
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
    def setup_network(self):
        # One file per notification hook; each `echo ... >>` appends one line.
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
        self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")
        # -alertnotify and -blocknotify on node0, walletnotify on node1
        # (%%s survives this %-format so the daemon receives a literal %s).
        self.extra_args = [["-blockversion=2",
                            "-alertnotify=echo %%s >> %s" % self.alert_filename,
                            "-blocknotify=echo %%s >> %s" % self.block_filename],
                           ["-blockversion=211",
                            "-rescan",
                            "-walletnotify=echo %%s >> %s" % self.tx_filename]]
        super().setup_network()
    def run_test(self):
        """Generate blocks and verify each notification file's contents."""
        self.log.info("test -blocknotify")
        block_count = 10
        blocks = self.nodes[1].generate(block_count)
        # wait at most 10 seconds for expected file size before reading the content
        # (65 bytes per entry: 64 hex characters of a hash plus the newline).
        wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
        # file content should equal the generated blocks hashes
        with open(self.block_filename, 'r') as f:
            assert_equal(sorted(blocks), sorted(f.read().splitlines()))
        self.log.info("test -walletnotify")
        # wait at most 10 seconds for expected file size before reading the content
        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
        # file content should equal the generated transaction hashes
        txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r') as f:
            assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        os.remove(self.tx_filename)
        self.log.info("test -walletnotify after rescan")
        # restart node to rescan to force wallet notifications
        self.restart_node(1)
        connect_nodes_bi(self.nodes, 0, 1)
        wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
        # file content should equal the generated transaction hashes
        txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
        with open(self.tx_filename, 'r') as f:
            assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
        self.log.info("test -alertnotify")
        self.nodes[1].generate(41)
        self.sync_all()
        # Give starwelsd 10 seconds to write the alert notification
        wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()
        # Mine more up-version blocks, should not get more alerts:
        self.nodes[1].generate(2)
        self.sync_all()
        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text2 = f.read()
        self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
        assert_equal(alert_text, alert_text2)
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    NotificationsTest().main()
| starwels/starwels | test/functional/feature_notifications.py | Python | mit | 4,003 |
"""pint_journal_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# URL routing: admin site first, then user pages, then the main app.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^user/', include('pints_user.urls')),
    # Catch-all: every remaining path is delegated to pints_main.
    url(r'^/?', include('pints_main.urls')),
]
| daterrell2/pint_journal_project | pint_journal_project/urls.py | Python | mit | 863 |
#! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
# Fixed seed so the sampled data -- and hence the printed estimates -- are
# reproducible across runs. (Python 2 script: uses print statements.)
RandomGenerator.SetSeed(0)

# Exercise ClaytonCopulaFactory three ways: estimate from a sample, build
# with defaults, and build from an explicit parameter collection; repeat
# with the typed buildAsClaytonCopula variants.
try:
    distribution = ClaytonCopula(1.5)
    size = 1000
    sample = distribution.getSample(size)
    factory = ClaytonCopulaFactory()
    estimatedDistribution = factory.build(sample)
    print "distribution=", repr(distribution)
    print "Estimated distribution=", repr(estimatedDistribution)
    estimatedDistribution = factory.build()
    print "Default distribution=", estimatedDistribution
    estimatedDistribution = factory.build(
        distribution.getParametersCollection())
    print "Distribution from parameters=", estimatedDistribution
    estimatedClaytonCopula = factory.buildAsClaytonCopula(sample)
    print "ClaytonCopula          =", distribution
    print "Estimated claytonCopula=", estimatedClaytonCopula
    estimatedClaytonCopula = factory.buildAsClaytonCopula()
    print "Default claytonCopula=", estimatedClaytonCopula
    estimatedClaytonCopula = factory.buildAsClaytonCopula(
        distribution.getParametersCollection())
    print "ClaytonCopula from parameters=", estimatedClaytonCopula
except:
    import sys
    # NOTE(review): sys.exc_type / sys.exc_value are deprecated Python 2
    # globals; sys.exc_info() would be the supported spelling -- confirm
    # before changing, as other t_*.py scripts use the same pattern.
    print "t_ClaytonCopulaFactory_std.py", sys.exc_type, sys.exc_value
| sofianehaddad/ot-svn | python/test/t_ClaytonCopulaFactory_std.py | Python | mit | 1,214 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build and return the Creature object for this mobile template.

	`kernel` is the engine handle passed to every template factory; it is
	not used by this particular template.
	"""
	result = Creature()
	result.template = "object/mobile/shared_nightspider.iff"
	# Attribute template 9: creature attribute set.
	result.attribute_template_id = 9
	result.stfName("monster_name","hermit_spider")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result | obi-two/Rebelion | data/scripts/templates/object/mobile/shared_nightspider.py | Python | mit | 438 |
#! /usr/bin/python
#-*- coding:utf-8 -*
import csv
from werkzeug import generate_password_hash
from web.models import User
from bootstrap import db
def create_user(email, name, password):
    """Persist a new, active user whose password is stored as a hash."""
    new_user = User(
        email=email,
        name=name,
        pwdhash=generate_password_hash(password),
        is_active=True,
    )
    db.session.add(new_user)
    db.session.commit()
| rodekruis/shelter-database | src/scripts/create_user.py | Python | mit | 386 |
"""
Utilities for Caltech BE/Bi 103.
Author: Justin Bois
"""
import collections
import random
import warnings
import numbers
import matplotlib.path as path
import numpy as np
import pandas as pd
import scipy.odr
import scipy.stats as st
import statsmodels.tools.numdiff as smnd
import skimage.io
import skimage.measure
import emcee
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import seaborn as sns
# ########################################################################## #
# COLOR CONVERSION UTILITIES #
# ########################################################################## #
def rgb_frac_to_hex(rgb_frac):
    """
    Convert fractional RGB values to a hexadecimal color string.

    Parameters
    ----------
    rgb_frac : array_like, shape (3,)
        Fractional RGB values; each entry is between 0 and 1.

    Returns
    -------
    str
        Hexadecimal string for the given RGB color.

    Examples
    --------
    >>> rgb_frac_to_hex((0.65, 0.23, 1.0))
    '#a53aff'

    >>> rgb_frac_to_hex((1.0, 1.0, 1.0))
    '#ffffff'
    """
    if len(rgb_frac) != 3:
        raise RuntimeError('`rgb_frac` must have exactly three entries.')

    channels = np.array(rgb_frac)
    if (channels < 0).any() or (channels > 1).any():
        raise RuntimeError('RGB values must be between 0 and 1.')

    # Truncate each channel to an integer in [0, 255], as before.
    red, green, blue = (int(frac * 255) for frac in rgb_frac)
    return '#{0:02x}{1:02x}{2:02x}'.format(red, green, blue)
def data_to_hex_color(x, palette, x_range=[0, 1], na_value='#000000'):
    """
    Map a scalar onto a hexadecimal color drawn from a palette.

    Parameters
    ----------
    x : float or int
        Value to be converted to a hexadecimal color.
    palette : list of 3-tuples
        Color palette as returned from seaborn.color_palette():
        a list of 3-tuples of fractional RGB values.
    x_range : array_like, shape (2,), default = [0, 1]
        Low and high value of the range of values `x` may assume.
    na_value : str, default = '#000000'
        Color returned when `x` is None or NaN.

    Returns
    -------
    str
        Hexadecimal color string.

    Examples
    --------
    >>> data_to_hex_color(0.7, sns.color_palette())
    '#ccb974'

    >>> data_to_hex_color(7.1, [(1, 0, 0), (0, 1, 0), (0, 0, 1)], [0, 10])
    '#0000ff'
    """
    # Missing data gets the sentinel color.
    if x is None or np.isnan(x):
        return na_value

    lo, hi = x_range[0], x_range[1]
    if x > hi or x < lo:
        raise RuntimeError('data outside of range')
    if x == hi:
        # Exactly at the top of the range: use the last palette entry so the
        # index below cannot run off the end.
        return rgb_frac_to_hex(palette[-1])

    # Fractional position of x within x_range selects the palette bin.
    frac_pos = (x - lo) / (hi - lo)
    return rgb_frac_to_hex(palette[int(frac_pos * len(palette))])
def im_merge_cmy(im_cyan, im_magenta, im_yellow=None):
"""
Merge channels to make RGB image that has cyan, magenta, and
yellow.
Parameters
----------
im_cyan: array_like
Image represented in cyan channel. Must be same shape
as `im_magenta` and `im_yellow`.
im_magenta: array_like
Image represented in magenta channel. Must be same shape
as `im_yellow` and `im_yellow`.
im_yellow: array_like
Image represented in yellow channel. Must be same shape
as `im_cyan` and `im_magenta`.
Returns
-------
output : array_like, dtype float, shape (*im_cyan.shape, 3)
RGB image the give CMY coloring of image
Notes
-----
.. All input images are streched so that their pixel intensities
go from 0 to 1.
"""
im_cyan_scaled = \
(im_cyan - im_cyan.min()) / (im_cyan.max() - im_cyan.min())
im_magenta_scaled = \
(im_magenta - im_magenta.min()) / (im_magenta.max() - im_magenta.min())
if im_yellow is None:
im_yellow_scaled = np.zeros_like(im_cyan)
else:
im_yellow_scaled = \
(im_yellow - im_yellow.min()) / (im_yellow.max() - im_yellow.min())
# Convert images to RGB with magenta, cyan, and yellow channels
im_cyan_scaled_rgb = np.dstack((np.zeros_like(im_cyan_scaled),
im_cyan_scaled,
im_cyan_scaled))
im_magenta_scaled_rgb = np.dstack((im_magenta_scaled,
np.zeros_like(im_magenta_scaled),
im_magenta_scaled))
im_yellow_scaled_rgb = np.dstack((im_yellow_scaled,
im_yellow_scaled,
np.zeros_like(im_yellow_scaled)))
# Merge together
merged_image = \
im_cyan_scaled_rgb + im_magenta_scaled_rgb + im_yellow_scaled_rgb
# Scale each channel to be between zero and 1
merged_image[:, :, 0] /= merged_image[:, :, 0].max()
merged_image[:, :, 1] /= merged_image[:, :, 1].max()
merged_image[:, :, 2] /= merged_image[:, :, 2].max()
return merged_image
# ########################################################################## #
# BOKEH UTILITIES #
# ########################################################################## #
def bokeh_matplot(df, i_col, j_col, data_col, data_range=None, n_colors=21,
                  label_ticks=True, colormap='RdBu_r', plot_width=1000,
                  plot_height=1000, x_axis_location='auto',
                  toolbar_location='left',
                  tools='reset,resize,hover,save,pan,box_zoom,wheel_zoom',
                  **kwargs):
    """
    Create Bokeh plot of a matrix.

    Parameters
    ----------
    df : Pandas DataFrame
        Tidy DataFrame to be plotted as a matrix.
    i_col : hashable object
        Column in `df` to be used for row indices of matrix.
    j_col : hashable object
        Column in `df` to be used for column indices of matrix.
    data_col : hashable object
        Column containing values to be plotted.  These values
        set which color is displayed in the plot and also are
        displayed in the hover tool.
    data_range : array_like, shape (2,)
        Low and high values that data may take, used for scaling
        the color.  Default is the range of the inputted data.
    n_colors : int, default = 21
        Number of colors to be used in colormap.
    label_ticks : bool, default = True
        If False, do not put tick labels
    colormap : str, default = 'RdBu_r'
        Any of the allowed seaborn colormaps.
    plot_width : int, default 1000
        Width of plot in pixels.
    plot_height : int, default 1000
        Height of plot in pixels.
    x_axis_location : str, default = None
        Location of the x-axis around the plot.  If 'auto' and first
        element of `df[i_col]` is numerical, x-axis will be placed below
        with the lower left corner as the origin.  Otherwise, above
        with the upper left corner as the origin.
    toolbar_location : str, default = 'left'
        Location of the Bokeh toolbar around the plot
    tools : str, default = 'reset,resize,hover,save,pan,box_zoom,wheel_zoom'
        Tools to show in the Bokeh toolbar
    **kwargs
        Arbitrary keyword arguments passed to bokeh.plotting.figure

    Returns
    -------
    Bokeh plotting object

    Examples
    --------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> data = np.array(np.unravel_index(range(9), a.shape) + (a.ravel(),)).T
    >>> df = pd.DataFrame(data, columns=['i', 'j', 'data'])
    >>> bokeh.plotting.output_file('test_matplot.html')
    >>> p = bokeh_matplot(df, 'i', 'j', 'data', n_colors=21,
                          colormap='RdBu_r', plot_width=1000,
                          plot_height=1000)
    >>> bokeh.plotting.show(p)
    """
    # Copy the DataFrame so the caller's frame is never mutated.
    df_ = df.copy()

    # Convert i, j to strings so not interpreted as physical space
    # (Bokeh treats string axis values as categorical factors).
    df_[i_col] = df_[i_col].astype(str)
    df_[j_col] = df_[j_col].astype(str)

    # Get data range; an explicit range must cover all of the data.
    if data_range is None:
        data_range = (df[data_col].min(), df[data_col].max())
    elif (data_range[0] > df[data_col].min()) \
            or (data_range[1] < df[data_col].max()):
        raise RuntimeError('Data out of specified range.')

    # Get colors
    palette = sns.color_palette(colormap, n_colors)

    # Compute colors for squares (one hex string per matrix cell).
    df_['color'] = df_[data_col].apply(data_to_hex_color,
                                       args=(palette, data_range))

    # Data source
    source = bokeh.plotting.ColumnDataSource(df_)

    # only reverse the y-axis and put the x-axis on top
    # if the x-axis is categorical:
    # NOTE(review): the numeric-vs-categorical test inspects df[j_col] even
    # though the docstring talks about df[i_col] -- confirm which is intended.
    if x_axis_location == 'auto':
        if isinstance(df[j_col].iloc[0], numbers.Number):
            y_range = list(df_[i_col].unique())
            x_axis_location = 'below'
        else:
            y_range = list(reversed(list(df_[i_col].unique())))
            x_axis_location = 'above'
    elif x_axis_location == 'above':
        y_range = list(reversed(list(df_[i_col].unique())))
    elif x_axis_location == 'below':
        y_range = list(df_[i_col].unique())

    # Set up figure
    p = bokeh.plotting.figure(x_range=list(df_[j_col].unique()),
                              y_range=y_range,
                              x_axis_location=x_axis_location,
                              plot_width=plot_width,
                              plot_height=plot_height,
                              toolbar_location=toolbar_location,
                              tools=tools, **kwargs)

    # Populate colored squares (unit-sized rects on the categorical grid).
    p.rect(j_col, i_col, 1, 1, source=source, color='color', line_color=None)

    # Set remaining properties
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    if label_ticks:
        p.axis.major_label_text_font_size = '8pt'
    else:
        # Hide tick labels by blanking both color and size.
        p.axis.major_label_text_color = None
        p.axis.major_label_text_font_size = '0pt'
    p.axis.major_label_standoff = 0
    p.xaxis.major_label_orientation = np.pi / 3

    # Build hover tool showing row, column, and the underlying value.
    hover = p.select(dict(type=bokeh.models.HoverTool))
    hover.tooltips = collections.OrderedDict([('i', ' @' + i_col),
                                              ('j', ' @' + j_col),
                                              (data_col, ' @' + data_col)])
    return p
def bokeh_boxplot(df, value, label, ylabel=None, sort=True, plot_width=650,
                  plot_height=450, box_fill_color='medium_purple',
                  background_fill_color='#DFDFE5',
                  tools='reset,resize,hover,save,pan,box_zoom,wheel_zoom',
                  **kwargs):
    """
    Make a Bokeh box plot from a tidy DataFrame.

    Parameters
    ----------
    df : tidy Pandas DataFrame
        DataFrame to be used for plotting
    value : hashable object
        Column of DataFrame containing data to be used.
    label : hashable object
        Column of DataFrame use to categorize.
    ylabel : str, default None
        Text for y-axis label
    sort : Boolean, default True
        If True, sort DataFrame by label so that x-axis labels are
        alphabetical.
    plot_width : int, default 650
        Width of plot in pixels.
    plot_height : int, default 450
        Height of plot in pixels.
    box_fill_color : string
        Fill color of boxes, default = 'medium_purple'
    background_fill_color : str, default = '#DFDFE5'
        Fill color of the plot background
    tools : str, default = 'reset,resize,hover,save,pan,box_zoom,wheel_zoom'
        Tools to show in the Bokeh toolbar
    **kwargs
        Arbitrary keyword arguments passed to bokeh.plotting.figure

    Returns
    -------
    Bokeh plotting object

    Example
    -------
    >>> cats = list('ABCD')
    >>> values = np.random.randn(200)
    >>> labels = np.random.choice(cats, 200)
    >>> df = pd.DataFrame({'label': labels, 'value': values})
    >>> bokeh.plotting.output_file('test_boxplot.html')
    >>> p = bokeh_boxplot(df, value='value', label='label')
    >>> bokeh.plotting.show(p)

    Notes
    -----
    .. Based largely on example code found here:
     https://github.com/bokeh/bokeh/blob/master/examples/plotting/file/boxplot.py
    """
    # NOTE(review): `box_fill_color` is accepted but never used -- the box
    # glyph below hard-codes fill_color="mediumpurple". Confirm intent
    # before honoring the parameter (its default is not a valid CSS name).

    # Sort DataFrame by labels for alphabetical x-labeling
    if sort:
        df_sort = df.sort_values(label)
    else:
        df_sort = df.copy()

    # Convert labels to string to allow categorical axis labels
    df_sort[label] = df_sort[label].astype(str)

    # Get the categories
    cats = list(df_sort[label].unique())

    # Group Data frame
    df_gb = df_sort.groupby(label)

    # Compute quartiles for each group
    q1 = df_gb[value].quantile(q=0.25)
    q2 = df_gb[value].quantile(q=0.5)
    q3 = df_gb[value].quantile(q=0.75)

    # Compute interquartile region and upper and lower bounds for outliers
    iqr = q3 - q1
    upper_cutoff = q3 + 1.5 * iqr
    lower_cutoff = q1 - 1.5 * iqr

    # Find the outliers for each category
    def outliers(group):
        # `group.name` is the category label; select points beyond the
        # 1.5*IQR cutoffs for that category.
        cat = group.name
        outlier_inds = (group[value] > upper_cutoff[cat]) | \
                       (group[value] < lower_cutoff[cat])
        return group[value][outlier_inds]

    # Apply outlier finder
    out = df_gb.apply(outliers).dropna()

    # Points of outliers for plotting
    outx = []
    outy = []
    if not out.empty:
        for cat in cats:
            # NOTE(review): assumes every category is present in `out`'s
            # index; a category with no outliers may raise KeyError on
            # `out[cat]` -- verify with the pandas version in use.
            if not out[cat].empty:
                for val in out[cat]:
                    outx.append(cat)
                    outy.append(val)

    # Shrink whiskers to smallest and largest non-outlier
    qmin = df_gb[value].min()
    qmax = df_gb[value].max()
    upper = upper_cutoff.combine(qmax, min)
    lower = lower_cutoff.combine(qmin, max)

    # Reindex to make sure ordering is right when plotting
    upper = upper.reindex(cats)
    lower = lower.reindex(cats)
    q1 = q1.reindex(cats)
    q2 = q2.reindex(cats)
    q3 = q3.reindex(cats)

    # Build figure
    p = bokeh.plotting.figure(x_range=cats,
                              background_fill_color=background_fill_color,
                              plot_width=plot_width, plot_height=plot_height,
                              tools=tools,
                              **kwargs)
    p.ygrid.grid_line_color = 'white'
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_width = 2
    p.yaxis.axis_label = ylabel

    # stems
    p.segment(cats, upper, cats, q3, line_width=2, line_color="black")
    p.segment(cats, lower, cats, q1, line_width=2, line_color="black")

    # boxes
    p.rect(cats, (q3 + q1) / 2, 0.5, q3 - q1, fill_color="mediumpurple",
           alpha=0.7, line_width=2, line_color="black")

    # median (almost-0 height rects simpler than segments)
    y_range = qmax.max() - qmin.min()
    p.rect(cats, q2, 0.5, 0.0001 * y_range, line_color="black",
           line_width=2, fill_color='black')

    # whiskers (almost-0 height rects simpler than segments with
    # categorial x-axis)
    p.rect(cats, lower, 0.2, 0.0001 * y_range, line_color='black',
           fill_color='black')
    p.rect(cats, upper, 0.2, 0.0001 * y_range, line_color='black',
           fill_color='black')

    # outliers
    p.circle(outx, outy, size=6, color='black')

    return p
def bokeh_imrgb(im, plot_height=400, plot_width=None,
                tools='pan,box_zoom,wheel_zoom,reset,resize'):
    """
    Make a Bokeh Figure instance displaying an RGB image.

    Parameters
    ----------
    im : ndarray
        Image to display. If it is a 2D array of dtype uint32, it is
        assumed to already be RGBA32-encoded and is displayed as-is;
        otherwise it is converted with rgb_to_rgba32().
    plot_height : int, default 400
        Height of the plot in pixels. If None, inferred from
        plot_width so that the image's aspect ratio is preserved.
    plot_width : int, default None
        Width of the plot in pixels. If None, inferred from
        plot_height so that the image's aspect ratio is preserved.
    tools : str
        Tools to show in the Bokeh toolbar.

    Returns
    -------
    output : bokeh.plotting figure
        Figure displaying the image.
    """
    # Make 32 bit image; pass through if already encoded
    if len(im.shape) == 2 and im.dtype == np.uint32:
        im_disp = im
    else:
        im_disp = rgb_to_rgba32(im)
    # Get shape
    n, m = im_disp.shape
    # Determine plot height and width, preserving aspect ratio
    if plot_height is not None and plot_width is None:
        plot_width = int(m/n * plot_height)
    elif plot_height is None and plot_width is not None:
        plot_height = int(n/m * plot_width)
    elif plot_height is None and plot_width is None:
        # BUG FIX: was misspelled `plot_heigt`, which left plot_height
        # as None and crashed the width computation below.
        plot_height = 400
        plot_width = int(m/n * plot_height)
    # Set up figure with appropriate dimensions
    p = bokeh.plotting.figure(plot_height=plot_height, plot_width=plot_width,
                              x_range=[0, m], y_range=[0, n], tools=tools)
    # Display the image, setting the origin and heights/widths properly
    p.image_rgba(image=[im_disp], x=0, y=0, dw=m, dh=n)
    return p
def bokeh_im(im, plot_height=400, plot_width=None,
             color_palette=bokeh.palettes.gray(256),
             tools='pan,box_zoom,wheel_zoom,reset,resize'):
    """
    Make a Bokeh Figure instance displaying a single-channel image
    through a linear color mapper.

    Parameters
    ----------
    im : ndarray, shape (n, m)
        Single-channel image to display.
    plot_height : int, default 400
        Height of the plot in pixels. If None, inferred from
        plot_width so that the image's aspect ratio is preserved.
    plot_width : int, default None
        Width of the plot in pixels. If None, inferred from
        plot_height so that the image's aspect ratio is preserved.
    color_palette : list of colors, default bokeh.palettes.gray(256)
        Palette used to color-map pixel intensities.
    tools : str
        Tools to show in the Bokeh toolbar.

    Returns
    -------
    output : bokeh.plotting figure
        Figure displaying the image.
    """
    # Get shape
    n, m = im.shape
    # Determine plot height and width, preserving aspect ratio
    if plot_height is not None and plot_width is None:
        plot_width = int(m/n * plot_height)
    elif plot_height is None and plot_width is not None:
        plot_height = int(n/m * plot_width)
    elif plot_height is None and plot_width is None:
        # BUG FIX: was misspelled `plot_heigt`, which left plot_height
        # as None and crashed the width computation below.
        plot_height = 400
        plot_width = int(m/n * plot_height)
    p = bokeh.plotting.figure(plot_height=plot_height, plot_width=plot_width,
                              x_range=[0, m], y_range=[0, n], tools=tools)
    # Set color mapper
    color = bokeh.models.LinearColorMapper(color_palette)
    # Display the image
    p.image(image=[im], x=0, y=0, dw=m, dh=n, color_mapper=color)
    return p
# ########################################################################## #
# MCMC UTILITIES #
# ########################################################################## #
def generic_log_posterior(log_prior, log_likelihood, params, logpargs=(),
                          loglargs=()):
    """
    Generic log posterior for MCMC calculations.

    Parameters
    ----------
    log_prior : function
        Computes the log prior; called as log_prior(params, *logpargs).
    log_likelihood : function
        Computes the log likelihood; called as
        log_likelihood(params, *loglargs).
    params : ndarray
        Parameter values at which to evaluate the posterior.
    logpargs : tuple, default ()
        Extra positional arguments for log_prior.
    loglargs : tuple, default ()
        Extra positional arguments for log_likelihood.

    Returns
    -------
    output : float
        Log posterior evaluated at `params`.
    """
    prior_val = log_prior(params, *logpargs)
    # Short-circuit: zero prior probability means zero posterior, so
    # skip the (possibly expensive) likelihood evaluation entirely.
    if prior_val == -np.inf:
        return -np.inf
    like_val = log_likelihood(params, *loglargs)
    return prior_val + like_val
def sampler_to_dataframe(sampler, columns=None):
    """
    Convert output of an emcee sampler to a Pandas DataFrame.

    Parameters
    ----------
    sampler : emcee.EnsembleSampler or emcee.PTSampler instance
        Sampler instance from which MCMC has already been run.
    columns : list, default None
        Names for the sampled variables. If None, sequential
        integers are used.

    Returns
    -------
    output : DataFrame
        Pandas DataFrame containing the samples. Each column is
        a variable, except: 'lnprob' and 'chain' for an
        EnsembleSampler, and 'lnlike', 'lnprob', 'beta_ind',
        'beta', and 'chain' for a PTSampler. These contain obvious
        values.

    Raises
    ------
    RuntimeError
        If `columns` contains a reserved name, or `sampler` is not a
        supported sampler type.
    """
    # Default column names are sequential integers, one per dimension.
    # BUG FIX: this must happen *before* the reserved-name check below;
    # previously `x in columns` raised TypeError when columns was None.
    if columns is None:
        columns = list(range(sampler.chain.shape[-1]))
    # Reserved names that would collide with the metadata columns
    invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
                            'beta_ind']
    if np.any([x in columns for x in invalid_column_names]):
        raise RuntimeError('You cannot name columns with any of these: '
                           + ' '.join(invalid_column_names))
    if isinstance(sampler, emcee.EnsembleSampler):
        n_walkers, n_steps, n_dim = sampler.chain.shape
        df = pd.DataFrame(data=sampler.flatchain, columns=columns)
        df['lnprob'] = sampler.flatlnprobability
        # Label each sample with the walker (chain) it came from
        df['chain'] = np.concatenate([i * np.ones(n_steps, dtype=int)
                                      for i in range(n_walkers)])
    elif isinstance(sampler, emcee.PTSampler):
        n_temps, n_walkers, n_steps, n_dim = sampler.chain.shape
        df = pd.DataFrame(
            data=sampler.flatchain.reshape(
                (n_temps * n_walkers * n_steps, n_dim)),
            columns=columns)
        df['lnlike'] = sampler.lnlikelihood.flatten()
        df['lnprob'] = sampler.lnprobability.flatten()
        # Label each sample with its temperature index and value
        beta_inds = [i * np.ones(n_steps * n_walkers, dtype=int)
                     for i, _ in enumerate(sampler.betas)]
        df['beta_ind'] = np.concatenate(beta_inds)
        df['beta'] = sampler.betas[df['beta_ind']]
        # Label each sample with the walker (chain) it came from
        chain_inds = [j * np.ones(n_steps, dtype=int)
                      for i, _ in enumerate(sampler.betas)
                      for j in range(n_walkers)]
        df['chain'] = np.concatenate(chain_inds)
    else:
        raise RuntimeError('Invalid sample input.')
    return df
def run_ensemble_emcee(log_post=None, n_burn=100, n_steps=100,
                       n_walkers=None, p_dict=None, p0=None, columns=None,
                       args=(), threads=None, thin=1, return_sampler=False,
                       return_pos=False):
    """
    Run emcee ensemble sampling.

    Parameters
    ----------
    log_post : function
        The function that computes the log posterior. Must be of
        the form log_post(p, *args), where p is a NumPy array of
        parameters that are sampled by the MCMC sampler.
    n_burn : int, default 100
        Number of burn steps
    n_steps : int, default 100
        Number of MCMC samples to take
    n_walkers : int
        Number of walkers, ignored if p0 is None
    p_dict : collections.OrderedDict
        Each entry is a tuple with the function used to generate
        starting points for the parameter and the arguments for
        the function. The starting point function must have the
        call signature f(*args_for_function, n_walkers). Ignored
        if p0 is not None.
    p0 : array
        n_walkers by n_dim array of initial starting values.
        p0[i,j] is the starting point for walk i along variable j.
        If provided, p_dict is ignored.
    columns : list of strings
        Name of parameters. These will be the column headings in the
        returned DataFrame. If None, either inferred from p_dict or
        assigned sequential integers.
    args : tuple
        Arguments passed to log_post
    threads : int
        Number of cores to use in calculation
    thin : int
        The number of iterations to perform between saving the
        state to the internal chain.
    return_sampler : bool, default False
        If True, return sampler as well as DataFrame with results.
    return_pos : bool, default False
        If True, additionally return position of the sampler.

    Returns
    -------
    df : pandas.DataFrame
        First columns give flattened MCMC chains, with columns
        named with the variable being sampled as a string.
        Other columns are:
          'chain': ID of chain
          'lnprob': Log posterior probability
    sampler : emcee.EnsembleSampler instance, optional
        The sampler instance.
    pos : ndarray, shape (nwalkers, ndim), optional
        Last position of the walkers.
    """
    if p0 is None and p_dict is None:
        raise RuntimeError('Must supply either p0 or p_dict.')
    # Infer n_dim and n_walkers (and check inputs)
    if p0 is None:
        if n_walkers is None:
            raise RuntimeError('n_walkers must be specified if p0 is None')
        if type(p_dict) is not collections.OrderedDict:
            raise RuntimeError('p_dict must be collections.OrderedDict.')
        n_dim = len(p_dict)
    else:
        n_walkers, n_dim = p0.shape
        if p_dict is not None:
            # BUG FIX: was `warnings.RuntimeWarning(...)`, which merely
            # constructs a warning object without ever issuing it.
            warnings.warn('p_dict is being ignored.', RuntimeWarning)
    # Infer columns
    if columns is None:
        if p_dict is not None:
            columns = list(p_dict.keys())
        else:
            columns = list(range(n_dim))
    elif len(columns) != n_dim:
        raise RuntimeError('len(columns) must equal number of parameters.')
    # Check for invalid column names
    invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
                            'beta_ind']
    if np.any([x in columns for x in invalid_column_names]):
        raise RuntimeError('You cannot name columns with any of these: '
                           + ' '.join(invalid_column_names))
    # Build starting points of walkers
    if p0 is None:
        p0 = np.empty((n_walkers, n_dim))
        for i, key in enumerate(p_dict):
            p0[:, i] = p_dict[key][0](*(p_dict[key][1] + (n_walkers,)))
    # Set up the EnsembleSampler instance
    if threads is not None:
        sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post,
                                        args=args, threads=threads)
    else:
        sampler = emcee.EnsembleSampler(n_walkers, n_dim, log_post,
                                        args=args)
    # Do burn-in
    if n_burn > 0:
        pos, _, _ = sampler.run_mcmc(p0, n_burn, storechain=False)
    else:
        pos = p0
    # Sample again, starting from end burn-in state
    pos, _, _ = sampler.run_mcmc(pos, n_steps, thin=thin)
    # Make DataFrame for results
    df = sampler_to_dataframe(sampler, columns=columns)
    # Set up return, keeping only the requested values in order
    return_vals = (df, sampler, pos)
    return_bool = (True, return_sampler, return_pos)
    ret = tuple([rv for rv, rb in zip(return_vals, return_bool) if rb])
    if len(ret) == 1:
        return ret[0]
    return ret
def run_pt_emcee(log_like, log_prior, n_burn, n_steps, n_temps=None,
                 n_walkers=None, p_dict=None, p0=None, columns=None,
                 loglargs=(), logpargs=(), threads=None, thin=1,
                 return_lnZ=False, return_sampler=False, return_pos=False):
    """
    Run emcee parallel-tempering sampling.

    Parameters
    ----------
    log_like : function
        The function that computes the log likelihood. Must be of
        the form log_like(p, *loglargs), where p is a NumPy array of
        parameters that are sampled by the MCMC sampler.
    log_prior : function
        The function that computes the log prior. Must be of
        the form log_prior(p, *logpargs), where p is a NumPy array of
        parameters that are sampled by the MCMC sampler.
    n_burn : int
        Number of burn steps
    n_steps : int
        Number of MCMC samples to take
    n_temps : int
        The number of temperatures to use in PT sampling. Required
        when p0 is None; otherwise inferred from p0.
    n_walkers : int
        Number of walkers
    p_dict : collections.OrderedDict
        Each entry is a tuple with the function used to generate
        starting points for the parameter and the arguments for
        the function. The starting point function must have the
        call signature f(*args_for_function, n_walkers). Ignored
        if p0 is not None.
    p0 : array
        n_walkers by n_dim array of initial starting values.
        p0[k,i,j] is the starting point for walk i along variable j
        for temperature k. If provided, p_dict is ignored.
    columns : list of strings
        Name of parameters. These will be the column headings in the
        returned DataFrame. If None, either inferred from p_dict or
        assigned sequential integers.
    loglargs : tuple
        Arguments passed to log_like
    logpargs : tuple
        Arguments passed to log_prior
    threads : int
        Number of cores to use in calculation
    thin : int
        The number of iterations to perform between saving the
        state to the internal chain.
    return_lnZ : bool, default False
        If True, additionally return lnZ and dlnZ.
    return_sampler : bool, default False
        If True, additionally return sampler.
    return_pos : bool, default False
        If True, additionally return position of the sampler.

    Returns
    -------
    df : pandas.DataFrame
        First columns give flattened MCMC chains, with columns
        named with the variable being sampled as a string.
        Other columns are:
          'chain': ID of chain
          'beta': Inverse temperature
          'beta_ind': Index of beta in list of betas
          'lnlike': Log likelihood
          'lnprob': Log posterior probability (with beta multiplying
                    log likelihood)
    lnZ : float, optional
        ln Z(1), which is equal to the evidence of the
        parameter estimation problem.
    dlnZ : float, optional
        The estimated error in the lnZ calculation.
    sampler : emcee.PTSampler instance, optional
        The sampler instance.
    pos : ndarray, shape (ntemps, nwalkers, ndim), optional
        Last position of the walkers.
    """
    if p0 is None and p_dict is None:
        raise RuntimeError('Must supply either p0 or p_dict.')
    # Infer n_dim, n_temps, and n_walkers (and check inputs)
    if p0 is None:
        if n_walkers is None:
            raise RuntimeError('n_walkers must be specified if p0 is None')
        # Explicit check; previously a missing n_temps crashed obscurely
        # in np.empty below.
        if n_temps is None:
            raise RuntimeError('n_temps must be specified if p0 is None')
        if type(p_dict) is not collections.OrderedDict:
            raise RuntimeError('p_dict must be collections.OrderedDict.')
        n_dim = len(p_dict)
    else:
        n_temps, n_walkers, n_dim = p0.shape
        if p_dict is not None:
            # BUG FIX: was `warnings.RuntimeWarning(...)`, which merely
            # constructs a warning object without ever issuing it.
            warnings.warn('p_dict is being ignored.', RuntimeWarning)
    # Infer columns
    if columns is None:
        if p_dict is not None:
            columns = list(p_dict.keys())
        else:
            columns = list(range(n_dim))
    elif len(columns) != n_dim:
        raise RuntimeError('len(columns) must equal number of parameters.')
    # Check for invalid column names
    invalid_column_names = ['lnprob', 'chain', 'lnlike', 'beta',
                            'beta_ind']
    if np.any([x in columns for x in invalid_column_names]):
        raise RuntimeError('You cannot name columns with any of these: '
                           + ' '.join(invalid_column_names))
    # Build starting points of walkers
    if p0 is None:
        p0 = np.empty((n_temps, n_walkers, n_dim))
        for i, key in enumerate(p_dict):
            p0[:, :, i] = p_dict[key][0](
                *(p_dict[key][1] + ((n_temps, n_walkers),)))
    # Set up the PTSampler instance
    if threads is not None:
        sampler = emcee.PTSampler(n_temps, n_walkers, n_dim, log_like,
                                  log_prior, loglargs=loglargs,
                                  logpargs=logpargs, threads=threads)
    else:
        sampler = emcee.PTSampler(n_temps, n_walkers, n_dim, log_like,
                                  log_prior, loglargs=loglargs,
                                  logpargs=logpargs)
    # Do burn-in
    if n_burn > 0:
        pos, _, _ = sampler.run_mcmc(p0, n_burn, storechain=False)
    else:
        pos = p0
    # Sample again, starting from end burn-in state
    pos, _, _ = sampler.run_mcmc(pos, n_steps, thin=thin)
    # Compute thermodynamic integral for the evidence
    lnZ, dlnZ = sampler.thermodynamic_integration_log_evidence(fburnin=0)
    # Make DataFrame for results
    df = sampler_to_dataframe(sampler, columns=columns)
    # Set up return, keeping only the requested values in order
    return_vals = (df, lnZ, dlnZ, sampler, pos)
    return_bool = (True, return_lnZ, return_lnZ, return_sampler, return_pos)
    ret = tuple([rv for rv, rb in zip(return_vals, return_bool) if rb])
    if len(ret) == 1:
        return ret[0]
    return ret
def lnZ(df_mcmc):
    """
    Compute ln Z(1) from PTMCMC traces stored in a DataFrame.

    Parameters
    ----------
    df_mcmc : pandas DataFrame, as outputted from run_pt_emcee.
        DataFrame containing output of a parallel tempering MCMC
        run. Only needs the columns pertinent to computing ln Z,
        namely 'beta_ind', 'lnlike', and 'beta'.

    Returns
    -------
    output : float
        ln Z as computed by thermodynamic integration. This is
        equivalent to what is obtained by calling
        `sampler.thermodynamic_integration_log_evidence(fburnin=0)`
        where `sampler` is an emcee.PTSampler instance.

    Notes
    -----
    .. This is useful when the DataFrame from a PTSampler is too
       large to store in RAM.
    """
    # Mean log likelihood at each inverse temperature, in order of
    # appearance of the temperature index in the DataFrame
    temp_inds = df_mcmc['beta_ind'].unique()
    log_mean = np.array(
        [df_mcmc['lnlike'][df_mcmc['beta_ind'] == b].mean()
         for b in temp_inds])
    # Inverse temperatures, with 0 appended as the final quadrature node
    betas = np.concatenate((df_mcmc['beta'].unique(), (0,)))
    # Trapezoid-style quadrature over inverse temperature
    return np.dot(log_mean, -np.diff(betas))
def extract_1d_hist(samples, nbins=100, density=True):
    """
    Compute a 1d histogram with x-values at bin centers.
    Meant to be used with MCMC samples.

    Parameters
    ----------
    samples : array
        1D array of MCMC samples
    nbins : int
        Number of bins in histogram
    density : bool, optional
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

    Returns
    -------
    count : array, shape (nbins,)
        The counts, appropriately weighted depending on the
        `density` kwarg, for the histogram.
    x : array, shape (nbins,)
        The positions of the bin centers.
    """
    # Obtain histogram
    # BUG FIX: previously referenced undefined name `trace`; the
    # parameter is `samples`, so every call raised NameError.
    count, bins = np.histogram(samples, bins=nbins, density=density)
    # Make the bins into the bin centers, not the edges
    x = (bins[:-1] + bins[1:]) / 2.0
    return count, x
def extract_2d_hist(samples_x, samples_y, bins=100, density=True,
                    meshgrid=False):
    """
    Compute a 2d histogram with x,y-values at bin centers.
    Meant to be used with MCMC samples.

    Parameters
    ----------
    samples_x : array
        1D array of MCMC samples for x-axis
    samples_y : array
        1D array of MCMC samples for y-axis
    bins : int
        Number of bins in histogram. The same binning is
        used in the x and y directions.
    density : bool, optional
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
    meshgrid : bool, optional
        If True, the returned `x` and `y` arrays are two-dimensional
        as constructed with np.meshgrid(). If False, `x` and `y`
        are returned as 1D arrays.

    Returns
    -------
    count : array, shape (nbins, nbins)
        The counts, appropriately weighted depending on the
        `density` kwarg, for the histogram.
    x : array, shape either (nbins,) or (nbins, nbins)
        The positions of the bin centers on the x-axis.
    y : array, shape either (nbins,) or (nbins, nbins)
        The positions of the bin centers on the y-axis.
    """
    # Obtain histogram
    # BUG FIX: the `normed` keyword was removed from np.histogram2d in
    # NumPy 1.24; `density` is the supported equivalent.
    count, x_bins, y_bins = np.histogram2d(samples_x, samples_y, bins=bins,
                                           density=density)
    # Make the bins into the bin centers, not the edges
    x = (x_bins[:-1] + x_bins[1:]) / 2.0
    y = (y_bins[:-1] + y_bins[1:]) / 2.0
    # Make mesh grid out of x_bins and y_bins
    # NOTE(review): the swapped assignment below matches the transposed
    # count returned at the end; kept as in the original.
    if meshgrid:
        y, x = np.meshgrid(x, y)
    return count.transpose(), x, y
def norm_cumsum_2d(sample_x, sample_y, bins=100, meshgrid=False):
    """
    Returns the normalized cumulative sum of a 2D histogram of two
    sets of samples, with bins sorted from most- to least-populated.

    Parameters
    ----------
    sample_x : array
        1D array of MCMC samples for x-axis
    sample_y : array
        1D array of MCMC samples for y-axis
    bins : int
        Number of bins in histogram. The same binning is
        used in the x and y directions.
    meshgrid : bool, optional
        If True, the returned `x` and `y` arrays are two-dimensional
        as constructed with np.meshgrid(). If False, `x` and `y`
        are returned as 1D arrays.

    Returns
    -------
    norm_cumsum : array, shape (nbins, nbins)
        Normalized cumulative sum of the binned samples. An
        isocontour of this surface at level alpha encompasses a
        fraction alpha of the total probability.
    x : array, shape either (nbins,) or (nbins, nbins)
        The positions of the bin centers on the x-axis.
    y : array, shape either (nbins,) or (nbins, nbins)
        The positions of the bin centers on the y-axis.

    Notes
    -----
    .. To make a contour plot with contour lines drawn to contain
       68.27, 95.45, and 99.73% of the total probability, use the
       output of this function as:
       plt.contourf(x, y, norm_cumsum, levels=(0.6827, 0.9545, 0.9973))
    """
    # Raw (unnormalized) 2D histogram of the samples
    counts, x, y = extract_2d_hist(sample_x, sample_y, bins=bins,
                                   density=False, meshgrid=meshgrid)
    # Flatten, remembering the 2D shape for the final reshape
    orig_shape = counts.shape
    flat = counts.ravel()
    # Order bins from most- to least-populated, and keep the
    # permutation needed to undo that ordering
    order = np.argsort(flat)[::-1]
    inverse_order = np.argsort(order)
    # Cumulative sum over the sorted bins, normalized to end at 1
    csum = flat[order].cumsum()
    csum = csum / csum[-1]
    # Undo the sort and restore the 2D shape
    return csum[inverse_order].reshape(orig_shape), x, y
def hpd(trace, mass_frac):
    """
    Returns the highest probability density region given by
    a set of samples.

    Parameters
    ----------
    trace : array
        1D array of MCMC samples for a single variable
    mass_frac : float with 0 < mass_frac <= 1
        The fraction of the probability to be included in
        the HPD. For example, `mass_frac` = 0.95 gives a
        95% HPD.

    Returns
    -------
    output : array, shape (2,)
        The bounds of the HPD
    """
    # Sorted copy of the samples (original trace left untouched)
    ordered = np.sort(np.copy(trace))
    n_total = len(trace)
    # Number of samples the HPD interval must contain
    n_in = np.floor(mass_frac * n_total).astype(int)
    # Width of every contiguous interval holding n_in samples
    widths = ordered[n_in:] - ordered[:n_total - n_in]
    # The HPD is the narrowest such interval
    left = np.argmin(widths)
    return np.array([ordered[left], ordered[left + n_in]])
# ########################################################################## #
# IMAGE PROCESSING UTILITIES #
# ########################################################################## #
class SimpleImageCollection(object):
    """
    Load a collection of images.

    Parameters
    ----------
    load_pattern : string or list
        If string, uses glob to generate list of files containing
        images. If list, this is the list of files containing images.
    load_func : callable, default skimage.io.imread
        Function to be called to load images.
    conserve_memory : bool, default True
        If True, do not load all images into RAM. If False, load
        all into a list.

    Returns
    -------
    ic : SimpleImageCollection instance
        ic[n] gives image n of the image collection.

    Notes
    -----
    .. Any keyword arguments except those listed above are passed into
       load_func as kwargs.
    .. This is a much simplified (and therefore faster) version of
       skimage.io.ImageCollection.
    """
    def __init__(self, load_pattern, load_func=skimage.io.imread,
                 conserve_memory=True, **load_func_kwargs):
        if isinstance(load_pattern, str):
            self.fnames = glob.glob(load_pattern)
        else:
            self.fnames = load_pattern
        self.conserve_memory = conserve_memory
        if self.conserve_memory:
            self.load_func = load_func
            # BUG FIX: was stored as `self.kwargs`, but __getitem__
            # reads `self.load_func_kwargs`, so every lazy access
            # raised AttributeError.
            self.load_func_kwargs = load_func_kwargs
        else:
            # Eagerly load all images into memory
            self.ims = [load_func(f, **load_func_kwargs) for f in self.fnames]

    def __getitem__(self, n):
        """
        Return image n, loading it lazily if conserve_memory is True.
        """
        if self.conserve_memory:
            return self.load_func(self.fnames[n], **self.load_func_kwargs)
        else:
            return self.ims[n]
def simple_image_collection(im_glob, load_func=skimage.io.imread,
                            conserve_memory=True, **load_func_kwargs):
    """
    Convenience wrapper: build and return a SimpleImageCollection.

    Parameters
    ----------
    im_glob : string or list
        If string, uses glob to generate list of files containing
        images. If list, this is the list of files containing images.
    load_func : callable, default skimage.io.imread
        Function to be called to load images.
    conserve_memory : bool, default True
        If True, do not load all images into RAM. If False, load
        all into a list.

    Returns
    -------
    ic : SimpleImageCollection instance
        ic[n] gives image n of the image collection.

    Notes
    -----
    .. Any keyword arguments except those listed above are passed into
       load_func as kwargs.
    .. This is a much simplified (and therefore faster) version of
       skimage.io.ImageCollection.
    """
    collection = SimpleImageCollection(im_glob, load_func=load_func,
                                       conserve_memory=conserve_memory,
                                       **load_func_kwargs)
    return collection
def rgb_to_rgba32(im, flip=True):
    """
    Convert an RGB image to a 32 bit-encoded RGBA image.

    Parameters
    ----------
    im : nd_array, shape (m, n, 3)
        Input m by n RGB image.
    flip : bool, default True
        If True, up-down flip the image. This is useful
        for display with Bokeh.

    Returns
    -------
    output : nd_array, shape (m, n), dtype uint32
        RGBA image encoded as 32-bit unsigned integers.

    Notes
    -----
    .. The input image is converted to 8-bit and then encoded
       as 32-bit. The main use for this function is encoding images
       for display with Bokeh, so this data loss is ok.
    """
    # Ensure it has three channels
    if len(im.shape) != 3 or im.shape[2] != 3:
        raise RuntimeError('Input image is not RGB.')
    # Get image shape
    n, m, _ = im.shape
    # Convert to 8-bit, which is expected for viewing
    im_8 = skimage.img_as_ubyte(im)
    # Add the alpha channel (fully opaque), which is expected by Bokeh
    im_rgba = np.dstack((im_8, 255*np.ones_like(im_8[:, :, 0])))
    # Reshape into 32 bit.
    # BUG FIX: view as uint32 (not int32) so the result matches the
    # dtype check in bokeh_imrgb and Bokeh's image_rgba expectations.
    im_32 = im_rgba.view(dtype=np.uint32).reshape(n, m)
    # BUG FIX: the `flip` keyword was previously ignored and the image
    # was always flipped; honor it.
    if flip:
        return np.flipud(im_32)
    return im_32
def verts_to_roi(verts, size_i, size_j):
    """
    Converts list of vertices to an ROI and ROI bounding box

    Parameters
    ----------
    verts : array_like, shape (n_verts, 2)
        List of vertices of a polygon with no crossing lines. The units
        describing the positions of the vertices are interpixel spacing.
    size_i : int
        Number of pixels in the i-direction (number of rows) in
        the image
    size_j : int
        Number of pixels in the j-direction (number of columns) in
        the image

    Returns
    -------
    roi : array_like, Boolean, shape (size_i, size_j)
        roi[i,j] is True if pixel (i,j) is in the ROI.
        roi[i,j] is False otherwise
    roi_bbox : tuple of slice objects
        To get a subimage with the bounding box of the ROI, use
        im[roi_bbox].
    roi_box : array_like, shape is size of bounding box or ROI
        A mask for the ROI with the same dimension as the bounding
        box. The indexing starts at zero at the upper right corner
        of the box.
    """
    # Make list of all points in the image in units of pixels
    i = np.arange(size_i)
    j = np.arange(size_j)
    ii, jj = np.meshgrid(j, i)
    pts = np.array(list(zip(ii.ravel(), jj.ravel())))
    # Make a path object from vertices
    p = path.Path(verts)
    # Get list of points that are in roi
    in_roi = p.contains_points(pts)
    # Convert it to an image
    # BUG FIX: np.bool and np.int were removed in NumPy 1.24; use the
    # builtin bool/int, which NumPy accepts as dtypes.
    roi = in_roi.reshape((size_i, size_j)).astype(bool)
    # Get bounding box of ROI
    regions = skimage.measure.regionprops(roi.astype(int))
    bbox = regions[0].bbox
    roi_bbox = np.s_[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1]
    # Get ROI mask for just within bounding box
    roi_box = roi[roi_bbox]
    # Return boolean in same shape as image
    return (roi, roi_bbox, roi_box)
class CostesColocalization(object):
    """
    Simple results container: every keyword argument passed to the
    constructor becomes an attribute of the instance.
    """
    def __init__(self, **kw):
        self.__dict__.update(kw)
def costes_coloc(im_1, im_2, psf_width=3, n_scramble=1000, thresh_r=0.0,
                 roi=None, roi_method='all', do_manders=True):
    """
    Perform Costes colocalization analysis on a pair of images.

    Parameters
    ----------
    im_1: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_1`.
    psf_width: int, default 3
        Width, in pixels, of the point spread function.
    n_scramble: int, default 1000
        Number of scrambled image comparisons to do to get statistics.
    thresh_r: float, default 0.0
        Threshold Pearson r value to be considered colocalized.
    roi: array_like, dtype bool, default None
        Boolean image the same shape as `im_1` and `im_2` that
        is True for pixels within the ROI.
    roi_method: str, default 'all'
        If 'all', all pixels of a given subimage must be within
        the ROI for the subimage itself to be considered part
        of the ROI. If 'any', if any one pixel is within the ROI,
        the subimage is considered part of the ROI.
    do_manders: bool, default True
        If True, compute the Manders coefficients.

    Returns
    -------
    output: A CostesColocalization instance.
        The CostesColocalization instance has the following attributes.
            im_1, im_2, psf_width, n_scramble, thresh_r, roi,
                roi_method: As in the input parameters.
            a: slope of the regression line I_2 = a * I_1 + b
            b: intercept of regression line I_2 = a * I_1 + b
            M_1: Manders coefficient for image 1
            M_2: Manders coefficient for image 2
            pearson_r: Pearson correlation coefficient of the pixels
                in the two images.
            p_coloc: The probability of colocalization being present
                in the two images.
    """
    # Make mirrored boundaries in preparation for scrambling, so the
    # image dimensions become multiples of psf_width
    im_1_mirror = mirror_edges(im_1, psf_width)
    im_2_mirror = mirror_edges(im_2, psf_width)
    # Set up ROI: default is the whole image
    if roi is None:
        roi = np.ones_like(im_1, dtype='bool')
    # Rename images to be sliced ROI (note: im_1/im_2 become 1D from
    # here on; the original full images are kept in *_mirror)
    im_1 = im_1[roi]
    im_2 = im_2[roi]
    # Mirror ROI at edges to match the mirrored images
    roi_mirror = mirror_edges(roi, psf_width)
    # Compute the PSF-sized blocks that we'll scramble
    blocks_1 = im_to_blocks(im_1_mirror, psf_width, roi_mirror, roi_method)
    blocks_2 = im_to_blocks(im_2_mirror, psf_width, roi_mirror, roi_method)
    # Flatten second list of blocks for Pearson calculations
    blocks_2_flat = np.array(blocks_2).flatten()
    # Compute the Pearson coefficient of the unscrambled images
    pearson_r, _ = st.pearsonr(np.array(blocks_1).ravel(), blocks_2_flat)
    # Do image scrambling and r calculations to build the null
    # distribution of Pearson r values
    r_scr = np.empty(n_scramble)
    for i in range(n_scramble):
        # Shuffle blocks of image 1 in place; image 2 stays fixed
        random.shuffle(blocks_1)
        # NOTE(review): scipy.stats vs the st alias above — presumably
        # both name the same module; verify against the file's imports
        r, _ = scipy.stats.pearsonr(np.array(blocks_1).ravel(), blocks_2_flat)
        r_scr[i] = r
    # Compute percent chance of coloc: fraction of scrambled images
    # with lower correlation than the real one
    p_coloc = (r_scr < pearson_r).sum() / n_scramble
    # Now do work to compute adjusted Manders's coefficients
    if do_manders:
        # Get the linear relationship between im_2 and im_1 by
        # orthogonal distance regression
        a, b = odr_linear(im_1.ravel(), im_2.ravel())
        # Perform threshold calculation (see find_thresh)
        thresh_1 = find_thresh(im_1, im_2, a, b, thresh_r=thresh_r)
        thresh_2 = a * thresh_1 + b
        # Compute Costes's update to the Manders's coefficients:
        # fraction of each image's total intensity in above-threshold
        # pixels of both images
        inds = (im_1 > thresh_1) & (im_2 > thresh_2)
        M_1 = im_1[inds].sum() / im_1.sum()
        M_2 = im_2[inds].sum() / im_2.sum()
        # Toss results into class for returning
        return CostesColocalization(
            im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
            psf_width=psf_width, n_scramble=n_scramble, thresh_r=thresh_r,
            thresh_1=thresh_1, thresh_2=thresh_2, a=a, b=b, M_1=M_1,
            M_2=M_2, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
    else:
        # No Manders calculation: fill related fields with None
        return CostesColocalization(
            im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
            psf_width=psf_width, n_scramble=n_scramble, thresh_r=None,
            thresh_1=None, thresh_2=None, a=None, b=None, M_1=None,
            M_2=None, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
def odr_linear(x, y, intercept=None, beta0=None):
    """
    Performs orthogonal linear regression on x, y data.

    Parameters
    ----------
    x: array_like
        x-data, 1D array. Must be the same lengths as `y`.
    y: array_like
        y-data, 1D array. Must be the same lengths as `x`.
    intercept: float, default None
        If not None, fixes the intercept.
    beta0: array_like, shape (2,)
        Guess at the slope and intercept, respectively.

    Returns
    -------
    output: ndarray, shape (2,), or (1,) if intercept is fixed
        Array containing slope and intercept of ODR line (slope
        only when `intercept` is fixed).
    """
    # Model with free slope and intercept
    def linear_fun(p, x):
        return p[0] * x + p[1]

    # Model with the intercept fixed at the given value
    def linear_fun_fixed(p, x):
        return p[0] * x + intercept

    # Set the model to be used for the ODR fitting
    if intercept is None:
        model = scipy.odr.Model(linear_fun)
        if beta0 is None:
            beta0 = (0.0, 1.0)
    else:
        model = scipy.odr.Model(linear_fun_fixed)
        if beta0 is None:
            beta0 = (1.0,)
    # Make a Data instance
    data = scipy.odr.Data(x, y)
    # Instantiate ODR
    odr = scipy.odr.ODR(data, model, beta0=beta0)
    # Perform ODR fit
    try:
        result = odr.run()
    # BUG FIX: scipy.odr.odr_error no longer exists in modern SciPy
    # (renamed OdrError); also fixed the "ORD" typo and preserved the
    # original failure as the exception cause.
    except scipy.odr.OdrError as err:
        raise scipy.odr.OdrError('ODR failed.') from err
    return result.beta
def find_thresh(im_1, im_2, a, b, thresh_r=0.0):
    """
    Find the threshold pixel intensity of `im_1` where
    the Pearson correlation between the images goes below `thresh_r`.

    Parameters
    ----------
    im_1: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the
        same shape as `im_1`.
    a: float
        Slope of the ODR regression of `im_2` vs. `im_1`.
    b: float
        Intercept of the ODR regression of `im_2` vs. `im_1`.
    thresh_r: float, default 0.0
        Threshold Pearson correlation

    Returns
    -------
    output: int or float
        The threshold pixel intensity for colocalization
        (see notes below).

    Notes
    -----
    .. To determine which pixels are colocalized in two images, we
       do the following:
         1. Perform a regression based on all points of to give
            I_2 = a * I_1 + b.
         2. Define T = I_1.max().
         3. Compute the Pearson r value considering all pixels with
            I_1 < T and I_2 < a * T + b.
         4. If r <= thresh_r decrement T and goto 3. Otherwise,
            save $T_1 = T$ and $T_2 = a * T + b.
         5. Pixels with I_2 > T_2 and I_1 > T_1 are colocalized.
       This function returns T.
    """
    # Step size: one intensity level for 8/16-bit integer images,
    # otherwise 1/256 of the intensity range
    if im_1.dtype not in [np.uint16, np.uint8]:
        incr = (im_1.max() - im_1.min()) / 256.0
    else:
        incr = 1
    thresh_max = im_1.max()
    thresh_min = im_1.min()
    # Start at the maximum intensity and scan downward
    thresh = thresh_max
    r = pearsonr_below_thresh(thresh, im_1, im_2, a, b)
    # Track the threshold giving the smallest r seen, used as a
    # fallback if we scan all the way down without crossing thresh_r
    min_r = r
    min_thresh = thresh
    while thresh > thresh_min and r > thresh_r:
        thresh -= incr
        r = pearsonr_below_thresh(thresh, im_1, im_2, a, b)
        if min_r > r:
            min_r = r
            min_thresh = thresh
    # If we exhausted the range without r dropping to thresh_r, fall
    # back to the threshold that minimized r
    if thresh == thresh_min:
        thresh = min_thresh
    return thresh
def pearsonr_below_thresh(thresh, im_1, im_2, a, b):
    """
    Compute the Pearson correlation between two images, restricted
    to pixels below the intensity threshold.

    Parameters
    ----------
    thresh: float or int
        Threshold on `im_1` pixel intensities; the corresponding
        threshold on `im_2` is `a * thresh + b`.
    im_1: array_like
        Intensity image for colocalization. Must be the same
        shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the same
        shape as `im_1`.
    a: float
        Slope of the ODR regression of `im_2` vs. `im_1`.
    b: float
        Intercept of the ODR regression of `im_2` vs. `im_1`.

    Returns
    -------
    output: float
        Pearson correlation coefficient over the selected pixels.
    """
    # Keep any pixel that is below threshold in either channel,
    # i.e. exclude only pixels above threshold in both channels.
    below_thresh = (im_1 <= thresh) | (im_2 <= a * thresh + b)
    corr_coeff, _ = st.pearsonr(im_1[below_thresh], im_2[below_thresh])
    return corr_coeff
def mirror_edges(im, psf_width):
    """
    Pad the boundaries of a 2D image by mirroring so that both
    dimensions of the result are multiples of the width of the
    point spread function.

    Parameters
    ----------
    im: array_like
        Image whose edges are to be mirrored.
    psf_width: int
        The width, in pixels, of the point spread function.

    Returns
    -------
    output: array_like
        Image with mirrored edges; both dimensions are multiples
        of `psf_width`.
    """
    # Total padding needed in each dimension. The negated-modulo form
    # gives zero when the dimension is already a multiple of psf_width;
    # the previous `psf_width - (dim % psf_width)` form needlessly
    # padded a full extra psf_width in that case.
    pad_i = -im.shape[0] % psf_width
    pad_j = -im.shape[1] % psf_width

    # Split each dimension's padding between its two edges.
    pad_top = pad_i // 2
    pad_bottom = pad_i - pad_top
    pad_left = pad_j // 2
    pad_right = pad_j - pad_left

    # Mirror-pad without repeating the edge pixel itself.
    return np.pad(im, ((pad_top, pad_bottom), (pad_left, pad_right)),
                  mode='reflect')
def im_to_blocks(im, width, roi=None, roi_method='all'):
    """
    Converts image to list of square subimages called "blocks."

    Parameters
    ----------
    im: array_like
        Image to convert to a list of blocks.
    width: int
        Width of square blocks in units of pixels.
    roi: array_like, dtype bool, default None
        Boolean image the same shape as `im` that is True for
        pixels within the ROI. If None, the whole image is taken
        to be within the ROI.
    roi_method: str, default 'all'
        If 'all', all pixels of a given subimage must be within
        the ROI for the subimage itself to be considered part
        of the ROI. Otherwise, if any one pixel is within the ROI,
        the subimage is considered part of the ROI.

    Returns
    -------
    output: list of ndarrays
        Each entry is a `width` by `width` NumPy array containing
        a block.
    """
    # Default ROI: every pixel is in the region of interest. Use a
    # boolean array (rather than ones of the image's dtype) so the
    # ROI is semantically a mask.
    if roi is None:
        roi = np.ones(im.shape, dtype=bool)

    # Reducer that decides ROI membership of a block from its pixels
    if roi_method == 'all':
        roi_test = np.all
    else:
        roi_test = np.any

    # Tile the image into width x width blocks, keeping those in the ROI
    return [im[i:i + width, j:j + width]
            for i in range(0, im.shape[0], width)
            for j in range(0, im.shape[1], width)
            if roi_test(roi[i:i + width, j:j + width])]
# ########################################################################## #
# GENERAL UTILITIES #
# ########################################################################## #
def ecdf(data, conventional=False, buff=0.1, min_x=None, max_x=None):
    """
    Computes the x and y values for an ECDF of a one-dimensional
    data set.

    Parameters
    ----------
    data : array_like
        Array of data to be plotted as an ECDF.
    conventional : bool, default False
        If True, generates x,y values for "conventional" ECDF, which
        give staircase style ECDF when plotted as plt.plot(x, y, '-').
        Otherwise, gives points x,y corresponding to the concave
        corners of the conventional ECDF, plotted as
        plt.plot(x, y, '.').
    buff : float, default 0.1
        How long the tails at y = 0 and y = 1 should extend as a
        fraction of the total range of the data. Ignored if
        `conventional` is False.
    min_x : float, default -np.inf
        If min_x is greater than extent computed from `buff`, tail at
        y = 0 extends to min_x. Ignored if `conventional` is False.
    max_x : float, default np.inf
        If max_x is less than extent computed from `buff`, tail at
        y = 1 extends to max_x. Ignored if `conventional` is False.

    Returns
    -------
    x : array_like, shape (n_data, )
        The x-values for plotting the ECDF.
    y : array_like, shape (n_data, )
        The y-values for plotting the ECDF.
    """
    # Concave-corner representation: sorted data vs. fraction of data
    # less than or equal to each point.
    x, y = np.sort(data), np.arange(1, len(data)+1) / len(data)

    if conventional:
        # Set defaults for min and max tails
        if min_x is None:
            min_x = -np.inf
        if max_x is None:
            max_x = np.inf

        # Two points per datum, plus the two tail points
        x_conv = np.empty(2*(len(x) + 1))
        y_conv = np.empty(2*(len(x) + 1))

        # y-values for steps: flat tail at 0, then each level twice
        y_conv[:2] = 0
        y_conv[2::2] = y
        y_conv[3::2] = y

        # x-values for steps; tails clipped to [min_x, max_x]
        x_conv[0] = max(min_x, x[0] - (x[-1] - x[0])*buff)
        x_conv[1] = x[0]
        x_conv[2::2] = x
        x_conv[3:-1:2] = x[1:]
        x_conv[-1] = min(max_x, x[-1] + (x[-1] - x[0])*buff)

        return x_conv, y_conv

    return x, y
def approx_hess(x, f, epsilon=None, args=(), kwargs={}):
    """
    Parameters
    ----------
    x : array_like
        value at which function derivative is evaluated
    f : function
        function of one array f(x, `*args`, `**kwargs`)
    epsilon : float or array-like, optional
        Stepsize used, if None, then stepsize is automatically chosen
        according to EPS**(1/4)*x.
    args : tuple
        Arguments for function `f`.
    kwargs : dict
        Keyword arguments for function `f`.
    Returns
    -------
    hess : ndarray
        array of partial second derivatives, Hessian
    Notes
    -----
    Equation (9) in Ridout. Computes the Hessian as::
      1/(4*d_j*d_k) * ((f(x + d[j]*e[j] + d[k]*e[k]) - f(x + d[j]*e[j]
                                                     - d[k]*e[k])) -
                 (f(x - d[j]*e[j] + d[k]*e[k]) - f(x - d[j]*e[j]
                                                     - d[k]*e[k]))
    where e[j] is a vector with element j == 1 and the rest are zero and
    d[i] is epsilon[i].
    References
    ----------:
    Ridout, M.S. (2009) Statistical applications of the complex-step method
    of numerical differentiation. The American Statistician, 63, 66-74
    Copyright
    ---------
    This is an adaptation of the function approx_hess3() in
    statsmodels.tools.numdiff. That code is BSD (3 clause) licensed as
    follows:
    Copyright (C) 2006, Jonathan E. Taylor
    All rights reserved.
    Copyright (c) 2006-2008 Scipy Developers.
    All rights reserved.
    Copyright (c) 2009-2012 Statsmodels Developers.
    All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
      a. Redistributions of source code must retain the above copyright notice,
         this list of conditions and the following disclaimer.
      b. Redistributions in binary form must reproduce the above copyright
         notice, this list of conditions and the following disclaimer in the
         documentation and/or other materials provided with the distribution.
      c. Neither the name of Statsmodels nor the names of its contributors
         may be used to endorse or promote products derived from this software
         without specific prior written permission.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    ARE DISCLAIMED. IN NO EVENT SHALL STATSMODELS OR CONTRIBUTORS BE LIABLE FOR
    ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
    DAMAGE.
    """
    # NOTE(review): the mutable default `kwargs={}` is shared across calls;
    # safe here because it is only read, never mutated.
    n = len(x)
    # Per-coordinate step sizes for a 4th-order scheme; when epsilon is
    # None the helper chooses EPS**(1/4)*x automatically (see docstring).
    h = smnd._get_epsilon(x, 4, epsilon, n)
    # ee[i, :] is the step vector d[i] * e[i] from the docstring.
    ee = np.diag(h)
    # hess starts as the outer product h_j * h_k; each entry is consumed
    # as the 4*d_j*d_k denominator below before being overwritten with
    # the second-derivative estimate.
    hess = np.outer(h,h)

    for i in range(n):
        for j in range(i, n):
            # Central second difference, equation (9) of Ridout (2009).
            hess[i, j] = (f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs)
                          - f(*((x + ee[i, :] - ee[j, :],) + args), **kwargs)
                          - (f(*((x - ee[i, :] + ee[j, :],) + args), **kwargs)
                             - f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs))
                          )/(4.*hess[i, j])
            # The Hessian is symmetric; fill the mirror entry.
            hess[j, i] = hess[i, j]
    return hess
| justinbois/bebi103_utils | legacy/bebi103.py | Python | mit | 61,231 |
# Generator package
| brextonpham/python-Ultron | python-vlc-master/generator/__init__.py | Python | mit | 20 |
import os, os.path
import subprocess
import shutil
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--make", help="run make clean && make on all files",
action="store_true")
parser.add_argument("-c", "--check", help="run ./check.sh on all files",
action="store_true")
parser.add_argument("-p", "--printresult", help="Compiles the code with printing of the result enabled",
action="store_true")
parser.add_argument("-t", "--tag", help="tag this benchmark with a string")
parser.add_argument("-r", "--run", help="run all binary files for the given device", choices=['CPU', 'GPU'])
parser.add_argument("-i", "--input", help="input choice for the binarys", choices=['basic', 'K20Max'])
parser.add_argument("-n", "--numberofiterations", help="the number of iterations we benchmark a given binary.", type=int, default=1)
args = parser.parse_args()
benchmark = ['MatMul',
'Jacobi',
'KNearest',
'NBody',
'Laplace',
'GaussianDerivates'
]
cmdlineoptsbasic = {'MatMul' : '-n 1024' ,
'Jacobi' : '-n 1024' ,
'KNearest' : '-n 1024 -k 16' ,
'NBody' : '-n 1024' ,
'Laplace' : '-n 256 -k 3' ,
'GaussianDerivates' : '-n 256 -m 256 -k 3'}
cmdlineoptsK20Max = {'MatMul' : '-n 12544' ,
'Jacobi' : '-n 16384' ,
'KNearest' : '-n 16384 -k 16' ,
'NBody' : '-n 1081600' ,
'Laplace' : '-n 215296 -k 5' ,
'GaussianDerivates' : '-n 4608 -m 4608 -k 3'}
## benchmark = ['MatMul']
# Check all folder are actually there
for n in benchmark:
if not os.path.exists(n):
raise Exception('Folder ' + n + 'does not exist')
if args.make or args.check:
# run the makefile in each folder
if args.make:
command = "make clean && make"
if args.printresult:
command += " DEF=PRINT"
if args.check:
command = "./check.sh"
for n in benchmark:
os.chdir(n)
p1 = subprocess.Popen(command, shell=True,\
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
erracc = ''
while True:
line = p1.stdout.readline()
if not line:
line = p1.stderr.readline()
if not line: break
erracc += line
if line[0:9] == 'make: ***':
raise Exception('Program ' + n + ' did not compile: ' + erracc)
if args.check:
print line
os.chdir('..')
if args.run is not None:
dev = args.run
# run each exe in benchmark
if args.input == 'K20Max':
cmdlineopts = cmdlineoptsK20Max
else:
cmdlineopts = cmdlineoptsbasic
tag = ''
if args.tag:
tag = args.tag + '_'
for n in benchmark:
m = n + dev
uniqueid = open('logs/.uniqueid.txt','r')
uid = uniqueid.readline()
uniqueid.close()
uniqueid = open('logs/.uniqueid.txt','w')
uniqueid.write(str(int(uid) + 1))
log = open('logs/' + uid + '_' + tag + m + cmdlineopts[n].replace(" ", "_") \
.replace("-", "_") + '.txt','w')
os.chdir(n)
for k in xrange(args.numberofiterations):
p1 = subprocess.Popen('./' + m +'.exe ' + cmdlineopts[n], shell=True,\
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
acc = '$Func ' + m + ', $Defines ' + cmdlineopts[n]
while True:
line = p1.stdout.readline()
if not line:
line = p1.stderr.readline()
if not line: break
acc += ', ' + line[:-1]
log.write(acc + '\n')
log.flush()
os.fsync(log)
#print acc + '\n'
os.chdir('..')
log.close()
uniqueid.close()
| dikujepsen/OpenTran | v3.0/test/C/run.py | Python | mit | 4,191 |
import warnings
from functools import wraps
def deprecated(func):
    """
    Decorator that emits a DeprecationWarning each time the wrapped
    function is called, then delegates to it unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(
            "'{}' is deprecated".format(func.__name__),
            category=DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)
    return wrapper
| nerandell/vyked | vyked/utils/decorators.py | Python | mit | 356 |
""" Custom fields used in InvenTree """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from .validators import allowable_url_schemes
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import URLField as FormURLField
from django.db import models as models
from django.core import validators
from django import forms
from decimal import Decimal
from djmoney.models.fields import MoneyField as ModelMoneyField
from djmoney.forms.fields import MoneyField
from djmoney.models.validators import MinMoneyValidator
import InvenTree.helpers
class InvenTreeURLFormField(FormURLField):
    """ Custom URL form field with custom scheme validators """

    # Restrict accepted URL schemes to those enabled in InvenTree settings
    default_validators = [validators.URLValidator(schemes=allowable_url_schemes())]
class InvenTreeURLField(models.URLField):
    """ Custom URL field which has custom scheme validators """

    # Restrict accepted URL schemes to those enabled in InvenTree settings
    default_validators = [validators.URLValidator(schemes=allowable_url_schemes())]

    def formfield(self, **kwargs):
        """
        Return a form field using the custom InvenTreeURLFormField.

        Bug fix: the original implementation discarded all
        caller-supplied kwargs; they are now merged so callers can
        still override widget, label, etc.
        """
        kwargs['form_class'] = InvenTreeURLFormField
        return super().formfield(**kwargs)
def money_kwargs():
    """ returns the database settings for MoneyFields """
    # Imported lazily to avoid import-order problems at startup
    from common.settings import currency_code_mappings, currency_code_default

    return {
        'currency_choices': currency_code_mappings(),
        'default_currency': currency_code_default(),
    }
class InvenTreeModelMoneyField(ModelMoneyField):
    """
    Custom MoneyField for clean migrations while using dynamic currency settings
    """

    def __init__(self, **kwargs):
        # detect if creating migration: currency settings are dynamic, so
        # baking them into a migration would make migrations unstable
        if 'migrate' in sys.argv or 'makemigrations' in sys.argv:
            # remove currency information for a clean migration
            kwargs['default_currency'] = ''
            kwargs['currency_choices'] = []
        else:
            # set defaults from the current database currency settings
            kwargs.update(money_kwargs())

        # Set a minimum value validator (money values may not be negative),
        # unless the caller already supplied validators
        validators = kwargs.get('validators', [])

        if len(validators) == 0:
            validators.append(
                MinMoneyValidator(0),
            )

        kwargs['validators'] = validators

        super().__init__(**kwargs)

    def formfield(self, **kwargs):
        """ override form class to use own function """
        kwargs['form_class'] = InvenTreeMoneyField
        return super().formfield(**kwargs)
class InvenTreeMoneyField(MoneyField):
    """ custom MoneyField for clean migrations while using dynamic currency settings """

    def __init__(self, *args, **kwargs):
        # override initial values with the real info from database
        # (currency choices and default currency are runtime settings)
        kwargs.update(money_kwargs())
        super().__init__(*args, **kwargs)
class DatePickerFormField(forms.DateField):
    """
    Custom date-picker field
    """

    def __init__(self, **kwargs):
        # Render with the browser's native HTML5 date picker
        date_widget = forms.DateInput(attrs={'type': 'date'})

        super().__init__(
            required=kwargs.get('required', False),
            initial=kwargs.get('initial', None),
            help_text=kwargs.get('help_text', _('Enter date')),
            widget=date_widget,
            label=kwargs.get('label', None),
        )
def round_decimal(value, places):
    """
    Round value to the specified number of places.
    """
    if value is None:
        return None

    # Quantize to 10**-places; see the decimal module documentation
    # (Decimal.quantize) for the available rounding options
    return value.quantize(Decimal(10) ** -places)
class RoundingDecimalFormField(forms.DecimalField):
    """ Form field which rounds decimal values to the configured precision """

    def to_python(self, value):
        """ Convert to a python Decimal, rounded to ``decimal_places`` """
        value = super(RoundingDecimalFormField, self).to_python(value)
        value = round_decimal(value, self.decimal_places)
        return value

    def prepare_value(self, value):
        """
        Override the 'prepare_value' method, to remove trailing zeros when displaying.
        Why? It looks nice!
        """
        # Bug fix: use isinstance rather than an exact type comparison,
        # which also covers Decimal subclasses
        if isinstance(value, Decimal):
            return InvenTree.helpers.normalize(value)
        else:
            return value
class RoundingDecimalField(models.DecimalField):
    """ Model field which rounds decimal values to the configured precision """

    def to_python(self, value):
        """ Convert to a python Decimal, rounded to ``decimal_places`` """
        value = super(RoundingDecimalField, self).to_python(value)
        return round_decimal(value, self.decimal_places)

    def formfield(self, **kwargs):
        """ Return a form field, defaulting to RoundingDecimalFormField """
        defaults = {
            'form_class': RoundingDecimalFormField
        }

        defaults.update(kwargs)

        # Bug fix: pass the merged defaults. The original passed **kwargs,
        # silently discarding the custom form_class default above.
        return super().formfield(**defaults)
| inventree/InvenTree | InvenTree/InvenTree/fields.py | Python | mit | 4,623 |
#!/usr/bin/python
# Copyright (c) 2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import cellpilot_usart

# Usage: set_led.py <led_no> <state>
# Parse the LED index and desired on/off state from the command line
led_no=int(sys.argv[1])
state=int(sys.argv[2])
# Drive the LED over the USART link to the CellPilot board
cellpilot_usart.set_led(led_no,state)
| nf-dj/cellpilot | drone/scripts/set_led.py | Python | mit | 1,239 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Daniel and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CotizacionItemRecursosHumanos(Document):
    """Frappe DocType controller for 'Cotizacion Item Recursos Humanos'.

    No custom behavior; everything is inherited from frappe's Document.
    """
    pass
| Init-7/demoest | demoest/demoest/doctype/cotizacion_item_recursos_humanos/cotizacion_item_recursos_humanos.py | Python | mit | 272 |
"""Record information about how a game finished.
Revision ID: 4623531fa2b
Revises: 9aec2a74d9
Create Date: 2016-04-16 12:27:45.788322
"""
# revision identifiers, used by Alembic.
revision = '4623531fa2b'
down_revision = '9aec2a74d9'
from alembic import op
import sqlalchemy as sa
import app.go as go
# A kind of hybrid table that contains both the old/downgraded column
# 'finished' as well as the upgraded column 'result'
gamehelper = sa.Table(
    'games',
    sa.MetaData(),
    sa.Column('id', sa.Integer, primary_key=True),
    # Upgraded column: game result code (winner/method, or draw, or unset)
    sa.Column('result', sa.Enum('WBR', 'WBC', 'BBR', 'BBC', 'D', '')),
    # Downgraded column: simple finished flag
    sa.Column('finished', sa.Boolean),
    # SGF record, used to recompute the result during upgrade
    sa.Column('sgf', sa.Text)
)
def upgrade():
    """Add the 'result' column, backfill it from each game's SGF record,
    then drop the old 'finished' flag."""
    # Add the new nullable 'result' column (batch mode for SQLite support)
    with op.batch_alter_table('games', schema=None) as batch_op:
        batch_op.add_column(sa.Column('result', sa.Enum('WBR', 'WBC', 'BBR', 'BBC', 'D', ''), nullable=True))
    # Backfill: derive each game's result from its stored SGF
    connection = op.get_bind()
    for game in connection.execute(gamehelper.select()):
        result = go.get_game_result(game.sgf).value
        connection.execute(
            gamehelper.update().where(
                gamehelper.c.id == game.id
            ).values(result=result)
        )
    # The old boolean flag is now redundant
    with op.batch_alter_table('games', schema=None) as batch_op:
        batch_op.drop_column('finished')
    ### end Alembic commands ###
def downgrade():
    """Re-add the 'finished' flag, derive it from 'result', then drop
    the 'result' column."""
    connection = op.get_bind()
    # Recreate the boolean column with its old server default of '0'
    with op.batch_alter_table('games', schema=None) as batch_op:
        batch_op.add_column(sa.Column('finished', sa.BOOLEAN(), server_default=sa.text("'0'"), autoincrement=False, nullable=True))
    # A game is 'finished' whenever it has any non-empty result code
    for game in connection.execute(gamehelper.select()):
        finished = game.result != ""
        connection.execute(
            gamehelper.update().where(
                gamehelper.c.id == game.id ).values(finished=finished))
    with op.batch_alter_table('games', schema=None) as batch_op:
        batch_op.drop_column('result')
    ### end Alembic commands ###
| karlorg/drunken-octo-avenger | migrations/versions/4623531fa2b_record_information_about_how_a_game_.py | Python | cc0-1.0 | 1,921 |
"""Functional tests for URIEval Plugin"""
from __future__ import absolute_import
import random
import sys
import unittest
from string import ascii_letters
from string import digits
import tests.util
# Load plugin and report matched RULES and SCORE
PRE_CONFIG = """
loadplugin oa.plugins.uri_eval.URIEvalPlugin
report _SCORE_
report _TESTS_
"""
# Define rules for plugin
CONFIG = """
body CHECK_FOR_HTTP_REDIRECTOR eval:check_for_http_redirector()
body CHECK_HTTPS_IP_MISMATCH eval:check_https_ip_mismatch()
body CHECK_URI_TRUNCATED eval:check_uri_truncated()
"""
class TestFunctionalURIEval(tests.util.TestBase):
"""Class containing functional tests for the URI Plugin"""
mytext = [random.choice(ascii_letters + digits) for _ in range(8182)]
long_text = "".join(mytext)
def test_check_for_http_redirector(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&url=https://log0.wordpress.com"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_http_redirector_in_a_label_closed_commas(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://utility.baidu.com/traf/click.php?id=215&url=https://log0.wordpress.com"></a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
@unittest.skipIf(sys.version_info.major == 3 and sys.version_info.minor < 3,
'Incompatible with python 3.2.*')
def test_check_for_http_redirector_in_a_label_no_commas(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href=http://utility.baidu.com/traf/click.php?id=215&url=https://log0.wordpress.com></a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_http_redirector_links_combined(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&urlhttps://log0.wordpress.com"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_http_redirector_no_http(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&url=://log0.wordpress.com"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_http_redirector_with_ftp(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&url=ftp://log0.wordpress.com"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_http_redirector_only_http(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&url=https://"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_http_redirector_incomplete_link(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&url=https://ceva"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_http_redirector_different_links(self):
email = """From: sender@example.com
\nhttp://utility.baidu.com/traf/click.php?id=215&url= https://ceva.com"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_http_redirector_middle_of_body(self):
email = """From: sender@example.com
\nFYI, this week is Learning Week @LinkedIn, so if you are interested in taking some free courses, hurry up
asfglajds;galsg a;slfa;sl laddg http://utility.baidu.com/traf/click.php?id=215&url=https://ceva.com asdgksal;fjlaskfdghs"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited http://utility.baidu.com/traf/click.php?id=215&url=https://log0.wordpress.com:
<a href="http://45.42.12.12/login/account-unlock">https://www.paypal.com/login/account-unlock</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 2, ['CHECK_HTTPS_IP_MISMATCH', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_and_redirector_in_a_label(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://google.com=https://log0.wordpress.com/">https://ceva.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_and_redirector_in_a_label_with_invalid_expression(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://@1.2.3.4=https://log0.wordpress.com/">https://ceva.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_and_redirector_in_a_label_ip_left(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/https://log0.wordpress.com/">https://ceva.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 2, ['CHECK_HTTPS_IP_MISMATCH', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_and_redirector_in_link_label_same_address(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link rel=parent href="http://log0.wordpress.com/https://log0.wordpress.com/">
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_and_redirector_in_link_label(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link rel=parent href="http://google.com=https://log0.wordpress.com/">https://ceva.com/
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_and_redirector_in_link_label_with_invalid_expression(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link rel=parent href="http://@1.2.3.4=https://log0.wordpress.com/">https://ceva.com/
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_and_redirector_in_link_label_ip_left(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link rel=parent href="http://1.2.3.4=https://log0.wordpress.com/">https://ceva.com/
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_domains(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://google.com/">https://www.google.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_domains_incomplete_right(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://google.com/"> cevatest https://ceva/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_ip_right(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://google.com/">http://300.58.209.206/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_both_ips(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://5.79.73.204/">http://300.58.209.206/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_incomplete_domain(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://5.79.73.204/">https://ceva/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_ipv6_left(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/">https://1.2.3.4/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_ipv6_left_domain_right(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/">https://yahoo.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_ipv6_left_multiple_labels(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/">https://1.2.3.4/</a>
<a href="http://2001:1af8:4700:a02d:2::1/">https://yahoo.com/</a>
<a href="http://2001:1af8:4700:a02d:2::1/">https://6.6.6.6/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_ipv6_with_redirector(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/https://test">https://1.2.3.4/</a>
<a href="http://2001:1af8:4700:a02d:2::1/">https://yahoo.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 2, ['CHECK_HTTPS_IP_MISMATCH', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_ipv6_with_redirector_and_link_label(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/https://test">https://1.2.3.4/</a>
<a href="http://2001:1af8:4700:a02d:2::1/">https://yahoo.com/</a>
<link href="http://2001:1af8:4700:a02d:2::1/">https://yahoo.com/
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 2, ['CHECK_HTTPS_IP_MISMATCH', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_ipv6_with_false_redirector(self):
email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/https://2001:1af8:4700:a02d:2::1">https://1.2.3.4/</a>
<a href="http://2001:1af8:4700:a02d:2::1/">https://yahoo.com/</a>
</html>"""
self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
result = self.check_pad(email)
self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_incorrect_ipv4_domain_right(self):
    """Truncated (invalid) IPv6 in the href must not be treated as an IP,
    so no rule fires despite the domain label."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d/https://2001:1af8:4700:a02d/">https://yahoo.com/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_no_domain(self):
    """A bare "https://" label over an IP href still counts as a mismatch."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_incorrect_ip(self):
    """Three-octet "1.2.3" is not a valid IPv4 address: no rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3/">https://</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_unfinished_ip(self):
    """Trailing-dot "1.2.3." is an unfinished IPv4 address: no rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3./">https://</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_inverted_commas_16_ip(self):
    """A quoted octet ('2') invalidates the href IP: no rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.'2'.3.4/">https://test.com</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_inverted_commas_ip_right(self):
    """Quotes in the visible label ('1'.2.3.4) do not make it match the
    href IP, so the mismatch rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://'1'.2.3.4</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_inverted_commas_on_all_ip(self):
    """The entire href IP wrapped in quotes is not a valid IP: no rule."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://'1.2.3.4'/">https://test.com</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_invalid_expression_ip(self):
    """An "@" before the href IP makes the expression invalid: no rule."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://@1.2.3.4/">https://test.com</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_ipv6_right(self):
    """An IPv6 address as the visible label is not a domain, so an IPv4
    href does not trigger the mismatch rule."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://2001:1af8:4700:a02d:2::1/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_same_ipv6_right_and_left(self):
    """Identical IPv6 in href and label: nothing suspicious, no rule."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/">https://2001:1af8:4700:a02d:2::1/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_same_ipv6_right_and_left_with_redirector(self):
    """Redirector target equals the label, so no mismatch; the embedded
    https:// hop to a different IPv6 still counts as a redirector."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/https://2901:1af8:4711:a02d:2::1">https://2901:1af8:4711:a02d:2::1/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_same_ipv6_right_and_left_with_redirector_negative(self):
    """Embedded https:// target identical to the host (trailing slash) is
    not a redirector, and label/host relationship raises no rule."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://2001:1af8:4700:a02d:2::1/https://2001:1af8:4700:a02d:2::1/">https://2901:1af8:4711:a02d:2::1/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_text_between_links_domain_right(self):
    """Extra text before the https:// domain label does not mask the
    IP-href/domain-label mismatch."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/"> cevatest https://google.com/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_text_between_links_ip_right(self):
    """Label IP matches the href IP (with extra text): no rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/"> cevatest https://1.2.3.4/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_with_multiple_uri(self):
    """Label contains both the matching IP and a domain: the domain URI
    still triggers the mismatch rule."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/"> cevatest https://1.2.3.4/ https://test.com/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_redirector_with_multiple_redirector(self):
    """Several chained https:// hops including a foreign domain: the
    redirector rule fires (once)."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/https://1.2.3.4/https://test.com/https://1.2.3.4/"> cevatest https://1.2.3.4/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_redirector_with_multiple_redirector_negative(self):
    """Chained hops that all point back at the same host are not a
    redirector: no rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/https://1.2.3.4/https://1.2.3.4/"> cevatest https://1.2.3.4/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_label_not_closed(self):
    """A missing </a> must not prevent the mismatch rule from firing."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://google
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_incorrect_link_label(self):
    """<link> tags carry no user-visible anchor text, so the mismatch
    rule must not fire on them."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link href="http://1.2.3.4/">https://google.com/</link>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_https_ip_mismatch_multiple_labels_redirector_in_link_label(self):
    """IP-to-IP anchors raise no mismatch; the redirector hidden inside
    the <link> href still triggers CHECK_FOR_HTTP_REDIRECTOR."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://5.5.5.5/</a>
<link href="http://1.2.3.4/https://google.com/">
<a href="http://1.2.3.4/">https://6.6.6.6/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_multiple_labels_match_on_a(self):
    """Only the <a> tag (not the <link> tag) contributes to the mismatch
    rule, which fires exactly once."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://google.com/</a>
<link href="http://1.2.3.4/">https://test.com/
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_multiple_labels_match_on_both(self):
    """Mismatch from the <a> tag plus a redirector inside the <link>
    href: both rules fire."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://google.com/</a>
<link href="http://1.2.3.4/https://test.com/">
<a href="http://6.6.6.6/"></a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 2, ['CHECK_HTTPS_IP_MISMATCH', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_https_ip_mismatch_multiple_labels(self):
    """Among three anchors only the domain-labelled one mismatches; the
    rule fires once, not per anchor."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://5.5.5.5/</a>
<a href="http://1.2.3.4/">https://google.com/</a>
<a href="http://1.2.3.4/">https://6.6.6.6/></a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_multiple_labels_match_last(self):
    """Mismatching anchor appears last in the message: rule still fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://5.5.5.5/</a>
<a href="http://1.2.3.4/">https://google.com/</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_https_ip_mismatch_multiple_labels_match_first(self):
    """Mismatching anchor appears first in the message: rule still fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
<a href="http://1.2.3.4/">https://google.com/</a>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/">https://5.5.5.5/</a>
<a href="http://1.2.3.4/">https://1.2.3.4./</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_uri_truncated_negative(self):
    """A normal-length URI with an ellipsis label is not "truncated":
    no rule fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="https://www.PAYPAL.com/login/account-unlock">https://www.PAYPAL.com/...</a>
</html>"""
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_uri_truncated_superior_limit(self):
    """An 8181-character hostname is above the plugin's upper size limit,
    so CHECK_URI_TRUNCATED must not fire."""
    # Random host label just above the limit handled by the plugin.
    mytext1 = [random.choice(ascii_letters + digits) for _ in range(8181)]
    long_text1 = "".join(mytext1)
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://%s.com">https://test.com</a>
</html>"""
    email = email % (long_text1)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_uri_truncated(self):
    """A very long hostname (self.long_text, built in the fixture) makes
    the URI truncated: CHECK_URI_TRUNCATED fires."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://%s.com">https://test.com</a>
</html>"""
    email = email % (self.long_text)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_URI_TRUNCATED'])
def test_check_for_uri_truncated_and_redirector_after(self):
    """Truncated URI with an https:// hop appended: truncation and
    redirector rules both fire."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://%s.com/https://ceva.com">https://test.com</a>
</html>"""
    email = email % (self.long_text)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 2, ['CHECK_URI_TRUNCATED', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_uri_truncated_redirector_before_and_ip_mismatch(self):
    """IP href redirecting to a truncated long-host URI with a domain
    label: all three rules fire."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<a href="http://1.2.3.4/https://%s.com/">https://test.com</a>
</html>"""
    email = email % (self.long_text)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 3, ['CHECK_URI_TRUNCATED', 'CHECK_FOR_HTTP_REDIRECTOR','CHECK_HTTPS_IP_MISMATCH'])
def test_check_for_uri_truncated_link_label(self):
    """Truncation is also detected on <link> hrefs (not only anchors)."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link href="http://%s.com">
</html>"""
    email = email % (self.long_text)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 1, ['CHECK_URI_TRUNCATED'])
def test_check_for_uri_truncated_superior_limit_link_label(self):
    """Above-limit hostname inside a <link> href: no rule fires."""
    # Random host label just above the limit handled by the plugin.
    mytext1 = [random.choice(ascii_letters + digits) for _ in range(8181)]
    long_text1 = "".join(mytext1)
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link href="http://%s.com">
</html>"""
    email = email % (long_text1)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 0, [])
def test_check_for_uri_truncated_and_redirector_after_link_label(self):
    """Two truncated hops plus a domain hop in a <link> href: truncation
    and redirector both fire, once each."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link href="http://%s.com/https://%s.com/https://ceva.com">
</html>"""
    email = email % (self.long_text, self.long_text)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 2, ['CHECK_URI_TRUNCATED', 'CHECK_FOR_HTTP_REDIRECTOR'])
def test_check_for_uri_truncated_redirector_before_link_label(self):
    """IP hops redirecting to a truncated long-host URI in a <link> href:
    truncation and redirector fire; no mismatch (no anchor text)."""
    email = """From: sender@example.com
Content-Type: text/html
\n<html>
Dear user,
Your account has been limited please follow the instructions on the next link:
<link href="http://1.2.3.4/https://1.2.3.4/https://%s.com/">
</html>"""
    email = email % (self.long_text)
    self.setup_conf(config=CONFIG, pre_config=PRE_CONFIG)
    result = self.check_pad(email)
    self.check_report(result, 2, ['CHECK_URI_TRUNCATED', 'CHECK_FOR_HTTP_REDIRECTOR'])
def suite():
    """Gather all the tests from this package in a test suite.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the deprecated
    ``unittest.makeSuite`` alias, which was removed in Python 3.13; the
    resulting suite is identical.
    """
    test_suite = unittest.TestSuite()
    test_suite.addTest(
        unittest.TestLoader().loadTestsFromTestCase(TestFunctionalURIEval))
    return test_suite
# Run the module's suite() when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| SpamExperts/SpamPAD | tests/functional/test_plugins/test_uri_eval.py | Python | gpl-2.0 | 31,989 |
# -*- coding: utf-8 -*-
#
# test_connect_helpers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.stats
import nest
from scipy.stats import truncexpon
try:
from mpi4py import MPI
haveMPI4Py = True
except ImportError:
haveMPI4Py = False
def gather_data(data_array):
    '''
    Collect *data_array* from every MPI process on rank 0.

    Lists are concatenated into one flat list; numpy arrays are summed
    element-wise. Returns the combined data on rank 0 and None on every
    other rank. Without mpi4py the input is returned unchanged.
    '''
    if not haveMPI4Py:
        return data_array
    per_rank = MPI.COMM_WORLD.gather(data_array, root=0)
    if MPI.COMM_WORLD.Get_rank() != 0:
        return None
    if isinstance(data_array, list):
        combined = []
        for part in per_rank:
            combined.extend(part)
        return combined
    return sum(per_rank)
def bcast_data(data):
    """
    Broadcast *data* from the root MPI rank to all ranks.

    A no-op (the input is returned unchanged) when mpi4py is unavailable.
    """
    return MPI.COMM_WORLD.bcast(data, root=0) if haveMPI4Py else data
def is_array(data):
    '''
    Return True when *data* is a list, a numpy array, or a numpy scalar,
    and False otherwise.
    '''
    array_like = (list, np.ndarray, np.generic)
    return isinstance(data, array_like)
def mpi_barrier():
    # Synchronize all MPI ranks; a no-op when mpi4py is unavailable.
    if haveMPI4Py:
        MPI.COMM_WORLD.Barrier()
def mpi_assert(data_original, data_test, TestCase):
    '''
    Gather *data_original* across all MPI ranks and, on rank 0 only,
    assert that it equals *data_test* (numpy.allclose for array data,
    plain equality otherwise) via TestCase.assertTrue.
    '''
    gathered = gather_data(data_original)
    if gathered is None:
        # Non-root rank: gather_data returned nothing to check.
        return
    numpy_types = (np.ndarray, np.generic)
    if isinstance(gathered, numpy_types) and isinstance(data_test, numpy_types):
        TestCase.assertTrue(np.allclose(gathered, data_test))
    else:
        TestCase.assertTrue(gathered == data_test)
def all_equal(x):
    '''
    Test whether all elements of the list *x* are equal.

    Returns True or False. An empty list is considered trivially equal
    (the original implementation raised IndexError on x[0] in that case).
    '''
    if len(x) == 0:
        return True
    return x.count(x[0]) == len(x)
def get_connectivity_matrix(pop1, pop2):
    '''
    Build the pop1 -> pop2 connectivity matrix M, where M[i][j] counts
    the connections from the j-th neuron of pop1 to the i-th neuron of
    pop2.
    '''
    matrix = np.zeros((len(pop2), len(pop1)))
    # Map global node ids to their position within each population.
    index_of = {}
    for position, node in enumerate(pop1):
        index_of[node.get('global_id')] = position
    for position, node in enumerate(pop2):
        index_of[node.get('global_id')] = position
    connections = nest.GetConnections(pop1, pop2)
    for src, tgt in zip(connections.sources(), connections.targets()):
        matrix[index_of[tgt]][index_of[src]] += 1
    return matrix
def get_weighted_connectivity_matrix(pop1, pop2, label):
    '''
    Build the pop1 -> pop2 matrix of summed connection values for the
    parameter *label* (e.g. 'weight'): M[i][j] holds the value of the
    connection from the j-th neuron of pop1 to the i-th neuron of pop2.
    Only works without multapses.
    '''
    matrix = np.zeros((len(pop2), len(pop1)))
    connections = nest.GetConnections(pop1, pop2)
    src_ids = connections.get('source')
    tgt_ids = connections.get('target')
    values = connections.get(label)
    # Map global node ids to their position within each population.
    index_of = {}
    for position, node in enumerate(pop1):
        index_of[node.get('global_id')] = position
    for position, node in enumerate(pop2):
        index_of[node.get('global_id')] = position
    for src, tgt, value in zip(src_ids, tgt_ids, values):
        matrix[index_of[tgt]][index_of[src]] += value
    return matrix
def check_synapse(params, values, syn_params, TestCase):
    # Write each requested synapse parameter into syn_params, rebuild the
    # network, then verify that every created connection carries exactly
    # the requested value (all connections equal, equal to values[i]).
    for i, param in enumerate(params):
        syn_params[param] = values[i]
    TestCase.setUpNetwork(TestCase.conn_dict, syn_params)
    for i, param in enumerate(params):
        conns = nest.GetConnections(TestCase.pop1, TestCase.pop2)
        conn_params = conns.get(param)
        TestCase.assertTrue(all_equal(conn_params))
        TestCase.assertTrue(conn_params[0] == values[i])
# copied from Masterthesis, Daniel Hjertholm
def counter(x, fan, source_pop, target_pop):
'''
Count similar elements in list.
Parameters
----------
x: Any list.
Return values
-------------
list containing counts of similar elements.
'''
N_p = len(source_pop) if fan == 'in' else len(target_pop) # of pool nodes.
start = min(x)
counts = [0] * N_p
for elem in x:
counts[elem - start] += 1
return counts
def get_degrees(fan, pop1, pop2):
    '''
    Return the degree of every pool node for connections pop1 -> pop2.

    fan == 'in'  : row sums of the connectivity matrix (in-degrees of pop2).
    fan == 'out' : column sums (out-degrees of pop1).

    Raises ValueError for any other *fan* value (previously this fell
    through and raised a confusing UnboundLocalError).
    '''
    M = get_connectivity_matrix(pop1, pop2)
    if fan == 'in':
        degrees = np.sum(M, axis=1)
    elif fan == 'out':
        degrees = np.sum(M, axis=0)
    else:
        raise ValueError("fan must be 'in' or 'out', got %r" % (fan,))
    return degrees
# adapted from Masterthesis, Daniel Hjertholm
def get_expected_degrees_fixedDegrees(N, fan, len_source_pop, len_target_pop):
N_d = len_target_pop if fan == 'in' else len_source_pop # of driver nodes.
N_p = len_source_pop if fan == 'in' else len_target_pop # of pool nodes.
expected_degree = N_d * N / float(N_p)
expected = [expected_degree] * N_p
return expected
# adapted from Masterthesis, Daniel Hjertholm
def get_expected_degrees_totalNumber(N, fan, len_source_pop, len_target_pop):
expected_indegree = [N / float(len_target_pop)] * len_target_pop
expected_outdegree = [N / float(len_source_pop)] * len_source_pop
if fan == 'in':
return expected_indegree
elif fan == 'out':
return expected_outdegree
# copied from Masterthesis, Daniel Hjertholm
def get_expected_degrees_bernoulli(p, fan, len_source_pop, len_target_pop):
    '''
    Calculate expected degree distribution.

    Degrees with expected number of observations below e_min are combined
    into larger bins.

    Return values
    -------------
    2D array. The four columns contain degree,
    expected number of observation, actual number observations, and
    the number of bins combined.
    '''
    # n: trials per node; n_p: number of nodes observed.
    n = len_source_pop if fan == 'in' else len_target_pop
    n_p = len_target_pop if fan == 'in' else len_source_pop
    # Split the degree range at the binomial mean; bins are merged from
    # each side toward it so low-expectation tails get combined.
    mid = int(round(n * p))
    # Minimum expected count per bin (standard chi-squared rule of thumb).
    e_min = 5
    # Combine from front.
    data_front = []
    cumexp = 0.0
    bins_combined = 0
    for degree in range(mid):
        cumexp += scipy.stats.binom.pmf(degree, n, p) * n_p
        bins_combined += 1
        if cumexp < e_min:
            if degree == mid - 1:
                # Last front bin still too small: merge it into the
                # previous emitted bin instead of emitting a new one.
                if len(data_front) == 0:
                    raise RuntimeWarning('Not enough data')
                deg, exp, obs, num = data_front[-1]
                data_front[-1] = (deg, exp + cumexp, obs,
                                  num + bins_combined)
            else:
                continue
        else:
            # Emit a bin starting at the first degree it accumulates.
            data_front.append((degree - bins_combined + 1, cumexp, 0,
                               bins_combined))
            cumexp = 0.0
            bins_combined = 0
    # Combine from back.
    data_back = []
    cumexp = 0.0
    bins_combined = 0
    for degree in reversed(range(mid, n + 1)):
        cumexp += scipy.stats.binom.pmf(degree, n, p) * n_p
        bins_combined += 1
        if cumexp < e_min:
            if degree == mid:
                if len(data_back) == 0:
                    raise RuntimeWarning('Not enough data')
                deg, exp, obs, num = data_back[-1]
                data_back[-1] = (degree, exp + cumexp, obs,
                                 num + bins_combined)
            else:
                continue
        else:
            # Back-side bins are labelled by their lowest degree.
            data_back.append((degree, cumexp, 0, bins_combined))
            cumexp = 0.0
            bins_combined = 0
    data_back.reverse()
    expected = np.array(data_front + data_back)
    # Sanity check: the combined bins must cover all n + 1 possible
    # degrees (0..n); n is len_target_pop for 'out', len_source_pop else.
    if fan == 'out':
        assert (sum(expected[:, 3]) == len_target_pop + 1)
    else:  # , 'Something is wrong'
        assert (sum(expected[:, 3]) == len_source_pop + 1)
    # np.hstack((np.asarray(data_front)[0], np.asarray(data_back)[0]))
    return expected
# adapted from Masterthesis, Daniel Hjertholm
def reset_seed(seed, nr_threads):
    '''
    Reset the simulator and seed the PRNGs.

    Parameters
    ----------
    seed: PRNG seed value.
    nr_threads: number of local threads to configure in the kernel.
    '''
    nest.ResetKernel()
    nest.SetKernelStatus({'local_num_threads': nr_threads, 'rng_seed': seed})
# copied from Masterthesis, Daniel Hjertholm
def chi_squared_check(degrees, expected, distribution=None):
    '''
    Compare an observed degree distribution with the expected one using
    Pearson's chi-squared goodness-of-fit test.

    Parameters
    ----------
    degrees: observed degrees.
    expected: expected counts; for the pairwise-Bernoulli distributions
        this is the binned 2D array from get_expected_degrees_bernoulli
        (columns: degree, expected, observed, bins combined), otherwise a
        plain per-bin array of expected counts.

    Return values
    -------------
    chi-squared statistic.
    p-value from chi-squared test.
    '''
    pairwise = ('pairwise_bernoulli', 'symmetric_pairwise_bernoulli')
    if distribution not in pairwise:
        return scipy.stats.chisquare(np.array(degrees), np.array(expected))
    # Tally how often each degree was observed.
    observed = {}
    for degree in degrees:
        observed[degree] = observed.get(degree, 0) + 1
    # Fill the "observed" column of the binned table, crediting each
    # observation to the bin that covers its degree.
    expected[:, 2] = 0.0
    for row in expected:
        first_degree = int(row[0])
        for deg in range(first_degree, first_degree + int(row[3])):
            if deg in observed:
                row[2] += observed[deg]
    return scipy.stats.chisquare(np.array(expected[:, 2]),
                                 np.array(expected[:, 1]))
| lekshmideepu/nest-simulator | testsuite/pytests/test_connect_helpers.py | Python | gpl-2.0 | 10,549 |
# -*- coding: utf-8 -*-
#pylint: skip-file
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
    # De-duplicate packages by name: keep the row with the smallest id,
    # delete the rest, and flag their backups so backwards() can restore.
    Package = orm['core.Package']
    Backup = orm['core.PackageBackup']
    prev = None
    to_be_del = []
    # Ordering by (name, id) guarantees the first row of each name group
    # is the one with the smallest id.
    for pack in Package.objects.order_by('name', 'id').all():
        # for packages with the same name,
        # only keep the one which has the smallest id
        if prev is None or prev.name != pack.name:
            prev = pack
        else:
            to_be_del.append(pack.id)
    # Mark backups of the deleted packages so the migration is reversible.
    Backup.objects.filter(pid__in=to_be_del).update(isdel=True)
    Package.objects.filter(id__in=to_be_del).delete()
def backwards(self, orm):
"Write your backwards methods here."
Package = orm['core.Package']
Backup = orm['core.PackageBackup']
to_be_add = [Package(id=bak.pid,
name=bak.name,
gittree_id=bak.tid)
for bak in Backup.objects.filter(isdel=True).all()]
Package.objects.bulk_create(to_be_add)
Backup.objects.filter(isdel=True).update(isdel=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '225'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.domain': {
'Meta': {'object_name': 'Domain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'core.domainrole': {
'Meta': {'object_name': 'DomainRole', '_ormbases': [u'auth.Group']},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'role_set'", 'to': "orm['core.Domain']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
'core.gittree': {
'Meta': {'object_name': 'GitTree'},
'gitpath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'licenses': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.License']", 'symmetrical': 'False'}),
'subdomain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SubDomain']"})
},
'core.gittreerole': {
'Meta': {'object_name': 'GitTreeRole', '_ormbases': [u'auth.Group']},
'gittree': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'role_set'", 'to': "orm['core.GitTree']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
'core.image': {
'Meta': {'object_name': 'Image'},
'arch': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']"}),
'target': ('django.db.models.fields.TextField', [], {})
},
'core.imagebuild': {
'Meta': {'object_name': 'ImageBuild'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Image']"}),
'log': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Log']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'})
},
'core.license': {
'Meta': {'object_name': 'License'},
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'core.log': {
'Meta': {'object_name': 'Log'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'core.package': {
'Meta': {'unique_together': "(('name', 'gittree'),)", 'object_name': 'Package'},
'gittree': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.GitTree']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'core.packagebackup': {
'Meta': {'object_name': 'PackageBackup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isdel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'tid': ('django.db.models.fields.IntegerField', [], {})
},
'core.packagebuild': {
'Meta': {'object_name': 'PackageBuild'},
'arch': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Log']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Package']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'target': ('django.db.models.fields.TextField', [], {})
},
'core.product': {
'Meta': {'object_name': 'Product'},
'description': ('django.db.models.fields.TextField', [], {}),
'gittrees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.GitTree']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'core.productrole': {
'Meta': {'object_name': 'ProductRole', '_ormbases': [u'auth.Group']},
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
'core.subdomain': {
'Meta': {'unique_together': "(('name', 'domain'),)", 'object_name': 'SubDomain'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Domain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'core.subdomainrole': {
'Meta': {'object_name': 'SubDomainRole', '_ormbases': [u'auth.Group']},
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'subdomain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SubDomain']"})
},
'core.submission': {
'Meta': {'object_name': 'Submission'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'commit': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'gittree': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.GitTree']", 'symmetrical': 'False', 'blank': 'True'}),
'ibuilds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.ImageBuild']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'}),
'pbuilds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.PackageBuild']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'submitters': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'testresults': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.TestResult']", 'symmetrical': 'False', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.submissiongroup': {
'Meta': {'object_name': 'SubmissionGroup'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'submissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Submission']", 'symmetrical': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.testresult': {
'Meta': {'object_name': 'TestResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Log']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'core.userparty': {
'Meta': {'object_name': 'UserParty', '_ormbases': [u'auth.Group']},
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'party': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['core']
symmetrical = True
| gttechsign/iris-panel | iris/core/migrations/0004_remove_redundant_packs.py | Python | gpl-2.0 | 15,684 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import yaml
from qgis.core import Qgis, QgsWkbTypes
from qgis.PyQt.QtCore import QSettings, QLocale
def loadShortHelp():
    """Load the short algorithm help strings from the ``.yaml`` files
    shipped next to this module.

    Returns a dict mapping algorithm name to help text, with the
    ``{qgisdocs}`` placeholder replaced by the documentation URL for the
    running QGIS version and the active locale.
    """
    h = {}
    path = os.path.dirname(__file__)
    for f in os.listdir(path):
        if f.endswith("yaml"):
            filename = os.path.join(path, f)
            with open(filename) as stream:
                # safe_load: the help files are plain data; yaml.load
                # without an explicit Loader is deprecated and allows
                # arbitrary object construction.
                h.update(yaml.safe_load(stream))
    # Only major.minor is used in the docs URL (e.g. "3.28").
    version = ".".join(Qgis.QGIS_VERSION.split(".")[0:2])
    overrideLocale = QSettings().value('locale/overrideFlag', False, bool)
    if not overrideLocale:
        locale = QLocale.system().name()[:2]
    else:
        locale = QSettings().value('locale/userLocale', '')
        locale = locale.split("_")[0]

    def replace(s):
        # Keep None entries as-is (an algorithm may have no help text).
        if s is not None:
            return s.replace("{qgisdocs}", "https://docs.qgis.org/%s/%s/docs" % (version, locale))
        else:
            return None

    h = {k: replace(v) for k, v in h.items()}
    return h
shortHelp = loadShortHelp()
| wbyne/QGIS | python/plugins/processing/algs/help/__init__.py | Python | gpl-2.0 | 2,053 |
# Library for RTS2 JSON calls.
# (C) 2012 Petr Kubanek, Institute of Physics
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import json
class Target:
    """An RTS2 target, identified by its numeric id."""

    def __init__(self, id, name=None):
        self.id = id
        self.name = name

    def reload(self):
        """Load target data from JSON interface."""
        if self.id is None:
            # Bug fix: the original assigned a local ("name = None")
            # instead of clearing the instance attribute.
            self.name = None
            return
        try:
            data = json.getProxy().loadJson('/api/tbyid', {'id': self.id})['d'][0]
            self.name = data[1]
        except Exception:
            # Any lookup failure leaves the target nameless.
            self.name = None
def get(name):
    """Return array with targets matching given name or target ID"""
    # EAFP: interpret numeric input as a target ID; a non-numeric string
    # falls through to a lookup by name.
    try:
        return json.getProxy().loadJson('/api/tbyid',{'id':int(name)})['d']
    except ValueError:
        return json.getProxy().loadJson('/api/tbyname',{'n':name})['d']
def create(name,ra,dec):
    """Create a new target at the given position and return its ID.

    NOTE(review): ra/dec units are presumably degrees - confirm against the
    /api/create_target documentation.
    """
    return json.getProxy().loadJson('/api/create_target', {'tn':name, 'ra':ra, 'dec':dec})['id']
| zguangyu/rts2 | python/rts2/target.py | Python | gpl-2.0 | 1,526 |
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
    # Exercises uftrace's stack handling across setjmp/longjmp: the
    # expected trace shows each __longjmp_chk returning at the matching
    # _setjmp with the value passed as longjmp's 2nd argument (captured
    # via the -A/-R options in runcmd below).
    def __init__(self):
        TestBase.__init__(self, 'longjmp3', """
# DURATION TID FUNCTION
1.164 us [ 4107] | __monstartup();
0.657 us [ 4107] | __cxa_atexit();
[ 4107] | main() {
0.705 us [ 4107] | _setjmp() = 0;
1.823 us [ 4107] | getpid();
0.182 us [ 4107] | _setjmp() = 0;
[ 4107] | foo() {
[ 4107] | __longjmp_chk(1) {
8.790 us [ 4107] | } = 1; /* _setjmp */
0.540 us [ 4107] | getpid();
[ 4107] | bar() {
[ 4107] | baz() {
[ 4107] | __longjmp_chk(2) {
1.282 us [ 4107] | } = 2; /* _setjmp */
0.540 us [ 4107] | getpid();
[ 4107] | foo() {
[ 4107] | __longjmp_chk(3) {
0.578 us [ 4107] | } = 3; /* _setjmp */
[ 4107] | bar() {
[ 4107] | baz() {
[ 4107] | __longjmp_chk(4) {
0.642 us [ 4107] | } = 4; /* _setjmp */
18.019 us [ 4107] | } /* main */
""")

    def build(self, name, cflags='', ldflags=''):
        # _FORTIFY_SOURCE=2 makes glibc route longjmp through
        # __longjmp_chk, which the expected output above relies on.
        return TestBase.build(self, name, cflags + ' -D_FORTIFY_SOURCE=2', ldflags)

    def runcmd(self):
        # -A: record longjmp's 2nd argument; -R: record setjmp's return value
        args = '-A .?longjmp@arg2 -R .?setjmp@retval'
        return '%s %s %s' % (TestBase.ftrace, args, 't-' + self.name)

    def fixup(self, cflags, result):
        # Without fortification the plain longjmp symbol is called instead.
        return result.replace('__longjmp_chk', "longjmp")
| JIMyungSik/uftrace | tests/t145_longjmp3.py | Python | gpl-2.0 | 1,447 |
# -*- coding: utf-8 -*-
#
from django.urls import path
from . import views
# OpenID login entry point plus the provider's redirect-back endpoint.
urlpatterns = [
    path('login/', views.OpenIDLoginView.as_view(), name='openid-login'),
    path('login/complete/', views.OpenIDLoginCompleteView.as_view(),
         name='openid-login-complete'),
]
| eli261/jumpserver | apps/authentication/backends/openid/urls.py | Python | gpl-2.0 | 278 |
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Kickstart-related operations
#
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnException
from spacewalk.server import rhnSQL, rhnAction, rhnLib, rhnChannel
def update_kickstart_session(server_id, action_id, action_status,
                             kickstart_state, next_action_type):
    """Advance the kickstart session tied to (server_id, action_id).

    On a completed action (status 2) the session moves to kickstart_state
    and is pointed at the follow-up action of type next_action_type; on a
    failed action (status 3) the session is marked 'failed'.  Returns the
    session id, or None when the action is not kickstart-related.
    Raises rhnException for any other action status.
    """
    log_debug(3, server_id, action_id, action_status, kickstart_state, next_action_type)
    # Is this a kickstart-related action?
    ks_session_id = get_kickstart_session_id(server_id, action_id)
    if ks_session_id is None:
        # Nothing more to do
        log_debug(4, "Kickstart session not found")
        return None
    # Check the current action state
    if action_status == 2:
        # Completed
        ks_status = kickstart_state
        # Get the next action - it has to be of the right type
        next_action_id = get_next_action_id(action_id, next_action_type)
    elif action_status == 3:
        # Failed
        ks_status = 'failed'
        next_action_id = None
    else:
        raise rhnException("Invalid action state %s" % action_status)
    update_ks_session_table(ks_session_id, ks_status, next_action_id,
                            server_id)
    return ks_session_id
_query_update_ks_session_table = rhnSQL.Statement("""
update rhnKickstartSession
set action_id = :action_id,
state_id = :ks_status_id,
new_server_id = :server_id
where id = :ks_session_id
""")
def update_ks_session_table(ks_session_id, ks_status, next_action_id,
                            server_id):
    """Persist the session's new state/action/server; on reaching the
    'complete' state also tear down any pre-existing guest records."""
    log_debug(4, ks_session_id, ks_status, next_action_id, server_id)
    # Translate the state label into its numeric id.
    ks_table = rhnSQL.Table('rhnKickstartSessionState', 'label')
    ks_status_id = ks_table[ks_status]['id']
    h = rhnSQL.prepare(_query_update_ks_session_table)
    h.execute(ks_session_id=ks_session_id, ks_status_id=ks_status_id,
              action_id=next_action_id, server_id=server_id)
    if ks_status == 'complete':
        delete_guests(server_id)
_query_lookup_guests_for_host = rhnSQL.Statement("""
select virtual_system_id from rhnVirtualInstance
where host_system_id = :server_id
""")
_query_delete_virtual_instances = rhnSQL.Statement("""
delete from rhnVirtualInstance where host_system_id = :server_id
""")
def delete_guests(server_id):
    """
    Callback used after a successful kickstart to remove any guest virtual
    instances, as well as their associated servers.
    """
    # First delete all the guest server objects:
    h = rhnSQL.prepare(_query_lookup_guests_for_host)
    h.execute(server_id=server_id)
    delete_server = rhnSQL.Procedure("delete_server")
    log_debug(4, "Deleting guests")
    while True:
        row = h.fetchone_dict()
        if not row:
            break
        guest_id = row['virtual_system_id']
        log_debug(4, 'Deleting guest server: %s'% guest_id)
        try:
            # virtual_system_id can be NULL; "is not None" instead of the
            # original "!= None" comparison
            if guest_id is not None:
                delete_server(guest_id)
        except rhnSQL.SQLError:
            # Best-effort: log and continue with the remaining guests
            log_error("Error deleting server: %s" % guest_id)
    # Finally delete all the virtual instances:
    log_debug(4, "Deleting all virtual instances for host")
    h = rhnSQL.prepare(_query_delete_virtual_instances)
    h.execute(server_id=server_id)
    # Commit all changes:
    try:
        rhnSQL.commit()
    except rhnSQL.SQLError as e:
        # "except X as e" is valid from Python 2.6 on and required by py3
        log_error("Error committing transaction: %s" % e)
        rhnSQL.rollback()
_query_get_next_action_id = rhnSQL.Statement("""
select a.id
from rhnAction a, rhnActionType at
where a.prerequisite = :action_id
and a.action_type = at.id
and at.label = :next_action_type
""")
def get_next_action_id(action_id, next_action_type=None):
    """Return the id of the follow-up action of the given type chained to
    action_id via its prerequisite, or None when there is none."""
    if not next_action_type:
        return None
    cursor = rhnSQL.prepare(_query_get_next_action_id)
    cursor.execute(action_id=action_id, next_action_type=next_action_type)
    row = cursor.fetchone_dict()
    return row['id'] if row else None
_query_lookup_kickstart_session_id = rhnSQL.Statement("""
select ks.id
from rhnKickstartSession ks
where (
(ks.old_server_id = :server_id and ks.new_server_id is NULL)
or ks.new_server_id = :server_id
or ks.host_server_id = :server_id
)
and ks.action_id = :action_id
""")
def get_kickstart_session_id(server_id, action_id):
    """Return the id of the kickstart session linking this server and
    action, or None when the action is not part of a kickstart."""
    h = rhnSQL.prepare(_query_lookup_kickstart_session_id)
    h.execute(server_id=server_id, action_id=action_id)
    row = h.fetchone_dict()
    if not row:
        # Nothing to do
        return None
    return row['id']
_query_insert_package_delta = rhnSQL.Statement("""
insert into rhnPackageDelta (id, label)
values (:package_delta_id, 'ks-delta-' || :package_delta_id)
""")
_query_insert_action_package_delta = rhnSQL.Statement("""
insert into rhnActionPackageDelta (action_id, package_delta_id)
values (:action_id, :package_delta_id)
""")
_query_insert_package_delta_element = rhnSQL.Statement("""
insert into rhnPackageDeltaElement
(package_delta_id, transaction_package_id)
values
(:package_delta_id,
lookup_transaction_package(:operation, :n, :e, :v, :r, :a))
""")
def schedule_kickstart_delta(server_id, kickstart_session_id,
                             installs, removes):
    """Schedule a packages.runTransaction action applying the given
    package installs/removes and move the session to
    'package_synch_scheduled'.  Returns the new action id."""
    log_debug(3, server_id, kickstart_session_id)
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='packages.runTransaction', action_name="Package delta",
        delta_time=0, scheduler=scheduler, org_id=org_id,
    )
    package_delta_id = rhnSQL.Sequence('rhn_packagedelta_id_seq').next()
    h = rhnSQL.prepare(_query_insert_package_delta)
    h.execute(package_delta_id=package_delta_id)
    h = rhnSQL.prepare(_query_insert_action_package_delta)
    h.execute(action_id=action_id, package_delta_id=package_delta_id)
    h = rhnSQL.prepare(_query_insert_package_delta_element)
    col_names = [ 'n', 'v', 'r', 'e']
    # NOTE(review): __execute_many is defined elsewhere in this module; it
    # presumably expands each (n, v, r, e) tuple into the bind variables
    # of the insert above - confirm against its definition.
    __execute_many(h, installs, col_names, operation='insert', a=None,
                   package_delta_id=package_delta_id)
    __execute_many(h, removes, col_names, operation='delete', a=None,
                   package_delta_id=package_delta_id)
    update_ks_session_table(kickstart_session_id, 'package_synch_scheduled',
                            action_id, server_id)
    return action_id
def schedule_kickstart_sync(server_id, kickstart_session_id):
    """Schedule a kickstart.schedule_sync action for the session's server
    and return its action id."""
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    # Create a new action
    action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='kickstart.schedule_sync',
        action_name="Schedule a package sync",
        delta_time=0, scheduler=scheduler, org_id=org_id,
    )
    return action_id
def _get_ks_virt_type(type_id):
    """Return the virtualization-type label for the given id, falling back
    to 'auto' when the id does not resolve to a row."""
    _query_kickstart_virt_type = rhnSQL.Statement("""
    select label
    from rhnKickstartVirtualizationType kvt
    where kvt.id = :id
    """)
    query = rhnSQL.prepare(_query_kickstart_virt_type)
    query.execute(id=type_id)
    row = query.fetchone_dict()
    # XXX: we should have better constraints on the db so this doesn't happen.
    kstype = row['label'] if row else 'auto'
    log_debug(1, "KS_TYPE: %s" % kstype)
    return kstype
def get_kickstart_session_type(server_id, action_id):
    """Return the virtualization-type label of the kickstart session
    associated with the given server/action pair."""
    session_id = get_kickstart_session_id(server_id, action_id)
    session_info = get_kickstart_session_info(session_id, server_id)
    return _get_ks_virt_type(session_info['virtualization_type'])
def subscribe_to_tools_channel(server_id, kickstart_session_id):
    """Schedule the add_tools_channel action matching the session's
    virtualization type; returns the action id, or None for types that
    need no tools channel."""
    log_debug(3)
    session = get_kickstart_session_info(kickstart_session_id, server_id)
    ks_type = _get_ks_virt_type(session['virtualization_type'])
    # Dispatch table: virt type -> (action type, human-readable name)
    tools_actions = {
        'para_host': ('kickstart_host.add_tools_channel',
                      'Subscribe server to RHN Tools channel.'),
        'para_guest': ('kickstart_guest.add_tools_channel',
                       'Subscribe guest to RHN Tools channel.'),
    }
    if ks_type not in tools_actions:
        return None
    action_type, action_name = tools_actions[ks_type]
    return rhnAction.schedule_server_action(
        server_id,
        action_type=action_type,
        action_name=action_name,
        delta_time=0, scheduler=session['scheduler'],
        org_id=session['org_id'],
    )
def schedule_virt_pkg_install(server_id, kickstart_session_id):
    """Schedule installation of the rhn-virtualization host/guest package
    matching the session's virtualization type; returns the action id, or
    None for non-virt kickstarts."""
    log_debug(3)
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    ks_type_id = row['virtualization_type']
    log_debug(1, "VIRTUALIZATION_TYPE: %s" % str(ks_type_id))
    ks_type = _get_ks_virt_type(ks_type_id)
    log_debug(1, "VIRTUALZIATION_TYPE_LABEL: %s" % str(ks_type))
    if ks_type == 'para_host':
        log_debug(1, "SCHEDULING VIRT HOST PACKAGE INSTALL...")
        action_id = rhnAction.schedule_server_action(
            server_id,
            action_type='kickstart_host.schedule_virt_host_pkg_install',
            action_name="Schedule install of rhn-virtualization-host package.",
            delta_time=0, scheduler=scheduler, org_id=org_id,
        )
    elif ks_type == 'para_guest':
        log_debug(1, "SCHEDULING VIRT GUEST PACKAGE INSTALL...")
        action_id = rhnAction.schedule_server_action(
            server_id,
            action_type='kickstart_guest.schedule_virt_guest_pkg_install',
            action_name="Schedule install of rhn-virtualization-guest package.",
            delta_time=0, scheduler=scheduler, org_id=org_id,
        )
    else:
        # Plain (non-para-virt) kickstarts need no extra package.
        log_debug(1, "NOT A VIRT KICKSTART")
        action_id = None
    return action_id
_query_ak_deploy_config = rhnSQL.Statement("""
select rt.deploy_configs
from rhnKickstartSession ks,
rhnKickstartDefaultRegToken kdrt,
rhnRegToken rt
where ks.kickstart_id = kdrt.kickstart_id
and kdrt.regtoken_id = rt.id
and ks.id = :session_id
""")
# Make sure the activation keys associated with this kickstart profile
# have enabled deploying config files. Only deploy configs if at least one
# of them has. This is replacing code that didn't work because the
# rhnFlags('registration_token') could not be set during the rhn_check call.
def ks_activation_key_deploy_config(kickstart_session_id):
    """True when any activation key of the session's kickstart profile has
    config-file deployment enabled."""
    h = rhnSQL.prepare(_query_ak_deploy_config)
    h.execute(session_id=kickstart_session_id)
    rows = h.fetchall_dict() or []
    return any(row['deploy_configs'] == 'Y' for row in rows)
_query_schedule_config_files = rhnSQL.Statement("""
insert into rhnActionConfigRevision
(id, action_id, server_id, config_revision_id)
select sequence_nextval('rhn_actioncr_id_seq'), :action_id,
server_id, config_revision_id
from (
select distinct scc.server_id,
cf.latest_config_revision_id config_revision_id
from rhnServerConfigChannel scc,
rhnConfigChannelType cct,
rhnConfigChannel cc,
rhnConfigFile cf,
rhnConfigFileState cfs
where scc.server_id = :server_id
and scc.config_channel_id = cf.config_channel_id
and cf.config_channel_id = cc.id
and cc.confchan_type_id = cct.id
and cct.label in ('normal', 'local_override')
and cf.latest_config_revision_id is not null
and cf.state_id = cfs.id
and cfs.label = 'alive'
) X
""")
def schedule_config_deploy(server_id, action_id, kickstart_session_id,
                           server_profile):
    """ schedule a configfiles.deploy action dependent on the current action

    Skips deployment (and completes the session) unless both the session
    and at least one of the profile's activation keys enable it.  When a
    server_profile is given, an rhn-config-action package install is
    chained in first.  Returns the new action id, or None when nothing
    was scheduled.
    """
    log_debug(3, server_id, action_id, kickstart_session_id)
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    deploy_configs = (row['deploy_configs'] == 'Y'
                      and ks_activation_key_deploy_config(kickstart_session_id))
    if not deploy_configs:
        # Nothing more to do here
        update_ks_session_table(kickstart_session_id, 'complete',
                                next_action_id=None, server_id=server_id)
        return None
    if server_profile:
        # Have to schedule a package deploy action
        aid = schedule_rhncfg_install(server_id, action_id, scheduler,
                                      server_profile)
    else:
        aid = action_id
    next_action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='configfiles.deploy',
        action_name='Deploy config files',
        delta_time=0, scheduler=scheduler, org_id=org_id,
        prerequisite=aid,
    )
    # Deploy all of the config files that are part of this server's config
    # channels
    h = rhnSQL.prepare(_query_schedule_config_files)
    h.execute(server_id=server_id, action_id=next_action_id)
    update_ks_session_table(kickstart_session_id, 'configuration_deploy',
                            next_action_id, server_id)
    return next_action_id
class MissingBaseChannelError(Exception):
    """Raised when the server is not subscribed to any base channel."""
    pass
def schedule_rhncfg_install(server_id, action_id, scheduler,
                            server_profile=None):
    """Ensure the server can run config actions.

    Subscribes the server to a channel providing 'rhn-config-action' if
    needed and schedules installation of any providing packages not yet in
    the server's package profile.  Returns the id of the scheduled install
    action, or the original action_id when nothing had to be installed.
    """
    capability = 'rhn-config-action'
    try:
        packages = _subscribe_server_to_capable_channels(server_id, scheduler,
                                                         capability)
    except MissingBaseChannelError:
        log_debug(2, "No base channel", server_id)
        return action_id
    if not packages:
        # No channels offer this capability
        log_debug(3, server_id, action_id,
                  "No channels to provide %s found" % capability)
        # No new action needed here
        return action_id
    if not server_profile:
        server_profile = get_server_package_profile(server_id)
    # Make the package profile a hash, for easier checking
    sphash = {}
    for p in server_profile:
        sphash[tuple(p)] = None
    packages_to_install = []
    for p in packages:
        key = (p['name'], p['version'], p['release'], p['epoch'])
        # "key in sphash" instead of dict.has_key(), which was removed in
        # Python 3
        if key not in sphash:
            packages_to_install.append(p['package_id'])
    if not packages_to_install:
        # We already have these packages installed
        log_debug(4, "No packages needed to be installed")
        return action_id
    log_debug(4, "Scheduling package install action")
    new_action_id = schedule_package_install(server_id, action_id, scheduler,
                                             packages_to_install)
    return new_action_id
_query_lookup_subscribed_server_channels = rhnSQL.Statement("""
select sc.channel_id,
case when c.parent_channel is not null then 0 else 1 end is_base_channel
from rhnServerChannel sc, rhnChannel c
where sc.server_id = :server_id
and sc.channel_id = c.id
""")
_query_lookup_unsubscribed_server_channels = rhnSQL.Statement("""
select c.id
from
-- Get all the channels available to this org
( select cfm.channel_id
from rhnChannelFamilyMembers cfm,
rhnPrivateChannelFamily pcf
where pcf.org_id = :org_id
and pcf.channel_family_id = cfm.channel_family_id
and pcf.current_members < coalesce(pcf.max_members,
pcf.current_members + 1)
union
select cfm.channel_id
from rhnChannelFamilyMembers cfm,
rhnPublicChannelFamily pcf
where pcf.channel_family_id = cfm.channel_family_id) ac,
rhnChannel c
where c.parent_channel = :base_channel_id
and c.id = ac.channel_id
and not exists (
select 1
from rhnServerChannel
where server_id = :server_id
and channel_id = c.id)
""")
def _subscribe_server_to_capable_channels(server_id, scheduler, capability):
    """Find (and if necessary subscribe to) a channel providing *capability*.

    Walks the server's subscribed channels first, then the unsubscribed
    child channels of its base channel.  Returns the list of providing
    packages from the first suitable channel, or None if no channel
    provides the capability.  Raises MissingBaseChannelError when the
    server has no base channel.
    """
    log_debug(4, server_id, scheduler, capability)
    # Look through the channels this server is already subscribed to
    h = rhnSQL.prepare(_query_lookup_subscribed_server_channels)
    h.execute(server_id=server_id)
    base_channel_id = None
    channels = []
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        channel_id = row['channel_id']
        if row['is_base_channel']:
            base_channel_id = channel_id
        # Flag 1 == already subscribed
        channels.append((channel_id, 1))
    if base_channel_id is None:
        raise MissingBaseChannelError()
    org_id = rhnSQL.Table('rhnServer', 'id')[server_id]['org_id']
    # Get the child channels this system is *not* subscribed to
    h = rhnSQL.prepare(_query_lookup_unsubscribed_server_channels)
    h.execute(server_id=server_id, org_id=org_id,
        base_channel_id=base_channel_id)
    # Flag 0 == candidate, not yet subscribed (NOTE: Python 2 map -> list)
    l = map(lambda x: (x['id'], 0), h.fetchall_dict() or [])
    channels.extend(l)
    # We now have a list of channels; look for one that provides the
    # capability
    for channel_id, is_subscribed in channels:
        log_debug(5, "Checking channel:", channel_id, "; subscribed:",
            is_subscribed)
        packages = _channel_provides_capability(channel_id, capability)
        if packages:
            if is_subscribed:
                log_debug(4, "Already subscribed; found packages", packages)
                return packages
            # Try to subscribe to it
            try:
                rhnChannel._subscribe_sql(server_id, channel_id, 0)
            except rhnChannel.SubscriptionCountExceeded:
                # Try another one
                continue
            log_debug(4, "Subscribed to", channel_id,
                "Found packages", packages)
            # We subscribed to this channel - we're done
            return packages
    # No channels provide this capability - we're done
    log_debug(4, "No channels to provide capability", capability)
    return None
_query_channel_provides_capability = rhnSQL.Statement("""
select distinct pp.package_id, pn.name, pe.version, pe.release, pe.epoch
from rhnChannelNewestPackage cnp,
rhnPackageProvides pp,
rhnPackageCapability pc,
rhnPackageName pn,
rhnPackageEVR pe
where cnp.channel_id = :channel_id
and cnp.package_id = pp.package_id
and pp.capability_id = pc.id
and pc.name = :capability
and cnp.name_id = pn.id
and cnp.evr_id = pe.id
""")
def _channel_provides_capability(channel_id, capability):
    """Return the newest packages in *channel_id* providing *capability*.

    Result is a list of dicts (package_id, name, version, release, epoch)
    or a falsy value (None) when the channel offers nothing; callers only
    test the result for truth.
    """
    log_debug(4, channel_id, capability)
    h = rhnSQL.prepare(_query_channel_provides_capability)
    h.execute(channel_id=channel_id, capability=capability)
    # The original `if not ret: return ret` branch was redundant: both
    # paths returned the same value, so just pass the result through.
    return h.fetchall_dict()
_query_insert_action_packages = rhnSQL.Statement("""
insert into rhnActionPackage
(id, action_id, name_id, evr_id, package_arch_id, parameter)
select sequence_nextval('rhn_act_p_id_seq'), :action_id, name_id, evr_id,
package_arch_id, 'upgrade'
from rhnPackage
where id = :package_id
""")
def schedule_package_install(server_id, action_id, scheduler, packages):
    """Schedule a packages.update action installing *packages* on the server.

    The new action is created with *action_id* as its prerequisite.
    Returns the new action id, or the unchanged *action_id* when there
    is nothing to install.
    """
    if not packages:
        # Nothing to do
        return action_id
    new_action_id = rhnAction.schedule_server_action(
        server_id, action_type='packages.update',
        action_name="Package update to enable configuration deployment",
        delta_time=0, scheduler=scheduler, prerequisite=action_id,
    )
    # Add entries to rhnActionPackage
    # One rhnActionPackage row per package, all referencing the new action
    action_ids = [ new_action_id ] * len(packages)
    h = rhnSQL.prepare(_query_insert_action_packages)
    h.executemany(action_id=action_ids, package_id=packages)
    return new_action_id
def __execute_many(cursor, array, col_names, **kwargs):
    """ Execute the cursor, with arguments extracted from the array
        The array is converted into a hash having col_names as keys, and adds
        whatever kwarg was specified too.
    """
    linecount = len(array)
    if not linecount:
        # Nothing to execute
        return
    # Transpose the array into a hash with col_names as keys
    params = rhnLib.transpose_to_hash(array, col_names)
    for k, v in kwargs.items():
        # Replicate each scalar kwarg so every row gets the same value
        params[k] = [ v ] * linecount
    # apply(f, (), params) is exactly f(**params); apply() was removed in
    # Python 3, so use the portable keyword-expansion form.
    cursor.executemany(**params)
def _packages_from_cursor(cursor):
    """Drain *cursor* into a list of (name, version, release, epoch) tuples."""
    packages = []
    row = cursor.fetchone_dict()
    while row:
        package_name = row['name']
        # GPG public keys are pseudo-packages that are too weird to
        # schedule as a package delta, so they are skipped entirely.
        if package_name != 'gpg-pubkey':
            packages.append((package_name, row['version'],
                             row['release'], row['epoch']))
        row = cursor.fetchone_dict()
    return packages
_query_lookup_pending_kickstart_sessions = rhnSQL.Statement("""
select ks.id, ks.action_id, NULL other_server_id
from rhnKickstartSessionState kss,
rhnKickstartSession ks
where (
(ks.old_server_id = :server_id and ks.new_server_id is null)
or ks.new_server_id = :server_id
)
and ks.state_id = kss.id
and kss.label not in ('complete', 'failed')
and (:ks_session_id is null or ks.id != :ks_session_id)
""")
_query_terminate_pending_kickstart_sessions = rhnSQL.Statement("""
update rhnKickstartSession
set action_id = NULL,
state_id = :state_id
where id = :kickstart_session_id
""")
def terminate_kickstart_sessions(server_id):
    """Fail every pending kickstart session for *server_id* except the one
    tied to the current registration token.

    Marks the sessions 'failed', invalidates their pending actions and
    returns a list of (summary, details) history entries describing what
    was canceled (empty list when nothing was pending).
    """
    log_debug(3, server_id)
    history = []
    tokens_obj = rhnFlags.get('registration_token')
    current_ks_session_id = tokens_obj.get_kickstart_session_id()
    # ks_session_id can be null
    h = rhnSQL.prepare(_query_lookup_pending_kickstart_sessions)
    h.execute(server_id=server_id, ks_session_id=current_ks_session_id)
    log_debug(4, "current_ks_session_id", current_ks_session_id)
    ks_session_ids = []
    action_ids = []
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        ks_session_ids.append(row['id'])
        action_ids.append(row['action_id'])
    if not ks_session_ids:
        # Nothing to do
        log_debug(4, "Nothing to do", server_id, current_ks_session_id)
        return []
    # Resolve the id of the 'failed' state once; reuse for every session
    ks_session_table = rhnSQL.Table('rhnKickstartSessionState', 'label')
    state_id_failed = ks_session_table['failed']['id']
    state_ids = [state_id_failed] * len(ks_session_ids)
    # Add a history item
    for ks_session_id in ks_session_ids:
        log_debug(4, "Adding history entry for session id", ks_session_id)
        history.append(("Kickstart session canceled",
            "A kickstart session for this system was canceled because "
            "the system was re-registered with token <strong>%s</strong>" %
            tokens_obj.get_names()))
    h = rhnSQL.prepare(_query_terminate_pending_kickstart_sessions)
    params = {
        'kickstart_session_id' : ks_session_ids,
        'state_id'             : state_ids,
    }
    # Terminate pending actions
    log_debug(4, "Terminating sessions", params)
    h.execute_bulk(params)
    # Invalidate pending actions
    for action_id in action_ids:
        if action_id is None:
            continue
        rhnAction.invalidate_action(server_id, action_id)
    return history
def get_kickstart_profile_package_profile(kickstart_session_id):
    """ Fetches the package profile from the kickstart profile (Not the session) """
    # Joins through rhnKickstartDefaults to reach the profile attached to
    # the kickstart itself rather than the snapshot stored on the session.
    h = rhnSQL.prepare("""
        select pn.name, pe.version, pe.release, pe.epoch, pa.label
          from rhnKickstartSession ks,
               rhnKickstartDefaults kd,
               rhnServerProfilePackage spp,
               rhnPackageName pn,
               rhnPackageEVR pe,
               rhnPackageArch pa
         where ks.id = :kickstart_session_id
           and kd.server_profile_id = spp.server_profile_id
           and spp.name_id = pn.id
           and spp.evr_id = pe.id
           and spp.package_arch_id = pa.id
           and kd.kickstart_id = ks.kickstart_id
    """)
    h.execute(kickstart_session_id=kickstart_session_id)
    # Returns (name, version, release, epoch) tuples, gpg-pubkey excluded
    return _packages_from_cursor(h)
def get_kisckstart_session_package_profile(kickstart_session_id):
    """ Fetches the package profile from the kickstart session """
    h = rhnSQL.prepare("""
        select pn.name, pe.version, pe.release, pe.epoch, pa.label
          from rhnKickstartSession ks,
               rhnServerProfilePackage spp,
               rhnPackageName pn,
               rhnPackageEVR pe,
               rhnPackageArch pa
         where ks.id = :kickstart_session_id
           and ks.server_profile_id = spp.server_profile_id
           and spp.name_id = pn.id
           and spp.evr_id = pe.id
           and spp.package_arch_id = pa.id
    """)
    h.execute(kickstart_session_id=kickstart_session_id)
    # Returns (name, version, release, epoch) tuples, gpg-pubkey excluded
    return _packages_from_cursor(h)

# Correctly spelled alias; the misspelled name ("kisckstart") is kept above
# for backward compatibility with existing callers.
get_kickstart_session_package_profile = get_kisckstart_session_package_profile
def get_server_package_profile(server_id):
    """Return the server's installed packages as a list of
    (name, version, release, epoch) tuples (gpg-pubkey excluded)."""
    # XXX misa 2005-05-25 May need to look at package arches too
    h = rhnSQL.prepare("""
        select pn.name, pe.version, pe.release, pe.epoch, pa.label
          from rhnServerPackage sp,
               rhnPackageName pn,
               rhnPackageEVR pe,
               rhnPackageArch pa
         where sp.server_id = :server_id
           and sp.name_id = pn.id
           and sp.evr_id = pe.id
           and sp.package_arch_id = pa.id
    """)
    h.execute(server_id=server_id)
    return _packages_from_cursor(h)
_query_get_kickstart_session_info = rhnSQL.Statement("""
select org_id, scheduler, deploy_configs, virtualization_type
from rhnKickstartSession
where id = :kickstart_session_id
""")
def get_kickstart_session_info(kickstart_session_id, server_id):
    """Return the rhnKickstartSession row for this session as a dict.

    *server_id* is only used to build a useful error message; raises
    rhnException when the session does not exist.
    """
    cursor = rhnSQL.prepare(_query_get_kickstart_session_info)
    cursor.execute(kickstart_session_id=kickstart_session_id)
    session_row = cursor.fetchone_dict()
    if session_row:
        return session_row
    raise rhnException("Could not fetch kickstart session id %s "
        "for server %s" % (kickstart_session_id, server_id))
_query_lookup_ks_server_profile = rhnSQL.Statement("""
select kss.server_profile_id
from rhnServerProfileType spt,
rhnServerProfile sp,
rhnKickstartSession kss
where kss.id = :ks_session_id
and kss.server_profile_id = sp.id
and sp.profile_type_id = spt.id
and spt.label = :profile_type_label
""")
_query_delete_server_profile = rhnSQL.Statement("""
delete from rhnServerProfile where id = :server_profile_id
""")
def cleanup_profile(server_id, action_id, ks_session_id, action_status):
    """Delete the 'sync_profile' server profile tied to a kickstart session
    once its action has completed successfully.

    No-op unless the session exists, the action finished with status 2
    (completed), and a sync_profile server profile is attached.
    """
    if ks_session_id is None:
        log_debug(4, "No kickstart session")
        return
    # 2 == action completed successfully
    if action_status != 2:
        log_debug(4, "Action status: %s; nothing to do" % action_status)
        return
    h = rhnSQL.prepare(_query_lookup_ks_server_profile)
    h.execute(ks_session_id=ks_session_id, profile_type_label='sync_profile')
    row = h.fetchone_dict()
    if not row:
        log_debug(4, "No server profile of the right type found; nothing to do")
        return
    server_profile_id = row['server_profile_id']
    if server_profile_id is None:
        log_debug(4, "No server profile associated with this kickstart session")
        return
    # There is an "on delete cascade" constraint on
    # rhnKickstartSession.server_profile_id and on
    # rhnServerProfilePacakge.server_profile_id
    h = rhnSQL.prepare(_query_delete_server_profile)
    h.execute(server_profile_id=server_profile_id)
| dmacvicar/spacewalk | backend/server/rhnServer/server_kickstart.py | Python | gpl-2.0 | 28,246 |
"""
Binary 16 byte is an export plugin to convert gcode into 16 byte binary segments.
An export plugin is a script in the export_plugins folder which has the functions getOutput, isArchivable and writeOutput. It is
meant to be run from the export tool. To ensure that the plugin works on platforms which do not handle file capitalization
properly, give the plugin a lower case name.
The getOutput function of this script takes a gcode text and returns that text converted into 16 byte segments. The writeOutput
function of this script takes a gcode text and writes that in a binary format converted into 16 byte segments.
Many of the functions in this script are copied from gcodec in skeinforge_utilities. They are copied rather than imported so
developers making new plugins do not have to learn about gcodec, the code here is all they need to learn.
This plugin is just a starter to make a real binary converter.
//Record structure
BinArray(0) = AscW(Inst_Code_Letter)
BinArray(1) = cInst_Code
X Data
sInt32_to_Hbytes(iXdim_1)
BinArray(2) = lsb 'short lsb
BinArray(3) = msb 'short msb
Y Data
sInt32_to_Hbytes(iYdim_2)
BinArray(4) = lsb 'short lsb
BinArray(5) = msb 'short msb
Z Data
sInt32_to_Hbytes(iZdim_3)
BinArray(6) = lsb 'short lsb
BinArray(7) = msb 'short msb
I Data
sInt32_to_Hbytes(iIdim_4)
BinArray(8) = lsb 'short lsb
BinArray(9) = msb 'short msb
J Data
sInt32_to_Hbytes(iJdim_5)
BinArray(10) = lsb 'short lsb
BinArray(11) = msb 'short msb
BinArray(12) = FP_Char
sInt32_to_Hbytes(iFP_Num)
BinArray(13) = lsb 'short lsb
BinArray(14) = bActiveFlags
BinArray(15) = AscW("#") 'End of record filler
Byte 14 is worth a few extra notes, this byte is used to define which of the axes are active, its used to get round the problem of say a
line of code with no mention of z. This would be put into the file as z = 0 as the space for this data is reserved, if we did nothing, this
would instruct the machine to go to z = 0. If we use the active flag to define the z axis as inactive the z = 0 is ignored and the value
set to the last saved value of z, i.e it does not move. If the z data is actually set to z = 0 then the axis would be set to active and
the move takes place.
"""
from __future__ import absolute_import
import __init__
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import preferences
from skeinforge_tools.skeinforge_utilities import interpret
from skeinforge_tools import polyfile
from struct import Struct
import cStringIO
import os
import sys
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getIntegerFromCharacterLengthLineOffset( character, offset, splitLine, stepLength ):
    "Get the integer after the first occurrence of the character in the split line, offset and quantized to stepLength units."
    lineFromCharacter = getStringFromCharacterSplitLine( character, splitLine )
    # Identity comparison is the correct way to test for None
    # (the original used "== None", which invokes __eq__).
    if lineFromCharacter is None:
        return 0
    # Apply the axis offset, then quantize to integer steps of stepLength
    floatValue = ( float( lineFromCharacter ) + offset ) / stepLength
    return int( round( floatValue ) )
def getIntegerFlagFromCharacterSplitLine( character, splitLine ):
    "Get 1 if the character occurs in the split line (after the first word), 0 otherwise."
    lineFromCharacter = getStringFromCharacterSplitLine( character, splitLine )
    # "is None" instead of "== None": identity test is the correct idiom
    if lineFromCharacter is None:
        return 0
    return 1
def getOutput( gcodeText, binary16BytePreferences = None ):
    """Get the exported version of a gcode file. This function, isArchivable and writeOutput are the only necessary functions in a skeinforge export plugin.
    If this plugin writes an output than should not be printed, an empty string should be returned."""
    if gcodeText == '':
        return ''
    # Fall back to preferences loaded from the user's preference archive
    if binary16BytePreferences == None:
        binary16BytePreferences = Binary16BytePreferences()
        preferences.readPreferences( binary16BytePreferences )
    # The skein accumulates one 16 byte binary record per gcode line
    skein = Binary16ByteSkein()
    skein.parseGcode( gcodeText, binary16BytePreferences )
    return skein.output.getvalue()
def getStringFromCharacterSplitLine( character, splitLine ):
    "Get the string after the first occurence of the character in the split line."
    wordIndex = indexOfStartingWithSecond( character, splitLine )
    # A negative index means the character never starts a word
    if wordIndex < 0:
        return None
    return splitLine[ wordIndex ][ 1 : ]
def getSummarizedFilename( fileName ):
    "Get the fileName basename if the file is in the current working directory, otherwise return the original full name."
    inCurrentDirectory = os.getcwd() == os.path.dirname( fileName )
    return os.path.basename( fileName ) if inCurrentDirectory else fileName
def getTextLines( text ):
    "Get the all the lines of text of a text."
    # Normalize carriage returns to newlines before splitting
    normalizedText = text.replace( '\r', '\n' )
    return normalizedText.split( '\n' )
def indexOfStartingWithSecond( letter, splitLine ):
    "Get index of the first occurrence of the given letter in the split line, starting with the second word. Return -1 if letter is not found"
    # range() instead of xrange(): identical iteration behavior, and
    # portable to Python 3 where xrange was removed.
    for wordIndex in range( 1, len( splitLine ) ):
        word = splitLine[ wordIndex ]
        # word[ : 1 ] instead of word[ 0 ] so an empty word cannot raise
        if word[ : 1 ] == letter:
            return wordIndex
    return -1
# The export tool queries these two capability flags on every plugin.
def isArchivable():
    "Return whether or not this plugin is archivable."
    return True
def isReplacable():
    "Return whether or not the output from this plugin is replacable. This should be true if the output is text and false if it is binary."
    return False
def writeFileText( fileName, fileText ):
    "Write a text to a file, reporting (not raising) on IOError."
    try:
        file = open( fileName, 'wb' )
        try:
            file.write( fileText )
        finally:
            # Guarantee the handle is closed even if write() fails
            # (the original leaked it on a failed write)
            file.close()
    except IOError:
        print( 'The file ' + fileName + ' can not be written to.' )
def writeOutput( fileName = '', gcodeText = '' ):
    "Write the exported version of a gcode file. This function, getOutput and isArchivable are the only necessary functions in a skeinforge export plugin."
    if fileName == '':
        # No file given: fall back to the first unmodified gcode file here
        unmodified = interpret.getGNUTranslatorFilesUnmodified()
        if len( unmodified ) == 0:
            print( "There are no unmodified gcode files in this folder." )
            return
        fileName = unmodified[ 0 ]
    binary16BytePreferences = Binary16BytePreferences()
    preferences.readPreferences( binary16BytePreferences )
    gcodeText = gcodec.getGcodeFileText( fileName, gcodeText )
    skeinOutput = getOutput( gcodeText, binary16BytePreferences )
    # Output name: original stem + '_export.' + configured extension
    suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_export.' + binary16BytePreferences.fileExtension.value
    writeFileText( suffixFilename, skeinOutput )
    print( 'The converted file is saved as ' + getSummarizedFilename( suffixFilename ) )
class Binary16BytePreferences:
    "A class to handle the export preferences."
    def __init__( self ):
        "Set the default preferences, execute title & preferences fileName."
        #Set the default preferences.
        # "archive" collects every preference so the preferences framework
        # can load and save them as a group.
        self.archive = []
        self.fileExtension = preferences.StringPreference().getFromValue( 'File Extension:', 'bin' )
        self.archive.append( self.fileExtension )
        self.fileNameInput = preferences.Filename().getFromFilename( [ ( 'Gcode text files', '*.gcode' ) ], 'Open File to be Converted to Binary 16 Byte', '' )
        self.archive.append( self.fileNameInput )
        # Step lengths define the quantization unit for each encoded field
        self.feedrateStepLength = preferences.FloatPreference().getFromValue( 'Feedrate Step Length (millimeters/second)', 0.1 )
        self.archive.append( self.feedrateStepLength )
        self.xStepLength = preferences.FloatPreference().getFromValue( 'X Step Length (millimeters)', 0.1 )
        self.archive.append( self.xStepLength )
        self.yStepLength = preferences.FloatPreference().getFromValue( 'Y Step Length (millimeters)', 0.1 )
        self.archive.append( self.yStepLength )
        self.zStepLength = preferences.FloatPreference().getFromValue( 'Z Step Length (millimeters)', 0.01 )
        self.archive.append( self.zStepLength )
        # Offsets are added to coordinates before quantization
        self.xOffset = preferences.FloatPreference().getFromValue( 'X Offset (millimeters)', 0.0 )
        self.archive.append( self.xOffset )
        self.yOffset = preferences.FloatPreference().getFromValue( 'Y Offset (millimeters)', 0.0 )
        self.archive.append( self.yOffset )
        self.zOffset = preferences.FloatPreference().getFromValue( 'Z Offset (millimeters)', 0.0 )
        self.archive.append( self.zOffset )
        #Create the archive, title of the execute button, title of the dialog & preferences fileName.
        self.executeTitle = 'Convert to Binary 16 Byte'
        self.saveTitle = 'Save Preferences'
        preferences.setHelpPreferencesFileNameTitleWindowPosition( self, 'skeinforge_tools.export_plugins.binary_16_byte.html' )
    def execute( self ):
        "Convert to binary 16 byte button has been clicked."
        fileNames = polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, [ '.gcode' ], self.fileNameInput.wasCancelled )
        for fileName in fileNames:
            writeOutput( fileName )
class Binary16ByteSkein:
    "A class to convert gcode into 16 byte binary segments."
    # Pre-compiled record layout: command letter (c), command number (B),
    # x, y, z, i, j, feedrate as signed shorts (6 x h), active-axis flag
    # byte (B) and the '#' end-of-record filler (c).  Hoisted to a class
    # attribute so it is not rebuilt for every parsed gcode line.
    sixteenByteStruct = Struct( 'cBhhhhhhBc' )
    def __init__( self ):
        self.output = cStringIO.StringIO()
    def parseGcode( self, gcodeText, binary16BytePreferences ):
        "Parse gcode text and store the gcode."
        self.binary16BytePreferences = binary16BytePreferences
        lines = getTextLines( gcodeText )
        for line in lines:
            self.parseLine( line )
    def parseLine( self, line ):
        "Parse a gcode line and append its 16 byte binary record to the output."
        binary16BytePreferences = self.binary16BytePreferences
        splitLine = line.split()
        if len( splitLine ) < 1:
            return
        firstWord = splitLine[ 0 ]
        if len( firstWord ) < 1:
            return
        firstLetter = firstWord[ 0 ]
        # Skip gcode comments
        if firstLetter == '(':
            return
        # Quantize each present field to its configured step length
        feedrateInteger = getIntegerFromCharacterLengthLineOffset( 'F', 0.0, splitLine, binary16BytePreferences.feedrateStepLength.value )
        iInteger = getIntegerFromCharacterLengthLineOffset( 'I', 0.0, splitLine, binary16BytePreferences.xStepLength.value )
        jInteger = getIntegerFromCharacterLengthLineOffset( 'J', 0.0, splitLine, binary16BytePreferences.yStepLength.value )
        xInteger = getIntegerFromCharacterLengthLineOffset( 'X', binary16BytePreferences.xOffset.value, splitLine, binary16BytePreferences.xStepLength.value )
        yInteger = getIntegerFromCharacterLengthLineOffset( 'Y', binary16BytePreferences.yOffset.value, splitLine, binary16BytePreferences.yStepLength.value )
        zInteger = getIntegerFromCharacterLengthLineOffset( 'Z', binary16BytePreferences.zOffset.value, splitLine, binary16BytePreferences.zStepLength.value )
        # Bit mask of the axes actually present on this line; an absent
        # axis keeps its previous value on the machine (see module docs)
        flagInteger = getIntegerFlagFromCharacterSplitLine( 'X', splitLine )
        flagInteger += 2 * getIntegerFlagFromCharacterSplitLine( 'Y', splitLine )
        flagInteger += 4 * getIntegerFlagFromCharacterSplitLine( 'Z', splitLine )
        flagInteger += 8 * getIntegerFlagFromCharacterSplitLine( 'I', splitLine )
        flagInteger += 16 * getIntegerFlagFromCharacterSplitLine( 'J', splitLine )
        flagInteger += 32 * getIntegerFlagFromCharacterSplitLine( 'F', splitLine )
        packedString = self.sixteenByteStruct.pack( firstLetter, int( firstWord[ 1 : ] ), xInteger, yInteger, zInteger, iInteger, jInteger, feedrateInteger, flagInteger, '#' )
        self.output.write( packedString )
def main( hashtable = None ):
    "Display the export dialog."
    # With command line arguments, convert the named file directly;
    # otherwise open the interactive preferences dialog.
    if len( sys.argv ) > 1:
        writeOutput( ' '.join( sys.argv[ 1 : ] ) )
    else:
        preferences.displayDialog( Binary16BytePreferences() )
if __name__ == "__main__":
    main()
| natetrue/ReplicatorG | skein_engines/skeinforge-0006/skeinforge_tools/export_plugins/binary_16_byte.py | Python | gpl-2.0 | 10,998 |
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module defines interpolators for the common OSs.
Globs and Artifacts may expand interpolations from the KnowledgeBase. This
module provides a live, on demand, KnowledgeBase.
"""
from builtins import object
import os
import re
import platform
from rekall import kb
from rekall_lib import registry
class KnowledgeBase(object):
    """Base class for live, on-demand knowledge bases.

    Subclasses override expand() to resolve %%...%% interpolation
    variables against the running system.
    """
    def __init__(self, session):
        # Rekall session; subclasses use it for logging and plugins.
        self.session = session
    def expand(self, variable):
        """Return the expansions for *variable*; the base class knows none."""
        return []
class LinuxKnowledgeBase(KnowledgeBase):
    """Expands interpolation variables using the live Linux system."""
    @registry.memoize
    def _get_users_homedir(self):
        """Return the home directory of every account in /etc/passwd."""
        homedirs = []
        # Context manager closes the passwd file promptly; the original
        # left the handle open until garbage collection.
        with open("/etc/passwd") as passwd_file:
            for user in passwd_file:
                # passwd format: name:passwd:uid:gid:gecos:home:shell
                homedirs.append(user.strip().split(":")[5])
        return homedirs
    def expand(self, variable):
        if variable == "%%users.homedir%%":
            return self._get_users_homedir()
        self.session.logging.warn("Unable to interpolate %s", variable)
        return []
class WindowsKnowledgeBase(KnowledgeBase):
    """Expands interpolation variables by querying the live Windows system."""
    @registry.memoize
    def _get_sids(self):
        """Return the user SIDs found as subkeys of HKEY_USERS."""
        result = []
        for hit in self.session.plugins.glob(
                r"HKEY_USERS\*", filesystem="Reg", root="\\",
                path_sep="\\").collect():
            path = hit["path"]
            # Keep only key names that end in a SID pattern (S-1-...-RID)
            m = re.search(
                r"(S-(\d+-)+\d+)$", path.filename.name or "", re.I)
            if m:
                result.append(m.group(1))
        return result
    @registry.memoize
    def _get_homedirs(self):
        """On windows the homedirs are the paths of the user's profile."""
        result = []
        for artifact_hit in self.session.plugins.artifact_collector(
                "WindowsRegistryProfiles"):
            for hit_result in artifact_hit.get("result", []):
                profile_path = hit_result.get("value")
                if profile_path:
                    result.append(profile_path)
        return result
    def expand(self, variable):
        """Resolve the Windows interpolation variables; [] when unknown."""
        if variable == "%%users.sid%%":
            return self._get_sids()
        if variable == "%%users.homedir%%":
            return self._get_homedirs()
        if variable == "%%environ_systemroot%%":
            return [os.environ["systemroot"]]
        return []
class KnowledgeBaseHook(kb.ParameterHook):
    """Session parameter hook supplying the platform's KnowledgeBase."""
    name = "knowledge_base"
    def calculate(self):
        # Pick the knowledge base for the OS rekall is running on.
        # NOTE: returns None on platforms other than Linux/Windows.
        if platform.system() == "Linux":
            return LinuxKnowledgeBase(self.session)
        elif platform.system() == "Windows":
            return WindowsKnowledgeBase(self.session)
| google/rekall | rekall-core/rekall/plugins/response/interpolators.py | Python | gpl-2.0 | 3,264 |
import json, sys, re
def printString(s, begin, end):
    # Skip strings the translation pipeline must ignore: asterisk-wrapped
    # comments, camera directives ("CAM0 ..."), and Z NULL markers.
    skip_pattern = r'^(\*.*\*|CAM[0-9] .*|Z ?NULL.*)$'
    if re.match(skip_pattern, s):
        return
    encoded = json.dumps(s, ensure_ascii=False)
    sys.stdout.write('{}_({}){}'.format(begin, encoded, end))
def parse(obj):
    # Walk the JSON tree depth-first, emitting translatable strings found
    # under "name" keys (single string) and "text" keys (list of strings).
    if isinstance(obj, dict):
        for key, value in obj.items():
            parse(value)
            if key == 'name' and isinstance(value, str):
                printString(value, '', '\n')
            elif key == 'text' and isinstance(value, list):
                for item in value:
                    if isinstance(item, str):
                        printString(item, '', '\n')
    elif isinstance(obj, list):
        for element in obj:
            parse(element)
# Entry point: read the JSON file named on the command line and emit its
# translatable strings.  The with-statement closes the file handle (the
# original left it open).
with open(sys.argv[1], 'r') as json_file:
    parse(json.load(json_file))
| Zabanya/warzone2100 | po/parseJson.py | Python | gpl-2.0 | 571 |
__all__ = ('db', 'format', 'query', 'sync') | Timdawson264/acd_cli | acdcli/cache/__init__.py | Python | gpl-2.0 | 43 |
import time
import logging
from autotest.client.shared import error
from virttest import utils_test
from generic.tests import file_transfer
def run(test, params, env):
    """
    live_snapshot test:
    1). Create live snapshot during big file creating
    2). Create live snapshot when guest reboot
    3). Check if live snapshot is created
    4). Shutdown guest
    :param test: Kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    @error.context_aware
    def create_snapshot(vm):
        """
        Create live snapshot:
        1). Check which monitor is used
        2). Get device info
        3). Create snapshot
        """
        error.context("Creating live snapshot ...", logging.info)
        block_info = vm.monitor.info("block")
        # QMP returns a list of dicts; the human monitor returns text that
        # must be parsed for the device name.
        if vm.monitor.protocol == 'qmp':
            device = block_info[0]["device"]
        else:
            device = "".join(block_info).split(":")[0]
        snapshot_name = params.get("snapshot_name")
        format = params.get("snapshot_format", "qcow2")
        vm.monitor.live_snapshot(device, snapshot_name, format)
        logging.info("Check snapshot is created ...")
        snapshot_info = str(vm.monitor.info("block"))
        if snapshot_name not in snapshot_info:
            logging.error(snapshot_info)
            raise error.TestFail("Snapshot doesn't exist")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    dd_timeout = int(params.get("dd_timeout", 900))
    session = vm.wait_for_login(timeout=timeout)
    def runtime_test():
        # Snapshot while a big file is being created in the guest
        try:
            clean_cmd = params.get("clean_cmd")
            file_create = params.get("file_create")
            clean_cmd += " %s" % file_create
            logging.info("Clean file before creation")
            session.cmd(clean_cmd)
            logging.info("Creating big file...")
            create_cmd = params.get("create_cmd") % file_create
            args = (create_cmd, dd_timeout)
            bg = utils_test.BackgroundTest(session.cmd_output, args)
            bg.start()
            # Give the dd some time to start writing before snapshotting
            time.sleep(5)
            create_snapshot(vm)
            if bg.is_alive():
                try:
                    bg.join()
                except Exception:
                    raise
        finally:
            session.close()
    def reboot_test():
        # Snapshot while the guest is rebooting
        try:
            bg = utils_test.BackgroundTest(vm.reboot, (session,))
            logging.info("Rebooting guest ...")
            bg.start()
            sleep_time = int(params.get("sleep_time"))
            time.sleep(sleep_time)
            create_snapshot(vm)
        finally:
            bg.join()
    def file_transfer_test():
        # Snapshot while a host<->guest file transfer is in progress
        try:
            bg_cmd = file_transfer.run_file_transfer
            args = (test, params, env)
            bg = utils_test.BackgroundTest(bg_cmd, args)
            bg.start()
            sleep_time = int(params.get("sleep_time"))
            time.sleep(sleep_time)
            create_snapshot(vm)
            if bg.is_alive():
                try:
                    bg.join()
                except Exception:
                    raise
        finally:
            session.close()
    subcommand = params.get("subcommand")
    # NOTE(review): eval() dispatches on a config-supplied value; the
    # cartesian config is trusted here, but a dispatch dict would be safer.
    eval("%s_test()" % subcommand)
| ypu/tp-qemu | qemu/tests/live_snapshot.py | Python | gpl-2.0 | 3,334 |
# upgrade.py - test the upgrade transaction using RPM
#
# Copyright (C) 2012 Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Will Woods <wwoods@redhat.com>
# For the sake of simplicity, we don't bother with yum here.
import rpm
from rpm._rpm import ts as TransactionSetCore
import os, tempfile
from threading import Thread
import logging
log = logging.getLogger(__package__+'.upgrade')
from . import _
from .util import df, hrsize
class TransactionSet(TransactionSetCore):
    """Thin wrapper over rpm's low-level transaction set.

    Adds an exception-raising run(), a check() that keeps only dependency
    and conflict problems, and add_install() which reads a package header
    straight from a file path.
    """
    # Re-export the low-level properties under their public names
    flags = TransactionSetCore._flags
    vsflags = TransactionSetCore._vsflags
    color = TransactionSetCore._color
    def run(self, callback, data, probfilter):
        """Run the transaction; raise TransactionError if rpm reports problems."""
        log.debug('ts.run()')
        rv = TransactionSetCore.run(self, callback, data, probfilter)
        problems = self.problems()
        if rv != rpm.RPMRC_OK and problems:
            raise TransactionError(problems)
        return rv
    def check(self, *args, **kwargs):
        """Check the transaction, returning only conflict/dependency problems."""
        TransactionSetCore.check(self, *args, **kwargs)
        # NOTE: rpm.TransactionSet throws out all problems but these
        return [p for p in self.problems()
                  if p.type in (rpm.RPMPROB_CONFLICT, rpm.RPMPROB_REQUIRES)]
    def add_install(self, path, key=None, upgrade=False):
        """Read the header of the rpm at *path* and add it to the transaction."""
        log.debug('add_install(%s, %s, upgrade=%s)', path, key, upgrade)
        if key is None:
            key = path
        with open(path) as fileobj:
            retval, header = self.hdrFromFdno(fileobj)
        if retval != rpm.RPMRC_OK:
            raise rpm.error("error reading package header")
        if not self.addInstall(header, key, upgrade):
            raise rpm.error("adding package to transaction failed")
    def __del__(self):
        # Make sure the rpm database is released with the object
        self.closeDB()
probtypes = { rpm.RPMPROB_NEW_FILE_CONFLICT : _('file conflicts'),
rpm.RPMPROB_FILE_CONFLICT : _('file conflicts'),
rpm.RPMPROB_OLDPACKAGE: _('older package(s)'),
rpm.RPMPROB_DISKSPACE: _('insufficient disk space'),
rpm.RPMPROB_DISKNODES: _('insufficient disk inodes'),
rpm.RPMPROB_CONFLICT: _('package conflicts'),
rpm.RPMPROB_PKG_INSTALLED: _('package already installed'),
rpm.RPMPROB_REQUIRES: _('broken dependencies'),
rpm.RPMPROB_BADARCH: _('package for incorrect arch'),
rpm.RPMPROB_BADOS: _('package for incorrect os'),
}
# --- stuff for doing useful summaries of big sets of problems
probattrs = ('type', 'pkgNEVR', 'altNEVR', 'key', '_str', '_num')
def prob2dict(p):
    """Snapshot the interesting attributes of an rpm problem as a dict."""
    snapshot = {}
    for field in probattrs:
        snapshot[field] = getattr(p, field)
    return snapshot
class ProblemSummary(object):
    """Summary of all transaction problems sharing one rpm problem type."""
    def __init__(self, probtype, problems):
        # Keep only the problems matching our type
        self.type = probtype
        self.problems = [p for p in problems if p.type == self.type]
        self.desc = probtypes.get(probtype)
        self.details = self.get_details()
    def get_details(self):
        """Subclass hook: distill self.problems into displayable data."""
        return None
    def format_details(self):
        """Subclass hook: render self.details as a list of strings."""
        raise NotImplementedError
    def _log_probs(self):
        # Debug helper: dump every problem both raw and formatted
        for p in self.problems:
            log.debug('%s -> "%s"', prob2dict(p), p)
    def __str__(self):
        if self.details:
            return "\n ".join([self.desc+':'] + self.format_details())
        else:
            return self.desc
class DiskspaceProblemSummary(ProblemSummary):
    """Summarizes RPMPROB_DISKSPACE problems per mount point."""
    def get_details(self):
        # Keep the largest shortfall reported for each mount point
        needs = dict()
        for p in self.problems:
            (mnt, size) = (p._str, p._num)
            if size > needs.get(mnt,0):
                needs[mnt] = size
        return needs
    def format_details(self):
        # NOTE: iteritems() is Python 2 only
        return [_("%s needs %s more free space") % (mnt, hrsize(size))
                    for (mnt,size) in self.details.iteritems()]
class DepProblemSummary(ProblemSummary):
    """Summarizes RPMPROB_REQUIRES problems, grouped by the package with
    the unmet requirement."""
    def get_details(self):
        self._log_probs()
        pkgprobs = dict()
        # pkgprobs['installedpkg'] = {'otherpkg1': [req1, req2, ...], ...}
        for p in self.problems:
            # NOTE: p._num is a header reference if p.pkgNEVR is installed
            thispkg, otherpkg, req = p.altNEVR, p.pkgNEVR, p._str
            if thispkg not in pkgprobs:
                pkgprobs[thispkg] = {}
            if otherpkg not in pkgprobs[thispkg]:
                pkgprobs[thispkg][otherpkg] = set()
            pkgprobs[thispkg][otherpkg].add(req)
        return pkgprobs
    def format_details(self):
        # NOTE: iteritems() is Python 2 only
        return [_("%s requires %s") % (pkg, ", ".join(pkgprob))
                 for (pkg, pkgprob) in self.details.iteritems()]
# If there is no handler for a type of problem, just return the
# rpmProblemString result for the problems
class GenericProblemSummary(ProblemSummary):
    """Fallback summary: just show rpm's own problem strings."""
    def format_details(self):
        return [str(p) for p in self.problems]
probsummary = { rpm.RPMPROB_DISKSPACE: DiskspaceProblemSummary,
rpm.RPMPROB_REQUIRES: DepProblemSummary,
}
def summarize_problems(problems):
    """Group *problems* by rpm problem type, one ProblemSummary per type."""
    seen_types = set(p.type for p in problems)
    # Types without a dedicated summarizer fall back to the generic one
    return [probsummary.get(t, GenericProblemSummary)(t, problems)
            for t in seen_types]
class TransactionError(Exception):
    """Raised when an rpm transaction reports problems.

    Carries the raw problem list plus per-type summaries.
    """
    def __init__(self, problems):
        self.problems = problems
        self.summaries = summarize_problems(problems)
def pipelogger(pipe, level=logging.INFO):
    """Read lines from the file at *pipe* and forward them to the rpm logger.

    Lines prefixed with 'D: ' are rpm debug output and are logged at DEBUG;
    everything else is logged at *level*.  Returns when the writer closes
    the pipe (EOF).
    """
    logger = logging.getLogger(__package__+".rpm")
    logger.info("opening pipe")
    with open(pipe, 'r') as pipe_file:
        for line in pipe_file:
            stripped = line.rstrip()
            if stripped.startswith('D: '):
                logger.debug(stripped[3:])
            else:
                logger.log(level, stripped)
    logger.info("got EOF")
    logger.info("exiting")
logging_to_rpm = {
logging.DEBUG: rpm.RPMLOG_DEBUG,
logging.INFO: rpm.RPMLOG_INFO,
logging.WARNING: rpm.RPMLOG_WARNING,
logging.ERROR: rpm.RPMLOG_ERR,
logging.CRITICAL: rpm.RPMLOG_CRIT,
}
class RPMUpgrade(object):
    """Drives an RPM upgrade transaction against *root*.

    rpm's log output is optionally captured through a FIFO that is drained
    by a background thread (see openpipe/pipelogger).
    """
    def __init__(self, root='/', logpipe=True, rpmloglevel=logging.INFO):
        # root: installation root for the rpm transaction
        # logpipe: when True, capture rpm's log output through a FIFO
        self.root = root
        self.ts = None
        self.logpipe = None
        rpm.setVerbosity(logging_to_rpm[rpmloglevel])
        if logpipe:
            self.logpipe = self.openpipe()
    def setup_transaction(self, pkgfiles, check_fatal=False):
        """Populate, check, order, and clean the transaction for pkgfiles.

        Raises TransactionError when ts.check() reports problems and
        check_fatal is set; otherwise *returns* a TransactionError instance
        for non-fatal problems, or None on success.
        """
        log.debug("starting")
        # initialize a transaction set
        self.ts = TransactionSet(self.root, rpm._RPMVSF_NOSIGNATURES)
        if self.logpipe:
            # route scriptlet output into the log pipe as well
            self.ts.scriptFd = self.logpipe.fileno()
        # populate the transaction set
        for pkg in pkgfiles:
            try:
                self.ts.add_install(pkg, upgrade=True)
            except rpm.error as e:
                log.warn('error adding pkg: %s', e)
                # TODO: error callback
        log.debug('ts.check()')
        # ts.check() may return None; normalize to a list
        problems = self.ts.check() or []
        if problems:
            log.info("problems with transaction check:")
            for p in problems:
                log.info(p)
            if check_fatal:
                raise TransactionError(problems=problems)
        log.debug('ts.order()')
        self.ts.order()
        log.debug('ts.clean()')
        self.ts.clean()
        log.debug('transaction is ready')
        if problems:
            return TransactionError(problems=problems)
    def openpipe(self):
        """Create a FIFO, drain it in a daemon thread, and point rpm's log
        output at it. Returns the writable end (a file object)."""
        log.debug("creating log pipe")
        # NOTE(review): mktemp is race-prone; mkfifo at least fails rather
        # than following a pre-created path -- confirm this is acceptable.
        pipefile = tempfile.mktemp(prefix='rpm-log-pipe.')
        os.mkfifo(pipefile, 0600)
        log.debug("starting logging thread")
        pipethread = Thread(target=pipelogger, name='pipelogger',
                            args=(pipefile,))
        pipethread.daemon = True
        pipethread.start()
        log.debug("opening log pipe")
        pipe = open(pipefile, 'w')
        rpm.setLogFile(pipe)
        return pipe
    def closepipe(self):
        """Detach rpm logging from the FIFO, close it, and remove the file."""
        log.debug("closing log pipe")
        rpm.setVerbosity(rpm.RPMLOG_WARNING)
        rpm.setLogFile(None)
        if self.ts:
            self.ts.scriptFd = None
        self.logpipe.close()
        os.remove(self.logpipe.name)
        self.logpipe = None
    def run_transaction(self, callback):
        """Run the prepared transaction; returns rpm's result code (0 = ok)."""
        assert callable(callback.callback)
        # all problem-filter bits set except DISKSPACE, i.e. disk-space
        # problems are still reported (presumably intentional -- see
        # DiskspaceProblemSummary above)
        probfilter = ~rpm.RPMPROB_FILTER_DISKSPACE
        rv = self.ts.run(callback.callback, None, probfilter)
        if rv != 0:
            log.info("ts completed with problems - code %u", rv)
        return rv
    def test_transaction(self, callback):
        """Run the transaction in test mode (no on-disk changes), restoring
        the transaction flags afterwards."""
        self.ts.flags = rpm.RPMTRANS_FLAG_TEST
        try:
            return self.run_transaction(callback)
        finally:
            self.ts.flags &= ~rpm.RPMTRANS_FLAG_TEST
    def __del__(self):
        # best-effort cleanup of the FIFO and logging state
        if self.logpipe:
            self.closepipe()
| dashea/redhat-upgrade-tool | redhat_upgrade_tool/upgrade.py | Python | gpl-2.0 | 9,234 |
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.widget import NoSuchElementException
from widgetastic.widget import Text
from widgetastic.widget import View
from widgetastic_patternfly import BreadCrumb
from widgetastic_patternfly import Button
from widgetastic_patternfly import Dropdown
from cfme.base.ui import BaseLoggedInPage
from cfme.common import CustomButtonEventsMixin
from cfme.common import PolicyProfileAssignable
from cfme.common import Taggable
from cfme.common import TaggableCollection
from cfme.common import TagPageView
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.storage.volume import AttachInstanceView
from cfme.storage.volume import DetachInstanceView
from cfme.storage.volume import StorageManagerVolumeAllView
from cfme.storage.volume import VolumeAddView
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.providers import get_crud_by_name
from widgetastic_manageiq import Accordion
from widgetastic_manageiq import BaseEntitiesView
from widgetastic_manageiq import ItemsToolBarViewSelector
from widgetastic_manageiq import ManageIQTree
from widgetastic_manageiq import PaginationPane
from widgetastic_manageiq import SummaryTable
from widgetastic_manageiq import Table
class StorageManagerToolbar(View):
    """The toolbar on the Storage Manager or Provider list page."""
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    # switches between Grid/Tile/List renderings of the list
    view_selector = View.nested(ItemsToolBarViewSelector)
class StorageManagerDetailsToolbar(View):
    """The toolbar on the Storage Manager or Provider detail page."""
    # button titles must match the UI tooltips exactly
    reload = Button(title='Refresh this page')
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    monitoring = Dropdown('Monitoring')
    download = Button(title='Print or export summary')
class StorageManagerEntities(BaseEntitiesView):
    """The entities on the main list Storage Manager or Provider page."""
    # locator covers both the old 'list_grid' and newer 'miq-data-table' markup
    table = Table(".//div[@id='list_grid' or @class='miq-data-table']/table")
class StorageManagerDetailsEntities(View):
    """The entities on the Storage Manager or Provider details page."""
    breadcrumb = BreadCrumb()
    # summary tables are keyed by their on-page header text
    properties = SummaryTable('Properties')
    relationships = SummaryTable('Relationships')
    smart_management = SummaryTable('Smart Management')
    status = SummaryTable('Status')
class StorageManagerDetailsAccordion(View):
    """The accordion (left sidebar) on the Storage Manager or Provider
    details page."""
    @View.nested
    class properties(Accordion):  # noqa
        tree = ManageIQTree()
    @View.nested
    class relationships(Accordion):  # noqa
        tree = ManageIQTree()
class StorageManagerView(BaseLoggedInPage):
    """Common base view for all the Storage Manager or Provider pages."""
    title = Text('.//div[@id="center_div" or @id="main-content"]//h1')

    @property
    def in_manager(self):
        """True when logged in and the object's navigation path is selected."""
        if not self.logged_in_as_current_user:
            return False
        expected_path = self.context['object'].navigation_path
        return self.navigation.currently_selected == expected_path
class StorageManagerAllView(StorageManagerView):
    """The 'all managers' page for Storage Managers or Providers."""
    @property
    def is_displayed(self):
        # the title is either the generic one or the collection-specific one
        acceptable_titles = ('Storage Managers', self.context['object'].manager_type)
        return self.in_manager and self.title.text in acceptable_titles

    toolbar = View.nested(StorageManagerToolbar)
    including_entities = View.include(StorageManagerEntities, use_parent=True)
    paginator = PaginationPane()
class ProviderStorageManagerAllView(StorageManagerAllView):
    """'All Storage Managers' listing reached from a cloud provider's
    details page (rather than from the Storage menu)."""

    @property
    def is_displayed(self):
        """Displayed when on the cloud providers nav path with the
        provider-scoped title."""
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
            # f-string for consistency with the rest of this module
            self.title.text == f"{self.context['object'].name} (All Storage Managers)"
        )
class StorageManagerDetailsView(StorageManagerView):
    """The details page for a Storage Manager or Provider."""
    @property
    def is_displayed(self):
        obj = self.context['object']
        title_matches = self.title.text == obj.expected_details_title
        crumb_matches = (
            self.entities.breadcrumb.active_location == obj.expected_details_breadcrumb)
        return title_matches and crumb_matches

    toolbar = View.nested(StorageManagerDetailsToolbar)
    sidebar = View.nested(StorageManagerDetailsAccordion)
    entities = View.nested(StorageManagerDetailsEntities)
@attr.s
class StorageManager(BaseEntity, CustomButtonEventsMixin, Taggable, PolicyProfileAssignable):
    """ Model of a storage manager in cfme

    Args:
        collection: Instance of collection
        name: Name of the object manager.
        provider: Provider
    """
    name = attr.ib()
    provider = attr.ib()
    # used to build the 'Remove this ... from Inventory' menu entry in delete()
    storage_title = 'Storage Manager'
    @property
    def navigation_path(self):
        # delegated to the owning collection (block vs object managers differ)
        return self.parent.navigation_path
    @property
    def manager_type(self):
        # delegated to the owning collection (block vs object managers differ)
        return self.parent.manager_type
    def refresh(self, cancel=False):
        """Refresh storage manager relationships and power states.

        The flash message is only checked when the action is not cancelled.
        """
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select('Refresh Relationships and Power States',
                                               handle_alert=not cancel)
        if not cancel:
            view.flash.assert_no_error()
    def delete(self, cancel=False):
        """Remove the storage manager from inventory via the Details toolbar.

        NOTE(review): unlike refresh(), the flash assertion below runs even
        when cancel=True -- confirm that is intended.
        """
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select(
            f'Remove this {self.storage_title} from Inventory',
            handle_alert=not cancel
        )
        view = self.create_view(StorageManagerDetailsView)
        view.flash.assert_no_error()
@attr.s
class BlockManagerCollection(BaseCollection, TaggableCollection):
    """Collection of block storage managers (Cinder / EBS) for
    :py:class:`StorageManager`."""
    ENTITY = StorageManager
    manager_type = 'Block Storage Managers'
    navigation_path = ['Storage', 'Block Storage', 'Managers']

    def all(self):
        """Return all block storage managers, honoring an optional
        'provider' collection filter."""
        provider_filter = self.filters.get("provider")
        block_markers = ("Cinder Manager", "EBS Storage Manager")
        providers_by_id = {
            prov.id: prov
            for prov in self.appliance.rest_api.collections.providers.all
        }
        managers = []
        for prov in providers_by_id.values():
            if any(marker in prov.name for marker in block_markers):
                managers.append(prov)
        if provider_filter:
            return [
                self.instantiate(name=mgr.name, provider=provider_filter)
                for mgr in managers
                if provider_filter.id == mgr.parent_ems_id
            ]
        return [
            self.instantiate(
                name=mgr.name,
                provider=get_crud_by_name(providers_by_id[mgr.parent_ems_id].name),
            )
            for mgr in managers
        ]
@attr.s
class ObjectManagerCollection(BaseCollection, TaggableCollection):
    """Collection of object storage managers (Swift) for
    :py:class:`StorageManager`."""
    ENTITY = StorageManager
    manager_type = 'Object Storage Managers'
    navigation_path = ['Storage', 'Object Storage', 'Managers']

    def all(self):
        """Return all object storage managers, honoring an optional
        'provider' collection filter."""
        provider_filter = self.filters.get("provider")
        providers_by_id = {
            prov.id: prov
            for prov in self.appliance.rest_api.collections.providers.all
        }
        managers = [
            prov for prov in providers_by_id.values()
            if "Swift Manager" in prov.name
        ]
        if provider_filter:
            return [
                self.instantiate(name=mgr.name, provider=provider_filter)
                for mgr in managers
                if provider_filter.id == mgr.parent_ems_id
            ]
        return [
            self.instantiate(
                name=mgr.name,
                provider=get_crud_by_name(providers_by_id[mgr.parent_ems_id].name),
            )
            for mgr in managers
        ]
@navigator.register(BlockManagerCollection, 'All')
@navigator.register(ObjectManagerCollection, 'All')
class StorageManagerAll(CFMENavigateStep):
    """Navigate to the 'All' page for either manager collection; the menu
    path comes from the collection's navigation_path attribute."""
    VIEW = StorageManagerAllView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
    def step(self, *args, **kwargs):
        self.prerequisite_view.navigation.select(*self.obj.navigation_path)
@navigator.register(StorageManager, 'Details')
class StorageManagerDetails(CFMENavigateStep):
    """Navigate to a manager's details page by locating it in the list."""
    VIEW = StorageManagerDetailsView
    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self, *args, **kwargs):
        self.prerequisite_view.toolbar.view_selector.select('List View')
        try:
            entity = self.prerequisite_view.entities.get_entity(
                name=self.obj.name, surf_pages=True)
            entity.click()
        except NoSuchElementException:
            raise ItemNotFound(f'Could not locate {self.obj.name}')
@navigator.register(StorageManager, 'Volumes')
class StorageManagerVolumesAll(CFMENavigateStep):
    """Navigate to the manager's volume list via the Relationships table."""
    VIEW = StorageManagerVolumeAllView
    prerequisite = NavigateToSibling('Details')

    def step(self, *args, **kwargs):
        relationships = self.prerequisite_view.entities.relationships
        volume_count = int(relationships.get_text_of("Cloud Volumes"))
        if not volume_count:
            raise ItemNotFound(f'{self.obj.name} has no volumes')
        relationships.click_at("Cloud Volumes")
@navigator.register(StorageManager, 'AddVolume')
class StorageManagerVolumesAdd(CFMENavigateStep):
    """Navigate to the 'Add a new Cloud Volume' form from the manager's
    volume list."""
    VIEW = VolumeAddView
    prerequisite = NavigateToSibling('Volumes')
    def step(self, *args, **kwargs):
        self.prerequisite_view.toolbar.configuration.item_select('Add a new Cloud Volume')
@navigator.register(StorageManager, 'VolumeAttachInstance')
class AttachInstance(CFMENavigateStep):
    """Navigate to the attach-volume form for the selected volume."""
    VIEW = AttachInstanceView
    prerequisite = NavigateToSibling('Volumes')

    def step(self, *args, **kwargs):
        menu_item = 'Attach selected Cloud Volume to an Instance'
        self.prerequisite_view.toolbar.configuration.item_select(menu_item)
@navigator.register(StorageManager, 'VolumeDetachInstance')
class DetachInstance(CFMENavigateStep):
    """Navigate to the detach-volume form for the selected volume."""
    VIEW = DetachInstanceView
    prerequisite = NavigateToSibling('Volumes')

    def step(self, *args, **kwargs):
        menu_item = 'Detach selected Cloud Volume from an Instance'
        self.prerequisite_view.toolbar.configuration.item_select(menu_item)
@navigator.register(StorageManager, 'EditTagsFromDetails')
class StorageManagerDetailEditTag(CFMENavigateStep):
    """Navigation destination used by the Taggable mixin to edit tags from
    the details page."""
    VIEW = TagPageView
    prerequisite = NavigateToSibling('Details')
    def step(self, *args, **kwargs):
        self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
| nachandr/cfme_tests | cfme/storage/manager.py | Python | gpl-2.0 | 11,178 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_travel_time_variable import abstract_travel_time_variable
class travel_time_hbw_am_drive_alone_from_home_to_work_alt(abstract_travel_time_variable):
    """AM drive-alone home-based-work travel time from the household's home
    zone (derived from its grid cell) to each zone alternative."""
    # dataset attribute giving the agent's origin zone
    agent_zone_id = "psrc.household.home_zone_id_from_grid_id"
    # dataset attribute giving the destination zone id
    location_zone_id = "urbansim.zone.zone_id"
    # travel_data matrix attribute holding the travel times
    travel_data_attribute = "urbansim.travel_data.am_single_vehicle_to_work_travel_time"
from numpy import ma, array
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from psrc.opus_package_info import package
from urbansim.datasets.zone_dataset import ZoneDataset
from urbansim.datasets.household_dataset import HouseholdDataset
from psrc.datasets.person_x_zone_dataset import PersonXZoneDataset
from psrc.datasets.person_dataset import PersonDataset
class Tests(opus_unittest.OpusTestCase):
    """Unit test for the travel-time variable defined above."""
    variable_name = "psrc.household_x_zone.travel_time_hbw_am_drive_alone_from_home_to_work_alt"
    def test_my_inputs(self):
        # Five households (home zones 3,1,1,1,2) crossed with three zones;
        # expected values are looked up from the travel_data matrix below.
        values = VariableTestToolbox().compute_variable(self.variable_name, \
            {
                "household":{
                    "household_id":array([1,2,3,4,5]),
                    "home_zone_id_from_grid_id":array([3, 1, 1, 1, 2]),
                },
                "zone":{
                    "zone_id":array([1, 2, 3]),
                },
                "travel_data":{
                    "from_zone_id": array([3, 3, 1, 1, 1, 2, 2, 3, 2]),
                    "to_zone_id": array([1, 3, 1, 3, 2, 1, 3, 2, 2]),
                    "am_single_vehicle_to_work_travel_time":array([1.1, 2.2, 3.3, 4.4, 0.5, 0.7, 8.7, 7.8, 1.0])}},
            dataset = "household_x_zone")
        # NOTE(review): default_value is unused -- presumably intended for a
        # masking/filling check; confirm or remove.
        default_value = travel_time_hbw_am_drive_alone_from_home_to_work_alt.default_value
        # one row per household, one column per zone
        should_be = array([[1.1, 7.8, 2.2],
                           [3.3, 0.5, 4.4], [3.3, 0.5, 4.4],
                           [3.3, 0.5, 4.4], [0.7, 1.0, 8.7]])
        self.assertEqual(ma.allclose(values, should_be, rtol=1e-3), \
            True, msg = "Error in " + self.variable_name)
if __name__=='__main__':
opus_unittest.main() | christianurich/VIBe2UrbanSim | 3rdparty/opus/src/psrc/household_x_zone/travel_time_hbw_am_drive_alone_from_home_to_work_alt.py | Python | gpl-2.0 | 2,397 |
# partitioning.py
# Disk partitioning functions.
#
# Copyright (C) 2009, 2010, 2011, 2012, 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
from operator import gt, lt
from decimal import Decimal
import functools
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
import parted
from .errors import DeviceError, PartitioningError
from .flags import flags
from .devices import Device, PartitionDevice, LUKSDevice, devicePathToName
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
import logging
log = logging.getLogger("blivet")
def partitionCompare(part1, part2):
    """ More specifically defined partitions come first.

        < 0 => part1 sorts before part2
          0 => equal sort priority
        > 0 => part1 sorts after part2

        :param part1: the first partition
        :type part1: :class:`devices.PartitionDevice`
        :param part2: the other partition
        :type part2: :class:`devices.PartitionDevice`
        :return: see above
        :rtype: int

        Priorities are accumulated as weighted adjustments to *ret*; the
        weights (500, 200, 100, 50, 25, 10) establish a strict precedence
        among the criteria since each outweighs the sum of those below it.
    """
    ret = 0
    # start sector overrides all other sorting factors
    part1_start = part1.req_start_sector
    part2_start = part2.req_start_sector
    if part1_start is not None and part2_start is None:
        return -1
    elif part1_start is None and part2_start is not None:
        return 1
    elif part1_start is not None and part2_start is not None:
        return compare(part1_start, part2_start)
    # caller-supplied base weights (negative weight pushes toward the front)
    if part1.req_base_weight:
        ret -= part1.req_base_weight
    if part2.req_base_weight:
        ret += part2.req_base_weight
    # more specific disk specs to the front of the list
    # req_disks being empty is equivalent to it being an infinitely long list
    if part1.req_disks and not part2.req_disks:
        ret -= 500
    elif not part1.req_disks and part2.req_disks:
        ret += 500
    else:
        ret += compare(len(part1.req_disks), len(part2.req_disks)) * 500
    # primary-only to the front of the list
    ret -= compare(part1.req_primary, part2.req_primary) * 200
    # fixed size requests to the front
    ret += compare(part1.req_grow, part2.req_grow) * 100
    # larger requests go to the front of the list
    ret -= compare(part1.req_base_size, part2.req_base_size) * 50
    # potentially larger growable requests go to the front
    if part1.req_grow and part2.req_grow:
        # no max size means effectively unlimited growth potential
        if not part1.req_max_size and part2.req_max_size:
            ret -= 25
        elif part1.req_max_size and not part2.req_max_size:
            ret += 25
        else:
            ret -= compare(part1.req_max_size, part2.req_max_size) * 25
    # give a little bump based on mountpoint
    if hasattr(part1.format, "mountpoint") and \
       hasattr(part2.format, "mountpoint"):
        ret += compare(part1.format.mountpoint, part2.format.mountpoint) * 10
    # clamp to the conventional -1/0/1 comparator range
    if ret > 0:
        ret = 1
    elif ret < 0:
        ret = -1
    return ret
# key function wrapping the comparator for use with sorted()/list.sort()
_partitionCompareKey = functools.cmp_to_key(partitionCompare)
def getNextPartitionType(disk, no_primary=None):
    """ Return the type of partition to create next on a disk.

        If there is only one free primary slot and an extended partition can
        be created, recommend an extended partition. If there are free
        primary slots, recommend a primary partition unless no_primary is
        set, in which case fall back to a logical partition when an extended
        partition with free logical slots exists.

        :param disk: the disk from which a partition may be allocated
        :type disk: :class:`parted.Disk`
        :keyword no_primary: refuse to return :const:`parted.PARTITION_NORMAL`
        :returns: the chosen partition type, or None if nothing fits
        :rtype: a parted PARTITION_* constant
    """
    extended = disk.getExtendedPartition()
    supports_extended = disk.supportsFeature(parted.DISK_TYPE_EXTENDED)
    logical_count = len(disk.getLogicalPartitions())
    max_logicals = disk.getMaxLogicalPartitions()
    primary_count = disk.primaryPartitionCount
    free_primary_slots = disk.maxPrimaryPartitionCount - primary_count

    if free_primary_slots <= 0:
        # no primary slots left -- only a logical inside an extended will do
        if extended and logical_count < max_logicals:
            return parted.PARTITION_LOGICAL
        return None

    if free_primary_slots == 1:
        # exactly one primary slot remains
        if not extended:
            # last chance to create an extended partition
            if supports_extended:
                return parted.PARTITION_EXTENDED
            # extended partitions not supported: primary or nothing
            if not no_primary:
                return parted.PARTITION_NORMAL
            return None
        # there is an extended and a free primary
        if not no_primary:
            return parted.PARTITION_NORMAL
        if logical_count < max_logicals:
            return parted.PARTITION_LOGICAL
        return None

    # two or more primary slots left: use one unless primaries are refused
    if not no_primary:
        return parted.PARTITION_NORMAL
    if extended and logical_count < max_logicals:
        return parted.PARTITION_LOGICAL
    return None
def getBestFreeSpaceRegion(disk, part_type, req_size, start=None,
                           boot=None, best_free=None, grow=None,
                           alignment=None):
    """ Return the "best" free region on the specified disk.

        For boot partitions, we return the first region that is large enough
        to hold the partition. For growable or extended partitions, we
        return the largest suitable free region; for all others, the
        smallest suitable one.

        Partition type (parted's PARTITION_NORMAL, PARTITION_LOGICAL) is
        taken into account when locating a suitable free region.

        For locating the best region from among several disks, the keyword
        argument best_free allows the specification of a current "best"
        free region with which to compare the best from this disk. The
        overall best region is returned.

        :param disk: the disk
        :type disk: :class:`parted.Disk`
        :param part_type: the type of partition we want to allocate
        :type part_type: one of parted's PARTITION_* constants
        :param req_size: the requested size of the partition
        :type req_size: :class:`~.size.Size`
        :keyword start: requested start sector for the partition
        :type start: int
        :keyword boot: whether this will be a bootable partition
        :type boot: bool
        :keyword best_free: current best free region for this partition
        :type best_free: :class:`parted.Geometry`
        :keyword grow: indicates whether this is a growable request
        :type grow: bool
        :keyword alignment: disk alignment requirements
        :type alignment: :class:`parted.Alignment`
    """
    log.debug("getBestFreeSpaceRegion: disk=%s part_type=%d req_size=%s "
              "boot=%s best=%s grow=%s start=%s",
              disk.device.path, part_type, req_size, boot, best_free, grow,
              start)
    extended = disk.getExtendedPartition()
    # default to a no-op alignment (any sector is aligned)
    alignment = alignment or parted.Alignment(offset=0, grainSize=1)
    for free_geom in disk.getFreeSpaceRegions():
        # align the start sector of the free region since we will be aligning
        # the start sector of the partition
        if start is not None and \
           not alignment.isAligned(free_geom, free_geom.start):
            log.debug("aligning start sector of region %d-%d", free_geom.start,
                      free_geom.end)
            try:
                aligned_start = alignment.alignUp(free_geom, free_geom.start)
            except ArithmeticError:
                aligned_start = None
            else:
                # parted tends to align down when it cannot align up
                if aligned_start < free_geom.start:
                    aligned_start = None
            if aligned_start is None:
                log.debug("failed to align start sector -- skipping region")
                continue
            # replace the region with one whose start sector is aligned
            free_geom = parted.Geometry(device=free_geom.device,
                                        start=aligned_start,
                                        end=free_geom.end)
        log.debug("checking %d-%d (%s)", free_geom.start, free_geom.end,
                  Size(free_geom.getLength(unit="B")))
        if start is not None and not free_geom.containsSector(start):
            log.debug("free region does not contain requested start sector")
            continue
        if extended:
            # logicals must lie inside the extended; normals must not
            in_extended = extended.geometry.contains(free_geom)
            if ((in_extended and part_type == parted.PARTITION_NORMAL) or
                (not in_extended and part_type == parted.PARTITION_LOGICAL)):
                log.debug("free region not suitable for request")
                continue
        if free_geom.start > disk.maxPartitionStartSector:
            log.debug("free range start sector beyond max for new partitions")
            continue
        if boot:
            # keep boot partitions fully below the 2 TiB mark
            max_boot = Size("2 TiB")
            free_start = Size(free_geom.start * disk.device.sectorSize)
            req_end = free_start + req_size
            if req_end > max_boot:
                log.debug("free range position would place boot req above %s",
                          max_boot)
                continue
        log.debug("current free range is %d-%d (%s)", free_geom.start,
                  free_geom.end,
                  Size(free_geom.getLength(unit="B")))
        free_size = Size(free_geom.getLength(unit="B"))
        # For boot partitions, we want the first suitable region we find.
        # For growable or extended partitions, we want the largest possible
        # free region.
        # For all others, we want the smallest suitable free region.
        if grow or part_type == parted.PARTITION_EXTENDED:
            op = gt
        else:
            op = lt
        if req_size <= free_size:
            if not best_free or op(free_geom.length, best_free.length):
                best_free = free_geom
                if boot:
                    # if this is a bootable partition we want to
                    # use the first freespace region large enough
                    # to satisfy the request
                    break
    return best_free
def sectorsToSize(sectors, sectorSize):
    """ Convert a length in sectors to a Size.

        :param sectors: sector count
        :type sectors: int
        :param sectorSize: size of one sector
        :type sectorSize: :class:`~.size.Size`
        :returns: the corresponding size
        :rtype: :class:`~.size.Size`
    """
    total_bytes = sectors * sectorSize
    return Size(total_bytes)
def sizeToSectors(size, sectorSize):
    """ Convert a size to a length in whole sectors (rounded down).

        :param size: size
        :type size: :class:`~.size.Size`
        :param sectorSize: sector size in bytes
        :type sectorSize: :class:`~.size.Size`
        :returns: sector count
        :rtype: int
    """
    sector_count = size // sectorSize
    return int(sector_count)
def removeNewPartitions(disks, remove, all_partitions):
    """ Remove newly added partitions from disks.

        Remove all non-existent partitions from the disks in blivet's model.

        :param: disks: list of partitioned disks
        :type disks: list of :class:`~.devices.StorageDevice`
        :param remove: list of partitions to remove
        :type remove: list of :class:`~.devices.PartitionDevice`
        :param all_partitions: list of all partitions on the disks
        :type all_partitions: list of :class:`~.devices.PartitionDevice`
        :returns: None
        :rtype: NoneType
    """
    log.debug("removing all non-preexisting partitions %s from disk(s) %s",
              ["%s(id %d)" % (p.name, p.id) for p in remove],
              [d.name for d in disks])
    for part in remove:
        if part.partedPartition and part.disk in disks:
            if part.exists:
                # we're only removing partitions that don't physically exist
                continue
            if part.isExtended:
                # these get removed last
                continue
            # detach the parted partition and clear the device's links to it
            part.disk.format.partedDisk.removePartition(part.partedPartition)
            part.partedPartition = None
            part.disk = None
    for disk in disks:
        # remove empty extended so it doesn't interfere
        extended = disk.format.extendedPartition
        if extended and not disk.format.logicalPartitions and \
           (flags.installer_mode or
            extended not in (p.partedPartition for p in all_partitions)):
            log.debug("removing empty extended partition from %s", disk.name)
            disk.format.partedDisk.removePartition(extended)
def addPartition(disklabel, free, part_type, size, start=None, end=None):
    """ Add a new partition to a disk.

        :param disklabel: the disklabel to add the partition to
        :type disklabel: :class:`~.formats.DiskLabel`
        :param free: the free region in which to place the new partition
        :type free: :class:`parted.Geometry`
        :param part_type: the partition type
        :type part_type: a parted.PARTITION_* constant
        :param size: size of the new partition
        :type size: :class:`~.size.Size`
        :keyword start: starting sector for the partition
        :type start: int
        :keyword end: ending sector for the partition
        :type end: int
        :raises: :class:`~.errors.PartitioningError`
        :returns: the newly added partition
        :rtype: :class:`parted.Partition`

        .. note::

            The new partition will be aligned using the kernel-provided optimal
            alignment unless a start sector is provided.
    """
    sectorSize = Size(disklabel.partedDevice.sectorSize)
    if start is not None:
        # explicit start: honor it exactly, computing end from size if needed
        if end is None:
            end = start + sizeToSectors(size, sectorSize) - 1
    else:
        start = free.start
        if not disklabel.alignment.isAligned(free, start):
            start = disklabel.alignment.alignNearest(free, start)
        # sun disklabels cannot have a partition starting at sector 0
        if disklabel.labelType == "sun" and start == 0:
            start = disklabel.alignment.alignUp(free, start)
        if part_type == parted.PARTITION_LOGICAL:
            # make room for logical partition's metadata
            start += disklabel.alignment.grainSize
        if start != free.start:
            log.debug("adjusted start sector from %d to %d", free.start, start)
        if part_type == parted.PARTITION_EXTENDED and not size:
            # size-less extended request: consume the whole free region
            end = free.end
            length = end - start + 1
        else:
            length = sizeToSectors(size, sectorSize)
            end = start + length - 1
            if not disklabel.endAlignment.isAligned(free, end):
                end = disklabel.endAlignment.alignUp(free, end)
                log.debug("adjusted length from %d to %d", length, end - start + 1)
        if start > end:
            raise PartitioningError(_("unable to allocate aligned partition"))
    new_geom = parted.Geometry(device=disklabel.partedDevice,
                               start=start,
                               end=end)
    max_length = disklabel.partedDisk.maxPartitionLength
    if max_length and new_geom.length > max_length:
        raise PartitioningError(_("requested size exceeds maximum allowed"))
    # create the partition and add it to the disk
    partition = parted.Partition(disk=disklabel.partedDisk,
                                 type=part_type,
                                 geometry=new_geom)
    # exact-geometry constraint: parted must not shift what we computed
    constraint = parted.Constraint(exactGeom=new_geom)
    disklabel.partedDisk.addPartition(partition=partition,
                                      constraint=constraint)
    return partition
def getFreeRegions(disks, align=False):
    """ Return a list of free regions on the specified disks.

        :param disks: list of disks
        :type disks: list of :class:`~.devices.Disk`
        :param align: align the region length to disk grainSize
        :type align: bool
        :returns: list of free regions
        :rtype: list of :class:`parted.Geometry`

        Only regions at least one alignment grain long are returned, so each
        is guaranteed to contain an aligned sector for both the start and
        end alignments of the :class:`~.formats.disklabel.DiskLabel`.
    """
    regions = []
    for disk in disks:
        grain_size = disk.format.alignment.grainSize
        for region in disk.format.partedDisk.getFreeSpaceRegions():
            if region.length < grain_size:
                # too small to hold any aligned partition
                continue
            if align:
                aligned_length = region.length - (region.length % grain_size)
                log.debug("length of free region aligned from %d to %d",
                          region.length, aligned_length)
                region.length = aligned_length
            regions.append(region)
    return regions
def updateExtendedPartitions(storage, disks):
    """ Reconcile extended partition changes with the DeviceTree.

        :param storage: the Blivet instance
        :type storage: :class:`~.Blivet`
        :param disks: list of disks
        :type disks: list of :class:`~.devices.StorageDevice`
        :returns: :const:`None`
        :rtype: NoneType
    """
    # XXX hack -- if we created any extended partitions we need to add
    #             them to the tree now
    for disk in disks:
        extended = disk.format.extendedPartition
        if not extended:
            # remove any obsolete extended partitions
            for part in storage.partitions:
                if part.disk == disk and part.isExtended:
                    if part.exists:
                        storage.destroyDevice(part)
                    else:
                        storage.devicetree._removeDevice(part, modparent=False)
            continue
        extendedName = devicePathToName(extended.getDeviceNodeName())
        device = storage.devicetree.getDeviceByName(extendedName)
        if device:
            if not device.exists:
                # created by us, update partedPartition
                device.partedPartition = extended
        # remove any extended partitions no longer backed by a parted partition
        for part in storage.partitions:
            if part.disk == disk and part.isExtended and \
               part.partedPartition not in disk.format.partitions:
                if part.exists:
                    storage.destroyDevice(part)
                else:
                    storage.devicetree._removeDevice(part, modparent=False)
        if device:
            # the tree already knows this extended partition -- nothing to add
            continue
        # This is a little odd because normally instantiating a partition
        # that does not exist means leaving self.parents empty and instead
        # populating self.req_disks. In this case, we need to skip past
        # that since this partition is already defined.
        device = PartitionDevice(extendedName, parents=disk)
        device.parents = [disk]
        device.partedPartition = extended
        # just add the device for now -- we'll handle actions at the last
        # moment to simplify things
        storage.devicetree._addDevice(device)
def doPartitioning(storage):
    """ Allocate and grow partitions.

        When this function returns without error, all PartitionDevice
        instances must have their parents set to the disk they are
        allocated on, and their partedPartition attribute set to the
        appropriate parted.Partition instance from their containing
        disk. All req_xxxx attributes must be unchanged.

        :param storage: Blivet instance
        :type storage: :class:`~.Blivet`
        :raises: :class:`~.errors.PartitioningError`
        :returns: :const:`None`
    """
    disks = [d for d in storage.partitioned if not d.protected]
    for disk in disks:
        try:
            disk.setup()
        except DeviceError as e:
            log.error("failed to set up disk %s: %s", disk.name, e)
            raise PartitioningError(_("disk %s inaccessible") % disk.name)
    # Remove any extended partition that does not have an action associated.
    #
    # XXX This does not remove the extended from the parted.Disk, but it should
    # cause removeNewPartitions to remove it since there will no longer be
    # a PartitionDevice for it.
    for partition in storage.partitions:
        if not partition.exists and partition.isExtended and \
           not storage.devicetree.findActions(device=partition, action_type="create"):
            storage.devicetree._removeDevice(partition, modparent=False, force=True)
    partitions = storage.partitions[:]
    for part in storage.partitions:
        part.req_bootable = False
        if not part.exists:
            # start over with flexible-size requests
            part.req_size = part.req_base_size
    try:
        storage.bootDevice.req_bootable = True
    except AttributeError:
        # there's no stage2 device. hopefully it's temporary.
        pass
    removeNewPartitions(disks, partitions, partitions)
    free = getFreeRegions(disks)
    try:
        allocatePartitions(storage, disks, partitions, free)
        growPartitions(disks, partitions, free, size_sets=storage.size_sets)
    except Exception:
        # re-raise unchanged; this clause exists only so the else-branch
        # below runs exclusively on success
        raise
    else:
        # Mark all growable requests as no longer growable.
        for partition in storage.partitions:
            log.debug("fixing size of %s", partition)
            partition.req_grow = False
            partition.req_base_size = partition.size
            partition.req_size = partition.size
    finally:
        # these are only valid for one allocation run
        storage.size_sets = []
    # The number and thus the name of partitions may have changed now,
    # allocatePartitions() takes care of this for new partitions, but not
    # for pre-existing ones, so we update the name of all partitions here
    for part in storage.partitions:
        # leave extended partitions as-is -- we'll handle them separately
        if part.isExtended:
            continue
        part.updateName()
    updateExtendedPartitions(storage, disks)
    # finally, verify every new partition against its format's size limits
    for part in [p for p in storage.partitions if not p.exists]:
        problem = part.checkSize()
        if problem < 0:
            raise PartitioningError(_("partition is too small for %(format)s formatting "
                                      "(allowable size is %(minSize)s to %(maxSize)s)")
                                    % {"format": part.format.name, "minSize": part.format.minSize,
                                       "maxSize": part.format.maxSize})
        elif problem > 0:
            raise PartitioningError(_("partition is too large for %(format)s formatting "
                                      "(allowable size is %(minSize)s to %(maxSize)s)")
                                    % {"format": part.format.name, "minSize": part.format.minSize,
                                       "maxSize": part.format.maxSize})
def align_size_for_disklabel(size, disklabel):
    """ Round *size* up to a whole multiple of the disklabel's grain size.

    :param size: the size to align
    :type size: :class:`~.size.Size`
    :param disklabel: the disklabel whose alignment grain is used
    :returns: the smallest multiple of the grain size >= size
    :rtype: :class:`~.size.Size`
    """
    grain = Size(disklabel.alignment.grainSize)
    whole, remainder = divmod(size, grain)
    if remainder:
        # partial grain -- round up to the next full one
        return (whole + 1) * grain
    return whole * grain
def allocatePartitions(storage, disks, partitions, freespace):
    """ Allocate partitions based on requested features.
    :param storage: a Blivet instance
    :type storage: :class:`~.Blivet`
    :param disks: list of usable disks
    :type disks: list of :class:`~.devices.StorageDevice`
    :param partitions: list of partitions
    :type partitions: list of :class:`~.devices.PartitionDevice`
    :param freespace: list of free regions on disks
    :type freespace: list of :class:`parted.Geometry`
    :raises: :class:`~.errors.PartitioningError`
    :returns: :const:`None`
    Non-existing partitions are sorted according to their requested
    attributes, and then allocated.
    The basic approach to sorting is that the more specifically-
    defined a request is, the earlier it will be allocated. See
    :func:`partitionCompare` for details of the sorting criteria.
    The :class:`~.devices.PartitionDevice` instances will have their name
    and parents attributes set once they have been allocated.
    """
    log.debug("allocatePartitions: disks=%s ; partitions=%s",
              [d.name for d in disks],
              ["%s(id %d)" % (p.name, p.id) for p in partitions])
    new_partitions = [p for p in partitions if not p.exists]
    # most specifically-defined requests are allocated first (see docstring)
    new_partitions.sort(key=_partitionCompareKey)
    # the following dicts all use device path strings as keys
    disklabels = {} # DiskLabel instances for each disk
    all_disks = {} # StorageDevice for each disk
    for disk in disks:
        if disk.path not in disklabels.keys():
            disklabels[disk.path] = disk.format
            all_disks[disk.path] = disk
    # detach any previously-allocated new partitions so we start clean
    removeNewPartitions(disks, new_partitions, partitions)
    for _part in new_partitions:
        if _part.partedPartition and _part.isExtended:
            # ignore new extendeds as they are implicit requests
            continue
        # obtain the set of candidate disks
        req_disks = []
        if _part.req_disks:
            # use the requested disk set
            req_disks = _part.req_disks
        else:
            # no disks specified means any disk will do
            req_disks = disks
        # sort the disks, making sure the boot disk is first
        req_disks.sort(key=storage.compareDisksKey)
        for disk in req_disks:
            if storage.bootDisk and disk == storage.bootDisk:
                boot_index = req_disks.index(disk)
                req_disks.insert(0, req_disks.pop(boot_index))
        # requests weighted above 1000 get boot-style (first-fit) placement
        boot = _part.req_base_weight > 1000
        log.debug("allocating partition: %s ; id: %d ; disks: %s ;\n"
                  "boot: %s ; primary: %s ; size: %s ; grow: %s ; "
                  "max_size: %s ; start: %s ; end: %s", _part.name, _part.id,
                  [d.name for d in req_disks],
                  boot, _part.req_primary,
                  _part.req_size, _part.req_grow,
                  _part.req_max_size, _part.req_start_sector,
                  _part.req_end_sector)
        free = None
        use_disk = None
        part_type = None
        growth = 0 # in sectors
        # loop through disks
        for _disk in req_disks:
            disklabel = disklabels[_disk.path]
            best = None
            current_free = free
            # for growable requests, we don't want to pass the current free
            # geometry to getBestFreeRegion -- this allows us to try the
            # best region from each disk and choose one based on the total
            # growth it allows
            if _part.req_grow:
                current_free = None
            log.debug("checking freespace on %s", _disk.name)
            if _part.req_start_sector is None:
                req_size = align_size_for_disklabel(_part.req_size, disklabel)
            else:
                # don't align size if start sector was specified
                req_size = _part.req_size
            if req_size != _part.req_size:
                log.debug("size %s rounded up to %s for disk %s",
                          _part.req_size, req_size, _disk.name)
            new_part_type = getNextPartitionType(disklabel.partedDisk)
            if new_part_type is None:
                # can't allocate any more partitions on this disk
                log.debug("no free partition slots on %s", _disk.name)
                continue
            if _part.req_primary and new_part_type != parted.PARTITION_NORMAL:
                if (disklabel.partedDisk.primaryPartitionCount <
                        disklabel.partedDisk.maxPrimaryPartitionCount):
                    # don't fail to create a primary if there are only three
                    # primary partitions on the disk (#505269)
                    new_part_type = parted.PARTITION_NORMAL
                else:
                    # we need a primary slot and none are free on this disk
                    log.debug("no primary slots available on %s", _disk.name)
                    continue
            elif _part.req_partType is not None and \
                    new_part_type != _part.req_partType:
                new_part_type = _part.req_partType
            best = getBestFreeSpaceRegion(disklabel.partedDisk,
                                          new_part_type,
                                          req_size,
                                          start=_part.req_start_sector,
                                          best_free=current_free,
                                          boot=boot,
                                          grow=_part.req_grow,
                                          alignment=disklabel.alignment)
            if best == free and not _part.req_primary and \
               new_part_type == parted.PARTITION_NORMAL:
                # see if we can do better with a logical partition
                log.debug("not enough free space for primary -- trying logical")
                new_part_type = getNextPartitionType(disklabel.partedDisk,
                                                    no_primary=True)
                if new_part_type:
                    best = getBestFreeSpaceRegion(disklabel.partedDisk,
                                                  new_part_type,
                                                  req_size,
                                                  start=_part.req_start_sector,
                                                  best_free=current_free,
                                                  boot=boot,
                                                  grow=_part.req_grow,
                                                  alignment=disklabel.alignment)
            # found a candidate region on this disk that differs from the
            # current choice -- decide whether to switch to it
            if best and free != best:
                update = True
                allocated = new_partitions[:new_partitions.index(_part)+1]
                if any([p.req_grow for p in allocated]):
                    log.debug("evaluating growth potential for new layout")
                    new_growth = 0
                    for disk_path in disklabels.keys():
                        log.debug("calculating growth for disk %s", disk_path)
                        # Now we check, for growable requests, which of the two
                        # free regions will allow for more growth.
                        # set up chunks representing the disks' layouts
                        temp_parts = []
                        for _p in new_partitions[:new_partitions.index(_part)]:
                            if _p.disk.path == disk_path:
                                temp_parts.append(_p)
                        # add the current request to the temp disk to set up
                        # its partedPartition attribute with a base geometry
                        if disk_path == _disk.path:
                            _part_type = new_part_type
                            _free = best
                            if new_part_type == parted.PARTITION_EXTENDED and \
                               new_part_type != _part.req_partType:
                                addPartition(disklabel, best, new_part_type,
                                             None)
                                _part_type = parted.PARTITION_LOGICAL
                                _free = getBestFreeSpaceRegion(disklabel.partedDisk,
                                                               _part_type,
                                                               req_size,
                                                               start=_part.req_start_sector,
                                                               boot=boot,
                                                               grow=_part.req_grow,
                                                               alignment=disklabel.alignment)
                                if not _free:
                                    log.info("not enough space after adding "
                                             "extended partition for growth test")
                                    if new_part_type == parted.PARTITION_EXTENDED:
                                        e = disklabel.extendedPartition
                                        disklabel.partedDisk.removePartition(e)
                                    continue
                            temp_part = None
                            try:
                                temp_part = addPartition(disklabel,
                                                         _free,
                                                         _part_type,
                                                         req_size,
                                                         _part.req_start_sector,
                                                         _part.req_end_sector)
                            except ArithmeticError as e:
                                log.debug("failed to allocate aligned partition "
                                          "for growth test")
                                continue
                            _part.partedPartition = temp_part
                            _part.disk = _disk
                            temp_parts.append(_part)
                        chunks = getDiskChunks(all_disks[disk_path],
                                               temp_parts, freespace)
                        # grow all growable requests
                        disk_growth = 0 # in sectors
                        disk_sector_size = Size(disklabels[disk_path].partedDevice.sectorSize)
                        for chunk in chunks:
                            chunk.growRequests()
                            # record the growth for this layout
                            new_growth += chunk.growth
                            disk_growth += chunk.growth
                            for req in chunk.requests:
                                log.debug("request %d (%s) growth: %d (%s) "
                                          "size: %s",
                                          req.device.id,
                                          req.device.name,
                                          req.growth,
                                          sectorsToSize(req.growth,
                                                        disk_sector_size),
                                          sectorsToSize(req.growth + req.base,
                                                        disk_sector_size))
                        log.debug("disk %s growth: %d (%s)",
                                  disk_path, disk_growth,
                                  sectorsToSize(disk_growth,
                                                disk_sector_size))
                        # undo the temporary allocation made for this test
                        if temp_part:
                            disklabel.partedDisk.removePartition(temp_part)
                            _part.partedPartition = None
                            _part.disk = None
                            if new_part_type == parted.PARTITION_EXTENDED:
                                e = disklabel.extendedPartition
                                disklabel.partedDisk.removePartition(e)
                    log.debug("total growth: %d sectors", new_growth)
                    # update the chosen free region unless the previous
                    # choice yielded greater total growth
                    if free is not None and new_growth <= growth:
                        log.debug("keeping old free: %d <= %d", new_growth,
                                  growth)
                        update = False
                    else:
                        growth = new_growth
                if update:
                    # now we know we are choosing a new free space,
                    # so update the disk and part type
                    log.debug("updating use_disk to %s, type: %s",
                              _disk.name, new_part_type)
                    part_type = new_part_type
                    use_disk = _disk
                    log.debug("new free: %d-%d / %s", best.start,
                              best.end,
                              Size(best.getLength(unit="B")))
                    log.debug("new free allows for %d sectors of growth", growth)
                    free = best
            if free and boot:
                # if this is a bootable partition we want to
                # use the first freespace region large enough
                # to satisfy the request
                log.debug("found free space for bootable request")
                break
        if free is None:
            raise PartitioningError(_("Unable to allocate requested partition scheme."))
        # the disk loop above settled on use_disk/part_type/free; now create
        # the partition for real on the chosen disk
        _disk = use_disk
        disklabel = _disk.format
        if _part.req_start_sector is None:
            aligned_size = align_size_for_disklabel(_part.req_size, disklabel)
        else:
            # not aligned
            aligned_size = _part.req_size
        # create the extended partition if needed
        if part_type == parted.PARTITION_EXTENDED and \
           part_type != _part.req_partType:
            log.debug("creating extended partition")
            addPartition(disklabel, free, part_type, None)
            # now the extended partition exists, so set type to logical
            part_type = parted.PARTITION_LOGICAL
            # recalculate freespace
            log.debug("recalculating free space")
            free = getBestFreeSpaceRegion(disklabel.partedDisk,
                                          part_type,
                                          aligned_size,
                                          start=_part.req_start_sector,
                                          boot=boot,
                                          grow=_part.req_grow,
                                          alignment=disklabel.alignment)
            if not free:
                raise PartitioningError(_("not enough free space after "
                                          "creating extended partition"))
        try:
            partition = addPartition(disklabel, free, part_type, aligned_size,
                                     _part.req_start_sector, _part.req_end_sector)
        except ArithmeticError:
            raise PartitioningError(_("failed to allocate aligned partition"))
        log.debug("created partition %s of %s and added it to %s",
                  partition.getDeviceNodeName(),
                  Size(partition.getLength(unit="B")),
                  disklabel.device)
        # this one sets the name
        _part.partedPartition = partition
        _part.disk = _disk
        # parted modifies the partition in the process of adding it to
        # the disk, so we need to grab the latest version...
        _part.partedPartition = disklabel.partedDisk.getPartitionByPath(_part.path)
class Request(object):
    """ A partition request.
    Request instances are used for calculating how much to grow
    partitions.
    """
    def __init__(self, device):
        """
        :param device: the device being requested
        :type device: :class:`~.devices.StorageDevice`
        """
        self.device = device
        # growth bookkeeping, all in sectors
        self.growth = 0
        self.max_growth = 0
        self.base = 0
        # a request that cannot grow is finished from the start
        self.done = not getattr(device, "req_grow", True)

    @property
    def reserveRequest(self):
        """ Requested reserved fixed extra space for the request (in sectors) """
        # generic requests reserve nothing beyond their base
        return 0

    @property
    def growable(self):
        """ True if this request is growable. """
        return getattr(self.device, "req_grow", True)

    @property
    def id(self):
        """ The id of the Device instance this request corresponds to. """
        return self.device.id

    def __repr__(self):
        fields = {"type": self.__class__.__name__, "id": self.id,
                  "name": self.device.name, "growable": self.growable,
                  "base": self.base, "growth": self.growth,
                  "max_grow": self.max_growth, "done": self.done}
        template = ("%(type)s instance --\n"
                    "id = %(id)s name = %(name)s growable = %(growable)s\n"
                    "base = %(base)d growth = %(growth)d max_grow = %(max_grow)d\n"
                    "done = %(done)s")
        return template % fields
class PartitionRequest(Request):
    def __init__(self, partition):
        """
        :param partition: the partition being requested
        :type partition: :class:`~.devices.PartitionDevice`
        """
        super(PartitionRequest, self).__init__(partition)
        parted_partition = partition.partedPartition
        # base is the partition's current length, in sectors
        self.base = parted_partition.geometry.length
        sector_size = Size(parted_partition.disk.device.sectorSize)
        if partition.req_grow:
            # candidate ceilings on total size (sectors); drop unset (<= 0)
            candidates = [sizeToSectors(partition.req_max_size, sector_size),
                          sizeToSectors(partition.format.maxSize, sector_size),
                          parted_partition.disk.maxPartitionLength]
            limits = [limit for limit in candidates if limit > 0]
            if limits:
                self.max_growth = min(limits) - self.base
                if self.max_growth <= 0:
                    # max size is less than or equal to base, so we're done
                    self.done = True
class LVRequest(Request):
    def __init__(self, lv):
        """
        :param lv: the logical volume being requested
        :type lv: :class:`~.devices.LVMLogicalVolumeDevice`
        """
        super(LVRequest, self).__init__(lv)
        vg = lv.vg
        # Base is measured in physical extents, rounded up so that a growable
        # request's first growth fills the remainder of any unused extent.
        self.base = int(vg.align(lv.req_size, roundup=True) // vg.peSize)
        if lv.req_grow:
            limits = []
            for bound in (vg.align(lv.req_max_size), vg.align(lv.format.maxSize)):
                if bound > Size(0):
                    limits.append(int(bound // vg.peSize))
            if limits:
                self.max_growth = min(limits) - self.base
                if self.max_growth <= 0:
                    # max size is less than or equal to base, so we're done
                    self.done = True

    @property
    def reserveRequest(self):
        reserve = super(LVRequest, self).reserveRequest
        if self.device.cached:
            # reserve room (in extents) for the cache plus its metadata
            cache_total = self.device.cache.size + self.device.cache.md_size
            reserve += int(self.device.vg.align(cache_total, roundup=True) / self.device.vg.peSize)
        return reserve
class Chunk(object):
    """ A free region from which devices will be allocated """
    def __init__(self, length, requests=None):
        """
        :param length: the length of the chunk (units vary with subclass)
        :type length: int
        :keyword requests: list of requests to add
        :type requests: list of :class:`Request`
        """
        if not hasattr(self, "path"):
            self.path = None
        self.length = length
        self.pool = length # free unit count
        self.base = 0 # sum of growable requests' base
        # sizes
        self.requests = [] # list of Request instances
        if isinstance(requests, list):
            for req in requests:
                self.addRequest(req)
        # requests temporarily excluded from growth (see reclaim)
        self.skip_list = []
    def __repr__(self):
        s = ("%(type)s instance --\n"
             "device = %(device)s length = %(length)d size = %(size)s\n"
             "remaining = %(rem)d pool = %(pool)d" %
             {"type": self.__class__.__name__, "device": self.path,
              "length": self.length, "size": self.lengthToSize(self.length),
              "pool": self.pool, "rem": self.remaining})
        return s
    # Force str and unicode types in case path is unicode
    def _toString(self):
        s = "%d on %s" % (self.length, self.path)
        return s
    def __str__(self):
        return stringize(self._toString())
    def __unicode__(self):
        return unicodeize(self._toString())
    def addRequest(self, req):
        """ Add a request to this chunk.
        :param req: the request to add
        :type req: :class:`Request`
        """
        log.debug("adding request %d to chunk %s", req.device.id, self)
        self.requests.append(req)
        # the request's base and reserved space come out of the free pool
        self.pool -= req.base
        self.pool -= req.reserveRequest
        if not req.done:
            self.base += req.base
    def reclaim(self, request, amount):
        """ Reclaim units from a request and return them to the pool.
        :param request: the request to reclaim units from
        :type request: :class:`Request`
        :param amount: number of units to reclaim from the request
        :type amount: int
        :raises: ValueError
        :returns: None
        """
        log.debug("reclaim: %s %d (%s)", request, amount, self.lengthToSize(amount))
        if request.growth < amount:
            log.error("tried to reclaim %d from request with %d of growth",
                      amount, request.growth)
            raise ValueError(_("cannot reclaim more than request has grown"))
        request.growth -= amount
        self.pool += amount
        # put this request in the skip list so we don't try to grow it the
        # next time we call growRequests to allocate the newly re-acquired pool
        if request not in self.skip_list:
            self.skip_list.append(request)
    @property
    def growth(self):
        """ Sum of growth for all requests in this chunk. """
        return sum(r.growth for r in self.requests)
    @property
    def hasGrowable(self):
        """ True if this chunk contains at least one growable request. """
        for req in self.requests:
            if req.growable:
                return True
        return False
    @property
    def remaining(self):
        """ Number of requests still being grown in this chunk. """
        return len([d for d in self.requests if not d.done])
    @property
    def done(self):
        """ True if we are finished growing all requests in this chunk. """
        return self.remaining == 0 or self.pool == 0
    def maxGrowth(self, req):
        # subclasses may impose additional per-request limits (see DiskChunk)
        return req.max_growth
    def lengthToSize(self, length):
        # identity here; subclasses convert units (sectors/extents) to Size
        return length
    def sizeToLength(self, size):
        # identity here; subclasses convert Size to units (sectors/extents)
        return size
    def trimOverGrownRequest(self, req, base=None):
        """ Enforce max growth and return extra units to the pool.
        :param req: the request to trim
        :type req: :class:`Request`
        :keyword base: base unit count to adjust if req is done growing
        :type base: int
        :returns: the new base or None if no base was given
        :rtype: int or None
        """
        max_growth = self.maxGrowth(req)
        if max_growth and req.growth >= max_growth:
            if req.growth > max_growth:
                # we've grown beyond the maximum. put some back.
                extra = req.growth - max_growth
                log.debug("taking back %d (%s) from %d (%s)",
                          extra, self.lengthToSize(extra),
                          req.device.id, req.device.name)
                self.pool += extra
                req.growth = max_growth
            # We're done growing this request, so it no longer
            # factors into the growable base used to determine
            # what fraction of the pool each request gets.
            if base is not None:
                base -= req.base
            req.done = True
        return base
    def sortRequests(self):
        # no ordering by default; subclasses sort by start sector or LV key
        pass
    def growRequests(self, uniform=False):
        """ Calculate growth amounts for requests in this chunk.
        :keyword uniform: grow requests uniformly instead of proportionally
        :type uniform: bool
        The default mode of growth is as follows: given a total number of
        available units, requests receive an allotment proportional to their
        base sizes. That means a request with base size 1000 will grow four
        times as fast as a request with base size 250.
        Under uniform growth, all requests receive an equal portion of the
        free units.
        """
        log.debug("Chunk.growRequests: %r", self)
        self.sortRequests()
        for req in self.requests:
            log.debug("req: %r", req)
        # we use this to hold the base for the next loop through the
        # chunk's requests since we want the base to be the same for
        # all requests in any given growth iteration
        new_base = self.base
        last_pool = 0 # used to track changes to the pool across iterations
        while not self.done and self.pool and last_pool != self.pool:
            last_pool = self.pool # to keep from getting stuck
            self.base = new_base
            if uniform:
                growth = int(last_pool / self.remaining)
            log.debug("%d requests and %s (%s) left in chunk",
                      self.remaining, self.pool, self.lengthToSize(self.pool))
            for p in self.requests:
                if p.done or p in self.skip_list:
                    continue
                if not uniform:
                    # Each request is allocated free units from the pool
                    # based on the relative _base_ sizes of the remaining
                    # growable requests.
                    share = Decimal(p.base) / Decimal(self.base)
                    growth = int(share * last_pool) # truncate, don't round
                p.growth += growth
                self.pool -= growth
                log.debug("adding %s (%s) to %d (%s)",
                          growth, self.lengthToSize(growth),
                          p.device.id, p.device.name)
                new_base = self.trimOverGrownRequest(p, base=new_base)
                log.debug("new grow amount for request %d (%s) is %s "
                          "units, or %s",
                          p.device.id, p.device.name, p.growth,
                          self.lengthToSize(p.growth))
            if self.pool:
                # allocate any leftovers in pool to the first partition
                # that can still grow
                for p in self.requests:
                    if p.done or p in self.skip_list:
                        continue
                    growth = self.pool
                    p.growth += growth
                    self.pool = 0
                    log.debug("adding %s (%s) to %d (%s)",
                              growth, self.lengthToSize(growth),
                              p.device.id, p.device.name)
                    self.trimOverGrownRequest(p)
                    log.debug("new grow amount for request %d (%s) is %s "
                              "units, or %s",
                              p.device.id, p.device.name, p.growth,
                              self.lengthToSize(p.growth))
                    if self.pool == 0:
                        break
        # requests that were skipped over this time through are back on the
        # table next time
        self.skip_list = []
class DiskChunk(Chunk):
    """ A free region on disk from which partitions will be allocated """
    def __init__(self, geometry, requests=None):
        """
        :param geometry: the free region this chunk represents
        :type geometry: :class:`parted.Geometry`
        :keyword requests: list of requests to add initially
        :type requests: list of :class:`PartitionRequest`
        .. note::
            We will limit partition growth based on disklabel limitations
            for partition end sector, so a 10TB disk with an msdos disklabel
            will be treated like a 2TiB disk.
        .. note::
            If you plan to allocate aligned partitions you should pass in an
            aligned geometry instance.
        """
        self.geometry = geometry # parted.Geometry
        self.sectorSize = Size(self.geometry.device.sectorSize)
        self.path = self.geometry.device.path
        # chunk length is the region's length in sectors
        super(DiskChunk, self).__init__(self.geometry.length, requests=requests)
    def __repr__(self):
        s = super(DiskChunk, self).__str__()
        s += (" start = %(start)d end = %(end)d\n"
              "sectorSize = %(sectorSize)s\n" %
              {"start": self.geometry.start, "end": self.geometry.end,
               "sectorSize": self.sectorSize})
        return s
    # Force str and unicode types in case path is unicode
    def _toString(self):
        s = "%d (%d-%d) on %s" % (self.length, self.geometry.start,
                                  self.geometry.end, self.path)
        return s
    def __str__(self):
        return stringize(self._toString())
    def __unicode__(self):
        return unicodeize(self._toString())
    def addRequest(self, req):
        """ Add a request to this chunk.
        :param req: the request to add
        :type req: :class:`PartitionRequest`
        """
        if not isinstance(req, PartitionRequest):
            raise ValueError(_("DiskChunk requests must be of type "
                               "PartitionRequest"))
        if not self.requests:
            # when adding the first request to the chunk, adjust the pool
            # size to reflect any disklabel-specific limits on end sector
            max_sector = req.device.partedPartition.disk.maxPartitionStartSector
            chunk_end = min(max_sector, self.geometry.end)
            if chunk_end <= self.geometry.start:
                # this should clearly never be possible, but if the chunk's
                # start sector is beyond the maximum allowed end sector, we
                # cannot continue
                log.error("chunk start sector is beyond disklabel maximum")
                raise PartitioningError(_("partitions allocated outside "
                                          "disklabel limits"))
            new_pool = chunk_end - self.geometry.start + 1
            if new_pool != self.pool:
                log.debug("adjusting pool to %d based on disklabel limits", new_pool)
                self.pool = new_pool
        super(DiskChunk, self).addRequest(req)
    def maxGrowth(self, req):
        """ Return the maximum possible growth for a request.
        :param req: the request
        :type req: :class:`PartitionRequest`
        """
        req_end = req.device.partedPartition.geometry.end
        req_start = req.device.partedPartition.geometry.start
        # Establish the current total number of sectors of growth for requests
        # that lie before this one within this chunk. We add the total count
        # to this request's end sector to obtain the end sector for this
        # request, including growth of earlier requests but not including
        # growth of this request. Maximum growth values are obtained using
        # this end sector and various values for maximum end sector.
        growth = 0
        for request in self.requests:
            if request.device.partedPartition.geometry.start < req_start:
                growth += request.growth
        req_end += growth
        # obtain the set of possible maximum sectors-of-growth values for this
        # request and use the smallest
        limits = []
        # disklabel-specific maximum sector
        max_sector = req.device.partedPartition.disk.maxPartitionStartSector
        limits.append(max_sector - req_end)
        # 2TB limit on bootable partitions, regardless of disklabel
        if req.device.req_bootable:
            max_boot = sizeToSectors(Size("2 TiB"), self.sectorSize)
            limits.append(max_boot - req_end)
        # request-specific maximum (see Request.__init__, above, for details)
        if req.max_growth:
            limits.append(req.max_growth)
        max_growth = min(limits)
        return max_growth
    def lengthToSize(self, length):
        # convert a sector count to a Size
        return sectorsToSize(length, self.sectorSize)
    def sizeToLength(self, size):
        # convert a Size to a sector count
        return sizeToSectors(size, self.sectorSize)
    def sortRequests(self):
        # sort the partitions by start sector
        self.requests.sort(key=lambda r: r.device.partedPartition.geometry.start)
class VGChunk(Chunk):
    """ A free region in an LVM VG from which LVs will be allocated """
    def __init__(self, vg, requests=None):
        """
        :param vg: the volume group whose free space this chunk represents
        :type vg: :class:`~.devices.LVMVolumeGroupDevice`
        :keyword requests: list of requests to add initially
        :type requests: list of :class:`LVRequest`
        """
        self.vg = vg
        self.path = vg.path
        # chunk length is the VG's extent count minus reserved extents
        reserved_extents = int(vg.align(vg.reservedSpace, roundup=True) / vg.peSize)
        super(VGChunk, self).__init__(vg.extents - reserved_extents,
                                      requests=requests)

    def addRequest(self, req):
        """ Add a request to this chunk.
        :param req: the request to add
        :type req: :class:`LVRequest`
        """
        if not isinstance(req, LVRequest):
            raise ValueError(_("VGChunk requests must be of type "
                               "LVRequest"))
        super(VGChunk, self).addRequest(req)

    def lengthToSize(self, length):
        # convert an extent count to a Size
        return self.vg.peSize * length

    def sizeToLength(self, size):
        # convert a Size to an extent count
        return int(size / self.vg.peSize)

    def sortRequests(self):
        # order the LV requests using the module's LV comparison key
        self.requests.sort(key=_lvCompareKey)
class ThinPoolChunk(VGChunk):
    """ A free region in an LVM thin pool from which LVs will be allocated """
    def __init__(self, pool, requests=None):
        """
        :param pool: the thin pool whose free space this chunk represents
        :type pool: :class:`~.devices.LVMThinPoolDevice`
        :keyword requests: list of requests to add initially
        :type requests: list of :class:`LVRequest`
        """
        self.vg = pool.vg # only used for align, &c
        self.path = pool.path
        # NOTE(review): unlike VGChunk this value is not truncated with
        # int(), so it may be non-integral if pool.size is not an exact
        # multiple of peSize -- confirm intended Size-division semantics
        usable_extents = (pool.size / pool.vg.peSize)
        # deliberately skip VGChunk.__init__ (it derives length from a VG)
        super(VGChunk, self).__init__(usable_extents, requests=requests) # pylint: disable=bad-super-call
def getDiskChunks(disk, partitions, free):
    """ Return a list of Chunk instances representing a disk.
    :param disk: the disk
    :type disk: :class:`~.devices.StorageDevice`
    :param partitions: list of partitions
    :type partitions: list of :class:`~.devices.PartitionDevice`
    :param free: list of free regions
    :type free: list of :class:`parted.Geometry`
    :returns: list of chunks representing the disk
    :rtype: list of :class:`DiskChunk`
    Partitions and free regions not on the specified disk are ignored.
    Chunks contain an aligned version of the free region's geometry.
    """
    # new (non-existing) partitions that live on this disk
    new_parts = [p for p in partitions if p.disk == disk and not p.exists]
    usable_regions = []
    chunks = []
    for region in free:
        if region.device.path != disk.path:
            continue
        # Align the geometry so we have a realistic view of the free space.
        # alignUp and alignDown can align in the reverse direction if the only
        # aligned sector within the geometry is in that direction, so we have to
        # also check that the resulting aligned geometry has a non-zero length.
        # (It is possible that both will align to the same sector in a small
        # enough region.)
        aligned_start = disk.format.alignment.alignUp(region, region.start)
        aligned_end = disk.format.endAlignment.alignDown(region, region.end)
        if aligned_start >= aligned_end:
            continue
        geom = parted.Geometry(device=region.device,
                               start=aligned_start,
                               end=aligned_end)
        if geom.length < disk.format.alignment.grainSize:
            continue
        # usable_regions[i] corresponds to chunks[i]
        usable_regions.append(region)
        chunks.append(DiskChunk(geom))
    for part in new_parts:
        if part.isExtended:
            # handle extended partitions specially since they are
            # indeed very special
            continue
        for idx, region in enumerate(usable_regions):
            if region.contains(part.partedPartition.geometry):
                chunks[idx].addRequest(PartitionRequest(part))
                break
    return chunks
class TotalSizeSet(object):
    """ Set of device requests with a target combined size.
    This will be handled by growing the requests until the desired combined
    size has been achieved.
    """
    def __init__(self, devices, size):
        """
        :param devices: the set of devices
        :type devices: list of :class:`~.devices.PartitionDevice`
        :param size: the target combined size
        :type size: :class:`~.size.Size`
        """
        # track the underlying partition for encrypted members
        self.devices = [d.slave if isinstance(d, LUKSDevice) else d
                        for d in devices]
        self.size = size
        self.requests = []
        # combined space accounted for so far (starts at sum of base sizes)
        self.allocated = sum((d.req_base_size for d in self.devices), Size(0))
        log.debug("set.allocated = %d", self.allocated)

    @property
    def needed(self):
        """ Remaining space needed to reach the target combined size. """
        return self.size - self.allocated

    def allocate(self, amount):
        log.debug("allocating %d to TotalSizeSet with %d/%d (%d needed)",
                  amount, self.allocated, self.size, self.needed)
        self.allocated += amount

    def deallocate(self, amount):
        log.debug("deallocating %d from TotalSizeSet with %d/%d (%d needed)",
                  amount, self.allocated, self.size, self.needed)
        self.allocated -= amount
class SameSizeSet(object):
    """ Set of device requests with a common target size. """
    def __init__(self, devices, size, grow=False, max_size=None):
        """
        :param devices: the set of devices
        :type devices: list of :class:`~.devices.PartitionDevice`
        :param size: target size for each device/request
        :type size: :class:`~.size.Size`
        :keyword grow: whether the devices can be grown
        :type grow: bool
        :keyword max_size: the maximum size for growable devices
        :type max_size: :class:`~.size.Size`
        """
        # track the underlying partition for encrypted members
        self.devices = [d.slave if isinstance(d, LUKSDevice) else d
                        for d in devices]
        # each member gets an equal share of the requested total
        self.size = int(size / len(devices))
        self.grow = grow
        self.max_size = max_size
        self.requests = []
def manageSizeSets(size_sets, chunks):
    """ Adjust request growth so size-set constraints are honored.
    :param size_sets: list of size-related partition sets
    :type size_sets: list of :class:`TotalSizeSet` or :class:`SameSizeSet`
    :param chunks: chunks whose requests have already been grown
    :type chunks: list of :class:`Chunk`
    :returns: :const:`None`
    Runs two passes: each pass reclaims excess growth from set members
    (back into their chunks' pools) and then re-grows any chunk that had
    units reclaimed, so non-member requests can absorb the freed space.
    """
    growth_by_request = {}
    requests_by_device = {}
    chunks_by_request = {}
    for chunk in chunks:
        for request in chunk.requests:
            requests_by_device[request.device] = request
            chunks_by_request[request] = chunk
            growth_by_request[request] = 0
    for i in range(2):
        # track how much was reclaimed per chunk so we know what to re-grow
        reclaimed = dict([(chunk, 0) for chunk in chunks])
        for ss in size_sets:
            if isinstance(ss, TotalSizeSet):
                # TotalSizeSet members are trimmed to achieve the requested
                # total size
                log.debug("set: %s %d/%d", [d.name for d in ss.devices],
                          ss.allocated, ss.size)
                for device in ss.devices:
                    request = requests_by_device[device]
                    chunk = chunks_by_request[request]
                    # account only growth added since the last pass
                    new_growth = request.growth - growth_by_request[request]
                    ss.allocate(chunk.lengthToSize(new_growth))
                # decide how much to take back from each request
                # We may assume that all requests have the same base size.
                # We're shooting for a roughly equal distribution by trimming
                # growth from the requests that have grown the most first.
                requests = sorted([requests_by_device[d] for d in ss.devices],
                                  key=lambda r: r.growth, reverse=True)
                needed = ss.needed
                for request in requests:
                    chunk = chunks_by_request[request]
                    log.debug("%s", request)
                    log.debug("needed: %d", ss.needed)
                    if ss.needed < 0:
                        # it would be good to take back some from each device
                        # instead of taking all from the last one(s)
                        extra = -chunk.sizeToLength(needed) // len(ss.devices)
                        if extra > request.growth and i == 0:
                            log.debug("not reclaiming from this request")
                            continue
                        else:
                            extra = min(extra, request.growth)
                        reclaimed[chunk] += extra
                        chunk.reclaim(request, extra)
                        ss.deallocate(chunk.lengthToSize(extra))
                    if ss.needed <= 0:
                        request.done = True
            elif isinstance(ss, SameSizeSet):
                # SameSizeSet members all have the same size as the smallest
                # member
                requests = [requests_by_device[d] for d in ss.devices]
                _min_growth = min([r.growth for r in requests])
                log.debug("set: %s %d", [d.name for d in ss.devices], ss.size)
                log.debug("min growth is %d", _min_growth)
                for request in requests:
                    chunk = chunks_by_request[request]
                    # growth that would put this member at the set's size
                    _max_growth = chunk.sizeToLength(ss.size) - request.base
                    log.debug("max growth for %s is %d", request, _max_growth)
                    min_growth = max(min(_min_growth, _max_growth), 0)
                    if request.growth > min_growth:
                        extra = request.growth - min_growth
                        reclaimed[chunk] += extra
                        chunk.reclaim(request, extra)
                        request.done = True
                    elif request.growth == min_growth:
                        request.done = True
        # store previous growth amounts so we know how much was allocated in
        # the latest growRequests call
        for request in growth_by_request.keys():
            growth_by_request[request] = request.growth
        for chunk in chunks:
            if reclaimed[chunk] and not chunk.done:
                chunk.growRequests()
def growPartitions(disks, partitions, free, size_sets=None):
    """ Grow all growable partition requests.
        Partitions have already been allocated from chunks of free space on
        the disks. This function does not modify the ordering of partitions
        or the free chunks from which they are allocated.
        Free space within a given chunk is allocated to each growable
        partition allocated from that chunk in an amount corresponding to
        the ratio of that partition's base size to the sum of the base sizes
        of all growable partitions allocated from the chunk.
        :param disks: all usable disks
        :type disks: list of :class:`~.devices.StorageDevice`
        :param partitions: all partitions
        :type partitions: list of :class:`~.devices.PartitionDevice`
        :param free: all free regions on disks
        :type free: list of :class:`parted.Geometry`
        :keyword size_sets: list of size-related partition sets
        :type size_sets: list of :class:`TotalSizeSet` or :class:`SameSizeSet`
        :returns: :const:`None`
    """
    log.debug("growPartitions: disks=%s, partitions=%s",
              [d.name for d in disks],
              ["%s(id %d)" % (p.name, p.id) for p in partitions])
    all_growable = [p for p in partitions if p.req_grow]
    if not all_growable:
        log.debug("no growable partitions")
        return
    if size_sets is None:
        size_sets = []
    log.debug("growable partitions are %s", [p.name for p in all_growable])
    #
    # collect info about each disk and the requests it contains
    #
    chunks = []
    for disk in disks:
        # list of free space regions on this disk prior to partition allocation
        disk_free = [f for f in free if f.device.path == disk.path]
        if not disk_free:
            log.debug("no free space on %s", disk.name)
            continue
        disk_chunks = getDiskChunks(disk, partitions, disk_free)
        log.debug("disk %s has %d chunks", disk.name, len(disk_chunks))
        chunks.extend(disk_chunks)
    #
    # grow the partitions in each chunk as a group
    #
    for chunk in chunks:
        if not chunk.hasGrowable:
            # no growable partitions in this chunk
            continue
        chunk.growRequests()
    # adjust set members' growth amounts as needed
    manageSizeSets(size_sets, chunks)
    # second pass: convert the computed growth amounts into concrete
    # parted geometries, one disk at a time
    for disk in disks:
        log.debug("growing partitions on %s", disk.name)
        for chunk in chunks:
            if chunk.path != disk.path:
                continue
            if not chunk.hasGrowable:
                # no growable partitions in this chunk
                continue
            # recalculate partition geometries
            disklabel = disk.format
            start = chunk.geometry.start
            # find any extended partition on this disk
            extended_geometry = getattr(disklabel.extendedPartition,
                                        "geometry",
                                        None) # parted.Geometry
            # align start sector as needed
            if not disklabel.alignment.isAligned(chunk.geometry, start):
                start = disklabel.alignment.alignUp(chunk.geometry, start)
            new_partitions = []
            for p in chunk.requests:
                ptype = p.device.partedPartition.type
                log.debug("partition %s (%d): %s", p.device.name,
                          p.device.id, ptype)
                if ptype == parted.PARTITION_EXTENDED:
                    continue
                # XXX since we need one metadata sector before each
                # logical partition we burn one logical block to
                # safely align the start of each logical partition
                if ptype == parted.PARTITION_LOGICAL:
                    start += disklabel.alignment.grainSize
                new_length = p.base + p.growth
                end = start + new_length - 1
                # align end sector as needed
                if not disklabel.endAlignment.isAligned(chunk.geometry, end):
                    end = disklabel.endAlignment.alignDown(chunk.geometry, end)
                new_geometry = parted.Geometry(device=disklabel.partedDevice,
                                               start=start,
                                               end=end)
                log.debug("new geometry for %s: %s", p.device.name,
                          new_geometry)
                # next partition begins right after this one's aligned end
                start = end + 1
                new_partition = parted.Partition(disk=disklabel.partedDisk,
                                                 type=ptype,
                                                 geometry=new_geometry)
                new_partitions.append((new_partition, p.device))
            # remove all new partitions from this chunk
            removeNewPartitions([disk], [r.device for r in chunk.requests],
                                partitions)
            log.debug("back from removeNewPartitions")
            # adjust the extended partition as needed
            # we will only resize an extended partition that we created
            log.debug("extended: %s", extended_geometry)
            if extended_geometry and \
               chunk.geometry.contains(extended_geometry):
                log.debug("setting up new geometry for extended on %s", disk.name)
                ext_start = 0
                # the extended partition must begin one grain before the
                # earliest logical partition created above
                for (partition, device) in new_partitions:
                    if partition.type != parted.PARTITION_LOGICAL:
                        continue
                    if not ext_start or partition.geometry.start < ext_start:
                        # account for the logical block difference in start
                        # sector for the extended -v- first logical
                        # (partition.geometry.start is already aligned)
                        ext_start = partition.geometry.start - disklabel.alignment.grainSize
                new_geometry = parted.Geometry(device=disklabel.partedDevice,
                                               start=ext_start,
                                               end=chunk.geometry.end)
                log.debug("new geometry for extended: %s", new_geometry)
                new_extended = parted.Partition(disk=disklabel.partedDisk,
                                                type=parted.PARTITION_EXTENDED,
                                                geometry=new_geometry)
                # insert the extended partition just before the first logical
                # so it gets added to the disklabel first
                ptypes = [p.type for (p, d) in new_partitions]
                for pt_idx, ptype in enumerate(ptypes):
                    if ptype == parted.PARTITION_LOGICAL:
                        new_partitions.insert(pt_idx, (new_extended, None))
                        break
            # add the partitions with their new geometries to the disk
            for (partition, device) in new_partitions:
                if device:
                    name = device.name
                else:
                    # If there was no extended partition on this disk when
                    # doPartitioning was called we won't have a
                    # PartitionDevice instance for it.
                    name = partition.getDeviceNodeName()
                log.debug("setting %s new geometry: %s", name,
                          partition.geometry)
                constraint = parted.Constraint(exactGeom=partition.geometry)
                disklabel.partedDisk.addPartition(partition=partition,
                                                  constraint=constraint)
                path = partition.path
                if device:
                    # set the device's name
                    device.partedPartition = partition
                    # without this, the path attr will be a basename. eek.
                    device.disk = disk
                    # make sure we store the disk's version of the partition
                    newpart = disklabel.partedDisk.getPartitionByPath(path)
                    device.partedPartition = newpart
def lvCompare(lv1, lv2):
    """ More specifically defined lvs come first.
        < 0 => x < y
          0 => x == y
        > 0 => x > y
    """
    # accept either Device instances or request objects that wrap one
    if not isinstance(lv1, Device):
        lv1 = lv1.device
    if not isinstance(lv2, Device):
        lv2 = lv2.device
    ret = 0
    # larger requests go to the front of the list
    ret -= compare(lv1.size, lv2.size) * 100
    # fixed size requests to the front
    ret += compare(lv1.req_grow, lv2.req_grow) * 50
    # potentially larger growable requests go to the front
    if lv1.req_grow and lv2.req_grow:
        if not lv1.req_max_size and lv2.req_max_size:
            # unbounded growth sorts before bounded growth
            ret -= 25
        elif lv1.req_max_size and not lv2.req_max_size:
            ret += 25
        else:
            ret -= compare(lv1.req_max_size, lv2.req_max_size) * 25
    # clamp the weighted score to the conventional -1/0/1 cmp result
    if ret > 0:
        ret = 1
    elif ret < 0:
        ret = -1
    return ret
# key function adapter so lvCompare can be used with sorted(key=...)
_lvCompareKey = functools.cmp_to_key(lvCompare)
def _apply_chunk_growth(chunk):
    """ Apply the growth amounts the VGChunk calculated to the lv devices. """
    for request in chunk.requests:
        device = request.device
        if not device.req_grow:
            continue
        new_size = chunk.lengthToSize(request.base + request.growth)
        if hasattr(device, "lvs"):
            # thin pools: subtract the required metadata padding from the size
            pad = blockdev.lvm.get_thpool_padding(new_size, device.vg.peSize, included=True)
            new_size -= Size(pad)
        # Base is pe, which means potentially rounded up by as much as
        # pesize-1, so the new size must be computed from base+growth
        # rather than by adding the growth to the initial size.
        device.size = new_size
def growLVM(storage):
    """ Grow LVs according to the sizes of the PVs.
        Strategy for growth involving thin pools:
            - Applies to device factory class as well.
            - Overcommit is not allowed.
            - Pool lv's base size includes sizes of thin lvs within it.
            - Pool is grown along with other non-thin lvs.
            - Thin lvs within each pool are grown separately using the
              ThinPoolChunk class.
        :param storage: the storage instance whose vgs will be processed
        :returns: :const:`None`
        :raises PartitioningError: if any vg is overcommitted
        :raises ValueError: if percentage-based requests in a vg exceed 100%
    """
    for vg in storage.vgs:
        total_free = vg.freeSpace
        if total_free < 0:
            # by now we have allocated the PVs so if there isn't enough
            # space in the VG we have a real problem
            raise PartitioningError(_("not enough space for LVM requests"))
        elif not total_free:
            log.debug("vg %s has no free space", vg.name)
            continue
        log.debug("vg %s: %s free ; lvs: %s", vg.name, total_free,
                  [l.lvname for l in vg.lvs])
        # don't include thin lvs in the vg's growth calculation
        fatlvs = [lv for lv in vg.lvs if lv not in vg.thinlvs]
        for lv in fatlvs:
            if lv in vg.thinpools:
                # make sure the pool's base size is at least the sum of its lvs'
                lv.req_size = max(lv.req_size, lv.usedSpace)
                # add the required padding to the requested pool size
                lv.req_size += Size(blockdev.lvm.get_thpool_padding(lv.req_size, vg.peSize))
        # establish sizes for the percentage-based requests (which are fixed)
        percentage_based_lvs = [lv for lv in vg.lvs if lv.req_percent]
        if sum(lv.req_percent for lv in percentage_based_lvs) > 100:
            raise ValueError("sum of percentages within a vg cannot exceed 100")
        # percentages apply to the free extents plus the extents already
        # claimed by the percentage-based lvs themselves
        percent_base = sum(vg.align(lv.req_size, roundup=False) / vg.peSize
                           for lv in percentage_based_lvs)
        percentage_basis = vg.freeExtents + percent_base
        for lv in percentage_based_lvs:
            new_extents = int(lv.req_percent * Decimal('0.01') * percentage_basis)
            # set req_size also so the request can also be growable if desired
            lv.size = lv.req_size = vg.peSize * new_extents
        # grow regular lvs
        chunk = VGChunk(vg, requests=[LVRequest(l) for l in fatlvs])
        chunk.growRequests()
        _apply_chunk_growth(chunk)
        # now, grow thin lv requests within their respective pools
        for pool in vg.thinpools:
            requests = [LVRequest(l) for l in pool.lvs]
            thin_chunk = ThinPoolChunk(pool, requests)
            thin_chunk.growRequests()
            _apply_chunk_growth(thin_chunk)
| karmix/blivet | blivet/partitioning.py | Python | gpl-2.0 | 81,110 |
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for more fancy file handles.
Classes:
UndoHandle File object decorator with support for undo-like operations.
StringHandle Wraps a file object around a string.
SGMLStripper Object that strips SGML. This is now considered OBSOLETE, and
is likely to be deprecated in a future release of Biopython,
and later removed.
"""
import StringIO
class UndoHandle:
    """A file-handle wrapper that lets lines be pushed back after reading.
    Pushed-back (saved) lines are held in a LIFO buffer and returned
    before anything further is consumed from the wrapped handle.
    Added methods:
    saveline    Push a line back so the next read returns it.
    peekline    Look at the next line without consuming it.
    """
    def __init__(self, handle):
        self._handle = handle
        self._saved = []
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol: stop at EOF (empty string).
        line = self.readline()
        if not line:
            raise StopIteration
        return line
    def readlines(self, *args, **keywds):
        # Saved lines come first, followed by everything left in the handle.
        pending, self._saved = self._saved, []
        return pending + self._handle.readlines(*args, **keywds)
    def readline(self, *args, **keywds):
        if self._saved:
            return self._saved.pop(0)
        return self._handle.readline(*args, **keywds)
    def read(self, size=-1):
        if size == -1:
            # Unbounded read: drain the whole saved buffer, then the handle.
            saved = "".join(self._saved)
            self._saved[:] = []
        else:
            # Bounded read: satisfy as much as possible from saved lines,
            # splitting the last one if only part of it is needed.
            pieces = []
            while size > 0 and self._saved:
                head = self._saved[0]
                if len(head) <= size:
                    size = size - len(head)
                    pieces.append(self._saved.pop(0))
                else:
                    pieces.append(head[:size])
                    self._saved[0] = head[size:]
                    size = 0
            saved = "".join(pieces)
        return saved + self._handle.read(size)
    def saveline(self, line):
        # Empty strings (EOF markers) are not worth saving.
        if line:
            self._saved.insert(0, line)
    def peekline(self):
        if self._saved:
            return self._saved[0]
        line = self._handle.readline()
        self.saveline(line)
        return line
    def tell(self):
        # Report the consumer's position: the handle position minus the
        # bytes sitting unread in the saved buffer.
        return self._handle.tell() - sum(map(len, self._saved))
    def seek(self, *args):
        # Seeking invalidates any pushed-back lines.
        self._saved = []
        self._handle.seek(*args)
    def __getattr__(self, attr):
        # Delegate anything else (close, name, ...) to the wrapped handle.
        return getattr(self._handle, attr)
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self._handle.close()
# I could make this faster by using cStringIO.
# However, cStringIO (in v1.52) does not implement the
# readlines method.
# StringHandle wraps a file-like read interface around an in-memory string.
StringHandle = StringIO.StringIO
try:
    import sgmllib
except ImportError:
    #This isn't available on Python 3, but we don't care much as SGMLStripper
    #is obsolete
    pass
else:
    class SGMLStripper:
        """Object to strip SGML tags (OBSOLETE)."""
        class MyParser(sgmllib.SGMLParser):
            # Minimal parser that discards tags and accumulates text content.
            def __init__(self):
                sgmllib.SGMLParser.__init__(self)
                self.data = ''
            def handle_data(self, data):
                self.data = self.data + data
        def __init__(self):
            import warnings
            warnings.warn("This class is obsolete, and likely to be deprecated and later removed in a future version of Biopython", PendingDeprecationWarning)
            self._parser = SGMLStripper.MyParser()
        def strip(self, str):
            """S.strip(str) -> string
            Strip the SGML tags from str.
            """
            if not str: # empty string, don't do anything.
                return ''
            # I need to make sure that I don't return an empty string if
            # the buffer is not empty. This can happen if there's a newline
            # character embedded within a tag. Thus, I'll first check to
            # see if the last character is a newline. If it is, and it's stripped
            # away, I'll add it back.
            is_newline = str[-1] in ['\n', '\r']
            self._parser.data = '' # clear the parser's data (don't reset)
            self._parser.feed(str)
            if self._parser.data:
                str = self._parser.data
            elif is_newline:
                str = '\n'
            else:
                str = ''
            return str
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/File.py | Python | gpl-2.0 | 4,640 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Gaspar/.qgis/python/plugins/delPropiedad/forms_ui/frmSelec.ui'
#
# Created: Wed Jul 18 12:50:20 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_frmSelec(object):
    # NOTE: pyuic4-generated UI class (see header warning) -- manual edits
    # here will be lost if the .ui file is regenerated.
    def setupUi(self, frmSelec):
        # Build the "select job" dialog: a table of jobs plus accept/cancel.
        frmSelec.setObjectName(_fromUtf8("frmSelec"))
        frmSelec.resize(972, 310)
        frmSelec.setWindowTitle(QtGui.QApplication.translate("frmSelec", "Seleccionar trabajo", None, QtGui.QApplication.UnicodeUTF8))
        # table listing the available jobs; rows/columns are filled in later
        self.tableWidget = QtGui.QTableWidget(frmSelec)
        self.tableWidget.setGeometry(QtCore.QRect(10, 30, 951, 231))
        self.tableWidget.setToolTip(QtGui.QApplication.translate("frmSelec", "Seleccione una fila y pulse aceptar", None, QtGui.QApplication.UnicodeUTF8))
        self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        # accept button
        self.bttAceptar = QtGui.QPushButton(frmSelec)
        self.bttAceptar.setGeometry(QtCore.QRect(440, 270, 111, 31))
        self.bttAceptar.setText(QtGui.QApplication.translate("frmSelec", "Aceptar", None, QtGui.QApplication.UnicodeUTF8))
        self.bttAceptar.setObjectName(_fromUtf8("bttAceptar"))
        # cancel button
        self.bttCancelar = QtGui.QPushButton(frmSelec)
        self.bttCancelar.setGeometry(QtCore.QRect(570, 270, 91, 31))
        self.bttCancelar.setText(QtGui.QApplication.translate("frmSelec", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
        self.bttCancelar.setObjectName(_fromUtf8("bttCancelar"))
        # caption label above the table
        self.label = QtGui.QLabel(frmSelec)
        self.label.setGeometry(QtCore.QRect(20, 10, 331, 16))
        self.label.setText(QtGui.QApplication.translate("frmSelec", "Selecciones el trabajo que desea consultar:", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setObjectName(_fromUtf8("label"))
        self.retranslateUi(frmSelec)
        QtCore.QMetaObject.connectSlotsByName(frmSelec)
    def retranslateUi(self, frmSelec):
        # all translatable strings are set in setupUi above
        pass
# Manual preview harness: show the dialog standalone for visual inspection.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    frmSelec = QtGui.QDialog()
    ui = Ui_frmSelec()
    ui.setupUi(frmSelec)
    frmSelec.show()
    sys.exit(app.exec_())
| gasparmoranavarro/TopoDelProp | forms/frmSelec.py | Python | gpl-2.0 | 2,643 |
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/suites_qtta.py")
source("../../shared/qtcreator.py")
# entry of test
def main():
    """Squish system test: introduce a QML syntax error in the Qt Quick
    animation example and verify Qt Creator reports it in the Issues pane.
    """
    # prepare example project
    sourceExample = os.path.join(Qt5Path.examplesPath(Targets.DESKTOP_5_14_1_DEFAULT),
                                 "quick", "animation")
    proFile = "animation.pro"
    if not neededFilePresent(os.path.join(sourceExample, proFile)):
        return
    # copy example project to temp directory
    templateDir = prepareTemplate(sourceExample, "/../shared")
    examplePath = os.path.join(templateDir, proFile)
    startQC()
    if not startedWithoutPluginError():
        return
    # open example project
    targets = Targets.desktopTargetClasses()
    openQmakeProject(examplePath, targets)
    # create syntax error
    openDocument("animation.Resources.animation\\.qrc./animation.basics.property-animation\\.qml")
    if not appendToLine(waitForObject(":Qt Creator_QmlJSEditor::QmlJSTextEditorWidget"), "Image {", "SyntaxError"):
        invokeMenuItem("File", "Exit")
        return
    # save all to invoke qml parsing
    invokeMenuItem("File", "Save All")
    # open issues list view
    ensureChecked(waitForObject(":Qt Creator_Issues_Core::Internal::OutputPaneToggleButton"))
    issuesView = waitForObject(":Qt Creator.Issues_QListView")
    # verify that error is properly reported
    test.verify(checkSyntaxError(issuesView, ["Expected token `:'"], True),
                "Verifying QML syntax error while parsing complex qt quick application.")
    # exit qt creator
    invokeMenuItem("File", "Exit")
    # no cleanup needed, as whole testing directory gets properly removed after test finished
| qtproject/qt-creator | tests/system/suite_CCOM/tst_CCOM02/test.py | Python | gpl-3.0 | 2,818 |
import numpy as np
import bpy
from bpy.props import FloatProperty, EnumProperty, IntProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.curve import SvCurveLengthSolver, SvCurve
from sverchok.utils.nodes_mixins.draft_mode import DraftMode
class SvCurveLengthParameterNode(DraftMode, bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Curve Length Parameter
    Tooltip: Solve curve length (natural) parameter
    """
    bl_idname = 'SvExCurveLengthParameterNode'
    bl_label = 'Curve Length Parameter'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_CURVE_LENGTH_P'
    # number of samples used by the length solver's interpolation
    resolution : IntProperty(
        name = 'Resolution',
        min = 1,
        default = 50,
        update = updateNode)
    # arc length at which to evaluate the curve (MANUAL mode)
    length : FloatProperty(
        name = "Length",
        min = 0.0,
        default = 0.5,
        update = updateNode)
    # draft-mode counterpart of `length`
    length_draft : FloatProperty(
        name = "[D] Length",
        min = 0.0,
        default = 0.5,
        update = updateNode)
    modes = [('SPL', 'Cubic', "Cubic Spline", 0),
             ('LIN', 'Linear', "Linear Interpolation", 1)]
    mode: EnumProperty(name='Interpolation mode', default="SPL", items=modes, update=updateNode)
    def update_sockets(self, context):
        # only show the socket relevant to the current evaluation mode
        self.inputs['Length'].hide_safe = self.eval_mode != 'MANUAL'
        self.inputs['Samples'].hide_safe = self.eval_mode != 'AUTO'
        updateNode(self, context)
    eval_modes = [
        ('AUTO', "Automatic", "Evaluate the curve at evenly spaced points", 0),
        ('MANUAL', "Manual", "Evaluate the curve at specified points", 1)
    ]
    eval_mode : EnumProperty(
        name = "Mode",
        items = eval_modes,
        default = 'AUTO',
        update = update_sockets)
    # number of evenly spaced evaluation points (AUTO mode)
    sample_size : IntProperty(
        name = "Samples",
        default = 50,
        min = 4,
        update = updateNode)
    specify_accuracy : BoolProperty(
        name = "Specify accuracy",
        default = False,
        update = updateNode)
    # tolerance exponent: solver tolerance is 10 ** (-accuracy)
    accuracy : IntProperty(
        name = "Accuracy",
        default = 3,
        min = 0,
        update = updateNode)
    # draft-mode counterpart of `accuracy`
    accuracy_draft : IntProperty(
        name = "[D] Accuracy",
        default = 1,
        min = 0,
        update = updateNode)
    # DraftMode mixin: which properties are swapped in draft mode
    draft_properties_mapping = dict(length = 'length_draft', accuracy = 'accuracy_draft')
    def sv_init(self, context):
        self.inputs.new('SvCurveSocket', "Curve")
        self.inputs.new('SvStringsSocket', "Resolution").prop_name = 'resolution'
        self.inputs.new('SvStringsSocket', "Length").prop_name = 'length'
        self.inputs.new('SvStringsSocket', "Samples").prop_name = 'sample_size'
        self.outputs.new('SvStringsSocket', "T")
        self.outputs.new('SvVerticesSocket', "Vertices")
        self.update_sockets(context)
    def draw_buttons(self, context, layout):
        layout.prop(self, 'eval_mode', expand=True)
        layout.prop(self, 'specify_accuracy')
        if self.specify_accuracy:
            if self.id_data.sv_draft:
                layout.prop(self, 'accuracy_draft')
            else:
                layout.prop(self, 'accuracy')
    def draw_buttons_ext(self, context, layout):
        self.draw_buttons(context, layout)
        layout.prop(self, 'mode', expand=True)
    def does_support_draft_mode(self):
        return True
    def draw_label(self):
        # prefix the node label with "[D]" while the tree is in draft mode
        label = self.label or self.name
        if self.id_data.sv_draft:
            label = "[D] " + label
        return label
    def process(self):
        if not any((s.is_linked for s in self.outputs)):
            return
        # curve evaluation is only needed when someone consumes Vertices
        need_eval = self.outputs['Vertices'].is_linked
        curves_s = self.inputs['Curve'].sv_get()
        resolution_s = self.inputs['Resolution'].sv_get()
        length_s = self.inputs['Length'].sv_get()
        samples_s = self.inputs['Samples'].sv_get(default=[[]])
        length_s = ensure_nesting_level(length_s, 3)
        resolution_s = ensure_nesting_level(resolution_s, 2)
        samples_s = ensure_nesting_level(samples_s, 2)
        curves_s = ensure_nesting_level(curves_s, 2, data_types=(SvCurve,))
        ts_out = []
        verts_out = []
        for curves, resolutions, input_lengths_i, samples_i in zip_long_repeat(curves_s, resolution_s, length_s, samples_s):
            for curve, resolution, input_lengths, samples in zip_long_repeat(curves, resolutions, input_lengths_i, samples_i):
                # draft mode forces cheap linear interpolation
                mode = self.mode
                accuracy = self.accuracy
                if self.id_data.sv_draft:
                    mode = 'LIN'
                    accuracy = self.accuracy_draft
                if self.specify_accuracy:
                    tolerance = 10 ** (-accuracy)
                else:
                    tolerance = None
                solver = SvCurveLengthSolver(curve)
                solver.prepare(mode, resolution, tolerance=tolerance)
                if self.eval_mode == 'AUTO':
                    # evenly spaced arc lengths over the whole curve
                    total_length = solver.get_total_length()
                    input_lengths = np.linspace(0.0, total_length, num = samples)
                else:
                    input_lengths = np.array(input_lengths)
                ts = solver.solve(input_lengths)
                ts_out.append(ts.tolist())
                if need_eval:
                    verts = curve.evaluate_array(ts).tolist()
                    verts_out.append(verts)
        self.outputs['T'].sv_set(ts_out)
        self.outputs['Vertices'].sv_set(verts_out)
def register():
    # Blender add-on hook: make the node class available to node trees.
    bpy.utils.register_class(SvCurveLengthParameterNode)
def unregister():
    # Blender add-on hook: remove the node class on unload.
    bpy.utils.unregister_class(SvCurveLengthParameterNode)
| nortikin/sverchok | nodes/curve/length_parameter.py | Python | gpl-3.0 | 5,754 |
import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
# p2pool network definition for titcoin (Python 2: note str.decode('hex')).
# magic bytes prefixed to every P2P message
P2P_PREFIX = '25174c22'.decode('hex')
P2P_PORT = 8698
ADDRESS_VERSION = 0
RPC_PORT = 8697
# verifies the local daemon serves the expected chain (genesis hash match)
# and is not running on testnet
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
            (yield helper.check_genesis_block(bitcoind, '00000000bb82b1cbe86b5fe62967c13ff2e8cdabf68adeea2038289771c3491f')) and
            not (yield bitcoind.rpc_getinfo())['testnet']
        ))
# block subsidy in base units; halves every 500000 blocks
SUBSIDY_FUNC = lambda height: 69*69000000 >> (height + 1)//500000
POW_FUNC = data.hash256
BLOCK_PERIOD = 60 # s
SYMBOL = 'TIT'
# per-platform location of the daemon's configuration file
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'titcoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/titcoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.titcoin'), 'titcoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'https://blockexperts.com/tit/hash/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://blockexperts.com/tit/address/'
TX_EXPLORER_URL_PREFIX = 'http://blockexperts.com/tit/tx/'
SANE_TARGET_RANGE = (2**256//2**32//1000000 - 1, 2**256//2**32 - 1)
DUMB_SCRYPT_DIFF = 1
DUST_THRESHOLD = 0.001e8
| ptcrypto/p2pool-adaptive | p2pool/bitcoin/networks/titcoin.py | Python | gpl-3.0 | 1,188 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``MessageRecord`` model (a generic
    status log entry pointing at any object via contenttypes).
    NOTE: migrations are frozen history -- do not edit the logic.
    """
    def forwards(self, orm):
        # Adding model 'MessageRecord'
        db.create_table(u'nuntium_messagerecord', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('status', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('datetime', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 4, 24, 0, 0))),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal(u'nuntium', ['MessageRecord'])
    def backwards(self, orm):
        # Deleting model 'MessageRecord'
        db.delete_table(u'nuntium_messagerecord')
    # frozen ORM snapshot used by South at migration time
    models = {
        u'contactos.contact': {
            'Meta': {'object_name': 'Contact'},
            'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        u'contactos.contacttype': {
            'Meta': {'object_name': 'ContactType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'nuntium.message': {
            'Meta': {'object_name': 'Message'},
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': "'4'"}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
        },
        u'nuntium.messagerecord': {
            'Meta': {'object_name': 'MessageRecord'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'datetime': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'nuntium.outboundmessage': {
            'Meta': {'object_name': 'OutboundMessage'},
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.Contact']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.Message']"})
        },
        u'nuntium.writeitinstance': {
            'Meta': {'object_name': 'WriteItInstance'},
            'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'popit.apiinstance': {
            'Meta': {'object_name': 'ApiInstance'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
        },
        u'popit.person': {
            'Meta': {'object_name': 'Person'},
            'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        }
    }
complete_apps = ['nuntium'] | TEDICpy/write-it | nuntium/migrations/0007_auto__add_messagerecord.py | Python | gpl-3.0 | 5,320 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os
from calibre.customize.conversion import OutputFormatPlugin
from calibre.customize.conversion import OptionRecommendation
class LRFOptions(object):
    """Adapter translating calibre conversion options plus OEB metadata into
    the flat option object expected by the legacy LRF conversion code.
    (Python 2 code: note ``unicode`` and ``sys.maxint``.)
    """
    def __init__(self, output, opts, oeb):
        def f2s(f):
            # first metadata field value as unicode, '' on any failure
            try:
                return unicode(f[0])
            except:
                return ''
        m = oeb.metadata
        # clamp negative margins to zero
        for x in ('left', 'top', 'right', 'bottom'):
            attr = 'margin_'+x
            val = getattr(opts, attr)
            if val < 0:
                setattr(opts, attr, 0)
        self.title = None
        self.author = self.publisher = _('Unknown')
        self.title_sort = self.author_sort = ''
        # pick the first creator with an 'aut' role, keeping its sort form
        for x in m.creator:
            if x.role == 'aut':
                self.author = unicode(x)
                fa = unicode(getattr(x, 'file_as', ''))
                if fa:
                    self.author_sort = fa
        for x in m.title:
            if unicode(x.file_as):
                self.title_sort = unicode(x.file_as)
        self.freetext = f2s(m.description)
        self.category = f2s(m.subject)
        self.cover = None
        self.use_metadata_cover = True
        self.output = output
        self.ignore_tables = opts.linearize_tables
        if opts.disable_font_rescaling:
            self.base_font_size = 0
        else:
            self.base_font_size = opts.base_font_size
        self.blank_after_para = opts.insert_blank_line
        self.use_spine = True
        self.font_delta = 0
        self.ignore_colors = False
        from calibre.ebooks.lrf import PRS500_PROFILE
        self.profile = PRS500_PROFILE
        self.link_levels = sys.maxint
        self.link_exclude = '@'
        self.no_links_in_toc = True
        # chapter detection is handled by the pipeline, so the legacy
        # detector is disabled with patterns that can never match
        self.disable_chapter_detection = True
        self.chapter_regex = 'dsadcdswcdec'
        self.chapter_attr = '$,,$'
        self.override_css = self._override_css = ''
        self.page_break = 'h[12]'
        self.force_page_break = '$'
        self.force_page_break_attr = '$'
        self.add_chapters_to_toc = False
        self.baen = self.pdftohtml = self.book_designer = False
        self.verbose = opts.verbose
        self.encoding = 'utf-8'
        self.lrs = False
        self.minimize_memory_usage = False
        self.autorotation = opts.enable_autorotation
        # convert points to device pixels using the profile dpi
        self.header_separation = (self.profile.dpi/72.) * opts.header_separation
        self.headerformat = opts.header_format
        for x in ('top', 'bottom', 'left', 'right'):
            setattr(self, x+'_margin',
                (self.profile.dpi/72.) * float(getattr(opts, 'margin_'+x)))
        # copy remaining options through unchanged
        for x in ('wordspace', 'header', 'header_format',
                'minimum_indent', 'serif_family',
                'render_tables_as_images', 'sans_family', 'mono_family',
                'text_size_multiplier_for_rendered_tables'):
            setattr(self, x, getattr(opts, x))
class LRFOutput(OutputFormatPlugin):
    """Conversion pipeline output plugin producing Sony LRF files."""
    name = 'LRF Output'
    author = 'Kovid Goyal'
    file_type = 'lrf'
    options = set([
        OptionRecommendation(name='enable_autorotation', recommended_value=False,
            help=_('Enable auto-rotation of images that are wider than the screen width.')
        ),
        OptionRecommendation(name='wordspace',
            recommended_value=2.5, level=OptionRecommendation.LOW,
            help=_('Set the space between words in pts. Default is %default')
        ),
        OptionRecommendation(name='header', recommended_value=False,
            help=_('Add a header to all the pages with title and author.')
        ),
        OptionRecommendation(name='header_format', recommended_value="%t by %a",
            help=_('Set the format of the header. %a is replaced by the author '
            'and %t by the title. Default is %default')
        ),
        OptionRecommendation(name='header_separation', recommended_value=0,
            help=_('Add extra spacing below the header. Default is %default pt.')
        ),
        OptionRecommendation(name='minimum_indent', recommended_value=0,
            help=_('Minimum paragraph indent (the indent of the first line '
            'of a paragraph) in pts. Default: %default')
        ),
        OptionRecommendation(name='render_tables_as_images',
            recommended_value=False,
            help=_('Render tables in the HTML as images (useful if the '
            'document has large or complex tables)')
        ),
        OptionRecommendation(name='text_size_multiplier_for_rendered_tables',
            recommended_value=1.0,
            help=_('Multiply the size of text in rendered tables by this '
            'factor. Default is %default')
        ),
        OptionRecommendation(name='serif_family', recommended_value=None,
            help=_('The serif family of fonts to embed')
        ),
        OptionRecommendation(name='sans_family', recommended_value=None,
            help=_('The sans-serif family of fonts to embed')
        ),
        OptionRecommendation(name='mono_family', recommended_value=None,
            help=_('The monospace family of fonts to embed')
        ),
    ])
    recommendations = set([
        ('change_justification', 'original', OptionRecommendation.HIGH),
        ])
    def convert_images(self, pages, opts, wide):
        # Build an LRF consisting of one full-page image block per page
        # (used for comic/image-collection inputs).
        from calibre.ebooks.lrf.pylrs.pylrs import Book, BookSetting, ImageStream, ImageBlock
        from uuid import uuid4
        from calibre.constants import __appname__, __version__
        # page pixel dimensions: landscape/wide vs. portrait
        width, height = (784, 1012) if wide else (584, 754)
        ps = {}
        ps['topmargin'] = 0
        ps['evensidemargin'] = 0
        ps['oddsidemargin'] = 0
        ps['textwidth'] = width
        ps['textheight'] = height
        book = Book(title=opts.title, author=opts.author,
                bookid=uuid4().hex,
                publisher='%s %s'%(__appname__, __version__),
                category=_('Comic'), pagestyledefault=ps,
                booksetting=BookSetting(screenwidth=width, screenheight=height))
        for page in pages:
            imageStream = ImageStream(page)
            _page = book.create_page()
            _page.append(ImageBlock(refstream=imageStream,
                blockwidth=width, blockheight=height, xsize=width,
                ysize=height, x1=width, y1=height))
            book.append(_page)
        book.renderLrf(open(opts.output, 'wb'))
    def flatten_toc(self):
        # LRF has no nested TOC support; collapse the tree to one level.
        from calibre.ebooks.oeb.base import TOC
        nroot = TOC()
        for x in self.oeb.toc.iterdescendants():
            nroot.add(x.title, x.href)
        self.oeb.toc = nroot
    def convert(self, oeb, output_path, input_plugin, opts, log):
        self.log, self.opts, self.oeb = log, opts, oeb
        lrf_opts = LRFOptions(output_path, opts, oeb)
        # image collections bypass the HTML path entirely
        if input_plugin.is_image_collection:
            self.convert_images(input_plugin.get_images(), lrf_opts,
                    getattr(opts, 'wide', False))
            return
        self.flatten_toc()
        # write the OEB to a temp dir, then run the legacy HTML->LRF
        # converter on the generated OPF
        from calibre.ptempfile import TemporaryDirectory
        with TemporaryDirectory(u'_lrf_output') as tdir:
            from calibre.customize.ui import plugin_for_output_format
            oeb_output = plugin_for_output_format('oeb')
            oeb_output.convert(oeb, tdir, input_plugin, opts, log)
            opf = [x for x in os.listdir(tdir) if x.endswith('.opf')][0]
            from calibre.ebooks.lrf.html.convert_from import process_file
            process_file(os.path.join(tdir, opf), lrf_opts, self.log)
| jelly/calibre | src/calibre/ebooks/conversion/plugins/lrf_output.py | Python | gpl-3.0 | 7,750 |
"""This will perform basic enrichment on a given IP."""
import csv
import json
import mmap
import os
import socket
import urllib
import dns.resolver
import dns.reversename
from geoip import geolite2
from IPy import IP
from joblib import Parallel, delayed
from netaddr import AddrFormatError, IPSet
# Local cache of the Tor exit-node list; downloaded on demand by flookup().
torcsv = 'Tor_ip_list_ALL.csv'
# Remote source for the Tor node list.
sfile = 'http://torstatus.blutmagie.de/ip_list_all.php/Tor_ip_list_ALL.csv'
# Subnet of the most recent lookup (set by mainlookup to skip duplicates).
SUBNET = 0
# Result dictionary of the most recent lookup (shared mutable state).
INPUTDICT = {}
# CSV mapping abuse-contact/domain substrings to sector labels.
SECTOR_CSV = 'sector.csv'
# Output CSV written to by csvout(); recreated by batch().
OUTFILE = 'IPLookup-output.csv'
CSVCOLS = '"ip-address","asn","as-name","isp","abuse-1","abuse-2","abuse-3","domain","reverse-dns","type","country","lat","long","tor-node"'
def identify(var):
    """Return the sector label for *var* by scanning SECTOR_CSV.

    Every row whose first column is a substring of *var* overwrites the
    result, so the last matching row wins; "" when nothing matches.
    """
    sector = ""
    with open(SECTOR_CSV) as handle:
        for row in csv.reader(handle):
            if row[0] in var:
                sector = row[1]
    return sector
def lookup(value):
    """Perform a DNS TXT lookup on *value*.

    Returns the last TXT string seen, split on "|" (with surrounding
    spaces stripped) into a list of fields; returns [] when the name
    does not exist or no nameserver answers.  Other dns.resolver
    exceptions (e.g. timeouts) propagate to the caller.
    """
    try:
        answers = dns.resolver.query(value, 'TXT')
        for rdata in answers:
            for txt_string in rdata.strings:
                # Normalize " | " separators, then split into fields.
                value = txt_string.replace(" | ", "|")
                value = value.replace(" |", "|").split("|")
    except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
        value = []
    return value
def flookup(value, fname, sfile):
    """Look up a value in a file; returns the string 'true' or 'false'.

    Opens *fname*, downloading it from *sfile* first when it is missing
    (urllib.URLopener is Python 2 only), then memory-maps the file and
    searches it for *value* as a substring.
    NOTE(review): neither the file handle nor the mmap is closed here;
    acceptable for a short-lived script but a leak under joblib's
    parallel batch mode -- confirm before reuse.
    """
    try:
        fhandle = open(fname)
    except IOError:
        # Cache miss: fetch the Tor list once, then retry.
        sourceFile = urllib.URLopener()
        sourceFile.retrieve(
            sfile,
            fname)
        fhandle = open(fname)
    search = mmap.mmap(fhandle.fileno(), 0, access=mmap.ACCESS_READ)
    if search.find(value) != -1:
        return 'true'
    else:
        return 'false'
def iprange(sample, sub):
    """Return True when IP string *sample* lies inside subnet *sub*.

    *sub* is ``0`` when no previous subnet has been recorded.  A
    malformed subnet string yields False rather than raising.
    """
    # Bug fix: the original tested ``sub is not 0`` -- an *identity*
    # comparison against an int literal, which is CPython-implementation
    # dependent.  Use value equality instead.
    if sub != 0:
        try:
            if sample in IPSet([sub]):
                return True
        except AddrFormatError:
            return False
    # Not in range, or no subnet recorded.  The original fell through to
    # an implicit None here; callers only test ``is True``, so returning
    # an explicit False is backward compatible.
    return False
def mainlookup(var):
    """Enrich one public IPv4 address (or hostname) and return JSON.

    Queries Shadowserver (ASN/origin), Abusix (abuse contacts), reverse
    DNS, GeoLite2 and the Tor exit list, fills the module-global
    INPUTDICT, appends a CSV row via csvout(), and returns the result as
    a JSON string.  Private or non-IPv4 input yields an empty record.
    """
    global SUBNET
    global INPUTDICT
    # Strip all whitespace from the input token.
    var = ''.join(var.split())
    if IP(var).iptype() != 'PRIVATE' and IP(var).version() == 4:
        if iprange(var, SUBNET) is True:
            # Same subnet as the previous lookup: skip the remote queries.
            # Bare ``print`` is a Python 2 no-op placeholder (blank line).
            print
        elif INPUTDICT.get("ip-address") == var:
            # Exact duplicate of the previous lookup: skip as well.
            print
        else:
            try:
                socket.inet_aton(var)
            except socket.error:
                # Not a dotted quad: resolve the hostname first.
                var = socket.gethostbyname(var)
            contactlist = []
            # Reversed octet order, as required by the DNSBL-style zones.
            rvar = '.'.join(reversed(str(var).split(".")))
            origin = lookup(rvar + '.origin.asn.shadowserver.org')
            # origin fields: asn, prefix, as-name, country, domain, descr.
            SUBNET = origin[1]
            try:
                contact = lookup(rvar + '.abuse-contacts.abusix.org')
                contactlist = str(contact[0]).split(",")
            except IndexError:
                contactlist = []
            # Pad to at least 4 entries so indexing below is safe.
            contactlist.extend(["-"] * (4 - len(contactlist)))
            try:
                addr = dns.reversename.from_address(var)
                rdns = str(dns.resolver.query(addr, "PTR")[0])
            except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
                rdns = ""
            match = geolite2.lookup(var)
            if match is None or match.location is None:
                country = ''
                location = ["", ""]
            else:
                country = match.country
                location = match.location
            tor = flookup(var, torcsv, sfile)
            # Sector classification: try the domain, then the first
            # abuse contact.
            category = identify(origin[4])
            if category == "":
                category = identify(contactlist[0])
            # Pad origin to at least 6 entries so indexing below is safe.
            origin.extend(["-"] * (6 - len(origin)))
            INPUTDICT = {
                'abuse-1': contactlist[0],
                'abuse-2': contactlist[1],
                'abuse-3': contactlist[2],
                'as-name': origin[2],
                'asn': origin[0],
                'country': country,
                'descr': origin[5],
                'domain': origin[4],
                'ip-address': var,
                'lat': location[0],
                'long': location[1],
                'reverse-dns': rdns,
                'tor-node': tor,
                'sector': category,
            }
    else:
        # Private or non-IPv4 input: emit an empty record.
        INPUTDICT = {
            'abuse-1': "", 'abuse-2': "", 'abuse-3': "", 'as-name': "",
            'asn': "", 'country': "", 'descr': "", 'domain': "",
            'domain-count': "", 'ip-address': var, 'lat': "", 'long': "",
            'reverse-dns': "", 'tor-node': "", 'sector': "",
        }
    INPUTDICT['ip-address'] = var
    out = json.dumps(
        INPUTDICT,
        indent=4,
        sort_keys=True,
        ensure_ascii=False)
    csvout(INPUTDICT)
    return out
def batch(inputfile):
    """Handle batch lookups using file based input.

    Re-creates OUTFILE with a header row, then runs mainlookup() over
    every line of *inputfile* in parallel; each lookup appends its own
    CSV row via csvout().
    """
    if os.path.isfile(OUTFILE):
        os.remove(OUTFILE)
    # Write the CSV header exactly once.  (The original guarded this
    # with a local ``header`` flag that was always 0 -- dead logic --
    # and left the handle management manual; a with-block closes it.)
    with open(OUTFILE, "a") as fhandle:
        fhandle.write(str(CSVCOLS) + "\n")
    with open(inputfile) as fhandle:
        Parallel(n_jobs=100, verbose=51)(delayed(mainlookup)(i.rstrip('\n'))
                                         for i in fhandle)
def single(lookupvar):
    """Do a single IP lookup and return its JSON result."""
    return mainlookup(lookupvar)
def csvout(inputdict):
    """Append one fully-quoted CSV row built from *inputdict* to OUTFILE.

    Column order matches the CSVCOLS header written by batch().
    """
    columns = ('ip-address', 'asn', 'as-name', 'descr', 'abuse-1',
               'abuse-2', 'abuse-3', 'domain', 'reverse-dns', 'sector',
               'country', 'lat', 'long', 'tor-node')
    # ``with`` guarantees the handle is closed, like the original
    # try/finally did.
    with open(OUTFILE, "a") as handle:
        writer = csv.writer(handle, quoting=csv.QUOTE_ALL)
        writer.writerow(tuple(inputdict[key] for key in columns))
def main():
    """Command-line entry point: dispatch a single or batch lookup.

    With no (or unknown) arguments, prints usage help.
    """
    import argparse
    parser = argparse.ArgumentParser()
    # Bug fix: the original passed required="false".  Any non-empty
    # string is truthy, so argparse silently made both options
    # mandatory and the print_help() fallback below was unreachable.
    parser.add_argument("-t",
                        choices=('single', 'batch'),
                        required=False,
                        metavar="request-type",
                        help="Either single or batch request")
    parser.add_argument("-v",
                        required=False,
                        metavar="value",
                        help="The value of the request")
    args = parser.parse_args()
    if args.t == "single":
        print(single(args.v))
    elif args.t == "batch":
        batch(args.v)
    else:
        parser.print_help()
if __name__ == "__main__":
main()
| zebde/RobIP | iplookup.py | Python | gpl-3.0 | 6,811 |
import math
def formatAmount(val, prec=3, lowest=0, highest=0, currency=False, forceSign=False):
    """
    Add suffix to value, transform value to match new suffix and round it.

    Keyword arguments:
    val -- value to process
    prec -- precision of final number (number of significant positions to show)
    lowest -- lowest order for suffixizing for numbers 0 < |num| < 1
    highest -- highest order for suffixizing for numbers |num| > 1
    currency -- if currency, billion suffix will be B instead of G
    forceSign -- if True, positive numbers are signed too

    Returns the formatted string, or "" when val is None.
    """
    if val is None:
        return ""
    # Define suffix maps
    posSuffixMap = {3: "k", 6: "M", 9: "B" if currency is True else "G"}
    negSuffixMap = {-6: '\u03bc', -3: "m"}
    # Define tuple of the map keys
    # As we're going to go from the biggest order of abs(key), sort
    # them differently due to one set of values being negative
    # and other positive
    posOrders = tuple(sorted(iter(posSuffixMap.keys()), reverse=True))
    negOrders = tuple(sorted(iter(negSuffixMap.keys()), reverse=False))
    # Find the least abs(key)
    posLowest = min(posOrders)
    negHighest = max(negOrders)
    # By default, mantissa takes just value and no suffix
    mantissa, suffix = val, ""
    # Positive suffixes
    if abs(val) > 1 and highest >= posLowest:
        # Start from highest possible suffix
        for key in posOrders:
            # Find first suitable suffix and check if it's not above highest order
            if abs(val) >= 10 ** key and key <= highest:
                mantissa, suffix = val / float(10 ** key), posSuffixMap[key]
                # Do additional step to eliminate results like 999999 => 1000k
                # If we're already using our greatest order, we can't do anything useful
                if posOrders.index(key) == 0:
                    break
                else:
                    # Get order greater than current
                    prevKey = posOrders[posOrders.index(key) - 1]
                    # Check if the key to which we potentially can change is greater
                    # than our highest boundary
                    if prevKey > highest:
                        # If it is, bail - we already have acceptable results
                        break
                    # Find multiplier to get from one order to another
                    orderDiff = 10 ** (prevKey - key)
                    # If rounded mantissa according to our specifications is greater than
                    # or equal to multiplier
                    if roundToPrec(mantissa, prec) >= orderDiff:
                        # Divide mantissa and use suffix of greater order
                        mantissa, suffix = mantissa / orderDiff, posSuffixMap[prevKey]
                # Otherwise consider current results as acceptable
                break
    # Take numbers between 0 and 1, and matching/below highest possible negative suffix
    elif abs(val) < 1 and val != 0 and lowest <= negHighest:
        # Start from lowest possible suffix
        for key in negOrders:
            # Get next order
            try:
                nextKey = negOrders[negOrders.index(key) + 1]
            except IndexError:
                # Past the last negative order; treat "no suffix" as order 0.
                nextKey = 0
            # Check if mantissa with next suffix is in range [1, 1000)
            if abs(val) < 10 ** nextKey and key >= lowest:
                mantissa, suffix = val / float(10 ** key), negSuffixMap[key]
                # Do additional step to eliminate results like 0.9999 => 1000m
                # Check if the key we're potentially switching to is greater than our
                # upper boundary
                if nextKey > highest:
                    # If it is, leave loop with results we already have
                    break
                # Find the multiplier between current and next order
                orderDiff = 10 ** (nextKey - key)
                # If rounded mantissa according to our specifications is greater than
                # or equal to multiplier
                if roundToPrec(mantissa, prec) >= orderDiff:
                    # Divide mantissa and use suffix of greater order
                    # Use special handling of zero key as it's not on the map
                    mantissa, suffix = mantissa / orderDiff, posSuffixMap[nextKey] if nextKey != 0 else ""
                # Otherwise consider current results as acceptable
                break
    # Round mantissa according to our prec variable
    mantissa = roundToPrec(mantissa, prec)
    sign = "+" if forceSign is True and mantissa > 0 else ""
    # Round mantissa and add suffix
    result = "{0}{1}{2}".format(sign, mantissa, suffix)
    return result
def roundToPrec(val, prec):
    """Round *val* to *prec* significant digits.

    Integral input is returned unchanged as int (this also guarantees we
    never take log10 of zero); results that come out whole, like 5.0,
    collapse to int as well.
    """
    if int(val) == val:
        return int(val)
    # Decimal places needed to keep ``prec`` significant digits; never
    # negative, so integer parts are not rounded away.
    digits = max(0, int(prec - math.ceil(math.log10(abs(val)))))
    rounded = round(val, digits)
    return int(rounded) if int(rounded) == rounded else rounded
def roundDec(val, prec):
    """Round *val* to *prec* decimal places; whole values collapse to int."""
    return int(val) if int(val) == val else round(val, prec)
| bsmr-eve/Pyfa | gui/utils/numberFormatter.py | Python | gpl-3.0 | 5,541 |
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
class MetricsConfig(AppConfig):
    """Django application configuration for Weblate's metrics app."""

    name = "weblate.metrics"
    label = "metrics"
    verbose_name = "Metrics"
| nijel/weblate | weblate/metrics/apps.py | Python | gpl-3.0 | 906 |
#!/usr/bin/env python
import matplotlib as mpl
from matplotlib import pyplot as plt
import argparse
def add_adiabatic_map_to_axis(axis, style, energies, color):
    """Draw one energy profile: a horizontal deck per stationary point,
    joined by slanted connector lines, all in *color*."""
    width = style['WIDTH']
    spacing = style['SPACING']
    line_fmt = '-%s' % color
    # Horizontal deck for each stationary point.
    for index, energy in enumerate(energies):
        left = style['START'] + index * spacing
        axis.plot([left, left + width], [energy, energy],
                  line_fmt, linewidth=2)
    # Connectors between consecutive decks.
    for index in range(1, len(energies)):
        left = style['START'] + (index - 1) * spacing
        axis.plot([left + width, left + spacing],
                  [energies[index - 1], energies[index]], line_fmt)
def getargs():
    """Parse command-line options for the PES plot.

    Returns the argparse namespace.  ``colors`` (when given) is padded
    with 'k' so that every energy set has a colour.
    """
    parser = argparse.ArgumentParser(description="""
    Make plot from user provided energies.
    Can read multiple sets of energies.""",
                                     formatter_class=argparse.RawTextHelpFormatter
                                     )
    parser.add_argument('-o', '--output',
                        default='PES.svg',
                        help='File name of output figure')
    parser.add_argument('--dpi',
                        default=300, type=int,
                        help='Resolution for bitmaps')
    parser.add_argument('-e', '--energies',
                        nargs='+', type=float, action='append',
                        help='Energies for any number of stationary points')
    parser.add_argument('-l', '--labels', nargs='+',
                        help='Name of stationary points')
    parser.add_argument('-c', '--colors', nargs='+',
                        help='Color codes')
    args = parser.parse_args()
    # less colors than PES ? add 'k'
    if args.colors:
        missing_colors = len(args.energies) - len(args.colors)
        missing_colors = (missing_colors > 0) * missing_colors
        # list += str extends the list with one 'k' per missing colour.
        args.colors += 'k' * missing_colors
    return args
def makelabels(N):
    """Make automatic labels: R, TS1, INT1, TS2, ..., P.

    *N* is the number of stationary points; the final 'P' is added only
    for odd N >= 3.

    Bug fix: the original used ``/``, which yields floats on Python 3
    and made ``range()`` raise TypeError; ``//`` is identical for ints
    on Python 2, so behaviour there is unchanged.
    """
    labels = ['R']
    n_ts = N // 2
    n_i = max(0, (N - 2) // 2)  # becomes zero if negative
    for i in range(n_ts + n_i):
        if i % 2:
            labels.append('INT%d' % (i // 2 + 1))
        else:
            labels.append('TS%d' % (i // 2 + 1))
    if N % 2 and N >= 3:
        labels.append('P')
    return labels
def configure_axis_limits(axis, style, energies):
    """Fit the axis limits and x-ticks to all energy sets.

    Returns the length of the longest set (the number of x positions).
    """
    ymin = float('+inf')
    ymax = float('-inf')
    maxlen = 0
    for series in energies:
        ymin = min(ymin, min(series))
        ymax = max(ymax, max(series))
        maxlen = max(maxlen, len(series))
    # Pad the vertical range by 10% on each side.
    span = ymax - ymin
    axis.set_ylim(ymin - 0.1 * span, ymax + 0.1 * span)
    # Left+right margins plus one deck width and all the gaps.
    right_edge = (2 * style['START'] + style['WIDTH']
                  + (maxlen - 1) * style['SPACING'])
    axis.set_xlim(0, right_edge)
    # One tick centred under each deck.
    ticks = [style['START'] + i * style['SPACING'] + style['WIDTH'] / 2.0
             for i in range(maxlen)]
    axis.set_xticks(ticks)
    return maxlen
def main():
    """Build and save the potential-energy-surface figure."""
    # get user input
    args = getargs()
    # important style features
    style = {
        'WIDTH'   : 4,  # width of horizontal bars
        'SPACING' : 10,  # spacing between center of horizontal bars
        'START'   : 3  # x-offset from y-axis
    }
    # Configure Figure
    fig = plt.gcf()
    fig.set_size_inches(3.3, 2.5)
    mpl.rcParams.update({'font.size': 7, 'axes.linewidth':0.5})
    plt.subplots_adjust(bottom=.15)
    plt.subplots_adjust(left=.15)
    plt.ylabel('Energy (kcal/mol)')
    plt.xlabel('Reaction coordinate')
    ax = fig.gca()
    ax.grid(True)
    # Fit limits/ticks to the data; maxlen drives automatic labels.
    maxlen = configure_axis_limits(ax, style, args.energies)
    if not args.labels:
        args.labels = makelabels(maxlen)
    ax.set_xticklabels(args.labels)
    # plot stuff
    color = 'k'  # default when no colour list was given
    for j,energies in enumerate(args.energies):
        if args.colors:
            color = args.colors[j]
        add_adiabatic_map_to_axis(ax, style, energies, color)
    plt.savefig(args.output, dpi=args.dpi)
if __name__ == '__main__':
main()
| eduardoftoliveira/qt_scripts | scripts/draw_PES.py | Python | gpl-3.0 | 3,836 |
#
# Copyright (C) 2016 Dang Duong
#
# This file is part of Open Tux World.
#
# Open Tux World is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Open Tux World is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Open Tux World. If not, see <http://www.gnu.org/licenses/>.
#
from scripts import common
from mathutils import Vector
logic = common.logic
def main(cont):
    """Per-frame Blender Game Engine controller for a falling actor.

    Applies manual gravity, measures the frame-to-frame velocity change
    to detect impacts (converting hard ones into damage), and switches
    the body back to non-rigid once it has settled.
    NOTE(review): assumes BGE logic-brick conventions (cont is a
    controller, sensors wired as below) -- confirm against the .blend.
    """
    own = cont.owner
    # Manual gravity force; presumably scene gravity is disabled for
    # this object -- TODO confirm.
    own.applyForce([0, 0, -10 * own.mass], False)
    if own["health"] < 1:
        # Dead: keep falling but skip damage/state handling.
        return
    own["hit"] = False
    own.enableRigidBody()
    # Velocity recorded on the previous frame, and this frame's delta.
    v = Vector((own["v_x"], own["v_y"], own["v_z"]))
    dv = Vector(own.worldLinearVelocity) - v
    v += dv
    # Impact strength = magnitude of the velocity change.
    speed = common.getDistance([dv.x, dv.y, dv.z])
    own["v_x"] = v.x
    own["v_y"] = v.y
    own["v_z"] = v.z
    if speed > common.DANGER_SPEED:
        if speed > common.FATAL_SPEED:
            own["health"] = 0
        else:
            own["health"] -= speed * (common.HIGH_DAMAGE_RATE if speed > common.HIGH_DANGER_SPEED else common.DAMAGE_RATE)
        own.state = logic.KX_STATE3
    elif speed < common.RIGID_SPEED and (cont.sensors["Collision.001"].positive or not own["fall"]):
        # Settled: freeze physics and stand the object upright.
        own.disableRigidBody()
        own.worldOrientation[2] = [0.0,0.0,1.0]
        own.state = logic.KX_STATE2
| khanhduong95/Open-Tux-World | scripts/fall.py | Python | gpl-3.0 | 1,747 |
# This file is part of profileNJ
#
# Date: 02/2014
# ClusterUtils contain implementation of nj and upgma clustering algo, and
# required methods
__author__ = "Emmanuel Noutahi"
from TreeClass import TreeClass
import os
import numpy as np
from StringIO import StringIO
import random
# Prefer lxml's etree (should be available since ete uses it), then fall
# back to the stdlib implementations.
try:
    from lxml import etree
    # should work since etree is used by ete
except ImportError:
    try:
        import xml.etree.cElementTree as etree
    # Bug fix: this handler was spelled ``ImporError`` -- an undefined
    # name -- so the fallback raised NameError whenever cElementTree was
    # unavailable (it was removed in Python 3.9).
    except ImportError:
        try:
            import xml.etree.ElementTree as etree
        except ImportError:
            # Leave ``etree`` undefined; only parseFastPhyloXml needs it.
            pass
np.set_printoptions(precision=3)
numerictypes = np.core.numerictypes.sctype2char
Float = numerictypes(float)
def find_smallest_index(matrice):
    """Return the (i, j) position of the smallest entry of *matrice*.

    Only the strict lower triangle is scanned, so the diagonal (which
    should hold the largest/neutral values) can never be selected and
    the returned pair always satisfies i > j.
    """
    rows, cols = np.tril_indices_from(matrice, -1)
    flat_pos = matrice[rows, cols].argmin()
    return np.vstack((rows, cols))[:, flat_pos]
def condense_matrix(matrice, smallest_index, method='upgma'):
    """Condense *matrice* in place after joining the pair at *smallest_index*.

    The merged node's distances (row mean for UPGMA, the NJ update
    otherwise) are written into the row and column of the *second*
    index, and the *first* index's row/column are then removed via
    remove_ij.  Returns the shrunken array (one row/column smaller).
    """
    first_index, second_index = smallest_index
    # Columns of the two nodes being merged, as (n, 2).
    rows = np.take(matrice, smallest_index, 1)
    # default we use upgma
    if(method.lower() == 'nj'):
        # NJ update: d(u,k) = (d(i,k) + d(j,k) - d(i,j)) / 2
        new_vector = (
            np.sum(rows, 1) - matrice[first_index, second_index]) * 0.5
    else:
        new_vector = np.average(rows, 1)
    # Write the merged node's distances over the second index's row/column.
    matrice[second_index] = new_vector
    matrice[:, second_index] = new_vector
    np.fill_diagonal(matrice, 0)
    # Remove the first index's row and column entirely (the original
    # docstring's "fill with large numbers" description is outdated).
    return remove_ij(matrice, first_index, first_index)
def remove_ij(x, i, j):
    """Return *x* with row *i* and column *j* removed.

    Works on a view: ``y`` aliases the top-left (n-1)x(n-1) corner of
    *x*, and the three quadrants lying past row i / column j are copied
    up/left over it -- so *x* itself is mutated.  The returned array is
    one row and one column smaller.
    """
    # Row i and column j divide the array into 4 quadrants
    y = x[:-1, :-1]
    # Shift each quadrant that lies below row i or right of column j.
    y[:i, j:] = x[:i, j + 1:]
    y[i:, :j] = x[i + 1:, :j]
    y[i:, j:] = x[i + 1:, j + 1:]
    return y
def calculate_Q_ij(matrice, ind, n):
    """Neighbor-joining Q criterion for the pair ind=(i, j) among n taxa:
    (n - 2) * d(i, j) - sum(row i) - sum(row j)."""
    i, j = ind
    weighted = (n - 2) * matrice[i, j]
    return weighted - np.sum(matrice[i]) - np.sum(matrice[j])
def calculate_Q_matrix(matrice):
    """Compute the full neighbor-joining Q matrix.

    Q[i, j] = (n - 2) * d(i, j) - sum(row i) - sum(row j)

    Vectorized with numpy broadcasting instead of the original
    element-by-element ``np.nditer`` loop: same float values, but the
    O(n^2) work runs in C.
    """
    n = matrice.shape[0]
    # Row sums in float so the result dtype matches the original
    # (which filled a float64 zeros array).
    row_sums = matrice.sum(axis=1, dtype=float)
    return ((n - 2) * matrice.astype(float)
            - row_sums[:, np.newaxis] - row_sums[np.newaxis, :])
def paired_node_distance(matrice, smallest_index):
    """Branch lengths from the two joined nodes to their new parent node.

    Implements http://en.wikipedia.org/wiki/Neighbor_joining#equation_2.
    When only the final two nodes remain, the distance between them is
    simply split evenly.
    """
    i, j = smallest_index
    n = matrice.shape[0]
    if n - 2 > 0:
        # Second term of equation 2: divergence difference between the
        # joined row and column.
        divergence = np.sum(matrice[i]) - np.sum(matrice[:, j])
        dist_i = 0.5 * matrice[i, j] + (0.5 / (n - 2)) * divergence
        return dist_i, matrice[i, j] - dist_i
    # Final join: split the remaining distance in half.
    half = matrice[i, j] / 2.0
    return half, half
def condense_node_order(matrice, smallest_index, node_order, method='upgma'):
    """
    condenses two nodes in node_order based on smallest_index info

    This function is used to create a tree while condensing a matrice
    with the condense_matrix function. The smallest_index is retrieved
    with find_smallest_index. The two nodes are combined under a new
    TreeClass parent which replaces the entry at the *second* index;
    the entry at the *first* index is deleted from node_order (so the
    list shrinks by one, mirroring remove_ij on the matrix).
    Branch lengths come from paired_node_distance for NJ, half the pair
    distance for UPGMA, and 0 otherwise (random joins).
    """
    index1, index2 = smallest_index
    node1 = node_order[index1]
    node2 = node_order[index2]
    # get the distance between the nodes and assign 1/2 the distance to the
    # Length property of each node
    if(method.lower() == 'nj'):
        dist = paired_node_distance(matrice, smallest_index)
    elif(method.lower() == 'upgma'):
        distance = matrice[index1, index2]
        dist = (distance / 2.0, distance / 2.0)
    else:
        dist = (0, 0)
    nodes = [node1, node2]
    pos = [0, 1]
    for ind in pos:
        nodes[ind].add_features(length=dist[ind])
    # combine the two nodes into a new TreeNode object
    new_node = TreeClass()
    new_node.add_child(node1)
    new_node.add_child(node2)
    new_node.add_features(length=sum(dist))
    # the combined node replaces the entry at index2...
    node_order[index2] = new_node
    # ...and the entry at index1 is removed (index1 > index2, so index2
    # is unaffected by the deletion)
    del node_order[index1]  # distance at i=index2 || j=index2
    return node_order
def NJ_cluster(matrice, node_order, nj_depth=None):
    """
    Node clustering with NJ (neighbor joining).

    matrice is a numpy array; node_order is a list of TreeClass objects
    corresponding to the matrice rows.  Both are modified in place.
    Performs nj_depth join steps (default: all num_entries - 1) and
    returns (tree, matrice, smallest_index) from the last step.
    """
    # this is for a test, should made it into one function with upgma
    num_entries = len(node_order)
    if not nj_depth or nj_depth > (num_entries - 1):
        nj_depth = num_entries - 1  # default, do all, same as upgma
    tree = None
    smallest_index = []
    for _ in range(nj_depth):
        q_matrix = calculate_Q_matrix(matrice)
        index_1, index_2 = find_smallest_index(q_matrix)
        smallest_index = (index_1, index_2)
        # condense_node_order mutates node_order in place; the original
        # bound its return value to an unused local, dropped here.
        condense_node_order(matrice, smallest_index, node_order,
                            method='nj')
        matrice = condense_matrix(matrice, smallest_index, method='nj')
        tree = node_order[smallest_index[1]]
    return tree, matrice, smallest_index
def UPGMA_cluster(matrice, node_order, upgma_depth=None):
    """cluster with UPGMA

    matrice is a numpy array; node_order is a list of TreeClass objects
    corresponding to the matrice rows.  WARNING: changes matrice (and
    node_order) in place.  Performs upgma_depth join steps (default:
    all) and returns (tree, matrice, smallest_index) from the last one.
    """
    num_entries = len(node_order)
    if not upgma_depth or upgma_depth > (num_entries - 1):
        upgma_depth = num_entries - 1  # default, do all
    tree = None
    smallest_index = []
    for _ in range(upgma_depth):
        index_1, index_2 = find_smallest_index(matrice)
        smallest_index = (index_1, index_2)
        # find_smallest_index scans the strict lower triangle, so i > j.
        assert(index_1 > index_2)
        # condense_node_order mutates node_order in place; the original
        # bound its return value to an unused local, dropped here.
        condense_node_order(matrice, smallest_index, node_order,
                            method='upgma')
        matrice = condense_matrix(matrice, smallest_index, method='upgma')
        tree = node_order[smallest_index[1]]
    return tree, matrice, smallest_index
def RAND_cluster(matrice, node_order, rand_depth=None):
    """Random clustering

    matrice is a numpy array; node_order is a list of PhyloNode objects
    corresponding to the matrice rows.  Pairs are joined uniformly at
    random; branch lengths are 0.  WARNING: changes matrice and
    node_order in place.
    NOTE(review): the ``t is not None`` filter suggests joined slots
    were once set to None, but condense_node_order now *deletes* the
    entry instead -- the filter is presumably vestigial; confirm.
    """
    num_entries = len(node_order)
    if not rand_depth or rand_depth > (num_entries - 1):
        rand_depth = num_entries - 1  # default, do all
    tree = None
    smallest_index = []
    for i in range(rand_depth):
        tochoose = [i for i, t in enumerate(node_order) if t is not None]
        index1, index2 = random.sample(tochoose, 2)
        # Keep (larger, smaller) ordering, matching find_smallest_index.
        smallest_index = (max(index1, index2), min(index1, index2))
        node_order = condense_node_order(
            matrice, smallest_index, node_order, method='rand')
        tree = node_order[smallest_index[1]]
    return tree, matrice, smallest_index
def treeCluster(matrice, node_order, depth=None, method='upgma'):
    """Dispatch to the requested clustering algorithm.

    'nj' -> NJ_cluster, 'rand' -> RAND_cluster, anything else -> UPGMA.
    Exactly two leaves are joined directly without any algorithm.
    """
    if len(node_order) == 2:
        pair = (1, 0)
        condense_node_order(matrice, pair, node_order, method='rand')
        return node_order[pair[1]], None, pair
    algorithm = method.lower()
    if algorithm == 'nj':
        return NJ_cluster(matrice, node_order, nj_depth=depth)
    if algorithm == 'rand':
        return RAND_cluster(matrice, node_order, rand_depth=depth)
    return UPGMA_cluster(matrice, node_order, upgma_depth=depth)
def distMatProcessor(distances, nFlagVal=1e305, nFlag=False, ignoreNodes=[]):
    """Formating distance matrix from a file or string input and node order for
    UPGMA or NJ join

    *distances* may be a file path or the raw text/XML itself; FastPhylo
    XML is detected by a leading '<?xml'.  Negative entries are replaced
    by nFlagVal when nFlag is set.  Nodes listed in ignoreNodes are
    dropped from both the matrix and the node order.
    NOTE: Python 2 only as written (``basestring``, ``np.float``).
    The mutable default ``ignoreNodes=[]`` is safe: it is only read.
    """
    read_fl = False
    dist_matrix = []
    node_order = []
    matrix = None
    # Read in matrix if file name is given
    if isinstance(distances, basestring) and os.path.exists(distances):
        distances = open(distances, 'rU')
        distances = distances.read()
    distances_lines = distances.splitlines()
    if '<?xml' in distances_lines[0]:
        # this is an xml file
        # parse it differently
        matrix, node_order = parseFastPhyloXml(
            StringIO(distances), nFlagVal, nFlag)
    else:
        x_ind = 0
        # Phylip-like text: first non-blank line (the count) is skipped,
        # each later line is "<name> <d1> <d2> ...".
        for line in distances_lines:
            line = line.strip()
            if(line):
                if not read_fl:
                    read_fl = True
                else:
                    x_ind += 1
                    line_list = [getFloatValue(
                        x.strip(), x_ind, y_ind, nFlagVal, nFlag) for y_ind, x in enumerate(line.split())]
                    dist_matrix.append(line_list[1:])
                    node_order.append(line_list[0])
        matrix = np.array(dist_matrix, dtype=np.float)
    if ignoreNodes:
        for n in ignoreNodes:
            ind = node_order.index(n)
            # NOTE(review): list.index raises ValueError when absent, so
            # the ind > -1 guard is effectively always true -- confirm
            # whether missing nodes should be tolerated instead.
            if ind > -1:
                matrix = remove_ij(matrix, ind, ind)
                node_order.remove(n)
    return matrix, node_order
def makeFakeDstMatrice(n, dmin, dmax):
    """Random symmetric n*n distance matrix: zero diagonal, off-diagonal
    entries drawn from [dmin, dmax) and symmetrized by averaging."""
    raw = (dmax - dmin) * np.random.random_sample(size=(n, n)) + dmin
    sym = (raw + raw.T) / 2
    np.fill_diagonal(sym, 0)
    return sym
def saveMatrix(filename, matrix, node_order):
    """Write *matrix* to *filename* in a phylip-like layout and return True.

    First line is "\\t<count>"; each following line is
    "<name>\\t<d1> <d2> ...".  NOTE: consumes *node_order* via pop(0),
    exactly like the original.
    """
    # matrix[np.where(matrix==1e305)]=0
    with open(filename, 'w+') as out:
        out.write("\t%i\n" % len(node_order))
        rows = []
        for row in matrix.tolist():
            rows.append(node_order.pop(0) + "\t"
                        + " ".join(map(str, row)) + "\n")
        out.writelines(rows)
    return True
def getFloatValue(number, x_ind, y_ind, nFlagVal, nFlag=False):
    """Validate one distance-matrix cell parsed from text.

    Diagonal cells (x_ind == y_ind) become 0; negative values are
    replaced by nFlagVal when nFlag is set; tokens that are not numbers
    (e.g. the row label) are returned verbatim.
    """
    try:
        parsed = float(number)
    except ValueError:
        return number
    if nFlag and parsed < 0:
        parsed = nFlagVal
    return 0 if x_ind == y_ind else parsed
def parseFastPhyloXml(infile, nFlagVal, nFlag=False):
    """Parse the fastphylo xml format

    Returns (distance_matrix, node_order).  Negative entries are
    replaced by nFlagVal when nFlag is set; the matrix is filled
    symmetrically from the row entries.
    NOTE(review): the '//run' absolute XPath works with lxml's etree;
    the stdlib ElementTree fallback may reject it -- confirm which
    implementation is in use.
    """
    xml = etree.parse(infile)
    run = xml.find('//run')
    dimension = int(run.attrib['dim'])
    identities = run.find('identities')
    node_order = [i.attrib['name'] for i in identities.iter('identity')]
    dm = run.find('dms').find('dm')
    distance_mat = np.zeros(shape=(dimension, dimension), dtype=np.float)
    i = 0
    for node in dm.iter('row'):
        j = 0
        for entry in node.iter('entry'):
            val = float(entry.text)
            if(val < 0 and nFlag):
                val = nFlagVal
            # Mirror each entry so the matrix stays symmetric.
            distance_mat[i, j] = val
            distance_mat[j, i] = val
            j += 1
        i += 1
    return distance_mat, node_order
| maclandrol/profileNJ | profileNJ/TreeLib/ClusterUtils.py | Python | gpl-3.0 | 11,942 |
# -*- coding: utf-8 -*-
from .__version__ import __version__
| GjjvdBurg/HugoPhotoSwipe | hugophotoswipe/__init__.py | Python | gpl-3.0 | 62 |
from __future__ import absolute_import
import csv
from datetime import datetime
import httplib
from itertools import count
import logging
import smtplib
from xml.dom.minidom import Document
from django.conf import settings
from django.core.exceptions import FieldError
from django.core.mail import EmailMultiAlternatives
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext as _rc
from django.utils.html import escape
from .forms import forms_for_survey
from .models import (
Answer,
BALLOT_STUFFING_FIELDS,
FORMAT_CHOICES,
OPTION_TYPE_CHOICES,
Question,
SURVEY_DISPLAY_TYPE_CHOICES,
Submission,
Survey,
SurveyReport,
SurveyReportDisplay,
extra_from_filters,
get_all_answers,
get_filters)
from .jsonutils import dump, dumps, datetime_to_string
from .util import ChoiceEnum, get_function
from . import settings as crowdsourcing_settings
def _user_entered_survey(request, survey):
    """True when the logged-in user already has a submission for *survey*."""
    if not request.user.is_authenticated():
        return False
    session_key = request.session.session_key.lower()
    submissions = survey.submissions_for(request.user, session_key)
    return bool(submissions.count())
def _entered_no_more_allowed(request, survey):
    """ The user entered the survey and the survey allows only one entry. """
    if survey.allow_multiple_submissions:
        return False
    return _user_entered_survey(request, survey)
def _get_remote_ip(request):
    """Client IP address, honouring the last hop of X-Forwarded-For when
    the request came through a proxy."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if not forwarded:
        return request.META['REMOTE_ADDR']
    return forwarded.split(',')[-1].strip()
def _login_url(request):
    """Login URL carrying a ?next= back-reference, or a marker query
    string when no login view is configured."""
    view = crowdsourcing_settings.LOGIN_VIEW
    if not view:
        return "/?login_required=true"
    return (reverse(view) + '?next=%s') % request.path
def _get_survey_or_404(slug, request=None):
    """Fetch a survey by slug; staff users may also see non-live surveys."""
    staff_view = bool(request and request.user.is_staff)
    chosen = Survey.objects if staff_view else Survey.live
    return get_object_or_404(chosen, slug=slug)
def _survey_submit(request, survey):
    """Validate and store a survey submission.

    Guards: redirects anonymous users of login-required surveys,
    rejects cookie-less clients, and shows an "already submitted" page
    for repeat entries.  On success, redirects to the public results
    (when allowed) or re-renders an empty form; on validation failure
    the bound forms are re-rendered.
    """
    if survey.require_login and request.user.is_anonymous():
        # again, the form should only be shown after the user is logged in, but
        # to be safe...
        return HttpResponseRedirect(_login_url(request))
    if not hasattr(request, 'session'):
        return HttpResponse("Cookies must be enabled to use this application.",
                            status=httplib.FORBIDDEN)
    if (_entered_no_more_allowed(request, survey)):
        slug_template = 'crowdsourcing/%s_already_submitted.html' % survey.slug
        return render_to_response([slug_template,
                                   'crowdsourcing/already_submitted.html'],
                                  dict(survey=survey),
                                  _rc(request))
    forms = forms_for_survey(survey, request)
    if _submit_valid_forms(forms, request, survey):
        if survey.can_have_public_submissions():
            return _survey_results_redirect(request, survey, thanks=True)
        return _survey_show_form(request, survey, ())
    else:
        return _survey_show_form(request, survey, forms)
def _submit_valid_forms(forms, request, survey):
    """Save the submission and its answers; False unless every form is valid.

    forms[0] must be the Submission form; the rest produce Answer
    instances (or lists of them, for multi-value questions).  Sends the
    notification email when the survey has one configured.
    """
    if not all(form.is_valid() for form in forms):
        return False
    submission_form = forms[0]
    submission = submission_form.save(commit=False)
    submission.survey = survey
    submission.ip_address = _get_remote_ip(request)
    # Moderated surveys start hidden until approved.
    submission.is_public = not survey.moderate_submissions
    if request.user.is_authenticated():
        submission.user = request.user
    submission.save()
    for form in forms[1:]:
        answer = form.save(commit=False)
        if isinstance(answer, (list, tuple)):
            for a in answer:
                a.submission = submission
                a.save()
        elif answer:
            answer.submission = submission
            answer.save()
    if survey.email:
        _send_survey_email(request, survey, submission)
    return True
def _url_for_edit(request, obj):
    """Absolute admin change-page URL for *obj*, rooted at
    SURVEY_ADMIN_SITE or, failing that, the request's host."""
    app_label = obj._meta.app_label
    module_name = obj._meta.module_name
    try:
        edit_url = reverse("admin:%s_%s_change" % (app_label, module_name),
                           args=(obj.id,))
    except NoReverseMatch:
        # Probably 'admin' is not a registered namespace on a site without an
        # admin. Just fake it.
        edit_url = "/admin/%s/%s/%d/" % (app_label, module_name, obj.id)
    admin_url = crowdsourcing_settings.SURVEY_ADMIN_SITE
    if not admin_url:
        admin_url = "http://" + request.META["HTTP_HOST"]
    elif admin_url[:4].lower() != "http":
        # A bare host was configured; prepend a scheme.
        admin_url = "http://" + admin_url
    return admin_url + edit_url
def _send_survey_email(request, survey, submission):
    """ Email the survey's notification addresses about a new submission.

    The body contains admin edit links, an optional public report link,
    and the submitted answers rendered as HTML. Send failures are logged
    rather than propagated so a broken SMTP setup can't kill the POST. """
    subject = survey.title
    sender = crowdsourcing_settings.SURVEY_EMAIL_FROM
    links = [(_url_for_edit(request, submission), "Edit Submission"),
             (_url_for_edit(request, survey), "Edit Survey"),]
    if survey.can_have_public_submissions():
        u = "http://" + request.META["HTTP_HOST"] + _survey_report_url(survey)
        links.append((u, "View Survey",))
    parts = ["<a href=\"%s\">%s</a>" % link for link in links]
    # Renamed from ``set`` -- the old local shadowed the builtin.
    answers = submission.answer_set.all()
    lines = ["%s: %s" % (a.question.label, escape(a.value),) for a in answers]
    parts.extend(lines)
    html_email = "<br/>\n".join(parts)
    # survey.email may hold several comma-separated recipient addresses.
    recipients = [a.strip() for a in survey.email.split(",")]
    # NOTE(review): the plain-text alternative is the same HTML string --
    # consider a stripped-tags version for text-only mail clients.
    email_msg = EmailMultiAlternatives(subject,
                                       html_email,
                                       sender,
                                       recipients)
    email_msg.attach_alternative(html_email, 'text/html')
    try:
        email_msg.send()
    except smtplib.SMTPException as ex:
        logging.exception("SMTP error sending email: %s" % str(ex))
    except Exception as ex:
        logging.exception("Unexpected error sending email: %s" % str(ex))
def _survey_show_form(request, survey, forms):
    """ Render the survey detail page with the given entry forms (which may
    be empty when the user can't enter). A survey-specific template is
    tried before the generic one. """
    specific_template = 'crowdsourcing/%s_survey_detail.html' % survey.slug
    entered = _user_entered_survey(request, survey)
    return render_to_response([specific_template,
                               'crowdsourcing/survey_detail.html'],
                              dict(survey=survey,
                                   forms=forms,
                                   entered=entered,
                                   login_url=_login_url(request),
                                   request=request),
                              _rc(request))
def _can_show_form(request, survey):
    """Return True when the entry form should be shown: the survey is
    open, any login requirement is satisfied, and the user is still
    allowed to enter."""
    is_logged_in = request.user.is_authenticated()
    login_satisfied = is_logged_in or not survey.require_login
    entries_exhausted = _entered_no_more_allowed(request, survey)
    return all([survey.is_open, login_satisfied, not entries_exhausted])
def survey_detail(request, slug):
    """ When you load the survey, this view decides what to do. It displays
    the form, redirects to the results page, displays messages, or whatever
    makes sense based on the survey, the user, and the user's entries. """
    survey = _get_survey_or_404(slug, request)
    # A closed survey with public results is just its report page.
    if not survey.is_open and survey.can_have_public_submissions():
        return _survey_results_redirect(request, survey)
    need_login = (survey.is_open
                  and survey.require_login
                  and not request.user.is_authenticated())
    if _can_show_form(request, survey):
        if request.method == 'POST':
            return _survey_submit(request, survey)
        forms = forms_for_survey(survey, request)
    elif need_login:
        # Render the page with no forms; the template shows the login link.
        forms = ()
    elif survey.can_have_public_submissions():
        # The user can't enter (e.g. already entered), but can see results.
        return _survey_results_redirect(request, survey)
    else: # Survey is closed with private results.
        forms = ()
    return _survey_show_form(request, survey, forms)
def embeded_survey_questions(request, slug):
    """ Render just the survey's question forms for embedding in another
    page, handling form submission inline. (Name spelling kept for URL
    compatibility.) """
    survey = _get_survey_or_404(slug, request)
    templates = ['crowdsourcing/embeded_survey_questions_%s.html' % slug,
                 'crowdsourcing/embeded_survey_questions.html']
    forms = ()
    if _can_show_form(request, survey):
        forms = forms_for_survey(survey, request)
        if request.method == 'POST':
            if _submit_valid_forms(forms, request, survey):
                # Submission succeeded; clear the forms so the template
                # can show a confirmation instead.
                forms = ()
    return render_to_response(templates, dict(
        entered=_user_entered_survey(request, survey),
        request=request,
        forms=forms,
        survey=survey,
        login_url=_login_url(request)), _rc(request))
def _survey_results_redirect(request, survey, thanks=False):
    """Redirect to page 1 of the survey's default report.

    When *thanks* is set, drop a session flag so the report page can show
    a thanks-for-entering message for this survey."""
    redirect = HttpResponseRedirect(_survey_report_url(survey))
    if thanks:
        session_key = 'survey_thanks_%s' % survey.slug
        request.session[session_key] = '1'
    return redirect
def _survey_report_url(survey):
    """Reverse the URL of page 1 of the survey's default report."""
    return reverse('survey_default_report_page_1',
                   kwargs=dict(slug=survey.slug))
def allowed_actions(request, slug):
    """ Return a JSON summary of what the current user may do with this
    survey: enter it, view public results, whether it's open, and whether
    they must log in first. """
    survey = _get_survey_or_404(slug, request)
    authenticated = request.user.is_authenticated()
    response = HttpResponse(mimetype='application/json')
    dump({"enter": _can_show_form(request, survey),
          "view": survey.can_have_public_submissions(),
          "open": survey.is_open,
          "need_login": survey.require_login and not authenticated}, response)
    return response
def questions(request, slug):
    """Return the survey's questions serialized as JSON."""
    survey = _get_survey_or_404(slug, request)
    response = HttpResponse(mimetype='application/json')
    dump(survey.to_jsondata(), response)
    return response
def submissions(request, format):
    """ Use this view to make arbitrary queries on submissions. If the user is
    a logged in staff member, ignore submission.is_public,
    question.answer_is_public, and survey.can_have_public_submissions. Use the
    query string to pass keys and values. For example,
    /crowdsourcing/submissions/?survey=my-survey will return all submissions
    for the survey with slug my-survey.
    survey - the slug for the survey
    user - the username of the submittor. Leave blank for submissions without
    a logged in user.
    submitted_from and submitted_to - strings in the format YYYY-mm-ddThh:mm:ss
    For example, 2010-04-05T13:02:03
    featured - A blank value, 'f', 'false', 0, 'n', and 'no' all mean ignore
    the featured flag. Everything else means display only featured.
    You can also use filters in the survey report sense. Rather than document
    exactly what parameters you would pass, follow these steps to figure it
    out:
    1. Enable filters on your survey and the questions you want to filter on.
    2. Go to the report page and fill out the filters you want.
    3. Click Submit.
    4. Examine the query string of the page you end up on and note which
    parameters are filled out. Use those same parameters here. """
    format = format.lower()
    if format not in FORMAT_CHOICES:
        msg = ("%s is an unrecognized format. Crowdsourcing recognizes "
               "these: %s") % (format, ", ".join(FORMAT_CHOICES))
        return HttpResponse(msg)
    is_staff = request.user.is_authenticated() and request.user.is_staff
    if is_staff:
        results = Submission.objects.all()
    else:
        # survey.can_have_public_submissions is complicated enough that
        # we'll check it in Python, not the database.
        results = Submission.objects.filter(is_public=True)
    results = results.select_related("survey", "user")
    get = request.GET.copy()
    # QueryDict.pop returns a list of values; "limit" caps the row count.
    limit = int(get.pop("limit", [0])[0])
    keys = get.keys()
    basic_filters = (
        'survey',
        'user',
        'submitted_from',
        'submitted_to',
        'featured',
        'is_public')
    if is_staff:
        basic_filters += BALLOT_STUFFING_FIELDS
    survey_slug = ""
    # Translate each recognized query parameter into an ORM filter and
    # remove it from ``get``; whatever is left over is treated as
    # report-style filters further down.
    for field in [f for f in keys if f in basic_filters]:
        value = get[field]
        search_field = field
        if 'survey' == field:
            search_field = 'survey__slug'
            survey_slug = value
        elif 'user' == field:
            if '' == value:
                # Blank means anonymous submissions (no logged in user).
                value = None
            else:
                search_field = 'user__username'
        elif field in ('submitted_from', 'submitted_to'):
            date_format = "%Y-%m-%dT%H:%M:%S"
            try:
                value = datetime.strptime(value, date_format)
            except ValueError:
                return HttpResponse(
                    ("Invalid %s format. Try, for example, "
                     "%s") % (field, datetime.now().strftime(date_format),))
            if 'submitted_from' == field:
                search_field = 'submitted_at__gte'
            else:
                search_field = 'submitted_at__lte'
        elif field in('featured', 'is_public',):
            # Blank or an explicit false-y spelling means "don't filter".
            falses = ('f', 'false', 'no', 'n', '0',)
            value = len(value) and not value.lower() in falses
        # search_field is unicode but needs to be ascii.
        results = results.filter(**{str(search_field): value})
        get.pop(field)
    # NOTE(review): the lambda rebinding below only rebinds a name local
    # to this call of get_survey, so the intended memoization never takes
    # effect and every call re-queries the database -- confirm and fix.
    def get_survey():
        survey = Survey.objects.get(slug=survey_slug)
        get_survey = lambda: survey
        return survey
    # Leftover parameters are survey-report filters; they only make sense
    # in the context of one particular survey.
    if get:
        if survey_slug:
            results = extra_from_filters(
                results,
                "crowdsourcing_submission.id",
                get_survey(),
                get)
        else:
            message = (
                "You've got a couple of extra filters here, and we "
                "aren't sure what to do with them. You may have just "
                "misspelled one of the basic filters (%s). You may have a "
                "filter from a particular survey in mind. In that case, just "
                "include survey=my-survey-slug in the query string. You may "
                "also be trying to pull some hotshot move like, \"Get me all "
                "submissions that belong to a survey with a filter named '%s' "
                "that match '%s'.\" Crowdsourcing could support this, but it "
                "would be pretty inefficient and, we're guessing, pretty "
                "rare. If that's what you're trying to do I'm afraid you'll "
                "have to do something more complicated like iterating through "
                "all your surveys.")
            item = get.items()[0]
            message = message % (", ".join(basic_filters), item[0], item[1])
            return HttpResponse(message)
    # Non-staff users may only see submissions whose survey publishes
    # its results.
    if not is_staff:
        if survey_slug:
            if not get_survey().can_have_public_submissions():
                results = []
        else:
            rs = [r for r in results if r.survey.can_have_public_submissions()]
            results = rs
    if limit:
        results = results[:limit]
    answer_lookup = get_all_answers(results,
                                    include_private_questions=is_staff)
    result_data = []
    for r in results:
        data = r.to_jsondata(answer_lookup, include_private_questions=is_staff)
        result_data.append(data)
    # Flatten each submission's answers into its top-level dict.
    for data in result_data:
        data.update(data["data"])
        data.pop("data")
    def get_keys():
        # Union of every submission's keys, sorted for stable output.
        key_lookup = {}
        for data in result_data:
            for key in data.keys():
                key_lookup[key] = True
        return sorted(key_lookup.keys())
    if format == 'json':
        response = HttpResponse(mimetype='application/json')
        dump(result_data, response)
    elif format == 'csv':
        response = HttpResponse(mimetype='text/csv')
        writer = csv.writer(response)
        keys = get_keys()
        writer.writerow(keys)
        for data in result_data:
            row = []
            for k in keys:
                row.append((u"%s" % _encode(data.get(k, ""))).encode("utf-8"))
            writer.writerow(row)
    elif format == 'xml':
        doc = Document()
        submissions = doc.createElement("submissions")
        doc.appendChild(submissions)
        for data in result_data:
            submission = doc.createElement("submission")
            submissions.appendChild(submission)
            for key, value in data.items():
                if value:
                    cell = doc.createElement(key)
                    submission.appendChild(cell)
                    cell.appendChild(doc.createTextNode(u"%s" % value))
        response = HttpResponse(doc.toxml(), mimetype='text/xml')
    elif format == 'html': # mostly for debugging.
        keys = get_keys()
        results = [
            "<html><body><table>",
            "<tr>%s</tr>" % "".join(["<th>%s</th>" % k for k in keys])]
        for data in result_data:
            cell = "<td>%s</td>"
            cells = [cell % _encode(data.get(key, "")) for key in keys]
            results.append("<tr>%s</tr>" % "".join(cells))
        results.append("</table></body></html>")
        response = HttpResponse("\n".join(results))
    else:
        return HttpResponse("Unsure how to handle %s format" % format)
    return response
def _encode(possible):
    """Make a value printable for CSV/HTML output: True/False become 1/0,
    datetimes become strings, anything else passes through unchanged."""
    if possible is True:
        return 1
    if possible is False:
        return 0
    as_string = datetime_to_string(possible)
    return as_string if as_string else possible
def submission(request, id):
    """Render the public detail page for a single submission; a private
    or unknown submission id is a 404."""
    sub = get_object_or_404(Submission.objects, is_public=True, pk=id)
    return render_to_response('crowdsourcing/submission.html',
                              dict(submission=sub),
                              _rc(request))
def _default_report(survey):
    """ Build an unsaved SurveyReport with a sensible display for every
    public field of the survey: pie charts for choice-type questions, a
    map for location questions, and a slideshow for photo questions. """
    field_count = count(1)
    OTC = OPTION_TYPE_CHOICES
    pie_choices = (
        OTC.BOOL,
        OTC.SELECT,
        OTC.CHOICE,
        OTC.NUMERIC_SELECT,
        OTC.NUMERIC_CHOICE,
        OTC.BOOL_LIST,)
    all_choices = pie_choices + (OTC.LOCATION, OTC.PHOTO)
    public_fields = survey.get_public_fields()
    fields = [f for f in public_fields if f.option_type in all_choices]
    report = SurveyReport(
        survey=survey,
        title=survey.title,
        summary=survey.description or survey.tease)
    displays = []
    for field in fields:
        # Renamed from ``type`` -- the old local shadowed the builtin.
        # ``fields`` is pre-filtered to all_choices, so exactly one of
        # these branches always matches.
        if field.option_type in pie_choices:
            display_type = SURVEY_DISPLAY_TYPE_CHOICES.PIE
        elif field.option_type == OTC.LOCATION:
            display_type = SURVEY_DISPLAY_TYPE_CHOICES.MAP
        elif field.option_type == OTC.PHOTO:
            display_type = SURVEY_DISPLAY_TYPE_CHOICES.SLIDESHOW
        displays.append(SurveyReportDisplay(
            report=report,
            display_type=display_type,
            fieldnames=field.fieldname,
            annotation=field.label,
            order=field_count.next()))
    report.survey_report_displays = displays
    return report
def survey_report(request, slug, report='', page=None):
    """ Render a full-page report for the survey, trying a survey-specific
    template before the generic one. """
    templates = ['crowdsourcing/survey_report_%s.html' % slug,
                 'crowdsourcing/survey_report.html']
    return _survey_report(request, slug, report, page, templates)
def embeded_survey_report(request, slug, report=''):
    """ Render an embeddable (page 1 only) version of the survey report.
    (Name spelling kept for URL compatibility.) """
    templates = ['crowdsourcing/embeded_survey_report_%s.html' % slug,
                 'crowdsourcing/embeded_survey_report.html']
    return _survey_report(request, slug, report, None, templates)
def _survey_report(request, slug, report, page, templates):
    """ Show a report for the survey. As rating is done in a separate
    application we don't directly check request.GET["sort"] here.
    crowdsourcing_settings.PRE_REPORT is the place for that. """
    # Normalize the page argument from the URL; non-numeric pages are 404s.
    if page is None:
        page = 1
    else:
        try:
            page = int(page)
        except ValueError:
            raise Http404
    survey = _get_survey_or_404(slug, request)
    # is the survey anything we can actually have a report on?
    is_public = survey.is_live and survey.can_have_public_submissions()
    if not is_public and not request.user.is_staff:
        raise Http404
    reports = survey.surveyreport_set.all()
    if report:
        report_obj = get_object_or_404(reports, slug=report)
    elif survey.default_report:
        # No report slug in the URL: bounce to the survey's default report.
        args = {"slug": survey.slug, "report": survey.default_report.slug}
        return HttpResponseRedirect(reverse("survey_report_page_1",
                                            kwargs=args))
    else:
        # No saved reports at all; build one on the fly.
        report_obj = _default_report(survey)
    archive_fields = list(survey.get_public_archive_fields())
    is_staff = request.user.is_staff
    # Staff see everything; everyone else only public data.
    if is_staff:
        submissions = survey.submission_set.all()
        fields = list(survey.get_fields())
    else:
        submissions = survey.public_submissions()
        fields = list(survey.get_public_fields())
    filters = get_filters(survey, request.GET)
    id_field = "crowdsourcing_submission.id"
    if not report_obj.display_individual_results:
        submissions = submissions.none()
    else:
        # Apply any report filters passed in the query string.
        submissions = extra_from_filters(submissions,
                                         id_field,
                                         survey,
                                         request.GET)
        # If you want to sort based on rating, wire it up here.
        if crowdsourcing_settings.PRE_REPORT:
            pre_report = get_function(crowdsourcing_settings.PRE_REPORT)
            submissions = pre_report(
                submissions=submissions,
                report=report_obj,
                request=request)
        if report_obj.featured:
            submissions = submissions.filter(featured=True)
        if report_obj.limit_results_to:
            submissions = submissions[:report_obj.limit_results_to]
    paginator, page_obj = paginate_or_404(submissions, page)
    page_answers = get_all_answers(
        page_obj.object_list,
        include_private_questions=is_staff)
    pages_to_link = pages_to_link_from_paginator(page, paginator)
    display_individual_results = all([
        report_obj.display_individual_results,
        archive_fields or (is_staff and fields)])
    context = dict(
        survey=survey,
        submissions=submissions,
        paginator=paginator,
        page_obj=page_obj,
        pages_to_link=pages_to_link,
        fields=fields,
        archive_fields=archive_fields,
        filters=filters,
        report=report_obj,
        page_answers=page_answers,
        is_public=is_public,
        display_individual_results=display_individual_results,
        request=request)
    return render_to_response(templates, context, _rc(request))
def pages_to_link_from_paginator(page, paginator):
    """ Return an array with numbers where you should link to a page, and False
    where you should show elipses. For example, if you have 9 pages and you are
    on page 9, return [1, False, 5, 6, 7, 8, 9]. """
    # Start with a window of up to 9 page numbers centered on the current one.
    window = [n for n in range(page - 4, page + 5)
              if 1 <= n <= paginator.num_pages]
    # Always link the first and last pages, inserting an ellipsis
    # placeholder (False) when the window doesn't already reach them.
    if window[0] > 1:
        window = [1, False] + window
    if window[-1] < paginator.num_pages:
        window = window + [False, paginator.num_pages]
    # Clean up placeholders that turned out to be unnecessary: a gap of
    # exactly one page becomes that page number, and a gap of zero pages
    # is discarded entirely.
    DROP = -999
    for idx in range(1, len(window) - 1):
        if window[idx - 1] + 2 == window[idx + 1]:
            # Turn [1, False, 3... into [1, 2, 3...
            window[idx] = (window[idx - 1] + window[idx + 1]) / 2
        elif window[idx - 1] + 1 == window[idx + 1]:
            # Turn [1, False, 2... into [1, 2...
            window[idx] = DROP
    return [n for n in window if n != DROP]
def paginate_or_404(queryset, page, num_per_page=20):
    """
    paginate a queryset (or other iterator) for the given page, returning the
    paginator and page object. Raises a 404 for an invalid page.
    """
    if page is None:
        page = 1
    paginator = Paginator(queryset, num_per_page)
    try:
        page_obj = paginator.page(page)
    except (EmptyPage, InvalidPage):
        # The old ``except EmptyPage, InvalidPage:`` form only caught
        # EmptyPage (binding it to the name InvalidPage); the tuple form
        # actually catches both exception types.
        raise Http404
    return paginator, page_obj
def location_question_results(
        request,
        question_id,
        limit_map_answers,
        survey_report_slug=""):
    """ Return a JSON payload of {lat, lng, url[, icon]} entries for every
    answer to a location question, for plotting on a map. Only questions
    with public answers are served; private submissions are hidden from
    non-staff users. """
    question = get_object_or_404(Question.objects.select_related("survey"),
                                 pk=question_id,
                                 answer_is_public=True)
    is_staff = request.user.is_staff
    if not question.survey.can_have_public_submissions() and not is_staff:
        raise Http404
    featured = limit_results_to = False
    if survey_report_slug:
        # A specific report can restrict the map to featured submissions
        # and cap the number of results.
        survey_report = get_object_or_404(SurveyReport.objects,
                                          survey=question.survey,
                                          slug=survey_report_slug)
        featured = survey_report.featured
        limit_results_to = survey_report.limit_results_to
    # Map each submission to a custom marker icon based on its answers
    # to the survey's icon questions.
    icon_lookup = {}
    icon_questions = question.survey.icon_questions()
    for icon_question in icon_questions:
        icon_by_answer = {}
        for (option, icon) in icon_question.parsed_option_icon_pairs():
            if icon:
                icon_by_answer[option] = icon
        answer_set = icon_question.answer_set.all()
        for answer in answer_set.select_related("question"):
            if answer.value in icon_by_answer:
                icon = icon_by_answer[answer.value]
                icon_lookup[answer.submission_id] = icon
    # Only answers that actually carry coordinates, newest first.
    answers = question.answer_set.filter(
        ~Q(latitude=None),
        ~Q(longitude=None)).order_by("-submission__submitted_at")
    if not is_staff:
        answers = answers.filter(submission__is_public=True)
    if featured:
        answers = answers.filter(submission__featured=True)
    answers = extra_from_filters(
        answers,
        "submission_id",
        question.survey,
        request.GET)
    limit_map_answers = int(limit_map_answers) if limit_map_answers else 0
    # Apply the tightest of the limits that are actually set (0/False
    # entries are filtered out before taking the min).
    if limit_map_answers or limit_results_to:
        answers = answers[:min(filter(None, [limit_map_answers,
                                             limit_results_to,]))]
    entries = []
    view = "crowdsourcing.views.submission_for_map"
    for answer in answers:
        kwargs = {"id": answer.submission_id}
        d = {
            "lat": answer.latitude,
            "lng": answer.longitude,
            "url": reverse(view, kwargs=kwargs)}
        if answer.submission_id in icon_lookup:
            d["icon"] = icon_lookup[answer.submission_id]
        entries.append(d)
    response = HttpResponse(mimetype='application/json')
    dump({"entries": entries}, response)
    return response
def location_question_map(
        request,
        question_id,
        display_id,
        survey_report_slug=""):
    """ Render the map page for a location question, using either a saved
    report display (by id) or the matching display from the survey's
    default report. """
    question = Question.objects.get(pk=question_id)
    if not question.answer_is_public and not request.user.is_staff:
        raise Http404
    report = None
    limit = 0
    if survey_report_slug:
        report = SurveyReport.objects.get(slug=survey_report_slug,
                                          survey=question.survey)
        limit = report.limit_results_to
    else:
        report = _default_report(question.survey)
    # This cast is not for validation since the urls file already guaranteed
    # it would be a nonempty string of digits. It's simply because display_id
    # is a string.
    if int(display_id):
        display = SurveyReportDisplay.objects.get(pk=display_id)
    else:
        # display_id of 0 means "find the display that shows this question."
        for d in report.survey_report_displays:
            if question.pk in [q.pk for q in d.questions()]:
                display = d
        # NOTE(review): if no display matches, ``display`` is never bound
        # and the code below raises NameError -- confirm callers always
        # reference a question that appears in the report.
    display.limit_map_answers = limit
    return render_to_response('crowdsourcing/location_question_map.html', dict(
        display=display,
        question=question,
        report=report))
def submission_for_map(request, id):
    """ Render the small submission popup used by the location maps. Staff
    may view any submission; everyone else only public ones. """
    template = 'crowdsourcing/submission_for_map.html'
    if request.user.is_staff:
        sub = get_object_or_404(Submission.objects, pk=id)
    else:
        sub = get_object_or_404(Submission.objects, is_public=True, pk=id)
    return render_to_response(template, dict(submission=sub), _rc(request))
| wmde/do-index | crowdsourcing/views.py | Python | gpl-3.0 | 27,746 |
import collections
import os
import codecs
from .top_block import TopBlockGenerator
from .. import Constants
from ..io import yaml
class HierBlockGenerator(TopBlockGenerator):
    """Extends the top block generator to also generate a block YML file"""
    def __init__(self, flow_graph, _):
        """
        Initialize the hier block generator object.
        Args:
            flow_graph: the flow graph object
            _: ignored; kept so the signature matches the other generators
               that accept an output directory
        """
        platform = flow_graph.parent
        # Hier blocks always go into the platform's hier-block library
        # directory, regardless of what the caller passed.
        output_dir = platform.config.hier_block_lib_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        TopBlockGenerator.__init__(self, flow_graph, output_dir)
        self._mode = Constants.HIER_BLOCK_FILE_MODE
        # The generated .py path with its extension swapped for .block.yml.
        self.file_path_yml = self.file_path[:-3] + '.block.yml'
    def write(self):
        """generate output and write it to files"""
        TopBlockGenerator.write(self)
        data = yaml.dump(self._build_block_n_from_flow_graph_io())
        # Insert blank lines between the top-level YML sections so the
        # generated file is easier to read.
        replace = [
            ('parameters:', '\nparameters:'),
            ('inputs:', '\ninputs:'),
            ('outputs:', '\noutputs:'),
            ('asserts:', '\nasserts:'),
            ('templates:', '\ntemplates:'),
            ('documentation:', '\ndocumentation:'),
            ('file_format:', '\nfile_format:'),
        ]
        for r in replace:
            data = data.replace(*r)
        with codecs.open(self.file_path_yml, 'w', encoding='utf-8') as fp:
            fp.write(data)
        # Windows only supports S_IREAD and S_IWRITE, other flags are ignored
        os.chmod(self.file_path_yml, self._mode)
    def _build_block_n_from_flow_graph_io(self):
        """
        Generate a block YML nested data from the flow graph IO
        Returns:
            a yml node tree
        """
        # Extract info from the flow graph
        block_id = self._flow_graph.get_option('id')
        parameters = self._flow_graph.get_parameters()
        def var_or_value(name):
            # Values naming a flow-graph parameter become YML template
            # expressions; anything else passes through literally.
            if name in (p.name for p in parameters):
                return "${" + name + " }"
            return name
        # Build the nested data
        data = collections.OrderedDict()
        data['id'] = block_id
        data['label'] = (
            self._flow_graph.get_option('title') or
            self._flow_graph.get_option('id').replace('_', ' ').title()
        )
        data['category'] = self._flow_graph.get_option('category')
        # Parameters
        data['parameters'] = []
        for param_block in parameters:
            p = collections.OrderedDict()
            p['id'] = param_block.name
            p['label'] = param_block.params['label'].get_value() or param_block.name
            p['dtype'] = param_block.params['value'].dtype
            p['default'] = param_block.params['value'].get_value()
            p['hide'] = param_block.params['hide'].get_value()
            data['parameters'].append(p)
        # Ports
        for direction in ('inputs', 'outputs'):
            data[direction] = []
            for port in get_hier_block_io(self._flow_graph, direction):
                p = collections.OrderedDict()
                p['label'] = port.parent.params['label'].value
                if port.domain != Constants.DEFAULT_DOMAIN:
                    p['domain'] = port.domain
                p['dtype'] = port.dtype
                if port.domain != Constants.GR_MESSAGE_DOMAIN:
                    # Message ports carry no vector length.
                    p['vlen'] = var_or_value(port.vlen)
                if port.optional:
                    p['optional'] = True
                data[direction].append(p)
        t = data['templates'] = collections.OrderedDict()
        t['imports'] = "from {0} import {0}  # grc-generated hier_block".format(
            self._flow_graph.get_option('id'))
        # Make data
        if parameters:
            t['make'] = '{cls}(\n    {kwargs},\n)'.format(
                cls=block_id,
                kwargs=',\n    '.join(
                    '{key}=${{ {key} }}'.format(key=param.name) for param in parameters
                ),
            )
        else:
            t['make'] = '{cls}()'.format(cls=block_id)
        # Self-connect if there aren't any ports
        if not data['inputs'] and not data['outputs']:
            t['make'] += '\nself.connect(self.${id})'
        # Callback data
        t['callbacks'] = [
            'set_{key}(${{ {key} }})'.format(key=param_block.name) for param_block in parameters
        ]
        # Documentation
        data['documentation'] = "\n".join(field for field in (
            self._flow_graph.get_option('author'),
            self._flow_graph.get_option('description'),
            self.file_path
        ) if field)
        data['grc_source'] = str(self._flow_graph.grc_file_path)
        data['file_format'] = 1
        return data
class QtHierBlockGenerator(HierBlockGenerator):
    """Hier block generator variant that marks the generated block as
    needing a QT GUI and wires in the standard gui_hint plumbing."""
    def _build_block_n_from_flow_graph_io(self):
        n = HierBlockGenerator._build_block_n_from_flow_graph_io(self)
        block_n = collections.OrderedDict()
        # insert flags after category
        for key, value in n.items():
            block_n[key] = value
            if key == 'category':
                block_n['flags'] = 'need_qt_gui'
        # Make the GUI nature obvious in the block's label.
        if not block_n['label'].upper().startswith('QT GUI'):
            block_n['label'] = 'QT GUI ' + block_n['label']
        # Every QT GUI block gets the standard gui_hint parameter...
        gui_hint_param = collections.OrderedDict()
        gui_hint_param['id'] = 'gui_hint'
        gui_hint_param['label'] = 'GUI Hint'
        gui_hint_param['dtype'] = 'gui_hint'
        gui_hint_param['hide'] = 'part'
        block_n['parameters'].append(gui_hint_param)
        # ...and its make template places the widget according to the hint.
        block_n['templates']['make'] += (
            "\n<% win = 'self.%s'%id %>"
            "\n${ gui_hint() % win }"
        )
        return block_n
def get_hier_block_io(flow_graph, direction, domain=None):
    """
    Yield the io ports of this flow graph one at a time.

    Args:
        flow_graph: the flow graph to inspect
        direction: 'inputs' walks the pad sources; anything else the sinks
        domain: when given, only ports with a matching domain are yielded
    """
    want_inputs = direction == 'inputs'
    pads = flow_graph.get_pad_sources() if want_inputs else flow_graph.get_pad_sinks()
    for pad in pads:
        ports = pad.sources if want_inputs else pad.sinks
        for port in ports:
            if domain and port.domain != domain:
                continue
            yield port
| mrjacobagilbert/gnuradio | grc/core/generator/hier_block.py | Python | gpl-3.0 | 6,202 |
#!/usr/bin/env python
"""Allows functions from coot_utils to be imported"""
# Copyright 2011, 2012 Kevin Keating
#
# Licensed under the Educational Community License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
#"import coot_utils" results in an error, so this module is required to retrieve
#functions that are defined in coot_utils
import os, sys
from os.path import exists, join
from coot import *
use_gui_qm = False  # coot_utils requires this variable to be defined
# Walk the Python path until we find coot_utils.py, then exec it into this
# module's namespace ("import coot_utils" results in an error, so this is
# the workaround).
for search_dir in sys.path:
    coot_utils_path = join(search_dir, "coot_utils.py")
    if not exists(coot_utils_path):
        continue
    # Newer Coots move the func_py() -> func() renaming out of
    # coot_utils.py into redefine_functions.py, which must be exec'ed
    # first when it is present in the same directory.
    redefine_path = join(search_dir, "redefine_functions.py")
    if exists(redefine_path):
        execfile(redefine_path)
    execfile(coot_utils_path)
    break
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Iztok Jeras <iztok.jeras@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
# Dictionary of ROM commands and their names, next state.
command = {
0x33: ['Read ROM' , 'GET ROM' ],
0x0f: ['Conditional read ROM' , 'GET ROM' ],
0xcc: ['Skip ROM' , 'TRANSPORT' ],
0x55: ['Match ROM' , 'GET ROM' ],
0xf0: ['Search ROM' , 'SEARCH ROM'],
0xec: ['Conditional search ROM', 'SEARCH ROM'],
0x3c: ['Overdrive skip ROM' , 'TRANSPORT' ],
0x69: ['Overdrive match ROM' , 'GET ROM' ],
}
class Decoder(srd.Decoder):
    """1-Wire network-layer decoder.

    Stacks on top of the 1-Wire link-layer decoder: consumes its
    'RESET/PRESENCE' and 'BIT' packets and reassembles them into ROM
    commands, 64-bit ROM addresses and transport-layer data bytes.
    """
    api_version = 2
    id = 'onewire_network'
    name = '1-Wire network layer'
    longname = '1-Wire serial communication bus (network layer)'
    desc = 'Bidirectional, half-duplex, asynchronous serial bus.'
    license = 'gplv2+'
    inputs = ['onewire_link']
    outputs = ['onewire_network']
    annotations = (
        ('text', 'Human-readable text'),
    )
    def __init__(self, **kwargs):
        # Sample numbers bracketing the bit sequence being collected.
        self.beg = 0
        self.end = 0
        # Protocol state machine; see decode() for the transitions.
        self.state = 'COMMAND'
        self.bit_cnt = 0
        # Search-ROM sub-state: 'P' = original address bit, 'N' =
        # complemented bit, 'D' = bit transmitted by the master.
        self.search = 'P'
        self.data_p = 0x0
        self.data_n = 0x0
        self.data = 0x0
        self.rom = 0x0000000000000000
    def start(self):
        self.out_python = self.register(srd.OUTPUT_PYTHON)
        self.out_ann = self.register(srd.OUTPUT_ANN)
    def putx(self, data):
        # Helper function for most annotations.
        self.put(self.beg, self.end, self.out_ann, data)
    def puty(self, data):
        # Helper function for most protocol packets.
        self.put(self.beg, self.end, self.out_python, data)
    def decode(self, ss, es, data):
        # Process one packet from the link-layer decoder.
        code, val = data
        # State machine.
        if code == 'RESET/PRESENCE':
            # A reset restarts the protocol regardless of current state.
            self.search = 'P'
            self.bit_cnt = 0
            self.put(ss, es, self.out_ann,
                     [0, ['Reset/presence: %s' % ('true' if val else 'false')]])
            self.put(ss, es, self.out_python, ['RESET/PRESENCE', val])
            self.state = 'COMMAND'
            return
        # For now we're only interested in 'RESET/PRESENCE' and 'BIT' packets.
        if code != 'BIT':
            return
        if self.state == 'COMMAND':
            # Receiving and decoding a ROM command.
            if self.onewire_collect(8, val, ss, es) == 0:
                return
            if self.data in command:
                self.putx([0, ['ROM command: 0x%02x \'%s\''
                % (self.data, command[self.data][0])]])
                self.state = command[self.data][1]
            else:
                self.putx([0, ['ROM command: 0x%02x \'%s\''
                % (self.data, 'unrecognized')]])
                self.state = 'COMMAND ERROR'
        elif self.state == 'GET ROM':
            # A 64 bit device address is selected.
            # Family code (1 byte) + serial number (6 bytes) + CRC (1 byte)
            if self.onewire_collect(64, val, ss, es) == 0:
                return
            self.rom = self.data & 0xffffffffffffffff
            self.putx([0, ['ROM: 0x%016x' % self.rom]])
            self.puty(['ROM', self.rom])
            self.state = 'TRANSPORT'
        elif self.state == 'SEARCH ROM':
            # A 64 bit device address is searched for.
            # Family code (1 byte) + serial number (6 bytes) + CRC (1 byte)
            if self.onewire_search(64, val, ss, es) == 0:
                return
            self.rom = self.data & 0xffffffffffffffff
            self.putx([0, ['ROM: 0x%016x' % self.rom]])
            self.puty(['ROM', self.rom])
            self.state = 'TRANSPORT'
        elif self.state == 'TRANSPORT':
            # The transport layer is handled in byte sized units.
            if self.onewire_collect(8, val, ss, es) == 0:
                return
            self.putx([0, ['Data: 0x%02x' % self.data]])
            self.puty(['DATA', self.data])
        elif self.state == 'COMMAND ERROR':
            # Since the command is not recognized, print raw data.
            if self.onewire_collect(8, val, ss, es) == 0:
                return
            self.putx([0, ['ROM error data: 0x%02x' % self.data]])
        else:
            raise Exception('Invalid state: %s' % self.state)
    # Data collector.
    def onewire_collect(self, length, val, ss, es):
        # Accumulate bits LSB-first into self.data; return 1 once
        # ``length`` bits have been seen, 0 otherwise.
        # Storing the sample this sequence begins with.
        # NOTE(review): this fires at bit_cnt == 1 (the second bit), so
        # self.beg may miss the first bit's start sample -- confirm.
        if self.bit_cnt == 1:
            self.beg = ss
        self.data = self.data & ~(1 << self.bit_cnt) | (val << self.bit_cnt)
        self.bit_cnt += 1
        # Storing the sample this sequence ends with.
        # In case the full length of the sequence is received, return 1.
        if self.bit_cnt == length:
            self.end = es
            self.data = self.data & ((1 << length) - 1)
            self.bit_cnt = 0
            return 1
        else:
            return 0
    # Search collector.
    def onewire_search(self, length, val, ss, es):
        # Like onewire_collect, but each address bit arrives as a triplet:
        # original bit (P), complemented bit (N), then the bit the master
        # transmits (D) to steer the search.
        # Storing the sample this sequence begins with.
        if (self.bit_cnt == 0) and (self.search == 'P'):
            self.beg = ss
        if self.search == 'P':
            # Master receives an original address bit.
            self.data_p = self.data_p & ~(1 << self.bit_cnt) | \
                          (val << self.bit_cnt)
            self.search = 'N'
        elif self.search == 'N':
            # Master receives a complemented address bit.
            self.data_n = self.data_n & ~(1 << self.bit_cnt) | \
                          (val << self.bit_cnt)
            self.search = 'D'
        elif self.search == 'D':
            # Master transmits an address bit.
            self.data = self.data & ~(1 << self.bit_cnt) | (val << self.bit_cnt)
            self.search = 'P'
            self.bit_cnt += 1
        # Storing the sample this sequence ends with.
        # In case the full length of the sequence is received, return 1.
        if self.bit_cnt == length:
            self.end = es
            self.data_p = self.data_p & ((1 << length) - 1)
            self.data_n = self.data_n & ((1 << length) - 1)
            self.data = self.data & ((1 << length) - 1)
            self.search = 'P'
            self.bit_cnt = 0
            return 1
        else:
            return 0
| salberin/libsigrokdecode | decoders/onewire_network/pd.py | Python | gpl-3.0 | 7,048 |
# Import classes
from idtxl.active_information_storage import ActiveInformationStorage
from idtxl.data import Data
# a) Generate test data: 1000 samples across 5 replications of the
# packaged MuTE example data set.
data = Data()
data.generate_mute_data(n_samples=1000, n_replications=5)
# b) Initialise analysis object and define settings
network_analysis = ActiveInformationStorage()
# 'cmi_estimator' selects the CMI estimator plugin; 'max_lag' presumably
# bounds how far back each process's own history is searched -- see the
# ActiveInformationStorage documentation for details.
settings = {'cmi_estimator': 'JidtGaussianCMI',
            'max_lag': 5}
# c) Run analysis
results = network_analysis.analyse_network(settings=settings, data=data)
# d) Plot list of processes with significant AIS to console
print(results.get_significant_processes(fdr=False))
| pwollstadt/IDTxl | demos/demo_active_information_storage.py | Python | gpl-3.0 | 591 |
# -*- coding: utf-8 -*-
# hhfit.py ---
# Description:
# Author:
# Maintainer:
# Created: Tue May 21 16:31:56 2013 (+0530)
# Commentary:
# Functions for fitting common equations for Hodgkin-Huxley type gate
# equations.
import traceback
import warnings
import numpy as np
import logging
logger_ = logging.getLogger('moose.nml2.hhfit')
try:
import scipy.optimize as _SO
except ImportError:
raise RuntimeError("To use this feature/module, please install scipy")
def exponential2(x, a, scale, x0, y0=0):
    """Exponential parameterized by a scale: a * exp((x - x0) / scale) + y0."""
    shifted = (x - x0) / scale
    return a * np.exp(shifted) + y0
def exponential(x, a, k, x0, y0=0):
    """Exponential parameterized by a rate: a * exp(k * (x - x0)) + y0."""
    growth = np.exp(k * (x - x0))
    return a * growth + y0
def sigmoid2(x, a, scale, x0, y0=0):
    """Logistic sigmoid parameterized by a scale:
    a / (1 + exp(-(x - x0) / scale)) + y0."""
    z = -1 * (x - x0) / scale
    return a / (np.exp(z) + 1.0) + y0
def sigmoid(x, a, k, x0, y0=0):
    """Sigmoid (Boltzmann) rate parameterised by a rate constant.

    Computes ``a / (1 + exp(k * (x - x0))) + y0``.
    """
    shifted = x - x0
    return y0 + a / (1.0 + np.exp(k * shifted))
def linoid2(x, a, scale, x0, y0=0):
    """The so called linoid function. Called explinear in neuroml.

    Computes ``(a / scale) * (x - x0) / (1 - exp(-(x - x0) / scale)) + y0``
    for an array ``x``.

    At x == x0 the expression is 0/0; such singular points are filled
    by interpolating between neighbouring values (interpolation is
    simpler than finding the analytical limit).
    """
    # Suppress the divide/invalid RuntimeWarnings for the singular points
    # that we patch up below.
    with np.errstate(divide='ignore', invalid='ignore'):
        denominator = 1 - np.exp(-1 * (x - x0) / scale)
        ret = (a / scale) * (x - x0) / denominator
    # 0/0 yields nan and finite/0 yields +-inf; catch both.  The original
    # code only scanned for +-inf (missing the nan produced at x == x0)
    # and used `ii == len(ret)` as the last-element test, which can never
    # match a valid index and would have caused an IndexError in the
    # fallthrough branch.
    badidx = np.flatnonzero(~np.isfinite(ret))
    for ii in badidx:
        if ii == 0:
            # extrapolate backwards from the two following points
            ret[ii] = ret[ii + 1] - (ret[ii + 2] - ret[ii + 1])
        elif ii == len(ret) - 1:
            # extrapolate forwards from the two preceding points
            ret[ii] = ret[ii - 1] + (ret[ii - 1] - ret[ii - 2])
        else:
            # interpolate between the two neighbouring points
            ret[ii] = (ret[ii - 1] + ret[ii + 1]) * 0.5
    return ret + y0
def linoid(x, a, k, x0, y0=0):
    """The so called linoid function. Called explinear in neuroml.

    Computes ``a * (x - x0) / (exp(k * (x - x0)) - 1.0) + y0`` for an
    array ``x``.

    At x == x0 the expression is 0/0; such singular points are filled
    by interpolating between neighbouring values (interpolation is
    simpler than finding the analytical limit).
    """
    # Suppress the divide/invalid RuntimeWarnings for the singular points
    # that we patch up below.
    with np.errstate(divide='ignore', invalid='ignore'):
        denominator = np.exp(k * (x - x0)) - 1.0
        ret = a * (x - x0) / denominator
    # 0/0 yields nan and finite/0 yields +-inf; catch both.  The original
    # code only scanned for +-inf (missing the nan produced at x == x0)
    # and used `ii == len(ret)` as the last-element test, which can never
    # match a valid index and would have caused an IndexError in the
    # fallthrough branch.
    badidx = np.flatnonzero(~np.isfinite(ret))
    for ii in badidx:
        if ii == 0:
            # extrapolate backwards from the two following points
            ret[ii] = ret[ii + 1] - (ret[ii + 2] - ret[ii + 1])
        elif ii == len(ret) - 1:
            # extrapolate forwards from the two preceding points
            ret[ii] = ret[ii - 1] + (ret[ii - 1] - ret[ii - 2])
        else:
            # interpolate between the two neighbouring points
            ret[ii] = (ret[ii - 1] + ret[ii + 1]) * 0.5
    return ret + y0
def double_exp(x, a, k1, x1, k2, x2, y0=0):
    """For functions of the form:

    a / (exp(k1 * (x - x1)) + exp(k2 * (x - x2))) + y0

    Returns a zero array of the same length as ``x`` if the computation
    raises.  Note: numpy signals overflow in exp() as a RuntimeWarning,
    which is only *raised* (and hence caught here) when warnings have
    been promoted to errors (e.g. via warnings.simplefilter('error'));
    otherwise the computation proceeds with inf denominators.
    """
    ret = np.zeros(len(x))
    try:
        ret = a / (np.exp(k1 * (x - x1)) + np.exp(k2 * (x - x2))) + y0
    except RuntimeWarning as e:
        # Logger.warn is deprecated; warning() is the supported name.
        logger_.warning(e)
    return ret
# Map from the above functions to corresponding neuroml class.
# double_exp has no standard NeuroML2 rate class, hence None.
fn_rate_map = {
    exponential: 'HHExpRate',
    sigmoid: 'HHSigmoidRate',
    linoid: 'HHExpLinearRate',
    double_exp: None,
}
# These are default starting parameter values: the initial guess (p0)
# handed to scipy.optimize.curve_fit, one entry per fit parameter of the
# corresponding function (excluding the independent variable x).
fn_p0_map = {
    exponential: (1.0, -100, 20e-3, 0.0),
    sigmoid: (1.0, 1.0, 0.0, 0.0),
    linoid: (1.0, 1.0, 0.0, 0.0),
    double_exp: (1e-3, -1.0, 0.0, 1.0, 0.0, 0.0),
}
def randomized_curve_fit(fn, x, y, maxiter=10, best=True):
    """Repeatedly search for a good fit for common gate functions for
    HHtype channels with randomly generated initial parameter
    set. This function first tries with default p0 for fn. If that
    fails to find a good fit, (correlation coeff returned by curve_fit
    being inf is an indication of this), it goes on to generate random
    p0 arrays and try scipy.optimize.curve_fit using this p0 until it
    finds a good fit or the number of iterations reaches maxiter.
    Ideally we should be doing something like stochastic gradient
    descent, but I don't know if that might have performance issue in
    pure python. The random parameterization in the present function
    uses uniformly distributed random numbers within the half-open
    interval [min(x), max(x)). The reason for choosing this: the
    offset used in the exponential parts of Boltzman-type/HH-type
    equations are usually within the domain of x. I also invert the
    second entry (p0[1], because it is always (one of) the scale
    factor(s) and usually 1/v for some v in the domain of x. I have
    not tested the utility of this inversion. Even without this
    inversion, with maxiter=100 this function is successful for the
    test cases.
    Parameters
    ----------
    x: ndarray
        values of the independent variable
    y: ndarray
        sample values of the dependent variable
    maxiter: int
        maximum number of iterations
    best: bool
        if true, repeat curve_fit for maxiter and return the case of least
        squared error.
    Returns
    -------
    The return value of scipy.optimize.curve_fit which succeed, or the
    last call to it if maxiter iterations is reached..
    """
    bad = True
    p0 = fn_p0_map[fn]  # start from the function's default initial guess
    p = None
    p_best = None
    min_err = 1e10  # A large value as placeholder
    for ii in range(maxiter):
        try:
            # p is the (popt, pcov) pair returned by curve_fit
            p = _SO.curve_fit(fn, x, y, p0=p0)
        except (RuntimeError, RuntimeWarning):
            p = None
        # p[1] is the covariance matrix; curve_fit fills it with inf
        # when the optimizer could not estimate it, which we treat as a
        # failed fit.
        bad = (p is None) or (p[1] == np.inf).any()
        if not bad:
            if not best:
                # first successful fit is good enough
                return p
            # track the fit with the least squared error seen so far
            err = sum((y - fn(x, *tuple(p[0])))**2)
            if err < min_err:
                min_err = err
                p_best = p
        # Draw a fresh random initial guess for the next attempt.
        p0 = np.random.uniform(low=min(x),
                               high=max(x),
                               size=len(fn_p0_map[fn]))
        if p0[1] != 0.0:
            p0[1] = 1 / p0[1]  # k = 1/v_scale - could help faster convergence
    if p_best is None:
        if p is not None:
            # NOTE(review): p[-2] is popt here (p is a (popt, pcov)
            # pair), so the warning embeds the last parameter estimate
            # rather than an optimizer status message - confirm intent.
            msg = p[-2]
        else:
            msg = ''
        warnings.warn(
            'Maximum iteration %d reached. Could not find a decent fit. %s' %
            (maxiter, msg), RuntimeWarning)
    return p_best
def find_ratefn(x, y, **kwargs):
    """Find the function that fits the rate function best. This will try
    exponential, sigmoid and linoid and return the best fit.
    Needed until NeuroML2 supports tables or MOOSE supports
    functions.
    Parameters
    ----------
    x: 1D array
        independent variable.
    y: 1D array
        function values.
    **kwargs: keyword arguments
        passed to randomized_curve_fit.
    Returns
    -------
    best_fn: function
        the best fit function.
    best_p: tuple
        the optimal parameter values for the best fit function.
    """
    rms_error = 1e10  # arbitrarily setting this
    best_fn = None
    best_p = None
    # Try every candidate function in fn_rate_map and keep the winner.
    for fn in fn_rate_map:
        p = randomized_curve_fit(fn, x, y, **kwargs)
        if p is None:
            # this function could not be fitted at all
            continue
        popt = p[0]
        # root-mean-square residual of this fit
        error = y - fn(x, *popt)
        erms = np.sqrt(np.mean(error**2))
        # Ideally I want a fuzzy selection criterion here - a
        # preference for fewer parameters, but if the errors are
        # really small then we go for functions with more number of
        # parameters. Some kind of weighted decision would have been
        # nice. I am arbitrarily setting less than 0.1% relative error
        # as a strong argument for taking a longer parameter function
        # as a really better fit. Even with 1%, double exponential
        # betters detected sigmoid for sigmoid curve in test case.
        if erms < rms_error and ((best_p is None) or len(popt) <= len(best_p)
                                 or erms / (max(y) - min(y)) < 0.001):
            rms_error = erms
            best_fn = fn
            best_p = popt
    return (best_fn, best_p)
| dilawar/moose-core | python/moose/neuroml2/hhfit.py | Python | gpl-3.0 | 8,486 |
#!/usr/bin/env python
# APM automatic test suite
# Andrew Tridgell, October 2011
import pexpect, os, sys, shutil, atexit
import optparse, fnmatch, time, glob, traceback, signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pysim'))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', 'mavlink', 'pymavlink'))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', 'mavlink', 'pymavlink', 'generator'))
import util
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
def get_default_params(atype):
    '''get default parameters

    Starts a SIL instance of vehicle type *atype* with a wiped eeprom,
    lets MAVProxy save the factory-default parameter list, and copies
    the resulting file into ../buildlogs/<atype>.defaults.txt.
    Returns True on success.
    '''
    sil = util.start_SIL(atype, wipe=True)
    mavproxy = util.start_MAVProxy_SIL(atype)
    print("Dumping defaults")
    # Either the vehicle asks for setup (fresh eeprom) or MAVProxy has
    # already saved the parameter file.
    idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)'])
    if idx == 0:
        # we need to restart it after eeprom erase
        util.pexpect_close(mavproxy)
        util.pexpect_close(sil)
        sil = util.start_SIL(atype)
        mavproxy = util.start_MAVProxy_SIL(atype)
        idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)')
    # match.group(1) is the path of the saved parameter file
    parmfile = mavproxy.match.group(1)
    dest = util.reltopdir('../buildlogs/%s.defaults.txt' % atype)
    shutil.copy(parmfile, dest)
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    print("Saved defaults for %s to %s" % (atype, dest))
    return True
def dump_logs(atype):
    '''dump DataFlash logs

    Drives the vehicle's setup-mode "logs" menu over MAVProxy to dump
    every on-board DataFlash log into ../buildlogs/<atype>.flashlog.
    Returns True on success.
    '''
    print("Dumping logs for %s" % atype)
    sil = util.start_SIL(atype)
    logfile = util.reltopdir('../buildlogs/%s.flashlog' % atype)
    log = open(logfile, mode='w')
    # setup=True puts the vehicle in its interactive setup CLI; all
    # MAVProxy output is captured into the flashlog file.
    mavproxy = util.start_MAVProxy_SIL(atype, setup=True, logfile=log)
    mavproxy.send('\n\n\n')
    print("navigating menus")
    mavproxy.expect(']')
    mavproxy.send("logs\n")
    mavproxy.expect("logs enabled:")
    lognums = []
    # Count the available logs and collect their numbers.
    i = mavproxy.expect(["No logs", "(\d+) logs"])
    if i == 0:
        numlogs = 0
    else:
        numlogs = int(mavproxy.match.group(1))
    for i in range(numlogs):
        mavproxy.expect("Log (\d+)")
        lognums.append(int(mavproxy.match.group(1)))
    mavproxy.expect("Log]")
    # Dump each log in turn; a dump can be slow, hence the long timeout.
    for i in range(numlogs):
        print("Dumping log %u (i=%u)" % (lognums[i], i))
        mavproxy.send("dump %u\n" % lognums[i])
        mavproxy.expect("logs enabled:", timeout=120)
        mavproxy.expect("Log]")
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    log.close()
    print("Saved log for %s to %s" % (atype, logfile))
    return True
def build_all():
    '''run the build_all.sh script

    Returns True when the script exits with status 0.
    '''
    print("Running build_all.sh")
    status = util.run_cmd(util.reltopdir('Tools/scripts/build_all.sh'),
                          dir=util.reltopdir('.'))
    if status != 0:
        print("Failed build_all.sh")
        return False
    return True
def build_binaries():
    '''run the build_binaries.sh script

    Returns True when the script exits with status 0.
    '''
    print("Running build_binaries.sh")
    import shutil
    # Run a copy of the script: it switches git branches, which could
    # rewrite the script file itself while it is still executing.
    script = util.reltopdir('Tools/scripts/build_binaries.sh')
    script_copy = util.reltopdir('./build_binaries.sh')
    shutil.copyfile(script, script_copy)
    shutil.copymode(script, script_copy)
    ok = util.run_cmd(script_copy, dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed build_binaries.sh")
    return ok
def build_examples():
    '''run the build_examples.sh script

    Returns True when the script exits with status 0.
    '''
    print("Running build_examples.sh")
    status = util.run_cmd(util.reltopdir('Tools/scripts/build_examples.sh'),
                          dir=util.reltopdir('.'))
    if status != 0:
        print("Failed build_examples.sh")
        return False
    return True
def convert_gpx():
    '''convert any tlog files to GPX and KML

    For each ../buildlogs/*.tlog: generate a GPX track, convert it to
    KML with gpsbabel, zip the KML into a KMZ for Google Earth, and
    render a PNG flight-track image.  Always returns True.
    '''
    import glob
    for tlog in glob.glob(util.reltopdir("../buildlogs/*.tlog")):
        util.run_cmd(util.reltopdir("../mavlink/pymavlink/examples/mavtogpx.py") + " --nofixcheck " + tlog)
        gpx_file = tlog + '.gpx'
        kml_file = tlog + '.kml'
        # gpsbabel and zip failures are tolerated (checkfail=False).
        util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx_file, kml_file), checkfail=False)
        util.run_cmd('zip %s.kmz %s.kml' % (tlog, tlog), checkfail=False)
        util.run_cmd(util.reltopdir("../MAVProxy/tools/mavflightview.py") + " --imagefile=%s.png %s" % (tlog, tlog))
    return True
def test_prerequesites():
    '''check we have the right directories and tools to run tests

    All build and test artefacts land in ../buildlogs, so make sure it
    exists.  Always returns True.
    '''
    print("Testing prerequesites")
    util.mkdir_p(util.reltopdir('../buildlogs'))
    return True
def alarm_handler(signum, frame):
    '''handle test timeout

    SIGALRM handler armed in the main program with --timeout seconds.
    Records a TIMEOUT failure, salvages whatever logs exist, writes the
    web report, then kills the whole process group so no simulator
    children are left running.
    '''
    global results, opts
    try:
        results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
        util.pexpect_close_all()
        convert_gpx()
        results.addglob("Google Earth track", '*.kmz')
        results.addfile('Full Logs', 'autotest-output.txt')
        results.addglob('DataFlash Log', '*.flashlog')
        results.addglob("MAVLink log", '*.tlog')
        results.addfile('ArduPlane build log', 'ArduPlane.txt')
        results.addfile('ArduPlane defaults', 'ArduPlane.defaults.txt')
        results.addfile('ArduCopter build log', 'ArduCopter.txt')
        results.addfile('ArduCopter defaults', 'ArduCopter.defaults.txt')
        results.addfile('APMrover2 build log', 'APMrover2.txt')
        results.addfile('APMrover2 defaults', 'APMrover2.defaults.txt')
        write_webresults(results)
        # SIGKILL to process group 0 (our own group) takes this process
        # down too; the sys.exit below only runs if the kill fails.
        os.killpg(0, signal.SIGKILL)
    except Exception:
        pass
    sys.exit(1)
############## main program #############
# Command-line options for the autotest driver.
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')
opts, args = parser.parse_args()
# Vehicle-specific test modules (found via the sys.path entries above).
import arducopter, arduplane, apmrover2
# Ordered list of all test steps; names are dispatched by run_step().
steps = [
    'prerequesites',
    'build.All',
    'build.Binaries',
    'build.Examples',
    'build1280.ArduPlane',
    'build2560.ArduPlane',
    'build.ArduPlane',
    'defaults.ArduPlane',
    'fly.ArduPlane',
    'logs.ArduPlane',
    'build1280.APMrover2',
    'build2560.APMrover2',
    'build.APMrover2',
    'defaults.APMrover2',
    'drive.APMrover2',
    'logs.APMrover2',
    'build2560.ArduCopter',
    'build.ArduCopter',
    'defaults.ArduCopter',
    'fly.ArduCopter',
    'logs.ArduCopter',
    'convertgpx',
    ]
# Wildcard patterns of steps to skip (from --skip), used by skip_step().
skipsteps = opts.skip.split(',')
# ensure we catch timeouts: SIGALRM fires after --timeout seconds and is
# handled by alarm_handler() above.
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)
if opts.list:
    # just list the available steps and exit
    for step in steps:
        print(step)
    sys.exit(0)
def skip_step(step):
    '''see if a step should be skipped

    A step is skipped when its name matches any of the --skip wildcard
    patterns (case-insensitive).
    '''
    return any(fnmatch.fnmatch(step.lower(), pattern.lower())
               for pattern in skipsteps)
def run_step(step):
    '''run one step

    Dispatches a step name from the global `steps` list to the function
    that implements it and returns that function's True/False result.
    Raises RuntimeError for an unknown step name.
    '''
    if step == "prerequesites":
        return test_prerequesites()
    # software-in-the-loop builds
    if step == 'build.ArduPlane':
        return util.build_SIL('ArduPlane')
    if step == 'build.APMrover2':
        return util.build_SIL('APMrover2')
    if step == 'build.ArduCopter':
        return util.build_SIL('ArduCopter')
    # AVR hardware builds (mega / mega2560 boards)
    if step == 'build1280.ArduCopter':
        return util.build_AVR('ArduCopter', board='mega')
    if step == 'build2560.ArduCopter':
        return util.build_AVR('ArduCopter', board='mega2560')
    if step == 'build1280.ArduPlane':
        return util.build_AVR('ArduPlane', board='mega')
    if step == 'build2560.ArduPlane':
        return util.build_AVR('ArduPlane', board='mega2560')
    if step == 'build1280.APMrover2':
        return util.build_AVR('APMrover2', board='mega')
    if step == 'build2560.APMrover2':
        return util.build_AVR('APMrover2', board='mega2560')
    # default-parameter dumps
    if step == 'defaults.ArduPlane':
        return get_default_params('ArduPlane')
    if step == 'defaults.ArduCopter':
        return get_default_params('ArduCopter')
    if step == 'defaults.APMrover2':
        return get_default_params('APMrover2')
    # DataFlash log dumps
    if step == 'logs.ArduPlane':
        return dump_logs('ArduPlane')
    if step == 'logs.ArduCopter':
        return dump_logs('ArduCopter')
    if step == 'logs.APMrover2':
        return dump_logs('APMrover2')
    # simulated flight/drive tests
    if step == 'fly.ArduCopter':
        return arducopter.fly_ArduCopter(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.ArduPlane':
        return arduplane.fly_ArduPlane(viewerip=opts.viewerip, map=opts.map)
    if step == 'drive.APMrover2':
        return apmrover2.drive_APMrover2(viewerip=opts.viewerip, map=opts.map)
    # build scripts
    if step == 'build.All':
        return build_all()
    if step == 'build.Binaries':
        return build_binaries()
    if step == 'build.Examples':
        return build_examples()
    if step == 'convertgpx':
        return convert_gpx()
    raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
    '''Outcome of a single autotest step.'''

    def __init__(self, name, result, elapsed):
        # Step name and HTML result snippet, plus the elapsed wall-clock
        # time rendered with one decimal place.
        self.elapsed = "%.1f" % elapsed
        self.name = name
        self.result = result
class TestFile(object):
    '''A named artefact file attached to the test results.'''

    def __init__(self, name, fname):
        self.fname = fname
        self.name = name
class TestResults(object):
    '''Collects step outcomes, artefact files and images for the web report.'''

    def __init__(self):
        self.date = time.asctime()
        # git revision of the tree under test
        self.githash = util.run_cmd('git rev-parse HEAD', output=True, dir=util.reltopdir('.')).strip()
        self.tests = []
        self.files = []
        self.images = []

    def add(self, name, result, elapsed):
        '''record the outcome of one test step'''
        self.tests.append(TestResult(name, result, elapsed))

    def addfile(self, name, fname):
        '''attach a single artefact file'''
        self.files.append(TestFile(name, fname))

    def addimage(self, name, fname):
        '''attach a single image'''
        self.images.append(TestFile(name, fname))

    def addglob(self, name, pattern):
        '''attach every ../buildlogs file matching pattern'''
        import glob
        for path in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
            self.addfile(name, os.path.basename(path))

    def addglobimage(self, name, pattern):
        '''attach every ../buildlogs image matching pattern'''
        import glob
        for path in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
            self.addimage(name, os.path.basename(path))
def write_webresults(results):
    '''write webpage results

    Renders every HTML template under Tools/autotest/web with the given
    TestResults and copies the supporting PNGs into ../buildlogs.
    '''
    # mavtemplate lives in the pymavlink generator directory
    sys.path.insert(0, os.path.join(util.reltopdir("../mavlink/pymavlink/generator")))
    import mavtemplate
    templater = mavtemplate.MAVTemplate()
    for page in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        contents = util.loadfile(page)
        out = open(util.reltopdir("../buildlogs/%s" % os.path.basename(page)), mode='w')
        templater.write(out, contents, results)
        out.close()
    for image in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(image, util.reltopdir('../buildlogs/%s' % os.path.basename(image)))
# Global results accumulator: filled by run_tests() and alarm_handler(),
# rendered to the web report by write_webresults().
results = TestResults()
def run_tests(steps):
    '''run a list of steps

    Runs each step in order (unless skipped), records PASSED/FAILED and
    elapsed time in the global `results`, attaches all build/log
    artefacts, writes the web report and returns True only if every
    step passed.
    '''
    global results
    passed = True
    failed = []
    for step in steps:
        # close any pexpect children left over from the previous step
        util.pexpect_close_all()
        if skip_step(step):
            continue
        t1 = time.time()
        print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
        try:
            if not run_step(step):
                # the step ran but reported failure
                print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
                passed = False
                failed.append(step)
                results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
                continue
        except Exception, msg:
            # the step raised; log the traceback and keep going
            passed = False
            failed.append(step)
            print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
            traceback.print_exc(file=sys.stdout)
            results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
            continue
        results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
        print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
    if not passed:
        print("FAILED %u tests: %s" % (len(failed), failed))
    util.pexpect_close_all()
    # Attach all artefacts produced by the steps to the web report.
    results.addglob("Google Earth track", '*.kmz')
    results.addfile('Full Logs', 'autotest-output.txt')
    results.addglob('DataFlash Log', '*.flashlog')
    results.addglob("MAVLink log", '*.tlog')
    results.addglob("GPX track", '*.gpx')
    results.addfile('ArduPlane build log', 'ArduPlane.txt')
    results.addfile('ArduPlane code size', 'ArduPlane.sizes.txt')
    results.addfile('ArduPlane stack sizes', 'ArduPlane.framesizes.txt')
    results.addfile('ArduPlane defaults', 'ArduPlane.defaults.txt')
    results.addfile('ArduCopter build log', 'ArduCopter.txt')
    results.addfile('ArduCopter code size', 'ArduCopter.sizes.txt')
    results.addfile('ArduCopter stack sizes', 'ArduCopter.framesizes.txt')
    results.addfile('ArduCopter defaults', 'ArduCopter.defaults.txt')
    results.addfile('APMrover2 build log', 'APMrover2.txt')
    results.addfile('APMrover2 code size', 'APMrover2.sizes.txt')
    results.addfile('APMrover2 stack sizes', 'APMrover2.framesizes.txt')
    results.addfile('APMrover2 defaults', 'APMrover2.defaults.txt')
    results.addglobimage("Flight Track", '*.png')
    write_webresults(results)
    return passed
util.mkdir_p(util.reltopdir('../buildlogs'))
# A lock file prevents two autotest runs from stamping on each other.
lck = util.lock_file(util.reltopdir('../buildlogs/autotest.lck'))
if lck is None:
    print("autotest is locked - exiting")
    sys.exit(0)
# make sure all pexpect children are cleaned up on any exit path
atexit.register(util.pexpect_close_all)
if len(args) > 0:
    # allow a wildcard list of steps
    matched = []
    for a in args:
        for s in steps:
            if fnmatch.fnmatch(s.lower(), a.lower()):
                matched.append(s)
    steps = matched
try:
    if not run_tests(steps):
        sys.exit(1)
except KeyboardInterrupt:
    util.pexpect_close_all()
    sys.exit(1)
except Exception:
    # make sure we kill off any children
    util.pexpect_close_all()
    raise
| GekoCH/HighAltitudeArduPlane20130613 | Tools/autotest/autotest.py | Python | gpl-3.0 | 14,291 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017,2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from textwrap import dedent
from testtools.matchers import Contains, FileContains
from snapcraft.plugins.v1 import _python
from ._basesuite import PythonBaseTestCase
def _create_site_py(base_dir):
site_py = os.path.join(base_dir, "usr", "lib", "pythontest", "site.py")
os.makedirs(os.path.dirname(site_py))
open(site_py, "w").close()
def _create_user_site_packages(base_dir):
user_site_dir = os.path.join(base_dir, "lib", "pythontest", "site-packages")
os.makedirs(user_site_dir)
class SiteCustomizeTestCase(PythonBaseTestCase):
    """Tests for _python.generate_sitecustomize.

    generate_sitecustomize locates the python installation's site.py
    (in the staging area or, failing that, the install area) and writes
    a sitecustomize.py next to it that adds the snap's site-packages
    directories at runtime.
    """

    def setUp(self):
        super().setUp()
        # Exact sitecustomize.py contents expected from
        # generate_sitecustomize for python "pythontest".
        self.expected_sitecustomize = dedent(
            """\
            import site
            import os
            snap_dir = os.getenv("SNAP")
            snapcraft_stage_dir = os.getenv("SNAPCRAFT_STAGE")
            snapcraft_part_install = os.getenv("SNAPCRAFT_PART_INSTALL")
            # Do not include snap_dir during builds as this will include
            # snapcraft's in-snap site directory.
            if snapcraft_stage_dir is not None and snapcraft_part_install is not None:
                site_directories = [snapcraft_stage_dir, snapcraft_part_install]
            else:
                site_directories = [snap_dir]
            for d in site_directories:
                if d:
                    site_dir = os.path.join(d, "lib/pythontest/site-packages")
                    site.addsitedir(site_dir)
            if snap_dir:
                site.ENABLE_USER_SITE = False"""
        )

    def test_generate_sitecustomize_staged(self):
        """sitecustomize is written when python lives in the staging area."""
        stage_dir = "stage_dir"
        install_dir = "install_dir"
        # Create the python binary in the staging area
        self._create_python_binary(stage_dir)
        # Create a site.py in both staging and install areas
        _create_site_py(stage_dir)
        _create_site_py(install_dir)
        # Create a user site dir in install area
        _create_user_site_packages(install_dir)
        _python.generate_sitecustomize(
            "test", stage_dir=stage_dir, install_dir=install_dir
        )
        site_path = os.path.join(
            install_dir, "usr", "lib", "pythontest", "sitecustomize.py"
        )
        self.assertThat(site_path, FileContains(self.expected_sitecustomize))

    def test_generate_sitecustomize_installed(self):
        """sitecustomize is written when python lives in the install area."""
        stage_dir = "stage_dir"
        install_dir = "install_dir"
        # Create the python binary in the installed area
        self._create_python_binary(install_dir)
        # Create a site.py in both staging and install areas
        _create_site_py(stage_dir)
        _create_site_py(install_dir)
        # Create a user site dir in install area
        _create_user_site_packages(install_dir)
        _python.generate_sitecustomize(
            "test", stage_dir=stage_dir, install_dir=install_dir
        )
        site_path = os.path.join(
            install_dir, "usr", "lib", "pythontest", "sitecustomize.py"
        )
        self.assertThat(site_path, FileContains(self.expected_sitecustomize))

    def test_generate_sitecustomize_missing_user_site_raises(self):
        """A missing user site-packages dir raises MissingUserSitePackagesError."""
        stage_dir = "stage_dir"
        install_dir = "install_dir"
        # Create the python binary in the installed area
        self._create_python_binary(install_dir)
        # Create a site.py in both staging and install areas
        _create_site_py(stage_dir)
        _create_site_py(install_dir)
        # Do NOT create a user site dir, and attempt to generate sitecustomize.
        raised = self.assertRaises(
            _python.errors.MissingUserSitePackagesError,
            _python.generate_sitecustomize,
            "test",
            stage_dir=stage_dir,
            install_dir=install_dir,
        )
        self.assertThat(str(raised), Contains("Unable to find user site packages"))

    def test_generate_sitecustomize_missing_site_py_raises(self):
        """A missing staged site.py raises MissingSitePyError."""
        stage_dir = "stage_dir"
        install_dir = "install_dir"
        # Create the python binary in the staging area
        self._create_python_binary(stage_dir)
        # Create a site.py, but only in install area (not staging area)
        _create_site_py(install_dir)
        # Create a user site dir in install area
        _create_user_site_packages(install_dir)
        raised = self.assertRaises(
            _python.errors.MissingSitePyError,
            _python.generate_sitecustomize,
            "test",
            stage_dir=stage_dir,
            install_dir=install_dir,
        )
        self.assertThat(str(raised), Contains("Unable to find site.py"))
| chipaca/snapcraft | tests/unit/plugins/v1/python/test_sitecustomize.py | Python | gpl-3.0 | 5,261 |