code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from bettercache.views import BetterView
class TestView(TestCase):
    """Exercise BetterView.get(): proxy on a cache miss, serve quietly on a
    fresh cache hit, and queue a refresh task on a stale hit."""

    def setUp(self):
        self.view = BetterView()
        # Replace the cache plumbing with inert stand-ins so each test
        # drives only the control flow inside get().
        self.view.should_bypass_cache = lambda req: False
        self.view.get_cache = lambda req: (None, None)
        self.view.set_cache = lambda req, resp: True
        self.request = mock.Mock()
        self.request.build_absolute_uri = lambda: '_'

    @mock.patch('bettercache.views.proxy')
    def test_miss(self, proxy):
        """A cache miss must fall through to the upstream proxy."""
        proxy.return_value = {}
        self.view.get(self.request)
        self.assertTrue(proxy.called)

    @mock.patch('bettercache.views.strip_wsgi')
    @mock.patch('bettercache.views.proxy')
    def test_notexpired(self, proxy, strip_wsgi):
        """A fresh cached response is served without proxying or tasks."""
        self.view.get_cache = lambda req: ({}, False)
        self.view.send_task = mock.Mock()
        self.view.get(self.request)
        self.assertFalse(self.view.send_task.called)
        self.assertFalse(proxy.called)

    @mock.patch('bettercache.views.strip_wsgi')
    @mock.patch('bettercache.views.proxy')
    def test_expired(self, proxy, strip_wsgi):
        """An expired cached response triggers a refresh task, not a proxy."""
        self.view.should_bypass_cache = lambda req: False
        self.view.send_task = mock.Mock()
        self.view.get_cache = lambda req: ({}, True)
        self.view.get(self.request)
        self.assertTrue(self.view.send_task.called)
        self.assertFalse(proxy.called)
| ironfroggy/django-better-cache | bettercache/tests/test_views.py | Python | mit | 1,632 |
import numpy as np
from imfractal import *
from numpy import recfromcsv
import scipy
import math
import pandas
from pandas import DataFrame, Series
import statsmodels.formula.api as sm
import statsmodels
from statsmodels.tools.eval_measures import aicc, bic, hqic, rmse
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
import itertools
import numpy as np
from matplotlib.mlab import griddata
from mpl_toolkits.mplot3d import Axes3D
from pylab import *
# Silence numpy divide-by-zero / invalid-value warnings produced by the
# multifractal spectrum computations below.
np.seterr(divide='ignore', invalid='ignore')
def ribbon1(data):
    """Draw the spectra in `data` as a ribbon-style 3D surface.

    Column 0 supplies the x axis; each consecutive column pair (i, i+1)
    is gridded and rendered as one surface strip.
    NOTE(review): relies on matplotlib.mlab.griddata, which was removed
    in matplotlib >= 3.1 -- confirm the pinned matplotlib version.
    """
    x=data[:,0]
    fig=plt.figure()
    ax=fig.gca(projection='3d')
    for i in range(1,data.shape[1]-1,1):
        y=data[:,i]
        z=data[:,i+1]
        xi=np.linspace(min(x),max(x))
        yi=np.linspace(min(y),max(y))
        X,Y=np.meshgrid(xi,yi)
        Z=griddata(x,y,z,xi,yi)
        ax.plot_surface(X,Y,Z,rstride=50,cstride=1,cmap='RdYlBu')
        ax.set_zlim3d(np.min(Z),np.max(Z))
    # Fixed z range overrides the data-driven limits set in the loop.
    ax.set_zlim3d(0.0,3.0)
    ax.set_title('Gradient')
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.set_yticks([])
    ax.set_zlabel('')
    plt.show()
def ribbon2(spectra, alpha, title):
    """Render the spectra as a ribbon plot on plot.ly, one surface trace
    per column of `spectra`.

    `alpha` gives the y coordinates of every sample.  `title` is accepted
    but unused: the layout title is hard-coded below.
    Requires plotly credentials (py.iplot uploads to the plot.ly service).
    """
    import plotly.plotly as py
    import plotly.graph_objs as go
    import numpy as np
    traces = []
    y_raw = spectra[:, 0]  # NOTE(review): unused
    sample_size = spectra.shape[1]
    for i in range(0, sample_size):
        print i
        z_raw = spectra[:, i]
        x = []
        y = []
        z = []
        ci = int(255/sample_size*i) # ci = "color index"
        # Duplicate each sample so every value spans a flat quad strip.
        for j in range(0, len(z_raw)):
            z.append([z_raw[j], z_raw[j]])
            y.append([alpha[j], alpha[j]])
            x.append([i, i+1])
        traces.append(dict(
            xsrc="xsrc",
            z=z,
            x=x,
            y=y,
            # NOTE(review): the comprehension variable `i` shadows the
            # outer loop index -- presumably intentional (colorscale
            # stops), but verify.
            colorscale=[ [i, 'rgb(%d,%d,255)'%(ci, ci)] for i in np.arange(0,1.1,0.1) ],
            showscale=False,
            type='surface',
        ))
    #traces = traces[::-1]
    fig = go.Figure( data=traces, layout={'title':'Pyramid Gradient MFS'})
    py.iplot(fig, filename='ribbon-plot-python')
def ribbon3(spectra, alpha):
    """Matplotlib ribbon plot of the pyramid-gradient MFS: one flat strip
    per spectrum column, stacked along the y ("resolution") axis.

    NOTE(review): Python 2 only -- `range(...) * 2` multiplies a list;
    also depends on the removed matplotlib.mlab.griddata.
    """
    matplotlib.rcParams.update({'font.size':10})
    fig=figure()
    ax=fig.gca(projection='3d')
    for i in range(0,spectra.shape[1]):
        y=spectra[:,i]
        # Duplicate every sample so each value spans a flat quad.
        x=sorted(range(1,len(y)+1)*2)
        a=[i,i+1]*len(y)
        b=list(itertools.chain(*zip(y,y)))
        xi=np.linspace(min(x),max(x))
        yi=np.linspace(min(a),max(a))
        X,Y=np.meshgrid(xi/(len(x)*0.5),yi)
        Z=griddata(x,a,b,xi,yi)
        ax.plot_surface(X,Y,Z, rstride=50, cstride=1, cmap='Spectral')
        ax.set_zlim3d(np.min(Z),np.max(Z))
    ax.grid(False)
    ax.w_xaxis.pane.set_visible(False)
    ax.w_yaxis.pane.set_visible(False)
    ax.w_zaxis.pane.set_color('gainsboro')
    ax.set_title('Pyramid Gradient MFS')
    ax.set_xlim3d(0,1)
    ax.set_xticks(alpha)
    ax.set_xticklabels(alpha)
    ax.set_xlabel(r'$\alpha$')
    #ax.set_yticks([0.5,1.5,2.5,3.5,4.5])
    #ax.set_yticklabels(['1','2','3','4','5'])
    ax.set_ylabel('Resolution')
    ax.set_zlim3d(0,3)
    ax.set_zlabel(r'$f(\alpha)$')
    show()
def csvToNumpy(X):
    """Convert a record array (as returned by numpy.recfromcsv) into a
    plain numpy array, dropping the first field of every record.

    Returns a 1-D array for a single-record input and a 2-D array
    otherwise, matching the original incremental-vstack behaviour.
    The original rebuilt the matrix with np.vstack once per row
    (quadratic); this version stacks once.
    """
    rows = [np.array(tuple(rec)[1:]) for rec in X]
    if len(rows) == 1:
        return rows[0]
    return np.vstack(rows)
def openMatlab(typ, filename, threshold = 100, adaptive = False):
    """Load variable `typ` from the MATLAB .mat file `filename` as a
    numpy array.

    When `typ` is the literal string "False" the volume is additionally
    binarised at `threshold` (white = True) and the white-voxel count is
    printed.
    """
    import scipy.io as sio
    arr = np.array(sio.loadmat(filename)[typ])
    if typ == "False":
        if adaptive:
            # NOTE(review): `self` is undefined inside this module-level
            # function -- calling with adaptive=True raises NameError.
            threshold = self.determine_threshold(arr)
        arr = arr > threshold
        a_v = arr.cumsum()
        print "Amount of white pixels: ", a_v[len(a_v) - 1]
        # debug - to see the spongious structure
        # plt.imshow((arr[:,:,50]), cmap=plt.gray())
        # plt.show()
    return arr
#def prec_and_acc(data, f):
#return f(data)
# for Fexp
#fexp_names = np.load(data_path + 'bioAsset_meta.npy')
# Precision and accuracy test
#precision_and_accuracy()
#exit()
# Adaptive Metadata and mfs
# ---------------------------------------------------------------------
# Load the experiment inputs: the BioAsset standard bone measures plus
# every precomputed multifractal spectrum (MFS) variant under test.
# ---------------------------------------------------------------------
measures = recfromcsv('exps/data/BioAssetAdaptiveThresh/default_BioAsset_Adaptive.csv', delimiter=',')
measures_npy = csvToNumpy(measures)
mfs = np.load('exps/data/mfs_holder_BioAsset_raw.npy')
mfs_normalized = recfromcsv('exps/data/BioAssetAdaptiveThresh/mfs_holder_BioAsset.csv', delimiter=',')
mfs_normalized = csvToNumpy(mfs_normalized)
mfs_sandbox_adaptive = np.load('exps/data/BioAssetAdaptiveThresh/mfs_Sandbox_BioAsset_adaptive_0.75.npy')
# should be similar to mfs_sandbox_adaptive?
mfs_sandbox_absolute_normalized = np.load('exps/data/mfs_Sandbox_BioAsset_normalized.npy')
mfs_local = np.load('exps/data/mfs_holder_local_BioAsset.npy')
mfs_local_pyramid = np.load('exps/data/mfs_holder_local_BioAsset_pyramid.npy')
mfs_gradient_pyramid = np.load('exps/data/mfs_holder_pyramid_gradient_BioAsset.npy')
mfs_slices_x = np.load('exps/data/mfs_holder_slices_x_BioAsset.npy')
mfs_slices_y = np.load('exps/data/mfs_holder_slices_y_BioAsset.npy')
mfs_slices_z = np.load('exps/data/mfs_holder_slices_z_BioAsset.npy')
mfs_pure_pyramid = np.load('exps/data/mfs_pure_pyramid_BioAsset.npy')
mfs_pure_pyramid_gradient = np.load('exps/data/mfs_pure_pyramid_gradient_BioAsset.npy')
mfs_sigmoid = np.load('exps/data/mfs_holder_sigmoid_BioAsset.npy')
mfs_holder_10 = np.load('exps/data/mfs_holder_10_BioAsset.npy')
mfs_holder_5 = np.load('exps/data/mfs_holder_5_BioAsset.npy')
#mfs_gradient = recfromcsv('exps/data/mfs_holder_gradient_BioAsset.csv', delimiter=',')
#mfs_gradient = csvToNumpy(mfs_gradient)
mfs_gradient = np.load('exps/data/mfs_gradient.npy')
mfs_laplacian = np.load('exps/data/mfs_laplacian.npy')
stats_mfs_holder = np.load('exps/data/stats_mfs_holder_BioAsset.npy')
stats_mfs_holder2 = np.load('exps/data/mfs_stats_2.npy')
stats_mfs_pyramid = np.load('exps/data/mfs_stats_pyramid.npy')
stats_mfs_pyramid_gradient = np.load('exps/data/mfs_stats_pyramid_gradient.npy')
stats_mfs_slices_x = np.load('exps/data/stats_mfs_slices_x.npy')
stats_mfs_slices_y = np.load('exps/data/stats_mfs_slices_y.npy')
stats_mfs_slices_z = np.load('exps/data/stats_mfs_slices_z.npy')
#stats_mfs_pyramid_gradient = np.hstack((stats_mfs_pyramid_gradient[:, 5:45])) #, #stats_mfs_pyramid_gradient[:, 36:47]))
#stats_mfs_pyramid_gradient = stats_mfs_pyramid_gradient[:, [1, 17,46]] #, #stats_mfs_pyramid_gradient[:, 36:47]))
#print stats_mfs_pyramid_gradient
#mfs = np.hstack([mfs[:, 6:10], mfs[:, 17:20]])
#mfs_slices_x[i] = np.hstack([mfs_slices_x[:, 6:10], mfs_slices_x[:, 17:20]])
# Column index of the experimental failure load (Fexp) within the rows
# of the measures matrix built further below.
pos_fexp = 17 #check
def normalize(vector):
    """Return `vector` standardized to zero mean and unit variance.

    Falls back to returning the input unchanged when the standard
    deviation is zero (constant vector), avoiding a division by zero.
    The std is computed once (the original computed it twice) and the
    print uses the parenthesised form, valid in Python 2 and 3.
    """
    std = np.std(vector)
    if std == 0:
        # Degenerate case: a constant vector cannot be standardized.
        print("std equals 0")
        return vector
    return (vector - np.mean(vector)) / std
def compute_robust_r2(y, X, total_model):
    """Leave-one-out cross-validated ("robust") R^2 and RMSE of an OLS fit.

    Uses the PRESS / TSS formulation (Predictive Error Sum of Squares
    over Total Sum of Squares), see
    http://www.moleculardescriptors.eu/tutorials/T5_moleculardescriptors_models.pdf

    `total_model` is accepted for interface compatibility but not used.
    Returns (robust_r2, robust_rmse).
    """
    n = X.shape[0]
    y_mean = np.mean(y)  # average experimental response
    press = 0.0
    tss = 0.0
    mse_sum = 0.0
    for left_out in range(n):
        keep = np.ones(n, dtype=bool)
        keep[left_out] = False
        fitted = sm.OLS(y[keep], X[keep]).fit()
        prediction = fitted.predict(X[left_out])
        residual = y[left_out] - prediction
        press += residual * residual
        tss += (y[left_out] - y_mean) * (y[left_out] - y_mean)
        mse_sum += fitted.mse_resid  # RMSE^2 of each fold
    return 1.0 - press / tss, np.sqrt(mse_sum / n)
def compute_best_aicc(X, fexp):
    """Exhaustively search 1-, 2- and 3-descriptor OLS models explaining
    `fexp`, always including an intercept and BMD.

    X layout: [BMD | MFS descriptors]; a constant column is prepended so
    the fitted design is [CTE BMD MFS...].  Despite the name, selection
    is by highest leave-one-out (robust) R^2 -- the AICc comparisons are
    commented out; the AICc of each winner is still reported.

    Returns 18 values: (aicc, descriptor index/indices, adjusted R^2,
    robust R^2, RMSE, robust RMSE) for the best 1-, 2- and 3-descriptor
    models, in that order; descriptor indices are 0-based MFS columns.
    """
    # X: [BMD MFS]
    # X2 : [CTE BMD MFS]
    #X2 = statsmodels.tools.tools.add_constant(X)
    #X2 = np.hstack((np.ones(X.shape[0]), X))
    X2 = np.append(np.ones((X.shape[0], 1)), X, axis = 1)
    #print np.ones((X.shape[0], 1)).shape
    #print X.shape
    if X2.shape == X.shape:
        print "Error in add_constant!"
        exit()
    # one dimension
    best_aicc = 100000
    best_aicc2 = 100000
    best_aicc3 = 100000
    best_i = -1
    best_i_j = [-1, -1]
    best_i_j_k = [-1, -1, -1]
    best_r2_ij = -1
    best_r2 = -1
    best_r2_ijk = -1
    best_rmse = 100000
    best_rmse2 = 100000
    best_rmse3 = 100000
    best_rob_rmse = 100000
    best_rob_rmse2 = 100000
    best_rob_rmse3 = 100000
    best_rob_r2 = 0.0
    best_rob_r2_ij = 0.0
    best_rob_r2_ijk = 0.0
    # --- best single MFS descriptor on top of [CTE, BMD] ---
    for i in range(2, X2.shape[1]):
        # 0: constant, 1: BMD
        Xi = X2[:, [0, 1, i]]
        # Sanity check that the constant column really is all ones.
        first_c = np.any(np.array(X2[:,0]).astype(np.int32) ==
                         np.ones(X2.shape[0]).astype(np.int32))
        if not(np.any(first_c)):
            print "NOT!!"
            continue
        #print ""
        #print i
        model = sm.OLS(fexp, Xi)
        res = model.fit()
        rob_r2, rob_rmse = compute_robust_r2(fexp, Xi, res)
        #if aic < best_aicc :
        if rob_r2 > best_rob_r2:
            #best_aicc = aic
            best_i = i-2
            best_r2 = res.rsquared_adj
            best_rmse = np.sqrt(res.mse_resid)
            best_rob_r2 = rob_r2
            best_rob_rmse = rob_rmse
            best_aicc = aicc(res.llf, res.nobs, res.params.shape[0])
            #best_rob_r2, best_rob_rmse = compute_robust_r2(fexp, Xi, res)
    # --- best pair of MFS descriptors ---
    for i in range(2, X2.shape[1]):
        for j in range(i+1, X2.shape[1]):
            Xij = X2[:, [0, 1, i, j]]
            first_c = np.any(np.array(X2[:, 0]).astype(np.int32) ==
                             np.ones(X2.shape[0]).astype(np.int32))
            if not (np.any(first_c)):
                print "NOT!!"
                continue
            model = sm.OLS(fexp, Xij)
            res = model.fit()
            rob_r2_ij, rob_rmse2 = compute_robust_r2(fexp, Xij, res)
            #if aic2 < best_aicc2:
            if rob_r2_ij > best_rob_r2_ij:
                #best_aicc2 = aic2
                best_i_j = [i-2, j-2]
                best_r2_ij = res.rsquared_adj
                best_rmse2 = np.sqrt(res.mse_resid)
                best_rob_r2_ij = rob_r2_ij
                best_rob_rmse2 = rob_rmse2
                best_aicc2 = aicc(res.llf, res.nobs, res.params.shape[0])
                #best_rob_r2_ij, best_rob_rmse2 = compute_robust_r2(fexp, Xij, res)
    # --- best triple of MFS descriptors ---
    for i in range(2, X2.shape[1]):
        for j in range(i+1, X2.shape[1]):
            for k in range(j + 1, X2.shape[1]):
                Xijk = X2[:, [0, 1, i, j, k]]
                first_c = np.any(np.array(X2[:, 0]).astype(np.int32) ==
                                 np.ones(X2.shape[0]).astype(np.int32))
                if not (np.any(first_c)):
                    print "NOT!!"
                    continue
                model = sm.OLS(fexp, Xijk)
                res = model.fit()
                rob_r2_ijk, rob_rmse3 = compute_robust_r2(fexp, Xijk, res)
                if rob_r2_ijk > best_rob_r2_ijk:
                    #if aic3 < best_aicc3:
                    #best_aicc3 = aic3
                    best_i_j_k = [i-2, j-2, k-2]
                    best_r2_ijk = res.rsquared_adj
                    best_rmse3 = np.sqrt(res.mse_resid)
                    #best_rob_r2_ijk, best_rob_rmse3 = compute_robust_r2(fexp, Xijk, res)
                    best_rob_r2_ijk = rob_r2_ijk
                    best_rob_rmse3 = rob_rmse3
                    best_aicc3 = aicc(res.llf, res.nobs, res.params.shape[0])
    return best_aicc, best_i, best_r2, best_rob_r2, best_rmse, best_rob_rmse,\
        best_aicc2, best_i_j, best_r2_ij, best_rob_r2_ij, best_rmse2, best_rob_rmse2,\
        best_aicc3, best_i_j_k, best_r2_ijk, best_rob_r2_ijk, best_rmse3, best_rob_rmse3
def compute_linear_model(mfs, measures, output_file="standarized.csv"):
    """Fit OLS models explaining Fexp from BMD alone and from BMD plus
    MFS descriptors.

    `measures` rows: column 0 is BMD, last column is Fexp.  Returns
    (aicc, adj R^2, robust R^2, RMSE, robust RMSE) of the BMD-only
    model plus the 18-tuple from compute_best_aicc for the BMD+MFS
    search.  `output_file` is only used by unreachable code (see the
    final line).
    """
    #from sklearn.linear_model import Ridge
    from sklearn import linear_model
    # try different ones
    #clf = Ridge(alpha = 1.0)
    #clf = RidgeCV(alphas=[0.1, 1.0, 10.0])
    #clf = linear_model.LinearRegression()
    # explain fexp using BMD + the MFS data
    bmd = measures[:, 0]
    fexp = measures[:, measures.shape[1]-1]
    print "BMD: ", bmd.shape
    #print "FEXP: ", fexp
    print "MFS; ", mfs.shape
    #PCA
    #from sklearn.decomposition import PCA
    #pca = PCA(n_components=8)
    #pca.fit(mfs)
    #mfs = pca.transform(mfs)
    X = np.hstack((bmd.reshape(bmd.shape[0], 1), mfs))
    #clf.fit(X, fexp)
    # Results
    # print "Coefs:", clf.coef_
    #print ""
    #print "Score (R^2):", clf.score(X, fexp)
    #cols = ['bmd']
    #for i in range(mfs.shape[1]):
    #cols.append('mfs_' + str(i))
    #### using statsmodel
    #df = DataFrame(np.hstack((X, np.array([fexp]).T)), columns=cols)
    #df = DataFrame(X, columns=cols)
    # BMD ALONE
    #import statsmodels.robust.robust_linear_model
    #Xbmd = X[:, [0]]
    X2 = statsmodels.tools.tools.add_constant(bmd)
    #huber_t = sm.RLM(fexp, X2, M=statsmodels.robust.norms.HuberT())
    #m = huber_t.fit()
    #print m.rsquared
    #print m.summary()
    #exit()
    model = sm.OLS(fexp, X2)
    res = model.fit()
    aic = aicc(res.llf, res.nobs, res.params.shape[0])
    r2 = res.rsquared_adj
    rmsee = np.sqrt(res.mse_resid)
    rob_r2, rob_rmse = compute_robust_r2(fexp, X2, res)
    #print "BMD AICc, dimension, R2: " , aic, ' bmd ', r2
    res = compute_best_aicc(X, fexp)
    #print "AICc, dimension, R2: ", res[0], ' bmd + ', res[1], res[2]
    #print "AICc p-value (significance): ", 1.0 / np.exp((aic - res[0])/2.0)
    #print "AICc, dimensions, R2: ", res[3],' bmd + ', res[4], res[5]
    #print "AICc p-value (significance): ", 1.0 / np.exp((aic - res[3]) / 2.0)
    #print "AICc, dimensions, R2: ", res[6],' bmd + ', res[7], res[8]
    #print "AICc p-value (significance): ", 1.0 / np.exp((aic - res[6]) / 2.0)
    return aic, r2, rob_r2, rmsee, rob_rmse, res
    #X_normalized = X
    #for i in range(X.shape[1]):
    #    X_normalized[:,i] = normalize(X_normalized[:,i])
    #res_n = compute_best_aicc(X_normalized, fexp)
    #print "AICc, dimension, R2: ", res_n[0], ' bmd + ', res_n[1], res_n[2]
    #print "AICc p-value (significance): ", 1.0 / np.exp((aic - res_n[0]) / 2.0)
    #print "AICc, dimensions, R2: ", res_n[3], ' bmd + ', res_n[4], res_n[5]
    #print "AICc p-value (significance): ", 1.0 / np.exp((aic - res_n[3]) / 2.0)
    #print "AICc, dimensions, R2: ", res_n[6], ' bmd + ', res_n[7], res_n[8]
    #print "AICc p-value (significance): ", 1.0 / np.exp((aic - res_n[6]) / 2.0)
    #print ""
    #print "Normalized Variables - Score (R^2):", clf.score(X_normalized, fexp)
    #X_2 = statsmodels.tools.tools.add_constant(X_normalized)
    #model_2 = sm.OLS(fexp, X_2)
    #res_2 = model_2.fit()
    # NOTE(review): unreachable (after the return above) and references
    # undefined names `data_path` / `X_normalized`; kept verbatim.
    np.savetxt(data_path + output_file, X_normalized, delimiter=",")
def compute_correlations(X, Y):
    """Spearman rank correlations between every column of X and every
    column of Y.

    Returns (min, max, correlation matrix, p-value matrix).  NaN
    correlations are recorded as 0 while their p-values are kept.
    """
    n_x = X.shape[1]
    n_y = Y.shape[1]
    correls = np.zeros((n_x, n_y))
    pvals = np.zeros((n_x, n_y))
    for col_x in range(n_x):
        for col_y in range(n_y):
            corr, pval = scipy.stats.spearmanr(X[:, col_x], Y[:, col_y])
            pvals[col_x, col_y] = pval
            # Guard against NaN (e.g. a constant column).
            correls[col_x, col_y] = 0 if math.isnan(corr) else corr
    return np.min(correls), np.max(correls), correls, pvals
#mfs_matrix_normalized = mfs_matrix.copy()
#measures_matrix_normalized = measures_matrix.copy()
#for i in range(mfs_matrix_normalized.shape[1]):
# mfs_matrix_normalized[:, i] = normalize(mfs_matrix_normalized[:, i])
#for i in range(measures_matrix_normalized.shape[1]):
# measures_matrix_normalized[:, i] = normalize(measures_matrix_normalized[:, i])
#correls_normalized = np.zeros((mfs_matrix_normalized.shape[1], measures_matrix_normalized.shape[1]))
# compute correlations
#for d in range(mfs_matrix_normalized.shape[1]):
# for m in range(measures_matrix_normalized.shape[1]):
# corr = scipy.stats.stats.spearmanr(mfs_matrix_normalized[:, d],
# measures_matrix_normalized[:, m])[0]
# if not (math.isnan(corr)):
# correls_normalized[d, m] = corr
# else:
# correls_normalized[d, m] = 0
#print "Higher normalized correlations: ", np.min(correls_normalized), np.max(correls_normalized)
def compute_subset(measures_matrix, mfs,
                   mfs_pos_start_data, mfs_pos_end_data, fexp_col=17):
    """Select the rows of `mfs` belonging to subjects whose Fexp value
    (column `fexp_col` of `measures_matrix`) is not NaN; each kept row
    is sliced to [mfs_pos_start_data, mfs_pos_end_data] inclusive.

    Returns [] when nothing is kept, a 1-D array for one row and a 2-D
    array otherwise (matching the original incremental behaviour).

    Fixes: the original iterated over the *global* `measures` instead of
    its `measures_matrix` parameter (same length in this script, but a
    fragile hidden coupling), re-stacked with np.vstack per row
    (quadratic), and read the module global `pos_fexp` -- now exposed as
    the backward-compatible keyword `fexp_col` defaulting to the same
    value (17).
    """
    rows = []
    for i in range(len(measures_matrix)):
        if math.isnan(measures_matrix[i][fexp_col]):
            continue
        rows.append(tuple(mfs[i])[mfs_pos_start_data: mfs_pos_end_data + 1])
    if not rows:
        return []
    if len(rows) == 1:
        return np.array(rows[0])
    return np.vstack(rows)
#################################################################
# Paper figures
# Figure : MFS Sandbox of volume 1_2 -- generalized dimensions D_q
# for q in [-10, 10].
fsize = 15
mfs1_2 = mfs_sandbox_absolute_normalized[1] #mfs_sandbox_adaptive[1]
x1 = -10
x2 = 10
x = np.arange(x1, x2+1)
plt.ylim((2.2, 3.5))
plt.xlim(x1,x2)
plt.ylabel('$D_{q}$',fontsize=fsize)
plt.xlabel('Generalised dimension',fontsize=fsize)
print len(x), len(mfs1_2)
plt.plot(x, mfs1_2, '*-', linewidth=2.0)
plt.show()
# Figure: MFS Holder of volume 1_2 -- f(alpha) spectrum; x tick labels
# are the Holder exponents rescaled from [0, 255] to [0, 1].
xt = np.arange(1,20,2)
alpha = [ 6., 19., 32., 45., 58., 71., 84., 97., 110., 123. , 136. , 149.,
          162., 175., 188., 201. , 214. , 227. , 240. ,253.]
alpha = map(lambda i:"%.2f" % float(i/255.), alpha)
alpha_orig = alpha
alpha = alpha[0:len(alpha):2]  # every other label to avoid crowding
plt.xticks(xt,alpha) # translate
fsize = 15
mfs1_2 = mfs[1]
#x1 = -10
#x2 = 10
#x = np.arange(x1, x2+1)
plt.ylim((0.0, 3.0))
#plt.xlim(x1,x2)
plt.ylabel(r'$f(\alpha)$', fontsize=fsize)
plt.xlabel(r'$\alpha$', fontsize=fsize)
x = np.arange(len(mfs1_2))
plt.xticks(xt,alpha) # translate
plt.plot(map(lambda i: i+1, x), mfs1_2, '*-', linewidth=2.0)
plt.show()
# Figure: MFS Pyramid of volume 1_2 -- one 20-bin spectrum per pyramid
# level; levels are stored in reverse order in the file.
fsize = 15
mfs1_2 = mfs_pure_pyramid[1]
# reverse (bug)
a = mfs1_2[:20]
b = mfs1_2[20:40]
c = mfs1_2[40:60]
d = mfs1_2[60:80]
e = mfs1_2[80:]
plt.xticks(xt,alpha) # translate
mfs1_2 = np.hstack((e,d,c,b,a))
#x1 = -10
#x2 = 10
#x = np.arange(x1, x2+1)
plt.ylim((0.0, 3.0))
#plt.xlim(x1,x2)
plt.ylabel(r'$f(\alpha)$', fontsize=fsize)
#plt.xlabel(r'$\alpha$', fontsize=fsize)
#x = np.arange(len(mfs1_2))
#plt.plot(mfs1_2, '*-', linewidth=2.0)
plt.plot(e, '*-', linewidth=2.0, label='0')
plt.plot(d, '*-', linewidth=2.0, label='1')
plt.plot(c, '*-', linewidth=2.0, label='2')
plt.plot(b, '*-', linewidth=2.0, label='3')
plt.plot(a, '*-', linewidth=2.0, label='4')
plt.xlabel(r'$\alpha$', fontsize=fsize)
plt.legend()
plt.show()
# Figure: Gradient MFS Pyramid of volume 1_2 -- same layout for the
# gradient variant.
fsize = 15
mfs1_2 = mfs_pure_pyramid_gradient[1]
plt.xticks(xt,alpha) # translate
a = mfs1_2[:20]
b = mfs1_2[20:40]
c = mfs1_2[40:60]
d = mfs1_2[60:80]
e = mfs1_2[80:]
mfs1_2 = np.hstack((e,d,c,b,a))
plt.plot(e, '*-', linewidth=2.0, label='0')
plt.plot(d, '*-', linewidth=2.0, label='1')
plt.plot(c, '*-', linewidth=2.0, label='2')
plt.plot(b, '*-', linewidth=2.0, label='3')
plt.plot(a, '*-', linewidth=2.0, label='4')
plt.xlabel(r'$\alpha$', fontsize=fsize)
#x1 = -10
#x2 = 10
#x = np.arange(x1, x2+1)
plt.ylim((0.0, 3.0))
#plt.xlim(x1,x2)
plt.ylabel(r'$f(\alpha)$', fontsize=fsize)
plt.legend()
#plt.xlabel(r'$\alpha$', fontsize=fsize)
#x = np.arange(len(mfs1_2))
#plt.plot(mfs1_2, '*-', linewidth=2.0)
plt.show()
#ribbon = np.vstack((a, b, c, d, e))
#print "Shape ribbon:" , ribbon.shape
#ribbon2(ribbon.T, alpha_orig, 'Pyramid Gradient MFS')
#exit()
####### 3D SCATTER BMD vs FEXP vs SK_{0}
from mpl_toolkits.mplot3d import Axes3D
indexes = np.load('exps/data/valid_fexp_indexes.npy')
bmd17 = np.load('exps/data/bmd17.npy')
fexp = np.load('exps/data/fexp.npy')
sk0 = stats_mfs_pyramid_gradient[:, 36:37]
#sk0 = np.load('exps/data/sk0.npy')
print sk0.shape
sk0_17 = sk0[indexes]  # restrict to subjects with a valid Fexp
if(True):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    p = ax.scatter(sk0_17, bmd17, fexp, s=(4*fexp**2)**2, c="r")
    ax.set_xlabel(r'$SK_{0}$')
    ax.set_ylabel('BMD')
    ax.set_zlabel(r'$F_{exp}$')
    plt.show()
colors=['b', 'c', 'y', 'm', 'r']
ax = plt.subplot(111, projection='3d')
ax.plot(bmd17, sk0_17, fexp, color=colors[0], label='Fexp')
#ax.plot(random(10), random(10), random(10), 'o', color=colors[0], label='LoLo')
#ax.plot(random(10), random(10), random(10), 'o', color=colors[1], label='Lo')
#ax.plot(random(10), random(10), random(10), 'o', color=colors[2], label='Average')
#ax.plot(random(10), random(10), random(10), 'o', color=colors[3], label='Hi')
#ax.plot(random(10), random(10), random(10), 'o', color=colors[4], label='HiHi')
#ax.plot(random(10), random(10), random(10), 'x', color=colors[4], label='High Outlier')
plt.legend(loc='upper left', numpoints=1, ncol=3, fontsize=8, bbox_to_anchor=(0, 0))
plt.show()
from scipy import stats
matplotlib.rc('text', usetex=True)
print "BMD17.SHAPE ", bmd17.shape
# 2D Scatters
plt.xlabel('BMD')
plt.ylabel(r'F$_{Failure}$')
plt.scatter(bmd17, fexp)
#slope, intercept, r_value, p_value, std_err=stats.linregress(bmd17,fexp)
#yvals = np.linspace(fexp.min(),fexp.max())
#line = slope * yvals + intercept # This is the critical change
#plt.plot(yvals, line, 'r', label="Regression Line", antialiased=True)
plt.plot(np.unique(bmd17), np.poly1d(np.polyfit(bmd17, fexp, 1))(np.unique(bmd17)), 'r', label=r'Adj. R$^2$ 0.684, Rob.RMSE = 0.22')
plt.legend(loc=2)
plt.show()
#
sk0_17 = sk0_17.reshape((17,))
print "SK017.SHAPE ", sk0_17.shape
plt.xlabel(r"SK$_0$")
plt.ylabel(r'F$_{Failure}$')
plt.scatter(sk0_17, fexp)
plt.plot(np.unique(sk0_17), np.poly1d(np.polyfit(sk0_17, fexp, 1))(np.unique(sk0_17)), 'r')
plt.show()
#
plt.xlabel(r'SK$_0$')
plt.ylabel('BMD')
plt.scatter(sk0_17, bmd17)
plt.plot(np.unique(sk0_17), np.poly1d(np.polyfit(sk0_17, bmd17, 1))(np.unique(sk0_17)), 'r')
plt.legend()
plt.show()
#################################################################
# Build the measures matrix: one row per subject, keeping columns
# [measures_pos_start_data, measures_pos_end_data] of the CSV records.
measures_pos_start_data = 1
measures_pos_end_data = 18
measures_matrix = []
for i in range(measures.shape[0]):
    measures_i = tuple(measures[i])
    measures_i = measures_i[measures_pos_start_data: measures_pos_end_data + 1]
    if len(measures_matrix) == 0:
        measures_matrix = np.array(measures_i)
    else:
        measures_matrix = np.vstack((measures_matrix, measures_i))
################################################################
measures_subset = np.array([])
# subset of measures: only subjects with a non-NaN Fexp value.
for i in range(len(measures)):
    if not(math.isnan(measures_matrix[i][pos_fexp])):
        if len(measures_subset) == 0:
            measures_subset = np.array(measures_matrix[i])
        else:
            measures_subset = np.vstack((measures_subset, measures_matrix[i]))
##############################################
# Human-readable labels for the MFS variants evaluated by the main loop;
# must stay in one-to-one order with method_array below.
str_method = [
    #"MFS",
    #"Standard Measures",
    #"Normalized MFS",
    #"Gradient MFS",
    "Stats Pyramid Gradient MFS",
    #"Normalized MFS",
    #"Stats Pyramid MFS",
    #"Pyramid MFS (local or global) ...",
    #"Sandbox MFS Adaptive",
    #"Sandbox_Absolute MFS_normalized ...",
    #"Local MFS",
    #"Sigmoid MFS",
    #"10-MFS",
    #"5-MFS",
    "Stats MFS Holder",
    #"Stats MFS Holder 2",
    #"Pyramid MFS",
    "Pyramid Gradient MFS",
    #"Slices X MFS",
    #"Slices Y MFS",
    #"Slices Z MFS",
    "Pure Pyramid Gradient MFS",
    #"Stats MFS Slices X",
    #"Stats MFS Slices Y",
    #"Stats MFS Slices Z",
]
# Feature matrices, one per entry in str_method (same order).
method_array = [
    #mfs,
    #measures_npy[:, :-2],
    #mfs_normalized,
    #mfs_gradient,
    stats_mfs_pyramid_gradient[:,46:47],
    #mfs_normalized,
    #stats_mfs_pyramid,
    #mfs_local_pyramid,
    #mfs_sandbox_adaptive,
    #mfs_sandbox_absolute_normalized,
    #mfs_local,
    #mfs_sigmoid,
    #mfs_holder_10,
    #mfs_holder_5,
    stats_mfs_holder,
    #stats_mfs_holder2,
    #mfs_pure_pyramid,
    mfs_gradient_pyramid,
    #mfs_slices_x,
    #mfs_slices_y,
    #mfs_slices_z,
    mfs_pure_pyramid_gradient,
    #stats_mfs_slices_x,
    #stats_mfs_slices_y,
    #stats_mfs_slices_z,
]
#print "Auto-correlations:"
#print "MFS: "
# Auto-correlation of the Holder MFS: keep the descriptor indices whose
# absolute correlation with every *other* descriptor stays below `th`.
c1, c2, c, pv = compute_correlations(mfs, mfs)
mask1 = []
sums = []
for i in range(len(c)):
    #print i, c[i]
    s = np.median(np.abs(c[i]))
    sums = np.append(sums, s)
    arr = np.arange(c.shape[1])
    mask = np.ones(arr.shape, dtype = bool)
    mask[i] = 0  # exclude self-correlation
    th = 0.89
    if not(np.any(c[i][mask] > th) or np.any(c[i][mask] < -th)):
        #print c[i][mask]
        #print i
        mask1 = np.append(mask1, i)
# Always keep descriptors 1 and 17 regardless of the threshold test.
mask1 = np.append(mask1, 1)
mask1 = np.append(mask1, 17)
mask1 = np.sort(mask1)
#print sums
#plt.plot(sums.T)
#plt.show()
#exit()
mask1 = mask1.astype(np.int32)
#print mask1
sk0 = stats_mfs_pyramid_gradient[:, 36:37]
#np.save('exps/data/sk0.npy', sk0)
#exit()
if(True):
    # Correlation report: standard measures vs multifractal skewness.
    np.set_printoptions(suppress=True)
    print measures_matrix[0]
    print "[0, 7, 5, 6, 2, 4]: BMD MIL Tb.Th Tb.Sp BV/TV Tb.N"
    idxs = [0, 7, 5, 6, 2, 4]
    measures_matrix_2 = measures_matrix[:, idxs]
    np.set_printoptions(suppress=True)
    print measures_matrix_2
    print "Standard Measures intra-correlations:"
    print "BMD MIL Tb.Th Tb.Sp BV/TV Tb.N"
    c1, c2, c, pv = compute_correlations(measures_matrix_2, measures_matrix_2)
    #np.set_printoptions(suppress=True)
    print "CORRS: ", c
    print "PVALS: ", pv
    print "Multifractal Skewness - Standard Measures : correlations:"
    skew_levels = stats_mfs_pyramid_gradient[:, [6,36,46]]
    c1, c2, c, pv = compute_correlations(measures_matrix_2, skew_levels)
    np.set_printoptions(suppress=True)
    print c
    print "PVALS: ", pv
    print "Multifractal Skewness intra-correlations:"
    skew_levels = stats_mfs_pyramid_gradient[:, [6,36,46]]
    c1, c2, c, pv = compute_correlations(skew_levels, skew_levels)
    np.set_printoptions(suppress=True)
    print c
    print "PVALS: ", pv
    # Fexp (last column of the measures matrix) against skewness plus
    # all standard measures, restricted to subjects with a valid Fexp.
    fexp = np.array(measures_matrix[:, measures_matrix.shape[1] - 1]).reshape(measures_matrix.shape[0],1)
    rest = np.hstack((skew_levels, measures_matrix))
    rest_subset = compute_subset(measures_matrix, rest, 0, rest.shape[1])
    fexp_subset = compute_subset(measures_matrix, fexp, 0, fexp.shape[1])
    print "FEXP", fexp
    print fexp.shape
    indexes = []
    for i in range(len(fexp)):
        if not(np.isnan(fexp[i])): indexes.append(i)
    indexes = np.array(indexes).astype(np.uint32)
    print indexes
    print "Fexp against all correlations:"
    skew_levels = stats_mfs_pyramid_gradient[:, [6,36,46]]
    c1, c2, c, pv = compute_correlations(fexp_subset*1000, rest_subset)
    np.set_printoptions(suppress=True)
    print c
    print "PVALS: ", pv
# NOTE(review): everything after this exit() never runs as shipped.
exit()
#print "Std Measures: ", compute_correlations(measures_matrix, measures_matrix)
# Main evaluation loop: for each MFS variant, fit the BMD-only and
# BMD+MFS OLS models and print an AICc / R^2 / RMSE comparison table.
for i in range(len(method_array)):
    #c1, c2, _ = compute_correlations(measures_matrix, method_array[i])
    #print str_method[i], " Min, Max correlations with #", measures_matrix.shape, "std measures: ", c1, c2
    if False:
        # Disabled: descriptor sub-selection via the auto-correlation mask.
        #mask1 = np.array([0,6,9,19]) #np.hstack((np.array(np.arange(1, 7)), np.array([16])))
        if method_array[i].shape[1] == 20:
            method_array[i] = method_array[i][:, mask1]
        if method_array[i].shape[1] == 100:
            #method_array[i] = method_array[i][:,60:80]
            if True:
                mask = []
                for j in range(5):
                    mask = np.hstack(( mask , (20*j)+ mask1)).astype(np.int32)
                #print mask
                #method_array[i] = np.hstack((method_array[i][:, 0:20], method_array[i][:, 80:100]))
                method_array[i] = method_array[i][:, mask]
    # Reduce to the 17 subjects with a valid Fexp unless already reduced.
    if(len(method_array[i]) > 17):
        mfs_subset = compute_subset(measures_matrix, method_array[i], 0, method_array[i].shape[1])
    else:
        mfs_subset = method_array[i]
    #plt.plot(mfs_subset.T)
    #plt.show()
    #exit()
    print str_method[i], " #", method_array[i].shape[1]
    #np.savetxt('pyramid.csv', mfs_subset, delimiter=",")
    aic, r2, rob_r2, rmsee, rob_rmse, res = compute_linear_model(mfs_subset, measures_subset)
    # Format the BMD-only baseline and the best 1/2/3-descriptor models.
    aic_s = "%.4f" % aic
    rmsee = "%.4f" % rmsee
    r2 = "%.4f" % r2
    rob_r2 = "%.4f" % rob_r2
    rob_rmse = "%.4f" % rob_rmse
    aicc1 = "%.4f" % res[0]
    aicc2 = "%.4f" % res[6]
    aicc3 = "%.4f" % res[12]
    r2_1 = "%.4f" % res[2]
    r2_2 = "%.4f" % res[8]
    r2_3 = "%.4f" % res[14]
    dims_1 = res[1]
    dims_2 = res[7]
    dims_3 = res[13]
    # Relative AICc likelihoods (p-values) of each model vs the baseline.
    p1 = "%.7f" % np.exp((min(aic, res[0])-max(aic, res[0])) / 2.0)
    p2 = "%.7f" % np.exp((min(aic, res[6])-max(aic, res[6])) / 2.0)
    p3 = "%.7f" % np.exp((min(aic, res[12])-max(aic, res[12])) / 2.0)
    rmse1 = "%.5f" % res[4]
    rmse2 = "%.5f" % res[10]
    rmse3 = "%.5f" % res[16]
    rob_rmse1 = "%.5f" % res[5]
    rob_rmse2 = "%.5f" % res[11]
    rob_rmse3 = "%.5f" % res[17]
    rob_r2_1 = "%.4f" % res[3]
    rob_r2_2 = "%.4f" % res[9]
    rob_r2_3 = "%.4f" % res[15]
    print "AICC - Adj R^2 - Rob. R^2 - DIMS - p-value - RMSE - Rob. RMSE "
    print aic_s, ' |', r2, ' |', rob_r2, ' | bmd ', " | --------- | ", rmsee, " | ", rob_rmse
    print aicc1, ' |', r2_1, ' |', rob_r2_1, ' | bmd + ', dims_1, " |", p1, "| ", rmse1, "| ", rob_rmse1
    print aicc2, ' |', r2_2, ' |', rob_r2_2, ' | bmd + ', dims_2, " |", p2, "| ", rmse2, "| ", rob_rmse2
    print aicc3, ' |', r2_3, ' |', rob_r2_3, ' | bmd + ', dims_3, "|", p3, "| ", rmse3, "| ", rob_rmse3
| rbaravalle/imfractal | test_paper_BioAsset.py | Python | bsd-3-clause | 30,625 |
# Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import datetime
from dateutil.parser import parse as date_parse
from unittest import mock
import shutil
import time
from c7n.ctx import ExecutionContext
from c7n.config import Bag, Config
from c7n.testing import mock_datetime_now
from c7n.output import metrics_outputs
from c7n.utils import parse_url_config, reset_session_cache
from c7n_gcp.output import (
GCPStorageOutput, StackDriverLogging, StackDriverMetrics)
from gcp_common import BaseTest
class MetricsOutputTest(BaseTest):
    """Replay-based tests for the Stackdriver (GCP) metrics output sink."""

    def test_metrics_selector(self):
        # 'gcp' in the metrics output registry must resolve to Stackdriver.
        self.assertEqual(
            metrics_outputs.get('gcp'),
            StackDriverMetrics)

    def test_metrics_output(self):
        # Write one gauge point via the sink, then read it back through
        # the monitoring timeSeries API and compare the recorded payload.
        project_id = 'cloud-custodian'
        factory = self.replay_flight_data('output-metrics', project_id=project_id)
        ctx = Bag(session_factory=factory,
                  policy=Bag(name='custodian-works', resource_type='gcp.function'))
        conf = Bag()
        metrics = StackDriverMetrics(ctx, conf)
        metrics.put_metric('ResourceCount', 43, 'Count', Scope='Policy')
        metrics.flush()
        if self.recording:
            # Give Stackdriver time to ingest the point before querying.
            time.sleep(42)
        session = factory()
        client = session.client('monitoring', 'v3', 'projects.timeSeries')
        results = client.execute_command(
            'list', {
                'name': 'projects/{}'.format(project_id),
                'filter': 'metric.type="custom.googleapis.com/custodian/policy/resourcecount"',
                'pageSize': 3,
                'interval_startTime': (
                    datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
                ).isoformat('T') + 'Z',
                'interval_endTime': datetime.datetime.utcnow().isoformat('T') + 'Z'
            })
        self.assertEqual(
            results['timeSeries'],
            [{u'metric': {
                u'labels': {
                    u'policy': u'custodian-works',
                    u'project_id': u'cloud-custodian'},
                u'type': u'custom.googleapis.com/custodian/policy/resourcecount'},
              u'metricKind': u'GAUGE',
              u'points': [{
                  u'interval': {
                      u'endTime': u'2018-08-12T22:30:53.524505Z',
                      u'startTime': u'2018-08-12T22:30:53.524505Z'},
                  u'value': {u'int64Value': u'43'}}],
              u'resource': {
                  u'labels': {u'project_id': u'cloud-custodian'},
                  u'type': u'global'},
              u'valueType': u'INT64'}])

    def test_metrics_output_set_write_project_id(self):
        # A project_id in the sink config overrides the session project
        # for metric writes.
        project_id = 'cloud-custodian-sub'
        write_project_id = 'cloud-custodian'
        factory = self.replay_flight_data('output-metrics', project_id=project_id)
        ctx = Bag(session_factory=factory,
                  policy=Bag(name='custodian-works', resource_type='gcp.function'))
        conf = Bag(project_id=write_project_id)
        metrics = StackDriverMetrics(ctx, conf)
        metrics.put_metric('ResourceCount', 43, 'Count', Scope='Policy')
        metrics.flush()
def get_log_output(request, output_url):
    """Build a StackDriverLogging sink for `output_url` with a mocked
    session, registering session-cache cleanup on the pytest `request`."""
    policy = Bag(name="xyz", provider_name="gcp", resource_type='gcp.function')
    ctx = ExecutionContext(
        lambda assume=False: mock.MagicMock(),
        policy,
        Config.empty(account_id='custodian-test'))
    sink = StackDriverLogging(ctx, parse_url_config(output_url))
    request.addfinalizer(reset_session_cache)
    return sink
def get_blob_output(request, output_url=None, cleanup=True):
    """Build a GCPStorageOutput sink with a mocked session.

    Defaults the destination to gs://cloud-custodian/policies; when
    `cleanup` is true the sink's scratch directory is removed at test
    teardown, and the session cache is always reset.
    """
    if output_url is None:
        output_url = "gs://cloud-custodian/policies"
    ctx = ExecutionContext(
        lambda assume=False: mock.MagicMock(),
        Bag(name="xyz", provider_name="gcp"),
        Config.empty(output_dir=output_url, account_id='custodian-test'))
    sink = GCPStorageOutput(ctx, parse_url_config(output_url))
    if cleanup:
        request.addfinalizer(lambda: shutil.rmtree(sink.root_dir))  # noqa
    request.addfinalizer(reset_session_cache)
    return sink
@mock.patch('c7n_gcp.output.LogClient')
@mock.patch('c7n_gcp.output.CloudLoggingHandler')
def test_gcp_logging(handler, client, request):
    # Entering/exiting the sink must flush and stop the background
    # logging transport exactly once.
    output = get_log_output(request, 'gcp://')
    with output:
        assert 1
    handler().transport.flush.assert_called_once()
    handler().transport.worker.stop.assert_called_once()
    # A netloc in the output URL becomes part of the log group name.
    output = get_log_output(request, 'gcp://apples')
    assert output.get_log_group() == 'custodian-apples-xyz'
@mock.patch('c7n_gcp.output.StorageClient')
@mock.patch('c7n_gcp.output.Bucket')
def test_output(bucket, client, request):
    # The blob key prefix must encode policy name and the (frozen)
    # current date/hour, and uploads must go through the mocked blob.
    bucket().blob.return_value = key = mock.MagicMock()
    with mock_datetime_now(date_parse('2020/06/10 13:00'), datetime):
        output = get_blob_output(request)
        assert output.key_prefix == 'policies/xyz/2020/06/10/13'
        output.upload_file('resources.json', f"{output.key_prefix}/resources.json")
        key.upload_from_filename.assert_called_once()
| capitalone/cloud-custodian | tools/c7n_gcp/tests/test_output_gcp.py | Python | apache-2.0 | 5,097 |
"""
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
Este arquivo é parte do programa Amadeus Sistema de Gestão de Aprendizagem, ou simplesmente Amadeus LMS
O Amadeus LMS é um software livre; você pode redistribui-lo e/ou modifica-lo dentro dos termos da Licença Pública Geral GNU como publicada pela Fundação do Software Livre (FSF); na versão 2 da Licença.
Este programa é distribuído na esperança que possa ser útil, mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a Licença Pública Geral GNU para maiores detalhes.
Você deve ter recebido uma cópia da Licença Pública Geral GNU, sob o título "LICENSE", junto com este programa, se não, escreva para a Fundação do Software Livre (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django.test import TestCase, RequestFactory
from users.models import User
from django.contrib.auth.models import AnonymousUser
from .. import views
from ..models import Category
from django.shortcuts import render
class Index_Test(TestCase):
    """Access-control tests for the categories app's index/create/update views.

    Requests are built with RequestFactory, so middleware (sessions, messages)
    is bypassed; the tests only check the returned status codes.
    """

    def setUp(self):
        """Set up all the variables we need for these test"""
        self.factory = RequestFactory()
        # Regular (non-privileged) user.
        self.user = User.objects.create(username="felipe", email="felipe.bormann@gmail.com", password="teste")
        # Superuser, expected to be able to reach the management pages.
        self.admin = User.objects.create_superuser('admin', email = 'admin@teste.com', password = 'teste')
        # Intended category coordinator.
        self.coordinator = User.objects.create(username="coordinator", email="felipe@gmail.com", password="teste")
        # NOTE(review): self.category is never created, but
        # test_update_category_not_coordinator below reads self.category.slug,
        # so that test fails with AttributeError. The line below was likely
        # commented out because a many-to-many field ('coordinators') cannot be
        # passed to objects.create(); confirm and restore via
        # Category.objects.create(name="test") followed by
        # category.coordinators.add(self.coordinator).
        #self.category = Category.objects.create(name="test", coordinators=self.coordinator)

    def test_index_get_not_admin(self):
        """Tests if an user can get into 'manage categories' page and be redirected"""
        request = self.factory.get('categories/')
        request.user = self.user
        response = views.IndexView.as_view()(request)
        self.assertEqual(response.status_code, 302)

    def test_index_get_unauth(self):
        """Tests if an unauthenticated user can get into 'manage categories' page and be redirected"""
        request = self.factory.get('categories/')
        request.user = AnonymousUser()
        response = views.IndexView.as_view()(request)
        self.assertEqual(response.status_code, 302) #Which means it is been redirected to login page

    def test_create_category(self):
        """Tests if an admin can access and the create_category page is displayed and rendered without errors"""
        request = self.factory.get('categories/create')
        request.user = self.admin
        response = views.CreateCategory.as_view()(request)
        self.assertEqual(response.status_code, 200)
        # NOTE(review): 'rendered' is unused; the render() call is kept only
        # because it surfaces template errors that as_view() alone does not.
        rendered = render(response, template_name = 'categories/create.html') #try to render the page, this one gives us more errors

    def test_create_category_unauth(self):
        """Tests if an unauthenticated user can get into 'create categories' page and be redirected"""
        request = self.factory.get('categories/create')
        request.user = AnonymousUser()
        response = views.IndexView.as_view()(request)
        self.assertEqual(response.status_code, 302) #Which means it is been redirected to login page

    def test_create_category_not_admin(self):
        """Tests if a non-admin user can get into 'create categories' page and be redirected"""
        request = self.factory.get('categories/create')
        request.user = self.user
        response = views.IndexView.as_view()(request)
        self.assertEqual(response.status_code, 302) #Which means it is been redirected to main page or login page

    def test_update_category_not_coordinator(self):
        # A non-coordinator should be redirected away from the update view.
        # NOTE(review): relies on self.category, which setUp never creates —
        # see the note in setUp; this test currently raises AttributeError.
        request = self.factory.get('categories/create')
        request.user = self.user
        response = views.UpdateCategory.as_view()(request, self.category.slug)
        self.assertEqual(response.status_code, 302) #Which means it is been redirected to main page or login page
| amadeusproject/amadeuslms | categories/tests/test_views.py | Python | gpl-2.0 | 3,802 |
from collections import defaultdict
from instructions import StaAbsInstruction, LdaImmInstruction, SEIInstruction, CLDInstruction
from memory import RAM
from ppu import PPU
from rom import ROM
from status import Status
# NES memory-map boundaries (inclusive): $0000-$1FFF is CPU RAM,
# $2000-$2007 are the PPU's memory-mapped registers.
RAM_START_INCLUSIVE = int.from_bytes(bytes.fromhex('0000'), byteorder='big')
RAM_END_INCLUSIVE = int.from_bytes(bytes.fromhex('1FFF'), byteorder='big')
PPU_START_INCLUSIVE = int.from_bytes(bytes.fromhex('2000'), byteorder='big')
PPU_END_INCLUSIVE = int.from_bytes(bytes.fromhex('2007'), byteorder='big')


class CPU(object):
    """Minimal 6502-style CPU core.

    Fetches identifier bytes from a ROM, dispatches them to Instruction
    objects, and routes memory accesses to RAM or the PPU based on address.
    """

    def __init__(self, ram: RAM, ppu: PPU):
        self.ram = ram
        self.ppu = ppu

        # Status Registers: store a single byte
        self.status_reg = None  # type: Status

        # counter registers: store single byte
        self.pc_reg = None  # program counter
        self.sp_reg = None  # stack pointer

        # data registers: store a single byte
        self.x_reg = None  # x register
        self.y_reg = None  # y register
        self.a_reg = None  # a register

        # program counter stores current execution point
        self.running = True

        # Supported instruction set; extend this list as opcodes are added.
        self.instructions = [
            SEIInstruction(),
            CLDInstruction(),
            LdaImmInstruction(),
            StaAbsInstruction()
        ]
        # Opcode byte -> Instruction lookup built from the list above.
        self.instructions_mapping = defaultdict()
        for instruction in self.instructions:
            self.instructions_mapping[instruction.identifier_byte] = instruction

        self.rom = None

    def start_up(self):
        """
        set the initial values of cpu registers
        status reg: 000100 (IRQs disabled)
        x, y, a regs: 0
        stack pointer: $FD
        $4017: 0 (frame irq disabled)
        $4015: 0 (sound channels disabled)
        $4000-$400F: 0 (sound registers)
        """
        # TODO Hex vs Binary
        self.pc_reg = 0
        self.status_reg = Status()
        # NOTE(review): sp_reg is stored as a bytes object here while the
        # other registers are ints — confirm which representation the
        # instruction implementations expect.
        self.sp_reg = bytes.fromhex('FD')

        self.x_reg = 0
        self.y_reg = 0
        self.a_reg = 0

        # TODO Implement memory sets

    def get_memory_owner(self, location: int):
        """
        return the owner of a memory location
        """
        # NOTE(review): addresses outside both ranges fall through and
        # implicitly return None; callers must handle that (or an explicit
        # error should be raised here).
        if RAM_START_INCLUSIVE <= location <= RAM_END_INCLUSIVE:
            return self.ram
        elif PPU_START_INCLUSIVE <= location <= PPU_END_INCLUSIVE:
            # pass off to the ppu register manager
            return self.ppu

    def run_rom(self, rom: ROM):
        """Execute the program in *rom* from just past its header until an
        instruction stops the loop or an unknown opcode raises."""
        # load rom
        self.rom = rom
        self.pc_reg = self.rom.header_size

        # run program
        self.running = True
        while self.running:
            # get the current byte at pc
            identifier_byte = self.rom.get_bytes(self.pc_reg)

            # turn the byte into an Instruction
            instruction = self.instructions_mapping.get(identifier_byte, None)
            if instruction is None:
                raise Exception('Instruction not found: {}'.format(identifier_byte))

            # get the correct amount of data bytes
            num_data_bytes = instruction.instruction_length - 1

            # get the data bytes
            data_bytes = self.rom.get_bytes(self.pc_reg + 1, num_data_bytes)

            # we have a valid instruction
            instruction.execute(self, data_bytes)

            # advance past the opcode and its operands
            self.pc_reg += instruction.instruction_length
| ThatBenderGuy/pyN3S | cpu.py | Python | gpl-3.0 | 3,328 |
# -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
class MegaRapidCz(Account):
    __name__ = "MegaRapidCz"
    __type__ = "account"
    __version__ = "0.38"
    __status__ = "testing"

    __description__ = """MegaRapid.cz account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("MikyWoW", "mikywow@seznam.cz"),
                   ("zoidberg", "zoidberg@mujmail.cz")]

    login_timeout = 60

    # Patterns scraped from the Czech-language account page:
    # max parallel downloads, subscription expiry, and remaining credit (GiB).
    LIMITDL_PATTERN = ur'<td>Max. počet paralelních stahování: </td><td>(\d+)'
    VALID_UNTIL_PATTERN = ur'<td>Paušální stahování aktivní. Vyprší </td><td><strong>(.*?)</strong>'
    TRAFFIC_LEFT_PATTERN = r'<tr><td>Kredit</td><td>(.*?) GiB'

    def grab_info(self, user, password, data, req):
        """Scrape account limits from the user's profile page.

        Returns a dict with 'premium', 'trafficleft' and 'validuntil'.
        NOTE(review): the profile page is fetched over plain http while login
        uses https — confirm whether this should be https as well.
        """
        htmll = self.load("http://megarapid.cz/mujucet/")

        m = re.search(self.LIMITDL_PATTERN, htmll)
        if m:
            # NOTE(review): this rebinds (shadows) the 'data' parameter with
            # the stored account data before mutating its options.
            data = self.get_data(user)
            data['options']['limitDL'] = [int(m.group(1))]

        m = re.search(self.VALID_UNTIL_PATTERN, htmll)
        if m:
            # Flat-rate subscription: unlimited traffic until the expiry date.
            validuntil = time.mktime(time.strptime(m.group(1), "%d.%m.%Y - %H:%M"))
            return {'premium': True, 'trafficleft': -1, 'validuntil': validuntil}

        m = re.search(self.TRAFFIC_LEFT_PATTERN, htmll)
        if m:
            # Credit-based account: convert GiB to KiB (pyLoad's traffic unit).
            trafficleft = float(m.group(1)) * (1 << 20)
            return {'premium': True, 'trafficleft': trafficleft, 'validuntil': -1}

        return {'premium': False, 'trafficleft': None, 'validuntil': None}

    def login(self, user, password, data, req):
        """Submit the login form, forwarding the hidden anti-CSRF hash token.

        NOTE(review): html.index() raises ValueError if the page markup
        changes; the base class presumably surfaces that as a login failure —
        confirm.
        """
        html = self.load("http://megarapid.cz/prihlaseni/")
        if "Heslo:" in html:
            # Extract the 32-character hidden 'hash' field value.
            start = html.index('id="inp_hash" name="hash" value="')
            html = html[start + 33:]
            hashes = html[0:32]
            html = self.load("https://megarapid.cz/prihlaseni/",
                             post={'hash'    : hashes,
                                   'login'   : user,
                                   'pass1'   : password,
                                   'remember': 1,
                                   'sbmt'    : u"Přihlásit"})
| mationic/pyload | module/plugins/accounts/MegaRapidCz.py | Python | gpl-3.0 | 2,147 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class WithConstraintMet(Package):
    """Package that tests True when specs on directives."""

    homepage = "http://www.example.com"
    url = "http://www.example.com/example-1.0.tar.gz"

    version('2.0', '0123456789abcdef0123456789abcdef')
    version('1.0', '0123456789abcdef0123456789abcdef')

    # Directives inside a `with when(<spec>)` block get that spec AND-ed
    # onto their own `when=` condition.
    with when('@1.0'):
        depends_on('b')
        conflicts('%gcc', when='+foo')

    # Nested condition: the inner when= is combined with the outer block's
    # spec, so 'c' is required only for @0.14:15 with b@3.8:4.0.
    with when('@0.14: ^b@:4.0'):
        depends_on('c', when='@:15 ^b@3.8:')
| LLNL/spack | var/spack/repos/builtin.mock/packages/with-constraint-met/package.py | Python | lgpl-2.1 | 686 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from random import sample, choice, randint
from string import lowercase
import avro.datafile
import avro.schema
import avro.io
types = ["A", "CNAME"]


def rand_name():
    """Return a random 15-character name built from distinct lowercase letters."""
    letters = sample(lowercase, 15)
    return ''.join(letters)
def rand_ip():
    """Return a random dotted-quad IPv4 address string."""
    return ".".join(str(randint(0, 255)) for _ in range(4))
def write(n):
    """Write *n* random Query records to datafile.avr (Python 2 benchmark)."""
    # Inline Avro schema for the benchmark records.
    schema_s="""
    { "type": "record",
      "name": "Query",
      "fields" : [
        {"name": "query", "type": "string"},
        {"name": "response", "type": "string"},
        {"name": "type", "type": "string", "default": "A"}
      ]}"""
    out = open("datafile.avr",'w')

    schema = avro.schema.parse(schema_s)
    writer = avro.io.DatumWriter(schema)
    # NOTE(review): presumably dw.close() also closes 'out'; confirm against
    # the avro DataFileWriter API, otherwise the handle leaks.
    dw = avro.datafile.DataFileWriter(out, writer, schema) #,codec='deflate'
    # 'type' shadows the builtin here; harmless in this local scope.
    for _ in xrange(n):
        response = rand_ip()
        query = rand_name()
        type = choice(types)
        dw.append({'query': query, 'response': response, 'type': type})
    dw.close()
def read():
    """Read back every record from datafile.avr (benchmark read path).

    Iterates the whole Avro container file and discards the records; only the
    elapsed time (measured by the caller via t()) matters.
    """
    f = open("datafile.avr")
    reader = avro.io.DatumReader()
    af = avro.datafile.DataFileReader(f, reader)
    # Drain the file; we only care how long iteration takes.
    for _ in af:
        pass
    # Close the reader (and its underlying file) instead of leaking the
    # handle; the original also carried an unused 'x = 0' counter, removed.
    af.close()
def t(f, *args):
    """Return the wall-clock seconds taken to call f(*args)."""
    start = time.time()
    f(*args)
    return time.time() - start
# Script entry point: `av_bench.py N` writes N random records and reads them
# back, printing wall-clock timings (Python 2 print-statement syntax).
if __name__ == "__main__":
    n = int(sys.argv[1])
    print "Write %0.4f" % t(write, n)
    print "Read %0.4f" % t(read)
| eonezhang/avro | lang/py/test/av_bench.py | Python | apache-2.0 | 2,175 |
# projector - a tool for managing multiple repositories and setting up
# development environments.
# Copyright (C) 2018 Barret Rennie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""SCM tool utilities."""
from functools import lru_cache
from pkg_resources import iter_entry_points
from typing import Dict
from projector.scm_tools.base import ScmTool
# This really only exists so we can spy on it with kgb.
#
# We cannot spy on `get_scm_tools` because some Python distributions have a
# native module for the wrapper that lru_cache uses to wrap functions. This
# wrapper is not a proper function, but a C object.
def _get_scm_tools_uncached() -> Dict[str, ScmTool]:
    """Load every SCM tool registered under the ``projector.scm_tools``
    entry point and return them keyed by tool name."""
    return {
        tool.name: tool
        for tool in (
            entry_point.load() for entry_point in iter_entry_points("projector.scm_tools")
        )
    }
@lru_cache(None)
def get_scm_tools() -> Dict[str, ScmTool]:
    """Return registered SCM Tools.

    Supported SCM tools are registered with the ``projector.scm_tools`` entry
    point. The results are cached (unbounded, via ``lru_cache(None)``) for
    future calls, so entry points are only scanned once per process.

    Returns:
        The registered SCM tools, keyed by name.
    """
    return _get_scm_tools_uncached()
| brennie/projector | projector/scm_tools/__init__.py | Python | gpl-3.0 | 1,763 |
"""Create START_FILTER_REQUESTS_LENGTH
Revision ID: 808f4e517394
Revises: af2de80654b6
Create Date: 2018-11-29 10:58:02.103458
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from app.models import cfg_settings
import datetime
# revision identifiers, used by Alembic.
revision = '808f4e517394'
down_revision = 'af2de80654b6'
branch_labels = None
depends_on = None


def upgrade():
    """Insert the START_FILTER_REQUESTS_LENGTH setting row."""
    # ### commands auto generated by Alembic - please adjust! ###
    date_created = datetime.datetime.now().isoformat()
    date_modified = datetime.datetime.now().isoformat()

    op.bulk_insert(
        cfg_settings.Cfg_settings.__table__,
        [
            {"key": "START_FILTER_REQUESTS_LENGTH", "value": "3", "public": True, "date_created": date_created,
             "date_modified": date_modified,
             "description": "The number characters to wait to start filtering table content"},
        ]
    )
    # ### end Alembic commands ###
def downgrade():
    """Remove the setting row added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    keys = ["START_FILTER_REQUESTS_LENGTH"]
    for key in keys:
        # Keys are hardcoded constants, so the %-interpolation into raw SQL
        # is not an injection risk here; prefer bind parameters if keys ever
        # become dynamic.
        op.execute("""DELETE from cfg_settings where `key`='%s';""" % (key))
    # ### end Alembic commands ###
| InQuest/ThreatKB | migrations/versions/808f4e517394_create_start_filter_requests_length.py | Python | gpl-2.0 | 1,247 |
import numpy as np
import pandas as pd
from shapely import prepared
def sjoin(left_df, right_df, how='inner', op='intersects',
          lsuffix='left', rsuffix='right', **kwargs):
    """Spatial join of two GeoDataFrames.
    left_df, right_df are GeoDataFrames
    how: type of join
        left -> use keys from left_df; retain only left_df geometry column
        right -> use keys from right_df; retain only right_df geometry column
        inner -> use intersection of keys from both dfs;
                 retain only left_df geometry column
    op: binary predicate {'intersects', 'contains', 'within'}
        see http://toblerity.org/shapely/manual.html#binary-predicates
    lsuffix: suffix to apply to overlapping column names (left GeoDataFrame)
    rsuffix: suffix to apply to overlapping column names (right GeoDataFrame)
    """
    import rtree

    allowed_hows = ['left', 'right', 'inner']
    if how not in allowed_hows:
        raise ValueError("`how` was \"%s\" but is expected to be in %s" % \
            (how, allowed_hows))

    allowed_ops = ['contains', 'within', 'intersects']
    if op not in allowed_ops:
        raise ValueError("`op` was \"%s\" but is expected to be in %s" % \
            (op, allowed_ops))

    # "a within b" is exactly "b contains a": swap the frames here and swap
    # the generated index-column names back after the predicate pass.
    if op == "within":
        # within implemented as the inverse of contains; swap names
        left_df, right_df = right_df, left_df

    if left_df.crs != right_df.crs:
        print('Warning: CRS does not match!')

    # Build an R-tree over the right frame's bounding boxes so each left
    # geometry is only tested against bbox candidates, not every row.
    tree_idx = rtree.index.Index()

    right_df_bounds = right_df['geometry'].apply(lambda x: x.bounds)
    for i in right_df_bounds.index:
        tree_idx.insert(i, right_df_bounds[i])

    # For each left geometry, the list of right-frame candidate labels whose
    # bounding boxes intersect it; rows with no candidates are dropped.
    idxmatch = (left_df['geometry'].apply(lambda x: x.bounds)
                .apply(lambda x: list(tree_idx.intersection(x))))
    idxmatch = idxmatch[idxmatch.apply(len) > 0]

    # Flatten the candidate pairs into parallel arrays of left/right labels.
    r_idx = np.concatenate(idxmatch.values)
    l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])

    # Vectorize predicate operations
    def find_intersects(a1, a2):
        return a1.intersects(a2)

    def find_contains(a1, a2):
        return a1.contains(a2)

    predicate_d = {'intersects': find_intersects,
                   'contains': find_contains,
                   'within': find_contains}

    check_predicates = np.vectorize(predicate_d[op])

    # Evaluate the exact predicate on the bbox candidates only; prepared
    # geometries speed up repeated tests against the same left geometry.
    result = (
        pd.DataFrame(
            np.column_stack(
                [l_idx,
                 r_idx,
                 check_predicates(
                     left_df['geometry']
                     .apply(lambda x: prepared.prep(x))[l_idx],
                     right_df['geometry'][r_idx])
                 ]))
           )

    result.columns = ['index_%s' % lsuffix, 'index_%s' % rsuffix, 'match_bool']
    # Keep only pairs where the predicate held.
    result = (
        pd.DataFrame(result[result['match_bool']==1])
        .drop('match_bool', axis=1)
        )

    if op == "within":
        # within implemented as the inverse of contains; swap names
        left_df, right_df = right_df, left_df
        result = result.rename(columns={
            'index_%s' % (lsuffix): 'index_%s' % (rsuffix),
            'index_%s' % (rsuffix): 'index_%s' % (lsuffix)})

    # Attach attribute columns from both frames according to the join type;
    # only one frame's geometry column is retained (see docstring).
    if how == 'inner':
        result = result.set_index('index_%s' % lsuffix)
        return (
            left_df
            .merge(result, left_index=True, right_index=True)
            .merge(right_df.drop('geometry', axis=1),
                left_on='index_%s' % rsuffix, right_index=True,
                suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
            )
    elif how == 'left':
        result = result.set_index('index_%s' % lsuffix)
        return (
            left_df
            .merge(result, left_index=True, right_index=True, how='left')
            .merge(right_df.drop('geometry', axis=1),
                how='left', left_on='index_%s' % rsuffix, right_index=True,
                suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
            )
    elif how == 'right':
        return (
            left_df
            .drop('geometry', axis=1)
            .merge(result.merge(right_df,
                left_on='index_%s' % rsuffix, right_index=True,
                how='right'), left_index=True,
                right_on='index_%s' % lsuffix, how='right')
            .set_index('index_%s' % rsuffix)
            )
| urschrei/geopandas | geopandas/tools/sjoin.py | Python | bsd-3-clause | 4,462 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Module containing util functions."""
from model_search.proto import phoenix_spec_pb2
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.feature_column import feature_column_lib
def default_get_input_layer_fn(problem_type, feature_columns):
  """Default implementation of get_input_layer_fn.

  Args:
    problem_type: a phoenix_spec_pb2.PhoenixSpec problem type enum value
      (CNN, DNN, RNN_ALL_ACTIVATIONS or RNN_LAST_ACTIVATIONS).
    feature_columns: the feature columns describing the model input.

  Returns:
    A function mapping (features, is_training, scope_name,
    lengths_feature_name) to a (input_tensor, lengths_or_None) pair.
  """

  def _input_layer_fn(features,
                      is_training,
                      scope_name="Phoenix/Input",
                      lengths_feature_name=None):
    with tf.compat.v1.variable_scope(scope_name):
      if problem_type == phoenix_spec_pb2.PhoenixSpec.CNN:
        # Sometimes we only get the image feature as a tensor.
        if not isinstance(features, dict):
          return features, None
        # Single image feature looked up by the first column's name.
        return tf.cast(
            features[feature_columns[0].name], dtype=tf.float32), None
      # DNN
      elif problem_type == phoenix_spec_pb2.PhoenixSpec.DNN:
        # To allow running a custom evaluation where multiple batches are
        # aggregated in a single metric_fn call, we need to define the
        # batch_size based on the input_fn, but DenseFeatures does not allow
        # this.
        if (len(feature_columns) == 1 and isinstance(
            feature_columns[0], type(tf.feature_column.numeric_column("x")))):
          return tf.cast(
              features[feature_columns[0].name], dtype=tf.float32), None
        # All are TF1 feature columns
        elif all([
            not feature_column_lib.is_feature_column_v2([fc])
            for fc in feature_columns
        ]):
          return tf.compat.v1.feature_column.input_layer(
              features, feature_columns, trainable=is_training), None
        # Some are TF1 feature columns
        elif any([
            not feature_column_lib.is_feature_column_v2([fc])
            for fc in feature_columns
        ]):
          # Mixed case: build each generation's layer separately, then
          # concatenate along the feature axis.
          fc_v1 = [
              fc for fc in feature_columns
              if not feature_column_lib.is_feature_column_v2([fc])
          ]
          fc_v2 = [
              fc for fc in feature_columns
              if feature_column_lib.is_feature_column_v2([fc])
          ]
          input_1 = tf.compat.v1.feature_column.input_layer(
              features, fc_v1, trainable=is_training)
          input_2 = tf.keras.layers.DenseFeatures(
              fc_v2, name="input_layer_fc_v2", trainable=is_training)(
                  features)
          return tf.concat([input_1, input_2], axis=1), None
        # None is TF1 feature columns
        else:
          return tf.keras.layers.DenseFeatures(
              feature_columns, name="input_layer",
              trainable=is_training)(features), None
      # RNN
      elif (problem_type == phoenix_spec_pb2.PhoenixSpec.RNN_ALL_ACTIVATIONS or
            problem_type == phoenix_spec_pb2.PhoenixSpec.RNN_LAST_ACTIVATIONS):
        # When a lengths feature is named, return it alongside the input so
        # downstream RNN code can mask padded timesteps.
        if lengths_feature_name:
          return (tf.cast(features[feature_columns[0].name],
                          dtype=tf.float32), features[lengths_feature_name])
        elif (
            len(feature_columns) == 1 and
            feature_columns[0].name in features and
            not isinstance(features[feature_columns[0].name], tf.SparseTensor)):
          return tf.cast(
              features[feature_columns[0].name], dtype=tf.float32), None
        else:
          # IMPORTANT NOTE:
          # When you use Keras layers with variables, always give them a name!
          # If not, keras will add "_#" (e.g., dense_1 instead of dense).
          # It will add the suffix even if the outer-scope is different.
          # This is a surprising behavior.
          # TODO(mazzawi): Contact the Keras team about this.
          return tf.keras.experimental.SequenceFeatures(
              feature_columns=feature_columns,
              trainable=is_training,
              name=scope_name)(
                  features)
      else:
        raise ValueError("Unknown problem type")

  return _input_layer_fn
def default_get_keras_input_layer_fn(problem_type, feature_columns):
  """Default implementation of get_input_layer_fn for Keras models.

  Only the DNN problem type is supported; all feature columns must be
  feature-column-v2. Returns a function mapping (is_training, scope_name)
  to a (DenseFeatures_layer, None) pair — note the layer is returned
  uncalled, unlike the estimator variant above.
  """

  def _input_layer_fn(is_training, scope_name="Phoenix/Input"):
    with tf.compat.v1.variable_scope(scope_name):
      if problem_type != phoenix_spec_pb2.PhoenixSpec.DNN:
        raise ValueError("Only DNN problem type is supported for keras at the "
                         "moment.")
      # To allow running a custom evaluation where multiple batches are
      # aggregated in a single metric_fn call, we need to define the
      # batch_size based on the input_fn, but DenseFeatures does not allow
      # this.
      if any([
          not feature_column_lib.is_feature_column_v2([fc])
          for fc in feature_columns
      ]):
        raise ValueError("TF1 feature columns are not supported for keras")
      return tf.keras.layers.DenseFeatures(
          feature_columns, name="input_layer", trainable=is_training), None

  return _input_layer_fn
| google/model_search | model_search/data/utils.py | Python | apache-2.0 | 5,565 |
#!/usr/bin/env python
# This file has been imported into the apache source tree from
# the IWYU source tree as of version 0.8
# https://github.com/include-what-you-use/include-what-you-use/blob/master/iwyu_tool.py
# and corresponding license has been added:
# https://github.com/include-what-you-use/include-what-you-use/blob/master/LICENSE.TXT
#
# ==============================================================================
# LLVM Release License
# ==============================================================================
# University of Illinois/NCSA
# Open Source License
#
# Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
# All rights reserved.
#
# Developed by:
#
# LLVM Team
#
# University of Illinois at Urbana-Champaign
#
# http://llvm.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of the LLVM Team, University of Illinois at
# Urbana-Champaign, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
# SOFTWARE.
""" Driver to consume a Clang compilation database and invoke IWYU.
Example usage with CMake:
# Unix systems
$ mkdir build && cd build
$ CC="clang" CXX="clang++" cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ...
$ iwyu_tool.py -p .
# Windows systems
$ mkdir build && cd build
$ cmake -DCMAKE_CXX_COMPILER="%VCINSTALLDIR%/bin/cl.exe" \
-DCMAKE_C_COMPILER="%VCINSTALLDIR%/VC/bin/cl.exe" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-G Ninja ...
$ python iwyu_tool.py -p .
See iwyu_tool.py -h for more details on command-line arguments.
"""
import os
import sys
import json
import argparse
import subprocess
import re
import logging
# Log to iwyu.log in the working directory; console output goes via print().
# NOTE(review): LOGGER is not referenced anywhere in this file — confirm
# whether it is vestigial from a logging-based version of these functions.
logging.basicConfig(filename='iwyu.log')
LOGGER = logging.getLogger("iwyu")
def iwyu_formatter(output):
    """ Emit IWYU's output lines unchanged, joined into one print call. """
    text = '\n'.join(output)
    print(text)
# Patterns recognizing the sections of IWYU's report for one source file.
CORRECT_RE = re.compile(r'^\((.*?) has correct #includes/fwd-decls\)$')
SHOULD_ADD_RE = re.compile(r'^(.*?) should add these lines:$')
SHOULD_REMOVE_RE = re.compile(r'^(.*?) should remove these lines:$')
FULL_LIST_RE = re.compile(r'The full include-list for (.*?):$')
END_RE = re.compile(r'^---$')
LINES_RE = re.compile(r'^- (.*?) // lines ([0-9]+)-[0-9]+$')

# Parser states for clang_formatter's small state machine.
GENERAL, ADD, REMOVE, LIST = range(4)


def clang_formatter(output):
    """ Process iwyu's output into something clang-like.

    BUG FIXES vs the previous version: the print() calls used logging-style
    argument lists ('%s...', arg), which printed the format string and the
    tuple side by side instead of interpolating; they also called
    match.groups(1) where match.group(1) was intended, and the REMOVE branch
    crashed on lines lacking a '// lines N-N' suffix.
    """
    # state is (mode, current_filename).
    state = (GENERAL, None)
    for line in output:
        match = CORRECT_RE.match(line)
        if match:
            print('%s:1:1: note: #includes/fwd-decls are correct' % match.group(1))
            continue
        match = SHOULD_ADD_RE.match(line)
        if match:
            state = (ADD, match.group(1))
            continue
        match = SHOULD_REMOVE_RE.match(line)
        if match:
            state = (REMOVE, match.group(1))
            continue
        match = FULL_LIST_RE.match(line)
        if match:
            state = (LIST, match.group(1))
        elif END_RE.match(line):
            state = (GENERAL, None)
        elif not line.strip():
            continue
        elif state[0] == GENERAL:
            print(line)
        elif state[0] == ADD:
            print('%s:1:1: error: add the following line' % state[1])
            print(line)
        elif state[0] == REMOVE:
            match = LINES_RE.match(line)
            line_no = match.group(2) if match else '1'
            print('%s:%s:1: error: remove the following line' % (state[1], line_no))
            # Fall back to the raw line when it carries no line-number suffix.
            print(match.group(1) if match else line)
# Maps the --output-format command-line choices to formatter callables.
DEFAULT_FORMAT = 'iwyu'
FORMATTERS = {
    'iwyu': iwyu_formatter,
    'clang': clang_formatter
}
def get_output(cwd, command):
    """ Run *command* through the shell in *cwd* and return its output lines.

    stderr is merged into stdout; the combined output is decoded as UTF-8
    and split on line boundaries.
    """
    proc = subprocess.Popen(command,
                            cwd=cwd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    stdout, _ = proc.communicate()
    return stdout.decode("utf-8").splitlines()
def run_iwyu(cwd, compile_command, iwyu_args, verbose, formatter):
    """ Rewrite compile_command to an IWYU command, and run it.

    cwd: working directory from the compilation-database entry.
    compile_command: the original compiler command line for one file.
    iwyu_args: extra IWYU options, each forwarded with -Xiwyu.
    verbose: when True, echo the rewritten command before running.
    formatter: callable rendering IWYU's output lines (see FORMATTERS).
    """
    compiler, _, args = compile_command.partition(' ')
    if compiler.endswith('cl.exe'):
        # If the compiler name is cl.exe, let IWYU be cl-compatible
        clang_args = ['--driver-mode=cl']
    else:
        clang_args = []

    iwyu_args = ['-Xiwyu ' + a for a in iwyu_args]

    command = ['include-what-you-use'] + clang_args + iwyu_args
    command = '%s %s' % (' '.join(command), args.strip())

    if verbose:
        # BUG FIX: the original called print('%s:', command), which printed
        # the literal '%s:' and the command as two values instead of
        # interpolating the command into the message.
        print(command)

    formatter(get_output(cwd, command))
def main(compilation_db_path, source_files, verbose, formatter, iwyu_args):
    """ Entry point.

    Loads the compilation database, resolves the requested source files
    against it, and runs IWYU over each matching entry. Returns a process
    exit code (0 on success, 1 on error).

    BUG FIX: two print() calls used logging-style argument lists
    ('%s...', arg) and printed the tuple instead of interpolating; they now
    use %-interpolation.
    """
    # Canonicalize compilation database path
    if os.path.isdir(compilation_db_path):
        compilation_db_path = os.path.join(compilation_db_path,
                                           'compile_commands.json')
    compilation_db_path = os.path.realpath(compilation_db_path)
    if not os.path.isfile(compilation_db_path):
        print('ERROR: No such file or directory: \'%s\'' % compilation_db_path)
        return 1

    # Read compilation db from disk
    with open(compilation_db_path, 'r') as fileobj:
        compilation_db = json.load(fileobj)

    # expand symlinks
    for entry in compilation_db:
        entry['file'] = os.path.realpath(entry['file'])

    # Cross-reference source files with compilation database
    source_files = [os.path.realpath(s) for s in source_files]
    if not source_files:
        # No source files specified, analyze entire compilation database
        entries = compilation_db
    else:
        # Source files specified, analyze the ones appearing in compilation db,
        # warn for the rest.
        entries = []
        for source in source_files:
            matches = [e for e in compilation_db if e['file'] == source]
            if matches:
                entries.extend(matches)
            else:
                print("{} not in compilation database".format(source))
                # TODO: As long as there is no complete compilation database
                # available this is informational only, not an error.
                pass

    # Run analysis
    try:
        for entry in entries:
            cwd, compile_command = entry['directory'], entry['command']
            run_iwyu(cwd, compile_command, iwyu_args, verbose, formatter)
    except OSError as why:
        print('ERROR: Failed to launch include-what-you-use: %s' % why)
        return 1

    return 0
def _bootstrap():
    """ Parse arguments and dispatch to main(). """
    # argparse has no native notion of '--'-forwarded arguments, so the
    # usage/help strings are patched by hand to advertise them.
    def customize_usage(parser):
        """ Append the forwarded-IWYU-args hint to the usage string. """
        plain_usage = parser.format_usage
        def patched_usage():
            return plain_usage().rstrip() + ' -- [<IWYU args>]' + os.linesep
        parser.format_usage = patched_usage
    def customize_help(parser):
        """ Rebuild the help text so its first line is the patched usage. """
        plain_help = parser.format_help
        def patched_help():
            lines = plain_help().splitlines()
            lines[0] = parser.format_usage().rstrip()
            return os.linesep.join(lines) + os.linesep
        parser.format_help = patched_help
    # Build and customize the argument parser.
    parser = argparse.ArgumentParser(
        description='Include-what-you-use compilation database driver.',
        epilog='Assumes include-what-you-use is available on the PATH.')
    customize_usage(parser)
    customize_help(parser)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Print IWYU commands')
    parser.add_argument('-o', '--output-format', type=str,
                        choices=FORMATTERS.keys(), default=DEFAULT_FORMAT,
                        help='Output format (default: %s)' % DEFAULT_FORMAT)
    parser.add_argument('-p', metavar='<build-path>', required=True,
                        help='Compilation database path', dest='dbpath')
    parser.add_argument('source', nargs='*',
                        help='Zero or more source files to run IWYU on. '
                             'Defaults to all in compilation database.')
    # Split argv at the first '--': everything after it goes to IWYU.
    raw_args = sys.argv[1:]
    if '--' in raw_args:
        split_at = raw_args.index('--')
        driver_args, iwyu_args = raw_args[:split_at], raw_args[split_at + 1:]
    else:
        driver_args, iwyu_args = raw_args, []
    args = parser.parse_args(driver_args)
    sys.exit(main(args.dbpath, args.source, args.verbose,
                  FORMATTERS[args.output_format], iwyu_args))
# Script entry point: parse CLI arguments and run IWYU over the database.
if __name__ == '__main__':
    _bootstrap()
| laurentgo/arrow | cpp/build-support/iwyu/iwyu_tool.py | Python | apache-2.0 | 10,318 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Canonical interface (bus) type names.
INTERFACE_TYPE_SAS = 'SAS'
INTERFACE_TYPE_SCSI = 'SCSI'
INTERFACE_TYPE_SATA = 'SATA'
# Physical disk media types.
DISK_TYPE_HDD = 'HDD'
DISK_TYPE_SSD = 'SSD'
# Supported RAID level identifiers.
RAID_0 = '0'
RAID_1 = '1'
RAID_1_ADM = '1ADM'
RAID_10 = '10'
RAID_10_ADM = '10ADM'
RAID_5 = '5'
RAID_6 = '6'
RAID_50 = '50'
RAID_60 = '60'
# Maps an SSA-reported interface string to the canonical interface type;
# the SSD variants collapse onto their underlying bus type.
INTERFACE_TYPE_MAP = {'SCSI': INTERFACE_TYPE_SCSI,
                      'SAS': INTERFACE_TYPE_SAS,
                      'SATA': INTERFACE_TYPE_SATA,
                      'SATASSD': INTERFACE_TYPE_SATA,
                      'SASSSD': INTERFACE_TYPE_SAS}
# Maps an SSA-reported interface string to the disk media type.
DISK_TYPE_MAP = {'SCSI': DISK_TYPE_HDD,
                 'SAS': DISK_TYPE_HDD,
                 'SATA': DISK_TYPE_HDD,
                 'SATASSD': DISK_TYPE_SSD,
                 'SASSSD': DISK_TYPE_SSD}
def get_interface_type(ssa_interface):
    """Return the canonical interface type for an SSA interface string.

    Raises KeyError if *ssa_interface* is not a known SSA interface name.
    """
    return INTERFACE_TYPE_MAP[ssa_interface]
def get_disk_type(ssa_interface):
    """Return the disk media type (HDD/SSD) for an SSA interface string.

    Raises KeyError if *ssa_interface* is not a known SSA interface name.
    """
    return DISK_TYPE_MAP[ssa_interface]
| ramineni/proliantutils | proliantutils/hpssa/types.py | Python | apache-2.0 | 1,503 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography.hazmat.bindings.openssl.binding import Binding
class TestOpenSSL(object):
    """Sanity tests for the OpenSSL cffi binding and its locking callbacks."""
    def test_binding_loads(self):
        # The binding must expose both the C library and the ffi object.
        binding = Binding()
        assert binding
        assert binding.lib
        assert binding.ffi
    def test_is_available(self):
        assert Binding.is_available() is True
    def test_crypto_lock_init(self):
        # After init_static_locks() some locking callback (ours or one
        # installed by another module such as _ssl) must be registered.
        b = Binding()
        b.init_static_locks()
        lock_cb = b.lib.CRYPTO_get_locking_callback()
        assert lock_cb != b.ffi.NULL
    def _skip_if_not_fallback_lock(self, b):
        # only run this test if we are using our locking cb
        original_cb = b.lib.CRYPTO_get_locking_callback()
        if original_cb != b._lock_cb_handle:
            pytest.skip(
                "Not using the fallback Python locking callback "
                "implementation. Probably because import _ssl set one"
            )
    def test_fallback_crypto_lock_via_openssl_api(self):
        # Drive the lock through OpenSSL's CRYPTO_lock C entry point and
        # observe the corresponding Python lock changing state.
        b = Binding()
        b.init_static_locks()
        self._skip_if_not_fallback_lock(b)
        # check that the lock state changes appropriately
        lock = b._locks[b.lib.CRYPTO_LOCK_SSL]
        # starts out unlocked
        assert lock.acquire(False)
        lock.release()
        b.lib.CRYPTO_lock(
            b.lib.CRYPTO_LOCK | b.lib.CRYPTO_READ,
            b.lib.CRYPTO_LOCK_SSL, b.ffi.NULL, 0
        )
        # becomes locked
        assert not lock.acquire(False)
        b.lib.CRYPTO_lock(
            b.lib.CRYPTO_UNLOCK | b.lib.CRYPTO_READ,
            b.lib.CRYPTO_LOCK_SSL, b.ffi.NULL, 0
        )
        # then unlocked
        assert lock.acquire(False)
        lock.release()
    def test_fallback_crypto_lock_via_binding_api(self):
        # Same as above but through the binding's Python-level callback.
        b = Binding()
        b.init_static_locks()
        self._skip_if_not_fallback_lock(b)
        lock = b._locks[b.lib.CRYPTO_LOCK_SSL]
        with pytest.raises(RuntimeError):
            b._lock_cb(0, b.lib.CRYPTO_LOCK_SSL, "<test>", 1)
        # errors shouldnt cause locking
        assert lock.acquire(False)
        lock.release()
        b._lock_cb(b.lib.CRYPTO_LOCK | b.lib.CRYPTO_READ,
                   b.lib.CRYPTO_LOCK_SSL, "<test>", 1)
        # locked
        assert not lock.acquire(False)
        b._lock_cb(b.lib.CRYPTO_UNLOCK | b.lib.CRYPTO_READ,
                   b.lib.CRYPTO_LOCK_SSL, "<test>", 1)
        # unlocked
        assert lock.acquire(False)
        lock.release()
    def test_add_engine_more_than_once(self):
        # NOTE(review): the return value 2 appears to signal "engine already
        # registered" rather than an error -- confirm against the C source.
        b = Binding()
        res = b.lib.Cryptography_add_osrandom_engine()
        assert res == 2
| Lukasa/cryptography | tests/hazmat/bindings/test_openssl.py | Python | apache-2.0 | 3,198 |
from slList import *
def readFile(filename):
    """
    Read one integer per line from *filename* into a new linked list.

    :param filename: path of the input file (one integer per line)
    :return: an slList linked list holding the file's integers in order
    :raises ValueError: if a line is not a valid integer
    """
    myList = createList()
    # 'with' guarantees the handle is closed even if int() raises;
    # the original version opened the file and never closed it.
    with open(filename) as infile:
        for currentLine in infile:
            myList.append(int(currentLine))
    return myList
def main():
    """
    Prompt for a file name, load its integers into a linked list, print the
    list, sort it in place with linkSort, then print the sorted list.
    """
    name = input("Input filename: ")
    numbers = readFile(name)
    print(numbers.toString())
    linkSort(numbers)
    print(numbers.toString())
main() | moiseslorap/RIT | Computer Science 1/Homeworks/hw8/testLinkSort.py | Python | mit | 986 |
from frappe import _
def get_data():
	"""Return the CRM module's desktop configuration.

	Each entry is a section dict with a translated ``label``, an optional
	``icon`` and a list of ``items`` (doctype / page / report / help links).
	"""
	documents_section = {
		"label": _("Documents"),
		"icon": "icon-star",
		"items": [
			{
				"type": "doctype",
				"name": "Lead",
				"description": _("Database of potential customers."),
			},
			{
				"type": "doctype",
				"name": "Customer",
				"description": _("Customer database."),
			},
			{
				"type": "doctype",
				"name": "Opportunity",
				"description": _("Potential opportunities for selling."),
			},
			{
				"type": "doctype",
				"name": "Order Register",
				"description": _("Customer orders list."),
			},
			{
				"type": "doctype",
				"name": "Newsletter",
				"description": _("Newsletters to contacts, leads."),
			},
		]
	}
	masters_section = {
		"label": _("Masters"),
		"icon": "icon-wrench",
		"items": [
			{
				"type": "doctype",
				"name": "City",
				"description": _("All Cities."),
			},
			{
				"type": "doctype",
				"name": "State",
				"description": _("All State."),
			},
			{
				"type": "doctype",
				"name": "District",
				"description": _("All District."),
			},
		]
	}
	tools_section = {
		"label": _("Tools"),
		"icon": "icon-wrench",
		"items": [
			{
				"type": "doctype",
				"name": "SMS Center",
				"description": _("Send mass SMS to your contacts"),
			},
			{
				"type": "doctype",
				"name": "SMS Log",
				"description": _("Logs for maintaining sms delivery status"),
			}
		]
	}
	# Tree-managed masters (Customer Group / Territory / Sales Person) are
	# exposed through the shared "Sales Browser" page with a doctype link.
	setup_section = {
		"label": _("Setup"),
		"icon": "icon-cog",
		"items": [
			{
				"type": "doctype",
				"name": "Campaign",
				"description": _("Sales campaigns."),
			},
			{
				"type": "page",
				"label": _("Customer Group"),
				"name": "Sales Browser",
				"icon": "icon-sitemap",
				"link": "Sales Browser/Customer Group",
				"description": _("Manage Customer Group Tree."),
				"doctype": "Customer Group",
			},
			{
				"type": "page",
				"label": _("Territory"),
				"name": "Sales Browser",
				"icon": "icon-sitemap",
				"link": "Sales Browser/Territory",
				"description": _("Manage Territory Tree."),
				"doctype": "Territory",
			},
			{
				"type": "page",
				"label": _("Sales Person"),
				"name": "Sales Browser",
				"icon": "icon-sitemap",
				"link": "Sales Browser/Sales Person",
				"description": _("Manage Sales Person Tree."),
				"doctype": "Sales Person",
			},
			{
				"type": "doctype",
				"name": "Newsletter List",
				"description": _("Newsletter Mailing List"),
			},
			{
				"type": "doctype",
				"name": "SMS Settings",
				"description": _("Setup SMS gateway settings")
			},
		]
	}
	main_reports_section = {
		"label": _("Main Reports"),
		"icon": "icon-table",
		"items": [
			{
				"type": "page",
				"name": "sales-funnel",
				"label": _("Sales Funnel"),
				"icon": "icon-bar-chart",
			},
		]
	}
	standard_reports_section = {
		"label": _("Standard Reports"),
		"icon": "icon-list",
		"items": [
			{
				"type": "report",
				"is_query_report": True,
				"name": "Lead Details",
				"doctype": "Lead"
			},
			{
				"type": "report",
				"is_query_report": True,
				"name": "Customer Addresses and Contacts",
				"doctype": "Contact"
			},
			{
				"type": "report",
				"is_query_report": True,
				"name": "Customers Not Buying Since Long Time",
				"doctype": "Sales Order"
			},
		]
	}
	help_section = {
		"label": _("Help"),
		"items": [
			{
				"type": "help",
				"label": _("Lead to Quotation"),
				"youtube_id": "TxYX4r4JAKA"
			},
		]
	}
	return [
		documents_section,
		masters_section,
		tools_section,
		setup_section,
		main_reports_section,
		standard_reports_section,
		help_section,
	]
| Tejal011089/trufil-erpnext | erpnext/config/crm.py | Python | agpl-3.0 | 3,449 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2014 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
import time
from trac.admin.api import AdminCommandError, IAdminCommandProvider, \
console_date_format, get_console_locale
from trac.core import Component, ExtensionPoint, TracError, implements
from trac.util import hex_entropy, lazy
from trac.util.datefmt import get_datetime_format_hint, format_date, \
parse_date, to_datetime, to_timestamp
from trac.util.text import print_table
from trac.util.translation import _
from trac.web.api import IRequestHandler, is_valid_default_handler
UPDATE_INTERVAL = 3600 * 24 # Update session last_visit time stamp after 1 day
PURGE_AGE = 3600 * 24 * 90 # Purge session after 90 days idle
COOKIE_KEY = 'trac_session'
# Note: as we often manipulate both the `session` and the
# `session_attribute` tables, there's a possibility of table
# deadlocks (#9705). We try to prevent them to happen by always
# accessing the tables in the same order within the transaction,
# first `session`, then `session_attribute`.
class DetachedSession(dict):
    """A session keyed by sid but not bound to a live request.

    Attribute values are coerced to unicode and persisted in the `session`
    and `session_attribute` tables.
    """
    def __init__(self, env, sid):
        # An empty/None sid yields a fresh, unauthenticated, unsaved session;
        # otherwise the authenticated session data is loaded immediately.
        dict.__init__(self)
        self.env = env
        self.sid = None
        if sid:
            self.get_session(sid, authenticated=True)
        else:
            self.authenticated = False
            self.last_visit = 0
            self._new = True
            self._old = {}
    def __setitem__(self, key, value):
        # All stored values are coerced to unicode strings.
        dict.__setitem__(self, key, unicode(value))
    def set(self, key, value, default=None):
        """Set a variable in the session, or remove it if it's equal to the
        default value.
        """
        value = unicode(value)
        if default is not None:
            default = unicode(default)
        if value == default:
            self.pop(key, None)
            return
        dict.__setitem__(self, key, value)
    def get_session(self, sid, authenticated=False):
        """Load the session row and its attributes for `sid`, replacing any
        data currently held by this object.
        """
        self.env.log.debug("Retrieving session for ID %r", sid)
        with self.env.db_query as db:
            self.sid = sid
            self.authenticated = authenticated
            self.clear()
            for last_visit, in db("""
                    SELECT last_visit FROM session
                    WHERE sid=%s AND authenticated=%s
                    """, (sid, int(authenticated))):
                self._new = False
                self.last_visit = int(last_visit or 0)
                self.update(db("""
                    SELECT name, value FROM session_attribute
                    WHERE sid=%s and authenticated=%s
                    """, (sid, int(authenticated))))
                # _old snapshots the persisted state so save() can detect
                # changes later.
                self._old = self.copy()
                break
            else:
                # No matching row: treat as a brand new session.
                self.last_visit = 0
                self._new = True
                self._old = {}
    def save(self):
        """Persist the session and its attributes, then opportunistically
        purge expired anonymous sessions.
        """
        items = self.items()
        if not self._old and not items:
            # The session doesn't have associated data, so there's no need to
            # persist it
            return
        authenticated = int(self.authenticated)
        now = int(time.time())
        # We can't do the session management in one big transaction,
        # as the intertwined changes to both the session and
        # session_attribute tables are prone to deadlocks (#9705).
        # Therefore we first we save the current session, then we
        # eventually purge the tables.
        session_saved = False
        with self.env.db_transaction as db:
            # Try to save the session if it's a new one. A failure to
            # do so is not critical but we nevertheless skip the
            # following steps.
            if self._new:
                self.last_visit = now
                self._new = False
                # The session might already exist even if _new is True since
                # it could have been created by a concurrent request (#3563).
                try:
                    db("""INSERT INTO session (sid, last_visit, authenticated)
                          VALUES (%s,%s,%s)
                          """, (self.sid, self.last_visit, authenticated))
                except self.env.db_exc.IntegrityError:
                    self.env.log.warning('Session %s already exists', self.sid)
                    db.rollback()
                    return
            # Remove former values for session_attribute and save the
            # new ones. The last concurrent request to do so "wins".
            if self._old != self:
                if self._old.get('name') != self.get('name') or \
                        self._old.get('email') != self.get('email'):
                    self.env.invalidate_known_users_cache()
                if not items and not authenticated:
                    # No need to keep around empty unauthenticated sessions
                    db("DELETE FROM session WHERE sid=%s AND authenticated=0",
                       (self.sid,))
                db("""DELETE FROM session_attribute
                      WHERE sid=%s AND authenticated=%s
                      """, (self.sid, authenticated))
                self._old = dict(self.items())
                # The session variables might already have been updated by a
                # concurrent request.
                try:
                    db.executemany("""
                        INSERT INTO session_attribute
                          (sid,authenticated,name,value)
                        VALUES (%s,%s,%s,%s)
                        """, [(self.sid, authenticated, k, v)
                              for k, v in items])
                except self.env.db_exc.IntegrityError:
                    self.env.log.warning('Attributes for session %s already '
                                         'updated', self.sid)
                    db.rollback()
                    return
                session_saved = True
        # Purge expired sessions. We do this only when the session was
        # changed as to minimize the purging.
        if session_saved and now - self.last_visit > UPDATE_INTERVAL:
            self.last_visit = now
            mintime = now - PURGE_AGE
            with self.env.db_transaction as db:
                # Update the session last visit time if it is over an
                # hour old, so that session doesn't get purged
                self.env.log.info("Refreshing session %s", self.sid)
                db("""UPDATE session SET last_visit=%s
                      WHERE sid=%s AND authenticated=%s
                      """, (self.last_visit, self.sid, authenticated))
                self.env.log.debug('Purging old, expired, sessions.')
                db("""DELETE FROM session_attribute
                      WHERE authenticated=0 AND sid IN (
                          SELECT sid FROM session
                          WHERE authenticated=0 AND last_visit < %s
                      )
                      """, (mintime,))
            # Avoid holding locks on lot of rows on both session_attribute
            # and session tables
            with self.env.db_transaction as db:
                db("""
                    DELETE FROM session
                    WHERE authenticated=0 AND last_visit < %s
                    """, (mintime,))
class Session(DetachedSession):
    """Basic session handling and per-session storage."""
    def __init__(self, env, req):
        # Anonymous users get a cookie-tracked random sid; authenticated
        # users are keyed by their authname, with any pre-existing anonymous
        # cookie session promoted into the authenticated one.
        super(Session, self).__init__(env, None)
        self.req = req
        if req.authname == 'anonymous':
            if COOKIE_KEY not in req.incookie:
                self.sid = hex_entropy(24)
                self.bake_cookie()
            else:
                sid = req.incookie[COOKIE_KEY].value
                self.get_session(sid)
        else:
            if COOKIE_KEY in req.incookie:
                sid = req.incookie[COOKIE_KEY].value
                self.promote_session(sid)
            self.get_session(req.authname, authenticated=True)
    def bake_cookie(self, expires=PURGE_AGE):
        """Write the session cookie to the outgoing response."""
        assert self.sid, 'Session ID not set'
        self.req.outcookie[COOKIE_KEY] = self.sid
        self.req.outcookie[COOKIE_KEY]['path'] = self.req.base_path or '/'
        self.req.outcookie[COOKIE_KEY]['expires'] = expires
        if self.env.secure_cookies:
            self.req.outcookie[COOKIE_KEY]['secure'] = True
        self.req.outcookie[COOKIE_KEY]['httponly'] = True
    # Session IDs are restricted to word characters.
    _valid_sid_re = re.compile(r'[_A-Za-z0-9]+\Z')
    def get_session(self, sid, authenticated=False):
        refresh_cookie = False
        if not authenticated and not self._valid_sid_re.match(sid):
            raise TracError(_("Session ID must be alphanumeric."))
        if self.sid and sid != self.sid:
            refresh_cookie = True
        super(Session, self).get_session(sid, authenticated)
        if self.last_visit and time.time() - self.last_visit > UPDATE_INTERVAL:
            refresh_cookie = True
        # Refresh the session cookie if this is the first visit after a day
        if not authenticated and refresh_cookie:
            self.bake_cookie()
    def change_sid(self, new_sid):
        """Rename an anonymous session, rejecting invalid or taken IDs."""
        assert self.req.authname == 'anonymous', \
               'Cannot change ID of authenticated session'
        assert new_sid, 'Session ID cannot be empty'
        if new_sid == self.sid:
            return
        if not self._valid_sid_re.match(new_sid):
            raise TracError(_("Session ID must be alphanumeric."),
                            _("Error renaming session"))
        with self.env.db_transaction as db:
            if db("SELECT sid FROM session WHERE sid=%s", (new_sid,)):
                raise TracError(_("Session '%(id)s' already exists. "
                                  "Please choose a different session ID.",
                                  id=new_sid),
                                _("Error renaming session"))
            self.env.log.debug("Changing session ID %s to %s", self.sid,
                               new_sid)
            db("UPDATE session SET sid=%s WHERE sid=%s AND authenticated=0",
               (new_sid, self.sid))
            db("""UPDATE session_attribute SET sid=%s
                  WHERE sid=%s and authenticated=0
                  """, (new_sid, self.sid))
        self.sid = new_sid
        self.bake_cookie()
    def promote_session(self, sid):
        """Promotes an anonymous session to an authenticated session, if there
        is no preexisting session data for that user name.
        """
        assert self.req.authname != 'anonymous', \
               "Cannot promote session of anonymous user"
        with self.env.db_transaction as db:
            authenticated_flags = [authenticated for authenticated, in db(
                "SELECT authenticated FROM session WHERE sid=%s OR sid=%s",
                (sid, self.req.authname))]
            if len(authenticated_flags) == 2:
                # There's already an authenticated session for the user,
                # we simply delete the anonymous session
                db("DELETE FROM session WHERE sid=%s AND authenticated=0",
                   (sid,))
                db("""DELETE FROM session_attribute
                      WHERE sid=%s AND authenticated=0
                      """, (sid,))
            elif len(authenticated_flags) == 1:
                if not authenticated_flags[0]:
                    # Update the anonymous session records so the session ID
                    # becomes the user name, and set the authenticated flag.
                    self.env.log.debug("Promoting anonymous session %s to "
                                       "authenticated session for user %s",
                                       sid, self.req.authname)
                    db("""UPDATE session SET sid=%s, authenticated=1
                          WHERE sid=%s AND authenticated=0
                          """, (self.req.authname, sid))
                    db("""UPDATE session_attribute SET sid=%s, authenticated=1
                          WHERE sid=%s
                          """, (self.req.authname, sid))
            else:
                # We didn't have an anonymous session for this sid. The
                # authenticated session might have been inserted between the
                # SELECT above and here, so we catch the error.
                try:
                    db("""INSERT INTO session (sid, last_visit, authenticated)
                          VALUES (%s, %s, 1)
                          """, (self.req.authname, int(time.time())))
                except self.env.db_exc.IntegrityError:
                    self.env.log.warning('Authenticated session for %s '
                                         'already exists', self.req.authname)
                    db.rollback()
        self._new = False
        self.sid = sid
        self.bake_cookie(0) # expire the cookie
class SessionAdmin(Component):
    """trac-admin command provider for session management"""
    implements(IAdminCommandProvider)
    request_handlers = ExtensionPoint(IRequestHandler)
    def get_admin_commands(self):
        """Yield the `session *` trac-admin commands with their help text,
        completion callbacks and implementations.
        """
        hints = {
           'datetime': get_datetime_format_hint(get_console_locale(self.env)),
           'iso8601': get_datetime_format_hint('iso8601'),
        }
        yield ('session list', '[sid[:0|1]] [...]',
               """List the name and email for the given sids

               Specifying the sid 'anonymous' lists all unauthenticated
               sessions, and 'authenticated' all authenticated sessions.
               '*' lists all sessions, and is the default if no sids are
               given.

               An sid suffix ':0' operates on an unauthenticated session with
               the given sid, and a suffix ':1' on an authenticated session
               (the default).""",
               self._complete_list, self._do_list)
        yield ('session add', '<sid[:0|1]> [name] [email]',
               """Create a session for the given sid

               Populates the name and email attributes for the given session.
               Adding a suffix ':0' to the sid makes the session
               unauthenticated, and a suffix ':1' makes it authenticated (the
               default if no suffix is specified).""",
               None, self._do_add)
        yield ('session set', '<name|email|default_handler> '
                              '<sid[:0|1]> <value>',
               """Set the name or email attribute of the given sid

               An sid suffix ':0' operates on an unauthenticated session with
               the given sid, and a suffix ':1' on an authenticated session
               (the default).""",
               self._complete_set, self._do_set)
        yield ('session delete', '<sid[:0|1]> [...]',
               """Delete the session of the specified sid

               An sid suffix ':0' operates on an unauthenticated session with
               the given sid, and a suffix ':1' on an authenticated session
               (the default). Specifying the sid 'anonymous' will delete all
               anonymous sessions.""",
               self._complete_delete, self._do_delete)
        yield ('session purge', '<age>',
               """Purge anonymous sessions older than the given age or date

               Age may be specified as a relative time like "90 days ago", or
               as a date in the "%(datetime)s" or "%(iso8601)s" (ISO 8601)
               format.""" % hints,
               None, self._do_purge)
    @lazy
    def _do_purge(self, age):
        pass
    @lazy
    def _valid_default_handlers(self):
        # Names of request handlers usable as the project default handler.
        return sorted(handler.__class__.__name__
                      for handler in self.request_handlers
                      if is_valid_default_handler(handler))
    def _split_sid(self, sid):
        # Split 'sid:0' / 'sid:1' into (sid, authenticated-flag);
        # no suffix defaults to authenticated (1).
        if sid.endswith(':0'):
            return sid[:-2], 0
        elif sid.endswith(':1'):
            return sid[:-2], 1
        else:
            return sid, 1
    def _get_sids(self):
        # All known sessions in 'sid:flag' form, for shell completion.
        rows = self.env.db_query("SELECT sid, authenticated FROM session")
        return ['%s:%d' % (sid, auth) for sid, auth in rows]
    def _get_list(self, sids):
        # Yield display tuples for the sessions selected by `sids`
        # ('*', 'anonymous' and 'authenticated' act as wildcards).
        all_anon = 'anonymous' in sids or '*' in sids
        all_auth = 'authenticated' in sids or '*' in sids
        sids = set(self._split_sid(sid) for sid in sids
                   if sid not in ('anonymous', 'authenticated', '*'))
        rows = self.env.db_query("""
            SELECT DISTINCT s.sid, s.authenticated, s.last_visit,
                            n.value, e.value, h.value
            FROM session AS s
              LEFT JOIN session_attribute AS n
                ON (n.sid=s.sid AND n.authenticated=s.authenticated
                    AND n.name='name')
              LEFT JOIN session_attribute AS e
                ON (e.sid=s.sid AND e.authenticated=s.authenticated
                    AND e.name='email')
              LEFT JOIN session_attribute AS h
                ON (h.sid=s.sid AND h.authenticated=s.authenticated
                    AND h.name='default_handler')
            ORDER BY s.sid, s.authenticated
            """)
        for sid, authenticated, last_visit, name, email, handler in rows:
            if all_anon and not authenticated or all_auth and authenticated \
                    or (sid, authenticated) in sids:
                yield (sid, authenticated,
                       format_date(to_datetime(last_visit),
                                   console_date_format),
                       name, email, handler)
    def _complete_list(self, args):
        all_sids = self._get_sids() + ['*', 'anonymous', 'authenticated']
        return set(all_sids) - set(args)
    def _complete_set(self, args):
        if len(args) == 1:
            return ['name', 'email']
        elif len(args) == 2:
            return self._get_sids()
    def _complete_delete(self, args):
        all_sids = self._get_sids() + ['anonymous']
        return set(all_sids) - set(args)
    def _do_list(self, *sids):
        if not sids:
            sids = ['*']
        headers = (_("SID"), _("Auth"), _("Last Visit"), _("Name"),
                   _("Email"), _("Default Handler"))
        print_table(self._get_list(sids), headers)
    def _do_add(self, sid, name=None, email=None):
        sid, authenticated = self._split_sid(sid)
        with self.env.db_transaction as db:
            try:
                db("INSERT INTO session VALUES (%s, %s, %s)",
                   (sid, authenticated, int(time.time())))
            except Exception:
                raise AdminCommandError(_("Session '%(sid)s' already exists",
                                          sid=sid))
            if name is not None:
                db("INSERT INTO session_attribute VALUES (%s,%s,'name',%s)",
                   (sid, authenticated, name))
            if email is not None:
                db("INSERT INTO session_attribute VALUES (%s,%s,'email',%s)",
                   (sid, authenticated, email))
        self.env.invalidate_known_users_cache()
    def _do_set(self, attr, sid, val):
        if attr not in ('name', 'email', 'default_handler'):
            raise AdminCommandError(_("Invalid attribute '%(attr)s'",
                                      attr=attr))
        if attr == 'default_handler':
            if val and val not in self._valid_default_handlers:
                raise AdminCommandError(_("Invalid default_handler '%(val)s'",
                                          val=val))
        sid, authenticated = self._split_sid(sid)
        with self.env.db_transaction as db:
            if not db("""SELECT sid FROM session
                         WHERE sid=%s AND authenticated=%s""",
                      (sid, authenticated)):
                raise AdminCommandError(_("Session '%(sid)s' not found",
                                          sid=sid))
            db("""
                DELETE FROM session_attribute
                WHERE sid=%s AND authenticated=%s AND name=%s
                """, (sid, authenticated, attr))
            db("INSERT INTO session_attribute VALUES (%s, %s, %s, %s)",
               (sid, authenticated, attr, val))
        self.env.invalidate_known_users_cache()
    def _do_delete(self, *sids):
        with self.env.db_transaction as db:
            for sid in sids:
                sid, authenticated = self._split_sid(sid)
                if sid == 'anonymous':
                    db("DELETE FROM session WHERE authenticated=0")
                    db("DELETE FROM session_attribute WHERE authenticated=0")
                else:
                    db("""
                        DELETE FROM session
                        WHERE sid=%s AND authenticated=%s
                        """, (sid, authenticated))
                    db("""
                        DELETE FROM session_attribute
                        WHERE sid=%s AND authenticated=%s
                        """, (sid, authenticated))
        self.env.invalidate_known_users_cache()
    def _do_purge(self, age):
        when = parse_date(age, hint='datetime',
                          locale=get_console_locale(self.env))
        with self.env.db_transaction as db:
            ts = to_timestamp(when)
            db("""
                DELETE FROM session
                WHERE authenticated=0 AND last_visit<%s
                """, (ts,))
            db("""
                DELETE FROM session_attribute
                WHERE authenticated=0
                      AND sid NOT IN (SELECT sid FROM session
                                      WHERE authenticated=0)
                """)
| pkdevbox/trac | trac/web/session.py | Python | bsd-3-clause | 22,324 |
"""
SPDX-License-Identifier: Apache-2.0
Copyright (c) 2020 Arm Limited. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations
"""
# Asumptions for this script:
# 1. directory_name is scanned directory.
# Files are copied to this directory with full tree. As result, if we find
# license offender, we can have full path (just scrape directory_name). We do this
# magic because scancode allows to scan directories/one file.
# 2. SPDX and license text is a must for all code files
import json
import argparse
import sys
import os.path
import logging
import re
# Module-level logger; everything is written to scancode-evaluate.log in
# the current working directory (truncated on every run via mode='w').
userlog = logging.getLogger("scancode-evaluate")
userlog.setLevel(logging.INFO)
logfile = os.path.join(os.getcwd(), 'scancode-evaluate.log')
log_file_handler = logging.FileHandler(logfile, mode='w')
userlog.addHandler(log_file_handler)
# Human-readable reasons attached to license offenders in the log.
MISSING_LICENSE_TEXT = "Missing license header"
MISSING_PERMISIVE_LICENSE_TEXT = "Non-permissive license"
MISSING_SPDX_TEXT = "Missing SPDX license identifier"
def license_check(directory_name, file):
    """ Check licenses in the scancode json file for specified directory

    This function does not verify if file exists, should be done prior the call.

    Args:
    directory_name - where scancode was run, used to scrape this from paths
    file - scancode json output file (output from scancode --license --json-pp)

    Returns:
    0 if nothing found
    >0 - count how many license isses found
    -1 if any error in file licenses found
    """
    offenders = []
    try:
        # find all licenses in the files, must be licensed and permissive
        with open(file, 'r') as scancode_output:
            results = json.load(scancode_output)
    except ValueError:
        userlog.warning("JSON could not be decoded")
        return -1

    def add_offender(scanned_file, reason):
        # Record one offense per (file, reason). The original implementation
        # re-appended a single shared dict and overwrote its 'reason' key, so
        # earlier offender log lines could show the wrong reason.
        offenders.append({'file': scanned_file, 'reason': reason})

    try:
        for scanned_file in results['files']:
            # ignore directory, not relevant here
            if scanned_file['type'] == 'directory':
                continue
            if not scanned_file['licenses']:
                add_offender(scanned_file, MISSING_LICENSE_TEXT)
                continue
            found_spdx = False
            for license_info in scanned_file['licenses']:
                if license_info['category'] != 'Permissive':
                    add_offender(scanned_file, MISSING_PERMISIVE_LICENSE_TEXT)
                # find SPDX, it shall be one of licenses found
                if license_info['matched_rule']['identifier'].find("spdx") != -1:
                    found_spdx = True
            if not found_spdx:
                try:
                    # Issue reported here https://github.com/nexB/scancode-toolkit/issues/1913
                    # We verify here if SPDX is not really there as SDPX is part of the license text
                    # scancode has some problems detecting it properly
                    with open(os.path.join(os.path.abspath(scanned_file['path'])), 'r') as spdx_file_check:
                        filetext = spdx_file_check.read()
                    if re.findall("SPDX-License-Identifier:?", filetext):
                        continue
                    add_offender(scanned_file, MISSING_SPDX_TEXT)
                except UnicodeDecodeError:
                    # not valid file for license check
                    continue
    except KeyError:
        userlog.warning("Invalid scancode json file")
        return -1
    if offenders:
        userlog.warning("Found files with missing license details, please review and fix")
        for offender in offenders:
            userlog.warning("File: " + offender['file']['path'][len(directory_name):] + " " + "reason: " + offender['reason'])
    return len(offenders)
def parse_args():
    """Build the CLI parser for this script and parse sys.argv."""
    cli = argparse.ArgumentParser(description="License check.")
    cli.add_argument('-f', '--file',
                     help="scancode-toolkit output json file")
    cli.add_argument('-d', '--directory_name', default="SCANCODE",
                     help='Directory name where are files being checked')
    return cli.parse_args()
if __name__ == "__main__":
args = parse_args()
if args.file and os.path.isfile(args.file):
count = license_check(args.directory_name, args.file)
if count == 0:
sys.exit(0)
else:
sys.exit(-1)
else:
userlog.warning("Could not find the scancode json file")
sys.exit(-1)
| mbedmicro/mbed | tools/test/travis-ci/scancode-evaluate.py | Python | apache-2.0 | 5,239 |
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from menubar import MenuBar
from statusbar import StatusBar
from toolbar import ToolBar
from panel import Panel | whiterabbitengine/fifeplusplus | tools/editor/scripts/gui/__init__.py | Python | lgpl-2.1 | 1,143 |
#!/usr/bin/env python
#
# PySTDF - The Pythonic STDF Parser
# Copyright (C) 2006 Casey Marshall
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import sys
from distutils.core import setup
import py2exe
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
    # No command given: default to building with py2exe in quiet mode.
    sys.argv.append("py2exe")
    sys.argv.append("-q")
class Target:
    """py2exe build-target descriptor carrying version-info resources."""
    def __init__(self, **kw):
        # Copy the keyword options onto the instance, then pin the
        # version-info fields (these overwrite any same-named kwargs).
        for key, value in kw.items():
            setattr(self, key, value)
        self.version = "1.3.1"
        self.company_name = "Casey Marshall"
        self.copyright = "Copyright (c) Casey Marshall 2006, All Rights Reserved"
        self.name = "StdfExplorer"
################################################################
# A program using wxPython
# The manifest will be inserted as resource into test_wx.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store it in a file named
# test_wx.exe.manifest, and copy it with the data_files option into
# the dist-dir.
#
# XP common-controls manifest, embedded into the .exe as a resource.
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
    version="5.0.0.0"
    processorArchitecture="x86"
    name="%(prog)s"
    type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
    <dependentAssembly>
        <assemblyIdentity
            type="win32"
            name="Microsoft.Windows.Common-Controls"
            version="6.0.0.0"
            processorArchitecture="X86"
            publicKeyToken="6595b64144ccf1df"
            language="*"
        />
    </dependentAssembly>
</dependency>
</assembly>
'''

# Windows resource type id for an embedded application manifest.
RT_MANIFEST = 24

# py2exe build target for the wxPython GUI executable.
stdfexplorer_wx = Target(
    # used for the versioninfo resource
    description="StdfExplorer version 1.3.1, Codename: Raving Rabbid",
    # what to build
    script="pystdf/explorer/StdfExplorer.pyw",
    other_resources=[
        (RT_MANIFEST, 1, manifest_template % dict(prog="StdfExplorer")),
    ],
    ## icon_resources=[(1, "icon.ico")],
    dest_base="StdfExplorer",
)

setup(
    name='pystdf',
    version='1.3.1',
    description="Python module for working with STDF files",
    long_description="""
PySTDF is a Python module that makes it easy to work with STDF (Teradyne's Standard Test Data Format). STDF is a commonly used file format in semiconductor test -- automated test equipment (ATE) from such vendors as Teradyne, Verigy, LTX, Credence, and others support this format.
PySTDF provides event-based stream parsing of STDF version 4, along with indexers that can help you rearrange the data into a more useful tabular form, as well as generate missing summary records or new types of derivative records.
The parser architecture is very flexible and can easily be extended to support STDF version 3 as well as custom record types.
Potential applications of PySTDF include:
* Debugging a vendor's STDF implementation
* Straight conversion to ASCII-readable form
* Repairing STDF files
* Developing an application that leverages STDF
  - Conversion to tabular form for statistical analysis tools
  - Loading data into a relational database
PySTDF is released under a GPL license. Applications developed with PySTDF can only be released with a GPL-compatible license. Commercial applications can purchase an alternate license agreement for closed-source distribution.
""",
    author='Casey Marshall',
    author_email='casey.marshall@gmail.com',
    url='http://code.google.com/p/pystdf/',
    packages=['pystdf', 'pystdf.explorer'],
    scripts=['scripts/stdf_slice', 'scripts/rec_index', 'scripts/stdf2atdf'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'License :: Free for non-commercial use',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Intended Audience :: Manufacturing',
        'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
        'Topic :: Utilities',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Pre-processors',
    ],
    # Build the GUI target defined above instead of a bare script.
    windows=[stdfexplorer_wx],
    zipfile=None,
    options={
        "py2exe": {
            "compressed": 1,
            "optimize": 2,
            "ascii": 1,
            "bundle_files": 1,
            "packages": ['pystdf', 'pystdf.explorer'],
        }
    },
)
| cmars/pystdf | setup_py2exe.py | Python | gpl-2.0 | 5,187 |
# -*- coding: utf-8 -*-
import os
def cubicbezier(self, x0, y0, x1, y1, x2, y2, x3, y3, n=20):
    """Draw a cubic Bezier curve on this bitmap.

    The curve runs from (x0, y0) to (x3, y3), pulled toward the two
    control points (x1, y1) and (x2, y2).  It is approximated by ``n``
    straight line segments drawn with ``self.line``; sample coordinates
    are truncated to integers.
    """
    samples = []
    for step in range(n + 1):
        t = step / n
        u = 1.0 - t
        # Bernstein basis weights for the four control points at parameter t.
        w0 = u ** 3
        w1 = 3.0 * t * u ** 2
        w2 = 3.0 * t ** 2 * u
        w3 = t ** 3
        px = int(w0 * x0 + w1 * x1 + w2 * x2 + w3 * x3)
        py = int(w0 * y0 + w1 * y1 + w2 * y2 + w3 * y3)
        samples.append((px, py))
    # Connect consecutive samples with straight segments.
    for (ax, ay), (bx, by) in zip(samples, samples[1:]):
        self.line(ax, ay, bx, by)
Bitmap.cubicbezier = cubicbezier
bitmap = Bitmap(17, 17)
bitmap.cubicbezier(16, 1, 1, 4, 3, 16, 15, 11)
bitmap.chardisplay()
'''
The origin, 0,0; is the lower left, with x increasing to the right,
and Y increasing upwards.
The chardisplay above produces the following output :
+-----------------+
| |
| |
| |
| |
| @@@@ |
| @@@ @@@ |
| @ |
| @ |
| @ |
| @ |
| @ |
| @ |
| @ |
| @ |
| @@@@ |
| @@@@|
| |
+-----------------+
'''
os.system("pause")
| NicovincX2/Python-3.5 | Représentations graphiques/Bitmap/bitmap_cubic_bezier.py | Python | gpl-3.0 | 1,174 |
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before dispatching.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "getresults_order.settings")
    from django.core.management import execute_from_command_line
    # Hand the raw CLI arguments to Django's management-command dispatcher.
    execute_from_command_line(sys.argv)
| botswana-harvard/getresults-order | manage.py | Python | gpl-2.0 | 260 |
import os
import unittest
from autoprefixer.compiler import autoprefixer, AutoprefixerError
from js_host.conf import settings
settings.configure(USE_MANAGER=True)
TEST_CSS_FILE = os.path.join(os.path.dirname(__file__), 'test.css')
class TestDjangoFrontendTools(unittest.TestCase):
    """Exercise the autoprefixer() wrapper on strings, files and error input."""

    def test_autoprefixer_can_process_css_string(self):
        source = '.foo { -moz-border-radius: 100%; border-radius: 100%; }'
        self.assertEqual(autoprefixer(source), '.foo { border-radius: 100%; }')

    def test_autoprefixer_can_accept_an_options_dict(self):
        source = '.foo { -moz-border-radius: 100%; border-radius: 100%; }'
        # With only Firefox 3 targeted, the -moz- prefixed input is
        # expected back unchanged.
        result = autoprefixer(source, options={'browsers': ['Firefox 3']})
        self.assertEqual(result, source)

    def test_autoprefixer_can_process_css_file(self):
        result = autoprefixer(TEST_CSS_FILE, is_file=True)
        self.assertEqual(result, '.foo { border-radius: 100%; }')

    def test_raises_autoprefixer_error(self):
        self.assertRaises(AutoprefixerError, autoprefixer, css=None)
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.core.management.base import BaseCommand, CommandError
from django.test.client import Client
from django.urls import reverse
from django.conf import settings
from myuw.urls import urlpatterns
from myuw.test.api import get_user, get_user_pass
from myuw.util.cache_implementation import TestingMemoryCache
from django.test.utils import override_settings
import time
class Command(BaseCommand):
    """Management command that prints a CSV speed matrix: response time of
    each named MyUW URL under several mocked backend-latency settings."""

    def add_arguments(self, parser):
        # Optional filter: time only the URL with this name.
        parser.add_argument('--url_name', help="The named MyUW URL to test")

    def handle(self, *args, **options):
        # Ensure the test user exists before logging in below.
        get_user('javerage')
        skipped = []
        # CSV header: one column per simulated backend delay.
        print("URL Name,0 seconds,0.1 seconds,0.5 seconds,1.0 seconds")
        for pattern in urlpatterns:
            values = [pattern.name]
            if options["url_name"] and pattern.name != options["url_name"]:
                continue
            # Unnamed patterns cannot be reverse()d -- record and skip.
            if pattern.name is None:
                skipped.append(pattern.regex.pattern)
                continue
            # The following named APIs are excluded from timing; each is
            # recorded in `skipped` for the summary line at the end.
            if pattern.name == 'myuw_book_api':
                skipped.append(pattern.regex.pattern)
                continue
            if pattern.name == 'myuw_links_api':
                skipped.append(pattern.regex.pattern)
                continue
            if pattern.name == 'myuw_myplan_api':
                skipped.append(pattern.regex.pattern)
                continue
            if pattern.name == 'myuw_future_summer_schedule_api':
                skipped.append(pattern.regex.pattern)
                continue
            if pattern.name == 'myuw_future_schedule_api':
                skipped.append(pattern.regex.pattern)
                continue
            # NOTE(review): this assignment is dead -- `delay` is rebound
            # by the for-loop below before first use.
            delay = 0.0
            delay_values = [0.0, 0.1, 0.5, 1.0]
            # delay_values = [1.0]
            cache_dao = 'myuw.util.cache_implementation.TestingMemoryCache'
            for delay in delay_values:
                # Fresh cache per delay so every run pays full backend cost.
                TestingMemoryCache.clear_cache()
                # The decorator re-applies settings per iteration because
                # RESTCLIENTS_MOCKDATA_DELAY depends on the loop variable.
                @override_settings(RESTCLIENTS_MOCKDATA_DELAY=delay,
                                   RESTCLIENTS_USE_THREADING=True,
                                   MYUW_PREFETCH_THREADING=True,
                                   RESTCLIENTS_DAO_CACHE_CLASS=cache_dao)
                def run_it():
                    # Time a single authenticated GET of the named URL.
                    client = Client()
                    client.login(username='javerage',
                                 password=get_user_pass('javerage'))
                    t0 = time.time()
                    resp = client.get(reverse(pattern.name))
                    t1 = time.time()
                    return str(t1-t0)
                values.append(run_it())
            print(",".join(values))
        print("skipped: ", skipped)
| uw-it-aca/myuw | myuw/management/commands/myuw_speed_matrix.py | Python | apache-2.0 | 2,762 |
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
OpenEdXInstance model - Factories
"""
# Imports #####################################################################
import uuid
import factory
from factory.django import DjangoModelFactory
from instance.models.instance import OpenEdXInstance
# Classes #####################################################################
class OpenEdXInstanceFactory(DjangoModelFactory):
    """
    Factory for OpenEdXInstance test objects.

    Defaults target a known-working Open edX release
    ('named-release/cypress') so integration tests provision a
    reproducible instance.
    """
    class Meta: #pylint: disable=missing-docstring
        model = OpenEdXInstance
    # Random 8-char subdomain under .integration to avoid collisions
    # between concurrently created test instances.
    sub_domain = factory.LazyAttribute(lambda o: '{}.integration'.format(str(uuid.uuid4())[:8]))
    # Sequential human-readable name: "Test Instance 0", "Test Instance 1", ...
    name = factory.Sequence('Test Instance {}'.format)
    # Source repository fork and ref for the edx-platform checkout.
    fork_name = 'edx/edx-platform'
    ref_type = 'tags'
    branch_name = 'named-release/cypress' # Use a known working version
    # Ansible provisioning source and playbook.
    ansible_source_repo_url = 'https://github.com/open-craft/configuration.git'
    configuration_version = 'integration'
    ansible_playbook_name = 'opencraft_integration'
    # Companion service versions, pinned to the same release.
    forum_version = 'named-release/cypress'
    notifier_version = 'named-release/cypress'
    xqueue_version = 'named-release/cypress'
    certs_version = 'named-release/cypress'
| omarkhan/opencraft | instance/tests/integration/factories/instance.py | Python | agpl-3.0 | 1,964 |
#!/usr/bin/env python
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot, addContourf, addScatterPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addBlocks( T, stride, Lx, h ):
  '''
  Stamp a staggered grid of rectangular blocks of height h onto raster T.

  Parameters:
    T      -- 2D numpy array, modified in place (and also returned).
    stride -- [sy, sx] spacing between block origins in rows/columns.
    Lx     -- [ly, lx] block extents in rows/columns.
    h      -- height added to each cell covered by a block.

  Every second column of blocks is shifted down by half a stride.
  Blocks that would extend past the raster edge are not drawn.
  '''
  Tdims = T.shape
  sy = stride[0]; sx = stride[1]
  ly = Lx[0]; lx = Lx[1]
  # BUG FIX: use range() instead of Python 2-only xrange(); the rest of
  # this script already uses Python 3 print() calls.
  for i in range( int(np.ceil(Tdims[1]/sx)+1) ):
    ix  = i*sx
    ix2 = ix + lx
    for j in range( int( np.ceil(Tdims[0]/sy)+1) ):
      # Odd columns are offset vertically by half a stride (staggered layout).
      jy = j*sy + int(np.mod(i,2)*(sy/2))
      jy2 = jy+ly
      if( ix2 > Tdims[1] or jy2 > Tdims[0] ):
        break
      else:
        #print(' ix2 = {}, jy2 = {} '.format(ix2,jy2))
        T[jy:jy2, ix:ix2] += h
  return T
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
#==========================================================#
# Command-line interface: input/output rasters, block layout and margins.
parser = argparse.ArgumentParser(prog='addBlockMargin.py')
parser.add_argument("-f", "--filename",type=str, help="Name of the comp domain data file.")
parser.add_argument("-fo", "--fileOut",type=str, help="Name of output Palm topography file.")
parser.add_argument("-s","--stride", help="Stride lengths for the block arrangement. [N, E]",\
  type=int,nargs=2,default=[None,None])
parser.add_argument("-L","--Lblocks", help="Block dimensions. [W, L]",\
  type=int,nargs=2,default=[None,None])
parser.add_argument("-mw","--mrgnW", help="Zero or non-zero margin widths as ratios (0-1): [L,R,B,T]",\
  type=float,nargs=4,default=[None,None,None,None])
parser.add_argument("-mh","--mrgnH", help="Margins block heights: [L,R,B,T]. Default=0",\
  type=float,nargs=4,default=[0.,0.,0.,0.])
parser.add_argument("-wa", "--writeAscii", help="Write 'TOPOGRAPHY_DATA' ascii file.",\
  action="store_true", default=False)
parser.add_argument("-z", "--zero", help="Zero the raster file first.",\
  action="store_true", default=False)
parser.add_argument("-p", "--printOn", help="Print the resulting raster data.",\
  action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the resulting data. Don't save.",\
  action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
# Unpack the parsed arguments into short local names.
filename = args.filename
fileOut = args.fileOut
mw = args.mrgnW
mh = args.mrgnH
stride = args.stride
Lb = args.Lblocks
zeroAll = args.zero
printOn = args.printOn
printOnly = args.printOnly
writeAscii = args.writeAscii
# All width/stride/block values are mandatory -- abort if any is missing.
if( mw.count(None) != 0 ):
  sys.exit(' Error! One of the margins widths is None. Exiting ...')
if( stride.count(None) != 0 ):
  sys.exit(' Error! One of the stride lengths is None. Exiting ...')
if( Lb.count(None) != 0 ):
  sys.exit(' Error! One of the block dimensions is None. Exiting ...')
# Read the raster tile to be processed.
Rdict = readNumpyZTile(filename)
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
print(' Rdims = {} '.format(Rdims))
print(' ROrig = {} '.format(ROrig))
if( zeroAll ):
  R[:,:] = 0.
# Convert the fractional margin widths into index ranges for each edge
# (Left/Right/Bottom/Top).  NOTE(review): marginIds comes from mapTools.
L12, R12, B12, T12 = marginIds( Rdims, mw )
L1 = L12[0]; L2 = L12[1]
R1 = R12[0]; R2 = R12[1]
B1 = B12[0]; B2 = B12[1]
T1 = T12[0]; T2 = T12[1]
# Stamp blocks of the per-edge heights mh onto each non-empty margin strip.
if( not all( L12 == 0 ) ): R[:,L1:L2] = addBlocks( R[:,L1:L2], stride, Lb, mh[0] )
if( not all( R12 == 0 ) ): R[:,R1:R2] = addBlocks( R[:,R1:R2], stride, Lb, mh[1] )
if( not all( T12 == 0 ) ): R[T1:T2,:] = addBlocks( R[T1:T2,:], stride, Lb, mh[2] )
if( not all( B12 == 0 ) ): R[B1:B2,:] = addBlocks( R[B1:B2,:], stride, Lb, mh[3] )
if( not args.printOnly ):
  # Save the modified raster, optionally also as a PALM ascii topography.
  Rdict['R'] = R
  saveTileAsNumpyZ( fileOut, Rdict )
  if( writeAscii ):
    fx = open( 'TOPOGRAPHY_DATA_BLOCK' , 'w' )
    np.savetxt(fx,np.round(R),fmt='%g')
    fx.close()
if( args.printOn or args.printOnly ):
  # Show the raster; figure size is scaled to the raster aspect ratio.
  figDims = 13.*(Rdims[::-1].astype(float)/np.max(Rdims))
  fig = plt.figure(num=1, figsize=figDims)
  fig = addImagePlot( fig, R, filename )
  plt.show()
| saskartt/P4UL | pyRaster/addBlockMargin.py | Python | mit | 4,134 |
import difflib
from pyprint.ConsolePrinter import ConsolePrinter
from coalib.results.result_actions.ResultAction import ResultAction
from coalib.results.Result import Result
def format_line(line, real_nr="", sign="|", mod_nr="", symbol=""):
    """Render one diff line with an aligned line-number gutter.

    The gutter shows the original line number (right-aligned, 4 wide),
    the separator ``sign``, and the modified line number; ``symbol``
    (e.g. "+", "-" or blank) precedes the stripped line content.
    """
    gutter = "|{:>4}{}{:>4}|".format(real_nr, sign, mod_nr)
    body = "{:1}{}".format(symbol, line.rstrip("\n"))
    return gutter + body
def print_beautified_diff(difflines, printer):
    """Pretty-print a unified diff with line-number gutters via format_line().

    :param difflines: Iterable of unified-diff lines (e.g. from
                      ``difflib.unified_diff``).
    :param printer:   Object with a ``print(text, color=...)`` method.
    """
    # Running line counters, initialised by the first "@@" hunk header.
    current_line_added = None
    current_line_subtracted = None
    for line in difflines:
        # NOTE: the "---"/"+++" checks must precede the "-"/"+" checks,
        # since file headers also start with those characters.
        if line.startswith("@@"):
            # Hunk header "@@ -a,b +c,d @@": extract the starting line
            # numbers a (old file) and c (new file).
            values = line[line.find("-"):line.rfind(" ")]
            subtracted, added = tuple(values.split(" "))
            current_line_added = int(added.split(",")[0][1:])
            current_line_subtracted = int(subtracted.split(",")[0][1:])
        elif line.startswith("---"):
            # Old-file header.
            printer.print(format_line(line[4:], real_nr="----"), color="red")
        elif line.startswith("+++"):
            # New-file header.
            printer.print(format_line(line[4:], mod_nr="++++"),
                          color="green")
        elif line.startswith("+"):
            # Added line: advances only the new-file counter.
            printer.print(format_line(line[1:],
                                      mod_nr=current_line_added,
                                      symbol="+"),
                          color="green")
            current_line_added += 1
        elif line.startswith("-"):
            # Removed line: advances only the old-file counter.
            printer.print(format_line(line[1:],
                                      real_nr=current_line_subtracted,
                                      symbol="-"),
                          color="red")
            current_line_subtracted += 1
        else:
            # Context line (leading space stripped): present in both files,
            # so both counters advance.
            printer.print(format_line(line[1:],
                                      real_nr=current_line_subtracted,
                                      mod_nr=current_line_added,
                                      symbol=" "))
            current_line_subtracted += 1
            current_line_added += 1
class ShowPatchAction(ResultAction):
    """Result action that prints the diff of the patch a result proposes."""

    @classmethod
    def is_applicable(cls, result, original_file_dict, file_diff_dict):
        # Only applicable when the result actually carries diffs.
        return isinstance(result, Result) and result.diffs is not None

    def apply(self,
              result,
              original_file_dict,
              file_diff_dict,
              colored: bool=True):
        '''
        Print a diff of the patch that would be applied.

        :param result:             The result whose diffs are shown.
        :param original_file_dict: Mapping of filename to original file lines.
        :param file_diff_dict:     Mapping of filename to diffs already
                                   accepted in this session; returned
                                   unchanged.
        :param colored:            Whether or not to use colored output.
        '''
        printer = ConsolePrinter(colored)

        for filename, this_diff in sorted(result.diffs.items()):
            original_file = original_file_dict[filename]
            try:
                # A diff was already applied to this file: show this patch
                # relative to the current (already patched) content.
                current_file = file_diff_dict[filename].modified
                new_file = (file_diff_dict[filename] + this_diff).modified
            except KeyError:
                # No prior diff for this file: compare against the original.
                current_file = original_file
                new_file = this_diff.modified

            print_beautified_diff(difflib.unified_diff(current_file,
                                                       new_file,
                                                       fromfile=filename,
                                                       tofile=filename),
                                  printer)

        return file_diff_dict
| scriptnull/coala | coalib/results/result_actions/ShowPatchAction.py | Python | agpl-3.0 | 3,352 |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import math
import random
import re
import os.path
import pyglet
from pyglet.gl import *
import xml.dom
import xml.dom.minidom
class SmoothLineGroup(pyglet.graphics.Group):
    """pyglet rendering group that draws its vertex lists as antialiased
    (smoothed, alpha-blended) lines."""

    def set_state(self):
        # Save the current enable flags so unset_state() can restore them.
        glPushAttrib(GL_ENABLE_BIT)
        glEnable(GL_LINE_SMOOTH)
        glEnable(GL_BLEND)
        # Standard alpha blending, needed for the smoothed line edges.
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)

    def unset_state(self):
        # Restore the attribute state pushed in set_state().
        glPopAttrib()
class Curve:
    """One SVG <path> element, tessellated into GL_LINES vertex lists.

    The "d" attribute string is split into commands by PATH_RE and each
    command dispatched through the HANDLERS registry populated by the
    @handle decorators below.  All emitted y coordinates are negated
    (SVG's y axis points down, GL's points up), and the overall bounds
    are tracked in min_x/min_y/max_x/max_y.

    NOTE(review): lowercase (relative) commands are dispatched to the
    same handlers as the absolute ones and are NOT treated as relative
    -- see the TODO in moveto().
    """

    PATH_RE = re.compile(r'([MLHVCSQTAZ])([^MLHVCSQTAZ]+)', re.IGNORECASE)
    INT = r'([+-]?\d+)'
    FLOAT = r'(?:[\s,]*)([+-]?\d+(?:\.\d+)?)'
    HANDLERS = dict()

    def handle(char, rx, types, HANDLERS=HANDLERS):
        """Class-body decorator factory: register the decorated function
        as the handler for path command `char`.

        `rx` is the argument-matching regex (None for argument-less Z)
        and `types` the per-field conversion callables.  HANDLERS is
        bound as a default argument so the registry is reachable while
        the class body is still executing.
        """
        def register(function):
            HANDLERS[char] = (rx and re.compile(rx), function, types)
            return function
        return register

    def __init__(self, spec, batch):
        """Parse path data `spec` and add its line segments to `batch`."""
        self.batch = batch
        self.start = None         # first point of the current sub-path
        self.current = None       # current pen position
        # BUG FIX: initialise last_control so an S command that is not
        # preceded by C/S trips the intended assert in smooth_curveto()
        # instead of raising AttributeError.
        self.last_control = None
        self.min_x = self.min_y = self.max_x = self.max_y = None
        for cmd, value in self.PATH_RE.findall(spec):
            # print (cmd, value)
            if not cmd:
                continue
            rx, handler, types = self.HANDLERS[cmd.upper()]
            if rx is None:
                handler(self, cmd)
            else:
                # Convert each matched argument tuple with its type list.
                v = list()
                for fields in rx.findall(value):
                    v.append([types[i](e) for i, e in enumerate(fields)])
                handler(self, cmd, v)

    def _determine_rect(self, x, y):
        """Grow the tracked bounds to include the point (x, -y)."""
        y = -y
        if self.min_x is None:
            self.min_x = self.max_x = x
            self.min_y = self.max_y = y
        else:
            if self.min_x > x:
                self.min_x = x
            elif self.max_x < x:
                self.max_x = x
            if self.min_y > y:
                self.min_y = y
            elif self.max_y < y:
                self.max_y = y

    @handle('M', FLOAT * 2, (float, float))
    def moveto(self, cmd, points):
        """Start a new sub-path at the given (x,y) coordinate. M (uppercase)
        indicates that absolute coordinates will follow; m (lowercase)
        indicates that relative coordinates will follow. If a relative moveto
        (m) appears as the first element of the path, then it is treated as a
        pair of absolute coordinates. If a moveto is followed by multiple pairs
        of coordinates, the subsequent pairs are treated as implicit lineto
        commands.

        Parameters are (x y)+
        """
        points = [list(map(float, point)) for point in points]
        # TODO: handle relative
        # TODO: confirm that we always reset start here
        self.start = self.current = points[0]
        # BUG FIX: was `> 2`, which silently dropped the implicit lineto
        # when the moveto carried exactly one extra coordinate pair.
        if len(points) > 1:
            self.lineto({'m': 'l', 'M': 'L'}[cmd], points[1:])

    @handle('L', FLOAT * 2, (float, float))
    def lineto(self, cmd, points):
        """Draw a line from the current point to the given (x,y) coordinate
        which becomes the new current point. L (uppercase) indicates that
        absolute coordinates will follow; l (lowercase) indicates that relative
        coordinates will follow. A number of coordinates pairs may be specified
        to draw a polyline. At the end of the command, the new current point is
        set to the final set of coordinates provided.

        Parameters are (x y)+
        """
        l = list()
        self._determine_rect(*self.current)
        for point in points:
            cx, cy = self.current
            x, y = list(map(float, point))
            l.extend([cx, -cy])
            l.extend([x, -y])
            self.current = (x, y)
            self._determine_rect(x, y)
        # BUG FIX: use // so the vertex count stays an int under Python 3
        # (true division would hand Batch.add a float).
        self.batch.add(len(l) // 2, GL_LINES, SmoothLineGroup(), ('v2f', l))

    @handle('H', FLOAT, (float,))
    def horizontal_lineto(self, cmd, xvals):
        """Draws a horizontal line from the current point (cpx, cpy) to (x,
        cpy). H (uppercase) indicates that absolute coordinates will follow; h
        (lowercase) indicates that relative coordinates will follow. Multiple x
        values can be provided (although usually this doesn't make sense). At
        the end of the command, the new current point becomes (x, cpy) for the
        final value of x.

        Parameters are x+
        """
        cx, cy = self.current
        self._determine_rect(*self.current)
        # Only the final x value matters; intermediate ones are collinear.
        x = float(xvals[-1])
        self.batch.add(2, GL_LINES, None, ('v2f', (cx, -cy, x, -cy)))
        self.current = (x, cy)
        self._determine_rect(x, cy)

    @handle('V', FLOAT, (float,))
    def vertical_lineto(self, cmd, yvals):
        """Draws a vertical line from the current point (cpx, cpy) to (cpx, y).
        V (uppercase) indicates that absolute coordinates will follow; v
        (lowercase) indicates that relative coordinates will follow. Multiple y
        values can be provided (although usually this doesn't make sense). At
        the end of the command, the new current point becomes (cpx, y) for the
        final value of y.

        Parameters are y+
        """
        cx, cy = self.current
        self._determine_rect(*self.current)
        # Only the final y value matters; intermediate ones are collinear.
        y = float(yvals[-1])
        self.batch.add(2, GL_LINES, None, ('v2f', [cx, -cy, cx, -y]))
        self.current = (cx, y)
        self._determine_rect(cx, y)

    @handle('Z', None, None)
    def closepath(self, cmd):
        """Close the current subpath by drawing a straight line from the
        current point to current subpath's initial point.
        """
        # BUG FIX: tuple() both operands -- immediately after a moveto,
        # self.current is a list, and `list + tuple` raises TypeError.
        self.batch.add(2, GL_LINES, SmoothLineGroup(),
                       ('v2f', tuple(self.current) + tuple(self.start)))

    @handle('C', FLOAT * 6, (float, ) * 6)
    def curveto(self, cmd, control_points):
        """Draws a cubic Bézier curve from the current point to (x,y) using
        (x1,y1) as the control point at the beginning of the curve and (x2,y2)
        as the control point at the end of the curve. C (uppercase) indicates
        that absolute coordinates will follow; c (lowercase) indicates that
        relative coordinates will follow. Multiple sets of coordinates may be
        specified to draw a polybézier. At the end of the command, the new
        current point becomes the final (x,y) coordinate pair used in the
        polybézier.

        Control points are (x1 y1 x2 y2 x y)+
        """
        l = list()
        last = None
        for entry in control_points:
            x1, y1, x2, y2, x, y = list(map(float, entry))
            t = 0
            cx, cy = self.current
            self.last_control = (x2, y2)
            self.current = (x, y)
            # Fold the Bernstein factor 3 into the control coordinates.
            x1 *= 3
            x2 *= 3
            y1 *= 3
            y2 *= 3
            # Sample the cubic at steps of 0.01 and emit line segments.
            while t <= 1.01:
                a = t
                a2 = a ** 2
                a3 = a ** 3
                b = 1 - t
                b2 = b ** 2
                b3 = b ** 3
                px = cx * b3 + x1 * b2 * a + x2 * b * a2 + x * a3
                py = cy * b3 + y1 * b2 * a + y2 * b * a2 + y * a3
                if last is not None:
                    l.extend(last)
                    l.extend((px, -py))
                last = (px, -py)
                self._determine_rect(px, py)
                t += 0.01
        # BUG FIX: // keeps the vertex count an int under Python 3.
        self.batch.add(len(l) // 2, GL_LINES, SmoothLineGroup(), ('v2f', l))

    @handle('S', FLOAT * 4, (float, ) * 4)
    def smooth_curveto(self, cmd, control_points):
        """Draws a cubic Bézier curve from the current point to (x,y). The
        first control point is assumed to be the reflection of the second
        control point on the previous command relative to the current point.
        (If there is no previous command or if the previous command was not an
        C, c, S or s, assume the first control point is coincident with the
        current point.) (x2,y2) is the second control point (i.e., the control
        point at the end of the curve). S (uppercase) indicates that absolute
        coordinates will follow; s (lowercase) indicates that relative
        coordinates will follow. Multiple sets of coordinates may be specified
        to draw a polybézier. At the end of the command, the new current point
        becomes the final (x,y) coordinate pair used in the polybézier.

        Control points are (x2 y2 x y)+
        """
        assert self.last_control is not None, 'S must follow S or C'
        l = list()
        last = None
        for entry in control_points:
            x2, y2, x, y = list(map(float, entry))

            # Reflect last control point about the current point to get
            # the (implicit) first control point of this segment.
            cx, cy = self.current
            lcx, lcy = self.last_control
            dx, dy = cx - lcx, cy - lcy
            x1, y1 = cx + dx, cy + dy

            t = 0
            cx, cy = self.current
            self.last_control = (x2, y2)
            self.current = (x, y)
            # Fold the Bernstein factor 3 into the control coordinates.
            x1 *= 3
            x2 *= 3
            y1 *= 3
            y2 *= 3
            # Sample the cubic at steps of 0.01 and emit line segments.
            while t <= 1.01:
                a = t
                a2 = a ** 2
                a3 = a ** 3
                b = 1 - t
                b2 = b ** 2
                b3 = b ** 3
                px = cx * b3 + x1 * b2 * a + x2 * b * a2 + x * a3
                py = cy * b3 + y1 * b2 * a + y2 * b * a2 + y * a3
                if last is not None:
                    l.extend(last)
                    l.extend((px, -py))
                last = (px, -py)
                self._determine_rect(px, py)
                t += 0.01

        # degenerate vertices
        # BUG FIX: // keeps the vertex count an int under Python 3.
        self.batch.add(len(l) // 2, GL_LINES, SmoothLineGroup(), ('v2f', l))

    @handle('Q', FLOAT * 4, (float, ) * 4)
    def quadratic_curveto(self, cmd, control_points):
        """Draws a quadratic Bézier curve from the current point to (x,y)
        using (x1,y1) as the control point. Q (uppercase) indicates that
        absolute coordinates will follow; q (lowercase) indicates that
        relative coordinates will follow. Multiple sets of coordinates may
        be specified to draw a polybézier. At the end of the command, the
        new current point becomes the final (x,y) coordinate pair used in
        the polybézier.

        Control points are (x1 y1 x y)+
        """
        raise NotImplementedError('not implemented')

    @handle('T', FLOAT * 2, (float, ) * 2)
    def smooth_quadratic_curveto(self, cmd, control_points):
        """Draws a quadratic Bézier curve from the current point to (x,y).
        The control point is assumed to be the reflection of the control
        point on the previous command relative to the current point. (If
        there is no previous command or if the previous command was not a
        Q, q, T or t, assume the control point is coincident with the
        current point.) T (uppercase) indicates that absolute coordinates
        will follow; t (lowercase) indicates that relative coordinates will
        follow. At the end of the command, the new current point becomes
        the final (x,y) coordinate pair used in the polybézier.

        Control points are (x y)+
        """
        raise NotImplementedError('not implemented')

    @handle('A', FLOAT * 3 + INT * 2 + FLOAT * 2,
            (float,) * 3 + (int,) * 2 + (float,) * 2)
    def elliptical_arc(self, cmd, parameters):
        """Draws an elliptical arc from the current point to (x, y). The
        size and orientation of the ellipse are defined by two radii (rx,
        ry) and an x-axis-rotation, which indicates how the ellipse as a
        whole is rotated relative to the current coordinate system. The
        center (cx, cy) of the ellipse is calculated automatically to
        satisfy the constraints imposed by the other parameters.
        large-arc-flag and sweep-flag contribute to the automatic
        calculations and help determine how the arc is drawn.

        Parameters are (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
        """
        raise NotImplementedError('not implemented')
class SVG:
    """Load an SVG file and render its <path> elements with pyglet.

    All paths found inside <g> groups are tessellated into one Batch;
    draw() scales/translates the drawing so its natural bounds fill the
    display rect.
    """

    def __init__(self, filename, rect=None):
        """Parse `filename`; `rect` is the (x, y, w, h) display rectangle
        (defaults to the drawing's natural bounds).

        :raises ValueError: if the document root is not <svg>.
        """
        dom = xml.dom.minidom.parse(filename)
        root = dom.documentElement
        if root.tagName != 'svg':
            raise ValueError('document is <%s> instead of <svg>' % root.tagName)

        # generate all the drawing elements
        self.batch = pyglet.graphics.Batch()
        self.objects = list()
        for group in root.getElementsByTagName('g'):
            for path in group.getElementsByTagName('path'):
                self.objects.append(Curve(path.getAttribute('d'), self.batch))

        # determine drawing bounds
        self.min_x = min(o.min_x for o in self.objects)
        self.max_x = max(o.max_x for o in self.objects)
        self.min_y = min(o.min_y for o in self.objects)
        self.max_y = max(o.max_y for o in self.objects)

        # determine or apply drawing rect
        if rect is None:
            # BUG FIX: go through set_rect() so scale_x/scale_y and the
            # translate factors are initialised (the original left them
            # undefined in this branch, making draw() raise AttributeError).
            self.set_rect((self.min_x, self.min_y, self.max_x - self.min_x,
                           self.max_y - self.min_y))
        else:
            self.set_rect(rect)

    def set_rect(self, rect):
        """Set the display rect and recompute the draw() transform."""
        self._rect = rect
        # figure transform for display rect
        self.translate_x, self.translate_y, rw, rh = rect
        self.scale_x = abs(rw / float(self.max_x - self.min_x))
        self.scale_y = abs(rh / float(self.max_y - self.min_y))

    # `rect` reads _rect and routes assignment through set_rect().
    rect = property(lambda self: self._rect, set_rect)

    def draw(self):
        """Render the drawing, scaled and translated into the display rect."""
        glPushMatrix()
        if self._rect is not None:
            glScalef(self.scale_x, self.scale_y, 1)
            # BUG FIX: the y component previously reused translate_x
            # (copy-paste error), skewing the vertical placement.
            glTranslatef(self.translate_x - self.min_x,
                         self.translate_y - self.min_y, 0)
        self.batch.draw()
        glPopMatrix()
glPopMatrix()
# Demo driver: open a window and render the sample SVG stretched to it.
w = pyglet.window.Window(width=600, height=300, resizable=True)
dirname = os.path.dirname(__file__)
svg = SVG(os.path.join(dirname, 'hello_world.svg'), rect=(0, 0, 600, 300))
@w.event
def on_draw():
    # Redraw the whole scene each frame.
    w.clear()
    svg.draw()
@w.event
def on_resize(w, h):
    # NOTE(review): the parameter `w` shadows the module-level window `w`;
    # here it is the new window width supplied by pyglet.
    # Keep the rect origin, stretch the drawing to the new window size.
    svg.rect = svg.rect[:2] + (w, h)
pyglet.app.run()
| bitcraft/pyglet | contrib/experimental/svg_test.py | Python | bsd-3-clause | 14,032 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Commit', fields ['repository_id', 'date_added']
db.create_index('sentry_commit', ['repository_id', 'date_added'])
    def backwards(self, orm):
        """Revert: drop the composite index added in forwards()."""
        # Removing index on 'Commit', fields ['repository_id', 'date_added']
        db.delete_index('sentry_commit', ['repository_id', 'date_added'])
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 10, 17, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Repository'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'M3q5J1slH8D6cKCdQ80XjNpgY9lanKSB'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | JackDanger/sentry | src/sentry/south_migrations/0274_auto__add_index_commit_repository_id_date_added.py | Python | bsd-3-clause | 60,056 |
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
# Static documentation pages, each rendered straight from a template via
# the generic TemplateView — no custom view functions are needed.
# NOTE(review): patterns('') is the pre-Django-1.8 URLconf style (removed
# in Django 1.10), so this file targets an older Django release.
urlpatterns = patterns(
    '',
    url(r'^manifesto$', TemplateView.as_view(template_name='docs/manifesto.html'), name='docs_manifesto'),
    url(r'^about$', TemplateView.as_view(template_name='docs/about.html'), name='docs_about'),
    url(r'^guidelines$', TemplateView.as_view(template_name='docs/guidelines.html'), name='docs_guidelines'),
    url(r'^terms$', TemplateView.as_view(template_name='docs/terms.html'), name='docs_terms'),
)
| urlist/devcharm | docs/urls.py | Python | gpl-3.0 | 532 |
# Interactive driver: prompt for a CNF file path, then enumerate every
# satisfying assignment using the project's Solver class.
# NOTE(review): raw_input and the print statement make this Python 2 only.
# Import the Solver class
from Solver import *
# Ask for file path
satFile = raw_input("Enter path to CNF file: ")
# Create a SAT problem to solve
x = Solver(satFile)
# Now solve the SAT problem (finding all solutions)
solutions = x.simpleSolveAll()
# Print out all solutions
print "Solutions:\n", solutions
| VictorLoren/python-sat-solver | runMe.py | Python | mit | 313 |
import os
import webapp2
import jinja2
import webapp2
import json
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class UserRating(ndb.Model):
    """Datastore entity holding one user's submitted ratings."""
    # Display / lookup name of the rater.
    username = ndb.StringProperty()
    # Arbitrary JSON payload of rating data, stored as posted by the client.
    ratings = ndb.JsonProperty()
class MainPage(webapp2.RequestHandler):
    """Serves the landing page rendered from the index.html template."""

    def get(self):
        """Render index.html with a fixed context and return it as HTML."""
        self.response.headers['Content-Type'] = 'text/html'
        context = {'value': 8}
        page = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(page.render(context))
class RatingPage(webapp2.RequestHandler):
    """Echo-style endpoint used while developing the rating API."""

    def get(self):
        """Placeholder GET; returns a fixed debug string as HTML."""
        self.response.headers['Content-Type'] = 'text/html'
        self.response.write('dammit')

    def post(self):
        """Echo the posted JSON body back, tagged with a debug marker.

        The request body must be a JSON object; it is returned with an
        extra 'stuff' key so the client can confirm round-tripping works.
        """
        # BUG FIX: the response body is JSON, so advertise it as such
        # (this was incorrectly declared text/html).
        self.response.headers['Content-Type'] = 'application/json'
        json_data = json.loads(self.request.body)
        json_data['stuff'] = "marcos"
        self.response.write(json.dumps(json_data))
class APIPage(webapp2.RequestHandler):
    """Ad-hoc JSON API for creating and fetching UserRating entities."""
    def get(self):
        """Return the username of one hard-coded UserRating entity.

        NOTE(review): the entity id is hard-wired, so this only works
        against a datastore containing that specific entity — presumably
        a development fixture; confirm before relying on it. ('id' also
        shadows the builtin.)
        """
        self.response.headers['Content-Type'] = 'application/json'
        id = 6473924464345088
        user = ndb.Key(UserRating, id).get()
        name = user.username
        self.response.write(json.dumps(name))
    def post(self):
        """Create a UserRating from the posted JSON and return its key id."""
        self.response.headers['Content-Type'] = 'application/json'
        #TODO:// Make this more secure
        json_data = json.loads(self.request.body)
        user_rating = UserRating()
        user_rating.username = json_data['username']
        user_rating.ratings = json_data['ratings']
        user_key = user_rating.put()
        # NOTE(review): hand-assembled JSON; json.dumps({'user_key': ...})
        # would be safer while keeping the same output shape.
        self.response.write('{"user_key":"' + str(user_key.id()) +'"}')
class RatingsPage(webapp2.RequestHandler):
    """Collection endpoint: list every stored rating, or wipe them all."""

    def get(self):
        """Return all stored ratings as {"stuff": [...]} JSON."""
        self.response.headers['Content-Type'] = 'application/json'
        entities = UserRating.query().fetch()
        payload = {}
        payload['stuff'] = [entity.ratings for entity in entities]
        self.response.write(json.dumps(payload))

    def delete(self):
        """Delete every UserRating entity (keys-only fetch, bulk delete)."""
        keys = UserRating.query().fetch(keys_only=True)
        ndb.delete_multi(keys)
# WSGI routing table: maps URL paths to their handler classes.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/api/rating', RatingPage),
    ('/api/test', APIPage),
    ('/api/ratings', RatingsPage),
    ], debug=True)  # debug=True returns stack traces in responses; disable in production
| msavoury/machine-learning | apps/rateawatch/rateawatch.py | Python | mit | 2,526 |
from pypers.core.step import Step
from pypers.steps.mothur import Mothur
import os
import json
import re
class MothurGetGroups(Mothur):
    """
    The get.groups command selects sequences from a specific group or set of groups from the following file types: fasta, name, group, list, taxonomy.
    """

    spec = {
        'name'    : 'MothurGetGroups',
        'version' : '20151106',
        'descr'   : [
            'Selects sequences for a specific group or set of groups, currently we only use this to create a counts file for error estimating'
        ],
        'url' : 'www.mothur.org/wiki/Get.groups',
        'args' : {
            'inputs'  : [
                    {
                        'name'     : 'input_fasta',
                        'type'     : 'file',
                        'iterable' : True,
                        'descr'    : 'input fasta filename'
                    },
                    {
                        'name'     : 'input_counts',
                        'type'     : 'file',
                        'iterable' : True,
                        'descr'    : 'input counts filename'
                    }
            ],
            'outputs' : [
                    {
                        'name' : 'output_fasta',
                        'type' : 'file',
                        'descr': 'output fasta filename'
                    },
                    {
                        'name' : 'output_counts',
                        'type' : 'file',
                        'descr': 'output counts filename'
                    }
            ],
            'params'  : [
                    {
                      'name'    : 'groups',
                      'type'    : 'str',
                      'descr'   : 'Group on which to select',
                      'readonly': True
                    }
            ]
        },
        'requirements' : {
            'cpus' : '8'
        }
    }

    def process(self):
        """
        Create the necessary input file links and run mothur command.

        Links each fasta/counts pair into the output directory, runs
        mothur's get.groups on it, and records the resulting
        *.pick.* output file paths.
        """
        # Normalise scalar inputs to lists so a single file iterates cleanly.
        if type(self.input_fasta) != list:
            self.input_fasta = [self.input_fasta]
        if type(self.input_counts) != list:
            self.input_counts = [self.input_counts]

        for idx, input_fasta in enumerate(self.input_fasta):
            self.mk_links([input_fasta], self.output_dir)
            self.mk_links([self.input_counts[idx]], self.output_dir)

            input_fasta = os.path.join(self.output_dir, os.path.basename(input_fasta))
            input_counts = os.path.join(self.output_dir, os.path.basename(self.input_counts[idx]))

            groups = self.groups.replace("-", r"\-")  # need to escape hyphens in groups name param
            extra_params = {'fasta': input_fasta, 'groups': groups, 'count': input_counts}
            self.run_cmd('get.groups', extra_params)

            # BUG FIX: the dot in the extension must be escaped, otherwise
            # the regex matches any character before 'fasta'/'count_table'.
            self.output_fasta = re.sub(r'\.fasta$', '.pick.fasta', input_fasta)
            self.output_counts = re.sub(r'\.count_table$', '.pick.count_table', input_counts)
| frankosan/pypers | pypers/steps/mothur/MothurGetGroups.py | Python | gpl-3.0 | 2,992 |
import logging
import re
import operator
from google.appengine.ext import db
from django.shortcuts import redirect
from piston.handler import BaseHandler
from piston.utils import rc, throttle
from appengine_django.auth import models as authModels
from app import models, forms
class SearchHandler(BaseHandler):
    """Free-text search across bars and beers by name."""
    # BUG FIX: must be a one-element tuple — ('GET') is just the string 'GET'.
    allowed_methods = ('GET',)  # , 'PUT', 'DELETE')
    viewname = 'search_results'

    def read(self, request, query=None):
        """Return bars and beers whose name matches *query*, sorted by name.

        The query comes from the URL (``query``) or, failing that, from
        the ``q`` GET parameter.
        """
        logging.info('passed: %s', query)
        if not query:  # covers both None and the empty string
            query = request.GET.get('q')
            logging.info('picked: %s', query)
        barQry = models.Bar.get_by_name(query)
        beerQry = models.Beer.get_by_name(query)
        results = list(barQry) + list(beerQry)
        results.sort(key=operator.attrgetter('name'))
        return {'results': results}
class BarHandler(BaseHandler):
    """Detail endpoint for a single Bar."""
    # BUG FIX: must be a one-element tuple — ('GET') is just the string 'GET'.
    allowed_methods = ('GET',)  # , 'PUT', 'DELETE')
    fields = ('name', 'remote_url', 'location')
    model = models.Bar
    viewname = 'bar_detail'

    def read(self, request, bar_id=None):
        """Return the bar with the given id (None when bar_id is absent)."""
        if bar_id:
            bar = models.Bar.get_by_id(int(bar_id))
            return {
                'bar': bar,
            }
class BarListHandler(BarHandler):
    """Collection endpoint listing every Bar (access rules inherited)."""
    fields = ('resource_uri', 'name')
    viewname = 'bar_list'
    def read(self, request):
        # Unfiltered list of all bars.
        return models.Bar.all()
class BeerHandler(BaseHandler):
    """Detail endpoint for a single Beer."""
    # BUG FIX: must be a one-element tuple — ('GET') is just the string 'GET'.
    allowed_methods = ('GET',)
    model = models.Beer
    viewname = 'beer_detail'

    def read(self, request, beer_id):
        """Return the beer with the given id."""
        if beer_id:
            beer = models.Beer.get_by_id(int(beer_id))
            return {'beer': beer}
class StockedBeerHandler(BaseHandler):
    """Detail endpoint for one beer stocked at a specific bar.

    The static methods below are piston field callbacks matching the
    names listed in ``fields``.
    """
    allowed_methods = ('GET', 'DELETE') #, 'POST', 'PUT')
    fields = ('resource_uri', 'name', 'brewery')
    viewname = 'stockedbeer_detail'
    model = models.StockedBeer
    def read(self, request, bar_id, beer_id):
        # Resolve through the bar so only beers actually stocked there match.
        if bar_id and beer_id:
            bar = models.Bar.get_by_id(int(bar_id))
            beer = bar.get_beer_in_stock(int(beer_id))
            return {'beer': beer}
    def delete(self, request, bar_id, beer_id):
        # Removes the stocking record, not the underlying Beer entity.
        if bar_id and beer_id:
            bar = models.Bar.get_by_id(int(bar_id))
            beer = bar.get_beer_in_stock(int(beer_id))
            beer.delete()
            #return {'beer': beer}
            return rc.DELETED
        else:
            return rc.BAD_REQUEST
    @staticmethod
    def resource_uri(stockedbeer):
        # (viewname, args) pair consumed by piston's URI reverser.
        bar_id = stockedbeer.bar.key().id()
        beer_id = stockedbeer.beer.key().id()
        return ('stockedbeer', [bar_id, beer_id])
    @staticmethod
    def name(stockedbeer):
        # beer_name is kept in sync via StockedBeer.update_ref_values().
        return stockedbeer.beer_name
    @staticmethod
    def brewery(stockedbeer):
        return stockedbeer.beer.brewery
    @staticmethod
    def stocked(stockedbeer):
        return stockedbeer.currently_stocked
class StockedBeerListHandler(StockedBeerHandler):
    """Collection endpoint for the beers stocked at a given bar."""
    allowed_methods = ('GET', 'POST')
    fields = ('resource_uri', 'stocked', 'brewery', 'name')
    viewname = 'beer_list'

    def read(self, request, bar_id):
        """List every StockedBeer record for the bar."""
        if bar_id:
            bar = models.Bar.get_by_id(int(bar_id))
            beers = bar.get_beer_in_stock()
            return {'stockedbeers': beers}

    def create(self, request, bar_id):
        """Create a Beer from the POSTed form data and stock it at the bar."""
        logging.info('bar_id: %s', bar_id)
        logging.info('content_type: %s', request.content_type)
        data = request.POST
        logging.info('data: %s', data)
        if bar_id:
            bar = models.Bar.get_by_id(int(bar_id))
            form = forms.BeerForm(data)
            beer = form.save(commit=False)
            # Pre-compute lower-case word tokens used for name search.
            beer.name_parts = re.findall('[a-z]+', beer.name.lower())
            logging.info('Beer name parts: %s', beer.name_parts)
            beer.put()
            stockedbeer = models.StockedBeer(beer=beer, bar=bar)
            stockedbeer.update_ref_values()
            stockedbeer.put()
            logging.info('Beer %s, Stocked at %s', stockedbeer.beer_name, stockedbeer.bar_name)
            return {'stockedbeers': bar.get_beer_in_stock()}
        else:
            # BUG FIX: previously called super(models.StockedBeer, self),
            # passing an unrelated model class to super(), which raises
            # TypeError at runtime. Delegate to the parent handler instead.
            return super(StockedBeerListHandler, self).create(request)
class UserHandler(BaseHandler):
    """Detail endpoint for a user account, addressed by username."""
    # BUG FIX: must be a one-element tuple — ('GET') is just the string 'GET'.
    allowed_methods = ('GET',)
    fields = ('resource_uri', 'username')
    viewname = 'user_detail'
    model = authModels.User

    def read(self, request, user_id):
        """Return the user with the given username."""
        if user_id:
            user = authModels.User.get_by_username(user_id)
            return {'user': user}

    @staticmethod
    def resource_uri(user):
        # (viewname, args) pair consumed by piston's URI reverser.
        user_id = user.username
        return ('user', [user_id])
class BeerSubscriberHandler(BaseHandler):
    """Detail endpoint for a single user's subscription to one beer."""
    allowed_methods = ('GET', 'DELETE')
    fields = ('resource_uri', 'beer_name')
    viewname = 'beersubscriber_detail'
    model = models.BeerSubscriber
    def read(self, request, user_id, beer_id):
        if user_id:
            user = authModels.User.get_by_username(user_id)
            # Narrow the user's subscriptions down to the one for this beer.
            subscriber = user.beersubscriber_set.filter('beer =', db.Key.from_path('Beer', int(beer_id))).get()
            return {'subscriber': subscriber}
    def delete(self, request, user_id, beer_id):
        logging.info('BeerSubscriber.delete')
        if user_id and beer_id:
            user = authModels.User.get_by_username(user_id)
            subscriber = user.beersubscriber_set.filter('beer =', db.Key.from_path('Beer', int(beer_id))).get()
            subscriber.delete()
            return rc.DELETED
        else:
            return rc.BAD_REQUEST
    @staticmethod
    def resource_uri(beersubscriber):
        # (viewname, args) pair consumed by piston's URI reverser.
        user_id = beersubscriber.user.username
        beer_id = beersubscriber.beer.key().id()
        return ('beersubscriber', [user_id, beer_id])
class BeerSubscriberListHandler(BaseHandler):
    """Collection endpoint for one user's beer subscriptions."""
    allowed_methods = ('GET', 'POST')
    viewname = 'beersubscriber_list'

    def read(self, request, user_id):
        """List every beer subscription belonging to the user."""
        if user_id:
            account = authModels.User.get_by_username(user_id)
            return {'subscribers': account.beersubscriber_set}

    def create(self, request, user_id):
        """Subscribe the user to the beer named by the POSTed beer_id."""
        #TODO: check for duplicates before adding
        beer_id = request.POST['beer_id']
        if not (user_id and beer_id):
            logging.info('no user_id or beer_id')
            return rc.BAD_REQUEST
        logging.info('user_id: %s | beer_id: %s', user_id, beer_id)
        account = authModels.User.get_by_username(user_id)
        brew = models.Beer.get_by_id(int(beer_id))
        subscription = models.BeerSubscriber(beer=brew, user=account)
        subscription.update_ref_values()
        subscription.put()
        return rc.CREATED
class BarSubscriberHandler(BaseHandler):
    """Detail endpoint for a single user's subscription to one bar."""
    allowed_methods = ('GET', 'DELETE')
    fields = ('resource_uri', 'bar_name', 'user_name')
    viewname = 'barsubscriber_detail'
    model = models.BarSubscriber
    def read(self, request, user_id, bar_id):
        if user_id and bar_id:
            user = authModels.User.get_by_username(user_id)
            # Narrow the user's subscriptions down to the one for this bar.
            subscriber = user.barsubscriber_set.filter('bar =', db.Key.from_path('Bar', int(bar_id))).get()
            return {'subscriber': subscriber}
    def delete(self, request, user_id, bar_id):
        logging.info('BarSubscriber.delete')
        if user_id and bar_id:
            user = authModels.User.get_by_username(user_id)
            subscriber = user.barsubscriber_set.filter('bar =', db.Key.from_path('Bar', int(bar_id))).get()
            subscriber.delete()
            return rc.DELETED
        else:
            return rc.BAD_REQUEST
    @staticmethod
    def resource_uri(barsubscriber):
        # (viewname, args) pair consumed by piston's URI reverser.
        user_id = barsubscriber.user.username
        bar_id = barsubscriber.bar.key().id()
        return ('barsubscriber', [user_id, bar_id])
class BarSubscriberListHandler(BarSubscriberHandler):
    """List and create bar subscriptions for a single user."""
    allowed_methods = ('GET', 'POST')
    viewname = 'barsubscriber_list'

    def read(self, request, user_id):
        """Return all bar subscriptions belonging to *user_id*
        (implicitly None when no user_id was supplied, as before)."""
        if user_id:
            user = authModels.User.get_by_username(user_id)
            subscribers = user.barsubscriber_set
            return {'subscribers': subscribers}

    def create(self, request, user_id):
        """Subscribe *user_id* to the bar given by POST['bar_id'].

        Returns DUPLICATE_ENTRY when the subscription already exists,
        CREATED on success, BAD_REQUEST when either id is missing.
        """
        bar_id = request.POST['bar_id']
        if user_id and bar_id:
            logging.info('user_id: %s | bar_id: %s', user_id, bar_id)
            user = authModels.User.get_by_username(user_id)
            # Reject duplicates instead of silently inserting a second
            # row (mirrors the fix needed by the beer list handler).
            existing = user.barsubscriber_set.filter(
                'bar =', db.Key.from_path('Bar', int(bar_id))).get()
            if existing is not None:
                return rc.DUPLICATE_ENTRY
            bar = models.Bar.get_by_id(int(bar_id))
            barsubscriber = models.BarSubscriber(bar=bar, user=user)
            barsubscriber.update_ref_values()
            barsubscriber.put()
            return rc.CREATED
        else:
            logging.info('no user_id or bar_id')
            return rc.BAD_REQUEST
| greggian/TapdIn | app/handlers.py | Python | apache-2.0 | 8,408 |
from nectr.chat import consumers

# Django Channels routing table: every websocket connection whose path
# matches /ws-chat/ is handled by the ChatServer consumer.
channel_routing = [
    consumers.ChatServer.as_route(path=r"^/ws-chat/")
]
| nectR-Tutoring/nectr | config/routing.py | Python | mit | 111 |
#!/usr/bin/env python3
# TODO, anyway to better protect F?
import ctypes as ct
class _FlagBits(ct.Structure):
    # Bit layout of the Game Boy F register.  NOTE(review): assumes
    # little-endian bit-field allocation (first field = lowest bits):
    # low nibble always zero, then carry, half-carry, subtract, zero.
    _fields_ = [
        ('_zero', ct.c_ubyte, 4),
        ('c_flag', ct.c_ubyte, 1),
        ('h_flag', ct.c_ubyte, 1),
        ('n_flag', ct.c_ubyte, 1),
        ('z_flag', ct.c_ubyte, 1),
    ]

class _FlagByte(ct.Union):
    # Overlays the whole F byte (_direct_f) with its individual flag bits.
    _anonymous_ = ('_part',)
    _fields_ = [("_direct_f",ct.c_ubyte),("_part",_FlagBits)]

class _af(ct.Structure):
    # AF register pair: flag byte in the low byte, accumulator 'a' above it.
    _anonymous_ = ('_f',)
    _fields_ = [('_f',_FlagByte),('a',ct.c_ubyte)]
def _make_union(name, struct):
props = {'_anonymous_':('_part',),
'_fields_':[(name,ct.c_ushort),('_part',struct)]}
return type(name, (ct.Union,), props)
def _make_struct(name):
return type(name, (ct.Structure,), {
'_fields_':[(name[1],ct.c_ubyte),(name[0],ct.c_ubyte)]})
# Concrete register-pair unions: AF is special (its low byte is the flag
# byte struct), while BC/DE/HL pair two plain 8-bit registers.
_afUnion = _make_union( '_direct_af', _af)
_bcUnion = _make_union( 'bc', _make_struct('bc') )
_deUnion = _make_union( 'de', _make_struct('de') )
_hlUnion = _make_union( 'hl', _make_struct('hl') )
class Registers(ct.Structure):
    """Game Boy CPU register file.

    ctypes unions make the 8-bit registers (a, f, b, c, d, e, h, l)
    alias the 16-bit pairs (af, bc, de, hl); sp and pc are plain 16-bit
    words.
    """
    _anonymous_ = ('_af','_bc','_de','_hl')
    _fields_ = [('_af',_afUnion),('_bc',_bcUnion),('_de',_deUnion),
                ('_hl',_hlUnion),('sp',ct.c_ushort),('pc',ct.c_ushort)]
    _map_destination = ('b','c','d','e','h','l',None,'a') # 6th is (HL), ram address
    _map_refrence = ('bc','de','hl','sp') # AF exception for push/pop (doesn't use SP)

    def __init__(self):
        super().__init__()
        # Post-boot register values -- presumably the DMG power-on state;
        # TODO confirm against the boot ROM documentation.
        self.af = 0x01B0 # TODO these can probably just be changed to 0
        self.bc = 0x0013
        self.de = 0x00D8
        self.hl = 0x014D
        self.sp = 0x0000
        self.pc = 0x0000
        # Branch-condition predicates indexed 0-3: not-zero, zero,
        # not-carry, carry (the cc order used by conditional jumps).
        self.flag_code = ( lambda : not self.z_flag, lambda : self.z_flag,
                lambda : not self.c_flag, lambda : self.c_flag )

    @property
    def f(self):
        # Raw flag byte.
        return self._direct_f

    @f.setter
    def f(self, val):
        # The low nibble of F is hard-wired to zero; mask it on write.
        self._direct_f = val & 0xF0

    @property
    def af(self):
        return self._direct_af

    @af.setter
    def af(self, val):
        # Same low-nibble masking as the `f` setter, via the 16-bit pair.
        self._direct_af = val & 0xFFF0

    def set_z_iff(self, value):
        # Zero flag <- (value == 0).
        self.z_flag = 1 if value == 0 else 0

    def set_h_iff(self, new_value, old_value):
        # Half-carry flag <- low nibble decreased; for additive updates
        # this means the low nibble wrapped -- TODO confirm it is only
        # used after additions.
        self.h_flag = 1 if (new_value & 0x0F) < (old_value & 0x0F) else 0

    def set_c_iff(self, new_value, old_value):
        # Carry flag <- full value decreased (wrapped), same caveat.
        self.c_flag = 1 if new_value < old_value else 0

    def map_destination(self, index):
        """Return a read/write accessor for the 8-bit register encoded
        by *index* (0-7).  Index 6 is the (HL) memory operand and
        deliberately yields None (caller must handle RAM access)."""
        def accessor(value=None):
            if value is None:
                return getattr(self, Registers._map_destination[index])
            setattr(self, Registers._map_destination[index], value)
        if index != 6:
            return accessor

    def map_refrence(self, index):
        """Return a read/write accessor for the 16-bit pair encoded by
        *index* (0-3: bc, de, hl, sp)."""
        def accessor(value=None):
            if value is None:
                return getattr(self, Registers._map_refrence[index])
            setattr(self, Registers._map_refrence[index], value)
        return accessor
| aanunez/bbboy | bbboy/registers.py | Python | gpl-3.0 | 2,960 |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.4.1"

# Useful for very coarse version differentiation.
# These two flags select the Py2 or Py3 code path everywhere below.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Type tuples and the max container size, whose names/values differ
# between Python 2 and 3.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe: len() raises OverflowError when __len__ exceeds
        # Py_ssize_t, which reveals a 32-bit build.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot.

    __import__ returns the top-level package, so the fully-qualified
    module is pulled out of sys.modules instead.
    """
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves a moved object on first access, caches
    the result on the accessed instance, then removes itself."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result

class MovedModule(_LazyDescr):
    """Lazy reference to a module renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # When no Py3 name is given the move kept its name.
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between modules (and
    possibly changed name)."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back: new attr name -> old attr name -> move name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""

# Catalogue of renames between Python 2 and 3; each entry becomes a lazy
# descriptor on _MovedItems below.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Install every move as a class attribute (descriptor) on _MovedItems.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

# Expose the moves both as this module's `moves` attribute and as an
# importable pseudo-module in sys.modules.
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
# Each Module_six_moves_urllib_* class below mirrors one Python 3
# urllib submodule.  An instance of each is registered in sys.modules
# under BOTH spellings ("...urllib_parse" and "...urllib.parse") so both
# import styles work.
class Module_six_moves_urllib_parse(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_parse"""

_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")

class Module_six_moves_urllib_error(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_error"""

_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")

class Module_six_moves_urllib_request(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_request"""

_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")

class Module_six_moves_urllib_response(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_response"""

_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")

class Module_six_moves_urllib_robotparser(types.ModuleType):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""

_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ +
                                                      ".moves.urllib_robotparser")
sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ +
                                                      ".moves.urllib.robotparser")

class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    # Attach the five pseudo-submodules registered above.
    parse = sys.modules[__name__ + ".moves.urllib_parse"]
    error = sys.modules[__name__ + ".moves.urllib_error"]
    request = sys.modules[__name__ + ".moves.urllib_request"]
    response = sys.modules[__name__ + ".moves.urllib_response"]
    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]

sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)

def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The move may already have been resolved and cached on the
        # module instance itself; fall back to its __dict__.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Version-specific attribute names consulted by the accessor helpers
# (get_method_function, iterkeys, etc.) further down in this module.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    _iterlists = "lists"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"
# next() builtin is missing on very old Pythons; fall back to calling
# the iterator's .next() method.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator

# callable() is missing on some Python 3 versions; emulate it via an
# __call__ lookup on the MRO when the builtin is absent.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    # Py3 has no unbound methods; a function is returned unchanged, and
    # MethodType binds directly.
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    # Base class giving Py2 classes a .next() that delegates to the
    # Py3-style __next__.
    class Iterator(object):
        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")

# Uniform accessors for method/function internals, built from the
# version-specific attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over *d*'s keys (iterkeys on Py2, keys on Py3)."""
    keys_method = getattr(d, _iterkeys)
    return iter(keys_method(**kw))

def itervalues(d, **kw):
    """Return an iterator over *d*'s values."""
    values_method = getattr(d, _itervalues)
    return iter(values_method(**kw))

def iteritems(d, **kw):
    """Return an iterator over *d*'s (key, value) pairs."""
    items_method = getattr(d, _iteritems)
    return iter(items_method(**kw))

def iterlists(d, **kw):
    """Return an iterator over *d*'s (key, [values]) pairs (multidicts)."""
    lists_method = getattr(d, _iterlists)
    return iter(lists_method(**kw))
# Byte/text helpers: b() and u() emulate the literals that differ
# between Py2 and Py3, plus byte-wise indexing/iteration shims.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s

    def u(s):
        return unicode(s, "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])

    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# exec/print/raise changed syntax between Py2 and Py3, so the Py2
# variants are defined via strings or reimplementation.
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    # Py2-only `raise tp, value, tb` syntax must be hidden in a string.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        # If any argument or separator is unicode, the whole line is
        # written as unicode to avoid implicit coercion errors.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # Manufacture a temporary class directly through the metaclass;
    # deriving from it gives the subclass that metaclass on both
    # Python 2 and 3.
    namespace = {}
    return meta("NewBase", bases, namespace)
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Re-create the class through *metaclass*, carrying over its
        # body but dropping descriptors the new class will regenerate.
        body = dict(cls.__dict__)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        for slot_name in body.get('__slots__', ()):
            body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
# Print the report-card scores from highest to lowest:
# 1. open the file
# 2. split each line into name and score
# 3. collect (name, score) pairs
# 4. sort numerically by score, highest first
# 5. print the scores
#
# NOTE(review): the original keyed a dict by the score string, so any two
# students sharing a score overwrote each other, and sorting the string
# keys was lexicographic (so "9" ranked above "85").  Both fixed below;
# the file is also closed reliably via `with`.
scores = []
with open("report_card") as results:
    for line in results:
        (name, score) = line.split()
        scores.append((name, int(score)))

for name, score in sorted(scores, key=lambda pair: pair[1], reverse=True):
    print(name + " " + str(score))
#!/usr/bin/python
import StringIO
import urllib
import urllib2
from lxml import etree
class NcaaGrabber:
    """Scrapes NCAA football pages on web1.ncaa.org for team lists,
    per-team statistics, schedule location and weekly results.

    Python 2 code (urllib2/StringIO) using lxml for HTML parsing.
    """

    def __init__(self):
        self.ncaaUrl = 'http://web1.ncaa.org'
        self.ncaaStatsSite = self.ncaaUrl+'/football/exec/rankingSummary'
        # The '%d' placeholders take the season year.
        self.ncaaTeamListBase = self.ncaaUrl+'/mfb/%d/Internet/ranking_summary'
        self.ncaaWeeklyBase = self.ncaaUrl+'/mfb/%d/Internet/worksheets'
        self.fbsDiv = '/DIVISIONB.HTML'
        self.fcsDiv = '/DIVISIONC.HTML'

    def _divisionUrl(self, base, division, year):
        # Compose the per-year URL and append the division page name.
        url = base % year
        if division == 'fbs':
            return url + self.fbsDiv
        return url + self.fcsDiv

    def _fetchTree(self, url):
        # Download *url* and parse the HTML into an lxml element tree.
        response = urllib2.urlopen(url)
        responseHtml = response.read()
        htmlParser = etree.HTMLParser()
        return etree.parse(StringIO.StringIO(responseHtml), htmlParser)

    def getTeams(self, division, year):
        """Return {team name: org id} for every team linked from the
        division's ranking-summary page.  Org id stays -1 when the link
        carries no numeric org= query parameter."""
        fullUrl = self._divisionUrl(self.ncaaTeamListBase, division, year)
        htmlTree = self._fetchTree(fullUrl)
        mainTablePaths = htmlTree.xpath('//body/table')
        linkPaths = mainTablePaths[0].xpath('.//td/a')
        data = {}
        for link in linkPaths:
            team = link.text
            org = -1
            for linkStrPart in link.get('href').split('&'):
                if linkStrPart.startswith('org='):
                    linkStrPart = linkStrPart.replace('org=', '')
                    if linkStrPart.isdigit():
                        org = linkStrPart
            data[team] = org
        return data

    def getStats(self, team, year, week):
        """Return {category: (value, rank)} scraped from the team
        rankings table for the given org id, year and week."""
        data = {'org': team, 'week': week, 'year': year}
        fullUrl = self.ncaaStatsSite + '?' + urllib.urlencode(data)
        htmlTree = self._fetchTree(fullUrl)
        # Skip the first four header rows of the rankings table.
        teamTableRows = htmlTree.xpath(
            '//table[@id="teamRankings"]/tr[position()>4]')
        stats = {}
        for statRow in teamTableRows:
            dataCells = statRow.xpath('./td')
            if len(dataCells) < 1:
                continue
            category = dataCells[0].xpath('./a')[0].text
            value = dataCells[2].text
            # Strip leading 'T'/'-' characters, i.e. the "T-" tie marker.
            rank = dataCells[1].text.lstrip('T-')
            stats[category] = (value, rank)
        return stats

    def isHomeGame(self, team, year, week):
        """Classify the team's most recent scheduled game:
        0 = away, 1 = home, 2 = neutral site ('^' marker); False when no
        classification was made.  The two identical branches of the
        original (link text vs. plain cell text) are merged here."""
        data = {'org': team, 'week': week, 'year': year}
        fullUrl = self.ncaaStatsSite + '?' + urllib.urlencode(data)
        print(fullUrl)
        htmlTree = self._fetchTree(fullUrl)
        scheduleTableRows = htmlTree.xpath(
            '//table[@id="schedule"]/tr/td[position()=1]/a/../../td[position()=2]')
        lastScheduleRow = scheduleTableRows[-1]
        isHome = False
        if lastScheduleRow is not None:
            if lastScheduleRow.text is None:
                # The opponent cell holds a link; its text carries the
                # location string.
                gameLocation = lastScheduleRow.xpath('./a')[0].text
            else:
                gameLocation = lastScheduleRow.text
            # Home games are listed in upper case without an '@';
            # a '^' marks a neutral site.
            if gameLocation.isupper() and gameLocation.find("@") < 0:
                isHome = 1 if gameLocation.find("^") < 0 else 2
            else:
                isHome = 0
        return isHome

    def getNumWeeks(self, division, year):
        """Return the number of weekly worksheet rows available for the
        division/year (header row excluded)."""
        fullUrl = self._divisionUrl(self.ncaaWeeklyBase, division, year)
        htmlTree = self._fetchTree(fullUrl)
        tableRowArr = htmlTree.xpath('//body/table/tr')
        return len(tableRowArr) - 1

    def processWeekly(self, division, year, week, teams):
        """Return [(org1, org2, result), ...] for every game of the
        given week whose first team appears in *teams*.

        *teams* is the {name: org} mapping from getTeams(); org2 is None
        when the opponent is not in *teams*.

        NOTE(review): the original file also defined a dead 3-argument
        processWeekly above this one (Python does not overload, so the
        later def always won, and the dead one referenced an undefined
        name `team`).  The dead definition has been removed.
        """
        schedule = []
        week = week - 1
        indexUrl = self._divisionUrl(self.ncaaWeeklyBase, division, year)
        htmlTree = self._fetchTree(indexUrl)
        tableRowArr = htmlTree.xpath('//body/table/tr')
        # Row 0 is the header; week N is row N+1.  Follow its link to
        # the actual worksheet page.
        weekRow = tableRowArr[week+1]
        weekLink = weekRow.find('td').find('a')
        weekUrl = (self.ncaaWeeklyBase + '/' + weekLink.values()[0]) % year
        htmlTree = self._fetchTree(weekUrl)
        trList = htmlTree.xpath('//body/table[@width="80%"]/tr')
        for tr in trList[1:]:
            tds = tr.findall('td')
            if len(tds) > 2:
                team1 = tds[0].find('a').text
                team2 = tds[1].text
                result = tds[3].text if len(tds) > 3 else ""
                if team1 not in teams:
                    continue
                org2 = teams[team2] if team2 in teams else None
                schedule.append((teams[team1], org2, result))
        return schedule
| cbuntain/ncaa_football_predictor | Grabber/StatsClass.py | Python | bsd-2-clause | 7,312 |
import pyglet
from random import random as r
from board import Board
from math import floor

# --- Game configuration and module state ---------------------------------
cursor = None
xi, yi = 30, 30                    # board dimensions in cells
x, y = xi, yi
lvl = 5                            # difficulty level passed to Board
starts = True
mines = Board(xi, yi, lvl)         # the live game board
flag_mode = False                  # toggled by the F key handler
brd = {(_x,_y) for _x in range(x) for _y in range(y)}
scale = 20                         # pixel size of one cell

# --- Colors (GL colors are 0-1 floats; text colors are 0-255 RGBA) -------
black = 0, 0, 0, 255
white = 255, 255, 255
purple = 0.5, 0, 0.5
orange = 1, 0.5, 0
red = 255, 0, 0
default = white
number_sq = 0.5, 0.5, 0.5          # revealed numbered cell
zero_sq = 0.25, 0.25, 0.25         # revealed empty cell
bomb_sq = red
unclicked_sq = 0.75, 0.75, 0.75
flag_sq = orange

# Short aliases for the GL drawing entry points used throughout.
set_live_color = pyglet.gl.glColor3f
gl_draw_sq = pyglet.graphics.draw_indexed
window = pyglet.window.Window(width=(x+1)*scale, height=(y+1)*scale)
# Shared quad parameters: vertex count, vertex format, triangle indices.
four, gl_flag, indices = 4, 'v2i2', (0, 1, 2, 0, 2, 3)
vertices = 0,0, x*scale+scale,0, x*scale+scale,y*scale+scale, 0,y*scale+scale
# Paint the whole window in the unclicked color once at startup.
set_live_color(*unclicked_sq)
gl_draw_sq(four, pyglet.gl.GL_TRIANGLES, indices, (gl_flag, vertices))
def draw_square(point, size=1, color=white):
    """Fill the grid cell at *point* (scaled by *size*) with *color*."""
    set_live_color(*color)
    left, bottom = point[0] * size, point[1] * size
    right, top = left + size, bottom + size
    corners = left,bottom, right,bottom, right,top, left,top
    gl_draw_sq(four, pyglet.gl.GL_TRIANGLES, indices, (gl_flag, corners))
def draw_text(label="Txt", point=(10,10), size=1, color=black):
    """Draw *label* centered-ish inside the cell at *point* (grid coords,
    converted to pixels via the module-level `scale`)."""
    x, y = point[0] * scale + scale/4, point[1] * scale + scale/4
    font = 'Sans'
    # NOTE(review): the `label` parameter is shadowed by the Label object
    # below; harmless, but renaming one of them would be clearer.
    label = pyglet.text.Label(label, font_name=font, font_size=size, x=x, y=y, color=color)
    label.draw()
def process_board(board, start):
    """Redraw everything newly revealed on *board*: zeros, numbers and
    flags; bombs are drawn only once the game is over (no unclicked
    squares left)."""
    #set_live_color(*zero_sq)
    #gl_draw_sq(four, pyglet.gl.GL_TRIANGLES, indices, (gl_flag, vertices))
    these_nums, these_zeros = get_visible_sqs(board)
    _draw_zeros(board, these_zeros)
    _draw_nums(board, these_nums)
    _draw_flags(board)
    if not board.unclicked_squares:
        _draw_bombs(board)

def _draw_nums(board, pts):
    # Draw each revealed numbered cell and consume it from board.numbers
    # so it is not redrawn on the next pass.
    for pt in pts:
        number = board.numbers[pt]
        draw_square(pt, scale, number_sq)
        draw_text(str(number), pt, size=8, color=black)
        board.numbers.pop(pt)

def _draw_zeros(board, pts):
    # Draw each revealed empty cell and consume it from board.blank_board.
    for pt in pts:
        draw_square(pt, scale, zero_sq)
        board.blank_board.remove(pt)

def _draw_bombs(board):
    # Reveal every bomb (end-of-game only).
    for bomb in board.bombs:
        draw_square(bomb, scale, bomb_sq)

def _draw_flags(board):
    for flag in board.flags:
        draw_square(flag, scale, flag_sq)

def _draw_unclicked(board):
    # Repaint all still-hidden squares.
    for sq in board.unclicked_squares:
        draw_square(sq, scale, unclicked_sq)

def get_visible_sqs(board):
    """Return (numbered cells, empty cells) that are no longer hidden."""
    draw_these_nums = board.numbers.keys() - board.unclicked_squares
    draw_these_zeros = board.blank_board - board.unclicked_squares
    return draw_these_nums, draw_these_zeros
def update_title(dt, board):
    """Clock callback (1 Hz): tick the timer while a round is running and
    show mines/flags/time in the window title."""
    if not board.start:
        return
    if board.round:
        board.time += 1
    str_mines = "Mines: " + str(len(board.bombs))
    str_flags = "Flags: " + str(len(board.flags))
    str_time = "Time: " + str(board.time)
    window.set_caption(' '.join((str_mines, str_flags, str_time)))
def draw_clicks(board):
    """Refresh the display after a click and announce win/lose when the
    game has ended.

    NOTE(review): process_board is called with the global `mines`, not
    the *board* parameter -- they are the same object today, but one of
    the two spellings should be picked.
    """
    if not board.round:
        board.start_game()
    _draw_unclicked(board)
    process_board(mines, starts)
    if not board.start:
        if board.win:
            window.set_caption(":) You win. Time: " + str(board.time))
        else:
            window.set_caption(":( You lose.")
@window.event
def on_mouse_motion(x, y, dx, dy):
    # Intentionally a no-op: mouse movement is ignored.
    pass

def on_draw(*args): pass  # placeholder; drawing happens in the handlers
@window.event
def on_key_press(symbol, modifiers):
    """Toggle flag-placement mode when the F key is pressed.

    BUG FIX: the original took ``flag_mode`` as a default argument and
    rebound only that local name, so the module-level ``flag_mode``
    consulted by on_mouse_press() never actually changed.  Rebinding the
    module global makes the F key work.  (pyglet invokes this handler
    with exactly (symbol, modifiers), so dropping the extra parameter is
    safe.)
    """
    global flag_mode
    if symbol == pyglet.window.key.F:
        flag_mode = not flag_mode
@window.event
def on_mouse_press(x, y, button, modifiers, board=mines):
    """Mouse dispatch: right button (4) reveals a square, left button (1)
    toggles a flag, middle button (2) resets the board.

    NOTE(review): `start_button` below is assigned but never used, so
    flag mode currently has no effect here -- possibly `button = 4` was
    intended.  The *board* parameter is likewise unused (the global
    `mines` is referenced instead), and `mines.__init__(xi, xi, lvl)`
    passes xi twice where (xi, yi) looks intended -- confirm before
    changing.
    """
    point = floor(x / scale), floor(y / scale)
    if flag_mode and button == 1:
        start_button = 4
    if button == 4:
        if point not in mines.flags:
            mines.select_sq(point)
    elif button == 1:
        truth = mines.set_flag(point)
        if truth:
            # Flag removed: repaint the square as unclicked.
            draw_square(point, scale, unclicked_sq)
    elif button == 2:
        mines.__init__(xi, xi, lvl)
    draw_clicks(mines)
def main():
    """Schedule the initial board paint and the 1 Hz title update, then
    enter the pyglet event loop."""
    set_live_color(*unclicked_sq)
    pyglet.clock.schedule_once(lambda args: gl_draw_sq(four, pyglet.gl.GL_TRIANGLES, indices, (gl_flag, vertices)), 0.1)
    pyglet.clock.schedule_interval(update_title, 1, mines)
    pyglet.app.run()

if __name__ == "__main__":
    #pyglet.clock.schedule(on_draw)
    main()
| thismachinechills/minesqeeper | mines_pyglet.py | Python | gpl-3.0 | 4,042 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_config import cfg
from testtools import matchers
from keystone import exception
from keystone.tests.unit import test_v3
CONF = cfg.CONF
class CredentialBaseTestCase(test_v3.RestfulTestCase):
    """Shared helper for credential tests."""
    def _create_dict_blob_credential(self):
        """Store an ec2 credential whose blob is a raw dict (ref bug #1259584).

        Returns the JSON-serialized blob and the credential id so callers can
        verify the dict->JSON backwards-compatibility conversion.
        """
        access_secret = {"access": uuid.uuid4().hex,
                         "secret": uuid.uuid4().hex}
        cred_id = hashlib.sha256(access_secret['access']).hexdigest()
        cred = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        cred['id'] = cred_id
        # Deliberately store the blob as a dict *not* JSON, so the
        # dict->json workaround added for backwards compatibility in the
        # bugfix is actually exercised.
        cred['blob'] = access_secret
        cred['type'] = 'ec2'
        # Go straight through the DB API to bypass validation failure.
        self.credential_api.create_credential(cred_id, cred)
        return json.dumps(access_secret), cred_id
class CredentialTestCase(CredentialBaseTestCase):
    """Test credential CRUD."""
    def setUp(self):
        """Create one credential fixture owned by self.user/self.project."""
        super(CredentialTestCase, self).setUp()
        self.credential_id = uuid.uuid4().hex
        self.credential = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        self.credential['id'] = self.credential_id
        self.credential_api.create_credential(
            self.credential_id,
            self.credential)
    def test_credential_api_delete_credentials_for_project(self):
        """Deleting a project's credentials removes the setUp fixture."""
        self.credential_api.delete_credentials_for_project(self.project_id)
        # Test that the credential that we created in .setUp no longer exists
        # once we delete all credentials for self.project_id
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          credential_id=self.credential_id)
    def test_credential_api_delete_credentials_for_user(self):
        """Deleting a user's credentials removes the setUp fixture."""
        self.credential_api.delete_credentials_for_user(self.user_id)
        # Test that the credential that we created in .setUp no longer exists
        # once we delete all credentials for self.user_id
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          credential_id=self.credential_id)
    def test_list_credentials(self):
        """Call ``GET /credentials``."""
        r = self.get('/credentials')
        self.assertValidCredentialListResponse(r, ref=self.credential)
    def test_list_credentials_filtered_by_user_id(self):
        """Call ``GET /credentials?user_id={user_id}``."""
        # Create a credential for a different (random) user; it must not
        # show up in the filtered listing below.
        credential = self.new_credential_ref(
            user_id=uuid.uuid4().hex)
        self.credential_api.create_credential(
            credential['id'], credential)
        r = self.get('/credentials?user_id=%s' % self.user['id'])
        self.assertValidCredentialListResponse(r, ref=self.credential)
        for cred in r.result['credentials']:
            self.assertEqual(self.user['id'], cred['user_id'])
    def test_create_credential(self):
        """Call ``POST /credentials``."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
    def test_get_credential(self):
        """Call ``GET /credentials/{credential_id}``."""
        r = self.get(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id})
        self.assertValidCredentialResponse(r, self.credential)
    def test_update_credential(self):
        """Call ``PATCH /credentials/{credential_id}``."""
        ref = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        # PATCH body must not carry the immutable id.
        del ref['id']
        r = self.patch(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id},
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
    def test_delete_credential(self):
        """Call ``DELETE /credentials/{credential_id}``."""
        self.delete(
            '/credentials/%(credential_id)s' % {
                'credential_id': self.credential_id})
    def test_create_ec2_credential(self):
        """Call ``POST /credentials`` for creating ec2 credential."""
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is same as hash of access key id for
        # ec2 credentials
        # NOTE: hashlib.sha256 is fed a str here; this suite targets
        # Python 2 -- under Python 3 the access key would need .encode().
        self.assertEqual(r.result['credential']['id'],
                         hashlib.sha256(blob['access']).hexdigest())
        # Create second ec2 credential with the same access key id and check
        # for conflict.
        self.post(
            '/credentials',
            body={'credential': ref}, expected_status=409)
    def test_get_ec2_dict_blob(self):
        """Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
        r = self.get(
            '/credentials/%(credential_id)s' % {
                'credential_id': credential_id})
        self.assertEqual(expected_blob, r.result['credential']['blob'])
    def test_list_ec2_dict_blob(self):
        """Ensure non-JSON blob data is correctly converted."""
        expected_blob, credential_id = self._create_dict_blob_credential()
        list_r = self.get('/credentials')
        list_creds = list_r.result['credentials']
        list_ids = [r['id'] for r in list_creds]
        self.assertIn(credential_id, list_ids)
        for r in list_creds:
            if r['id'] == credential_id:
                self.assertEqual(expected_blob, r['blob'])
    def test_create_non_ec2_credential(self):
        """Call ``POST /credentials`` for creating non-ec2 credential."""
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is not same as hash of access key id for
        # non-ec2 credentials
        self.assertNotEqual(r.result['credential']['id'],
                            hashlib.sha256(blob['access']).hexdigest())
    def test_create_ec2_credential_with_missing_project_id(self):
        """Call ``POST /credentials`` for creating ec2
           credential with missing project_id.
        """
        ref = self.new_credential_ref(user_id=self.user['id'])
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        # Assert 400 status for bad request with missing project_id
        self.post(
            '/credentials',
            body={'credential': ref}, expected_status=400)
    def test_create_ec2_credential_with_invalid_blob(self):
        """Call ``POST /credentials`` for creating ec2
           credential with invalid blob.
        """
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        # Malformed JSON on purpose (stray "d" before the closing brace).
        ref['blob'] = '{"abc":"def"d}'
        ref['type'] = 'ec2'
        # Assert 400 status for bad request containing invalid
        # blob
        response = self.post(
            '/credentials',
            body={'credential': ref}, expected_status=400)
        self.assertValidErrorResponse(response)
    def test_create_credential_with_admin_token(self):
        # Make sure we can create credential with the static admin token
        ref = self.new_credential_ref(user_id=self.user['id'])
        r = self.post(
            '/credentials',
            body={'credential': ref},
            token=CONF.admin_token)
        self.assertValidCredentialResponse(r, ref)
class TestCredentialTrustScoped(test_v3.RestfulTestCase):
    """Test credential with trust scoped token."""
    def setUp(self):
        """Create a trustee user the trust tests can authenticate as."""
        super(TestCredentialTrustScoped, self).setUp()
        self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
        # create_user scrubs the password from the returned ref, so keep a
        # copy and restore it for later authentication requests.
        password = self.trustee_user['password']
        self.trustee_user = self.identity_api.create_user(self.trustee_user)
        self.trustee_user['password'] = password
        self.trustee_user_id = self.trustee_user['id']
    def config_overrides(self):
        """Enable the trust extension for this test class."""
        super(TestCredentialTrustScoped, self).config_overrides()
        self.config_fixture.config(group='trust', enabled=True)
    def test_trust_scoped_ec2_credential(self):
        """Call ``POST /credentials`` for creating ec2 credential."""
        # Create the trust
        ref = self.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.project_id,
            impersonation=True,
            expires=dict(minutes=1),
            role_ids=[self.role_id])
        del ref['id']
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r)
        # Get a trust scoped token
        auth_data = self.build_authentication_request(
            user_id=self.trustee_user['id'],
            password=self.trustee_user['password'],
            trust_id=trust['id'])
        r = self.v3_authenticate_token(auth_data)
        self.assertValidProjectTrustScopedTokenResponse(r, self.user)
        trust_id = r.result['token']['OS-TRUST:trust']['id']
        token_id = r.headers.get('X-Subject-Token')
        # Create the credential with the trust scoped token
        ref = self.new_credential_ref(user_id=self.user['id'],
                                      project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref},
            token=token_id)
        # We expect the response blob to contain the trust_id
        ret_ref = ref.copy()
        ret_blob = blob.copy()
        ret_blob['trust_id'] = trust_id
        ret_ref['blob'] = json.dumps(ret_blob)
        self.assertValidCredentialResponse(r, ref=ret_ref)
        # Assert credential id is same as hash of access key id for
        # ec2 credentials
        self.assertEqual(r.result['credential']['id'],
                         hashlib.sha256(blob['access']).hexdigest())
        # Create second ec2 credential with the same access key id and check
        # for conflict.
        self.post(
            '/credentials',
            body={'credential': ref},
            token=token_id,
            expected_status=409)
class TestCredentialEc2(CredentialBaseTestCase):
    """Test v3 credential compatibility with ec2tokens."""
    def setUp(self):
        super(TestCredentialEc2, self).setUp()
    def _validate_signature(self, access, secret):
        """Test signature validation with the access/secret provided."""
        signer = ec2_utils.Ec2Signer(secret)
        params = {'SignatureMethod': 'HmacSHA256',
                  'SignatureVersion': '2',
                  'AWSAccessKeyId': access}
        request = {'host': 'foo',
                   'verb': 'GET',
                   'path': '/bar',
                   'params': params}
        signature = signer.generate(request)
        # Now make a request to validate the signed dummy request via the
        # ec2tokens API.  This proves the v3 ec2 credentials actually work.
        sig_ref = {'access': access,
                   'signature': signature,
                   'host': 'foo',
                   'verb': 'GET',
                   'path': '/bar',
                   'params': params}
        r = self.post(
            '/ec2tokens',
            body={'ec2Credentials': sig_ref},
            expected_status=200)
        self.assertValidTokenResponse(r)
    def test_ec2_credential_signature_validate(self):
        """Test signature validation with a v3 ec2 credential."""
        ref = self.new_credential_ref(
            user_id=self.user['id'],
            project_id=self.project_id)
        blob = {"access": uuid.uuid4().hex,
                "secret": uuid.uuid4().hex}
        ref['blob'] = json.dumps(blob)
        ref['type'] = 'ec2'
        r = self.post(
            '/credentials',
            body={'credential': ref})
        self.assertValidCredentialResponse(r, ref)
        # Assert credential id is same as hash of access key id
        self.assertEqual(r.result['credential']['id'],
                         hashlib.sha256(blob['access']).hexdigest())
        cred_blob = json.loads(r.result['credential']['blob'])
        self.assertEqual(blob, cred_blob)
        self._validate_signature(access=cred_blob['access'],
                                 secret=cred_blob['secret'])
    def test_ec2_credential_signature_validate_legacy(self):
        """Test signature validation with a legacy v3 ec2 credential."""
        cred_json, credential_id = self._create_dict_blob_credential()
        cred_blob = json.loads(cred_json)
        self._validate_signature(access=cred_blob['access'],
                                 secret=cred_blob['secret'])
    def _get_ec2_cred_uri(self):
        # Legacy OS-EC2 extension endpoint for the fixture user.
        return '/users/%s/credentials/OS-EC2' % self.user_id
    def _get_ec2_cred(self):
        # Create (and return) an ec2 credential through the OS-EC2 API.
        uri = self._get_ec2_cred_uri()
        r = self.post(uri, body={'tenant_id': self.project_id})
        return r.result['credential']
    def test_ec2_create_credential(self):
        """Test ec2 credential creation."""
        ec2_cred = self._get_ec2_cred()
        self.assertEqual(self.user_id, ec2_cred['user_id'])
        self.assertEqual(self.project_id, ec2_cred['tenant_id'])
        self.assertIsNone(ec2_cred['trust_id'])
        self._validate_signature(access=ec2_cred['access'],
                                 secret=ec2_cred['secret'])
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        self.assertThat(ec2_cred['links']['self'],
                        matchers.EndsWith(uri))
    def test_ec2_get_credential(self):
        ec2_cred = self._get_ec2_cred()
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        r = self.get(uri)
        self.assertDictEqual(ec2_cred, r.result['credential'])
        self.assertThat(ec2_cred['links']['self'],
                        matchers.EndsWith(uri))
    def test_ec2_list_credentials(self):
        """Test ec2 credential listing."""
        self._get_ec2_cred()
        uri = self._get_ec2_cred_uri()
        r = self.get(uri)
        cred_list = r.result['credentials']
        self.assertEqual(1, len(cred_list))
        self.assertThat(r.result['links']['self'],
                        matchers.EndsWith(uri))
    def test_ec2_delete_credential(self):
        """Test ec2 credential deletion."""
        ec2_cred = self._get_ec2_cred()
        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
        cred_from_credential_api = (
            self.credential_api
            .list_credentials_for_user(self.user_id))
        self.assertEqual(1, len(cred_from_credential_api))
        self.delete(uri)
        self.assertRaises(exception.CredentialNotFound,
                          self.credential_api.get_credential,
                          cred_from_credential_api[0]['id'])
| jumpstarter-io/keystone | keystone/tests/unit/test_v3_credential.py | Python | apache-2.0 | 16,723 |
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/CutLines/E5 case
# Create Cut Lines for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("CutLines/E5")
# MED data file driving the Cut Lines presentations for this test case.
file = datadir + "hydro_sea_alv.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
# Build CUTLINES presentations for every field in the file and snapshot them.
CreatePrsForFile(myParavis, file, [PrsTypeEnum.CUTLINES], picturedir, pictureext)
| FedoraScientific/salome-paravis | test/VisuPrs/CutLines/E5.py | Python | lgpl-2.1 | 1,506 |
import json
import logging
from couchdbkit import ResourceNotFound
from django.utils.encoding import force_unicode
from django_countries.countries import COUNTRIES
from phonenumbers import COUNTRY_CODE_TO_REGION_CODE
from django.utils.translation import ugettext_lazy as _
from corehq.apps.accounting.utils import fmt_dollar_amount
from corehq.apps.hqwebapp.async_handler import BaseAsyncHandler
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.sms.mixin import SMSBackend
from corehq.apps.sms.util import get_backend_by_class_name
from corehq.apps.smsbillables.exceptions import SMSRateCalculatorError
from corehq.apps.smsbillables.models import SmsGatewayFeeCriteria, SmsGatewayFee, SmsUsageFee
# Sentinel country-code value meaning "no specific country matched".
NONMATCHING_COUNTRY = 'nonmatching'
logger = logging.getLogger('accounting')
class SMSRatesAsyncHandler(BaseAsyncHandler):
    # Async handler that computes the total per-SMS price for a given
    # gateway / country / direction combination.
    slug = 'sms_get_rate'
    allowed_actions = [
        'get_rate'
    ]
    @property
    def get_rate_response(self):
        gateway = self.data.get('gateway')
        try:
            backend = SMSBackend.get(gateway)
            backend_api_id = get_backend_by_class_name(backend.doc_type).get_api_id()
        except Exception as e:
            logger.error("Failed to get backend for calculating an sms rate "
                         "due to: %s" % e)
            raise SMSRateCalculatorError("Could not obtain connection information.")
        country_code = self.data.get('country_code')
        # The sentinel value means "look up the fee with no country filter".
        if country_code == NONMATCHING_COUNTRY:
            country_code = None
        direction = self.data.get('direction')
        gateway_fee = SmsGatewayFee.get_by_criteria(
            backend_api_id, direction, backend_instance=gateway,
            country_code=country_code,
        )
        usage_fee = SmsUsageFee.get_by_criteria(direction, self.request.domain)
        # Convert the gateway fee into USD; usage fees are assumed to be
        # USD already -- TODO confirm against SmsUsageFee.
        usd_gateway_fee = gateway_fee.amount / gateway_fee.currency.rate_to_default
        usd_total = usage_fee.amount + usd_gateway_fee
        return {
            'rate': _("%s per 160 character SMS") % fmt_dollar_amount(usd_total),
        }
class SMSRatesSelect2AsyncHandler(BaseAsyncHandler):
    # Async handler backing a select2 widget: lists the country codes for
    # which the chosen gateway/direction has fee criteria configured.
    slug = 'sms_rate_calc'
    allowed_actions = [
        'country_code',
    ]
    @property
    def country_code_response(self):
        gateway = self.data.get('gateway')
        try:
            backend = SMSBackend.get(gateway)
            backend_api_id = get_backend_by_class_name(backend.doc_type).get_api_id()
        except Exception:
            # Unknown/broken backend: no countries to offer.
            return []
        direction = self.data.get('direction')
        criteria_query = SmsGatewayFeeCriteria.objects.filter(
            direction=direction, backend_api_id=backend_api_id
        )
        country_codes = criteria_query.exclude(
            country_code__exact=None
        ).values_list('country_code', flat=True).distinct()
        final_codes = []
        countries = dict(COUNTRIES)
        for code in country_codes:
            # Map the numeric calling code to region code(s), then to a
            # human-readable country name (first region wins).
            cc = COUNTRY_CODE_TO_REGION_CODE.get(code)
            country_name = force_unicode(countries.get(cc[0])) if cc else ''
            final_codes.append((code, country_name))
        search_term = self.data.get('searchString')
        if search_term:
            # Match either the calling code prefix or the country name.
            search_term = search_term.lower().replace('+', '')
            final_codes = filter(
                lambda x: (str(x[0]).startswith(search_term)
                           or x[1].lower().startswith(search_term)),
                final_codes
            )
        final_codes = [(c[0], "+%s%s" % (c[0], " (%s)" % c[1] if c[1] else '')) for c in final_codes]
        # A criteria row with no country code means "any country" support.
        if criteria_query.filter(country_code__exact=None).exists():
            final_codes.append((
                NONMATCHING_COUNTRY,
                _('Any Country (Delivery not guaranteed via connection)')
            ))
        return final_codes
    def _fmt_success(self, response):
        # Serialize (id, text) pairs into the select2 results format.
        success = json.dumps({
            'results': [{
                'id': r[0],
                'text': r[1],
            } for r in response]
        }, cls=LazyEncoder)
        return success
| SEL-Columbia/commcare-hq | corehq/apps/smsbillables/async_handlers.py | Python | bsd-3-clause | 4,044 |
# Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class AbstractError(Exception):
    """Root of the traffexam exception hierarchy."""
    pass
class InputDataError(AbstractError):
    """Base class for errors triggered by invalid user-supplied input."""

    def __init__(self, message, *extra):
        super().__init__(message, *extra)

    def __str__(self):
        message = self.args[0]
        return message
class InsufficientPrivilegesError(InputDataError):
    """Raised when root privileges are required but missing."""

    def __init__(self, *extra):
        message = ('Not enough privileges. Need root right to be able to '
                   'manage network interface.')
        super().__init__(message, *extra)
class InvalidBindAddressError(InputDataError):
    """Raised when the supplied BIND address cannot be used."""

    def __init__(self, value, details, *extra):
        super().__init__(value, details, *extra)

    def __str__(self):
        value, details = self.args[0], self.args[1]
        return 'Invalid BIND address value {0!r}: {1}.'.format(value, details)
class InvalidTargetIfaceError(InputDataError):
    """Raised when the target network interface is invalid."""

    def __init__(self, iface, details, *extra):
        super().__init__(iface, details, *extra)

    def __str__(self):
        iface, details = self.args[0], self.args[1]
        return 'Invalid target network interface {0!r}: {1}.'.format(
            iface, details)
class InvalidLoggingConfigError(InputDataError):
    """Raised when the logging configuration cannot be applied."""

    def __init__(self, config, details, *extra):
        super().__init__(config, details, *extra)

    def __str__(self):
        config, details = self.args[0], self.args[1]
        return 'Invalid logging config {0!r}: {1}'.format(config, details)
class PidFileError(AbstractError):
    """Base error for pid-file handling problems.

    Bug fixes versus the original:
    * ``__init__`` silently dropped ``*extra``, which broke subclasses such
      as ``PidFileBusyError`` whose ``__str__`` reads ``self.args[1]``.
    * the format string mixed manual (``{0!r}``) and automatic (``{}``)
      field numbering, which raises ``ValueError`` at format time.
    """

    def __init__(self, path, *extra):
        # Forward every positional argument so subclass __str__ methods can
        # rely on self.args beyond the path.
        super().__init__(path, *extra)

    def __str__(self):
        return 'Can\'t occupy pid file {0!r}: {1}'.format(
            self.args[0], self.__cause__)
class PidFileLockError(PidFileError):
    """Raised when the exclusive lock on the pid file cannot be acquired."""

    def __str__(self):
        return 'Can\'t acquire exclusive lock on pid file {0!r}'.format(
            self.args[0])
class PidFileBusyError(PidFileError):
    """Raised when the pid file already belongs to a live process."""

    def __init__(self, path, pid, *extra):
        super().__init__(path, pid, *extra)

    def __str__(self):
        return 'Pid file {0!r} exists and point to alive process {1}'.format(
            self.args[0], self.args[1])
class PidFileStolenError(PidFileBusyError):
    """Raised when another process has taken over our pid file."""

    def __str__(self):
        path, pid = self.args[0], self.args[1]
        return 'Pid file {0!r} have been stolen by process {1}'.format(
            path, pid)
class ServiceError(AbstractError):
    """Base class for service-pool related errors."""
    pass
class ServiceLookupError(ServiceError):
    """Raised when a pool has no record under the requested key."""

    def __init__(self, pool, key, *extra):
        super().__init__(pool, key, *extra)

    def __str__(self):
        pool, key = self.args[0], self.args[1]
        return 'Pool {0!r} have no record with key {1!r}'.format(pool, key)
class ServiceCreateError(ServiceError):
    """Raised when an item cannot be added into a pool."""

    def __init__(self, pool, subject, *extra):
        super().__init__(pool, subject, *extra)

    def __str__(self):
        template = ('Can\'t add item {args[1]!r} into {args[0]!r} due to '
                    'error - {cause}')
        return template.format(args=self.args, cause=self.__cause__)
class ServiceDeleteError(ServiceError):
    """Raised when deleting an item from a pool fails."""

    def __init__(self, pool, key, item, *extra):
        super().__init__(pool, key, item, *extra)

    def __str__(self):
        pool, key = self.args[0], self.args[1]
        return 'Error during delete operation in {0!r} for key {1!r}'.format(
            pool, key)
class ServiceCreateCollisionError(ServiceCreateError):
    """Raised when adding an item collides with an existing pool entry."""

    def __init__(self, pool, subject, collision, *extra):
        super().__init__(pool, subject, collision, *extra)

    def __str__(self):
        template = ('Can\'t add item {args[1]} into {args[0]!r} due to '
                    'collision with existing object {args[2]}')
        return template.format(args=self.args)
class RegistryLookupError(AbstractError):
    """Raised when a registry context holds no resource for a class."""

    def __init__(self, context, klass, *extra):
        super().__init__(context, klass, *extra)

    def __str__(self):
        context, klass = self.args[0], self.args[1]
        return '{0!r} don\'t have resource belong to {1}'.format(
            context, klass)
class SystemResourceError(AbstractError):
    """Base error for failed system resource manipulations.

    Bug fix versus the original: the keyword argument was misspelled
    ``cuase=`` while the template references ``{cause}``, so ``__str__``
    raised ``KeyError: 'cause'`` instead of producing a message.
    """

    def __init__(self, kind, name, *extra):
        super().__init__(kind, name, *extra)

    def __str__(self):
        return ('System resource manipulation error kind={0} name={1!r}. '
                '{cause}').format(*self.args, cause=self.__cause__)
class SystemResourceExistsError(SystemResourceError):
    """Raised when creating a system resource that already exists."""

    def __str__(self):
        kind, name = self.args[0], self.args[1]
        return 'System resource already exists kind={0} name={1!r}'.format(
            kind, name)
class SystemResourceNotFoundError(SystemResourceError):
    """Raised when a required system resource is absent."""

    def __str__(self):
        kind, name = self.args[0], self.args[1]
        return 'System resource not found kind={0} name={1!r}'.format(
            kind, name)
class SystemResourceDamagedError(SystemResourceError):
    """Raised when a system resource exists but is unusable."""

    def __init__(self, kind, name, details, *extra):
        super().__init__(kind, name, details, *extra)

    def __str__(self):
        kind, name, details = self.args[0], self.args[1], self.args[2]
        return 'Can\'t operate with resource kind={0} name={1!r}: {2}'.format(
            kind, name, details)
class SystemCompatibilityError(SystemResourceError):
    """Raised when the host system cannot support the requested setup."""

    def __init__(self, kind, name, details):
        super().__init__(kind, name, details)

    def __str__(self):
        kind, name, details = self.args[0], self.args[1], self.args[2]
        return ('Unable to configure system resource kind={0} name={1!r}'
                ' - {2}').format(kind, name, details)
class SubprocessError(AbstractError):
    """Raised when a child process exits with an error."""

    def __init__(self, cmd, exit_code, output):
        super().__init__(cmd, exit_code, output)

    def __str__(self):
        cmd, exit_code, output = self.args[0], self.args[1], self.args[2]
        return ('Child process have failed - cmd={0!r} exit_code={1} '
                'output={2!r}').format(cmd, exit_code, output)
| jonvestal/open-kilda | src-python/lab-service/traffexam/kilda/traffexam/exc.py | Python | apache-2.0 | 5,615 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# MonsterTV Regex de streamingfreetv
# Version 0.1 (17.10.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
# Kodi/XBMC add-on metadata, read from the add-on manifest at import time.
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
def streamingfreetv0(params):
    # Resolve a streamingfreetv.net channel: parse the space-separated RTMP
    # spec in params["url"], scrape the embed page for the tokenized stream
    # URL and hand the rebuilt RTMP URL to the player.
    plugintools.log("[MonsterTV-0.3.0].streamingfreetv "+repr(params))
    url_user = {}
    # Build a dict out of the "key=value" fragments of the url parameter.
    url = params.get("url")
    url_extracted = url.split(" ")
    for entry in url_extracted:
        if entry.startswith("rtmp"):
            entry = entry.replace("rtmp=", "")
            url_user["rtmp"]=entry
        elif entry.startswith("playpath"):
            entry = entry.replace("playpath=", "")
            url_user["playpath"]=entry
        elif entry.startswith("swfUrl"):
            entry = entry.replace("swfUrl=", "")
            url_user["swfurl"]=entry
        elif entry.startswith("pageUrl"):
            entry = entry.replace("pageUrl=", "")
            url_user["pageurl"]=entry
        elif entry.startswith("token"):
            entry = entry.replace("token=", "")
            url_user["token"]=entry
        elif entry.startswith("referer"):
            entry = entry.replace("referer=", "")
            url_user["referer"]=entry
    plugintools.log("URL_user dict= "+repr(url_user))
    pageurl = url_user.get("pageurl")
    body = gethttp_headers(pageurl)
    plugintools.log("body= "+body)
    # Example of the markup being scraped:
    #<script type='text/javascript'> width=650, height=400, channel='sysf', e='1';</script><script type='text/javascript' src='http://privado.streamingfreetv.net/embedPlayer.js'></script>
    pattern = plugintools.find_single_match(body, 'width=(.*?)</script>')
    width= plugintools.find_single_match(pattern, '\'(.*?)\' ')
    print width
    height= plugintools.find_single_match(pattern, 'height=\'(.*?)\' ')
    print height
    playpath = url_user.get("playpath")
    src=plugintools.find_single_match(pattern, 'src=(.*?) ')
    # Rebuild the embed player URL from the scraped dimensions, e.g.:
    #http://privado.streamingfreetv.net/embed/embed.php?channel=sysf&w=650&h=400
    pageurl = 'http://privado.streamingfreetv.net/embed/embed.php?channel='+playpath+'&w='+width+'&h='+height
    plugintools.log("pageurl= "+pageurl)
    url_user["pageurl"]=pageurl
    # Pull the tokenized rtmp streamer and the player SWF out of markup like:
    #<param name='flashvars' value='file=sysf&streamer=rtmp://94.23.247.151/redirect?token=0ngYvHGwJks-BswfZHOwTwExpired=1417266581&autostart=false&skin=http://privado.streamingfreetv.net/jw/classic.zip'>
    rtmp=plugintools.find_single_match(body, 'streamer=(.*?)&autostart')
    swfurl=plugintools.find_single_match(body, '<param name=\'movie\' value=\'(.*?)\'')
    print 'swfurl',swfurl
    print 'rtmp',rtmp
    url = rtmp+' playpath='+playpath+' swfUrl='+swfurl+' pageUrl='+pageurl
    #plugintools.log("url= "+url)
    plugintools.play_resolved_url(url)
# Request the page that will hand us the streaming token.
def gethttp_headers(pageurl):
    """Fetch *pageurl* with a desktop User-Agent, using the page itself as Referer."""
    request_headers = [
        ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"],
        ["Referer", pageurl],
    ]
    body, response_headers = plugintools.read_body_and_headers(pageurl, headers=request_headers)
    return body
def gethttp_referer_headers(url_user):
pageurl = url_user.get("pageurl")
referer = url_user.get("referer")
print 'referer',referer
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer", referer])
body,response_headers = plugintools.read_body_and_headers(pageurl, headers=request_headers)
#plugintools.log("body= "+body)
return body
| manusev/plugin.video.kuchitv | resources/regex/streamingfreetv.py | Python | gpl-2.0 | 4,423 |
'''
Created on Jul 7, 2014
@author: viejoemer
How can you test whether every element of one set is contained in another set in Python?
¿Cómo comprobar si los elementos de un set están en otro set en Python?
issuperset(other)
set >= other
Test whether every element in other is in the set.
set > other
Test whether the set is a proper superset of other, that is,
set >= other and set != other.
'''
# Sample data: s holds the digits 0-9, s2 a two-element subset of s.
s = set(range(10))
print(s)
s2 = {1, 2}
print(s2)
# s contains every element of s2 -> True.
r = s.issuperset(s2)
print(r)
# s2 does not contain every element of s -> False.
r = s2.issuperset(s)
print(r)
# The >= operator is the operator form of issuperset().
print('Verify with a if s >= s2: True' if s >= s2 else 'Verify with a if s >= s2: False')
print('Verify with a if s2 >= s: True' if s2 >= s else 'Verify with a if s2 >= s: False')
#Verify if the item from a set is in other with a if.
if s > s2:
print('Verify with a if s > s2: True')
else:
print('Verify with a if s > s2: False') | OxPython/Python_set_issuperset | src/set_issuperset.py | Python | epl-1.0 | 1,260 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2015,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Helper classes for network testing """
from functools import total_ordering
import os
from csv import DictReader
import six.moves.cPickle as pickle # pylint: disable=F0401
from six import itervalues, text_type
from ipaddress import (IPv4Network, IPv4Address, IPv6Network, IPv6Address,
ip_network, ip_address)
# Ranges for dynamic network allocation. The idea is to allocate a /N network
# inside 10.N.0.0/16.
# Maps prefix length N -> the 10.N.0.0/16 container network.
SUBNET_RANGE = {
    24: IPv4Network(u'10.24.0.0/16'),
    25: IPv4Network(u'10.25.0.0/16'),
    26: IPv4Network(u'10.26.0.0/16'),
    27: IPv4Network(u'10.27.0.0/16'),
    28: IPv4Network(u'10.28.0.0/16')}
@total_ordering
class DummyIP(object):
    """
    Wrapper around an IP address

    This class should work like IPv[46]Address, but it allows attaching some
    convenience methods like MAC address generation.
    """
    def __init__(self, *args, **kwargs):
        self._ip = ip_address(*args, **kwargs)
        if isinstance(self._ip, IPv4Address):
            # Derive a deterministic MAC from the IPv4 octets:
            # "02:02:" followed by the four octets in hex.
            octets = [int(i) for i in str(self._ip).split('.')]
            self.mac = "02:02:%02x:%02x:%02x:%02x" % tuple(octets)
        else:
            # FIXME
            # No MAC generation scheme for IPv6 addresses yet.
            self.mac = None
    def __str__(self):
        return str(self._ip)
    def __repr__(self):
        return repr(self._ip)
    def __eq__(self, other):
        # Compare the wrapped addresses whether or not other is wrapped.
        if isinstance(other, type(self)):
            return self._ip == other._ip
        else:
            return self._ip == other
    def __lt__(self, other):
        # total_ordering derives the remaining comparison operators.
        if isinstance(other, type(self)):
            return self._ip < other._ip
        else:
            return self._ip < other
    def __int__(self):
        return int(self._ip)
    def __hash__(self):
        return hash(self._ip)
    def __add__(self, other):
        # other is an integer offset, as with IPv[46]Address arithmetic.
        return DummyIP(self._ip + other)
    def __sub__(self, other):
        return DummyIP(self._ip - other)
    def __getattr__(self, name):
        # Proxy lookups to the wrapped network object
        if "_ip" not in self.__dict__:
            # Happens while unpickling
            raise AttributeError
        return getattr(self._ip, name)
class IPGenerator(object):
    """
    Index into the usable address range of a network.

    Non-negative indexes count forward from the first usable address
    (network base + *offset*); negative indexes count backward from the
    address just before the broadcast address.
    """

    def __init__(self, network, offset):
        self.network = network
        self.offset = offset

    def __getitem__(self, index):
        if index >= 0:
            candidate = DummyIP(self.network[index + self.offset])
            if candidate >= self.network.broadcast_address:
                raise IndexError("Index too big")
        else:
            # Shift by one so index -1 maps to the last address before
            # the broadcast address.
            candidate = DummyIP(self.network[index - 1])
            if candidate < self.network[self.offset]:
                raise IndexError("Index too small")
        return candidate
@total_ordering
class NetworkInfo(object):
    """
    Wrapper around a network
    This class should work like IPv[46]Network, but it allows attaching
    Aquilon-related metadata, and a few convenience methods.
    """
    def __init__(self, name, cidr, nettype, loc_type, loc_name, side="a",
                 autocreate=False, comments=None):
        # Accept either a ready-made network object or anything that
        # ip_network() can parse (e.g. a "10.0.0.0/24" string).
        if isinstance(cidr, (IPv4Network, IPv6Network)):
            self._network = cidr
        else:
            self._network = ip_network(text_type(cidr))
        self.name = name
        self.nettype = nettype
        self.reserved = list()
        self.loc_type = loc_type
        self.loc_name = loc_name
        self.side = side
        self.comments = comments
        # autocreate may arrive as a real bool or as the strings
        # "True"/"False" (e.g. when loaded from a CSV file).
        if isinstance(autocreate, bool):
            self.autocreate = autocreate
        elif autocreate == "True":
            self.autocreate = True
        elif autocreate == "False":
            self.autocreate = False
        else:
            raise ValueError("Invalid value for autocreate: %r" % autocreate)
        # Some network types reserve extra leading addresses; the offsets
        # per type are policy encoded here. Reserved addresses are never
        # handed out via ``self.usable``.
        if nettype == 'tor_net':
            offsets = [6, 7]
        elif nettype == 'tor_net2':
            offsets = [7, 8]
        elif nettype == 'vm_storage_net':
            offsets = [8]
        else:
            offsets = []
        for offset in offsets:
            self.reserved.append(DummyIP(self[offset]))
        # Usable addresses start right after the highest reserved offset
        # (or after offset 4 when nothing is reserved).
        first_usable = max(offsets or [4]) + 1
        self.usable = IPGenerator(self, first_usable)
    def __getattr__(self, name):
        # Proxy lookups to the wrapped network object
        if "_network" not in self.__dict__:
            # Happens while unpickling
            raise AttributeError
        return getattr(self._network, name)
    def __getitem__(self, idx):
        # Cast the result to DummyIP, so the .mac property can be used
        return DummyIP(self._network[idx])
    def __str__(self):
        return str(self._network)
    def __repr__(self):
        return repr(self._network)
    def __contains__(self, other):
        # Using a network on the left hand side of "in" works with ipaddr, but
        # will return the wrong answer with ipaddress.
        assert isinstance(other, (IPv4Address, IPv6Address, DummyIP))
        return other in self._network
    def __eq__(self, other):
        if isinstance(other, type(self)):
            return self._network == other._network
        else:
            return self._network == other
    def __lt__(self, other):
        # total_ordering fills in the remaining rich comparisons.
        if isinstance(other, type(self)):
            return self._network < other._network
        else:
            return self._network < other
    def __hash__(self):
        return hash(self._network)
    @property
    def gateway(self):
        # By convention here the gateway is the first host address.
        return self[1]
    @property
    def ip(self):
        return DummyIP(self._network.network_address)
    def subnet(self, new_prefix=None):
        # Eager variant of subnets(): returns a list of NetworkInfo.
        return [NetworkInfo(str(net.network_address), net, self.nettype,
                            self.loc_type, self.loc_name, self.side)
                for net in self._network.subnets(new_prefix=new_prefix)]
    def subnets(self, new_prefix=None):
        # Lazy variant: yields NetworkInfo objects one at a time.
        for net in self._network.subnets(new_prefix=new_prefix):
            yield NetworkInfo(str(net.network_address), net, self.nettype,
                              self.loc_type, self.loc_name, self.side)
    @property
    def is_ipv4(self):
        return isinstance(self._network, IPv4Network)
    @property
    def is_ipv6(self):
        return isinstance(self._network, IPv6Network)
class DummyNetworks(object):
    # Borg
    # All instances share one state dict, so the network definitions are
    # loaded from disk only once per process.
    __shared_state = {}
    def __init__(self, config, *args, **kwargs):
        self.__dict__ = self.__shared_state
        # NOTE(review): re-initialization is skipped when an "unknown"
        # attribute is present — presumably set elsewhere; confirm.
        if getattr(self, "unknown", None):
            return
        object.__init__(self, *args, **kwargs)
        # Directory used to persist dynamically allocated networks between
        # test runs (one pickle file per network, see allocate_network()).
        self.statedir = os.path.join(config.get("unittest", "scratchdir"),
                                     "networks")
        self.networks = {}
        dir = config.get("unittest", "datadir")
        filename = os.path.join(dir, "networks.csv")
        with open(filename, "r") as datafile:
            # Filter out comments
            lines = [line for line in datafile if not line.startswith('#')]
            reader = DictReader(lines)
            for row in reader:
                n = NetworkInfo(row["name"], row["cidr"], row["type"],
                                row["loc_type"], row["loc_name"],
                                side=row["side"], autocreate=row["autocreate"],
                                comments=row["comments"])
                # Sanity checks
                if row["name"] in self.networks:
                    raise KeyError("Duplicate name '%s' in %s" % (row["name"],
                                                                  filename))
                for existing in itervalues(self.networks):
                    if n.overlaps(existing):
                        raise ValueError("Overlapping networks %s and %s in %s"
                                         % (existing, n, filename))
                for dynrange in itervalues(SUBNET_RANGE):
                    if n.overlaps(dynrange):
                        raise ValueError("Range %s is reserved for dynamic "
                                         "allocation" % dynrange)
                self.networks[row["name"]] = n
        # Load dynamic networks
        # (networks pickled by allocate_network() in earlier runs)
        if os.path.exists(self.statedir):
            for name in os.listdir(self.statedir):
                with open(os.path.join(self.statedir, name), "rb") as f:
                    net = pickle.load(f)
                    self.networks[net.name] = net
        else:
            os.makedirs(self.statedir)
    def __getitem__(self, name):
        return self.networks[name]
    def __iter__(self):
        for net in itervalues(self.networks):
            yield net
    def allocate_network(self, testsuite, name, prefixlength, network_type,
                         loc_type, loc_name, side='a', comments=None):
        # Carve a free subnet of the requested size out of SUBNET_RANGE,
        # register it with the broker, and persist it to a state file so
        # later runs will not hand out the same subnet again.
        if prefixlength not in SUBNET_RANGE:
            raise ValueError("There's no address range defined for /%d networks"
                             % prefixlength)
        if name in self.networks:
            raise ValueError("There's already a network named %s" % name)
        range = SUBNET_RANGE[prefixlength]
        result = None
        for net in range.subnets(new_prefix=prefixlength):
            statefile = os.path.join(self.statedir, "%s" % net.network_address)
            # A state file marks the subnet as already taken.
            if os.path.exists(statefile):
                continue
            result = NetworkInfo(name, str(net), network_type, loc_type,
                                 loc_name, side)
            break
        if not result:
            raise ValueError("Could not allocate network of size /%d" %
                             prefixlength)
        command = ["add_network", "--network", name,
                   "--ip", result.network_address,
                   "--netmask", result.netmask,
                   "--" + loc_type, loc_name, "--type", network_type]
        if comments:
            command.extend(["--comments", comments])
        testsuite.noouttest(command)
        with open(statefile, "wb") as f:
            pickle.dump(result, f)
        self.networks[name] = result
        return result
    def dispose_network(self, testsuite, name):
        # Reverse of allocate_network(): delete from the broker, remove the
        # state file and forget the in-memory entry.
        if name not in self.networks:
            raise ValueError("Trying to dispose unknown network %s" % name)
        net = self.networks[name]
        command = ["del_network", "--ip", net.network_address]
        testsuite.noouttest(command)
        statefile = os.path.join(self.statedir, "%s" % net.network_address)
        os.unlink(statefile)
        del self.networks[name]
| quattor/aquilon | tests/broker/networktest.py | Python | apache-2.0 | 11,183 |
from imp import reload
from typing import Any, Sequence, Tuple, Union
import pytest
import polypie
@pytest.fixture(autouse=True)
def reload_polypie():
    """Re-import polypie before every test so that module-level state
    registered by one test does not leak into the next."""
    reload(polypie)
def test_without_annotations():
    """Overloads without annotations dispatch on argument count alone."""
    @polypie.polymorphic
    def f():
        return 0
    @polypie.polymorphic
    def f(a):
        return 1
    @polypie.polymorphic
    def f(a, b):
        return 2
    assert f() == 0
    assert f(120) == 1
    assert f(120, 'foo') == 2
    # No three-argument overload was registered, so dispatch must fail.
    with pytest.raises(polypie.PolypieException, match='not found'):
        f(120, 240, 'foo')
def test_same_types_annotations():
    """Two overloads with identical types but different parameter names:
    positional calls resolve to the first registration, keyword calls
    resolve by parameter name."""
    @polypie.polymorphic
    def f(a: int, b: int):
        return 'int, int first'
    @polypie.polymorphic
    def f(x: int, y: int):
        return 'int, int second'
    @polypie.polymorphic
    def f(a: int, b: str):
        return 'int, str'
    assert f(120, 240) == 'int, int first'
    assert f(a=120, b=240) == 'int, int first'
    assert f(x=120, y=240) == 'int, int second'
    assert f(120, 'foo') == 'int, str'
    assert f(a=120, b='foo') == 'int, str'
def test_builtin_types_annotations():
    """Dispatch on builtin type annotations; an unannotated parameter
    accepts any type."""
    @polypie.polymorphic
    def f(a: int, b: int):
        return 'int, int'
    @polypie.polymorphic
    def f(a: int, b: str):
        return 'int, str'
    @polypie.polymorphic
    def f(a, b: dict):
        return 'any, dict'
    assert f(120, 240) == 'int, int'
    assert f(120, 'foo') == 'int, str'
    assert f(120, {}) == 'any, dict'
    assert f('foo', {}) == 'any, dict'
def test_own_types_annotations():
    """Dispatch on user-defined classes; an unregistered type combination
    raises PolypieException."""
    class Foo:
        pass
    class Bar:
        pass
    @polypie.polymorphic
    def f(a: Foo, b: Bar):
        return 'Foo, Bar'
    @polypie.polymorphic
    def f(a: Bar, b: Foo):
        return 'Bar, Foo'
    foo = Foo()
    bar = Bar()
    assert f(foo, bar) == 'Foo, Bar'
    assert f(bar, foo) == 'Bar, Foo'
    with pytest.raises(polypie.PolypieException, match='not found'):
        f(foo, foo)
def test_typing_annotations():
    """Dispatch on typing constructs (Any, Sequence, Tuple, Union),
    including element types inside Tuple and the members of Union."""
    @polypie.polymorphic
    def f(a: Any, b: Sequence):
        return 'Any, Sequence'
    @polypie.polymorphic
    def f(a: Tuple[int, str], b: Union[int, bool]):
        return 'Tuple, Union'
    assert f(120, [1, 2, 3]) == 'Any, Sequence'
    assert f((120, 'foo'), 120) == 'Tuple, Union'
    assert f((120, 'foo'), True) == 'Tuple, Union'
    # Wrong element order inside the Tuple, and a non-Union second argument,
    # must both fail to dispatch.
    with pytest.raises(polypie.PolypieException, match='not found'):
        f(('foo', 120), 100)
    with pytest.raises(polypie.PolypieException, match='not found'):
        f((120, 'foo'), None)
def test_name_clashing():
    """Functions sharing one name in different scopes (module level, class
    body, separate sample modules) must not clash in polypie's registry."""
    @polypie.polymorphic
    def check_clash(a: int):
        return 'top'
    class Wrapper:
        @polypie.polymorphic
        def check_clash(a: int):
            return 'wrapped'
    from samples import clash1, clash2
    assert check_clash(1) == 'top'
    # BUG FIX: this previously read ``assert Wrapper.check_clash(1), 'wrapped'``
    # — asserting a non-empty two-element tuple, which is always true and
    # could never fail. Compare with ``==`` like the sibling assertions.
    assert Wrapper.check_clash(1) == 'wrapped'
    assert clash1.check_clash(1) == clash1.RESULT
    assert clash2.check_clash(1) == clash2.RESULT
def test_methods():
    """Polymorphic dispatch works for instance, class and static methods,
    and instance state stays independent of class state."""
    class TestClass:
        value = 'cls'
        def __init__(self):
            self.value = None
        def getter(self):
            return self.value
        @polypie.polymorphic
        def setter(self, value: str):
            self.value = value
        @polypie.polymorphic
        def setter(self, value: int):
            self.value = str(value)
        @classmethod
        def cls_getter(cls):
            return cls.value
        @classmethod
        @polypie.polymorphic
        def cls_setter(cls, value: str):
            cls.value = value
        @classmethod
        @polypie.polymorphic
        def cls_setter(cls, value: int):
            cls.value = str(value)
        @staticmethod
        def static_getter(obj):
            return obj.value
        @staticmethod
        @polypie.polymorphic
        def static_setter(obj, value: str):
            obj.value = value
        @staticmethod
        @polypie.polymorphic
        def static_setter(obj, value: int):
            obj.value = str(value)
    instance = TestClass()
    # instance methods
    instance.setter('foo')
    assert instance.getter() == 'foo'
    instance.setter(1)
    assert instance.getter() == '1'
    # cls methods
    assert instance.cls_getter() == 'cls'
    instance.cls_setter('bar')
    assert instance.cls_getter() == 'bar'
    instance.cls_setter(2)
    assert instance.cls_getter() == '2'
    # The class-level setter must not touch the instance attribute.
    assert instance.getter() == '1'
    # static methods
    # (the target object is passed explicitly, so both the instance and the
    # class itself can be mutated through the same static method)
    instance.static_setter(instance, 'baz')
    instance.static_setter(TestClass, 'xyzzy')
    assert instance.static_getter(instance) == 'baz'
    assert instance.static_getter(TestClass) == 'xyzzy'
    instance.static_setter(instance, 100)
    instance.static_setter(TestClass, 200)
    assert instance.static_getter(instance) == '100'
    assert instance.static_getter(TestClass) == '200'
def test_exception_due_to_existent_signature():
    """Registering a signature that already exists raises PolypieException.

    An unannotated parameter and a parameter annotated with a new type are
    distinct signatures; re-using an already-registered annotation is not.
    """
    @polypie.polymorphic
    def f(a):
        pass
    @polypie.polymorphic
    def f(a, b):
        pass
    @polypie.polymorphic
    def f(a, b: str):
        pass
    @polypie.polymorphic
    def f(a, b: int):
        pass
    # (a, b: str) was already registered above.
    with pytest.raises(polypie.PolypieException, match='already exists'):
        @polypie.polymorphic
        def f(a, b: str):
            pass
def test_function_special_attrs():
    """The decorator preserves __name__/__qualname__/__module__ and custom
    attributes, but exposes no __annotations__ and no __wrapped__."""
    from samples.specialattrs import Wrapper
    assert Wrapper.check_special_attrs.__name__ == Wrapper.NAME
    assert Wrapper.check_special_attrs.__qualname__ == Wrapper.QUALNAME
    assert Wrapper.check_special_attrs.__module__ == Wrapper.MODULE
    assert Wrapper.check_special_attrs.attr1 == Wrapper.ATTR1
    assert Wrapper.check_special_attrs.attr2 == Wrapper.ATTR2
    assert not Wrapper.check_special_attrs.__annotations__
    assert not hasattr(Wrapper.check_special_attrs, '__wrapped__')
| un-def/polypie | tests/test_polypie.py | Python | bsd-2-clause | 5,791 |
"""
An ndarray subclass for working with arrays of strings.
"""
from functools import partial, total_ordering
from operator import eq, ne
import re
import numpy as np
from numpy import ndarray
import pandas as pd
from toolz import compose
from zipline.utils.compat import unicode
from zipline.utils.functional import instance
from zipline.utils.preprocess import preprocess
from zipline.utils.sentinel import sentinel
from zipline.utils.input_validation import (
coerce,
expect_kinds,
expect_types,
optional,
)
from zipline.utils.numpy_utils import (
bool_dtype,
unsigned_int_dtype_with_size_in_bytes,
is_object,
object_dtype,
)
from zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning
from ._factorize import (
factorize_strings,
factorize_strings_known_categories,
smallest_uint_that_can_hold,
)
def compare_arrays(left, right):
    """Check two arrays for element-wise equality.

    Short-circuits to True when ``left`` and ``right`` are the same object,
    and to False when their shapes differ (avoiding a broadcast error).
    """
    if left is right:
        return True
    return (left.shape == right.shape) and (left == right).all()
def _make_unsupported_method(name):
def method(*args, **kwargs):
raise NotImplementedError(
"Method %s is not supported on LabelArrays." % name
)
method.__name__ = name
method.__doc__ = "Unsupported LabelArray Method: %s" % name
return method
class MissingValueMismatch(ValueError):
    """Raised when an operation combines two LabelArrays whose
    ``missing_value`` settings differ."""

    def __init__(self, left, right):
        message = (
            "LabelArray missing_values don't match:"
            " left={}, right={}".format(left, right)
        )
        super(MissingValueMismatch, self).__init__(message)
class CategoryMismatch(ValueError):
    """Raised when an operation combines two LabelArrays whose category
    arrays differ; the message lists the mismatching positions."""

    def __init__(self, left, right):
        (mismatches,) = np.where(left != right)
        assert len(mismatches), "Not actually a mismatch!"
        message = (
            "LabelArray categories don't match:\n"
            "Mismatched Indices: {mismatches}\n"
            "Left: {left}\n"
            "Right: {right}".format(
                mismatches=mismatches,
                left=left[mismatches],
                right=right[mismatches],
            )
        )
        super(CategoryMismatch, self).__init__(message)
_NotPassed = sentinel('_NotPassed')  # marker for "argument not supplied" in view()
class LabelArray(ndarray):
    """
    An ndarray subclass for working with arrays of strings.
    Factorizes the input array into integers, but overloads equality on strings
    to check against the factor label.
    Parameters
    ----------
    values : array-like
        Array of values that can be passed to np.asarray with dtype=object.
    missing_value : str
        Scalar value to treat as 'missing' for operations on ``self``.
    categories : list[str], optional
        List of values to use as categories.  If not supplied, categories will
        be inferred as the unique set of entries in ``values``.
    sort : bool, optional
        Whether to sort categories.  If sort is False and categories is
        supplied, they are left in the order provided.  If sort is False and
        categories is None, categories will be constructed in a random order.
    Attributes
    ----------
    categories : ndarray[str]
        An array containing the unique labels of self.
    reverse_categories : dict[str -> int]
        Reverse lookup table for ``categories``. Stores the index in
        ``categories`` at which each entry each unique entry is found.
    missing_value : str or None
        A sentinel missing value with NaN semantics for comparisons.
    Notes
    -----
    Consumers should be cautious when passing instances of LabelArray to numpy
    functions. We attempt to disallow as many meaningless operations as
    possible, but since a LabelArray is just an ndarray of ints with some
    additional metadata, many numpy functions (for example, trigonometric) will
    happily accept a LabelArray and treat its values as though they were
    integers.
    In a future change, we may be able to disallow more numerical operations by
    creating a wrapper dtype which doesn't register an implementation for most
    numpy ufuncs. Until that change is made, consumers of LabelArray should
    assume that it is undefined behavior to pass a LabelArray to any numpy
    ufunc that operates on semantically-numerical data.
    See Also
    --------
    https://docs.scipy.org/doc/numpy-1.11.0/user/basics.subclassing.html
    """
    SUPPORTED_SCALAR_TYPES = (bytes, unicode, type(None))
    SUPPORTED_NON_NONE_SCALAR_TYPES = (bytes, unicode)
    @preprocess(
        values=coerce(list, partial(np.asarray, dtype=object)),
        # Coerce ``list`` to ``list`` to make a copy. Code internally may call
        # ``categories.insert(0, missing_value)`` which will mutate this list
        # in place.
        categories=coerce((list, np.ndarray, set), list),
    )
    @expect_types(
        values=np.ndarray,
        missing_value=SUPPORTED_SCALAR_TYPES,
        categories=optional(list),
    )
    @expect_kinds(values=("O", "S", "U"))
    def __new__(cls,
                values,
                missing_value,
                categories=None,
                sort=True):
        # Numpy's fixed-width string types aren't very efficient. Working with
        # object arrays is faster than bytes or unicode arrays in almost all
        # cases.
        if not is_object(values):
            values = values.astype(object)
        # Preserve the memory layout of the input when flattening, so the
        # codes can be reshaped back losslessly below.
        if values.flags.f_contiguous:
            ravel_order = 'F'
        else:
            ravel_order = 'C'
        if categories is None:
            codes, categories, reverse_categories = factorize_strings(
                values.ravel(ravel_order),
                missing_value=missing_value,
                sort=sort,
            )
        else:
            codes, categories, reverse_categories = (
                factorize_strings_known_categories(
                    values.ravel(ravel_order),
                    categories=categories,
                    missing_value=missing_value,
                    sort=sort,
                )
            )
        # Categories are shared metadata; freeze them against mutation.
        categories.setflags(write=False)
        return cls.from_codes_and_metadata(
            codes=codes.reshape(values.shape, order=ravel_order),
            categories=categories,
            reverse_categories=reverse_categories,
            missing_value=missing_value,
        )
    @classmethod
    def from_codes_and_metadata(cls,
                                codes,
                                categories,
                                reverse_categories,
                                missing_value):
        """
        Rehydrate a LabelArray from the codes and metadata.
        Parameters
        ----------
        codes : np.ndarray[integral]
            The codes for the label array.
        categories : np.ndarray[object]
            The unique string categories.
        reverse_categories : dict[str, int]
            The mapping from category to its code-index.
        missing_value : any
            The value used to represent missing data.
        """
        # View the codes with dtype=np.void so that scalar indexing yields
        # np.void, which __getitem__ maps back to the category strings.
        ret = codes.view(type=cls, dtype=np.void)
        ret._categories = categories
        ret._reverse_categories = reverse_categories
        ret._missing_value = missing_value
        return ret
    @classmethod
    def from_categorical(cls, categorical, missing_value=None):
        """
        Create a LabelArray from a pandas categorical.
        Parameters
        ----------
        categorical : pd.Categorical
            The categorical object to convert.
        missing_value : bytes, unicode, or None, optional
            The missing value to use for this LabelArray.
        Returns
        -------
        la : LabelArray
            The LabelArray representation of this categorical.
        """
        return LabelArray(
            categorical,
            missing_value,
            categorical.categories,
        )
    @property
    def categories(self):
        # This is a property because it should be immutable.
        return self._categories
    @property
    def reverse_categories(self):
        # This is a property because it should be immutable.
        return self._reverse_categories
    @property
    def missing_value(self):
        # This is a property because it should be immutable.
        return self._missing_value
    @property
    def missing_value_code(self):
        """Integer code used to represent ``missing_value`` in the array."""
        return self.reverse_categories[self.missing_value]
    def has_label(self, value):
        """Return whether ``value`` is one of this array's categories."""
        return value in self.reverse_categories
    def __array_finalize__(self, obj):
        """
        Called by Numpy after array construction.
        There are three cases where this can happen:
        1. Someone tries to directly construct a new array by doing::
            >>> ndarray.__new__(LabelArray, ...)  # doctest: +SKIP
           In this case, obj will be None.  We treat this as an error case and
           fail.
        2. Someone (most likely our own __new__) does::
           >>> other_array.view(type=LabelArray)  # doctest: +SKIP
           In this case, `self` will be the new LabelArray instance, and
           ``obj` will be the array on which ``view`` is being called.
           The caller of ``obj.view`` is responsible for setting category
           metadata on ``self`` after we exit.
        3. Someone creates a new LabelArray by slicing an existing one.
           In this case, ``obj`` will be the original LabelArray.  We're
           responsible for copying over the parent array's category metadata.
        """
        if obj is None:
            raise TypeError(
                "Direct construction of LabelArrays is not supported."
            )
        # See docstring for an explanation of when these will or will not be
        # set.
        self._categories = getattr(obj, 'categories', None)
        self._reverse_categories = getattr(obj, 'reverse_categories', None)
        self._missing_value = getattr(obj, 'missing_value', None)
    def as_int_array(self):
        """
        Convert self into a regular ndarray of ints.
        This is an O(1) operation. It does not copy the underlying data.
        """
        return self.view(
            type=ndarray,
            dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
        )
    def as_string_array(self):
        """
        Convert self back into an array of strings.
        This is an O(N) operation.
        """
        return self.categories[self.as_int_array()]
    def as_categorical(self):
        """
        Coerce self into a pandas categorical.
        This is only defined on 1D arrays, since that's all pandas supports.
        """
        if len(self.shape) > 1:
            raise ValueError("Can't convert a 2D array to a categorical.")
        with ignore_pandas_nan_categorical_warning():
            return pd.Categorical.from_codes(
                self.as_int_array(),
                # We need to make a copy because pandas >= 0.17 fails if this
                # buffer isn't writeable.
                self.categories.copy(),
                ordered=False,
            )
    def as_categorical_frame(self, index, columns, name=None):
        """
        Coerce self into a pandas DataFrame of Categoricals.
        """
        if len(self.shape) != 2:
            raise ValueError(
                "Can't convert a non-2D LabelArray into a DataFrame."
            )
        expected_shape = (len(index), len(columns))
        if expected_shape != self.shape:
            raise ValueError(
                "Can't construct a DataFrame with provided indices:\n\n"
                "LabelArray shape is {actual}, but index and columns imply "
                "that shape should be {expected}.".format(
                    actual=self.shape,
                    expected=expected_shape,
                )
            )
        return pd.Series(
            index=pd.MultiIndex.from_product([index, columns]),
            data=self.ravel().as_categorical(),
            name=name,
        ).unstack()
    def __setitem__(self, indexer, value):
        self_categories = self.categories
        if isinstance(value, self.SUPPORTED_SCALAR_TYPES):
            # Scalar assignment: translate the string into its integer code
            # and write that into the underlying codes array.
            value_code = self.reverse_categories.get(value, None)
            if value_code is None:
                raise ValueError("%r is not in LabelArray categories." % value)
            self.as_int_array()[indexer] = value_code
        elif isinstance(value, LabelArray):
            value_categories = value.categories
            if compare_arrays(self_categories, value_categories):
                # Identical categories: the raw codes are directly compatible.
                return super(LabelArray, self).__setitem__(indexer, value)
            elif (self.missing_value == value.missing_value and
                  set(value.categories) <= set(self.categories)):
                # Compatible missing value and a subset of our categories:
                # re-factorize the RHS against our own categories first.
                rhs = LabelArray.from_codes_and_metadata(
                    *factorize_strings_known_categories(
                        value.as_string_array().ravel(),
                        list(self.categories),
                        self.missing_value,
                        False,
                    ),
                    missing_value=self.missing_value
                ).reshape(value.shape)
                super(LabelArray, self).__setitem__(indexer, rhs)
            else:
                raise CategoryMismatch(self_categories, value_categories)
        else:
            raise NotImplementedError(
                "Setting into a LabelArray with a value of "
                "type {type} is not yet supported.".format(
                    type=type(value).__name__,
                ),
            )
    def set_scalar(self, indexer, value):
        """
        Set scalar value into the array.
        Parameters
        ----------
        indexer : any
            The indexer to set the value at.
        value : str
            The value to assign at the given locations.
        Raises
        ------
        ValueError
            Raised when ``value`` is not a value element of this this label
            array.
        """
        try:
            value_code = self.reverse_categories[value]
        except KeyError:
            raise ValueError("%r is not in LabelArray categories." % value)
        self.as_int_array()[indexer] = value_code
    def __setslice__(self, i, j, sequence):
        """
        This method was deprecated in Python 2.0. It predates slice objects,
        but Python 2.7.11 still uses it if you implement it, which ndarray
        does.  In newer Pythons, __setitem__ is always called, but we need to
        manuallly forward in py2.
        """
        self.__setitem__(slice(i, j), sequence)
    def __getitem__(self, indexer):
        result = super(LabelArray, self).__getitem__(indexer)
        if result.ndim:
            # Result is still a LabelArray, so we can just return it.
            return result
        # Result is a scalar value, which will be an instance of np.void.
        # Map it back to one of our category entries.
        index = result.view(
            unsigned_int_dtype_with_size_in_bytes(self.itemsize),
        )
        return self.categories[index]
    def is_missing(self):
        """
        Like isnan, but checks for locations where we store missing values.
        """
        return (
            self.as_int_array() == self.reverse_categories[self.missing_value]
        )
    def not_missing(self):
        """
        Like ~isnan, but checks for locations where we store missing values.
        """
        return (
            self.as_int_array() != self.reverse_categories[self.missing_value]
        )
    def _equality_check(op):
        """
        Shared code for __eq__ and __ne__, parameterized on the actual
        comparison operator to use.
        """
        def method(self, other):
            if isinstance(other, LabelArray):
                self_mv = self.missing_value
                other_mv = other.missing_value
                if self_mv != other_mv:
                    raise MissingValueMismatch(self_mv, other_mv)
                self_categories = self.categories
                other_categories = other.categories
                if not compare_arrays(self_categories, other_categories):
                    raise CategoryMismatch(self_categories, other_categories)
                # Missing values compare like NaN: never equal to anything.
                return (
                    op(self.as_int_array(), other.as_int_array())
                    & self.not_missing()
                    & other.not_missing()
                )
            elif isinstance(other, ndarray):
                # Compare to ndarrays as though we were an array of strings.
                # This is fairly expensive, and should generally be avoided.
                return op(self.as_string_array(), other) & self.not_missing()
            elif isinstance(other, self.SUPPORTED_SCALAR_TYPES):
                # -1 is never a valid code, so an unknown scalar matches
                # nothing under eq (and everything non-missing under ne).
                i = self._reverse_categories.get(other, -1)
                return op(self.as_int_array(), i) & self.not_missing()
            return op(super(LabelArray, self), other)
        return method
    __eq__ = _equality_check(eq)
    __ne__ = _equality_check(ne)
    del _equality_check
    def view(self, dtype=_NotPassed, type=_NotPassed):
        if type is _NotPassed and dtype not in (_NotPassed, self.dtype):
            raise TypeError("Can't view LabelArray as another dtype.")
        # The text signature on ndarray.view makes it look like the default
        # values for dtype and type are `None`, but passing None explicitly has
        # different semantics than not passing an arg at all, so we reconstruct
        # the kwargs dict here to simulate the args not being passed at all.
        kwargs = {}
        if dtype is not _NotPassed:
            kwargs['dtype'] = dtype
        if type is not _NotPassed:
            kwargs['type'] = type
        return super(LabelArray, self).view(**kwargs)
    def astype(self,
               dtype,
               order='K',
               casting='unsafe',
               subok=True,
               copy=True):
        if dtype == self.dtype:
            if not subok:
                array = self.view(type=np.ndarray)
            else:
                array = self
            if copy:
                return array.copy()
            return array
        if dtype == object_dtype:
            return self.as_string_array()
        if dtype.kind == 'S':
            return self.as_string_array().astype(
                dtype,
                order=order,
                casting=casting,
                subok=subok,
                copy=copy,
            )
        raise TypeError(
            '%s can only be converted into object, string, or void,'
            ' got: %r' % (
                type(self).__name__,
                dtype,
            ),
        )
    # In general, we support resizing, slicing, and reshaping methods, but not
    # numeric methods.
    SUPPORTED_NDARRAY_METHODS = frozenset([
        'astype',
        'base',
        'compress',
        'copy',
        'data',
        'diagonal',
        'dtype',
        'flat',
        'flatten',
        'item',
        'itemset',
        'itemsize',
        'nbytes',
        'ndim',
        'ravel',
        'repeat',
        'reshape',
        'resize',
        'setflags',
        'shape',
        'size',
        'squeeze',
        'strides',
        'swapaxes',
        'take',
        'trace',
        'transpose',
        'view'
    ])
    PUBLIC_NDARRAY_METHODS = frozenset([
        s for s in dir(ndarray) if not s.startswith('_')
    ])
    # Generate failing wrappers for all unsupported methods.
    locals().update(
        {
            method: _make_unsupported_method(method)
            for method in PUBLIC_NDARRAY_METHODS - SUPPORTED_NDARRAY_METHODS
        }
    )
    def __repr__(self):
        repr_lines = repr(self.as_string_array()).splitlines()
        repr_lines[0] = repr_lines[0].replace('array(', 'LabelArray(', 1)
        repr_lines[-1] = repr_lines[-1].rsplit(',', 1)[0] + ')'
        # The extra spaces here account for the difference in length between
        # 'array(' and 'LabelArray('.
        return '\n     '.join(repr_lines)
    def empty_like(self, shape):
        """
        Make an empty LabelArray with the same categories as ``self``, filled
        with ``self.missing_value``.
        """
        return type(self).from_codes_and_metadata(
            codes=np.full(
                shape,
                self.reverse_categories[self.missing_value],
                dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
            ),
            categories=self.categories,
            reverse_categories=self.reverse_categories,
            missing_value=self.missing_value,
        )
    def map_predicate(self, f):
        """
        Map a function from str -> bool element-wise over ``self``.
        ``f`` will be applied exactly once to each non-missing unique value in
        ``self``. Missing values will always return False.
        """
        # Functions passed to this are of type str -> bool.  Don't ever call
        # them on None, which is the only non-str value we ever store in
        # categories.
        if self.missing_value is None:
            def f_to_use(x):
                return False if x is None else f(x)
        else:
            f_to_use = f
        # Call f on each unique value in our categories.
        results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
        # missing_value should produce False no matter what
        results[self.reverse_categories[self.missing_value]] = False
        # unpack the results form each unique value into their corresponding
        # locations in our indices.
        return results[self.as_int_array()]
    def map(self, f):
        """
        Map a function from str -> str element-wise over ``self``.
        ``f`` will be applied exactly once to each non-missing unique value in
        ``self``. Missing values will always map to ``self.missing_value``.
        """
        # f() should only return None if None is our missing value.
        if self.missing_value is None:
            allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
        else:
            allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES
        def f_to_use(x,
                     missing_value=self.missing_value,
                     otypes=allowed_outtypes):
            # Don't call f on the missing value; those locations don't exist
            # semantically. We return _sortable_sentinel rather than None
            # because the np.unique call below sorts the categories array,
            # which raises an error on Python 3 because None and str aren't
            # comparable.
            if x == missing_value:
                return _sortable_sentinel
            ret = f(x)
            if not isinstance(ret, otypes):
                raise TypeError(
                    "LabelArray.map expected function {f} to return a string"
                    " or None, but got {type} instead.\n"
                    "Value was {value}.".format(
                        f=f.__name__,
                        type=type(ret).__name__,
                        value=ret,
                    )
                )
            if ret == missing_value:
                return _sortable_sentinel
            return ret
        new_categories_with_duplicates = (
            np.vectorize(f_to_use, otypes=[object])(self.categories)
        )
        # If f() maps multiple inputs to the same output, then we can end up
        # with the same code duplicated multiple times. Compress the categories
        # by running them through np.unique, and then use the reverse lookup
        # table to compress codes as well.
        new_categories, bloated_inverse_index = np.unique(
            new_categories_with_duplicates,
            return_inverse=True
        )
        if new_categories[0] is _sortable_sentinel:
            # f_to_use return _sortable_sentinel for locations that should be
            # missing values in our output. Since np.unique returns the uniques
            # in sorted order, and since _sortable_sentinel sorts before any
            # string, we only need to check the first array entry.
            new_categories[0] = self.missing_value
        # `reverse_index` will always be a 64 bit integer even if we can hold a
        # smaller array.
        reverse_index = bloated_inverse_index.astype(
            smallest_uint_that_can_hold(len(new_categories))
        )
        new_codes = np.take(reverse_index, self.as_int_array())
        return self.from_codes_and_metadata(
            new_codes,
            new_categories,
            dict(zip(new_categories, range(len(new_categories)))),
            missing_value=self.missing_value,
        )
    def startswith(self, prefix):
        """
        Element-wise startswith.
        Parameters
        ----------
        prefix : str
        Returns
        -------
        matches : np.ndarray[bool]
            An array with the same shape as self indicating whether each
            element of self started with ``prefix``.
        """
        return self.map_predicate(lambda elem: elem.startswith(prefix))
    def endswith(self, suffix):
        """
        Elementwise endswith.
        Parameters
        ----------
        suffix : str
        Returns
        -------
        matches : np.ndarray[bool]
            An array with the same shape as self indicating whether each
            element of self ended with ``suffix``
        """
        return self.map_predicate(lambda elem: elem.endswith(suffix))
    def has_substring(self, substring):
        """
        Elementwise contains.
        Parameters
        ----------
        substring : str
        Returns
        -------
        matches : np.ndarray[bool]
            An array with the same shape as self indicating whether each
            element of self ended with ``suffix``.
        """
        return self.map_predicate(lambda elem: substring in elem)
    @preprocess(pattern=coerce(from_=(bytes, unicode), to=re.compile))
    def matches(self, pattern):
        """
        Elementwise regex match.
        Parameters
        ----------
        pattern : str or compiled regex
        Returns
        -------
        matches : np.ndarray[bool]
            An array with the same shape as self indicating whether each
            element of self was matched by ``pattern``.
        """
        return self.map_predicate(compose(bool, pattern.match))
    # These types all implement an O(N) __contains__, so pre-emptively
    # coerce to `set`.
    @preprocess(container=coerce((list, tuple, np.ndarray), set))
    def element_of(self, container):
        """
        Check if each element of self is an of ``container``.
        Parameters
        ----------
        container : object
            An object implementing a __contains__ to call on each element of
            ``self``.
        Returns
        -------
        is_contained : np.ndarray[bool]
            An array with the same shape as self indicating whether each
            element of self was an element of ``container``.
        """
        return self.map_predicate(container.__contains__)
@instance  # This makes _sortable_sentinel a singleton instance.
@total_ordering
class _sortable_sentinel(object):
    """Dummy object that sorts before any other python object.
    """
    def __eq__(self, other):
        # Only ever equal to itself (there is exactly one instance).
        return self is other
    def __lt__(self, other):
        # Unconditionally sorts first; total_ordering derives the rest.
        return True
@expect_types(trues=LabelArray, falses=LabelArray)
def labelarray_where(cond, trues, falses):
    """LabelArray-aware implementation of np.where.

    Selects from ``trues`` where ``cond`` is true and from ``falses``
    elsewhere, returning a new LabelArray. Both inputs must share the
    same ``missing_value``.
    """
    missing = trues.missing_value
    if missing != falses.missing_value:
        raise ValueError(
            "Can't compute where on arrays with different missing values."
        )
    chosen = np.where(cond, trues.as_string_array(), falses.as_string_array())
    return LabelArray(chosen, missing_value=missing)
| quantopian/zipline | zipline/lib/labelarray.py | Python | apache-2.0 | 28,229 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the ``website_contactmessage`` table."""

    def forwards(self, orm):
        # Adding model 'ContactMessage'
        db.create_table('website_contactmessage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('sender', self.gf('django.db.models.fields.CharField')(max_length=51)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=512)),
            ('message', self.gf('django.db.models.fields.TextField')(max_length=4096)),
            ('sent', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('website', ['ContactMessage'])

    def backwards(self, orm):
        # Deleting model 'ContactMessage'
        db.delete_table('website_contactmessage')

    # Frozen ORM description used by South to reconstruct model state.
    models = {
        'website.contactmessage': {
            'Meta': {'object_name': 'ContactMessage'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '512'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '4096'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '51'}),
            'sent': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['website']
complete_apps = ['website'] | audaciouscode/Roxy-Proxy | roxy/website/migrations/0001_initial.py | Python | gpl-3.0 | 1,538 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Basic authentication backend"""
from functools import wraps
from typing import Any, Callable, Optional, Tuple, TypeVar, Union, cast
from flask import Response, current_app, request
from flask_appbuilder.const import AUTH_LDAP
from flask_login import login_user
from airflow.www.fab_security.sqla.models import User
# Client-side credentials hook used by API clients; basic auth has none here.
CLIENT_AUTH: Optional[Union[Tuple[str, str], Any]] = None
def init_app(_):
    """Initializes authentication backend.

    Basic auth requires no per-application setup; the Flask app argument
    is intentionally ignored.
    """
# Type variable preserving the decorated callable's type for the decorator below.
T = TypeVar("T", bound=Callable)
def auth_current_user() -> Optional[User]:
    """Authenticate and set current user if Authorization header exists.

    Returns the authenticated ``User`` (also logged in via flask-login),
    or ``None`` when credentials are absent or invalid.
    """
    auth = request.authorization
    if auth is None or not auth.username or not auth.password:
        return None

    ab_security_manager = current_app.appbuilder.sm
    user = None
    # Try LDAP first when configured; fall back to the local user database.
    if ab_security_manager.auth_type == AUTH_LDAP:
        user = ab_security_manager.auth_user_ldap(auth.username, auth.password)
    if user is None:
        user = ab_security_manager.auth_user_db(auth.username, auth.password)
    if user is not None:
        # remember=False: do not persist the session beyond this request cycle.
        login_user(user, remember=False)
    return user
def requires_authentication(function: T):
    """Decorator for functions that require authentication.

    Wraps *function* so that unauthenticated requests receive a 401
    response carrying the ``WWW-Authenticate: Basic`` challenge header.
    """

    @wraps(function)
    def decorated(*args, **kwargs):
        # Guard clause: bail out early when no valid credentials were supplied.
        if auth_current_user() is None:
            return Response("Unauthorized", 401, {"WWW-Authenticate": "Basic"})
        return function(*args, **kwargs)

    return cast(T, decorated)
| apache/incubator-airflow | airflow/api/auth/backend/basic_auth.py | Python | apache-2.0 | 2,258 |
import os
import pandas as pd
from .config import get_sch_ref_df
from . import simple_cysh as cysh
sch_ref_df = get_sch_ref_df()
def academic_sections_to_create():
    """
    Gather ACM deployment docs to determine which 'Tutoring: Math' and 'Tutoring: Literacy' sections to make.

    Reads the SY19 deployment workbook from the network share, melts it to
    one row per (ACM, section type), drops sections that already exist in
    cyschoolhouse, writes the result to ``section-creator-input.xlsx`` and
    returns it as a DataFrame.
    """
    # Each sheet (except the template) is one school; first 6 columns hold the roster.
    xl = pd.ExcelFile(r'Z:\Impact Analytics Team\SY19 ACM Deployment.xlsx')
    df_list = []
    for sheet in xl.sheet_names:
        if sheet != 'Sample Deployment':
            df = xl.parse(sheet).iloc[:,0:6]
            df['Informal Name'] = sheet
            df_list.append(df)
            del df
    acm_dep_df = pd.concat(df_list)
    acm_dep_df.rename(columns={
        'ACM Name':'ACM',
        'Related IA (ELA/Math)':'SectionName'
    }, inplace=True)
    acm_dep_df = acm_dep_df.loc[~acm_dep_df['ACM'].isnull() & ~acm_dep_df['SectionName'].isnull()]
    acm_dep_df['ACM'] = acm_dep_df['ACM'].str.strip()
    acm_dep_df['SectionName'] = acm_dep_df['SectionName'].str.strip().str.upper()
    # Map the free-text IA column to the two canonical section names.
    acm_dep_df.loc[acm_dep_df['SectionName'].str.contains('MATH'), 'SectionName_MATH'] = 'Tutoring: Math'
    acm_dep_df.loc[acm_dep_df['SectionName'].str.contains('ELA'), 'SectionName_ELA'] = 'Tutoring: Literacy'
    acm_dep_df = acm_dep_df.fillna('')
    # Collapse duplicates so each ACM keeps at most one MATH and one ELA label.
    acm_dep_df = acm_dep_df[['ACM', 'Informal Name', 'SectionName_MATH', 'SectionName_ELA']].groupby(['ACM', 'Informal Name']).agg(lambda x: ''.join(x.unique()))
    acm_dep_df.reset_index(inplace=True)
    # Long format: one row per (ACM, section type).
    acm_dep_df = pd.melt(
        acm_dep_df,
        id_vars=['ACM', 'Informal Name'],
        value_vars=['SectionName_MATH', 'SectionName_ELA'],
        value_name='SectionName'
    )
    acm_dep_df = acm_dep_df.loc[~acm_dep_df['SectionName'].isnull() & (acm_dep_df['SectionName'] != '')]
    acm_dep_df = acm_dep_df.sort_values('ACM')
    acm_dep_df['key'] = acm_dep_df['ACM'] + acm_dep_df['SectionName']

    # Sections already present in cyschoolhouse, keyed the same way.
    section_df = cysh.get_section_df(sections_of_interest=['Tutoring: Literacy', 'Tutoring: Math'])
    staff_df = cysh.get_staff_df()
    section_df = section_df.merge(staff_df, how='left', left_on='Intervention_Primary_Staff__c', right_on='Staff__c')
    section_df['key'] = section_df['Staff__c_Name'] + section_df['Program__c_Name']

    # Keep only known staff whose section does not exist yet.
    acm_dep_df = acm_dep_df.loc[
        acm_dep_df['ACM'].isin(staff_df['Staff__c_Name']) &
        ~acm_dep_df['key'].isin(section_df['key'])
    ]

    df = acm_dep_df.merge(sch_ref_df[['School', 'Informal Name']], how='left', on='Informal Name')
    df = df[['School', 'ACM', 'SectionName']]
    df['In_School_or_Extended_Learning'] = 'In School'
    df['Start_Date'] = '09/04/2018'
    df['End_Date'] = '06/07/2019'
    df['Target_Dosage'] = 0

    df.to_excel(os.path.join(os.path.dirname(__file__), 'input_files/section-creator-input.xlsx'), index=False)

    return df
def non_CP_sections_to_create(sections_of_interest=['Coaching: Attendance', 'SEL Check In Check Out']):
    """
    Produce table of sections to create, with the assumption that all 'Corps Member' roles should have 1 of each section.

    NOTE(review): the mutable default argument is shared across calls; it is
    only read here, but confirm no caller mutates it.

    Returns a DataFrame with one row per missing (ACM, section type) pair.
    """
    section_df = cysh.get_section_df(sections_of_interest)
    section_df['key'] = section_df['Intervention_Primary_Staff__c'] + section_df['Program__c_Name']

    staff_df = cysh.get_object_df('Staff__c', ['Id', 'Name', 'Role__c', 'Organization__c'], where="Site__c='Chicago'", rename_name=True)
    school_df = cysh.get_object_df('Account', ['Id', 'Name'])
    school_df.rename(columns={'Id':'School__c', 'Name':'School'}, inplace=True)
    staff_df = staff_df.merge(school_df, how='left', left_on='Organization__c', right_on='School__c')

    acm_df = staff_df.loc[staff_df['Role__c'].str.contains('Corps Member')==True].copy()

    # Cross-join ACMs with the desired section types via a constant key.
    acm_df['key'] = 1
    section_deployment = pd.DataFrame.from_dict({'SectionName': sections_of_interest})
    section_deployment['key'] = 1
    acm_df = acm_df.merge(section_deployment, on='key')

    # Drop combinations that already exist.
    acm_df['key'] = acm_df['Id'] + acm_df['SectionName']
    acm_df = acm_df.loc[~acm_df['key'].isin(section_df['key'])]

    acm_df.rename(columns={'Staff__c_Name':'ACM'}, inplace=True)
    acm_df = acm_df[['School', 'ACM', 'SectionName']]
    acm_df['In_School_or_Extended_Learning'] = 'In School'
    acm_df['Start_Date'] = '09/04/2018'
    acm_df['End_Date'] = '06/07/2019'
    acm_df['Target_Dosage'] = 0

    return acm_df
def MIRI_sections_to_create():
    """
    Produce table of ACM 'Math Inventory' and 'Reading Inventory' sections to make.

    For each high-school tutoring section, derive the matching inventory
    section name and return the rows whose inventory section does not
    already exist.
    """
    program_df = cysh.get_object_df('Program__c', ['Id', 'Name'], rename_id=True, rename_name=True)
    school_df = cysh.get_object_df('Account', ['Id', 'Name'])
    school_df.rename(columns={'Id':'School__c', 'Name':'School'}, inplace=True)
    staff_df = cysh.get_object_df('Staff__c', ['Id', 'Name'], where="Site__c='Chicago'", rename_name=True)
    section_df = cysh.get_object_df('Section__c', ['Id', 'Name', 'Intervention_Primary_Staff__c', 'School__c', 'Program__c'], rename_id=True, rename_name=True)

    section_df = section_df.merge(school_df, how='left', on='School__c')
    section_df = section_df.merge(program_df, how='left', on='Program__c')
    section_df = section_df.merge(staff_df, how='left', left_on='Intervention_Primary_Staff__c', right_on='Id')

    # MIRI only applies at these high schools.
    highschools = [
        'Tilden Career Community Academy High School',
        'Gage Park High School',
        'Collins Academy High School',
        'Schurz High School',
        'Sullivan High School',
        'Chicago Academy High School',
        'Roberto Clemente Community Academy',
        'Wendell Phillips Academy',
    ]
    section_df = section_df.loc[section_df['School'].isin(highschools)]

    # Split existing inventory sections from tutoring sections, then rename
    # the tutoring programs to their corresponding inventory program.
    miri_section_df = section_df.loc[section_df['Program__c_Name'].str.contains('Inventory')]
    section_df = section_df.loc[section_df['Program__c_Name'].str.contains('Tutoring')]
    section_df['Program__c_Name'] = section_df['Program__c_Name'].map({
        'Tutoring: Literacy':'Reading Inventory',
        'Tutoring: Math':'Math Inventory'
    })

    for df in [section_df, miri_section_df]:
        df['key'] = df['Staff__c_Name'] + df['Program__c_Name']

    # Keep only inventory sections that do not exist yet.
    section_df = section_df.loc[~section_df['key'].isin(miri_section_df['key'])]

    section_df.rename(columns={'Staff__c_Name':'ACM', 'Program__c_Name':'SectionName'}, inplace=True)
    section_df['In_School_or_Extended_Learning'] = 'In School'
    section_df['Start_Date'] = '09/04/2018'
    section_df['End_Date'] = '06/07/2019'
    section_df['Target_Dosage'] = 0

    section_df = section_df[['School', 'ACM', 'SectionName', 'In_School_or_Extended_Learning', 'Start_Date', 'End_Date', 'Target_Dosage']]

    return section_df
def deactivate_all_sections(section_type):
    """
    Deactivate every active section whose program name is ``section_type``.

    This is necessary due to a bug in section creation. When section creation fails,
    a `50 Acts of Greatness` section is made, as the default section type selection.
    We don't provide this programming in Chicago, so we can safely deactivate all.

    :param section_type: Program__c_Name of the sections to deactivate
        (e.g. '50 Acts of Greatness').
    """
    section_df = cysh.get_object_df('Section__c', ['Id', 'Name', 'Intervention_Primary_Staff__c', 'School__c', 'Program__c', 'Active__c'], rename_id=True, rename_name=True)
    program_df = cysh.get_object_df('Program__c', ['Id', 'Name'], rename_id=True, rename_name=True)
    df = section_df.merge(program_df, how='left', on='Program__c')

    # Fix: filter consistently on the merged frame. The original mixed
    # ``df`` and ``section_df`` in one mask, which only worked because the
    # left merge happened to preserve row order.
    sections_to_delete = df.loc[
        (df['Program__c_Name'] == section_type) &
        (df['Active__c'] == True),
        'Section__c'
    ]

    # Fix: report the requested section type instead of hardcoding '50 Acts'.
    print(f"{len(sections_to_delete)} '{section_type}' sections to de-activate.")

    for section_id in sections_to_delete:
        cysh.sf.Section__c.update(section_id, {'Active__c': False})
| mrklees/cy-automation-library | cyautomation/cyschoolhouse/section_creation_chi.py | Python | gpl-3.0 | 7,625 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-05-14 06:33
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``log_file`` to AsyncCronMail and restrict ``csvfile`` to .csv."""

    dependencies = [
        ('cron', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='asynccronmail',
            name='log_file',
            field=models.FileField(null=True, upload_to='emails/'),
        ),
        migrations.AlterField(
            model_name='asynccronmail',
            name='csvfile',
            # Only .csv uploads are accepted from now on.
            field=models.FileField(upload_to='emails/', validators=[django.core.validators.FileExtensionValidator(['csv'])]),
        ),
    ]
| Spoken-tutorial/spoken-website | cron/migrations/0002_auto_20200514_1203.py | Python | gpl-3.0 | 725 |
# http://realiseyourdreams.wordpress.com/latex-scripts/
#Script to remove all LaTex-Comments from .tex-files
import os,subprocess

# Walk the current directory tree and strip LaTeX comments from every .tex
# file in place. Each file is renamed to <name>~, rewritten without comment
# text, then the backup is removed. Python 2 script (print statement).
#browse the directory
for dirname, dirnames, filenames in os.walk('.'):
    for filename in filenames:
        #Check every file for:
        filepath = os.path.join(dirname, filename)
        #tex file
        if filename.endswith('.tex'):
            print "open file: "+filepath
            #rename the file
            tmpfilepath = filepath+"~"
            process = subprocess.Popen(["mv",filepath,tmpfilepath])
            process.wait()
            newfile = open(filepath,"w")
            oldfile = open(tmpfilepath,'r')
            for line in oldfile.readlines():
                #Search for a comment
                pos = line.find('%')
                if pos != -1:
                    #Write the line back without the comment
                    #Sometimes the percent-sign is important. Therefore just erase the comment
                    # Keeps the '%' itself and drops everything after it.
                    newfile.write(line[0:pos+1]+"\n")
                else:
                    #Write the line completely
                    newfile.write(line)
            newfile.close()
            oldfile.close()
            process = subprocess.Popen(["rm",tmpfilepath])
            process.wait()

# no attemp is made to preserve \% or comments inside verbose envirinments
| rvelseg/MuDoVLaGM | utils/removeLatexComments.py | Python | gpl-3.0 | 1,034 |
import os,logging
from autotest.client.shared import error
from virttest import utils_test
def run_9p(test, params, env):
    """
    Run an autotest test inside a guest.

    Mounts a 9p (virtio) filesystem inside the guest according to the
    ``9p_*`` test parameters, then runs the configured autotest control
    file in the guest.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    mount_dir = params.get("9p_mount_dir")

    if mount_dir is None:
        logging.info("User Variable for mount dir is not set")
    else:
        session.cmd("mkdir -p %s" % mount_dir)

        # Build the 9p mount options from the test parameters.
        mount_option = " trans=virtio"

        p9_proto_version = params.get("9p_proto_version", "9p2000.L")
        mount_option += ",version=" + p9_proto_version

        guest_cache = params.get("9p_guest_cache")
        if guest_cache == "yes":
            mount_option += ",cache=loose"

        posix_acl = params.get("9p_posix_acl")
        if posix_acl == "yes":
            mount_option += ",posixacl"

        logging.info("Mounting 9p mount point with options %s" % mount_option)
        cmd = "mount -t 9p -o %s autotest_tag %s" % (mount_option, mount_dir)
        mount_status = session.get_command_status(cmd)

        if (mount_status != 0):
            logging.error("mount failed")
            raise error.TestFail('mount failed.')

    # Collect test parameters
    timeout = int(params.get("test_timeout", 14400))
    control_path = os.path.join(test.virtdir, "autotest_control",
                                params.get("test_control_file"))
    outputdir = test.outputdir

    utils_test.run_autotest(vm, session, control_path,
                            timeout, outputdir, params)
| ehabkost/virt-test | qemu/tests/9p.py | Python | gpl-2.0 | 1,818 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sh
from stackalytics.openstack.common import log as logging
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
class Vcs(object):
    """Base class for version-control backends.

    Validates (and creates, if missing) the sources root directory on
    construction. The remaining methods are no-op hooks for subclasses.
    """

    def __init__(self, repo, sources_root):
        self.repo = repo
        self.sources_root = sources_root

        if os.path.exists(sources_root):
            # An existing root must be writable so repos can be stored in it.
            if not os.access(sources_root, os.W_OK):
                raise Exception('Sources root folder %s is not writable' %
                                sources_root)
        else:
            os.mkdir(sources_root)

    def fetch(self):
        """Refresh the local copy of the repository. No-op here."""
        pass

    def get_release_index(self):
        """Return a mapping of commit ids to release names. No-op here."""
        pass

    def log(self, branch, head_commit_id):
        """Yield commit records from *branch*. No-op here."""
        pass

    def get_last_id(self, branch):
        """Return the head commit id of *branch*. No-op here."""
        pass
# (field name, git pretty-format placeholder) pairs used to build the
# custom `git log` output format and the regex that parses it back.
GIT_LOG_PARAMS = [
    ('commit_id', '%H'),
    ('date', '%at'),
    ('author_name', '%an'),
    ('author_email', '%ae'),
    ('subject', '%s'),
    ('message', '%b'),
]
# Format string passed to `git log --pretty=...`; each field on its own line,
# terminated by a 'diff_stat:' marker before the --shortstat output.
GIT_LOG_FORMAT = ''.join([(r[0] + ':' + r[1] + '%n')
                          for r in GIT_LOG_PARAMS]) + 'diff_stat:'
# Parses the `--shortstat` summary: files changed plus optional
# insertions/deletions counts.
DIFF_STAT_PATTERN = ('[^\d]+(\d+)\s+[^\s]*\s+changed'
                     '(,\s+(\d+)\s+([^\d\s]*)\s+(\d+)?)?')
# Full per-commit record pattern: every GIT_LOG_PARAMS field followed by
# the diff stat.
GIT_LOG_PATTERN = re.compile(''.join([(r[0] + ':(.*?)\n')
                                      for r in GIT_LOG_PARAMS]) +
                             'diff_stat:' + DIFF_STAT_PATTERN,
                             re.DOTALL)
# Extract bug / blueprint / Gerrit change references from commit messages.
MESSAGE_PATTERNS = {
    'bug_id': re.compile(r'bug[\s#:]*(?P<id>\d+)', re.IGNORECASE),
    'blueprint_id': re.compile(r'\b(?:blueprint|bp)\b[ \t]*[#:]?[ \t]*'
                               r'(?P<id>[a-z0-9-]+)', re.IGNORECASE),
    'change_id': re.compile('Change-Id: (?P<id>I[0-9a-f]{40})', re.IGNORECASE),
}
class Git(Vcs):
    """Git backend: clones/fetches a repo and parses `git log` output.

    Python 2 code (uses ``unicode`` and ``iteritems``); shells out to git
    via the ``sh`` module and relies on ``os.chdir`` for repo context.
    """

    def __init__(self, repo, sources_root):
        super(Git, self).__init__(repo, sources_root)
        uri = self.repo['uri']
        # Local clone folder is derived from the last path component of the uri.
        match = re.search(r'([^\/]+)\.git$', uri)
        if match:
            self.folder = os.path.normpath(self.sources_root + '/' +
                                           match.group(1))
        else:
            raise Exception('Unexpected uri %s for git' % uri)
        self.release_index = {}

    def _checkout(self, branch):
        """Check out origin/<branch>; return False (and log) on failure."""
        try:
            sh.git('checkout', 'origin/' + branch)
            return True
        except sh.ErrorReturnCode as e:
            LOG.error('Unable to checkout branch %(branch)s from repo '
                      '%(uri)s. Ignore it',
                      {'branch': branch, 'uri': self.repo['uri']})
            LOG.exception(e)
            return False

    def fetch(self):
        """Clone the repo if absent (or if the remote changed), else fetch."""
        LOG.debug('Fetching repo uri %s' % self.repo['uri'])

        if os.path.exists(self.folder):
            os.chdir(self.folder)
            uri = str(sh.git('config', '--get', 'remote.origin.url')).strip()
            if uri != self.repo['uri']:
                # The folder holds a clone of a different remote: discard it.
                LOG.debug('Repo uri %(uri)s differs from cloned %(old)s',
                          {'uri': self.repo['uri'], 'old': uri})
                os.chdir('..')
                shutil.rmtree(self.folder)

        if not os.path.exists(self.folder):
            os.chdir(self.sources_root)
            try:
                sh.git('clone', self.repo['uri'])
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to clone git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)
            os.chdir(self.folder)
        else:
            os.chdir(self.folder)
            try:
                sh.git('fetch')
            except sh.ErrorReturnCode as e:
                LOG.error('Unable to fetch git repo %s. Ignore it',
                          self.repo['uri'])
                LOG.exception(e)

        self.get_release_index()

    def get_release_index(self):
        """Build (and cache) the commit-id -> release-name mapping."""
        if not os.path.exists(self.folder):
            return {}

        LOG.debug('Get release index for repo uri: %s', self.repo['uri'])
        os.chdir(self.folder)
        if not self.release_index:
            for release in self.repo['releases']:
                release_name = release['release_name'].lower()

                if 'branch' in release:
                    branch = release['branch']
                else:
                    branch = 'master'
                if not self._checkout(branch):
                    continue

                # Either a tag range (from..to) or everything up to tag_to.
                if 'tag_from' in release:
                    tag_range = release['tag_from'] + '..' + release['tag_to']
                else:
                    tag_range = release['tag_to']

                git_log_iterator = sh.git('log', '--pretty=%H', tag_range,
                                          _tty_out=False)
                for commit_id in git_log_iterator:
                    self.release_index[commit_id.strip()] = release_name
        return self.release_index

    def log(self, branch, head_commit_id):
        """Yield commit dicts for *branch*, newest first, since *head_commit_id*."""
        LOG.debug('Parsing git log for repo uri %s', self.repo['uri'])

        os.chdir(self.folder)
        if not self._checkout(branch):
            return

        commit_range = 'HEAD'
        if head_commit_id:
            commit_range = head_commit_id + '..HEAD'
        output = sh.git('log', '--pretty=%s' % GIT_LOG_FORMAT, '--shortstat',
                        '-M', '--no-merges', commit_range, _tty_out=False,
                        _decode_errors='ignore')

        for rec in re.finditer(GIT_LOG_PATTERN, str(output)):
            i = 1
            commit = {}
            # Fields in the same order as GIT_LOG_PARAMS / GIT_LOG_PATTERN groups.
            for param in GIT_LOG_PARAMS:
                commit[param[0]] = unicode(rec.group(i), 'utf8')
                i += 1

            if not utils.check_email_validity(commit['author_email']):
                continue

            commit['files_changed'] = int(rec.group(i))
            i += 1
            lines_changed_group = rec.group(i)
            i += 1
            lines_changed = rec.group(i)
            i += 1
            deleted_or_inserted = rec.group(i)
            i += 1
            lines_deleted = rec.group(i)
            i += 1

            if lines_changed_group:  # there inserted or deleted lines
                if not lines_deleted:
                    if deleted_or_inserted[0] == 'd':  # deleted
                        # Only a deletions count was present; swap the fields.
                        lines_deleted = lines_changed
                        lines_changed = 0

            commit['lines_added'] = int(lines_changed or 0)
            commit['lines_deleted'] = int(lines_deleted or 0)

            # Pull bug / blueprint / change-id references out of the message.
            for pattern_name, pattern in MESSAGE_PATTERNS.iteritems():
                collection = set()
                for item in re.finditer(pattern, commit['message']):
                    collection.add(item.group('id'))
                commit[pattern_name] = list(collection)

            commit['date'] = int(commit['date'])
            commit['module'] = self.repo['module']
            commit['branches'] = set([branch])
            if commit['commit_id'] in self.release_index:
                commit['release'] = self.release_index[commit['commit_id']]
            else:
                commit['release'] = None

            if 'blueprint_id' in commit:
                # Namespace blueprint ids by module.
                commit['blueprint_id'] = [(commit['module'] + ':' + bp_name)
                                          for bp_name
                                          in commit['blueprint_id']]

            yield commit

    def get_last_id(self, branch):
        """Return the current HEAD commit id of *branch*, or None on failure."""
        LOG.debug('Get head commit for repo uri: %s', self.repo['uri'])

        os.chdir(self.folder)
        if not self._checkout(branch):
            return None
        return str(sh.git('rev-parse', 'HEAD')).strip()
def get_vcs(repo, sources_root):
    """Return the VCS driver for *repo*.

    Only git is supported (uris ending in ``.git``); anything else falls
    back to the no-op base ``Vcs``.

    :param repo: repo description dict; must contain 'uri'.
    :param sources_root: directory under which working copies are stored.
    """
    uri = repo['uri']
    LOG.debug('Factory is asked for VCS uri: %s', uri)
    match = re.search(r'\.git$', uri)
    if match:
        return Git(repo, sources_root)
    else:
        LOG.warning('Unsupported VCS, fallback to dummy')
        # Bug fix: the fallback passed ``uri`` as the sources root, which
        # would make Vcs.__init__ try to mkdir a folder named after the uri.
        return Vcs(repo, sources_root)
| joshuamckenty/stackalytics | stackalytics/processor/vcs.py | Python | apache-2.0 | 8,415 |
# -*- coding: utf-8 -*-
# Copyright 2017 Ignacio Ibeas <ignacio@acysos.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    @api.multi
    def _connect_wsdl(self, wsdl, port_name):
        """Create the SII SOAP service, with a Navarra-specific endpoint.

        For companies in Navarra (state code 'NA' or '31') the service is
        built against the Navarra SII url; otherwise the standard behaviour
        from the parent module is used.
        """
        self.ensure_one()
        company = self.company_id
        if company.state_id.code == 'NA' or company.state_id.code == '31':
            client = self._connect_sii(wsdl)
            client._default_service_name = 'siiService'
            port_name = self._get_test_mode(port_name)
            client._default_port_name = port_name
            # Navarra temporal fix
            # The AEAT wsdl location moved; rewrite it so the binding name
            # resolves against the current hosting path.
            wsdl2 = wsdl.replace(
                'https://www.agenciatributaria.es/static_files/AEAT/Contenidos_Comunes/La_Agencia_Tributaria/Modelos_y_formularios/Suministro_inmediato_informacion/FicherosSuministros/V_1_1/',
                'https://www2.agenciatributaria.gob.es/static_files/common/internet/dep/aplicaciones/es/aeat/ssii/fact/ws/'
            )
            binding_name = '{'+wsdl2+'}siiBinding'
            # Endpoint url depends on whether the company runs in SII test mode.
            if company.sii_test:
                url = self.env['ir.config_parameter'].get_param(
                    'l10n_es_aeat_sii.url_soap_test.31', False)
            else:
                url = self.env['ir.config_parameter'].get_param(
                    'l10n_es_aeat_sii.url_soap.31', False)
            return client.create_service(binding_name, url)
        else:
            return super(AccountInvoice, self)._connect_wsdl(wsdl, port_name)
| acysos/odoo-addons | l10n_es_aeat_sii_navarra/models/account_invoice.py | Python | agpl-3.0 | 1,550 |
#!/usr/bin/env python
#
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all targeting presets.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Print every targeting preset in the Ad Manager network.

    Pages through TargetingPresetService results until exhausted.

    Args:
      client: an initialized ad_manager.AdManagerClient.
    """
    # Initialize appropriate service.
    targeting_preset_service = client.GetService(
        'TargetingPresetService', version='v202111')

    # Create a statement to select suggested ad units.
    statement = ad_manager.StatementBuilder(version='v202111')

    # Retrieve a small number of targeting presets at a time, paging
    # through until all targeting presets have been retrieved.
    while True:
        response = targeting_preset_service.getTargetingPresetsByStatement(
            statement.ToStatement())
        if 'results' in response and len(response['results']):
            for targeting_preset in response['results']:
                # Print out some information for each targeting preset.
                print(
                    'Targeting preset with ID "%d" and name "%s" was found.\n'
                    % (targeting_preset['id'], targeting_preset['name']))
            statement.offset += statement.limit
        else:
            break

    print('\nNumber of results found: %s' % response['totalResultSetSize'])
# Script entry point: build a client from the googleads.yaml storage file.
if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
| googleads/googleads-python-lib | examples/ad_manager/v202111/targeting_preset_service/get_all_targeting_presets.py | Python | apache-2.0 | 1,892 |
import os

# Python 2 results-processing script. This section pairs each line of
# 'clientcalls' (total time) with a line of 'times' (overhead to subtract),
# averages the differences in buckets of BUCKET requests, and writes a TSV
# of (request index, mean time) to /results.
f = open('clientcalls')
f2 = open('times')
BUCKET=250
CASE=os.getenv('CASE')
sums = []
cnt = 0
tmp = 0
try:
    for s in f:
        parts = s.split(',')
        if len(parts) != 2: continue
        v_total = float(parts[1])
        # f2 advances in lockstep with f; StopIteration ends processing.
        v_off = float(f2.next())
        v = v_total - v_off
        tmp += v
        cnt += 1
        if cnt % BUCKET == 0:
            sums.append(tmp / BUCKET)
            tmp = 0
except StopIteration:
    pass
f.close()
f = open('/results/timing_%s.tsv' % CASE, 'w')
f.truncate()
i = 1
for p in sums:
    f.write("%d\t%s\n" % (i * BUCKET, p))
    i += 1
f.close()
f = open('bytes')
def conf_int_native(x, ci=0.95):
    """Return (mean, lower bound, upper bound) of array *x* at confidence *ci*.

    Note: sorts *x* in place. An empty input yields (0, 0, 0).
    """
    if len(x) == 0:
        return (0, 0, 0)
    # Split the excluded probability mass evenly between the two tails.
    tail = (1 - ci) * .5
    low_idx = int(tail * x.size)
    high_idx = int((1 - tail) * x.size)
    x.sort()
    return x.mean(), x[low_idx], x[high_idx]
# Read per-request byte counts, compute mean with 95% bounds, and print a
# TSV row whose first column encodes the test case. Python 2 print statement.
xs = f.readlines()
f.close()
xs = [int(x) for x in xs]
import numpy
a=numpy.array(xs)
# Fixed row ids per scenario for the downstream plot.
if CASE == 'evernote':
    item = 10
elif CASE == 'twitter':
    item = 20
else:
    item = 30
(b, c, d) = conf_int_native(a)
print "%d\t%d\t%d\t%d" % (item, b, c, d)
f = open('memory')
f = open('memory')
sums = []
cnt = 0.
tmp = 0
buf = 0
buf2 = 0
try:
for s in f:
parts = s.split(',')
if len(parts) != 4: continue
req = int(parts[3])
mem = float(parts[1])
store = float(parts[2])
buf += mem
buf2 += store
cnt += 1.
if req - tmp > BUCKET:
sums.append((tmp, buf / cnt, buf2 / cnt))
tmp = req
cnt = 0
buf = 0
buf2 = 0
except StopIteration:
pass
f.close()
f = open('/results/memory_%s.tsv' % CASE, 'w')
f.truncate()
for (i,p,s) in sums:
f.write("%d\t%s\t%s\n" % (i, p, s))
i += 1
f.close()
| wayetender/whip | benchmarks/parse_results/parse.py | Python | gpl-2.0 | 1,744 |
from pynhost import matching, dynamic, utilities
class Command:
    """A spoken command: the recognized words plus the ActionLists they map to."""

    def __init__(self, words):
        self.words = words
        # Words not yet consumed by a rule match or literal text.
        self.remaining_words = words
        self.action_lists = []

    def set_results(self, gram_handler, log_handler):
        """Consume remaining_words into ActionLists via rule matches or literals."""
        while self.remaining_words:
            action_list = ActionList(self)
            rule_match = self.get_rule_match(gram_handler)
            if rule_match is not None:
                action_list.add_rule_match(rule_match)
                if action_list.contains_non_repeat_actions():
                    # Wrap matched actions with match-level trigger actions.
                    action_list.actions = gram_handler.triggered['match']['before'] + \
                    action_list.actions + gram_handler.triggered['match']['after']
                self.remaining_words = rule_match.remaining_words
                utilities.log_message(log_handler, 'info', 'Input "{}" matched rule {} '
                    'in grammar {}'.format(' '.join(rule_match.matched_words), rule_match.rule, rule_match.rule.grammar))
            else:
                # No rule matched: treat the next word as literal text.
                action_list.add_string(self.remaining_words[0], gram_handler)
                self.remaining_words = self.remaining_words[1:]
            gram_handler.add_actions_to_recording_macros(action_list)
            if action_list.actions:
                self.action_lists.append(action_list)
        # add command level triggers
        non_repeats = [l for l in self.action_lists if l.contains_non_repeat_actions()]
        if non_repeats:
            non_repeats[0].actions = gram_handler.triggered['command']['before'] + non_repeats[0].actions
            non_repeats[-1].actions.extend(gram_handler.triggered['command']['after'])

    def get_rule_match(self, gram_handler):
        """Return the first rule match over remaining_words, or None."""
        for grammar in gram_handler.get_matching_grammars():
            for rule in grammar._rules:
                rule_match = matching.get_rule_match(rule,
                                                     self.remaining_words,
                                                     grammar.settings['filtered words'])
                if rule_match is not None:
                    return rule_match

    def remove_repeats(self):
        """Drop ActionLists that contain only repeat actions."""
        purged_lists = []
        for action_list in self.action_lists:
            if action_list.contains_non_repeat_actions():
                purged_lists.append(action_list)
        self.action_lists = purged_lists
class ActionList:
    """Actions produced by one rule match or one literal word of a Command."""

    def __init__(self, command):
        self.command = command
        self.actions = []
        self.matched_words = []
        # Set when this list came from a rule match (vs. literal text).
        self.rule_match = None

    def add_rule_match(self, rule_match):
        """Populate actions from a rule match."""
        self.actions = new_action_list(rule_match.rule.actions, rule_match,)
        self.rule_match = rule_match

    def add_string(self, text, gram_handler):
        """Append a literal word, wrapped in word-level trigger actions."""
        # Separate consecutive literal words with a space.
        if self.command.action_lists and self.command.action_lists[-1].rule_match is None:
            self.actions.append(' ')
        self.actions.extend(new_action_list(gram_handler.triggered['word']['before']))
        self.actions.append(text)
        self.actions.extend(new_action_list(gram_handler.triggered['word']['after']))

    def contains_non_repeat_actions(self):
        '''
        Because repeating repeat actions can get ugly real fast
        '''
        for action in self.actions:
            if not isinstance(action, (int, dynamic.RepeatCommand)):
                return True
        return False

    def __str__(self):
        return '<ActionList matching words {}>'.format(' '.join(self.matched_words))

    def __repr__(self):
        return str(self)
# Fix: this region contained a truncated copy of new_action_list followed by
# lines duplicated from ActionList's methods, then a second, complete
# definition. The duplication is removed; one clean definition remains.
def new_action_list(raw_actions, rule_match=None):
    """Normalize raw rule actions into executable action entries.

    Each entry becomes one of:
      - the evaluated value of a ``dynamic.Num`` (resolved against *rule_match*),
      - a ``(func, args, kwargs)`` triple for tuples/lists and bare callables
        (callables receive the matched words as their single argument), or
      - the action unchanged (e.g. literal strings).

    A 4th truthy tuple element prepends the matched words to ``args``.
    """
    words = rule_match.matched_words if rule_match else ()
    new_actions = []
    for action in raw_actions:
        if isinstance(action, dynamic.Num):
            action = action.evaluate(rule_match)
        elif isinstance(action, (list, tuple)):
            func = action[0]
            args = action[1] if len(action) > 1 else []
            kwargs = action[2] if len(action) > 2 else {}
            if len(action) > 3 and action[3]:
                args.insert(0, words)
            action = (func, args, kwargs)
        elif callable(action):
            action = (action, [words], {})
        new_actions.append(action)
    return new_actions
| evfredericksen/pynacea | pynhost/pynhost/commands.py | Python | mit | 4,812 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import logging
import re
import signal
import socket
import traceback
from collections import Sequence
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six import BytesIO, binary_type
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.loader import cliconf_loader, terminal_loader
from ansible.plugins.connection.paramiko_ssh import Connection as _Connection
from ansible.utils.jsonrpc import Rpc
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(Rpc, _Connection):
''' CLI (shell) SSH connections on Paramiko '''
transport = 'network_cli'
has_pipelining = True
    def __init__(self, play_context, new_stdin, *args, **kwargs):
        """Set up connection state; no network activity happens here."""
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Populated lazily in _connect(): terminal/cliconf plugins and the
        # paramiko channel.
        self._terminal = None
        self._cliconf = None
        self._shell = None

        # Prompt-matching state and raw response history for debugging.
        self._matched_prompt = None
        self._matched_pattern = None
        self._last_response = None
        self._history = list()

        self._play_context = play_context

        # Surface paramiko's debug logging at high verbosity (-vvvv).
        if play_context.verbosity > 3:
            logging.getLogger('paramiko').setLevel(logging.DEBUG)
    def update_play_context(self, play_context):
        """Updates the play context information for the connection.

        Raises or lowers privilege on the live terminal when the ``become``
        flag changed between the old and new play contexts.
        """
        display.display('updating play_context for connection', log_only=True)
        if self._play_context.become is False and play_context.become is True:
            # Escalating: enter privileged mode with the become password.
            auth_pass = play_context.become_pass
            self._terminal.on_authorize(passwd=auth_pass)

        elif self._play_context.become is True and not play_context.become:
            # De-escalating: drop privileged mode.
            self._terminal.on_deauthorize()

        self._play_context = play_context
def _connect(self):
"""Connections to the device and sets the terminal type"""
if self._play_context.password and not self._play_context.private_key_file:
C.PARAMIKO_LOOK_FOR_KEYS = False
super(Connection, self)._connect()
display.display('ssh connection done, setting terminal', log_only=True)
self._shell = self.ssh.invoke_shell()
self._shell.settimeout(self._play_context.timeout)
network_os = self._play_context.network_os
if not network_os:
raise AnsibleConnectionFailure(
'Unable to automatically determine host network os. Please '
'manually configure ansible_network_os value for this host'
)
self._terminal = terminal_loader.get(network_os, self)
if not self._terminal:
raise AnsibleConnectionFailure('network os %s is not supported' % network_os)
display.display('loaded terminal plugin for network_os %s' % network_os, log_only=True)
self._cliconf = cliconf_loader.get(network_os, self)
if self._cliconf:
self._rpc.add(self._cliconf)
display.display('loaded cliconf plugin for network_os %s' % network_os, log_only=True)
else:
display.display('unable to load cliconf for network_os %s' % network_os)
self.receive()
display.display('firing event: on_open_shell()', log_only=True)
self._terminal.on_open_shell()
if getattr(self._play_context, 'become', None):
display.display('firing event: on_authorize', log_only=True)
auth_pass = self._play_context.become_pass
self._terminal.on_authorize(passwd=auth_pass)
self._connected = True
display.display('ssh connection has completed successfully', log_only=True)
def close(self):
"""Close the active connection to the device
"""
display.display("closing ssh connection to device", log_only=True)
if self._shell:
display.display("firing event: on_close_shell()", log_only=True)
self._terminal.on_close_shell()
self._shell.close()
self._shell = None
display.display("cli session is now closed", log_only=True)
super(Connection, self).close()
self._connected = False
display.display("ssh connection has been closed successfully", log_only=True)
def receive(self, command=None, prompts=None, answer=None):
"""Handles receiving of output from command"""
recv = BytesIO()
handled = False
self._matched_prompt = None
while True:
data = self._shell.recv(256)
recv.write(data)
offset = recv.tell() - 256 if recv.tell() > 256 else 0
recv.seek(offset)
window = self._strip(recv.read())
if prompts and not handled:
handled = self._handle_prompt(window, prompts, answer)
if self._find_prompt(window):
self._last_response = recv.getvalue()
resp = self._strip(self._last_response)
return self._sanitize(resp, command)
def send(self, command, prompts=None, answer=None, send_only=False):
"""Sends the command to the device in the opened shell"""
try:
self._history.append(command)
self._shell.sendall(b'%s\r' % command)
if send_only:
return
return self.receive(command, prompts, answer)
except (socket.timeout, AttributeError):
display.display(traceback.format_exc(), log_only=True)
raise AnsibleConnectionFailure("timeout trying to send command: %s" % command.strip())
def _strip(self, data):
"""Removes ANSI codes from device response"""
for regex in self._terminal.ansi_re:
data = regex.sub(b'', data)
return data
def _handle_prompt(self, resp, prompts, answer):
"""
Matches the command prompt and responds
:arg resp: Byte string containing the raw response from the remote
:arg prompts: Sequence of byte strings that we consider prompts for input
:arg answer: Byte string to send back to the remote if we find a prompt.
A carriage return is automatically appended to this string.
:returns: True if a prompt was found in ``resp``. False otherwise
"""
prompts = [re.compile(r, re.I) for r in prompts]
for regex in prompts:
match = regex.search(resp)
if match:
self._shell.sendall(b'%s\r' % answer)
return True
return False
def _sanitize(self, resp, command=None):
"""Removes elements from the response before returning to the caller"""
cleaned = []
for line in resp.splitlines():
if (command and line.strip() == command.strip()) or self._matched_prompt.strip() in line:
continue
cleaned.append(line)
return b'\n'.join(cleaned).strip()
def _find_prompt(self, response):
"""Searches the buffered response for a matching command prompt"""
errored_response = None
is_error_message = False
for regex in self._terminal.terminal_stderr_re:
if regex.search(response):
is_error_message = True
# Check if error response ends with command prompt if not
# receive it buffered prompt
for regex in self._terminal.terminal_stdout_re:
match = regex.search(response)
if match:
errored_response = response
break
if not is_error_message:
for regex in self._terminal.terminal_stdout_re:
match = regex.search(response)
if match:
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
if not errored_response:
return True
if errored_response:
raise AnsibleConnectionFailure(errored_response)
return False
def alarm_handler(self, signum, frame):
"""Alarm handler raised in case of command timeout """
display.display('closing shell due to sigalarm', log_only=True)
self.close()
def exec_command(self, cmd):
"""Executes the cmd on in the shell and returns the output
The method accepts three forms of cmd. The first form is as a byte
string that represents the command to be executed in the shell. The
second form is as a utf8 JSON byte string with additional keywords.
The third form is a json-rpc (2.0)
Keywords supported for cmd:
:command: the command string to execute
:prompt: the expected prompt generated by executing command.
This can be a string or a list of strings
:answer: the string to respond to the prompt with
:sendonly: bool to disable waiting for response
:arg cmd: the byte string that represents the command to be executed
which can be a single command or a json encoded string.
:returns: a tuple of (return code, stdout, stderr). The return
code is an integer and stdout and stderr are byte strings
"""
try:
obj = json.loads(to_text(cmd, errors='surrogate_or_strict'))
except (ValueError, TypeError):
obj = {'command': to_bytes(cmd.strip(), errors='surrogate_or_strict')}
obj = dict((k, to_bytes(v, errors='surrogate_or_strict', nonstring='passthru')) for k, v in obj.items())
if 'prompt' in obj:
if isinstance(obj['prompt'], binary_type):
# Prompt was a string
obj['prompt'] = [obj['prompt']]
elif not isinstance(obj['prompt'], Sequence):
# Convert nonstrings into byte strings (to_bytes(5) => b'5')
if obj['prompt'] is not None:
obj['prompt'] = [to_bytes(obj['prompt'], errors='surrogate_or_strict')]
else:
# Prompt was a Sequence of strings. Make sure they're byte strings
obj['prompt'] = [to_bytes(p, errors='surrogate_or_strict') for p in obj['prompt'] if p is not None]
if 'jsonrpc' in obj:
if self._cliconf:
out = self._exec_rpc(obj)
else:
out = self.internal_error("cliconf is not supported for network_os %s" % self._play_context.network_os)
return 0, to_bytes(out, errors='surrogate_or_strict'), b''
if obj['command'] == b'prompt()':
return 0, self._matched_prompt, b''
try:
if not signal.getsignal(signal.SIGALRM):
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self._play_context.timeout)
out = self.send(obj['command'], obj.get('prompt'), obj.get('answer'), obj.get('sendonly'))
signal.alarm(0)
return 0, out, b''
except (AnsibleConnectionFailure, ValueError) as exc:
return 1, b'', to_bytes(exc)
| nrwahl2/ansible | lib/ansible/plugins/connection/network_cli.py | Python | gpl-3.0 | 11,931 |
import requests
from django.core.management.base import BaseCommand
from django.core.files.base import ContentFile
from candidates.models import Ballot
from elections.models import Election
from official_documents.models import OfficialDocument
class Command(BaseCommand):
    """This command uses the ballots endpoint to loop over each
    ballot and store each sopn pdf (uploaded_file) locally"""
    def add_arguments(self, parser):
        # CLI options selecting which elections to process and which site
        # to fetch the ballots API from.
        parser.add_argument(
            "--date",
            "-d",
            action="store",
            help="Election date in ISO format, defaults to 2021-05-06",
            default="2021-05-06",
            type=str,
        )
        parser.add_argument(
            "--site_url",
            "-u",
            action="store",
            help="URL of site to download from",
            default="https://candidates.democracyclub.org.uk/",
            type=str,
        )
        # NOTE(review): help text looks copy-pasted from --site_url; this
        # option actually caps how many elections are processed.
        parser.add_argument(
            "--election-count",
            "-c",
            action="store",
            help="URL of site to download from",
            default=50,
            type=int,
        )
        parser.add_argument(
            "--election-slugs", "-s", action="store", required=False
        )
    def handle(self, *args, **options):
        # Resolve the election slugs to process (explicit list or query by
        # date), then fetch SOPNs for each one.
        site_url = options.get("site_url")
        election_date = options.get("date")
        election_count = options.get("election_count")
        if options["election_slugs"]:
            election_slugs = options["election_slugs"].split(",")
        else:
            election_slugs = Election.objects.filter(
                election_date=election_date
            ).values_list("slug", flat=True)[:election_count]
        for slug in election_slugs:
            url = f"{site_url}api/next/ballots/?has_sopn=1&page_size=200&election_id={slug}"
            self.create_official_documents(url=url)
    def create_official_documents(self, url):
        # Download the SOPN for every ballot in this API page; recurses on
        # the "next" link for elections with more than one page of ballots.
        data = requests.get(url=url).json()
        next_page = data["next"]
        for ballot_data in data["results"]:
            ballot = Ballot.objects.get(
                ballot_paper_id=ballot_data["ballot_paper_id"]
            )
            sopn_data = ballot_data["sopn"]
            # if we already have the SOPN no need to recreate
            if ballot.officialdocument_set.filter(
                source_url=sopn_data["source_url"]
            ).exists():
                self.stdout.write(
                    f"SOPN already exists for {ballot.ballot_paper_id}"
                )
                continue
            # check if we already have an OfficialDocument with this source
            # downloaded
            official_document = OfficialDocument.objects.filter(
                source_url=sopn_data["source_url"]
            ).first()
            if official_document:
                # if so we dont need to redownload the file, we can create a new
                # object for this ballot with the same file
                self.stdout.write(
                    f"Found SOPN for source {sopn_data['source_url']}"
                )
                OfficialDocument.objects.create(
                    ballot=ballot,
                    source_url=sopn_data["source_url"],
                    uploaded_file=official_document.uploaded_file,
                    document_type=OfficialDocument.NOMINATION_PAPER,
                )
                continue
            # otherwise we dont have this file stored already, so download it as
            # part of creating the OfficialDocument
            self.stdout.write(
                f"Downloading SOPN from {sopn_data['uploaded_file']}"
            )
            file_response = requests.get(sopn_data["uploaded_file"])
            file_object = ContentFile(content=file_response.content)
            official_document = OfficialDocument(
                ballot=ballot,
                source_url=sopn_data["source_url"],
                document_type=OfficialDocument.NOMINATION_PAPER,
            )
            # Name the stored file after the ballot, keeping the remote
            # file's extension.
            file_extension = sopn_data["uploaded_file"].split(".")[-1]
            filename = f"{ballot.ballot_paper_id}.{file_extension}"
            official_document.uploaded_file.save(
                name=filename, content=file_object
            )
        # this should only be the case where the election object has > 200
        # ballots e.g. parliamentary elections
        if next_page:
            return self.create_official_documents(url=next_page)
| DemocracyClub/yournextrepresentative | ynr/apps/sopn_parsing/management/commands/sopn_tooling_create_official_documents.py | Python | agpl-3.0 | 4,476 |
from __future__ import division
from numba import autojit
import numpy as np
from math import sqrt
__author__ = 'larry'
def calc_nucl_repulsion(atom_charge, cart_matrix):
    """Calculate the nuclear repulsion energy between atoms in the system.

    Parameters
    ----------
    atom_charge : array-like
        list of nuclear charges for each atom in system
    cart_matrix : ndarray
        matrix of coordinates for each atom in system

    Returns
    -------
    nuc_repl : float
        nuclear repulsion energy

    Notes
    -----
    The nuclear repulsion energy can be calculated using the classical
    coulomb potential formula:

    .. math:: E_{nuc} = \\sum \\frac{Z_i Z_j}{r_{ij}}

    Only the lower diagonal portion of cart_matrix needs to be summed
    due to the symmetry of the problem, and the diagonal should not be
    summed.
    """
    nuc_repl = 0
    n_atom = atom_charge.size
    # range() instead of the Python-2-only xrange() keeps this function
    # working unchanged on both Python 2 and Python 3.
    for i in range(n_atom):
        for j in range(i):
            # Inter-nuclear distance |r_i - r_j|.
            r_ij_v = cart_matrix[i, :] - cart_matrix[j, :]
            r_ij = sqrt(np.dot(r_ij_v, r_ij_v))
            # Charge product Z_i * Z_j.
            z_ij = atom_charge[i] * atom_charge[j]
            nuc_repl += z_ij / r_ij
    return nuc_repl
'''
def DipoleMoments(self):
MuX_e = 2 * np.einsum('ij,ij', self.densityM, self.XDipoleMatrix)
MuY_e = 2 * np.einsum('ij,ij', self.densityM, self.YDipoleMatrix)
MuZ_e = 2 * np.einsum('ij,ij', self.densityM, self.ZDipoleMatrix)
MuX_N, MuY_N, MuZ_N = 0, 0, 0
Z = getAtomicCharge
if self.numAtom > 1:
for i in enumerate(self.atomType):
MuX_N += Z(i[1]) * self.cartMatrix[i[0], 0]
MuY_N += Z(i[1]) * self.cartMatrix[i[0], 1]
MuZ_N += Z(i[1]) * self.cartMatrix[i[0], 2]
MuX = MuX_e + MuX_N
MuY = MuY_e + MuY_N
MuZ = MuZ_e + MuZ_N
self.DipoleMoments = [MuX, MuY, MuZ]
def MullikenPopulation(self):
self.MullCharges = [0] * self.numAtom
Z = getAtomicCharge
# Gross Orbital Product
GOP = -2 * np.einsum('ij,ij->i', self.densityM,
self.OverlapMatrix)
for i in xrange(self.numAtom):
q = Z(self.atomType[i])
for j in enumerate(self.orbList):
if j[1].atom == i: q += GOP[j[0]]
self.MullCharges[i] = q
''' | LT12/LTPsi | molecular_props.py | Python | gpl-2.0 | 2,273 |
from setuptools import setup
# Distribution metadata for the sfdc-bulk package; picked up by
# setuptools when building/installing the project.
setup(
    name='sfdc-bulk',
    packages=['sfdc_bulk'],
    version='0.2',
    description='Python client library for SFDC bulk API',
    url='https://github.com/donaldrauscher/sfdc-bulk',
    author='Donald Rauscher',
    author_email='donald.rauscher@gmail.com',
    license='MIT',
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'requests',
        'simple_salesforce',
        'pandas',
        'pyyaml'
    ]
)
| donaldrauscher/sfdc-bulk | setup.py | Python | mit | 430 |
import sys
sys.path += ["."]
import os
os.environ['DJANGO_SETTINGS_MODULE']='solalim.settings'
import django
django.setup()
from floreal import models as m
import re
# French month names (accented and unaccented spellings) mapped to their
# month numbers; insertion order matches the calendar so the regex
# alternation below is stable.
months = {
    name: number
    for number, names in enumerate(
        [
            ("janvier",),
            ("février", "fevrier"),
            ("mars",),
            ("avril",),
            ("mai",),
            ("juin",),
            ("juillet",),
            ("août", "aout"),
            ("septembre",),
            ("octobre",),
            ("novembre",),
            ("décembre", "decembre"),
        ],
        start=1,
    )
    for name in names
}
# Matches "<day> <month name>", e.g. "12 mars"; ciregex is the
# case-insensitive compiled form.
iregex = r"(\d+)\s*("+"|".join(months.keys())+")"
ciregex = re.compile(iregex, re.IGNORECASE)
def sort_deliveries_par_network_and_date():
    """Group deliveries by '<day>/<month>: <network name>'.

    Only deliveries whose name contains a French date (see ``iregex``)
    and which have at least one purchase are kept.
    """
    dsets = {}
    for dv in m.Delivery.objects.filter(name__iregex=iregex):
        # Skip deliveries nobody bought from.
        if not m.Purchase.objects.filter(product__delivery=dv).exists():
            continue
        d, mo = ciregex.search(dv.name).groups()
        # Normalise the month name to its number so e.g. "3 Mars" and
        # "3 mars" land in the same bucket.
        ds_id = str(d) + "/" + str(months[mo.lower()])
        k = ds_id + ": " + dv.network.name
        if k not in dsets:
            dsets[k] = []
        dsets[k].append(dv)
    return dsets
def stats_dset(ds):
    """Print turnover stats for one delivery set.

    Returns ``(users, ds_total)``: the set of buyer e-mails and the total
    turnover of the set.
    """
    users = set()
    ds_total = 0.
    nb_pc = 0
    for dv in ds:
        qpc = list(m.Purchase.objects
                   .filter(product__delivery=dv)
                   .values('product__price', 'quantity', 'user__email'))
        # Turnover of this delivery: sum of price * quantity.
        dv_total = sum(x['product__price'] * x['quantity'] for x in qpc)
        ds_total += float(dv_total)
        users |= {x['user__email'] for x in qpc}
        nb_pc += len(qpc)
        print(" * %10.2f€ CA %s" % (dv_total, dv.name))
    print(" *** %10.2f€ CA du jour" % ds_total)
    # Average basket: total turnover divided by number of distinct buyers.
    print(" *** %10.2f€ panier moyens, %d acheteurs" % (
        ds_total / len(users), len(users)
    ))
    return users, ds_total
def stats_all(dsets):
    """Print stats for every delivery set, then a grand total."""
    total = 0
    users = set()
    for name, ds in dsets.items():
        print("\n"+name)
        u, ds_total = stats_dset(ds)
        users |= u
        total += ds_total
    print("\nGrand total:\n --> %10.2f€ Total" % total)
    print(" *** %10.2f€ panier moyens, %d acheteurs" % (total/len(users), len(users)))
# Script entry point: group deliveries, then print per-set and global stats.
if __name__ == "__main__":
    dsets = sort_deliveries_par_network_and_date()
    stats_all(dsets)
| fab13n/caracole | solalim/stats.py | Python | mit | 2,120 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: 2015 Bastian Blank
# License: MIT, see LICENSE.txt for details.
import array
import struct
import time
import sys
import os
from uuid import uuid4
class VHDFooter:
    """Builds and serializes the 512-byte footer of a VHD disk image."""
    _struct = struct.Struct('>8sLLQL4sL4sQQ4sLL16sB427x')
    size = _struct.size
    # VHD timestamps count seconds since 2000-01-01 00:00:00 UTC.
    vhd_timestamp_base = 946684800
    def __init__(self, size, uuid=None, timestamp=None):
        # NOTE(review): this shadows the class attribute ``size`` (the
        # packed footer length); ``VHDFooter.size`` still yields the
        # struct size while ``self.size`` is the image size in bytes.
        self.size = size
        self.timestamp = timestamp or (int(time.time()) - self.vhd_timestamp_base)
        self.uuid = uuid or uuid4()
    @staticmethod
    def _checksum(msg):
        # One's complement of the byte sum, offset so the result is a
        # positive 32-bit value.
        return 0x100000000 + ~sum(array.array("B", msg))
    def _pack_geometry(self):
        # Derive a CHS geometry from the image size, capping at the
        # maximum addressable 65535 cylinders x 16 heads x 255 sectors.
        sectors = self.size // 512
        if sectors > 65535 * 16 * 255:
            sectors = 65535 * 16 * 255
        if sectors >= 65535 * 16 * 63:
            sectorsPerTrack = 255
            heads = 16
            cylinderTimesHeads = sectors // sectorsPerTrack
        else:
            sectorsPerTrack = 17
            cylinderTimesHeads = sectors // sectorsPerTrack
            heads = (cylinderTimesHeads + 1023) // 1024
            if heads < 4:
                heads = 4
            # Bump sectors-per-track until the cylinder count fits.
            if cylinderTimesHeads >= (heads * 1024) or heads > 16:
                sectorsPerTrack = 31
                heads = 16
                cylinderTimesHeads = sectors // sectorsPerTrack
            if cylinderTimesHeads >= (heads * 1024):
                sectorsPerTrack = 63
                heads = 16
                cylinderTimesHeads = sectors // sectorsPerTrack
        cylinders = cylinderTimesHeads // heads
        return struct.pack('>HBB', cylinders, heads, sectorsPerTrack)
    def _pack(self, checksum):
        # Serialize every footer field with the given checksum value.
        return self._struct.pack(
            b'conectix', # Cookie
            0x00000002, # Features
            0x00010000, # File Format Version
            0xffffffffffffffff, # Data Offset
            self.timestamp, # Time Stamp
            b'qemu', # Creator Application
            0x00010000, # Creator Version
            b'Wi2k', # Creator Host OS
            self.size, # Original Size
            self.size, # Current Size
            self._pack_geometry(), # Disk Geometry
            2, # Disk Type
            checksum, # Checksum
            self.uuid.bytes, # Unique Id
            0, # Saved State
        )
    def pack(self):
        # Pack once with a zero checksum to compute the real checksum,
        # then repack with it filled in.
        c = self._checksum(self._pack(0))
        return self._pack(c)
# Script body: append a VHD footer describing the image's current size
# to the file named on the command line.
with open(sys.argv[1], 'rb+') as f:
    # Seek to the end to measure the raw image size.
    f.seek(0, 2)
    image_size = f.tell()
    # NOTE(review): image_size_complete is computed but never used.
    image_size_complete = image_size + VHDFooter.size
    footer = VHDFooter(image_size)
    f.write(footer.pack())
| UnicronNL/vyos-build | scripts/vhd.py | Python | gpl-2.0 | 2,790 |
class SftpException(Exception):
    """Base class for SFTP-related exceptions."""
    pass
class SftpError(SftpException):
    """SFTP operation failure."""
    pass
class SftpAlreadyExistsError(SftpError):
    """SFTP error for a target that already exists."""
    pass
class SshException(Exception):
    """Base class for SSH-related exceptions."""
    pass
class SshError(SshException):
    """SSH operation failure."""
    pass
class SshLoginError(SshError):
    """SSH login/authentication failure."""
    pass
class SshHostKeyException(SshException):
    """SSH host-key related problem."""
    pass
class SshNonblockingTryAgainException(SshException):
    """Non-blocking SSH operation would block; try again."""
    pass
class SshNoDataReceivedException(SshException):
    """No data was received over the SSH channel."""
    pass
class SshTimeoutException(SshException):
    """SSH operation timed out."""
    pass
| dsoprea/PySecure | pysecure/exceptions.py | Python | gpl-2.0 | 484 |
import os, time, shutil
from subprocess import Popen, PIPE
from script import Script
import tools, setup
def s2m(s):
    """Convert a duration in seconds to minutes.

    Durations of ten minutes or less keep one decimal place; longer
    durations are truncated to whole minutes.
    """
    tenths_of_minutes = int(s * 10 / 60)
    minutes = tenths_of_minutes / 10
    if minutes <= 10:
        return minutes
    return int(minutes)
def build_combo(fn, config, platform):
    """Run recipe ``fn`` twice for one config/platform pair.

    The first Script (build=True) performs the build, the second
    (build=False) performs the installation; ``fn`` appends the
    appropriate commands based on the Script's flag.
    """
    print('building {0} {1} {2}...'.format(fn.__name__, config, platform))
    bat = Script(True, fn.__name__, config, platform) # build script
    fn(bat)
    bat.run()
    print('installing {0} {1} {2}...'.format(fn.__name__, config, platform))
    bat = Script(False, fn.__name__, config, platform) # installation script
    fn(bat)
    bat.run()
def build_all(fn):
    """Build and install ``fn`` for every config/platform enabled in setup."""
    print('=' * 60)
    print('Building {0}...'.format(fn.__name__))
    print('=' * 60)
    start = time.time()
    if setup.BUILD_DEBUG and setup.BUILD_X86:
        build_combo(fn, 'debug', 'x86')
    if setup.BUILD_RELEASE and setup.BUILD_X86:
        build_combo(fn, 'release', 'x86')
    if setup.BUILD_DEBUG and setup.BUILD_X64:
        build_combo(fn, 'debug', 'x64')
    if setup.BUILD_RELEASE and setup.BUILD_X64:
        build_combo(fn, 'release', 'x64')
    print('-' * 60)
    print('{0} complete: {1} min'.format(fn.__name__, s2m(time.time() - start)))
def ICU(bat):
    """Build (via Visual Studio solution) or install the ICU library."""
    if bat.build:
        tools.extract('icu')
        bat.command('cd icu\\source\\allinone')
        bat.devenv('allinone.sln')
    else:
        bat.validate_devenv()
        bat.include('icu\\include')
        bat.bin('icu\\bin\\*.exe')
        bat.bin('icu\\bin\\*.dll', 'icu\\bin64\\*.dll')
        bat.lib('icu\\lib\\*.lib', 'icu\\lib64\\*.lib')
def BerkeleyDB(bat):
    """Build (via Visual Studio solution) or install Berkeley DB."""
    if bat.build:
        tools.extract('bdb')
        bat.command('cd bdb\\build_windows')
        # building the .NET solution also builds native DLLs
        bat.devenv('BDB_dotNet_vs2010.sln')
    else:
        bat.validate_devenv()
        bat.include('bdb\\build_windows\\*.h')
        bat.bin('bdb\\build_windows\\AnyCPU\\{0}\\*.dll'.format(bat.config))
        bat.bin('bdb\\build_windows\\win32\\{0}\\*.dll'.format(bat.config),
                'bdb\\build_windows\\x64\\{0}\\*.dll'.format(bat.config))
        bat.lib('bdb\\build_windows\\win32\\{0}\\*.lib'.format(bat.config),
                'bdb\\build_windows\\x64\\{0}\\*.lib'.format(bat.config))
def bjam(bat):
    """Bootstrap Boost's b2/bjam build driver (build phase only)."""
    if bat.build:
        tools.extract('boost')
        bat.command('cd boost')
        bat.command('bootstrap.bat >bjam.log')
def Boost(bat):
    """Build and install Boost with b2 (build phase only)."""
    def param(config, platform):
        # Assemble the b2 command-line flags for one config/platform.
        p = ' --hash --without-mpi toolset=msvc-{0}'.format(setup.VC_VERSION)
        p += ' -sICU_PATH={0}\\{1}'.format(setup.INSTALL, platform)
        p += ' -sZLIB_SOURCE={0}\\zlib'.format(os.getcwd())
        p += ' variant={0} link=static runtime-link=static runtime-link=shared threading=multi'.format(config)
        p += ' --prefix={0}\\{1}'.format(setup.INSTALL, platform)
        p += ' address-model={0}'.format(64 if platform=='x64' else 32)
        return p
    if bat.build:
        tools.extract('boost')
        tools.extract('zlib')
        bat.command('cd boost')
        bat.command('b2 -a {0} install'.format(param(bat.config, bat.platform)))
def clean_boost(platform):
    """Flatten the versioned Boost include dir down to include\\boost."""
    tools.rename('{0}\\{1}\\include\\{2}\\boost'.format(setup.INSTALL, platform, setup.BOOST_INCLUDE),
                 '{0}\\{1}\\include\\boost'.format(setup.INSTALL, platform))
def FreeType(bat):
    """Build (via Visual Studio solution) or install FreeType."""
    if bat.build:
        tools.extract('freetype')
        # add missing x64 support
        bat.command('copy /y archives\\freetype-project\\* freetype\\builds\\win32\\vc2010')
        bat.command('cd freetype\\builds\\win32\\vc2010')
        bat.devenv('freetype.sln')
    else:
        bat.validate_devenv()
        # Rename the versioned output libs to plain freetype(.d).lib
        # before installing them.
        bat.command('pushd freetype\\objs\\win32\\vc2010')
        if bat.config == 'release':
            bat.command('copy /y freetype246.lib freetype.lib')
            bat.command('del freetype246.lib')
        else:
            bat.command('copy /y freetype246_d.lib freetyped.lib')
            bat.command('del freetype246_d.lib')
        bat.command('popd')
        bat.include('freetype\\include')
        bat.lib('freetype\\objs\\win32\\vc2010\\*.lib')
def libjpeg(bat):
    """Build (via Visual Studio solution) or install libjpeg."""
    if bat.build:
        tools.extract('libjpeg')
        # add missing x64 support
        bat.command('copy /y archives\\jpeg-project\\* libjpeg')
        bat.command('cd libjpeg')
        bat.command('copy /y jconfig.vc jconfig.h')
        bat.devenv('jpeg.sln')
    else:
        bat.validate_devenv()
        bat.include('libjpeg\\*.h')
        bat.lib('libjpeg\\{0}\\*.lib'.format(bat.config), 'libjpeg\\x64\\{0}\\*.lib'.format(bat.config))
def zlib(bat):
    """Build (via CMake) or validate the zlib library."""
    if bat.build:
        tools.extract('zlib')
        bat.command('cd zlib')
        bat.cmake()
    else:
        bat.validate_cmake()
def libpng(bat):
    """Build (via CMake) or validate libpng; requires zlib to be built."""
    if bat.build:
        tools.extract('libpng')
        bat.command('cd libpng')
        bat.cmake()
    else:
        bat.validate_cmake()
def OpenSSL(bat):
    """Extract prebuilt OpenSSL, then install its headers/libs/binaries."""
    if bat.build:
        tools.extract('openssl')
    else:
        bat.include('openssl\\include64')
        bat.lib('openssl\\lib64')
        bat.bin('openssl\\bin64')
def PoDoFo(bat):
    """Build (via SVN export + CMake) or install the static PoDoFo library."""
    if bat.build:
        # Export the pinned revision only if the tree is not already there.
        if not os.path.isdir('podofo'):
            os.system('svn export -r {0} {1} podofo >scripts\\svn-export.log'.format(setup.PODOFO_REV, setup.PODOFO_SVN))
        bat.command('cd podofo')
        bat.cmake({
            'target': 'podofo_static',
            'params': '-DLIBCRYPTO_LIBRARY_NAMES=libcryptoMT{0} -DPODOFO_BUILD_STATIC:TYPE=BOOL=ON'
                      .format('d' if bat.config == 'debug' else ''),
        })
    else:
        bat.validate_cmake()
        bat.include('podofo\\src\\*.h', '\\podofo')
        bat.include('podofo\\podofo_config.h', '\\podofo')
        # Debug builds install the library under the conventional 'd' suffix.
        if bat.config == 'debug':
            bat.command("copy /y podofo\\src\\podofo\\podofo.lib podofo\\src\\podofo\\podofod.lib")
            bat.lib("podofo\\src\\podofo\\podofod.lib")
        else:
            bat.lib("podofo\\src\\podofo\\podofo.lib")
def need(program):
    """Raise an Exception unless *program* is available on the PATH.

    Uses shutil.which() instead of shelling out to the Windows-only
    ``where`` command: the old Popen(['where', ...], shell=True) call
    silently ignored the program name on POSIX and discarded stderr.
    The raised message is unchanged, so callers behave the same.
    """
    if shutil.which(program) is None:
        raise Exception('{0} is required.'.format(program))
def main():
    """Verify required tools, lay out the install tree and build all libs."""
    # Fail fast if any required command-line tool is missing.
    need('7z')
    need('svn')
    need('perl')
    need('cmake')
    need('sed')
    start = time.time()
    tools.kill('scripts')
    os.mkdir('scripts')
    print('Setting up {0}...'.format(setup.INSTALL))
    if setup.CLEAN:
        tools.kill(setup.INSTALL)
    try:
        # Create the per-platform include/lib/bin directory skeleton.
        os.mkdir(setup.INSTALL)
        os.mkdir('{0}/x86'.format(setup.INSTALL))
        os.mkdir('{0}/x86/include'.format(setup.INSTALL))
        os.mkdir('{0}/x86/lib'.format(setup.INSTALL))
        os.mkdir('{0}/x86/bin'.format(setup.INSTALL))
        os.mkdir('{0}/x64'.format(setup.INSTALL))
        os.mkdir('{0}/x64/include'.format(setup.INSTALL))
        os.mkdir('{0}/x64/lib'.format(setup.INSTALL))
        os.mkdir('{0}/x64/bin'.format(setup.INSTALL))
    except OSError as e:
        # Directories may already exist; warn and continue.
        print('warning: {0} ({1})'.format(e.strerror, e.filename))
    if setup.FRESH:
        print('Forcing a fresh rebuild...')
        tools.kill('bdb')
        tools.kill('boost')
        tools.kill('freetype')
        tools.kill('icu')
        tools.kill('libjpeg')
        tools.kill('libpng')
        tools.kill('openssl')
        tools.kill('podofo')
        tools.kill('turtle')
        tools.kill('zlib')
    if setup.BUILD_ICU:
        build_all(ICU)
    if setup.BUILD_BOOST:
        # bjam must be bootstrapped (once) before Boost itself.
        build_combo(bjam, 'release', 'x86')
        build_all(Boost)
        if setup.BUILD_X86:
            clean_boost('x86')
        if setup.BUILD_X64:
            clean_boost('x64')
    if setup.BUILD_PODOFO:
        build_all(zlib)
        build_all(libjpeg)
        build_all(libpng) # depends on zlib
        build_all(FreeType)
        build_all(OpenSSL)
        build_all(PoDoFo) # depends on zlib, libjpeg, libpng, FreeType and OpenSSL
    if setup.BUILD_BDB:
        build_all(BerkeleyDB)
    print('=' * 60)
    print('build complete: {0} min'.format(s2m(time.time() - start)))
    print('=' * 60)
if __name__ == '__main__':
main() | ferruccio/cclibs | build.py | Python | unlicense | 8,027 |
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import exception
from cinder import quota_utils
from cinder import test
from keystoneclient import exceptions
from oslo_config import cfg
from oslo_config import fixture as config_fixture
CONF = cfg.CONF
class QuotaUtilsTest(test.TestCase):
class FakeProject(object):
def __init__(self, id='foo', parent_id=None):
self.id = id
self.parent_id = parent_id
self.subtree = None
self.parents = None
self.domain_id = 'default'
def setUp(self):
super(QuotaUtilsTest, self).setUp()
self.auth_url = 'http://localhost:5000'
self.context = context.RequestContext('fake_user', 'fake_proj_id')
self.fixture = self.useFixture(config_fixture.Config(CONF))
self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken')
@mock.patch('keystoneclient.client.Client')
@mock.patch('keystoneclient.session.Session')
def test_keystone_client_instantiation(self, ksclient_session,
ksclient_class):
quota_utils._keystone_client(self.context)
ksclient_class.assert_called_once_with(auth_url=self.auth_url,
session=ksclient_session(),
version=(3, 0))
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v2(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v2.0'
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v2.0')
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v3(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v3'
returned_project = self.FakeProject(self.context.project_id, 'bar')
del returned_project.subtree
keystoneclient.projects.get.return_value = returned_project
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v3', 'bar')
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v3_with_subtree(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v3'
returned_project = self.FakeProject(self.context.project_id, 'bar')
subtree_dict = {'baz': {'quux': None}}
returned_project.subtree = subtree_dict
keystoneclient.projects.get.return_value = returned_project
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v3', 'bar', subtree_dict)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, subtree_as_ids=True)
keystoneclient.projects.get.assert_called_once_with(
self.context.project_id, parents_as_ids=False, subtree_as_ids=True)
self.assertEqual(expected_project.__dict__, project.__dict__)
def _setup_mock_ksclient(self, mock_client, version='v3',
subtree=None, parents=None):
keystoneclient = mock_client.return_value
keystoneclient.version = version
proj = self.FakeProject(self.context.project_id)
proj.subtree = subtree
if parents:
proj.parents = parents
proj.parent_id = next(iter(parents.keys()))
keystoneclient.projects.get.return_value = proj
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_domain_as_parent(
self, mock_client):
# Test with a top level project (domain is direct parent)
self._setup_mock_ksclient(mock_client, parents={'default': None})
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertIsNone(project.parent_id)
self.assertIsNone(project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_domain_as_grandparent(
self, mock_client):
# Test with a child project (domain is more than a parent)
self._setup_mock_ksclient(mock_client,
parents={'bar': {'default': None}})
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertEqual('bar', project.parent_id)
self.assertEqual({'bar': None}, project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_no_domain_in_parents(
self, mock_client):
# Test that if top most parent is not a domain (to simulate an older
# keystone version) nothing gets removed from the tree
parents = {'bar': {'foo': None}}
self._setup_mock_ksclient(mock_client, parents=parents)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertEqual('bar', project.parent_id)
self.assertEqual(parents, project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_no_parents(
self, mock_client):
# Test that if top no parents are present (to simulate an older
# keystone version) things don't blow up
self._setup_mock_ksclient(mock_client)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertIsNone(project.parent_id)
self.assertIsNone(project.parents)
@mock.patch('cinder.quota_utils._keystone_client')
def test_validate_nested_projects_with_keystone_v2(self, _keystone_client):
_keystone_client.side_effect = exceptions.VersionNotAvailable
self.assertRaises(exception.CinderException,
quota_utils.validate_setup_for_nested_quota_use,
self.context, [], None)
@mock.patch('cinder.quota_utils._keystone_client')
def test_validate_nested_projects_non_cloud_admin(self, _keystone_client):
# Covers not cloud admin or using old policy.json
_keystone_client.side_effect = exceptions.Forbidden
self.assertRaises(exception.CinderException,
quota_utils.validate_setup_for_nested_quota_use,
self.context, [], None)
def _process_reserve_over_quota(self, overs, usages, quotas,
expected_ex,
resource='volumes'):
ctxt = context.get_admin_context()
ctxt.project_id = 'fake'
size = 1
kwargs = {'overs': overs,
'usages': usages,
'quotas': quotas}
exc = exception.OverQuota(**kwargs)
self.assertRaises(expected_ex,
quota_utils.process_reserve_over_quota,
ctxt, exc,
resource=resource,
size=size)
def test_volume_size_exceed_quota(self):
overs = ['gigabytes']
usages = {'gigabytes': {'reserved': 1, 'in_use': 9}}
quotas = {'gigabytes': 10, 'snapshots': 10}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.VolumeSizeExceedsAvailableQuota)
def test_snapshot_limit_exceed_quota(self):
overs = ['snapshots']
usages = {'snapshots': {'reserved': 1, 'in_use': 9}}
quotas = {'gigabytes': 10, 'snapshots': 10}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.SnapshotLimitExceeded,
resource='snapshots')
def test_backup_gigabytes_exceed_quota(self):
overs = ['backup_gigabytes']
usages = {'backup_gigabytes': {'reserved': 1, 'in_use': 9}}
quotas = {'backup_gigabytes': 10}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.VolumeBackupSizeExceedsAvailableQuota,
resource='backups')
def test_backup_limit_quota(self):
overs = ['backups']
usages = {'backups': {'reserved': 1, 'in_use': 9}}
quotas = {'backups': 9}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.BackupLimitExceeded,
resource='backups')
def test_volumes_limit_quota(self):
    """volumes count over-quota maps to VolumeLimitExceeded."""
    self._process_reserve_over_quota(
        ['volumes'],
        {'volumes': {'reserved': 1, 'in_use': 9}},
        {'volumes': 9},
        exception.VolumeLimitExceeded)
def test_unknown_quota(self):
    """An unrecognized resource name in overs maps to UnexpectedOverQuota."""
    self._process_reserve_over_quota(
        ['unknown'],
        {'volumes': {'reserved': 1, 'in_use': 9}},
        {'volumes': 9},
        exception.UnexpectedOverQuota)
def test_unknown_quota2(self):
    """A resource/overs mismatch (volumes over while reserving snapshots)
    maps to UnexpectedOverQuota."""
    self._process_reserve_over_quota(
        ['volumes'],
        {'volumes': {'reserved': 1, 'in_use': 9}},
        {'volumes': 9},
        exception.UnexpectedOverQuota,
        resource='snapshots')
| bswartz/cinder | cinder/tests/unit/test_quota_utils.py | Python | apache-2.0 | 10,416 |
# Copyright (C) 2013 Equinor ASA, Norway.
#
# The file 'config_parser.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import sys
import os.path
from cwrap import BaseCClass
from res import ResPrototype
from res.config import ConfigContent, UnrecognizedEnum
class ConfigParser(BaseCClass):
    """Python wrapper around the C-level ``config_parser`` used to parse
    ERT-style configuration files.

    The ``_*`` class attributes are ResPrototype bindings into the res
    shared library; their prototype strings must match the C API exactly,
    so do not edit them casually.
    """

    TYPE_NAME = "config_parser"

    _alloc = ResPrototype("void* config_alloc()", bind=False)
    _add = ResPrototype(
        "schema_item_ref config_add_schema_item(config_parser, char*, bool)"
    )
    _free = ResPrototype("void config_free(config_parser)")
    _parse = ResPrototype(
        "config_content_obj config_parse(config_parser, char*, char*, char*, char*, hash, config_unrecognized_enum, bool)"
    )
    _size = ResPrototype("int config_get_schema_size(config_parser)")
    _get_schema_item = ResPrototype(
        "schema_item_ref config_get_schema_item(config_parser, char*)"
    )
    _has_schema_item = ResPrototype("bool config_has_schema_item(config_parser, char*)")
    _add_key_value = ResPrototype(
        "bool config_parser_add_key_values(config_parser, config_content, char*, stringlist, config_path_elm, char*, config_unrecognized_enum)"
    )
    _validate = ResPrototype("void config_validate(config_parser, config_content)")

    def __init__(self):
        # Allocate the underlying C parser; ownership passes to BaseCClass,
        # which releases it through free().
        c_ptr = self._alloc()
        super(ConfigParser, self).__init__(c_ptr)

    def __contains__(self, keyword):
        """True if a schema item named ``keyword`` has been registered."""
        return self._has_schema_item(keyword)

    def __len__(self):
        """Number of registered schema items."""
        return self._size()

    def __repr__(self):
        return self._create_repr("size=%d" % len(self))

    def add(self, keyword, required=False, value_type=None):
        """Register a schema item for ``keyword`` and return it.

        If ``value_type`` is given, it is applied to the item's first
        argument slot.
        """
        item = self._add(keyword, required).setParent(self)
        if value_type:
            item.iset_type(0, value_type)
        return item

    def __getitem__(self, keyword):
        """Return the schema item for ``keyword``; raise KeyError if absent."""
        if keyword in self:
            item = self._get_schema_item(keyword)
            item.setParent(self)
            return item
        else:
            raise KeyError("Config parser does not have item:%s" % keyword)

    def parse(
        self,
        config_file,
        comment_string="--",
        include_kw="INCLUDE",
        define_kw="DEFINE",
        pre_defined_kw_map=None,
        unrecognized=UnrecognizedEnum.CONFIG_UNRECOGNIZED_WARN,
        validate=True,
    ):
        """Parse ``config_file`` and return the resulting content.

        Raises IOError if the file does not exist and ValueError (after
        printing each parse error to stderr) if validation fails.

        @rtype: ConfigContent"""
        assert isinstance(unrecognized, UnrecognizedEnum)

        if not os.path.exists(config_file):
            raise IOError("File: %s does not exists" % config_file)
        config_content = self._parse(
            config_file,
            comment_string,
            include_kw,
            define_kw,
            pre_defined_kw_map,
            unrecognized,
            validate,
        )
        config_content.setParser(self)

        if validate and not config_content.isValid():
            sys.stderr.write("Errors parsing:%s \n" % config_file)
            for count, error in enumerate(config_content.getErrors()):
                sys.stderr.write("  %02d:%s\n" % (count, error))
            raise ValueError("Parsing:%s failed" % config_file)

        return config_content

    def free(self):
        # Called by BaseCClass when the wrapper is garbage collected.
        self._free()

    def validate(self, config_content):
        """Run C-level validation over ``config_content`` in place."""
        self._validate(config_content)

    def add_key_value(
        self,
        config_content,
        key,
        value,
        path_elm=None,
        config_filename=None,
        unrecognized_action=UnrecognizedEnum.CONFIG_UNRECOGNIZED_WARN,
    ):
        """Inject a single key/value pair into ``config_content``;
        returns the C function's boolean success flag."""
        return self._add_key_value(
            config_content, key, value, path_elm, config_filename, unrecognized_action
        )
| joakim-hove/ert | res/config/config_parser.py | Python | gpl-3.0 | 4,152 |
import calendar
from datetime import timedelta, date
from django.utils import timezone
from gerencex.core.models import Restday, Absences, HoursBalance
from gerencex.core.time_calculations import DateData
def dates(date1, date2):
    """Yield every date from ``date1`` (inclusive) up to ``date2`` (exclusive).

    Yields nothing when ``date1 >= date2``.
    """
    one_day = timedelta(days=1)
    current = date1
    while current < date2:
        yield current
        current = current + one_day
def comments(user, date_):
    """Assemble the human-readable annotations (Portuguese) shown next to a
    user's balance line for ``date_``: weekend, rest day note, absence cause
    and the opening of the hours account."""
    parts = []

    if date_.weekday() in (5, 6):
        parts.append('Fim de semana. ')

    restday = Restday.objects.filter(date=date_).last()
    if restday:
        parts.append(restday.note + '. ')

    absence = Absences.objects.filter(date=date_, user=user).last()
    if absence:
        parts.append(absence.get_cause_display() + '. ')

    if date_ == user.userdetail.office.hours_control_start_date:
        parts.append('Abertura da conta de horas. ')

    return ''.join(parts)
class UserBalance:
    """Monthly hours-balance report for a single user.

    ``kwargs`` must contain 'year' and 'month'. The reporting window is
    clipped to the office's hours-control start date at the low end and
    to today at the high end.
    """

    def __init__(self, user, **kwargs):
        self.user = user
        self.office = user.userdetail.office
        self.year = int(kwargs['year'])
        self.month = int(kwargs['month'])
        today = timezone.localtime(timezone.now()).date()
        days_in_month = calendar.monthrange(self.year, self.month)[1]
        first_month_day = date(self.year, self.month, 1)
        # NOTE(review): despite the name, this is the first day of the
        # *next* month — an exclusive upper bound.
        self.last_month_day = first_month_day + timedelta(days=days_in_month)
        self.last_day = min(today, self.last_month_day)
        self.start_date = user.userdetail.office.hours_control_start_date
        self.first_day = max(first_month_day, self.start_date)
        balance = [l for l in HoursBalance.objects.filter(date__year=self.year,
                                                          date__month=self.month,
                                                          user=self.user)]
        # Dates that already have a persisted balance row this month.
        self.balance_dates = [d.date for d in balance]

    def get_monthly_lines(self):
        """Return one display dict per day in the window, creating any
        missing HoursBalance rows on the fly."""
        lines = []
        for date_ in dates(self.first_day, self.last_day):
            if date_ not in self.balance_dates:
                HoursBalance.objects.create(
                    date=date_,
                    user=self.user,
                    credit=DateData(self.user, date_).credit().total_seconds(),
                    debit=DateData(self.user, date_).debit().total_seconds()
                )
            line = HoursBalance.objects.get(user=self.user, date=date_)
            lines.append({'date': date_,
                          'credit': line.time_credit(),
                          'debit': line.time_debit(),
                          'balance': line.time_balance(),
                          'comment': comments(self.user, date_)})
        return lines

    def create_or_update_line(self, date_):
        """Recompute and persist the credit/debit pair for one date."""
        credit = DateData(self.user, date_).credit().total_seconds()
        debit = DateData(self.user, date_).debit().total_seconds()
        updated_values = {'credit': credit, 'debit': debit}
        HoursBalance.objects.update_or_create(
            date=date_,
            user=self.user,
            defaults=updated_values
        )
def get_client_ip(request):
    """Return a two-element list of client IPs for ``request``.

    With an X-Forwarded-For header: [first hop, last hop] (the last hop
    keeps any leading space from the comma split). Without one:
    ['', REMOTE_ADDR].
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        hops = forwarded.split(',')
        return [hops[0], hops[-1]]
    return ['', request.META.get('REMOTE_ADDR')]
def previous_next(date_, model, user):
    """Return ``(previous, next_)`` month descriptors for month navigation.

    Each element is ``{'year': str, 'month': str}`` when ``model`` has rows
    for ``user`` in the adjacent month, otherwise None.
    """
    month_start = date(date_.year, date_.month, 1)
    prev_probe = month_start - timedelta(days=1)
    # +31 days from the 1st always lands inside the following month.
    next_probe = month_start + timedelta(days=31)

    def has_rows(probe):
        return bool(model.objects.filter(date__year=probe.year,
                                         date__month=probe.month,
                                         user=user))

    previous = None
    next_ = None
    if has_rows(prev_probe):
        previous = {'year': str(prev_probe.year), 'month': str(prev_probe.month)}
    if has_rows(next_probe):
        next_ = {'year': str(next_probe.year), 'month': str(next_probe.month)}
    return previous, next_
def updates_hours_balance(office, date_):
    """
    Calculates the hour balances of all workers in an office, from a given date up until yesterday
    :param date_: the begin date for updating
    :param office: the workers' office
    :return: Nothing. It just updates the database
    """
    users = [x.user for x in office.users.all()]
    today = timezone.localtime(timezone.now()).date()

    # date_ is present if calculate_hours_bank view was triggered. In this case, we must update
    # or create the balances for all office workers, and for all dates between date_ and today
    if date_:
        for d in dates(date_, today):
            for user in users:
                updated_values = {
                    'credit': DateData(user, d).credit().total_seconds(),
                    'debit': DateData(user, d).debit().total_seconds()
                }
                HoursBalance.objects.update_or_create(
                    date=d,
                    user=user,
                    defaults=updated_values
                )

    # date_ is not present when we just want to see hours_bank. In this case, we must check if
    # all office users have balances for yesterday, filling the blanks.
    else:
        for user in users:
            existent_balance = bool(HoursBalance.objects.filter(user=user))
            if not existent_balance:
                # No history at all: start one day before the office's
                # control start date so the loop below covers it fully.
                last_user_balance_date = office.hours_control_start_date - timedelta(days=1)
            else:
                last_user_balance_date = HoursBalance.objects.filter(user=user).last().date
            next_user_balance_date = last_user_balance_date + timedelta(days=1)
            if next_user_balance_date < today:
                for d in dates(next_user_balance_date, today):
                    HoursBalance.objects.create(
                        date=d,
                        user=user,
                        credit=DateData(user, d).credit().total_seconds(),
                        debit=DateData(user, d).debit().total_seconds()
                    )
    # Record that the office's balances are current as of today.
    office.last_balance_date = today
    office.save()
| flavoso/gerencex | gerencex/core/functions.py | Python | gpl-3.0 | 6,476 |
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
import json
import babelsubs
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, Http404, HttpResponseServerError, HttpResponseForbidden
from django.db.models import Count
from django.conf import settings
from django.contrib import messages
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.utils.http import urlencode
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django.views.generic import View
from django.shortcuts import render, get_object_or_404, redirect
from django.template.defaultfilters import urlize, linebreaks, force_escape
from django.views.decorators.clickjacking import xframe_options_exempt
from auth.models import CustomUser as User
from subtitles import shims
from subtitles.workflows import get_workflow
from subtitles.models import SubtitleLanguage, SubtitleVersion
from subtitles.permissions import user_can_access_subtitles_format
from subtitles.templatetags.new_subtitles_tags import visibility
from subtitles.forms import SubtitlesUploadForm
from teams.models import Task
from teams.permissions import can_perform_task
from utils.text import fmt
from videos.models import Video
from videos.types import video_type_registrar
def _version_data(version):
'''
Creates a dict with version info, suitable for encoding
into json and bootstrapping the editor.
'''
return {
'metadata': version.get_metadata(),
'subtitles': version.get_subtitles().to_xml(),
'title': version.title,
'description': version.description,
}
@require_POST
def regain_lock(request, video_id, language_code):
    """Try to (re)acquire the writelock on a subtitle language for this
    browser session; respond with JSON {'ok': bool}."""
    video = get_object_or_404(Video, video_id=video_id)
    language = video.subtitle_language(language_code)

    if language.can_writelock(request.browser_id):
        language.writelock(request.user, request.browser_id, save=True)
        payload = {'ok': True}
    else:
        payload = {'ok': False}
    return HttpResponse(json.dumps(payload))
@require_POST
def release_lock(request, video_id, language_code):
    """Release the writelock if this browser holds it, then respond with
    the video page URL as JSON."""
    video = get_object_or_404(Video, video_id=video_id)
    language = video.subtitle_language(language_code)

    if language.can_writelock(request.browser_id):
        language.release_writelock()

    video_url = reverse('videos:video', args=(video_id,))
    return HttpResponse(json.dumps({'url': video_url}))
@login_required
@require_POST
def tutorial_shown(request):
    """Record that the current user has seen the editor tutorial."""
    request.user.tutorial_was_shown()
    return HttpResponse(json.dumps(dict(success=True)))
@login_required
@require_POST
def set_playback_mode(request):
    """Persist the POSTed playback mode on the current user."""
    mode = request.POST['playback_mode']
    request.user.set_playback_mode(mode)
    return HttpResponse(json.dumps(dict(success=True)))
def old_editor(request, video_id, language_code):
    """Redirect to the legacy widget-based editor for this language."""
    video = get_object_or_404(Video, video_id=video_id)
    language = get_object_or_404(SubtitleLanguage, video=video,
                                 language_code=language_code)

    widget_path = shims.get_widget_url(language,
                                       request.GET.get('mode'),
                                       request.GET.get('task_id'))
    return redirect('http://{0}{1}'.format(request.get_host(), widget_path))
class SubtitleEditorBase(View):
    """Base view for the subtitle editor.

    Resolves the video/language, checks permissions and write locks,
    assembles the JSON ``editor_data`` blob the front-end bootstraps
    from, and renders the editor template.
    """

    def dispatch(self, request, *args, **kwargs):
        # Swap in the "special user" (if requested) before the auth check.
        self.handle_special_user(request)
        if not request.user.is_authenticated():
            return redirect_to_login(request.build_absolute_uri())
        return super(SubtitleEditorBase, self).dispatch(
            request, *args, **kwargs)

    def handle_special_user(self, request):
        """Replace request.user with the user stashed in the session when
        ?special_user is present; 403 if the session has no valid user."""
        if 'special_user' not in request.GET:
            return
        try:
            special_user = User.objects.get(id=request.session['editor-user-id'])
        except (KeyError, User.DoesNotExist):
            raise PermissionDenied()
        # We use the editor user for this requests, but still don't log them
        # in.  Note that this will also control the auth headers that get sent
        # to the editor, so the API calls will also use this user.
        request.user = special_user

    def get_video_urls(self):
        """Get video URLs to send to the editor."""
        return self.workflow.editor_video_urls(self.language_code)

    def get_redirect_url(self):
        """URL to send the user to when they leave the editor."""
        if 'return_url' in self.request.GET:
            return self.request.GET['return_url']
        else:
            return self.video.get_absolute_url()

    def get_custom_css(self):
        # Hook for subclasses to inject extra CSS.
        return ""

    def get_title(self):
        # Hook for subclasses; page <title>.
        return _('Amara')

    def get_analytics_additions(self):
        # Hook for subclasses; extra Google Analytics snippet.
        return None

    def calc_base_language(self):
        """Set self.base_language to the primary audio language if it has
        at least one extant version, else None."""
        if (self.video.primary_audio_language_code and
            SubtitleVersion.objects.extant().filter(
                video=self.video,
                language_code=self.video.primary_audio_language_code)
                .exists()):
            self.base_language = self.video.primary_audio_language_code
        else:
            self.base_language = None

    def calc_editing_language(self):
        """Set self.editing_language, creating an unsaved placeholder if the
        video has no SubtitleLanguage for this code yet."""
        self.editing_language = self.video.subtitle_language(self.language_code)
        if self.editing_language is None:
            self.editing_language = SubtitleLanguage(
                video=self.video, language_code=self.language_code)

    def check_can_writelock(self):
        """True if this browser may take the writelock; flashes an error
        message otherwise."""
        if not self.editing_language.can_writelock(self.request.browser_id):
            msg = _("Sorry, you cannot edit these subtitles now because they are being edited by another user. Please check back later.")
            messages.error(self.request, msg)
            return False
        else:
            return True

    def check_can_edit(self):
        """True if the workflow allows this user to edit; flashes an error
        with a help link otherwise."""
        if self.workflow.user_can_edit_subtitles(self.user,
                                                 self.language_code):
            return True

        learn_more_link = u'<a href="{}">{}</a>'.format(
            u'http://support.amara.org/solution/articles/212109-why-do-i-see-a-message-saying-that-i-am-not-permitted-to-edit-subtitles',
            _(u'Learn more'))
        messages.error(self.request,
                       fmt(_('Sorry, you do not have permission to edit '
                             'these subtitles. (%(learn_more_link)s)'),
                           learn_more_link=learn_more_link))
        return False

    def get_editor_data(self):
        """Build the bootstrap dict the editor front-end consumes."""
        editor_data = {
            # NOTE(review): bool() on a query-string value is truthy for any
            # non-empty string (including 'false') — confirm this is intended.
            'canSync': bool(self.request.GET.get('canSync', True)),
            'canAddAndRemove': bool(self.request.GET.get('canAddAndRemove', True)),
            # front end needs this to be able to set the correct
            # api headers for saving subs
            'authHeaders': {
                'x-api-username': self.request.user.username,
                'x-apikey': self.request.user.get_api_key()
            },
            'username': self.request.user.username,
            'user_fullname': unicode(self.request.user),
            'video': {
                'id': self.video.video_id,
                'title': self.video.title,
                'description': self.video.description,
                'duration': self.video.duration,
                'primaryVideoURL': self.video.get_video_url(),
                'primaryVideoURLType': video_type_registrar.video_type_for_url(self.video.get_video_url()).abbreviation,
                'videoURLs': self.get_video_urls(),
                'metadata': self.video.get_metadata(),
            },
            'editingVersion': {
                'languageCode': self.editing_language.language_code,
                'versionNumber': (self.editing_version.version_number
                                  if self.editing_version else None),
            },
            'baseLanguage': self.base_language,
            'languages': [self.editor_data_for_language(lang)
                          for lang in self.languages],
            'languageCode': self.request.LANGUAGE_CODE,
            'oldEditorURL': reverse('subtitles:old-editor', kwargs={
                'video_id': self.video.video_id,
                'language_code': self.editing_language.language_code,
            }),
            'playbackModes': self.get_editor_data_for_playback_modes(),
            'preferences': {
                'showTutorial': self.request.user.show_tutorial,
                'playbackModeId': self.request.user.playback_mode
            },
            'staticURL': settings.STATIC_URL,
            'notesHeading': 'Editor Notes',
            'notesEnabled': True,
            'redirectUrl': self.get_redirect_url(),
            'customCss': self.get_custom_css(),
        }
        # The workflow may override/extend any of the keys above.
        editor_data.update(self.workflow.editor_data(
            self.user, self.language_code))

        team_attributes = self.get_team_editor_data()
        if team_attributes:
            editor_data['teamAttributes'] = team_attributes
        return editor_data

    def editor_data_for_language(self, language):
        """Per-language dict for editor_data['languages'], embedding full
        version payloads only where the editor needs them."""
        versions_data = []
        if self.workflow.user_can_view_private_subtitles(
                self.user, language.language_code):
            language_qs = language.subtitleversion_set.extant()
        else:
            language_qs = language.subtitleversion_set.public()
        for i, version in enumerate(language_qs):
            version_data = {
                'version_no': version.version_number,
                'visibility': visibility(version),
            }
            # Embed full subtitles for: the version being edited, the
            # translation source, and the tip of the base language.
            if self.editing_version == version:
                version_data.update(_version_data(version))
            elif self.translated_from_version == version:
                version_data.update(_version_data(version))
            elif (language.language_code == self.base_language and
                  i == len(language_qs) - 1):
                version_data.update(_version_data(version))
            versions_data.append(version_data)

        return {
            'translatedFrom': self.translated_from_version and {
                'language_code': self.translated_from_version.subtitle_language.language_code,
                'version_number': self.translated_from_version.version_number,
            },
            'editingLanguage': language == self.editing_language,
            'language_code': language.language_code,
            'name': language.get_language_code_display(),
            'pk': language.pk,
            'numVersions': language.num_versions,
            'versions': versions_data,
            'subtitles_complete': language.subtitles_complete,
            'is_rtl': language.is_rtl(),
            'is_original': language.is_primary_audio_language()
        }

    def get_editor_data_for_playback_modes(self):
        """Static catalog of the three playback modes offered in the UI."""
        return [
            {
                'id': User.PLAYBACK_MODE_MAGIC,
                'idStr': 'magic',
                'name': _('Magic'),
                'desc': _('Recommended: magical auto-pause (just keep typing!)')
            },
            {
                'id': User.PLAYBACK_MODE_STANDARD,
                'idStr': 'standard',
                'name': _('Standard'),
                'desc': _('Standard: no automatic pausing, use TAB key')
            },
            {
                'id': User.PLAYBACK_MODE_BEGINNER,
                'idStr': 'beginner',
                'name': _('Beginner'),
                'desc': _('Beginner: play 4 seconds, then pause')
            }
        ]

    def get_team_editor_data(self):
        """Team name/type/features/guidelines dict, or None for non-team videos."""
        if self.team_video:
            team = self.team_video.team
            return dict([('teamName', team.name), ('type', team.workflow_type),
                         ('features', [f.key_name.split('_', 1)[-1] for f in team.settings.features()]),
                         ('guidelines', dict(
                             [(s.key_name.split('_', 1)[-1],
                               linebreaks(urlize(force_escape(s.data))))
                              for s in team.settings.guidelines()
                              if s.data.strip()]))])
        else:
            return None

    def assign_task_for_editor(self):
        """Try to assign any unassigned tasks to our user.

        If we can't assign the task, return False.
        """
        if self.team_video is None:
            return True
        task_set = self.team_video.task_set.incomplete().filter(
            language=self.language_code)
        tasks = list(task_set[:1])
        if tasks:
            task = tasks[0]
            if task.assignee is None and can_perform_task(self.user, task):
                task.assignee = self.user
                task.set_expiration()
                task.save()

            if task.assignee != self.user:
                msg = fmt(_("Another user is currently performing "
                            "the %(task_type)s task for these subtitles"),
                          task_type=task.get_type_display())
                messages.error(self.request, msg)
                return False
        return True

    def handle_task(self, context, editor_data):
        """Does most of the dirty-work to handle tasks.  """
        context['task'] = None
        if self.team_video is None:
            return
        task = self.team_video.get_task_for_editor(self.language_code)
        if not task:
            return
        context['task'] = task
        editor_data['task_id'] = task.id
        editor_data['savedNotes'] = task.body
        editor_data['task_needs_pane'] = task.get_type_display() in ('Review', 'Approve')
        editor_data['team_slug'] = task.team.slug
        editor_data['oldEditorURL'] += '?' + urlencode({
            'mode': Task.TYPE_NAMES[task.type].lower(),
            'task_id': task.id,
        })

    def get(self, request, video_id, language_code):
        """Main entry: permission/lock gatekeeping, then render the editor."""
        self.video = get_object_or_404(Video, video_id=video_id)
        self.team_video = self.video.get_team_video()
        self.language_code = language_code
        self.user = request.user
        self.calc_base_language()
        self.calc_editing_language()
        self.workflow = get_workflow(self.video)
        if (not self.check_can_edit() or
                not self.check_can_writelock() or
                not self.assign_task_for_editor()):
            return redirect(self.video)
        self.editing_language.writelock(self.user, self.request.browser_id,
                                        save=True)
        self.editing_version = self.editing_language.get_tip(public=False)
        # we ignore forking because even if it *is* a fork, we still want to
        # show the user the rererence languages:
        self.translated_from_version = self.editing_language.\
            get_translation_source_version(ignore_forking=True)
        self.languages = self.video.newsubtitlelanguage_set.annotate(
            num_versions=Count('subtitleversion'))
        editor_data = self.get_editor_data()

        context = {
            'title': self.get_title(),
            'video': self.video,
            'DEBUG': settings.DEBUG,
            'language': self.editing_language,
            'other_languages': self.languages,
            'version': self.editing_version,
            'translated_from_version': self.translated_from_version,
            'GOOGLE_ANALYTICS_ADDITIONS': self.get_analytics_additions(),
            'upload_subtitles_form': SubtitlesUploadForm(
                request.user, self.video,
                initial={'language_code':
                         self.editing_language.language_code},
                allow_all_languages=True),
        }
        self.handle_task(context, editor_data)
        context['editor_data'] = json.dumps(editor_data, indent=4)

        return render(request, "editor/editor.html", context)
class SubtitleEditor(SubtitleEditorBase):
    """Concrete editor view; exempt from X-Frame-Options so the editor can
    be embedded in an iframe."""

    @method_decorator(xframe_options_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(SubtitleEditor, self).dispatch(
            request, *args, **kwargs)
def _user_for_download_permissions(request):
# check authorization... This is pretty hacky. We should implement
# pculture/amara-enterprise#89
if request.user.is_authenticated():
return request.user
username = request.META.get('HTTP_X_API_USERNAME', None)
api_key = request.META.get('HTTP_X_API_KEY',
request.META.get('HTTP_X_APIKEY', None))
if not username or not api_key:
return request.user
try:
import apiv2
from tastypie.models import ApiKey
except ImportError:
return request.user
try:
api_user = User.objects.get(username=username)
except User.DoesNotExist:
return request.user
if not ApiKey.objects.filter(user=api_user, key=api_key).exists():
return request.user
return api_user
def download(request, video_id, language_code, filename, format,
             version_number=None):
    """Serve one subtitle version as a downloadable file in ``format``.

    Returns 403 for disallowed formats, raises PermissionDenied for
    non-viewable videos/languages, 404 for a missing version, and 500
    for an unknown format.
    """
    user = _user_for_download_permissions(request)
    if not user_can_access_subtitles_format(user, format):
        # BUG FIX: HttpResponseForbidden is an HttpResponse, not an
        # exception; raising it caused a TypeError.  It must be returned.
        return HttpResponseForbidden(_(u'You are not allowed to download this subtitle format.'))
    video = get_object_or_404(Video, video_id=video_id)
    workflow = video.get_workflow()
    if not workflow.user_can_view_video(user):
        raise PermissionDenied()
    language = video.subtitle_language(language_code)
    if language is None:
        raise PermissionDenied()
    # NOTE(review): despite the name, this flag is True when the user MAY
    # see private versions; it is negated below — confirm naming intent.
    public_only = workflow.user_can_view_private_subtitles(user,
                                                           language_code)
    version = language.version(public_only=not public_only,
                               version_number=version_number)
    if not version:
        raise Http404()
    if format not in babelsubs.get_available_formats():
        # BUG FIX: likewise, a response object must be returned, not raised.
        return HttpResponseServerError("Format not found")
    subs_text = babelsubs.to(version.get_subtitles(), format,
                             language=version.language_code)
    # since this is a download, we can afford not to escape tags, specially
    # true since speaker change is denoted by '>>' and that would get entirely
    # stripped out
    response = HttpResponse(subs_text, mimetype="text/plain")
    response['Content-Disposition'] = 'attachment'
    return response
def download_all(request, video_id, filename):
    """Serve the merged DFXP of all the video's languages as an attachment;
    404 when no merged document exists."""
    video = get_object_or_404(Video, video_id=video_id)

    merged = video.get_merged_dfxp()
    if merged is None:
        raise Http404()

    response = HttpResponse(merged, mimetype="text/plain")
    response['Content-Disposition'] = 'attachment'
    return response
| wevoice/wesub | apps/subtitles/views.py | Python | agpl-3.0 | 19,462 |
""" A model of an Infrastructure Datastore in CFME
"""
import attr
from lxml.html import document_fromstring
from navmazing import NavigateToAttribute
from widgetastic.exceptions import NoSuchElementException
from widgetastic.utils import Version, VersionPick
from widgetastic.widget import ParametrizedView, View, Text
from widgetastic_patternfly import Dropdown, Accordion
from cfme.base.login import BaseLoggedInPage
from cfme.common import Taggable
from cfme.common.host_views import HostsView
from cfme.configure.tasks import is_datastore_analysis_finished, TasksView
from cfme.exceptions import ItemNotFound, MenuItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils import ParamClassName
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.pretty import Pretty
from cfme.utils.wait import wait_for, TimedOutError
from widgetastic_manageiq import (ManageIQTree,
SummaryTable,
ItemsToolBarViewSelector,
BaseEntitiesView,
NonJSBaseEntity,
BaseListEntity,
BaseQuadIconEntity,
BaseTileIconEntity,
JSBaseEntity)
class DatastoreToolBar(View):
    """
    represents datastore toolbar and its controls
    """
    configuration = Dropdown(text='Configuration')
    policy = Dropdown(text='Policy')
    download = Dropdown(text='Download')
    view_selector = View.nested(ItemsToolBarViewSelector)
class DatastoreSideBar(View):
    """
    represents left side bar. it usually contains navigation, filters, etc

    Two accordions: plain datastores and datastore clusters, each backed
    by a ManageIQ tree widget.
    """
    @View.nested
    class datastores(Accordion):  # noqa
        ACCORDION_NAME = "Datastores"
        tree = ManageIQTree()

    @View.nested
    class clusters(Accordion):  # noqa
        ACCORDION_NAME = "Datastore Clusters"
        tree = ManageIQTree()
class DatastoreQuadIconEntity(BaseQuadIconEntity):
    """Quadicon entity for a datastore (pre-5.9 UI)."""

    @property
    def data(self):
        # Quadrants: a = datastore type icon, b = VM count, c = host count.
        # An absent/partial quadicon yields an empty dict.
        try:
            return {
                'type': self.browser.get_attribute("alt", self.QUADRANT.format(pos="a")),
                'no_vm': int(self.browser.text(self.QUADRANT.format(pos="b"))),
                'no_host': int(self.browser.text(self.QUADRANT.format(pos="c"))),
            }
        except (IndexError, NoSuchElementException):
            return {}
class DatastoreTileIconEntity(BaseTileIconEntity):
    """Tile-view entity wrapping the datastore quadicon."""
    quad_icon = ParametrizedView.nested(DatastoreQuadIconEntity)
class DatastoreListEntity(BaseListEntity):
    """List-view entity for a datastore; no datastore-specific behavior."""
    pass
class NonJSDatastoreEntity(NonJSBaseEntity):
    """Pre-5.9 entity: delegates to the quad/list/tile variants above."""
    quad_entity = DatastoreQuadIconEntity
    list_entity = DatastoreListEntity
    tile_entity = DatastoreTileIconEntity
class JSDatastoreEntity(JSBaseEntity):
    """JS-based (5.9+) datastore entity."""

    @property
    def data(self):
        # Augment the base entity data by parsing the quadicon HTML blob
        # for datastore type plus VM and host counts.
        data_dict = super(JSDatastoreEntity, self).data
        try:
            if 'quadicon' in data_dict and data_dict['quadicon']:
                quad_data = document_fromstring(data_dict['quadicon'])
                data_dict['type'] = quad_data.xpath(self.QUADRANT.format(pos="a"))[0].get('alt')
                data_dict['no_vm'] = quad_data.xpath(self.QUADRANT.format(pos="b"))[0].text
                data_dict['no_host'] = quad_data.xpath(self.QUADRANT.format(pos="c"))[0].text
            return data_dict
        except IndexError:
            # A quadrant xpath matched nothing — treat as "no data".
            return {}
def DatastoreEntity():  # noqa
    """Temporary wrapper for Datastore Entity during transition to JS based Entity """
    # Pre-5.9 appliances get the non-JS entity; 5.9+ the JS one.
    return VersionPick({
        Version.lowest(): NonJSDatastoreEntity,
        '5.9': JSDatastoreEntity,
    })
class DatastoreEntities(BaseEntitiesView):
    """
    represents central view where all QuadIcons, etc are displayed
    """
    @property
    def entity_class(self):
        # Resolve the version-appropriate entity class at runtime.
        return DatastoreEntity().pick(self.browser.product_version)
class DatastoresView(BaseLoggedInPage):
    """
    represents whole All Datastores page
    """
    toolbar = View.nested(DatastoreToolBar)
    sidebar = View.nested(DatastoreSideBar)
    including_entities = View.include(DatastoreEntities, use_parent=True)

    @property
    def is_displayed(self):
        # NOTE(review): super(BaseLoggedInPage, self) skips
        # BaseLoggedInPage.is_displayed itself — confirm this is intended
        # rather than super(DatastoresView, self).
        return (super(BaseLoggedInPage, self).is_displayed and
                self.navigation.currently_selected == ['Compute', 'Infrastructure',
                                                       'Datastores'] and
                self.entities.title.text == 'All Datastores')
class HostAllDatastoresView(DatastoresView):
    """All-datastores view reached from a specific host's page."""

    @property
    def is_displayed(self):
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ["Compute", "Infrastructure", "Hosts"] and
            self.entities.title.text == "{} (All Datastores)".format(self.context["object"].name)
        )
class ProviderAllDatastoresView(DatastoresView):
    """
    This view is used in test_provider_relationships
    """

    @property
    def is_displayed(self):
        msg = "{} (All Datastores)".format(self.context["object"].name)
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ["Compute", "Infrastructure", "Providers"] and
            self.entities.title.text == msg
        )
class DatastoreDetailsView(BaseLoggedInPage):
    """
    represents Datastore Details page
    """
    title = Text('//div[@id="main-content"]//h1')
    toolbar = View.nested(DatastoreToolBar)
    sidebar = View.nested(DatastoreSideBar)

    @View.nested
    class entities(View):  # noqa
        """
        represents Details page when it is switched to Summary aka Tables view
        """
        properties = SummaryTable(title="Properties")
        registered_vms = SummaryTable(title="Information for Registered VMs")
        relationships = SummaryTable(title="Relationships")
        content = SummaryTable(title="Content")
        smart_management = SummaryTable(title="Smart Management")

    @property
    def is_displayed(self):
        # NOTE(review): super(BaseLoggedInPage, self) skips this class's
        # base is_displayed — confirm intent.
        return (super(BaseLoggedInPage, self).is_displayed and
                self.navigation.currently_selected == ['Compute', 'Infrastructure',
                                                       'Datastores'] and
                self.title.text == 'Datastore "{name}"'.format(name=self.context['object'].name))
class RegisteredHostsView(HostsView):
    """
    represents Hosts related to some datastore
    """
    @property
    def is_displayed(self):
        # todo: to define correct check
        return False
@attr.s
class Datastore(Pretty, BaseEntity, Taggable):
"""Model of an infrastructure datastore in cfme
Args:
name: Name of the datastore.
provider: provider this datastore is attached to.
"""
pretty_attrs = ['name', 'provider_key']
_param_name = ParamClassName('name')
name = attr.ib()
provider = attr.ib()
type = attr.ib(default=None)
def delete(self, cancel=True):
    """
    Deletes a datastore from CFME

    Args:
        cancel: Whether to cancel the deletion, defaults to True.
            The confirmation alert is only accepted when cancel is False
            (handle_alert=(not cancel)).

    Note:
        Datastore must have 0 hosts and 0 VMs for this to work.
    """
    # BZ 1467989 - this button is never getting enabled for some resources
    view = navigate_to(self, 'Details')
    # Menu label was renamed in 5.9.
    view.toolbar.configuration.item_select('Remove Datastore from Inventory'
                                           if self.appliance.version >= '5.9'
                                           else 'Remove Datastore',
                                           handle_alert=(not cancel))

    view.flash.assert_success_message('Delete initiated for Datastore from the CFME Database')
def get_hosts(self):
    """ Returns names of hosts (from quadicons) that use this datastore

    Returns: List of strings with names or `[]` if no hosts found.
    """
    details = navigate_to(self, 'DetailsFromProvider')
    details.entities.relationships.click_at('Hosts')
    hosts_page = details.browser.create_view(RegisteredHostsView)
    return hosts_page.entities.get_all()
def get_vms(self):
    """ Returns names of VMs (from quadicons) that use this datastore

    Returns: List of strings with names or `[]` if no vms found.
    """
    details = navigate_to(self, 'Details')
    # The relationships row is labelled either 'VMs' or 'Managed VMs'
    # depending on the appliance.
    row_label = ('VMs' if 'VMs' in details.entities.relationships.fields
                 else 'Managed VMs')
    details.entities.relationships.click_at(row_label)
    # todo: to replace with correct view
    vms_page = details.browser.create_view(DatastoresView)
    return [entity.name for entity in vms_page.entities.get_all()]
def delete_all_attached_vms(self):
view = navigate_to(self, 'Details')
view.entities.relationships.click_at('Managed VMs')
# todo: to replace with correct view
vms_view = view.browser.create_view(DatastoresView)
for entity in vms_view.entities.get_all():
entity.check()
view.toolbar.configuration.item_select('Remove selected items from Inventory'
if self.appliance.version >= '5.9'
else 'Remove selected items',
handle_alert=True)
wait_for(lambda: bool(len(vms_view.entities.get_all())), fail_condition=True,
message="Wait datastore vms to disappear", num_sec=1000,
fail_func=self.browser.refresh)
def delete_all_attached_hosts(self):
view = navigate_to(self, 'Details')
view.entities.relationships.click_at('Hosts')
hosts_view = view.browser.create_view(RegisteredHostsView)
for entity in hosts_view.entities.get_all():
entity.check()
view.toolbar.configuration.item_select('Remove items from Inventory'
if self.appliance.version >= '5.9'
else 'Remove items',
handle_alert=True)
wait_for(lambda: bool(len(hosts_view.entities.get_all())), fail_condition=True,
message="Wait datastore hosts to disappear", num_sec=1000,
fail_func=self.browser.refresh)
@property
def exists(self):
try:
view = navigate_to(self, 'Details')
return view.is_displayed
except ItemNotFound:
return False
@property
def host_count(self):
""" number of attached hosts.
Returns:
:py:class:`int` host count.
"""
view = navigate_to(self, 'Details')
return int(view.entities.relationships.get_text_of('Hosts'))
@property
def vm_count(self):
""" number of attached VMs.
Returns:
:py:class:`int` vm count.
"""
view = navigate_to(self, 'Details')
return int(view.entities.relationships.get_text_of('Managed VMs'))
def run_smartstate_analysis(self, wait_for_task_result=False):
""" Runs smartstate analysis on this host
Note:
The host must have valid credentials already set up for this to work.
"""
view = navigate_to(self, 'DetailsFromProvider')
try:
wait_for(lambda: view.toolbar.configuration.item_enabled('Perform SmartState Analysis'),
fail_condition=False, num_sec=10)
except TimedOutError:
raise MenuItemNotFound('Smart State analysis is disabled for this datastore')
view.toolbar.configuration.item_select('Perform SmartState Analysis', handle_alert=True)
view.flash.assert_success_message(('"{}": scan successfully '
'initiated'.format(self.name)))
if wait_for_task_result:
view = self.appliance.browser.create_view(TasksView)
wait_for(lambda: is_datastore_analysis_finished(self.name),
delay=15, timeout="15m",
fail_func=view.reload.click)
@attr.s
class DatastoreCollection(BaseCollection):
    """Collection class for :py:class:`cfme.infrastructure.datastore.Datastore`"""
    ENTITY = Datastore
    def delete(self, *datastores):
        """Delete several datastores through the All Datastores page.
        Args:
            *datastores: Datastore entities to delete.
        Raises:
            ValueError: if any datastore cannot be found in the UI.
        Note:
            Datastores must have 0 hosts and 0 VMs for this to work.
        """
        datastores = list(datastores)
        checked_datastores = list()
        view = navigate_to(self, 'All')
        for datastore in datastores:
            try:
                view.entities.get_entity(name=datastore.name, surf_pages=True).check()
                checked_datastores.append(datastore)
            except ItemNotFound:
                raise ValueError('Could not find datastore {} in the UI'.format(datastore.name))
        # NOTE(review): this guard is effectively always true -- a missing
        # datastore raises ValueError above, so both sets must be equal here.
        if set(datastores) == set(checked_datastores):
            view.toolbar.configuration.item_select('Remove Datastores', handle_alert=True)
            view.flash.assert_success_message(
                'Delete initiated for Datastore from the CFME Database')
        # Wait serially for each datastore to vanish from the UI.
        for datastore in datastores:
            wait_for(lambda: not datastore.exists, num_sec=600, delay=30,
                     message='Wait for Datastore to be deleted')
    def run_smartstate_analysis(self, *datastores):
        """Start SmartState analysis for several datastores at once.
        Args:
            *datastores: Datastore entities to scan.
        Raises:
            ValueError: if any datastore cannot be found in the UI.
        """
        datastores = list(datastores)
        checked_datastores = list()
        view = navigate_to(self, 'All')
        for datastore in datastores:
            try:
                view.entities.get_entity(name=datastore.name, surf_pages=True).check()
                checked_datastores.append(datastore)
            except ItemNotFound:
                raise ValueError('Could not find datastore {} in the UI'.format(datastore.name))
        view.toolbar.configuration.item_select('Perform SmartState Analysis', handle_alert=True)
        # One flash message is emitted per scanned datastore.
        for datastore in datastores:
            view.flash.assert_success_message(
                '"{}": scan successfully initiated'.format(datastore.name))
@navigator.register(DatastoreCollection, 'All')
class All(CFMENavigateStep):
    VIEW = DatastoresView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
    def step(self):
        """Navigate to Compute -> Infrastructure -> Datastores."""
        self.prerequisite_view.navigation.select('Compute', 'Infrastructure', 'Datastores')
    def resetter(self):
        """
        resets page to default state when user navigates to All Datastores destination
        """
        # Reset view and selection
        self.view.sidebar.datastores.tree.click_path('All Datastores')
        tb = self.view.toolbar
        # Force Grid View so quadicon-based lookups work consistently.
        if tb.view_selector.is_displayed and 'Grid View' not in tb.view_selector.selected:
            tb.view_selector.select("Grid View")
        self.view.entities.paginator.reset_selection()
@navigator.register(Datastore, 'Details')
class Details(CFMENavigateStep):
    VIEW = DatastoreDetailsView
    prerequisite = NavigateToAttribute('parent', 'All')
    def step(self):
        """Click this datastore's quadicon in the All Datastores view."""
        self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click()
@navigator.register(Datastore, 'DetailsFromProvider')
class DetailsFromProvider(CFMENavigateStep):
    VIEW = DatastoreDetailsView
    def prerequisite(self):
        """Open the provider's Details page and drill into its datastores."""
        prov_view = navigate_to(self.obj.provider, 'Details')
        prov_view.entities.summary('Relationships').click_at('Datastores')
        return self.obj.create_view(DatastoresView)
    def step(self):
        """Click this datastore's quadicon in the provider's datastore list."""
        self.prerequisite_view.entities.get_entity(name=self.obj.name, surf_pages=True).click()
def get_all_datastores():
    """Returns names (from quadicons) of all datastores"""
    # NOTE(review): navigates with the Datastore class itself, while the
    # 'All' destination is registered on DatastoreCollection -- presumably
    # the navigator resolves this through the entity's parent; verify.
    view = navigate_to(Datastore, 'All')
    return [ds.name for ds in view.entities.get_all()]
| lkhomenk/integration_tests | cfme/infrastructure/datastore.py | Python | gpl-2.0 | 15,869 |
from oscar.apps.shipping.admin import *
| lyoniionly/django-cobra | sites/sandbox/apps/shipping/admin.py | Python | apache-2.0 | 40 |
# blaplay, Copyright (C) 2012 Niklas Koep
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import os
import re
from time import ctime
from copy import deepcopy
import gobject
import gtk
import pango
import blaplay
library = blaplay.bla.library
from blaplay.blacore import blaconst
from blaplay import blautil, blagui
from blaplay.formats._blatrack import BlaTrack
from blaplay.formats._identifiers import *
from blawindows import BlaWindow, BlaScrolledWindow
import blaguiutils
class BlaMetadataViewer(gtk.VBox):
    """Base widget listing metadata fields of the currently selected tracks.
    Subclasses fill the two-column (name, value) model via _populate_model().
    A value of None in the model means the field varies between tracks.
    """
    __metaclass__ = blautil.BlaSingletonMeta
    __gsignals__= {
        # Emitted when the user edits a value: (identifier, new_text).
        "value_changed": blautil.signal(2)
    }
    class TreeView(blaguiutils.BlaTreeViewBase):
        # Treeview that starts editing on row activation and keeps
        # rubber-band selection behavior sane on plain clicks.
        def __init__(self, *args, **kwargs):
            self.__is_editable = kwargs.pop("is_editable", False)
            super(BlaMetadataViewer.TreeView, self).__init__(*args, **kwargs)
            def row_activated(treeview, path, column):
                if not treeview.get_selection().path_is_selected(path):
                    return True
                # Start editing the value cell of the activated row.
                treeview.set_cursor(path, treeview.get_columns()[-1],
                                    start_editing=True)
            self.connect("row_activated", row_activated)
        def _button_press_event(self, treeview, event):
            x, y = map(int, [event.x, event.y])
            try:
                path = self.get_path_at_pos(x, y)[0]
            except TypeError:
                # Click landed outside of any row.
                path = None
            # Clicking an already-selected row must not clear the selection.
            if (event.button == 1 and event.type == gtk.gdk.BUTTON_PRESS and
                path is not None and
                self.get_selection().path_is_selected(path)):
                return False
            if path is None:
                self.grab_focus()
            return super(BlaMetadataViewer.TreeView, self)._button_press_event(
                treeview, event)
    def __init__(self, is_editable, playlist_manager):
        super(BlaMetadataViewer, self).__init__(
            spacing=blaconst.WIDGET_SPACING)
        model = gtk.ListStore(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)
        self._treeview = BlaMetadataViewer.TreeView(
            model, is_editable=is_editable)
        self._treeview.set_reorderable(False)
        self._treeview.set_rubber_banding(True)
        self._treeview.set_property("rules_hint", True)
        # Name column
        r = gtk.CellRendererText()
        self._treeview.insert_column_with_data_func(
            -1, "Name", r, self.__cdf_name)
        # Value column
        r = gtk.CellRendererText()
        r.set_property("ellipsize", pango.ELLIPSIZE_END)
        r.set_property("editable", is_editable)
        self._treeview.insert_column_with_data_func(
            -1, "Value", r, self.__cdf_value)
        if is_editable:
            def editing_started(renderer, editable, path):
                self._treeview.set_cursor(path)
                model = self._treeview.get_model()
                # Remove the "Varies between tracks" label.
                if model[path][1] is None:
                    editable.set_text("")
            r.connect("editing_started", editing_started)
            def edited(renderer, path, text):
                row = self._treeview.get_model()[path]
                identifier = row[0]
                # Only emit when the value actually changed.
                if row[1] != text:
                    row[1] = text
                    self.emit("value_changed", identifier, text)
            r.connect("edited", edited)
        for column in self._treeview.get_columns():
            column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
        # Wrap the treeview.
        sw = BlaScrolledWindow()
        sw.add(self._treeview)
        # Progress bar used by subclasses (e.g. while writing tags).
        self._pb = gtk.ProgressBar()
        self._pb.set_visible(False)
        self.pack_start(sw, expand=True)
        self.pack_start(self._pb, expand=False)
        playlist_manager.connect_object(
            "selection_changed", BlaMetadataViewer._update_model, self)
        # URIs of the currently displayed tracks.
        self._uris = []
        sw.show_all()
        self.show()
    def _populate_model(self, model):
        # Hook for subclasses: fill `model` with (identifier, value) rows.
        pass
    def _update_model(self, uris):
        # Rebuild the model for a new selection of track URIs.
        model = self._treeview.get_model()
        model.clear()
        self._uris = uris
        if not self._uris:
            return
        sw = self.get_children()[0]
        # For oversized selections, replace the treeview with a notice
        # instead of populating a huge model.
        if len(uris) > blaconst.TAG_EDITOR_MAX_ITEMS:
            if sw.child == self._treeview:
                sw.remove(self._treeview)
                viewport = gtk.Viewport()
                viewport.add(gtk.Label("Too many items selected"))
                viewport.set_shadow_type(gtk.SHADOW_NONE)
                viewport.show_all()
                sw.add(viewport)
            return
        else:
            if sw.child != self._treeview:
                sw.remove(sw.child)
                sw.add(self._treeview)
        self._populate_model(model)
    def __cdf_name(self, column, renderer, model, iterator):
        # Render the human-readable field label, or the raw identifier in
        # angle brackets for unknown/custom tags.
        identifier = model[iterator][0]
        try:
            text = IDENTIFIER_LABELS[identifier]
        except TypeError:
            text = "<%s>" % identifier.upper()
        renderer.set_property("text", text)
    def __cdf_value(self, column, renderer, model, iterator):
        # None marks a field whose value differs across selected tracks.
        value = model[iterator][1]
        if value is None:
            renderer.set_property("markup", "<i>Varies between tracks</i>")
        else:
            renderer.set_property("text", value)
class BlaTagEditor(BlaMetadataViewer):
    """Editable metadata viewer that writes tag changes back to files.
    Edits are buffered in `self._modified` (uri -> modified track copy,
    copy-on-write) until the user applies or undoes them.
    """
    __metaclass__ = blautil.BlaSingletonMeta
    def __init__(self, playlist_manager):
        super(BlaTagEditor, self).__init__(is_editable=True,
                                           playlist_manager=playlist_manager)
        def key_press_event(treeview, event):
            # Delete key removes the selected tags from all shown tracks.
            if blagui.is_accel(event, "Delete"):
                model, paths = treeview.get_selection().get_selected_rows()
                identifiers = [model[path][0] for path in paths]
                if identifiers:
                    self.__delete_tags(identifiers)
        self._treeview.connect("key_press_event", key_press_event)
        self._treeview.connect_object("popup", BlaTagEditor.__popup, self)
        self.connect_object("value_changed", BlaTagEditor.__set_value, self)
        # Undo/apply button strip, exposed via get_control_widget().
        self.__hbox = gtk.HBox()
        buttons = [
            ("Undo changes", gtk.STOCK_UNDO, BlaTagEditor.__undo),
            ("Apply changes", gtk.STOCK_OK, BlaTagEditor.__apply)
        ]
        for tooltip, stock, callback in buttons:
            button = gtk.Button()
            button.set_tooltip_text(tooltip)
            button.set_relief(gtk.RELIEF_NONE)
            button.set_focus_on_click(False)
            button.add(
                gtk.image_new_from_stock(stock, gtk.ICON_SIZE_MENU))
            style = gtk.RcStyle()
            style.xthickness = style.ythickness = 0
            button.modify_style(style)
            button.connect_object("clicked", callback, self)
            self.__hbox.pack_start(button)
        self.__hbox.set_sensitive(False)
        # uri -> modified copy of the library track (copy-on-write buffer).
        self._modified = blautil.BlaNotifyDict()
        def callback(dict_):
            # Buttons are only clickable while unsaved changes exist.
            self.__hbox.set_sensitive(len(dict_) != 0)
        self._modified.connect(callback)
    def _populate_model(self, model):
        # Prefer the locally modified copy over the library's track.
        tracks = [self._modified.get(uri, library[uri])
                  for uri in iter(self._uris)]
        # The standard tags
        for identifier in IDENTIFIER_TAGS:
            value = tracks[0][identifier]
            for track in tracks[1:]:
                if value != track[identifier]:
                    # None signals "Varies between tracks" to the renderer.
                    value = None
                    break
            if identifier == DATE:
                # Display only the year portion of the date.
                try:
                    value = value.split("-")[0]
                except AttributeError:
                    pass
            model.append([identifier, value])
        # Additional tags
        additional_tags = set()
        update = additional_tags.update
        keys_additional_tags = BlaTrack.keys_additional_tags
        map(update, map(keys_additional_tags, tracks))
        for tag in additional_tags:
            try:
                value = tracks[0][tag]
            except KeyError:
                value = None
            for track in tracks[1:]:
                try:
                    next_value = track[tag]
                except KeyError:
                    next_value = None
                if value != next_value:
                    value = None
                    break
            model.append([tag, value])
    def __add_tag(self, *args):
        # Modal dialog asking for a (name, value) pair of a new custom tag.
        diag = blaguiutils.BlaDialog(title="Add tag")
        diag.set_size_request(250, -1)
        table = gtk.Table(columns=2, rows=2, homogeneous=False)
        entry_name = gtk.Entry()
        entry_value = gtk.Entry()
        idx = 0
        for label, entry in [("Name", entry_name), ("Value", entry_value)]:
            table.attach(gtk.Label("%s:" % label), 0, 1, idx, idx+1)
            table.attach(entry, 1, 2, idx, idx+1, xpadding=5)
            idx += 1
        diag.vbox.set_border_width(10)
        diag.vbox.pack_start(table)
        diag.show_all()
        response = diag.run()
        if response == gtk.RESPONSE_OK:
            name = entry_name.get_text()
            value = entry_value.get_text()
            # Standard tags are always listed; only add genuinely new fields.
            if name not in IDENTIFIER_TAGS:
                self.__set_value(name, value)
        diag.destroy()
    def __update_model_and_restore_selection(self):
        # Rebuild the model, then re-apply cursor position and selection.
        try:
            cursor_path, column = self._treeview.get_cursor()
        except TypeError:
            cursor_path = column = None
        selection = self._treeview.get_selection()
        model, paths = selection.get_selected_rows()
        ids = [model[path][0] for path in paths]
        # TODO: Only update the model if a field was removed or added.
        self._update_model(self._uris)
        paths = [row.path for row in model if row[0] in ids]
        if cursor_path is not None:
            self._treeview.set_cursor(cursor_path, column)
        map(selection.select_path, paths)
    def __set_value(self, identifier, value):
        # Apply a new tag value to every selected track (buffered only).
        for uri in iter(self._uris):
            if not self._modified.has_key(uri):
                # copy-on-write
                self._modified[uri] = deepcopy(library[uri])
            self._modified[uri][identifier] = value
        self.__update_model_and_restore_selection()
    def __delete_tags(self, identifiers):
        # Remove the given tags from every selected track (buffered only).
        for uri in iter(self._uris):
            if not self._modified.has_key(uri):
                # copy-on-write
                self._modified[uri] = deepcopy(library[uri])
            for identifier in identifiers:
                del self._modified[uri][identifier]
        self.__update_model_and_restore_selection()
    def __capitalize(self, identifiers):
        # Capitalize the first letter of every word in the given fields.
        def capitalize(s):
            return re.sub(r"(^|\s)(\S)",
                          lambda m: m.group(1) + m.group(2).upper(), s)
        for uri in iter(self._uris):
            try:
                track = self._modified[uri]
            except KeyError:
                track = library[uri]
            for identifier in identifiers:
                value = capitalize(track[identifier])
                if value != track[identifier]:
                    if not self._modified.has_key(uri):
                        # copy-on-write
                        self._modified[uri] = deepcopy(track)
                    self._modified[uri][identifier] = value
        self.__update_model_and_restore_selection()
    def __popup(self, event):
        # Context menu: "Add tag..." always, plus per-row actions when the
        # click landed on a row.
        menu = gtk.Menu()
        m = gtk.MenuItem("Add tag...")
        m.connect("activate", self.__add_tag)
        menu.append(m)
        try:
            path, column, x, y = self._treeview.get_path_at_pos(
                *map(int, [event.x, event.y]))
        except TypeError:
            pass
        else:
            model, paths = self._treeview.get_selection().get_selected_rows()
            identifiers = [model[path][0] for path in paths]
            items = [
                ("Delete tag", "Delete",
                 lambda *x: self.__delete_tags(identifiers)),
                ("Capitalize", None, lambda *x: self.__capitalize(identifiers))
            ]
            accel_group = blaplay.bla.ui_manager.get_accel_group()
            for label, accel, callback in items:
                m = gtk.MenuItem(label)
                if accel:
                    mod, key = gtk.accelerator_parse(accel)
                    m.add_accelerator("activate", accel_group, mod, key,
                                      gtk.ACCEL_VISIBLE)
                m.connect("activate", callback)
                menu.append(m)
        menu.show_all()
        menu.popup(None, None, None, event.button, event.time)
    def __apply(self):
        # Write all buffered changes to disk, reporting progress.
        if len(self._modified) == 0:
            return
        self._pb.set_visible(True)
        unit = 1.0 / len(self._modified)
        idx = 0
        succeeded = 0
        for uri, track in self._modified.iteritems():
            self._pb.set_fraction(unit * (idx+1))
            self._pb.set_text(uri)
            library[uri] = track
            # track.save() returns a truthy value on success.
            succeeded += int(track.save())
            idx += 1
        self._pb.set_visible(False)
        library.sync()
        self._update_model(self._uris)
        n_modified = len(self._modified)
        if n_modified > 0 and succeeded != n_modified:
            blaguiutils.warning_dialog(
                "Failed to write tags for %d of %d files." %
                ((n_modified-succeeded), n_modified))
        self._modified.clear()
    def __undo(self):
        # Drop all buffered edits and restore the library's values.
        self._modified.clear()
        self.__update_model_and_restore_selection()
    def get_control_widget(self):
        # Returns the undo/apply button strip for embedding by the caller.
        return self.__hbox
class BlaProperties(BlaMetadataViewer):
    """Read-only viewer for file/stream properties of the selected tracks."""
    def __init__(self, playlist_manager):
        super(BlaProperties, self).__init__(is_editable=False,
                                            playlist_manager=playlist_manager)
    def _populate_model(self, model):
        def get_value(track, identifier):
            # Missing values map to None so the base class renders the
            # "Varies between tracks" placeholder.
            if not track[identifier] and identifier != MONITORED_DIRECTORY:
                return None
            elif identifier == FILESIZE:
                value = track.get_filesize()
            elif identifier == MTIME:
                value = ctime(track[MTIME])
            elif identifier == LENGTH:
                value = track.duration
            elif identifier == BITRATE:
                value = track.bitrate
            elif identifier == SAMPLING_RATE:
                value = track.sampling_rate
            elif identifier == CHANNELS:
                value = str(track[CHANNELS])
            else:
                value = track[identifier]
            return value
        tracks = [library[uri] for uri in iter(self._uris)]
        for identifier in IDENTIFIER_PROPERTIES:
            value = get_value(tracks[0], identifier)
            # None out any property that differs between selected tracks.
            for track in tracks[1:]:
                if value != get_value(track, identifier):
                    value = None
                    break
            model.append([identifier, value])
| nkoep/blaplay | blaplay/blagui/blatagedit.py | Python | gpl-2.0 | 15,606 |
from __future__ import annotations
from typing import Any
import pandas._libs.json as json
from pandas._typing import (
FilePath,
StorageOptions,
WriteExcelBuffer,
)
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
combine_kwargs,
validate_freeze_panes,
)
class _XlsxStyler:
    # Map from openpyxl-oriented styles to flatter xlsxwriter representation
    # Ordering necessary for both determinism and because some are keyed by
    # prefixes of others.
    STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
        "font": [
            (("name",), "font_name"),
            (("sz",), "font_size"),
            (("size",), "font_size"),
            (("color", "rgb"), "font_color"),
            (("color",), "font_color"),
            (("b",), "bold"),
            (("bold",), "bold"),
            (("i",), "italic"),
            (("italic",), "italic"),
            (("u",), "underline"),
            (("underline",), "underline"),
            (("strike",), "font_strikeout"),
            (("vertAlign",), "font_script"),
            (("vertalign",), "font_script"),
        ],
        "number_format": [(("format_code",), "num_format"), ((), "num_format")],
        "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
        "alignment": [
            (("horizontal",), "align"),
            (("vertical",), "valign"),
            (("text_rotation",), "rotation"),
            (("wrap_text",), "text_wrap"),
            (("indent",), "indent"),
            (("shrink_to_fit",), "shrink"),
        ],
        "fill": [
            (("patternType",), "pattern"),
            (("patterntype",), "pattern"),
            (("fill_type",), "pattern"),
            (("start_color", "rgb"), "fg_color"),
            (("fgColor", "rgb"), "fg_color"),
            (("fgcolor", "rgb"), "fg_color"),
            (("start_color",), "fg_color"),
            (("fgColor",), "fg_color"),
            (("fgcolor",), "fg_color"),
            (("end_color", "rgb"), "bg_color"),
            (("bgColor", "rgb"), "bg_color"),
            (("bgcolor", "rgb"), "bg_color"),
            (("end_color",), "bg_color"),
            (("bgColor",), "bg_color"),
            (("bgcolor",), "bg_color"),
        ],
        "border": [
            (("color", "rgb"), "border_color"),
            (("color",), "border_color"),
            (("style",), "border"),
            (("top", "color", "rgb"), "top_color"),
            (("top", "color"), "top_color"),
            (("top", "style"), "top"),
            (("top",), "top"),
            (("right", "color", "rgb"), "right_color"),
            (("right", "color"), "right_color"),
            (("right", "style"), "right"),
            (("right",), "right"),
            (("bottom", "color", "rgb"), "bottom_color"),
            (("bottom", "color"), "bottom_color"),
            (("bottom", "style"), "bottom"),
            (("bottom",), "bottom"),
            (("left", "color", "rgb"), "left_color"),
            (("left", "color"), "left_color"),
            (("left", "style"), "left"),
            (("left",), "left"),
        ],
    }
    @classmethod
    def convert(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlsxwriter format dict
        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        # Create a XlsxWriter format object.
        props = {}
        if num_format_str is not None:
            props["num_format"] = num_format_str
        if style_dict is None:
            return props
        # Accept the legacy "borders" key as an alias for "border".
        if "borders" in style_dict:
            style_dict = style_dict.copy()
            style_dict["border"] = style_dict.pop("borders")
        for style_group_key, style_group in style_dict.items():
            for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
                # src is a sequence of keys into a nested dict
                # dst is a flat key
                # First mapping to produce a value wins (hence the ordering
                # constraint on STYLE_MAPPING above).
                if dst in props:
                    continue
                v = style_group
                for k in src:
                    try:
                        v = v[k]
                    except (KeyError, TypeError):
                        break
                else:
                    props[dst] = v
        if isinstance(props.get("pattern"), str):
            # TODO: support other fill patterns
            props["pattern"] = 0 if props["pattern"] == "none" else 1
        for k in ["border", "top", "right", "bottom", "left"]:
            if isinstance(props.get(k), str):
                try:
                    # The position in this list is xlsxwriter's numeric
                    # border-style code for the openpyxl style name.
                    props[k] = [
                        "none",
                        "thin",
                        "medium",
                        "dashed",
                        "dotted",
                        "thick",
                        "double",
                        "hair",
                        "mediumDashed",
                        "dashDot",
                        "mediumDashDot",
                        "dashDotDot",
                        "mediumDashDotDot",
                        "slantDashDot",
                    ].index(props[k])
                except ValueError:
                    # Unknown style names fall back to 2 ("medium").
                    props[k] = 2
        if isinstance(props.get("font_script"), str):
            props["font_script"] = ["baseline", "superscript", "subscript"].index(
                props["font_script"]
            )
        if isinstance(props.get("underline"), str):
            props["underline"] = {
                "none": 0,
                "single": 1,
                "double": 2,
                "singleAccounting": 33,
                "doubleAccounting": 34,
            }[props["underline"]]
        return props
class XlsxWriter(ExcelWriter):
    """ExcelWriter implementation backed by the xlsxwriter package."""
    engine = "xlsxwriter"
    supported_extensions = (".xlsx",)
    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: str | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ):
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook
        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
        # xlsxwriter can only create new workbooks, never modify existing
        # ones, so append mode is rejected up front.
        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")
        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )
        self._book = Workbook(self._handles.handle, **engine_kwargs)
    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.
        This attribute can be used to access engine-specific features.
        """
        return self._book
    @property
    def sheets(self) -> dict[str, Any]:
        # Mapping of sheet name -> xlsxwriter worksheet object.
        result = self.book.sheetnames
        return result
    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.close()
    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)
        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)
        # Cache of xlsxwriter Format objects, keyed by the JSON-serialized
        # style dict (plus the value's number-format string).
        style_dict = {"null": None}
        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))
        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)
            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt
            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style
            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)
| pandas-dev/pandas | pandas/io/excel/_xlsxwriter.py | Python | bsd-3-clause | 8,835 |
import pytest
import dask
from dask.order import ndependencies, order
from dask.core import get_deps
from dask.utils_test import add, inc
@pytest.fixture(params=['abcde', 'edcba'])
def abcde(request):
    # Supplies the five key prefixes in both forward and reverse alphabetical
    # order, so tests cannot accidentally rely on lexicographic tie-breaking.
    return request.param
def issorted(L, reverse=False):
    """Return True if the sequence *L* is already in sorted order."""
    expected = sorted(L, reverse=reverse)
    return L == expected
def f(*args):
    """No-op dummy task: accepts any arguments, returns None."""
    return None
def test_ordering_keeps_groups_together(abcde):
    """Sibling inputs of the same reducer should run back to back."""
    a, b, c, d, e = abcde
    # NOTE(review): `d` is rebound here from a key prefix to the graph dict.
    d = dict(((a, i), (f,)) for i in range(4))
    d.update({(b, 0): (f, (a, 0), (a, 1)),
              (b, 1): (f, (a, 2), (a, 3))})
    o = order(d)
    assert abs(o[(a, 0)] - o[(a, 1)]) == 1
    assert abs(o[(a, 2)] - o[(a, 3)]) == 1
    # Same structure with the sibling pairings interleaved.
    d = dict(((a, i), (f,)) for i in range(4))
    d.update({(b, 0): (f, (a, 0), (a, 2)),
              (b, 1): (f, (a, 1), (a, 3))})
    o = order(d)
    assert abs(o[(a, 0)] - o[(a, 2)]) == 1
    assert abs(o[(a, 1)] - o[(a, 3)]) == 1
@pytest.mark.xfail(reason="Can't please 'em all")
def test_avoid_broker_nodes(abcde):
    """
    b0 b1 b2
    |  \  /
    a0  a1
    a0 should be run before a1
    """
    a, b, c, d, e = abcde
    dsk = {(a, 0): (f,), (a, 1): (f,),
           (b, 0): (f, (a, 0)), (b, 1): (f, (a, 1)), (b, 2): (f, (a, 1))}
    o = order(dsk)
    assert o[(a, 0)] < o[(a, 1)]
    # Switch name of 0, 1 to ensure that this isn't due to string comparison
    dsk = {(a, 1): (f,), (a, 0): (f,),
           (b, 0): (f, (a, 1)), (b, 1): (f, (a, 0)), (b, 2): (f, (a, 0))}
    o = order(dsk)
    assert o[(a, 0)] > o[(a, 1)]
def test_base_of_reduce_preferred(abcde):
    """
             a3
            /|
          a2 |
         /|  |
       a1 |  |
      /|  |  |
    a0 |  |  |
    |  |  |  |
    b0 b1 b2 b3
      \ \ / /
         c
    We really want to run b0 quickly
    """
    a, b, c, d, e = abcde
    # Chain a1..a3, each depending on the previous a and its own b.
    dsk = {(a, i): (f, (a, i - 1), (b, i)) for i in [1, 2, 3]}
    dsk[(a, 0)] = (f, (b, 0))
    dsk.update({(b, i): (f, c, 1) for i in [0, 1, 2, 3]})
    dsk[c] = 1
    o = order(dsk)
    # b0 (and then b1) must be scheduled very early.
    assert o[(b, 0)] <= 4
    assert o[(b, 1)] <= 6
@pytest.mark.xfail(reason="Can't please 'em all")
def test_avoid_upwards_branching(abcde):
    """
         a1
         |
         a2
         |
         a3    d1
        /  \  /
      b1    c1
      |     |
      b2    c2
            |
            c3
    Prefer b1 over c1 because it won't stick around waiting for d1 to complete
    """
    a, b, c, d, e = abcde
    dsk = {(a, 1): (f, (a, 2)),
           (a, 2): (f, (a, 3)),
           (a, 3): (f, (b, 1), (c, 1)),
           (b, 1): (f, (b, 2)),
           (c, 1): (f, (c, 2)),
           (c, 2): (f, (c, 3)),
           (d, 1): (f, (c, 1))}
    o = order(dsk)
    assert o[(b, 1)] < o[(c, 1)]
def test_avoid_upwards_branching_complex(abcde):
    """
         a1
         |
    e2   a2  d2  d3
    |    |    \  /
    e1   a3    d1
     \  /  \  /
      b1    c1
      |     |
      b2    c2
            |
            c3
    Prefer c1 over b1 because c1 will stay in memory less long while b1
    computes
    """
    a, b, c, d, e = abcde
    dsk = {(a, 1): (f, (a, 2)),
           (a, 2): (f, (a, 3)),
           (a, 3): (f, (b, 1), (c, 1)),
           (b, 1): (f, (b, 2)),
           (b, 2): (f,),
           (c, 1): (f, (c, 2)),
           (c, 2): (f, (c, 3)),
           (c, 3): (f,),
           (d, 1): (f, (c, 1)),
           (d, 2): (f, (d, 1)),
           (d, 3): (f, (d, 1)),
           (e, 1): (f, (b, 1)),
           (e, 2): (f, (e, 1))}
    o = order(dsk)
    assert o[(c, 1)] < o[(b, 1)]
@pytest.mark.xfail(reason="this case is ambiguous")
def test_deep_bases_win_over_dependents(abcde):
    """
    It's not clear who should run first, e or d
    1.  d is nicer because it exposes parallelism
    2.  e is nicer (hypothetically) because it will be sooner released
        (though in this case we need d to run first regardless)
        a
      / | \   .
     b  c |
    / \ | /
    e   d
    """
    a, b, c, d, e = abcde
    dsk = {a: (f, b, c, d), b: (f, d, e), c: (f, d), d: 1, e: 2}
    o = order(dsk)
    assert o[e] < o[d]
    assert o[d] < o[b] or o[d] < o[c]
def test_prefer_deep(abcde):
    """
        c
        |
    e   b
    |   |
    d   a
    Prefer longer chains first so we should start with c
    """
    a, b, c, d, e = abcde
    dsk = {a: 1, b: (f, a), c: (f, b),
           d: 1, e: (f, d)}
    o = order(dsk)
    # The whole a->b->c chain comes before the shorter d->e chain.
    assert o[a] < o[d]
    assert o[b] < o[d]
def test_stacklimit(abcde):
    """A 10000-task linear chain must not hit the recursion limit."""
    # The abcde fixture is unused; it only parametrizes the test id.
    dsk = dict(('x%s' % (i + 1), (inc, 'x%s' % i)) for i in range(10000))
    dependencies, dependents = get_deps(dsk)
    ndependencies(dependencies, dependents)
@pytest.mark.xfail(reason="Can't please 'em all")
def test_break_ties_by_str(abcde):
    """Equal-priority tasks would ideally be ordered by key string."""
    a, b, c, d, e = abcde
    dsk = {('x', i): (inc, i) for i in range(10)}
    x_keys = sorted(dsk)
    dsk['y'] = list(x_keys)
    o = order(dsk)
    expected = {'y': 0}
    expected.update({k: i + 1 for i, k in enumerate(x_keys)})
    assert o == expected
def test_order_doesnt_fail_on_mixed_type_keys(abcde):
    """order() must tolerate graphs mixing str and tuple keys."""
    order({'x': (inc, 1),
           ('y', 0): (inc, 2),
           'z': (add, 'x', ('y', 0))})
def test_gh_3055():
    """Regression test for dask/dask#3055 (ordering of cumulative graphs)."""
    da = pytest.importorskip('dask.array')
    A, B = 20, 99
    orig = x = da.random.normal(size=(A, B), chunks=(1, None))
    for _ in range(2):
        y = (x[:, None, :] * x[:, :, None]).cumsum(axis=0)
        x = x.cumsum(axis=0)
        w = (y * x[:, None]).sum(axis=(1,2))
    dsk = dict(w.__dask_graph__())
    o = order(dsk)
    L = [o[k] for k in w.__dask_keys__()]
    assert sum(x < len(o) / 2 for x in L) > len(L) / 3  # some complete quickly
    L = [o[k] for kk in orig.__dask_keys__() for k in kk]
    assert sum(x > len(o) / 2 for x in L) > len(L) / 3  # some start later
    assert sorted(L) == L  # operate in order
def test_type_comparisions_ok(abcde):
    """Keys of different tuple lengths must not break internal comparisons."""
    a, b, c, d, e = abcde
    dsk = {a: 1, (a, 1): 2, (a, b, 1): 3}
    order(dsk)  # this doesn't err
def test_prefer_short_dependents(abcde):
    """
         a
         |
      d  b  e
       \ | /
         c
    Prefer to finish d and e before starting b.  That way c can be released
    during the long computations.
    """
    a, b, c, d, e = abcde
    dsk = {c: (f,), d: (f, c), e: (f, c), b: (f, c), a: (f, b)}
    o = order(dsk)
    assert o[d] < o[b]
    assert o[e] < o[b]
@pytest.mark.xfail(reason="This is challenging to do precisely")
def test_run_smaller_sections(abcde):
    """
            aa
           / |
      b   d  bb dd
     / \ /|  |  /
    a   c e  cc
    Prefer to run acb first because then we can get that out of the way
    """
    a, b, c, d, e = abcde
    aa, bb, cc, dd = [x * 2 for x in [a, b, c, d]]
    expected = [a, c, b, e, d, cc, bb, aa, dd]
    log = []
    # Local factory shadows the module-level dummy `f`: each produced task
    # callable records its label so the execution order can be asserted.
    def f(x):
        def _(*args):
            log.append(x)
        return _
    dsk = {a: (f(a),),
           c: (f(c),),
           e: (f(e),),
           cc: (f(cc),),
           b: (f(b), a, c),
           d: (f(d), c, e),
           bb: (f(bb), cc),
           aa: (f(aa), d, bb),
           dd: (f(dd), cc)}
    dask.get(dsk, [aa, b, dd])  # trigger computation
    assert log == expected
def test_local_parents_of_reduction(abcde):
    """
            c1
            |
        b1  c2
        |  /|
    a1  b2  c3
    |  /|
    a2  b3
    |
    a3
    Prefer to finish a1 stack before proceding to b2
    """
    a, b, c, d, e = abcde
    a1, a2, a3 = [a + i for i in '123']
    b1, b2, b3 = [b + i for i in '123']
    c1, c2, c3 = [c + i for i in '123']
    expected = [a3, a2, a1,
                b3, b2, b1,
                c3, c2, c1]
    log = []
    # Local factory shadows the module-level dummy `f`: tasks log their
    # label as they execute so the schedule can be asserted exactly.
    def f(x):
        def _(*args):
            log.append(x)
        return _
    dsk = {a3: (f(a3),),
           a2: (f(a2), a3),
           a1: (f(a1), a2),
           b3: (f(b3),),
           b2: (f(b2), b3, a2),
           b1: (f(b1), b2),
           c3: (f(c3),),
           c2: (f(c2), c3, b2),
           c1: (f(c1), c2)}
    # NOTE(review): the result of this order() call is discarded --
    # presumably a smoke check; the real assertion uses dask.get below.
    order(dsk)
    dask.get(dsk, [a1, b1, c1])  # trigger computation
    assert log == expected
def test_nearest_neighbor(abcde):
    """
    a1  a2  a3  a4  a5  a6  a7  a8  a9
     \  |  / \  |  / \  |  / \  |  /
        b1      b2      b3      b4
    Want to finish off a local group before moving on.
    This is difficult because all groups are connected.
    """
    a, b, c, _, _ = abcde
    a1, a2, a3, a4, a5, a6, a7, a8, a9 = [a + i for i in '123456789']
    b1, b2, b3, b4 = [b + i for i in '1234']
    dsk = {b1: (f,),
           b2: (f,),
           b3: (f,),
           b4: (f,),
           a1: (f, b1),
           a2: (f, b1),
           a3: (f, b1, b2),
           a4: (f, b2),
           a5: (f, b2, b3),
           a6: (f, b3),
           a7: (f, b3, b4),
           a8: (f, b4),
           a9: (f, b4)}
    o = order(dsk)
    # Roughly half of each tier should finish in the first half of the
    # schedule -- loose bounds since groups overlap.
    assert 3 < sum(o[a + i] < len(o) / 2 for i in '123456789') < 7
    assert 1 < sum(o[b + i] < len(o) / 2 for i in '1234') < 4
    assert o[min([b1, b2, b3, b4])] == 0
def test_string_ordering():
    """ Prefer ordering tasks by name first """
    dsk = {('a', 1): (f,), ('a', 2): (f,), ('a', 3): (f,)}
    o = order(dsk)
    assert o == {('a', 1): 0,
                 ('a', 2): 1,
                 ('a', 3): 2}
def test_string_ordering_dependents():
    """ Prefer ordering tasks by name first even when in dependencies """
    dsk = {('a', 1): (f, 'b'), ('a', 2): (f, 'b'), ('a', 3): (f, 'b'),
           'b': (f,)}
    o = order(dsk)
    # The shared dependency runs first, then the dependents by name.
    assert o == {'b': 0,
                 ('a', 1): 1,
                 ('a', 2): 2,
                 ('a', 3): 3}
def test_prefer_short_narrow(abcde):
    # See test_prefer_short_ancestor for a fail case.
    # Two stacked reductions: the inputs of the first reduction should be
    # consumed (and thus releasable) before the second tier is started.
    a, b, c, _, _ = abcde
    dsk = {
        (a, 0): 0,
        (b, 0): 0,
        (c, 0): 0,
        (c, 1): (f, (c, 0), (a, 0), (b, 0)),
        (a, 1): 1,
        (b, 1): 1,
        (c, 2): (f, (c, 1), (a, 1), (b, 1)),
    }
    o = order(dsk)
    assert o[(b, 0)] < o[(b, 1)]
    assert o[(b, 0)] < o[(c, 2)]
    assert o[(c, 1)] < o[(c, 2)]
def test_prefer_short_ancestor(abcde):
    """
    From https://github.com/dask/dask-ml/issues/206#issuecomment-395869929
    Two cases, one where chunks of an array are independent, and one where the
    chunks of an array have a shared source. We handled the independent one
    "well" earlier.
    Good:
                    c2
                   / \ \
                  /   \ \
                c1     \ \
              / | \     \ \
            c0  a0  b0   a1  b1
    Bad:
                    c2
                   / \ \
                  /   \ \
                c1     \ \
              / | \     \ \
            c0  a0  b0   a1  b1
                 \  \   /  /
                  \  \ /  /
                    a-b
    The difference is that all the `a` and `b` tasks now have a common
    ancestor.
    We would like to choose c1 *before* a1, and b1 because
    * we can release a0 and b0 once c1 is done
    * we don't need a1 and b1 to compute c1.
    """
    a, b, c, _, _ = abcde
    ab = a + b
    dsk = {
        ab: 0,
        (a, 0): (f, ab, 0, 0),
        (b, 0): (f, ab, 0, 1),
        (c, 0): 0,
        (c, 1): (f, (c, 0), (a, 0), (b, 0)),
        (a, 1): (f, ab, 1, 0),
        (b, 1): (f, ab, 1, 1),
        (c, 2): (f, (c, 1), (a, 1), (b, 1)),
    }
    o = order(dsk)
    assert o[(b, 0)] < o[(b, 1)]
    assert o[(b, 0)] < o[(c, 2)]
    assert o[(c, 1)] < o[(c, 2)]
    assert o[(c, 1)] < o[(a, 1)]
def test_map_overlap(abcde):
    """
    A map-overlap style graph (each c depends on a window of d's):

      b1      b3     b5
       |\    / | \  / |
       c1  c2  c3  c4  c5
       |/  | \ | / | \ |
       d1  d2  d3  d4  d5
       |       |       |
       e1      e3      e5

    Want to finish b1 before we start on e5
    """
    a, b, c, d, e = abcde
    # Build the graph column by column, left to right.
    dsk = {
        (e, 1): (f,),
        (d, 1): (f, (e, 1)),
        (c, 1): (f, (d, 1)),
        (b, 1): (f, (c, 1), (c, 2)),
        (d, 2): (f,),
        (c, 2): (f, (d, 1), (d, 2), (d, 3)),
        (e, 3): (f,),
        (d, 3): (f, (e, 3)),
        (c, 3): (f, (d, 3)),
        (b, 3): (f, (c, 2), (c, 3), (c, 4)),
        (d, 4): (f,),
        (c, 4): (f, (d, 3), (d, 4), (d, 5)),
        (e, 5): (f,),
        (d, 5): (f, (e, 5)),
        (c, 5): (f, (d, 5)),
        (b, 5): (f, (c, 4), (c, 5))
    }
    o = order(dsk)
    # Depth-first from one side: whichever end we start on, its `b` output
    # should complete before the far end's `e` leaf is even started.
    assert o[(b, 1)] < o[(e, 5)] or o[(b, 5)] < o[(e, 1)]
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/dask/tests/test_order.py | Python | gpl-3.0 | 12,295 |
#
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, Text
from sqlalchemy import Boolean, Integer, String, DateTime, Float
def upgrade(migrate_engine):
    """Create the ``alarm`` table (migration 007)."""
    meta = MetaData(bind=migrate_engine)

    # Column definitions gathered in one place, then splatted into Table.
    columns = [
        Column('id', String(255), primary_key=True, index=True),
        Column('enabled', Boolean),
        Column('name', Text()),
        Column('description', Text()),
        Column('timestamp', DateTime(timezone=False)),
        Column('counter_name', String(255), index=True),
        Column('user_id', String(255), index=True),
        Column('project_id', String(255), index=True),
        Column('comparison_operator', String(2)),
        Column('threshold', Float),
        Column('statistic', String(255)),
        Column('evaluation_periods', Integer),
        Column('period', Integer),
        Column('state', String(255)),
        Column('state_timestamp', DateTime(timezone=False)),
        Column('ok_actions', Text()),
        Column('alarm_actions', Text()),
        Column('insufficient_data_actions', Text()),
        Column('matching_metadata', Text()),
    ]
    alarm = Table(
        'alarm', meta, *columns,
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    alarm.create()
| ityaptin/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py | Python | apache-2.0 | 1,821 |
import wx
import os
import re
import time
import inspect
cmdFolder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
from settings import Settings
from cnc import CNC
from gcframe import GcFrame, RETRACTIONCOLOR, REVRETRACTIONCOLOR, PRINTCOLOR
from gclistctrl import GcodeListCtrl
from shiftmodel import ShiftModelDlg
from modtemps import ModifyTempsDlg
from modspeed import ModifySpeedDlg
from editgcode import EditGCodeDlg
from filamentchange import FilamentChangeDlg
from savelayer import SaveLayerDlg
from images import Images
from tools import formatElapsed
from gcsuffix import parseGCSuffix, modifyGCSuffix
from properties import PropertiesDlg
from propenums import PropertyEnum
gcRegex = re.compile("[-]?\d+[.]?\d*")
BUTTONDIM = (48, 48)
TITLE_PREFIX = "G Code Analyze/Edit"
reX = re.compile("(.*[xX])([0-9\.]+)(.*)")
reY = re.compile("(.*[yY])([0-9\.]+)(.*)")
reZ = re.compile("(.*[zZ])([0-9\.]+)(.*)")
reS = re.compile("(.*[sS])([0-9\.]+)(.*)")
reF = re.compile("(.*[fF])([0-9\.]+)(.*)")
reE = re.compile("(.*[eE])([0-9\.]+)(.*)")
class LegendDlg(wx.Frame):
    # Small always-on-top helper frame that explains the colors used by the
    # G Code display (print-speed bands plus retraction markers).
    def __init__(self, parent):
        wx.Frame.__init__(self, parent, wx.ID_ANY, "Legend", size=(500, 500))
        self.parent = parent
        self.SetBackgroundColour(wx.Colour(255, 255, 255))
        # Notify the parent when the user closes the frame so it can drop
        # its reference (see onClose / parent.legendClosed).
        self.Bind(wx.EVT_CLOSE, self.onClose)
        lFont = wx.Font(16, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
        sz = wx.BoxSizer(wx.VERTICAL)
        sz.AddSpacer(20)
        # One static-text label per legend entry, colored to match the
        # drawing colors imported from gcframe.
        st = wx.StaticText(self, wx.ID_ANY, "Print Speed < 20 mm/s")
        st.SetForegroundColour(PRINTCOLOR[0])
        st.SetFont(lFont)
        sz.Add(st)
        sz.AddSpacer(10)
        st = wx.StaticText(self, wx.ID_ANY, "Print Speed < 40 mm/s")
        st.SetForegroundColour(PRINTCOLOR[1])
        st.SetFont(lFont)
        sz.Add(st)
        sz.AddSpacer(10)
        st = wx.StaticText(self, wx.ID_ANY, "Print Speed < 60 mm/s")
        st.SetForegroundColour(PRINTCOLOR[2])
        st.SetFont(lFont)
        sz.Add(st)
        sz.AddSpacer(10)
        st = wx.StaticText(self, wx.ID_ANY, "Print Speed >= 60 mm/s")
        st.SetForegroundColour(PRINTCOLOR[3])
        st.SetFont(lFont)
        sz.Add(st)
        sz.AddSpacer(20)
        st = wx.StaticText(self, wx.ID_ANY, "Retractions")
        st.SetForegroundColour(RETRACTIONCOLOR)
        st.SetFont(lFont)
        sz.Add(st)
        sz.AddSpacer(10)
        st = wx.StaticText(self, wx.ID_ANY, "Reverse Retractions")
        st.SetForegroundColour(REVRETRACTIONCOLOR)
        st.SetFont(lFont)
        sz.Add(st)
        sz.AddSpacer(20)
        # Horizontal margins around the column of labels.
        hsz = wx.BoxSizer(wx.HORIZONTAL)
        hsz.AddSpacer(20)
        hsz.Add(sz)
        hsz.AddSpacer(20)
        self.SetSizer(hsz)
        self.Layout()
        self.Fit()
        self.Show()
    def onClose(self, evt):
        # Let the parent clear its reference before this frame goes away.
        self.parent.legendClosed()
        self.Destroy()
class GEditDlg(wx.Frame):
def __init__(self, parent):
self.parent = parent
self.history = parent.history
wx.Frame.__init__(self, None, wx.ID_ANY, TITLE_PREFIX, size=(600, 600))
self.Show()
ico = wx.Icon(os.path.join(cmdFolder, "images", "geditico.png"), wx.BITMAP_TYPE_PNG)
self.SetIcon(ico)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.settings = Settings(cmdFolder)
self.propDlg = None
self.legend = None
self.log = self.parent.log
self.images = Images(os.path.join(cmdFolder, "images"))
self.shiftX = 0
self.shiftY = 0
self.modified = False
self.filename = None
self.importFileName = None
self.okToImport = False
self.gObj = self.loadGCode(self.filename)
if self.gObj is not None:
self.updateTitle()
self.gcFrame = GcFrame(self, self.gObj, self.settings)
self.stLayerText = wx.StaticText(self, wx.ID_ANY, "Layer Height: 0.00")
ht = self.gcFrame.GetSize().Get()[1] - 2*BUTTONDIM[1] - 20
if self.gObj is None:
lmax = 1
else:
lmax = self.gObj.layerCount()-1
self.slLayers = wx.Slider(
self, wx.ID_ANY, 0, 0, 1000, size=(-1, ht),
style=wx.SL_VERTICAL | wx.SL_AUTOTICKS | wx.SL_LABELS | wx.SL_INVERSE)
self.Bind(wx.EVT_SCROLL, self.onLayerScroll, self.slLayers)
if self.gObj is None:
self.slLayers.Enable(False)
self.lcGCode = GcodeListCtrl(self, self.gcode, self.images)
self.lcGCode.setLineNumbers(self.settings.uselinenbrs)
self.currentLayer = 0
self.setLayerText()
if self.gObj is not None:
self.lcGCode.setLayerBounds(self.gObj.getGCodeLines(0))
self.bShift = wx.BitmapButton(self, wx.ID_ANY, self.images.pngShift, size=BUTTONDIM)
self.bShift.SetToolTip("Move model in x/y direction")
self.Bind(wx.EVT_BUTTON, self.doShiftModel, self.bShift)
self.bShift.Enable(False)
self.bModTemp = wx.BitmapButton(self, wx.ID_ANY, self.images.pngModtemp, size=BUTTONDIM)
self.bModTemp.SetToolTip("Modify Temperatures")
self.Bind(wx.EVT_BUTTON, self.onModTemps, self.bModTemp)
self.bModTemp.Enable(False)
self.bModSpeed = wx.BitmapButton(self, wx.ID_ANY, self.images.pngModspeed, size=BUTTONDIM)
self.bModSpeed.SetToolTip("Modify Speed")
self.Bind(wx.EVT_BUTTON, self.onModSpeed, self.bModSpeed)
self.bModSpeed.Enable(False)
self.bFilChange = wx.BitmapButton(self, wx.ID_ANY, self.images.pngFilchange, size=BUTTONDIM)
self.bFilChange.SetToolTip("Insert G Code to assist with changing filament")
self.Bind(wx.EVT_BUTTON, self.onFilChange, self.bFilChange)
self.bFilChange.Enable(False)
self.bEdit = wx.BitmapButton(self, wx.ID_ANY, self.images.pngEdit, size=BUTTONDIM)
self.bEdit.SetToolTip("Free edit G Code")
self.Bind(wx.EVT_BUTTON, self.onEditGCode, self.bEdit)
self.bEdit.Enable(False)
self.bUp = wx.BitmapButton(self, wx.ID_ANY, self.images.pngUp, size=BUTTONDIM)
self.bUp.SetToolTip("Move up one layer")
self.Bind(wx.EVT_BUTTON, self.onUp, self.bUp)
self.bUp.Enable(False)
self.bDown = wx.BitmapButton(self, wx.ID_ANY, self.images.pngDown, size=BUTTONDIM)
self.bDown.SetToolTip("Move down one layer")
self.Bind(wx.EVT_BUTTON, self.onDown, self.bDown)
self.bDown.Enable(False)
self.bInfo = wx.BitmapButton(self, wx.ID_ANY, self.images.pngInfo, size=BUTTONDIM)
self.bInfo.SetToolTip("Information")
self.Bind(wx.EVT_BUTTON, self.onInfo, self.bInfo)
self.bInfo.Enable(False)
self.bLegend = wx.BitmapButton(self, wx.ID_ANY, self.images.pngLegend, size=BUTTONDIM)
self.bLegend.SetToolTip("Display a color legend")
self.Bind(wx.EVT_BUTTON, self.onLegend, self.bLegend)
self.bLegend.Enable(True)
self.bSaveLayers = wx.BitmapButton(self, wx.ID_ANY, self.images.pngSavelayers, size=BUTTONDIM)
self.bSaveLayers.SetToolTip("Save specific layers to a file")
self.Bind(wx.EVT_BUTTON, self.onSaveLayers, self.bSaveLayers)
self.bSaveLayers.Enable(False)
self.bOpen = wx.BitmapButton(self, wx.ID_ANY, self.images.pngFileopen, size=BUTTONDIM)
self.bOpen.SetToolTip("Open a G Code file")
self.Bind(wx.EVT_BUTTON, self.onOpen, self.bOpen)
self.bImport = wx.BitmapButton(self, wx.ID_ANY, self.images.pngImport, size=BUTTONDIM)
self.bImport.SetToolTip("Import the current toolbox G Code file")
self.Bind(wx.EVT_BUTTON, self.onImport, self.bImport)
self.bImport.Enable(False)
self.bImportQ = wx.BitmapButton(self, wx.ID_ANY, self.images.pngNext, size=BUTTONDIM)
self.bImportQ.SetToolTip("Import the next G Code file from the queue")
self.Bind(wx.EVT_BUTTON, self.onImportFromQueue, self.bImportQ)
self.bImportQ.Enable(False)
self.bExport = wx.BitmapButton(self, wx.ID_ANY, self.images.pngExport, size=BUTTONDIM)
self.bExport.SetToolTip("Export the current toolbox G Code file")
self.Bind(wx.EVT_CHECKBOX, self.onExport, self.bExport)
self.bExport.Enable(not self.settings.autoexport)
self.cbExport = wx.CheckBox(self, wx.ID_ANY, "Auto-export")
self.cbExport.SetToolTip("Auto-export the current G Code file when saving")
self.Bind(wx.EVT_CHECKBOX, self.onCbExport, self.cbExport)
self.cbEnqueue = wx.CheckBox(self, wx.ID_ANY, "Add to queue")
self.cbEnqueue.SetToolTip("Enqueue the current G Code file on the end of the G Code queue when exporting")
self.Bind(wx.EVT_BUTTON, self.onEnqueue, self.cbEnqueue)
self.bSave = wx.BitmapButton(self, wx.ID_ANY, self.images.pngFilesave, size=BUTTONDIM)
self.bSave.SetToolTip("Save G Code to the current file")
self.Bind(wx.EVT_BUTTON, self.onSave, self.bSave)
self.bSave.Enable(False)
self.bSaveAs = wx.BitmapButton(self, wx.ID_ANY, self.images.pngFilesaveas, size=BUTTONDIM)
self.bSaveAs.SetToolTip("Save G Code to a different file")
self.Bind(wx.EVT_BUTTON, self.onSaveAs, self.bSaveAs)
self.bSaveAs.Enable(False)
self.cbShowMoves = wx.CheckBox(self, wx.ID_ANY, "Show Moves")
self.cbShowMoves.SetToolTip("Show/Hide non-extrusion moves")
self.cbShowMoves.SetValue(self.settings.showmoves)
self.Bind(wx.EVT_CHECKBOX, self.onCbShowMoves, self.cbShowMoves)
self.cbShowPrevious = wx.CheckBox(self, wx.ID_ANY, "Show Previous Layer")
self.cbShowPrevious.SetToolTip("Show/Hide the previous layer")
self.cbShowPrevious.SetValue(self.settings.showprevious)
self.Bind(wx.EVT_CHECKBOX, self.onCbShowPrevious, self.cbShowPrevious)
self.cbShowRetractions = wx.CheckBox(self, wx.ID_ANY, "Show Retractions")
self.cbShowRetractions.SetToolTip("Show/Hide retractions")
self.cbShowRetractions.SetValue(self.settings.showretractions)
self.Bind(wx.EVT_CHECKBOX, self.onCbShowRetractions, self.cbShowRetractions)
self.cbShowRevRetractions = wx.CheckBox(self, wx.ID_ANY, "Show Reverse Retractions")
self.cbShowRevRetractions.SetToolTip("Show/Hide reverse retractions")
self.cbShowRevRetractions.SetValue(self.settings.showrevretractions)
self.Bind(wx.EVT_CHECKBOX, self.onCbShowRevRetractions, self.cbShowRevRetractions)
self.cmbTool = wx.ComboBox(self, wx.ID_ANY, "None", choices = ["None", "0", "1", "2", "3"],
style = wx.CB_DROPDOWN + wx.CB_READONLY)
self.cmbTool.SetToolTip("Choose which tool, if any, is highlighted in the display")
self.Bind(wx.EVT_COMBOBOX, self.onCmbTool, self.cmbTool)
self.cbLineNbrs = wx.CheckBox(self, wx.ID_ANY, "Line Numbers")
self.cbLineNbrs.SetToolTip("Use G Code line numbers")
self.cbLineNbrs.SetValue(self.settings.uselinenbrs)
self.Bind(wx.EVT_CHECKBOX, self.onCbLineNbrs, self.cbLineNbrs)
self.bBracketStart = wx.BitmapButton(self, wx.ID_ANY, self.images.pngBracketopen, size=BUTTONDIM)
self.bBracketStart.SetToolTip("Mark the beginning of a block of G code")
self.Bind(wx.EVT_BUTTON, self.onBracketStart, self.bBracketStart)
self.bBracketStart.Enable(False)
self.bBracketEnd = wx.BitmapButton(self, wx.ID_ANY, self.images.pngBracketclose, size=BUTTONDIM)
self.bBracketEnd.SetToolTip("Mark the end of a block of G code")
self.Bind(wx.EVT_BUTTON, self.onBracketEnd, self.bBracketEnd)
self.bBracketEnd.Enable(False)
self.bBracketDel = wx.BitmapButton(self, wx.ID_ANY, self.images.pngBracketdel, size=BUTTONDIM)
self.bBracketDel.SetToolTip("Delete the marked block of G code")
self.Bind(wx.EVT_BUTTON, self.onBracketDel, self.bBracketDel)
self.bBracketDel.Enable(False)
btnszr = wx.BoxSizer(wx.HORIZONTAL)
btnszr.AddSpacer(20)
btnszr.Add(self.bShift)
btnszr.AddSpacer(10)
btnszr.Add(self.bModTemp)
btnszr.AddSpacer(10)
btnszr.Add(self.bModSpeed)
btnszr.AddSpacer(10)
btnszr.Add(self.bFilChange)
btnszr.AddSpacer(10)
btnszr.Add(self.bEdit)
btnszr.AddSpacer(10)
btnszr.Add(self.bInfo)
btnszr.AddSpacer(10)
btnszr.Add(self.bLegend)
btnszr.AddSpacer(50)
btnszr.Add(self.bSaveLayers)
btnszr.AddSpacer(10)
btnszr.Add(self.bOpen)
btnszr.AddSpacer(10)
btnszr.Add(self.bImport)
btnszr.AddSpacer(10)
btnszr.Add(self.bImportQ)
btnszr.AddSpacer(10)
btnszr.Add(self.bExport)
btnszr.AddSpacer(5)
optszr = wx.BoxSizer(wx.VERTICAL)
optszr.AddSpacer(1)
optszr.Add(self.cbExport)
optszr.AddSpacer(1)
optszr.Add(self.cbEnqueue)
btnszr.Add(optszr)
btnszr.AddSpacer(10)
btnszr.Add(self.bSave)
btnszr.AddSpacer(10)
btnszr.Add(self.bSaveAs)
btnszr.AddSpacer(10)
hszr = wx.BoxSizer(wx.HORIZONTAL)
hszr.AddSpacer(20)
vszr = wx.BoxSizer(wx.VERTICAL)
vszr.Add(self.gcFrame)
vszr.Add(self.stLayerText, 1, wx.ALIGN_CENTER_HORIZONTAL, 1)
vszr.AddSpacer(10)
opthszr = wx.BoxSizer(wx.HORIZONTAL)
optszr = wx.BoxSizer(wx.VERTICAL)
optszr.AddSpacer(1)
optszr.Add(self.cbShowMoves)
optszr.AddSpacer(1)
optszr.Add(self.cbShowPrevious)
opthszr.Add(optszr)
opthszr.AddSpacer(10)
optszr = wx.BoxSizer(wx.VERTICAL)
optszr.AddSpacer(1)
optszr.Add(self.cbShowRetractions)
optszr.AddSpacer(1)
optszr.Add(self.cbShowRevRetractions)
opthszr.Add(optszr)
opthszr.AddSpacer(10)
optszr = wx.BoxSizer(wx.VERTICAL)
optszr.AddSpacer(1)
hsz = wx.BoxSizer(wx.HORIZONTAL)
hsz.Add(wx.StaticText(self, wx.ID_ANY, "Tool to Hi-light: "), 1, wx.TOP, 5)
hsz.AddSpacer(5)
hsz.Add(self.cmbTool)
optszr.Add(hsz)
opthszr.Add(optszr)
vszr.Add(opthszr)
hszr.Add(vszr)
szNav = wx.BoxSizer(wx.VERTICAL)
szNav.Add(self.bUp, 1, wx.ALIGN_CENTER_HORIZONTAL, 1)
szNav.AddSpacer(10)
szNav.Add(self.slLayers)
szNav.AddSpacer(10)
szNav.Add(self.bDown, 1, wx.ALIGN_CENTER_HORIZONTAL, 1)
hszr.Add(szNav)
hszr.AddSpacer(20)
listszr = wx.BoxSizer(wx.VERTICAL)
listszr.Add(self.lcGCode)
listszr.AddSpacer(10)
listszr.Add(self.cbLineNbrs, 1, wx.ALIGN_CENTER_HORIZONTAL, 1)
brksz = wx.BoxSizer(wx.HORIZONTAL)
brksz.Add(self.bBracketStart)
brksz.AddSpacer(20)
brksz.Add(self.bBracketDel)
brksz.AddSpacer(20)
brksz.Add(self.bBracketEnd)
listszr.AddSpacer(10)
listszr.Add(brksz, 0, wx.ALIGN_CENTER_HORIZONTAL, 1)
hszr.Add(listszr)
hszr.AddSpacer(20)
vszr = wx.BoxSizer(wx.VERTICAL)
vszr.AddSpacer(20)
vszr.Add(btnszr)
vszr.AddSpacer(10)
vszr.Add(hszr)
vszr.AddSpacer(20)
self.SetSizer(vszr)
self.Layout()
self.Fit()
self.slLayers.SetRange(0, lmax)
self.slLayers.SetPageSize(int(lmax/10))
if self.gObj is not None:
self.enableButtons()
def onLegend(self, evt):
if self.legend is None:
self.legend = LegendDlg(self)
self.legend.Show()
else:
self.legend.Show()
self.legend.Raise()
def legendClosed(self):
self.legend = None
def setImportFile(self, fn):
self.importFileName = fn
if fn is None:
self.bImport.SetToolTip("")
self.bImport.Enable(False)
else:
self.bImport.SetToolTip("Import G Code file (%s)" % fn)
self.bImport.Enable(True)
def onBracketStart(self, evt):
b = self.lcGCode.setBracketStart()
self.gcFrame.setBracket(b)
self.enableBracketDel(b)
def onBracketEnd(self, evt):
b = self.lcGCode.setBracketEnd()
self.gcFrame.setBracket(b)
self.enableBracketDel(b)
def enableBracketDel(self, b=None):
if b is None:
b = self.lcGCode.getBracket()
if b[0] is None or b[1] is None:
self.bBracketDel.Enable(False)
else:
self.bBracketDel.Enable(True)
def onBracketDel(self, evt):
b = self.lcGCode.getBracket()
if b[0] is None or b[1] is None:
return
self.gcode = self.gcode[:b[0]] + self.gcode[b[1]+1:]
self.setModified(True)
self.gObj = self.buildModel()
self.modGcSuffixTemps(self.gObj.getTemps())
l = self.gcFrame.getCurrentLayer()
self.gcFrame.loadModel(self.gObj, l, self.gcFrame.getZoom())
lmax = self.gObj.layerCount()-1
self.slLayers.SetRange(0, lmax)
self.slLayers.SetPageSize(int(lmax/10))
self.lcGCode.setGCode(self.gcode)
self.lcGCode.setLayerBounds(self.gObj.getGCodeLines(l))
self.bBracketDel.Enable(False)
self.updateInfoDlg(self.currentLayer)
def updateTitle(self):
if self.filename is None:
self.SetTitle("%s" % TITLE_PREFIX)
else:
txt = TITLE_PREFIX + " - "
if self.modified:
txt += "* "
txt += self.filename
self.SetTitle(txt)
def setModified(self, flag=True):
self.modified = flag
self.updateTitle()
def onExport(self, evt):
if self.modified:
dlg = wx.MessageDialog(self,
"You have unsaved changes.\nAre you sure you want to export?",
"Confirm Export With Pending Changes",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
self.parent.exportGcFile(self.filename, True, self.settings.autoenqueue)
def onCbExport(self, evt):
self.settings.autoexport = evt.IsChecked()
self.bExport.Enable(not self.settings.autoexport)
def onEnqueue(self, evt):
self.settings.autoenqueue = evt.IsChecked()
def onImport(self, evt):
fn = self.parent.importGcFile()
if fn is None:
return
self.loadGFile(fn)
def onImportFromQueue(self, evt):
fn = self.parent.importGcFromQueue()
if fn is None:
return
self.loadGFile(fn)
def setImportButton(self, msg):
if msg is None:
self.okToImport = False
self.bImportQ.SetToolTip("")
self.bImportQ.Enable(False)
else:
self.okToImport = True
self.bImportQ.SetToolTip(msg)
self.bImportQ.Enable(self.bOpen.IsEnabled())
def onOpen(self, evt):
if self.modified:
dlg = wx.MessageDialog(self,
"You have unsaved changes.\nAre you sure you want to open a different file?",
"Confirm Open With Pending Changes",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
self.gcodeFileDialog()
def onInfo(self, evt):
if self.propDlg is not None:
return
self.propDlg = PropertiesDlg(self, self, None, cb=self.onInfoClose)
self.showFileProperties()
self.showLayerProperties(self.currentLayer)
self.propDlg.Show()
def onInfoClose(self):
self.propDlg = None
def updateInfoDlg(self, lx):
if self.propDlg is None:
return
self.showFileProperties()
self.showLayerProperties(lx)
def showFileProperties(self):
slCfg, filSiz, tempsHE, tempsBed = parseGCSuffix(self.gcode)
ftime = time.strftime('%y/%m/%d-%H:%M:%S', time.localtime(os.path.getmtime(self.filename)))
if len(self.filename) > 50:
self.propDlg.setProperty(PropertyEnum.fileName, os.path.basename(self.filename))
else:
self.propDlg.setProperty(PropertyEnum.fileName, self.filename)
self.propDlg.setProperty(PropertyEnum.slicerCfg, slCfg)
self.propDlg.setProperty(PropertyEnum.filamentSize, filSiz)
self.propDlg.setProperty(PropertyEnum.temperatures, "HE:%s BED:%s" % (tempsHE, tempsBed))
self.propDlg.setProperty(PropertyEnum.sliceTime, ftime)
self.propDlg.setProperty(PropertyEnum.printEstimate, self.totalTimeStr)
def showLayerProperties(self, lx):
if self.propDlg is None:
return
self.propDlg.setProperty(PropertyEnum.layerNum, "%d" % lx)
x0, y0, xn, yn = self.gObj.getLayerMinMaxXY(lx)
if x0 is None:
self.propDlg.setProperty(PropertyEnum.minMaxXY, "")
else:
self.propDlg.setProperty(PropertyEnum.minMaxXY, "(%.2f, %.2f) - (%.2f, %.2f)" % (x0, y0, xn, yn))
le, prior, after = self.gObj.getLayerFilament(lx)
eUsed = self.gObj.getFilament()
s = []
for i in range(self.settings.nextruders):
s.append("%.2f/%.2f <: %.2f >: %.2f" % (le[i], eUsed[i], prior[i], after[i]))
self.propDlg.setProperty(PropertyEnum.filamentUsed, s)
f, l = self.gObj.getGCodeLines(lx)
if f is None:
self.propDlg.setProperty(PropertyEnum.gCodeRange, "")
else:
self.propDlg.setProperty(PropertyEnum.gCodeRange, "%d - %d" % (f, l))
self.propDlg.setProperty(PropertyEnum.layerPrintTime, self.layerTimeStr[lx])
if lx == 0:
self.propDlg.setProperty(PropertyEnum.timeUntil, "")
else:
t = sum(self.layerTimes[:lx])
self.propDlg.setProperty(PropertyEnum.timeUntil, formatElapsed(t))
def gcodeFileDialog(self):
wildcard = "GCode (*.gcode)|*.gcode;*.GCODE|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(
self, message="Choose a GCode file",
defaultDir=self.settings.lastdirectory,
defaultFile="",
wildcard=wildcard,
style=wx.FD_OPEN)
rc = dlg.ShowModal()
if rc == wx.ID_OK:
path = dlg.GetPath().encode('ascii','ignore')
dlg.Destroy()
if rc != wx.ID_OK:
return
self.loadGFile(path)
def loadGFile(self, path):
self.settings.lastdirectory = os.path.dirname(path)
self.gObj = self.loadGCode(path)
if self.gObj is None:
lmax = 1
self.slLayers.Enable(False)
self.bUp.Enable(False)
self.bDown.Enable(False)
self.filename = None
else:
lmax = self.gObj.layerCount()-1
self.slLayers.Enable(True)
self.bUp.Enable(True)
self.bDown.Enable(True)
self.filename = path
self.updateTitle()
self.slLayers.SetRange(0, lmax)
self.slLayers.SetPageSize(int(lmax/10))
self.gcFrame.loadModel(self.gObj)
self.lcGCode.setGCode(self.gcode)
self.currentLayer = 0
self.setLayerText()
self.slLayers.SetValue(0)
self.updateInfoDlg(0)
self.setModified(False)
if self.gObj is not None:
self.lcGCode.setLayerBounds(self.gObj.getGCodeLines(0))
self.enableButtons()
else:
self.enableButtons(False)
def enableButtons(self, flag=True, openButtons=False):
self.bShift.Enable(flag)
self.bModTemp.Enable(flag)
self.bModSpeed.Enable(flag)
self.bEdit.Enable(flag)
self.bInfo.Enable(flag)
self.bUp.Enable(flag)
self.bDown.Enable(flag)
self.bSaveLayers.Enable(flag)
self.bSave.Enable(flag)
self.bSaveAs.Enable(flag)
self.bExport.Enable(flag and (not self.settings.autoexport))
self.bFilChange.Enable(flag)
self.bBracketStart.Enable(flag)
self.bBracketEnd.Enable(flag)
self.enableBracketDel()
if openButtons:
if flag and self.importFileName is not None:
self.bImport.Enable(True)
else:
self.bImport.Enable(False)
if flag and self.okToImport:
self.bImportQ.Enable(True)
else:
self.bImportQ.Enable(False)
self.bOpen.Enable(flag)
def doShiftModel(self, evt):
dlg = ShiftModelDlg(self, self.gObj, self.settings.buildarea)
dlg.CenterOnScreen()
rc = dlg.ShowModal()
dlg.Destroy()
if rc == wx.ID_OK:
self.applyShift()
self.setModified()
else:
self.setShift(0, 0)
def setShift(self, sx, sy):
self.shiftX = sx
self.shiftY = sy
self.gcFrame.setShift(sx, sy)
def applyShift(self):
self.gcode = [self.applyAxisShift(self.applyAxisShift(l, 'y', self.shiftY), 'x', self.shiftX) for l in self.gcode]
self.shiftX = 0
self.shiftY = 0
self.gObj = self.buildModel()
self.gcFrame.loadModel(self.gObj, self.gcFrame.getCurrentLayer(), self.gcFrame.getZoom())
self.lcGCode.setGCode(self.gcode)
self.lcGCode.refreshList()
self.updateInfoDlg(self.currentLayer)
def applyAxisShift(self, s, axis, shift):
if "m117" in s or "M117" in s:
return s
if axis == 'x':
m = reX.match(s)
maxv = self.settings.buildarea[0]
elif axis == 'y':
m = reY.match(s)
maxv = self.settings.buildarea[1]
elif axis == 'z':
m = reZ.match(s)
maxv = self.settings.buildarea[1]
else:
return s
if m is None or m.lastindex != 3:
return s
value = float(m.group(2)) + float(shift)
if value < 0:
value = 0.0
elif value > maxv:
value = float(maxv)
return "%s%s%s" % (m.group(1), str(value), m.group(3))
def onModTemps(self, evt):
dlg = ModifyTempsDlg(self, self.gObj, self.settings.platemps, self.settings.abstemps)
dlg.CenterOnScreen()
rc = dlg.ShowModal()
if rc == wx.ID_OK:
bed, hes = dlg.getResult()
dlg.Destroy()
if rc != wx.ID_OK:
return
self.applyTempChange(bed, hes)
def applyTempChange(self, bed, hes):
self.currentTool = 0
self.gcode = [self.applySingleTempChange(l, bed, hes) for l in self.gcode]
self.setModified(True)
self.gObj = self.buildModel()
self.modGcSuffixTemps(self.gObj.getTemps())
self.gcFrame.loadModel(self.gObj, self.gcFrame.getCurrentLayer(), self.gcFrame.getZoom())
self.lcGCode.setGCode(self.gcode)
self.lcGCode.refreshList()
self.updateInfoDlg(self.currentLayer)
def modGcSuffixTemps(self, nTemps):
bstr = "%.1f" % nTemps[0]
h = []
nct = 0
for x in nTemps[1]:
if x is None:
nct += 1
else:
if nct != 0:
h.extend([None]*nct)
nct = 0
h.append("%.1f" % x)
hestr = ",".join(h)
modifyGCSuffix(self.gcode, None, None, hestr, bstr)
def applySingleTempChange(self, s, bed, hes):
if "m104" in s.lower() or "m109" in s.lower():
m = reS.match(s)
difference = hes[self.currentTool]
elif "m140" in s.lower() or "m190" in s.lower():
m = reS.match(s)
difference = bed
elif s.startswith("T"):
try:
t = int(s[1:])
except:
t = None
if t is not None:
self.currentTool = t
return s
else:
return s
if m is None or m.lastindex != 3:
return s
value = float(m.group(2))
if value == 0.0:
return s
value += float(difference)
return "%s%s%s" % (m.group(1), str(value), m.group(3))
def onModSpeed(self, evt):
dlg = ModifySpeedDlg(self)
dlg.CenterOnScreen()
val = dlg.ShowModal()
if val == wx.ID_OK:
modSpeeds = dlg.getResult()
dlg.Destroy()
if val != wx.ID_OK:
return
self.applySpeedChange([float(x)/100.0 for x in modSpeeds])
def applySpeedChange(self, speeds):
self.gcode = [self.applySingleSpeedChange(l, speeds) for l in self.gcode]
self.setModified(True)
self.gObj = self.buildModel()
self.gcFrame.loadModel(self.gObj, self.gcFrame.getCurrentLayer(), self.gcFrame.getZoom())
self.lcGCode.setGCode(self.gcode)
self.lcGCode.refreshList()
self.updateInfoDlg(self.currentLayer)
def applySingleSpeedChange(self, s, speeds):
if "m117" in s or "M117" in s:
return s
m = reF.match(s)
if m is None or m.lastindex != 3:
return s
e = reE.match(s)
if e is None: #no extrusion - must be a move
factor = speeds[1]
else:
factor = speeds[0]
value = float(m.group(2)) * float(factor)
return "%s%s%s" % (m.group(1), str(value), m.group(3))
def onFilChange(self, evt):
insertPoint = self.lcGCode.getSelectedLine()
dlg = FilamentChangeDlg(self, self.gcode, self.gObj,
insertPoint,
self.gObj[self.currentLayer].printHeight())
rc = dlg.ShowModal()
if rc == wx.ID_OK:
ngc = dlg.getValues()
dlg.Destroy()
if rc != wx.ID_OK:
return
if insertPoint == 0:
self.gcode = ngc + self.gcode
else:
self.gcode = self.gcode[:insertPoint] + ngc + self.gcode[insertPoint:]
self.setModified(True)
self.enableButtons()
self.gObj = self.buildModel()
lmax = self.gObj.layerCount()-1
self.slLayers.SetRange(0, lmax)
self.slLayers.SetPageSize(int(lmax/10))
self.gcFrame.loadModel(self.gObj, self.currentLayer, None)
self.lcGCode.setGCode(self.gcode)
self.lcGCode.setLayerBounds(self.gObj.getGCodeLines(self.currentLayer))
self.updateInfoDlg(self.currentLayer)
def onEditGCode(self, evt):
self.editDlg = EditGCodeDlg(self, self.gcode, "<live buffer>", self.editClosed)
self.editDlg.CenterOnScreen()
self.editDlg.Show()
self.enableButtons(flag=False, openButtons=True)
def editClosed(self, rc):
self.enableButtons(flag=True, openButtons=True)
if rc == wx.ID_OK:
data = self.editDlg.getData()
self.editDlg.Destroy()
if rc != wx.ID_OK:
return
self.gcode = data[:]
self.setModified(True)
self.gObj = self.buildModel()
self.modGcSuffixTemps(self.gObj.getTemps())
self.gcFrame.loadModel(self.gObj, 0, 1)
self.currentLayer = 0
self.setLayerText()
self.slLayers.SetValue(0)
lmax = self.gObj.layerCount()-1
self.slLayers.SetRange(0, lmax)
self.slLayers.SetPageSize(int(lmax/10))
self.lcGCode.setGCode(self.gcode)
self.lcGCode.setLayerBounds(self.gObj.getGCodeLines(0))
self.lcGCode.refreshList()
self.updateInfoDlg(0)
def onCbShowMoves(self, evt):
self.settings.showmoves = self.cbShowMoves.GetValue()
self.gcFrame.setShowMoves(self.settings.showmoves)
def onCbShowPrevious(self, evt):
self.settings.showprevious = self.cbShowPrevious.GetValue()
self.gcFrame.setShowPrevious(self.settings.showprevious)
def onCbShowRetractions(self, evt):
self.settings.showretractions = self.cbShowRetractions.GetValue()
self.gcFrame.setShowRetractions(self.settings.showretractions)
def onCbShowRevRetractions(self, evt):
self.settings.showrevretractions = self.cbShowRevRetractions.GetValue()
self.gcFrame.setShowRevRetractions(self.settings.showrevretractions)
def onCmbTool(self, evt):
sel = self.cmbTool.GetStringSelection()
if sel == "" or sel == "None":
sel = None
else:
try:
sel = int(sel)
except:
sel = None
self.gcFrame.setHilightTool(sel)
def onCbLineNbrs(self, evt):
self.settings.uselinenbrs = self.cbLineNbrs.GetValue()
self.lcGCode.setLineNumbers(self.settings.uselinenbrs)
def onLayerScroll(self, evt):
v = self.slLayers.GetValue()
if v == self.currentLayer:
return
self.changeLayer(v)
def onUp(self, evt):
lmax = self.slLayers.GetRange()[1]
if self.currentLayer >= lmax:
return
v = self.currentLayer + 1
self.changeLayer(v)
def onDown(self, evt):
if self.currentLayer <= 0:
return
v = self.currentLayer - 1
self.changeLayer(v)
def changeLayer(self, v):
self.currentLayer = v
self.gcFrame.setLayer(v)
self.slLayers.SetValue(v)
self.setLayerText()
self.lcGCode.setLayerBounds(self.gObj.getGCodeLines(v))
self.showLayerProperties(v)
def setLayerText(self):
if self.gObj is None:
ht = 0.0
else:
ht = self.gObj[self.currentLayer].printHeight()
self.stLayerText.SetLabel("Layer Height: %0.3f" % ht)
def reportSelectedLine(self, ln):
self.gcFrame.reportSelectedLine(ln)
def onClose(self, evt):
self.settings.save()
if self.modified:
dlg = wx.MessageDialog(self,
"You have unsaved changes.\nAre you sure you want to exit?",
"Confirm Exit With Pending Changes",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc == wx.ID_YES:
self.parent.GEditClosed()
else:
self.parent.GEditClosed()
def loadGCode(self, fn):
if fn is None:
self.gcode = []
return None
try:
self.gcode = list(open(fn))
except:
print "Error opening file %s" % fn
return None
return self.buildModel()
    def buildModel(self):
        # Build a G Code object model by feeding every line of self.gcode
        # through the CNC simulator; also caches print-time estimates.
        rgcode = [s.rstrip() for s in self.gcode]
        cnc = CNC(self.settings.acceleration, self.settings.layerheight)
        ln = -1
        for gl in rgcode:
            ln += 1
            # Strip trailing comments.
            if ";" in gl:
                gl = gl.split(";")[0]
            if gl.strip() == "":
                continue
            # Split into the command word and (optionally) its parameter string.
            p = re.split("\\s+", gl, 1)
            params = {}
            # M117 (LCD message) carries free text, not coordinate parameters.
            if not (p[0].strip() in ["M117", "m117"]):
                if len(p) >= 2:
                    # _get_float reads self.paramStr, set here.
                    self.paramStr = p[1]
                    if "X" in self.paramStr:
                        params["X"] = self._get_float("X")
                    if "Y" in self.paramStr:
                        params["Y"] = self._get_float("Y")
                    if "Z" in self.paramStr:
                        params["Z"] = self._get_float("Z")
                    if "E" in self.paramStr:
                        params["E"] = self._get_float("E")
                    if "F" in self.paramStr:
                        params["F"] = self._get_float("F")
                    if "S" in self.paramStr:
                        params["S"] = self._get_float("S")
            cnc.execute(p[0], params, ln)
        gobj = cnc.getGObject()
        gobj.setMaxLine(ln)
        # Cache elapsed-time estimates for the info dialog and layer display.
        self.totalTime, self.layerTimes = cnc.getTimes()
        self.totalTimeStr = formatElapsed(self.totalTime)
        self.layerTimeStr = [formatElapsed(s) for s in self.layerTimes]
        return gobj
    def _get_float(self,which):
        # Extract the numeric value following axis letter `which` from the
        # most recent parameter string (self.paramStr, set by buildModel).
        try:
            return float(gcRegex.findall(self.paramStr.split(which)[1])[0])
        except:
            # On any parse failure, log and fall through -- implicitly
            # returning None for this parameter.
            print "exception: ", self.paramStr
def onSaveAs(self, evt):
wildcard = "GCode (*.gcode)|*.gcode;*.GCODE"
dlg = wx.FileDialog(
self, message="Save as ...", defaultDir=self.settings.lastdirectory,
defaultFile="", wildcard=wildcard, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
val = dlg.ShowModal()
if val != wx.ID_OK:
dlg.Destroy()
return
path = dlg.GetPath()
dlg.Destroy()
ext = os.path.splitext(os.path.basename(path))[1]
if ext == "":
path += ".gcode"
self.saveFile(path)
def onSave(self, evt):
if self.filename is None:
self.onSaveAs(evt)
else:
self.saveFile(self.filename)
def saveFile(self, path):
fp = file(path, 'w')
for ln in self.gcode:
fp.write("%s\n" % ln.rstrip())
self.setModified(False)
fp.close()
self.filename = path
self.parent.exportGcFile(path, self.settings.autoexport, self.settings.autoenqueue)
self.updateTitle()
dlg = wx.MessageDialog(self, "G Code file\n" + path + "\nwritten.",
'Save Successful', wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def onSaveLayers(self, evt):
    """Export a user-selected range of layers to a separate G-code file.

    SaveLayerDlg supplies:
      sx, ex  -- first/last layer indices to export
      ereset  -- prepend a G92 resetting E to the range's starting value
      zmodify -- shift the Z coordinate of exported lines by zdelta
    """
    dlg = SaveLayerDlg(self, self.gObj)
    rc = dlg.ShowModal()
    if rc == wx.ID_OK:
        sx, ex, ereset, zmodify, zdelta = dlg.getValues()
    dlg.Destroy()
    if rc != wx.ID_OK:
        return
    # Map the chosen layer indices onto buffer line numbers.
    startLine = self.gObj.getGCodeLines(sx)[0]
    endLine = self.gObj.getGCodeLines(ex)[1]
    wildcard = "GCode (*.gcode)|*.gcode;*.GCODE"
    dlg = wx.FileDialog(
        self, message="Save as ...", defaultDir=self.settings.lastdirectory,
        defaultFile="", wildcard=wildcard, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
    val = dlg.ShowModal()
    if val != wx.ID_OK:
        dlg.Destroy()
        return
    path = dlg.GetPath()
    dlg.Destroy()
    ext = os.path.splitext(os.path.basename(path))[1]
    if ext == "":
        path += ".gcode"
    # 'with' replaces the py2 file() call and closes the handle even if a
    # write raises (the original could leak it).
    with open(path, 'w') as fp:
        if ereset:
            # Start the excerpt from the filament position layer sx had in
            # the full print.
            fp.write("G92 E%0.5f\n" % self.gObj[sx].startingE())
        if zmodify:
            fp.write("\n".join([self.applyAxisShift(ln, 'z', zdelta).rstrip() for ln in self.gcode[startLine:endLine+1]]))
        else:
            fp.write("\n".join([ln.rstrip() for ln in self.gcode[startLine:endLine+1]]))
    dlg = wx.MessageDialog(self, "G Code file\n" + path + "\nwritten.",
        'Save Layers Successful', wx.OK | wx.ICON_INFORMATION)
    dlg.ShowModal()
    dlg.Destroy()
def applyZMod(self, ln, modflag):
    """Placeholder for per-line Z-coordinate modification.

    NOTE(review): currently a no-op -- the line is returned unchanged
    whether or not *modflag* is set; the real transformation has not
    been implemented yet.
    """
    if modflag:
        # TODO: apply the actual Z modification here.
        return ln
    return ln
| jbernardis/repraptoolbox | src/GEdit/gedit.py | Python | gpl-3.0 | 33,435 |
from django.test import TestCase
from dojo.tools.ort.parser import OrtParser
from dojo.models import Test
class TestOrtParser(TestCase):
    """Tests for the ORT (OSS Review Toolkit) report parser."""

    def test_parse_without_file_has_no_finding(self):
        """A missing report must yield an empty findings list."""
        parser = OrtParser()
        findings = parser.get_findings(None, Test())
        self.assertEqual(0, len(findings))

    def test_parse_file_has_many_finding_one_tool(self):
        """The sample evaluated-model report should produce two findings."""
        # 'with' guarantees the handle is closed even if the parser raises
        # (the original leaked it on error).
        with open(
            "dojo/unittests/scans/ort/evaluated-model-reporter-test-output.json"
        ) as testfile:
            parser = OrtParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(2, len(findings))
| rackerlabs/django-DefectDojo | dojo/unittests/tools/test_ort_parser.py | Python | bsd-3-clause | 646 |
# Departing.io, a web app to answer the question of "When will the next bus come?"
# Copyright (C) 2016 Jake Coppinger
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import csv
class NxtbusStops():
    """In-memory index over the nxtbus stops CSV.

    The CSV must contain at least a 'Stop' (stop id) column and a
    'Description' (street address) column.
    """

    def __init__(self, csvFilename):
        # Read the whole file eagerly; 'with' closes the handle (the
        # original passed an open() result to DictReader and never closed it).
        with open(csvFilename) as csv_file:
            self._allData = list(csv.DictReader(csv_file))

    def busStopIDDictionary(self):
        """Return {stop id -> full CSV row}. Later duplicates overwrite earlier ones."""
        return {row["Stop"]: row for row in self._allData}

    def addressDictionary(self):
        """Return {description/address -> full CSV row}. Later duplicates overwrite earlier ones."""
        return {row["Description"]: row for row in self._allData}

    def addressList(self):
        """Return all stop descriptions, in file order."""
        return [row["Description"] for row in self._allData]
| jakecoppinger/departing.io | nxtbus_stops.py | Python | gpl-3.0 | 1,313 |
import buzzbot.background_runner
import buzzbot.bot
import buzzbot.botUtilities
import buzzbot.commands
import buzzbot.cpu_core_counter
import buzzbot.crawler
import buzzbot.feedparser
import buzzbot.model
import buzzbot.searcher
import buzzbot.visitor
import logging
| pbarton666/buzz_bot | bot_project/buzzbot/__init__.py | Python | mit | 281 |
# coding: utf-8
from __future__ import unicode_literals
from datetime import date
from django.contrib.gis.geos import GEOSGeometry
from boundaries.models import BoundarySet, Boundary
from boundaries.tests import ViewTestCase, ViewsTests, PrettyTests, PaginationTests, BoundaryListTests
class BoundaryListSetTestCase(ViewTestCase, ViewsTests, PrettyTests, PaginationTests, BoundaryListTests):
    """
    Compare to BoundarySetListTestCase (/boundary-sets/) and BoundaryListTestCase (/boundaries/)
    """

    # Show full diffs on assertion failure (the expected JSON payloads are long).
    maxDiff = None

    # Endpoint under test: list the boundaries inside the 'inc' boundary set.
    url = '/boundaries/inc/'
    # Expected payload when the set exists but contains no boundaries.
    json = {
        'objects': [],
        'meta': {
            'next': None,
            'total_count': 0,
            'previous': None,
            'limit': 20,
            'offset': 0,
        },
    }

    def setUp(self):
        # An empty boundary set; individual tests add their own boundaries.
        BoundarySet.objects.create(slug='inc', last_updated=date(2000, 1, 1))

    def test_pagination(self):
        # Three boundaries with identical geometry; page through them one at
        # a time and verify the next/previous links and the related
        # shape/centroid URLs carry the pagination parameters.
        geom = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,0 0)))')
        Boundary.objects.create(slug='foo', set_id='inc', shape=geom, simple_shape=geom)
        Boundary.objects.create(slug='bar', set_id='inc', shape=geom, simple_shape=geom)
        Boundary.objects.create(slug='baz', set_id='inc', shape=geom, simple_shape=geom)

        # Page 1: 'baz' first (reverse-alphabetical ordering in the fixture data).
        response = self.client.get(self.url, {'limit': 1})
        self.assertResponse(response)
        self.assertJSONEqual(response, '{"objects": [{"url": "/boundaries/inc/baz/", "boundary_set_name": "", "external_id": "", "name": "", "related": {"boundary_set_url": "/boundary-sets/inc/"}}], "meta": {"total_count": 3, "related": {"centroids_url": "/boundaries/inc/centroid?limit=1", "simple_shapes_url": "/boundaries/inc/simple_shape?limit=1", "shapes_url": "/boundaries/inc/shape?limit=1"}, "next": "/boundaries/inc/?limit=1&offset=1", "limit": 1, "offset": 0, "previous": null}}')

        # Page 2: both next and previous links present.
        response = self.client.get(self.url, {'limit': 1, 'offset': 1})
        self.assertResponse(response)
        self.assertJSONEqual(response, '{"objects": [{"url": "/boundaries/inc/bar/", "boundary_set_name": "", "external_id": "", "name": "", "related": {"boundary_set_url": "/boundary-sets/inc/"}}], "meta": {"total_count": 3, "related": {"centroids_url": "/boundaries/inc/centroid?limit=1&offset=1", "simple_shapes_url": "/boundaries/inc/simple_shape?limit=1&offset=1", "shapes_url": "/boundaries/inc/shape?limit=1&offset=1"}, "next": "/boundaries/inc/?limit=1&offset=2", "limit": 1, "offset": 1, "previous": "/boundaries/inc/?limit=1&offset=0"}}')

        # Page 3 (last): next link must be null.
        response = self.client.get(self.url, {'limit': 1, 'offset': 2})
        self.assertResponse(response)
        self.assertJSONEqual(response, '{"objects": [{"url": "/boundaries/inc/foo/", "boundary_set_name": "", "external_id": "", "name": "", "related": {"boundary_set_url": "/boundary-sets/inc/"}}], "meta": {"total_count": 3, "related": {"centroids_url": "/boundaries/inc/centroid?limit=1&offset=2", "simple_shapes_url": "/boundaries/inc/simple_shape?limit=1&offset=2", "shapes_url": "/boundaries/inc/shape?limit=1&offset=2"}, "next": null, "limit": 1, "offset": 2, "previous": "/boundaries/inc/?limit=1&offset=1"}}')

    def test_404_on_boundary_set(self):
        # An unknown boundary-set slug should 404, not return an empty list.
        response = self.client.get('/boundaries/nonexistent/')
        self.assertNotFound(response)
| datamade/represent-boundaries | boundaries/tests/test_boundary_list_set.py | Python | mit | 3,252 |
#!/usr/bin/env python
import rospy
import actionlib
import random
import vrep
import time
import numpy as np
import geometry_msgs.msg
import std_msgs.msg
import os
from geometry_msgs.msg import Twist, Pose, Vector3
from kobuki_msgs.msg import BumperEvent
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import LaserScan,Imu
from std_msgs.msg import Float64
from turtlebot2i_safety.msg import SafetyZone, VelocityScale, SafetyRisk
from collections import deque
from math import pi, sqrt, sin, cos, radians, atan2
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from shapely.geometry import Polygon, box, LineString, Point
from shapely.affinity import translate
#from matplotlib import pyplot as plt
class VrepManipulation():
    """Thin wrapper around the V-REP (CoppeliaSim) remote API used to
    (re)arrange the training scene obstacles and to respawn the robot model.

    A dedicated remote-API connection is opened on port 20001; scene objects
    are looked up once at construction and cached as handle lists, in a fixed
    order so each scenario can provide a parallel list of target positions.
    """

    # Scene object names, order must match the position tables below.
    _BOX_NAMES = ['ConcreteBox'] + ['ConcreteBox#%d' % i for i in range(10)]
    _ROUND_NAMES = ['80cmHighPillar100cm', '80cmHighPillar100cm0']
    _CONV_NAMES = ['ConveyorBelt'] + ['ConveyorBelt#%d' % i for i in range(10)]

    def __init__(self):
        self.scenarioNr = 0
        self.clientID = vrep.simxStart('127.0.0.1', 20001, True, True, 5000, 5)
        self.dirPath = os.path.dirname(os.path.realpath(__file__))
        # The robot model lives in the sibling description package.
        self.model_location = self.dirPath.replace(
            'turtlebot2i_safety/src',
            'turtlebot2i_description/v-rep_model/warehouse_scene/vrep_models/turtlebot2i_for_training.ttm')
        returnCode, self.robot_handle = vrep.simxGetObjectHandle(
            self.clientID, 'turtlebot2i', vrep.simx_opmode_blocking)
        self.box_handles = [self._get_handle(name) for name in self._BOX_NAMES]
        self.round_handles = [self._get_handle(name) for name in self._ROUND_NAMES]
        self.conv_handles = [self._get_handle(name) for name in self._CONV_NAMES]

    def _get_handle(self, name):
        """Return the V-REP object handle for *name* (blocking lookup)."""
        returnCode, handle = vrep.simxGetObjectHandle(self.clientID, name, vrep.simx_opmode_blocking)
        return handle

    def _place_objects(self, handles, positions):
        """Teleport each handle to its [x, y, z] world position (paired by index)."""
        for handle, pos in zip(handles, positions):
            vrep.simxSetObjectPosition(self.clientID, handle, -1,
                                       np.array(pos), vrep.simx_opmode_oneshot_wait)

    def setScenarioOriginal(self):
        """Obstacle layout #0 (the default warehouse arrangement)."""
        self._place_objects(self.box_handles, [
            [ 7.0,  4.5, 0.5], [ 1.0,  7.0, 0.5], [-2.0, -3.0, 0.5],
            [-3.0, -4.5, 0.5], [ 5.5,  0.0, 0.5], [ 5.5, -4.0, 0.5],
            [ 7.0, -2.5, 0.5], [7.25, -4.5, 0.5], [ 9.0, -4.5, 0.5],
            [ 7.5, -6.5, 0.5], [-7.0, -6.0, 0.5]])
        self._place_objects(self.round_handles, [
            [2.0, -4.0, 0.35], [2.0, -6.5, 0.35]])
        self._place_objects(self.conv_handles, [
            [ 1.0,  1.0, 0.113], [-1.0, -0.5, 0.113], [-3.0,  1.0, 0.113],
            [-5.0, -0.5, 0.113], [-7.0,  1.0, 0.113], [-4.5, -6.0, 0.113],
            [ 0.0,  4.5, 0.113], [-9.0,  5.5, 0.113], [-9.0, -3.0, 0.113],
            [-4.0,  5.0, 0.113], [-7.0,  3.0, 0.113]])

    def setScenarioMove1(self):
        """Obstacle layout #1 (all objects shifted relative to layout #0)."""
        self._place_objects(self.box_handles, [
            [ 6.5,  4.0, 0.5], [-0.5,  7.0, 0.5], [-4.0, -6.0, 0.5],
            [-2.0, -2.5, 0.5], [ 7.0,  0.0, 0.5], [ 5.5, -6.0, 0.5],
            [ 8.0, -2.5, 0.5], [ 6.0, -3.0, 0.5], [ 8.0, -5.5, 0.5],
            [ 7.0, -4.5, 0.5], [-8.5, -3.0, 0.5]])
        self._place_objects(self.round_handles, [
            [0.0, -3.0, 0.35], [4.0, -6.0, 0.35]])
        self._place_objects(self.conv_handles, [
            [ 2.0,  1.0, 0.113], [-2.0, -0.5, 0.113], [-4.0,  1.0, 0.113],
            [-6.0, -0.5, 0.113], [-8.0,  1.0, 0.113], [-4.5, -4.0, 0.113],
            [-4.0,  4.5, 0.113], [-7.0,  2.5, 0.113], [-9.0, -5.0, 0.113],
            [0.25,  4.5, 0.113], [-9.0,  5.0, 0.113]])

    def reset_robot_pos(self):
        """Remove the robot model from the scene and reload it at the origin."""
        returnCode = vrep.simxRemoveModel(self.clientID, self.robot_handle, vrep.simx_opmode_oneshot_wait)
        returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
        # While the handle can still be resolved the model is not gone yet.
        while returnCode == 0:
            returnCode = vrep.simxRemoveModel(self.clientID, self.robot_handle, vrep.simx_opmode_oneshot_wait)
            # Bug fix: the original passed print-style positional args to
            # rospy.loginfo, which treats the first argument as a format
            # string and raises TypeError; use %s placeholders instead.
            rospy.loginfo("Previous removal failed. Remove robot again, robot handler: %s | return code: %s",
                          self.robot_handle, returnCode)
            returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
        returnCode, self.robot_handle = vrep.simxLoadModel(self.clientID, self.model_location, 0, vrep.simx_opmode_oneshot_wait)
        while returnCode != 0:
            returnCode, self.robot_handle = vrep.simxLoadModel(self.clientID, self.model_location, 0, vrep.simx_opmode_oneshot_wait)
            rospy.loginfo("Previous loading failed. Reload robot. robot handler: %s | return code: %s",
                          self.robot_handle, returnCode)

    def remove_all_turtlebot2i(self):
        """Remove every robot model (and stray part) that may linger in the scene."""
        turtlebot2i_namelist = (['turtlebot2i'] +
                                ['turtlebot2i#%d' % i for i in range(10)] +
                                ['turtlebot_body_visual', 'turtlebot_reference',
                                 'plate_middle_link_visual', 'plate_middle_link_respondable', 'GPS'])
        for turtlebot2i_name in turtlebot2i_namelist:
            returnCode, temp_robot_handle = vrep.simxGetObjectHandle(self.clientID, turtlebot2i_name, vrep.simx_opmode_oneshot_wait)
            returnCode = vrep.simxRemoveModel(self.clientID, temp_robot_handle, vrep.simx_opmode_oneshot_wait)

    def check_robot_correctness(self):
        """Ensure a model named exactly 'turtlebot2i' exists; rebuild it if not."""
        returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)
        while returnCode != 0:
            rospy.loginfo("the exact 'turtlebot2i' is not found! Try to delete all possible robot and then load again.")
            self.remove_all_turtlebot2i()
            returnCode, self.robot_handle = vrep.simxLoadModel(self.clientID, self.model_location, 0, vrep.simx_opmode_oneshot_wait)
            time.sleep(10)  # give the simulator time to finish loading
            returnCode, self.robot_handle = vrep.simxGetObjectHandle(self.clientID, 'turtlebot2i', vrep.simx_opmode_oneshot_wait)

    def changeScenario(self):
        """Cycle to the next obstacle layout and apply it."""
        self.scenarioNr = (self.scenarioNr + 1) % 2
        if self.scenarioNr == 0:
            self.setScenarioOriginal()
        else:
            self.setScenarioMove1()

    def shutdown(self):
        """Close the remote-API connection."""
        vrep.simxFinish(self.clientID)
class Env():
    """Gym-style RL environment bridging ROS topics and the V-REP scene.

    State vector (length 17): 12 per-sector obstacle distances (camera FOV
    split into angular wedges) followed by [linear_speed, angular_speed,
    risk_max, r_warning, r_clear].  Actions are discrete indices into
    action_list ([left_scale, right_scale] wheel-speed pairs).
    """

    def __init__(self):
        self.client = actionlib.SimpleActionClient('turtlebot2i/move_base', MoveBaseAction)
        self.vrep_control = VrepManipulation()
        self.goal = MoveBaseGoal()
        self.goal.target_pose.pose.position.x = 5.0
        self.goal.target_pose.pose.position.y = 5.0
        # Candidate navigation goals (x, y) scattered over the warehouse map.
        self.target_list = [[-9.0, 6.5], [-9.0, 3.0], [-4.0, 6.5], [-4.0, 3.0],
                            [-0.5, 6.0], [ 1.0, 3.0], [ 5.0, 2.5], [ 3.0, 0.0],
                            [-8.5, 0.0], [-0.5,-2.0], [ 9.0,-6.5], [ 5.0,-2.0],
                            [-4.5,-2.5], [ 8.5,-0.5], [-9.0,-6.5]]
        self.target_idx = random.randrange(0, len(self.target_list))
        # Discrete action set: [left_wheel_scale, right_wheel_scale] pairs.
        self.action_list = [[0.0, 1.2], [0.0, 0.8], [0.0, 0.4], [0.4, 1.2],
                            [0.4, 0.8], [0.8, 1.2], [0.0, 0.0], [0.4, 0.4],
                            [0.8, 0.8], [1.2, 1.2], [1.2, 0.8], [0.8, 0.4],
                            [1.2, 0.4], [0.4, 0.0], [0.8, 0.0], [1.2, 0.0]]
        self.action_size = len(self.action_list)
        self.get_goalbox = False
        self.position = Vector3()
        self.prev_position = Vector3()
        self.orientation = 0.0
        self.sub_pos = rospy.Subscriber('/turtlebot2i/sensors/global_pose', geometry_msgs.msg.PoseStamped, self.update_pose_callback)
        self.sub_safetyzone = rospy.Subscriber('/turtlebot2i/safety/safety_zone', SafetyZone, self.safety_zone_callback)
        self.sub_vel = rospy.Subscriber('/turtlebot2i/commands/velocity', Twist, self.speed_callback)
        self.sub_bumper = rospy.Subscriber('/turtlebot2i/events/bumper', BumperEvent, self.bumper_callback)
        self.pub_safe_vel = rospy.Publisher('/turtlebot2i/safety/vel_scale', VelocityScale, queue_size=10)
        # Latest commanded speeds, cached by speed_callback.
        self.robot_linear_speed = 0.0
        self.robot_angular_speed = 0.0
        # Camera/FOV model used for scene-graph reconstruction (robot frame).
        self.origin = Point((0.0, 0.0))
        self.camera_near_clipping = 0.2   # meters
        self.camera_far_clipping = 3.5    # meters
        self.camera_fov_angle = 57.0      # degrees (half-FOV each side)
        self.n_direction = 12
        # Sector boundary angles and one triangular wedge polygon per sector.
        self.direction_list = np.linspace(-self.camera_fov_angle, self.camera_fov_angle, self.n_direction+1)
        self.obstacle_map = []
        self.obstacle_distances = np.ones((self.n_direction))*self.camera_far_clipping
        for i in range(self.n_direction):
            self.obstacle_map.append(Polygon([[self.origin.x, self.origin.y],
                [self.camera_far_clipping*cos(radians(self.direction_list[i+1])), self.camera_far_clipping*sin(radians(self.direction_list[i+1]))],
                [self.camera_far_clipping*cos(radians(self.direction_list[i])),   self.camera_far_clipping*sin(radians(self.direction_list[i]))]]))
        # Safety-zone radii; overwritten by safety_zone_callback.
        self.r_critical = 0.295
        self.r_warning = 0.31
        self.r_clear = 0.32
        self.collision = False
        self.risk_max = 0.0
        # Bookkeeping for the most threatening obstacle (not updated in this file).
        self.nearest_type = 0
        self.min_distance = self.camera_far_clipping
        self.nearest_direction = 0.0
        self.nearest_speed = 0.0
        self.speed_monitor = deque([])

    def distance2D(self, pos1, pos2):
        """Euclidean distance in the XY plane between two objects with .x/.y."""
        return sqrt((pos1.x - pos2.x)**2 + (pos1.y - pos2.y)**2)

    def getGoalDistance(self):
        """Distance from the robot to the current navigation goal."""
        return self.distance2D(self.goal.target_pose.pose.position, self.position)

    def update_pose_callback(self, data):
        """Cache the robot's world position and yaw from the global-pose topic."""
        self.position = data.pose.position
        (roll, pitch, self.orientation) = euler_from_quaternion([data.pose.orientation.x, data.pose.orientation.y, data.pose.orientation.z, data.pose.orientation.w])

    def speed_callback(self, data):
        """Cache the commanded velocity published by the move_base stack."""
        self.robot_linear_speed = data.linear.x
        self.robot_angular_speed = data.angular.z

    def safety_zone_callback(self, data):
        """Cache the dynamically computed safety-zone radii."""
        self.r_critical = data.critical_zone_radius
        self.r_warning = data.warning_zone_radius
        self.r_clear = data.clear_zone_radius

    def bumper_callback(self, data):
        """Latch a collision flag when the bumper is pressed (state == 1)."""
        if data.state == 1:
            self.collision = True

    def rotated_pos(self, pointX, pointY, centerX, centerY, r00, r01, r10, r11):
        """Rotate (pointX, pointY) about (centerX, centerY) by the 2x2 matrix [[r00, r01], [r10, r11]]."""
        point_X_rotated = r00*pointX + r01*pointY + centerX - r00*centerX - r01*centerY
        point_Y_rotated = r10*pointX + r11*pointY + centerY - r10*centerX - r11*centerY
        return [point_X_rotated, point_Y_rotated]

    def sceneGraphReconstruction(self, data):
        """Rebuild per-sector obstacle distances from a SafetyRisk message.

        Each detected object is reconstructed as a rotated rectangle in the
        robot frame, nudged along its bearing until its nearest edge matches
        the measured distance, then intersected with the FOV wedges; the
        closest hit per sector is recorded.  Returns the distance array.
        """
        self.obstacle_distances = np.ones((self.n_direction))*self.camera_far_clipping
        n_obstacle = len(data.type)  # number of detected objects
        if n_obstacle > 0:
            self.risk_max = max(data.risk_value)
        else:
            self.risk_max = 0.0
        for i in range(n_obstacle):
            # Obstacle centre in the robot frame (polar -> cartesian).
            obs_center_x = (data.distance[i])*cos(radians(data.direction[i]))
            obs_center_y = (data.distance[i])*sin(radians(data.direction[i]))
            # 2D rotation undoing the robot's yaw.
            r00 = np.cos((-self.orientation))
            r01 = -np.sin((-self.orientation))
            r10 = np.sin((-self.orientation))
            r11 = np.cos((-self.orientation))
            obstacle = Polygon([self.rotated_pos(obs_center_x-data.size_x[i]/2, obs_center_y-data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11),
                                self.rotated_pos(obs_center_x-data.size_x[i]/2, obs_center_y+data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11),
                                self.rotated_pos(obs_center_x+data.size_x[i]/2, obs_center_y+data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11),
                                self.rotated_pos(obs_center_x+data.size_x[i]/2, obs_center_y-data.size_y[i]/2, obs_center_x, obs_center_y, r00, r01, r10, r11)])
            # The box was built around its centre, so its nearest edge is
            # closer than the measured distance; slide it out along the
            # bearing until the two agree (within 2 cm).
            curr_distance = self.origin.distance(obstacle)
            obstacle = translate(obstacle, (data.distance[i]-curr_distance)*cos(radians(data.direction[i])), (data.distance[i]-curr_distance)*sin(radians(data.direction[i])))
            curr_distance = self.origin.distance(obstacle)
            while (data.distance[i] - curr_distance) > 0.02:
                obstacle = translate(obstacle, (data.distance[i]-curr_distance)*cos(radians(data.direction[i])), (data.distance[i]-curr_distance)*sin(radians(data.direction[i])))
                curr_distance = self.origin.distance(obstacle)
            # Record the nearest intersection per FOV sector.  (Loop variable
            # renamed from 'i' to 'j': the original shadowed the outer index.)
            for j in range(self.n_direction):
                if obstacle.intersects(self.obstacle_map[j]):
                    intersection_poylgon = obstacle.intersection(self.obstacle_map[j])
                    self.obstacle_distances[j] = min(self.obstacle_distances[j], self.origin.distance(intersection_poylgon))
        return self.obstacle_distances

    def getState(self, safety_risk_msg):
        """Build the RL state vector from a SafetyRisk message; returns (state, done)."""
        obstacle_distances = list(self.sceneGraphReconstruction(safety_risk_msg))
        done = False
        if self.collision:
            done = True
            self.collision = False  # consume the latched bumper event
        if self.getGoalDistance() < 0.5:
            self.get_goalbox = True
        return obstacle_distances + [self.robot_linear_speed, self.robot_angular_speed, self.risk_max, self.r_warning, self.r_clear], done

    def getEmptyState(self):
        """State vector used before any sensor data arrives (all sectors clear)."""
        return list(np.ones((self.n_direction))*self.camera_far_clipping) + [0.0, 0.0, 0.0, 0.31, 0.32]

    def publishScaleSpeed(self, left_vel_scale, right_vel_scale):
        """Publish wheel-speed scaling factors to the safety controller."""
        vel_scale_message = VelocityScale()
        vel_scale_message.header = std_msgs.msg.Header()
        vel_scale_message.header.stamp = rospy.Time.now()
        vel_scale_message.left_vel_scale = left_vel_scale
        vel_scale_message.right_vel_scale = right_vel_scale
        self.pub_safe_vel.publish(vel_scale_message)

    def respawn_goal(self, reset=False):
        """Send a new random navigation goal to move_base.

        With reset=True the obstacle layout is cycled first.
        """
        if reset:
            self.vrep_control.changeScenario()
        self.client.wait_for_server()
        self.goal.target_pose.header.frame_id = "map"
        self.goal.target_pose.header.stamp = rospy.Time.now()
        # Pick a target different from the previous one.
        next_target_idx = random.randrange(0, len(self.target_list))
        while (self.target_idx == next_target_idx):
            next_target_idx = random.randrange(0, len(self.target_list))
        self.target_idx = next_target_idx
        next_goal = self.target_list[self.target_idx]
        self.goal.target_pose.pose.position.x = next_goal[0]
        self.goal.target_pose.pose.position.y = next_goal[1]
        self.goal.target_pose.pose.position.z = 0.063
        # Random heading (the dead Quaternion() temp from the original was removed).
        yaw = random.uniform(-pi, pi)
        q = quaternion_from_euler(0, 0, yaw)  # returns (x, y, z, w)
        self.goal.target_pose.pose.orientation.x = 0.0
        self.goal.target_pose.pose.orientation.y = 0.0
        self.goal.target_pose.pose.orientation.z = q[2]
        self.goal.target_pose.pose.orientation.w = q[3]
        self.client.send_goal(self.goal)

    def setReward(self, state, done, action):
        """Shaped reward: penalize turning toward the nearest obstacle,
        scale the penalty with proximity/risk, reward travel progress,
        and apply large terminal penalties on collision.
        """
        nearest_obstacle_distance = min(state[:12])
        nearest_obstacle_direction = np.argmin(state[:12])  # 0 = rightmost sector
        # Floors the risk at 1 so the directional penalties never vanish.
        # NOTE(review): max(1, risk) discards risk values below 1 -- confirm intended.
        risk_max = max(1, state[-3])
        yaw_reward = 1.0
        if (nearest_obstacle_direction <= self.n_direction/3-1):     # obstacle on the right
            if (action >= 10):  # actions 10-15 steer right
                yaw_reward = -(action-9)*risk_max/6
        elif (nearest_obstacle_direction >= self.n_direction*2/3):   # obstacle on the left
            if (action <= 5):   # actions 0-5 steer left
                yaw_reward = -(6-action)*risk_max/6
        else:                                                        # obstacle in front
            if (action in [6,7,8,9]):  # straight / stop actions
                yaw_reward = -(action-5)*risk_max/4
        distance_rate = 1.0 / max(nearest_obstacle_distance, 0.175)
        if nearest_obstacle_distance < 0.295 + 0.03:        # inside critical zone (+offset)
            reward = (yaw_reward * distance_rate) - 50
        elif nearest_obstacle_distance < state[-2] + 0.05:  # inside warning zone
            reward = (yaw_reward * distance_rate) - 10
        elif self.distance2D(self.prev_position, self.position) > 0.4:
            # Progress bonus for every 0.4 m travelled outside danger zones.
            reward = 8
            self.prev_position = self.position
        elif nearest_obstacle_distance < state[-1] + 0.05:  # inside clear zone
            reward = yaw_reward
        else:
            reward = -1  # mild time penalty
        if done:
            rospy.loginfo("Collision!!")
            reward = -5000
            self.publishScaleSpeed(0.0, 0.0)
            # Bug fix: the original assigned Pose(), which has no .x/.y and
            # made the next distance2D() call raise AttributeError; Vector3
            # matches the initialization in __init__.
            self.prev_position = Vector3()
        if self.get_goalbox:
            rospy.loginfo("Goal!!")
            self.publishScaleSpeed(0.0, 0.0)
            self.respawn_goal(reset=True)
            self.get_goalbox = False
        return reward

    def execute(self, action):
        """Publish the wheel-speed scales for the chosen discrete action."""
        self.publishScaleSpeed(self.action_list[action][0], self.action_list[action][1])

    def step(self, action):
        """Apply *action*, wait for the next risk message, return (state, reward, done)."""
        self.execute(action)
        data = None
        while data is None:
            try:
                data = rospy.wait_for_message('/turtlebot2i/safety/obstacles_risk', SafetyRisk, timeout=5)
            except Exception:
                # Timed out -- the robot model may have broken; repair and retry.
                # (Narrowed from a bare except so Ctrl-C can still interrupt.)
                self.vrep_control.check_robot_correctness()
        state, done = self.getState(data)
        reward = self.setReward(state, done, action)
        return np.asarray(state), reward, done

    def reset(self, data=None):
        """Respawn robot and goal, then wait for a first risk message to build state."""
        self.publishScaleSpeed(0, 0)
        self.vrep_control.reset_robot_pos()
        self.respawn_goal(reset=True)
        self.vrep_control.check_robot_correctness()
        while data is None:
            try:
                data = rospy.wait_for_message('/turtlebot2i/safety/obstacles_risk', SafetyRisk, timeout=5)
            except Exception:
                self.vrep_control.check_robot_correctness()
        state, done = self.getState(data)
        self.prev_position = self.position
        return np.asarray(state)
| EricssonResearch/scott-eu | simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/environment_cnn.py | Python | apache-2.0 | 27,593 |
#!/usr/bin/env python
"""
Install the packages necessary for building conda
distributions.
Requires conda to be installed and on the path.
There are scripts to help with the installation of
miniconda in the same directory as this script.
"""
from __future__ import print_function
# Packages required for building and uploading conda distributions.
BUILD_PACKAGES = ['conda-build', 'anaconda-client', 'jinja2', 'setuptools']

if __name__ == '__main__':
    import subprocess
    # Install the build tooling into the root conda environment.
    base_cmd = ['conda', 'install', '--yes', '-n', 'root', '--quiet']
    subprocess.check_call(base_cmd + BUILD_PACKAGES)
| pelson/Obvious-CI | scripts/obvci_install_conda_build_tools.py | Python | bsd-3-clause | 522 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, inspect, traceback, re
from opus_core.tests import opus_unittest
from opus_core.logger import logger
class TestScanner(object):
    """Recursively scans an Opus package directory for modules that contain
    OpusTestCase / OpusIntegrationTestCase subclasses.

    Returns (module_name, class_name) pairs.  Note: result order may vary,
    as modules are discovered via os.walk and members via inspect.
    """

    def find_opus_test_cases_for_package(self, package):
        """Return (module_name, class_name) pairs for unit-test cases in *package*."""
        return self._find_opus_test_cases_for_package(package, opus_unittest.OpusTestCase)

    def find_opus_integration_test_cases_for_package(self, package):
        """Return (module_name, class_name) pairs for integration-test cases in *package*."""
        return self._find_opus_test_cases_for_package(package, opus_unittest.OpusIntegrationTestCase)

    def _find_opus_test_cases_for_package(self, package, test_case_class):
        root = OpusPackage().get_path_for_package(package)
        modules_with_test_cases = []
        # Raw strings fix the invalid '\s' escape; compiled once, not per file.
        import_pattern = re.compile(r'^\s*(import|from).*unittest')
        skip_pattern = re.compile(r'^.*#.*IGNORE_THIS_FILE')
        for path, dirs, files in os.walk(root, topdown=True):
            for file_name in files:
                if not file_name.endswith('.py'):
                    continue
                # Cheap textual pre-filter: only import modules mentioning unittest.
                found_import = False
                f = open(os.path.join(path, file_name), 'r')
                try:
                    for line in f:
                        if skip_pattern.match(line):
                            break
                        if import_pattern.match(line):
                            found_import = True
                            break
                finally:
                    f.close()  # the original never closed the handle
                if not found_import:
                    continue
                module_name = self._get_module_name(package, root, path, file_name)
                try:
                    exec('import %s' % module_name)
                except Exception:
                    # 'except Exception, val' (py2-only syntax) replaced.
                    logger.log_error("Could not import %s!" % module_name)
                    traceback.print_exc()
                    continue
                module = eval(module_name)
                if not inspect.ismodule(module):
                    logger.log_warning(
                        'WARNING: %s is not a module!' % module)
                    continue
                for key, value in inspect.getmembers(module):
                    try:
                        is_subclass = issubclass(value, test_case_class)
                    except TypeError:
                        # Narrowed from a bare except: issubclass raises
                        # TypeError for non-class members, nothing else is expected.
                        continue
                    if is_subclass:
                        modules_with_test_cases.append((module_name, value.__name__))
        return modules_with_test_cases

    def _get_module_name(self, package, root, path, file):
        """Convert a filesystem location into a dotted module name.

        e.g. ('opus_core', '/r', '/r/models', 'm.py') -> 'opus_core.models.m'
        """
        module_base = file[0:-len('.py')]
        rel_path = path.replace(root, '')
        if rel_path.startswith(os.sep):
            rel_path = rel_path[1:]
        module_path = rel_path.replace(os.sep, '.')
        # Fixed: the original compared with 'is not', i.e. string identity,
        # which only works by accident of CPython interning.
        if module_path != '':
            module_path = '.'.join([package, module_path])
        else:
            module_path = package
        return '.'.join([module_path, module_base])
from opus_core.tests import opus_unittest
from random import randint
from opus_core.opus_package import OpusPackage
main_test_case = 'TestTestScanner'
class TestTestScanner(opus_unittest.OpusTestCase):
    """Unit tests covering TestScanner's discovery behaviour."""

    def setUp(self):
        # While any warnings we receive ought to be legitimate, we are not
        # concerned with those right now.
        logger.enable_hidden_error_and_warning_words()

    def tearDown(self):
        logger.disable_hidden_error_and_warning_words()

    def test_get_module_name(self):
        """Dotted module paths are built correctly for nested directories."""
        package = 'opus_core'
        base_path = os.path.join('workspace', 'opus', package)
        module_name = 'test_scanner'
        file_name = '%s.py' % module_name
        path = TestScanner()._get_module_name(
            package,
            base_path,
            os.path.join(base_path, 'tests', 'utils'),
            file_name)
        expected = '%s.tests.utils.%s' % (package, module_name)
        self.assert_(path == expected,
            "Unexpected module path: Expected %s. Received %s."
                % (expected, path))

        package = 'package'
        base_path = os.path.join('workspc', package)
        module_name = 'test_module'
        file_name = '%s.py' % module_name
        path = TestScanner()._get_module_name(
            package,
            base_path,
            os.path.join(base_path, 'test'),
            file_name)
        expected = '%s.test.%s' % (package, module_name)
        self.assert_(path == expected,
            "Unexpected module path: Expected %s. Received %s."
                % (expected, path))

    def test_find_files_with_test_cases(self):
        """The scanner locates this module and every test class in it."""
        package = 'opus_core'
        test_modules = TestScanner().find_opus_test_cases_for_package(package)
        self.assert_(
            'opus_core.tests.utils.test_scanner' in [i[0] for i in test_modules],
            "TestScanner did not find itself "
            "(opus_core.tests.utils.test_scanner)!")
        for test_case in test_cases_in_this_file:
            self.assert_(
                test_case in [i[1] for i in test_modules],
                "TestScanner did not find one of its own test cases "
                "(%s)!" % test_case)

    def test_does_not_find_files_without_test_cases(self):
        """A .py file that never imports unittest is ignored by the scanner."""
        path = OpusPackage().get_path_for_package('opus_core')
        path = os.path.join(path, 'tests', 'utils')
        module_name = 'test_scanner_test_file'
        file_name = '%s.py' % module_name
        file_name = os.path.join(path, file_name)
        f = open(file_name, 'w')
        f.write("""class TestClass(object):
    def test_method(self):
        print 'Delete me if you wish, for I am but a unit-test test file!'"""
            )
        f.close()
        self.assert_(os.path.exists(file_name))
        package = 'opus_core'
        test_modules = TestScanner().find_opus_test_cases_for_package(package)
        # The scanner returns (module_name, class_name) tuples; the original
        # assertion compared a bare module-name string against that tuple
        # list, so it could never fail.  Compare against the module names.
        self.assert_(
            'opus_core.tests.utils.%s' % module_name not in [i[0] for i in test_modules],
            "TestScanner found a test file created without unit tests "
            "(opus_core.tests.utils.%s)!" % module_name)
        os.remove(file_name)
# Generate a handful of empty OpusTestCase subclasses so the scanner has
# several classes to discover in this module; record every class name it
# is expected to report.
test_cases_in_this_file = [main_test_case]
for _auto_index in range(5):
    _auto_name = 'AutoTestCase%s' % _auto_index
    test_cases_in_this_file.append(_auto_name)
    exec('class %s(opus_unittest.OpusTestCase):\n'
         '    """Automatic test case generated for %s.""" '
         % (_auto_name, main_test_case))
if __name__ == '__main__':
opus_unittest.main() | apdjustino/DRCOG_Urbansim | src/opus_core/tests/utils/test_scanner.py | Python | agpl-3.0 | 7,287 |
# Sieve of Eratosthenes: The sieve of Eratosthenes is one of the most efficient ways
# to find all of the smaller primes (below 10 million or so).
import math
def sieve(n):
    """Generate all prime numbers strictly below *n*.

    Classic Sieve of Eratosthenes: O(n log log n) time, O(n) space.

    Args:
        n: exclusive upper bound; any value below 2 yields nothing.

    Yields:
        The primes 2, 3, 5, ... < n in increasing order.
    """
    if n < 2:
        return
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    i = 2
    while i * i < n:
        if is_prime[i]:
            # Start at i*i: every smaller multiple of i has a smaller prime
            # factor and was already crossed out.
            for multiple in range(i * i, n, i):
                is_prime[multiple] = False
        i += 1
    for candidate in range(2, n):
        if is_prime[candidate]:
            yield candidate
def main():
    """Prompt for an upper bound until a valid integer is entered, then
    print every prime below it, one per line."""
    length = None
    while length is None:
        try:
            length = int(input("Enter length: "))
        except ValueError:
            print("Enter a valid number.\n")
    for prime in sieve(length):
        print(prime)
if __name__ == "__main__":
main() | idunnowhy9000/Projects | SOURCE/Python/Classic Algorithms/Sieve of Eratosthenes.py | Python | mit | 659 |
import sys
"""
INPUT:
hat
abc
Zu6
"""
def str_perm(word):
    """Return every permutation of *word* (duplicates included for repeated
    letters), in position-major order.  A single character permutes to
    itself; the empty string yields an empty list."""
    if len(word) == 1:
        return [word]
    return [letter + tail
            for index, letter in enumerate(word)
            for tail in str_perm(word[:index] + word[index + 1:])]
with open(sys.argv[1], "r") as f:
for line in f:
print ",".join(sorted(str_perm(line.strip())))
| tyop/CodeEval | string_permutations.py | Python | mit | 443 |
import codecs
import re
import types
import sys
from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from constants import encodings, ReparseException
#Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([str(item) for item in spaceCharacters])
asciiLettersBytes = frozenset([str(item) for item in asciiLetters])
asciiUppercaseBytes = frozenset([str(item) for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile(ur"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []            # chunks already read from the stream
        self.position = [-1, 0]     # [chunk index, offset within that chunk]

    def tell(self):
        """Return the absolute position within the buffered data."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos

    def seek(self, pos):
        """Move to absolute position *pos*; it must already be buffered."""
        assert pos < self._bufferedBytes()
        offset = pos
        i = 0
        while len(self.buffer[i]) < offset:
            # BUG FIX: the original subtracted `pos` here instead of the
            # chunk length, corrupting any seek that landed beyond the
            # first buffered chunk.
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer when possible."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) - 1 and
              self.position[1] == len(self.buffer[-1])):
            # Positioned exactly at the end of the buffered data, so go
            # straight to the underlying stream.  (The original compared
            # position[0] with len(self.buffer), which is never true; the
            # result was still correct but always took the buffer path.)
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        """Total number of bytes currently held in the buffer."""
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        """Read *bytes* from the raw stream, append as a new chunk, and
        leave the position at the end of that chunk."""
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        """Serve *bytes* from buffered chunks, topping up from the stream
        if the buffer runs out."""
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                # This chunk satisfies the whole request.
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                # Consume the rest of this chunk and move to the next one.
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:
                                   bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        return "".join(rv)
class HTMLInputStream:
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """

    # Characters pulled from the decoded stream per readChunk() call.
    _defaultChunkSize = 10240

    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        """
        #Craziness
        # len(...) == 1 means non-BMP characters are stored natively (UCS-4 /
        # wide build); otherwise they become surrogate pairs (UCS-2 build).
        if len(u"\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2

        # List of where new lines occur
        # NOTE(review): nothing in this class appears to read newLines --
        # confirm against callers before relying on it.
        self.newLines = [0]

        self.charEncoding = (codecName(encoding), "certain")

        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        # Encoding Information
        #Number of bytes to use when looking for a meta element with
        #encoding information
        self.numBytesMeta = 512
        #Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        #Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"

        #Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)

        self.reset()

    def reset(self):
        """(Re)initialise decoding state so reading restarts from rawStream
        using the currently selected character encoding."""
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')

        self.chunk = u""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        #Flag to indicate we may have a CR LF broken across a data chunk
        self._lastChunkEndsWithCR = False

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            # Otherwise treat source as a string and convert to a file object
            if isinstance(source, unicode):
                source = source.encode('utf-8')
                self.charEncoding = ("utf-8", "certain")
            import cStringIO
            stream = cStringIO.StringIO(str(source))

        # Wrap unseekable streams (and stdin) so the encoding pre-scan can
        # rewind after peeking at the first bytes.
        if (not(hasattr(stream, "tell") and hasattr(stream, "seek")) or
            stream is sys.stdin):
            stream = BufferedStream(stream)

        return stream

    def detectEncoding(self, parseMeta=True, chardet=True):
        """Determine the stream's encoding and a confidence level, trying in
        order: BOM, <meta> pre-scan, chardet guess, default encoding."""
        #First look for a BOM
        #This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        #If there is no BOM need to look for meta elements with encoding
        #information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        #Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                from chardet.universaldetector import UniversalDetector
                # NOTE(review): `buffers` is accumulated but never used.
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence="tentative"
            encoding = self.defaultEncoding

        #Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1":"windows-1252"}

        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]

        return encoding, confidence

    def changeEncoding(self, newEncoding):
        """Switch to *newEncoding* (e.g. after a late <meta> declaration).

        If the encoding actually changes, the stream is rewound and a
        ReparseException is raised so the caller restarts parsing.
        """
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            # NOTE(review): charEncoding was just reassigned above, so the
            # message reports the *new* name twice -- verify intent.
            raise ReparseException, "Encoding changed from %s to %s"%(self.charEncoding[0], newEncoding)

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])         # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)         # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2]) # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)

        return encoding

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Pre-scan only the first numBytesMeta bytes, then rewind.
        buffer = self.rawStream.read(self.numBytesMeta)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"

        return encoding

    def _position(self, offset):
        """Return the (line, column) that *offset* within the current chunk
        corresponds to, counting from the chunk boundaries recorded in
        prevNumLines / prevNumCols."""
        chunk = self.chunk
        nLines = chunk.count(u'\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind(u'\n', 0, offset)
        if lastLinePos == -1:
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)

    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line+1, col)

    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF

        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1

        return char

    def readChunk(self, chunkSize=None):
        """Pull the next chunk from the decoded stream, report character
        errors, and normalise NUL bytes and line endings.  Returns False at
        end of input."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize

        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)

        self.chunk = u""
        self.chunkSize = 0
        self.chunkOffset = 0

        data = self.dataStream.read(chunkSize)

        if not data:
            return False

        self.reportCharacterErrors(data)

        # NUL is reported above, then replaced by U+FFFD per the HTML spec.
        data = data.replace(u"\u0000", u"\ufffd")
        #Check for CR LF broken across chunks
        if (self._lastChunkEndsWithCR and data[0] == u"\n"):
            data = data[1:]
            # Stop if the chunk is now empty
            if not data:
                return False

        self._lastChunkEndsWithCR = data[-1] == u"\r"
        data = data.replace(u"\r\n", u"\n")
        data = data.replace(u"\r", u"\n")

        self.chunk = data
        self.chunkSize = len(data)

        return True

    def characterErrorsUCS4(self, data):
        """Record parse errors for NULs and invalid codepoints (wide build:
        every invalid character is a single code unit)."""
        for i in xrange(data.count(u"\u0000")):
            self.errors.append("null-character")
        for i in xrange(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")

    def characterErrorsUCS2(self, data):
        """Record parse errors on narrow builds, where non-BMP characters
        appear as surrogate pairs and must be decoded by hand."""
        #Someone picked the wrong compile option
        #You lose
        for i in xrange(data.count(u"\u0000")):
            self.errors.append("null-character")
        skip = False
        import sys
        for match in invalid_unicode_re.finditer(data):
            # skip is set after a surrogate pair so its low half is not
            # reported twice.
            # NOTE(review): skip is only reset in the final else branch, so
            # a match immediately following the skipped low surrogate may
            # also be swallowed -- verify against upstream html5lib.
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            #Pretty sure there should be endianness issues here
            if (codepoint >= 0xD800 and codepoint <= 0xDBFF and
                pos < len(data) - 1 and
                ord(data[pos + 1]) >= 0xDC00 and
                ord(data[pos + 1]) <= 0xDFFF):
                #We have a surrogate pair!
                #From a perl manpage
                char_val = (0x10000 + (codepoint - 0xD800) * 0x400 +
                            (ord(data[pos + 1]) - 0xDC00))
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone surrogate at the very end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    #This is still wrong if it is possible for a surrogate pair to break a
    #chunk boundary

    def charsUntil(self, characters, opposite = False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """

        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = u"^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)

        rv = []

        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
                # If the whole remainder of the chunk matched,
                # use it all and read the next chunk
                rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = u"".join(rv)
        return r

    def charsUntilEOF(self):
        """ Returns a string of characters from the stream up to EOF."""
        rv = []

        while True:
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = u"".join(rv)
        return r

    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class EncodingBytes(str):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # Byte matching in the encoding pre-scan is case-insensitive, so
        # lower-case the whole buffer once at construction time.
        return str.__new__(self, value.lower())

    def __init__(self, value):
        # Start *before* the first byte; next() advances onto index 0.
        self._position=-1

    def __iter__(self):
        return self

    def next(self):
        """Advance one byte and return it (Python 2 iterator protocol).

        Raises StopIteration when walking past the end; the pre-scan
        dispatcher in EncodingParser relies on that to stop cleanly.
        """
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p]

    def previous(self):
        """Return the current byte, then step the position back by one."""
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p]

    def setPosition(self, position):
        # Once the scanner has run off the end, any further positioning is
        # treated as exhaustion.
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position               # use property for the error-checking
        while p < len(self):
            c = self[p]
            if c not in chars:
                # Leave the position on the first non-skipped byte.
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        # Inverse of skip(): advance until a byte *in* chars is found.
        p = self.position
        while p < len(self):
            c = self[p]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p+len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes)-1)
            return True
        else:
            # Not found: treat as exhaustion so the dispatcher stops.
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None

    def getEncoding(self):
        """Run the pre-scan over the buffer and return the detected encoding
        name, or None.  Handlers return False to stop the scan (encoding
        found) and True to keep going; StopIteration from EncodingBytes
        means the buffer ran out."""
        # Ordered so longer/more specific prefixes are tried first
        # (e.g. "<!--" before "<!").
        methodDispatch = (
            ("<!--",self.handleComment),
            ("<meta",self.handleMeta),
            ("</",self.handlePossibleEndTag),
            ("<!",self.handleOther),
            ("<?",self.handleOther),
            ("<",self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing=False
                        break
            if not keepParsing:
                break

        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo("-->")

    def handleMeta(self):
        """Inspect a <meta> element's attributes for charset or content
        declarations; returns False once an encoding has been found."""
        if self.data.currentByte not in spaceCharactersBytes:
            #if we have <meta not followed by a space so just keep going
            return True
        #We have a valid meta element we want to search for attributes
        while True:
            #Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == "charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == "content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        # Consume the byte after "</" before deciding what the tag is.
        self.data.next()
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        """Skip over a (non-meta) tag, consuming its attributes so that '<'
        bytes inside attribute values are not misread as new tags."""
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            #If the next byte is not an ascii letter either ignore this
            #fragment (possible start tag case) or treat it according to
            #handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True

        c = data.skipUntil(spacesAngleBrackets)
        if c == "<":
            #return to the first step in the overall "two step" algorithm
            #reprocessing the < byte
            data.previous()
        else:
            #Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        # Skip to the end of a declaration / processing instruction.
        return self.data.jumpTo(">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # The numbered steps below follow the HTML spec's "get an attribute"
        # algorithm for the encoding pre-scan.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset("/"))
        # Step 2
        if c in (">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        #Step 4 attribute name
        while True:
            if c == "=" and attrName:
                break
            elif c in spaceCharactersBytes:
                #Step 6!
                c = data.skip()
                c = data.next()
                break
            elif c in ("/", ">"):
                return "".join(attrName), ""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c == None:
                return None
            else:
                attrName.append(c)
            #Step 5
            c = data.next()
        #Step 7
        if c != "=":
            data.previous()
            return "".join(attrName), ""
        #Step 8
        data.next()
        #Step 9
        c = data.skip()
        #Step 10
        if c in ("'", '"'):
            #10.1
            quoteChar = c
            while True:
                #10.2
                c = data.next()
                #10.3
                if c == quoteChar:
                    data.next()
                    return "".join(attrName), "".join(attrValue)
                #10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                #10.5
                else:
                    attrValue.append(c)
        elif c == ">":
            return "".join(attrName), ""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = data.next()
            if c in spacesAngleBrackets:
                return "".join(attrName), "".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset value from a meta element's content attribute,
    e.g. content="text/html; charset=utf-8"."""
    def __init__(self, data):
        # data is an EncodingBytes wrapping the attribute's value.
        self.data = data
    def parse(self):
        """Return the charset substring, or None if none can be found.

        Relies on EncodingBytes raising StopIteration whenever a scan runs
        off the end of the buffer.
        """
        try:
            #Check if the attr name is charset
            #otherwise return
            self.data.jumpTo("charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == "=":
                #If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            #Look for an encoding between matching quote marks
            if self.data.currentByte in ('"', "'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                #Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    #Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # Ran out of input anywhere above: no charset present.
            return None
def codecName(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    # Guard clause: anything that is not a (byte or unicode) string cannot
    # name an encoding.
    if encoding is None or type(encoding) not in types.StringTypes:
        return None
    canonicalName = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonicalName, None)
| havard024/prego | venv/lib/python2.7/site-packages/html5lib/inputstream.py | Python | mit | 27,975 |
from sneakers.modules import Encoder, Parameter
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA as cryptoRSA
class Rsa(Encoder):
    """Encoder that encrypts/decrypts payloads with RSA (PKCS#1 OAEP)."""

    info = {
        "name": "RSA",
        "author": "davinerd",
        "description": "Encrypts data using RSA",
        "comments": []
    }

    params = {
        'sending': [
            Parameter('publicKey', True,
                      'The filename of the public key, matched to the private key used for decryption.')
        ],
        'receiving': [
            Parameter('privateKey', True,
                      'The filename of the private key, matched to the public key used for decryption.')
        ]
    }

    def encode(self, data):
        """Encrypt *data* with the configured public key and return the
        ciphertext.

        The key file is closed deterministically via a context manager
        (the original left the handle open).
        """
        with open(self.param('sending', 'publicKey')) as key_file:
            key = cryptoRSA.importKey(key_file.read())
        cipher = PKCS1_OAEP.new(key)
        return cipher.encrypt(data)

    def decode(self, data):
        """Decrypt *data* with the configured private key and return the
        plaintext."""
        with open(self.param('receiving', 'privateKey')) as key_file:
            key = cryptoRSA.importKey(key_file.read())
        cipher = PKCS1_OAEP.new(key)
        return cipher.decrypt(data)
| davinerd/sneaky-creeper | sneakers/encoders/rsa.py | Python | mit | 1,250 |
import unittest
from lxml import html
from .boner import HtmlBoner
from .chunker import Elt, TextChunk, SkipChunk, HtmlChunker
from .exceptions import MismatchingText, UnasignableChunk
class IntroduceSemanticsTestCase(unittest.TestCase):
    """HtmlChunker._introduce_semantics must round-trip nested markup."""

    def setUp(self):
        self.chunker = HtmlChunker()

    def deconstruct_and_rebuild(self, original, element):
        """Chunk *original*, re-introduce its semantics, and rebuild the
        result under a fresh *element* container."""
        tree_chunks = self.chunker.chunk_tree(html.fromstring(original))
        first_chunk = tree_chunks[0]
        leading_text, rebuilt_children = self.chunker._introduce_semantics(
            first_chunk.text, first_chunk.elts)
        container = html.Element(element)
        container.text = leading_text
        for child in rebuilt_children:
            container.append(child)
        return container

    def test_attribs(self):
        """Element attributes survive the deconstruct/rebuild round trip."""
        original = (
            '<div>'
            '<b class="test">Some <em border="solid" style="width:3em;">text</em></b>'
            '</div>')
        body = self.deconstruct_and_rebuild(original, "div")
        self.assertEqual(
            original,
            html.tostring(body, encoding="utf-8").decode("utf-8"))

    def test_multi_level(self):
        """Deeply nested inline elements are rebuilt without loss."""
        original = (
            '<div>'
            '<b>We <em>have some</em> <a>silly <code><small>sentence</small>'
            ' </code><sub>with a lot</sub> of</a> </b>'
            'complicated<sup><strong></strong> semantics</sup>'
            '</div>')
        body = self.deconstruct_and_rebuild(original, "div")
        self.assertEqual(
            original,
            html.tostring(body, encoding="utf-8").decode("utf-8"))
class ChunkTreeTestCase(unittest.TestCase):
    """Round-trip tests for HtmlChunker.chunk_tree / unchunk."""
    # TODO: add skipchunk tests
    def setUp(self):
        self.chunker = HtmlChunker()

    def simple_example(self):
        # Build (source text, parsed tree, expected chunk list) for a small
        # document exercising nested inline elements and tail text.
        text = """<html>
        <head></head>
        <body>
        A test
        <h1>With foo</h1>
        but <b>also</b>
        <p>there are some <em>difficulties. <b>And</b> </em>sentences</p>
        and tails.
        </body>
        <html>"""
        # remove spaces for simplicity
        text = "".join(l.strip() for l in text.split("\n"))
        tree = html.fromstring(text)
        head, body = list(tree)
        h1, b, p = list(body)
        em, = list(p)
        em_b, = list(em)
        # Elt offsets are (element, start, end) into the chunk's flat text.
        chunks = [
            TextChunk('', [], tree, False),
            SkipChunk(head),
            TextChunk('A test', [], body, False),
            TextChunk('With foo', [], h1, False),
            TextChunk('but also', [Elt(b, 4, 8)], h1, True),
            TextChunk('there are some difficulties. And sentences',
                      [Elt(em, 15, 33), Elt(em_b, 29, 32)], p, False),
            TextChunk('and tails.', [], p, True)]
        return text, tree, chunks

    def test_chunk(self):
        # chunk_tree must reproduce the expected chunk list, and the Elt
        # offsets must slice out exactly the covered text.
        text, tree, expected = self.simple_example()
        chunks = self.chunker.chunk_tree(tree)
        self.assertEqual(chunks, expected)
        # verify positions
        text, elt = chunks[4].text, chunks[4].elts[0]
        self.assertEqual(text[elt.start: elt.end], "also")
        text, elt = chunks[5].text, chunks[5].elts[0]
        self.assertEqual(text[elt.start: elt.end], "difficulties. And ")
        text, elt = chunks[5].text, chunks[5].elts[1]
        self.assertEqual(text[elt.start: elt.end], "And")

    def test_unchunk(self):
        # unchunk wraps each classified span in a <span class=... id=...>;
        # ids encode (chunk index, span index).
        text, tree, chunks = self.simple_example()
        classifications = [
            [[None, 'X']],
            [[None, 'Y']],
            [['A test', 'X']],
            [['With foo', 'Y']],
            [['but also', 'Y']],
            [['there are some difficulties.', 'X'], [' And sentences', 'Y']],
            [['and tails.', 'Y']]]
        expected = """<html>
        <head></head>
        <body>
        <span class="X" id="chunk-2-0">A test</span>
        <h1><span class="Y" id="chunk-3-0">With foo</span></h1>
        <span class="Y" id="chunk-4-0">but <b>also</b></span>
        <p><span class="X" id="chunk-5-0">there are some <em>difficulties.</em></span>
        <span class="Y" id="chunk-5-1"><em> <b>And</b> </em>sentences</span></p>
        <span class="Y" id="chunk-6-0">and tails.</span>
        </body>
        </html>"""
        expected = "".join(e.strip() for e in expected.split("\n"))

        # Split serialized HTML on tag boundaries so the comparison diff is
        # line-oriented rather than one giant string.
        def reformat(x):
            return "\n<".join(">\n".join(x.split(">")).split("<")).split("\n")

        result = self.chunker.unchunk(tree, chunks, classifications)
        self.assertEqual(
            reformat(html.tostring(result, encoding="utf-8").decode("utf-8")),
            reformat(expected))
def _striped(l):
return [s.strip() for s in l]
class BonerTestCase(unittest.TestCase):
HTML = """
<html>
<head></head>
<body>
A test
<h1>With foo</h1>
And <em>bar</em>.
</body>
</html>""".strip()
    def test_base_read_api(self):
        # HtmlBoner exposes a list-like read API over the document's chunks:
        # iteration, len(), and (negative) indexing; str() round-trips the
        # original markup unchanged.  The None entry corresponds to the
        # skipped <head> chunk.
        tree = html.fromstring(self.HTML)
        boner = HtmlBoner(tree)
        texts = ["", None, "", "A test", "With foo", "And bar.", ""]
        self.assertEqual(
            list(s.strip() if s is not None else None for s in boner),
            texts)
        self.assertEqual(len(boner), len(texts))
        self.assertEqual(boner[0].strip(), "")
        self.assertEqual(boner[3].strip(), "A test")
        self.assertEqual(boner[-2].strip(), "And bar.")
        self.assertEqual(str(boner), self.HTML)  # no change
def test_index_error(self):
tree = html.fromstring(self.HTML)
boner = HtmlBoner(tree)
with self.assertRaises(IndexError):
boner[1000]
    def test_assign_class_str(self):
        # Assigning a plain string classifies the whole chunk: the text is
        # wrapped in a <span> carrying the class and a chunk-<n>-<m> id.
        tree = html.fromstring(self.HTML)
        boner = HtmlBoner(tree)
        boner.set_classes(4, "foo")
        # retrieve element
        self.assertEqual(boner.tree.xpath("//span[@class = 'foo']/text()"), ["With foo"])
        self.assertEqual(boner.tree.xpath("//span[@id = 'chunk-4-0']/text()"), ["With foo"])
        # Negative indices work too; nested elements stay inside the span.
        boner.set_classes(-2, "foo")
        # retrieve elements
        self.assertEqual(
            _striped(boner.tree.xpath("//span[@class = 'foo']//text()")),
            ["With foo", "And", "bar", "."])
        self.assertEqual(
            _striped(boner.tree.xpath("//span[@id = 'chunk-5-0']//text()")),
            ["And", "bar", "."])
def test_assign_class_list(self):
tree = html.fromstring(self.HTML)
boner = HtmlBoner(tree)
boner.set_classes(4, [["With ", None], ["foo", "foo"]])
self.assertEqual(boner.tree.xpath("//span[@class = 'foo']/text()"), ["foo"])
def test_assign_class_skip(self):
tree = html.fromstring(self.HTML)
boner = HtmlBoner(tree)
boner.set_classes(1, [["", None]])
boner.set_classes(1, [])
boner.set_classes(1, None)
with self.assertRaises(UnasignableChunk):
boner.set_classes(1, [["foo", "bar"]])
def test_bulk_set_classes(self):
# stripping for convenience
stripped_html = "\n".join(_striped(self.HTML.split("\n")))
tree = html.fromstring(stripped_html)
boned = HtmlBoner(tree)
boned.bulk_set_classes([
[["\n", None]],
None,
[["\n", None]],
[["\nA test\n", "foo"]],
[["With ", None], ["foo", "foo"]],
[["\nAnd bar", "bar"], [".\n", None]],
[["\n", "empty"]],
])
btree = boned.tree
self.assertEqual(btree.xpath("//span[@class='foo']/text()"), ["\nA test\n", "foo"])
self.assertEqual(btree.xpath("//span[@class='bar']//text()"), ["\nAnd ", "bar", ""])
self.assertEqual(btree.xpath("//span[@class='empty']/text()"), ["\n"])
def test_bad_set_class(self):
tree = html.fromstring(self.HTML)
boner = HtmlBoner(tree)
with self.assertRaises(MismatchingText):
boner.set_classes(0, [["foo", "bar"]])
with self.assertRaises(MismatchingText):
boner.set_classes(4, [["foo", "bar"]])
with self.assertRaises(MismatchingText):
boner.set_classes(4, [["", "bar"]])
def test_complex_tails(self):
original = ("<html><body>a <i>simple</i> paragraph <p>and</p> " +
"a <em>big <strong>complicated</strong></em> " +
"wizzy <i>long</i> tail</body></html>")
tree = html.fromstring(original)
boned = HtmlBoner(tree)
new = str(boned)
self.assertEqual(
original,
new)
# add classes to complexify
boned.set_classes(1, [["a simple", None], [" ", "x"], ["paragraph", None], [" ", None]])
boned.set_classes(
3, [[" a big", "x"], [" complicated ", None], ["wizzy long", None], [" tail", "y"]])
new = str(boned)
self.assertEqual(
new,
'<html><body>a <i>simple</i><span class="x" id="chunk-1-1"> </span>paragraph ' +
'<p>and</p><span class="x" id="chunk-3-0"> a <em>big</em></span><em> ' +
'<strong>complicated</strong></em> wizzy <i>long</i>' +
'<span class="y" id="chunk-3-3"> tail</span></body></html>')
def test_attr_kept(self):
orig_html = (
'<html style="color:black;"><body>' +
'some <p class="foo"> <a href="#">text</a> </p>' +
'</body></html>')
tree = html.fromstring(orig_html)
boner = HtmlBoner(tree, span_id_prefix="foo")
self.assertEqual(str(boner), orig_html)
boner.set_classes(1, "bar")
boner.set_classes(2, "baz")
btree = boner.tree
self.assertEqual(btree.xpath('//html/@style'), ["color:black;"])
self.assertEqual(btree.xpath('//p/@class'), ["foo"])
self.assertEqual(btree.xpath('//a/@href'), ["#"])
def test_span_id_prefix(self):
tree = html.fromstring(self.HTML)
boner = HtmlBoner(tree, span_id_prefix="foo-")
boner.set_classes(3, "bar")
boner.set_classes(4, "baz")
btree = boner.tree
self.assertTrue(
btree.xpath("//span[@id='foo-3-0']//text()"))
self.assertTrue(
btree.xpath("//span[@id='foo-4-0']//text()"))
def test_span_everywhere(self):
tree = html.fromstring("<html><body> a <p>simple</p> <b>test</b>. </body></html>")
boner = HtmlBoner(tree, no_class_no_span=False)
self.assertEqual(
str(boner),
"<html><body> a <p>simple</p> <b>test</b>. </body></html>")
| jurismarches/boned-html | boned_html/tests.py | Python | mit | 10,546 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Stdlib Imports
import datetime
import os
import re
# First Party Imports
import sickchill
from sickbeard import helpers, logger
from sickbeard.metadata import generic
from sickchill.helper.common import dateFormat, replace_extension
from sickchill.helper.encoding import ek
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class WDTVMetadata(generic.GenericMetadata):
    """
    Metadata generation class for WDTV

    The following file structure is used:

    show_root/folder.jpg (poster)
    show_root/Season ##/folder.jpg (season thumb)
    show_root/Season ##/filename.ext (*)
    show_root/Season ##/filename.metathumb (episode thumb)
    show_root/Season ##/filename.xml (episode metadata)
    """

    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        # Feature flags are stored by the generic base class; WDTV only
        # supports a subset of them (see the empty overrides below).
        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)

        self.name = 'WDTV'

        # Episode metadata is written next to the video file as <name>.xml.
        self._ep_nfo_extension = 'xml'

        self.poster_name = "folder.jpg"

        # web-ui metadata template
        self.eg_show_metadata = "<i>not supported</i>"
        self.eg_episode_metadata = "Season##\\<i>filename</i>.xml"
        self.eg_fanart = "<i>not supported</i>"
        self.eg_poster = "folder.jpg"
        self.eg_banner = "<i>not supported</i>"
        self.eg_episode_thumbnails = "Season##\\<i>filename</i>.metathumb"
        self.eg_season_posters = "Season##\\folder.jpg"
        self.eg_season_banners = "<i>not supported</i>"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"

    # Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        # no show metadata generated, we abort this lookup function
        return None, None, None

    def create_show_metadata(self, show_obj):
        pass

    def update_show_indexer_metadata(self, show_obj):
        pass

    def get_show_file_path(self, show_obj):
        pass

    def create_fanart(self, show_obj):
        pass

    def create_banner(self, show_obj):
        pass

    def create_season_banners(self, show_obj):
        pass

    def create_season_all_poster(self, show_obj):
        pass

    def create_season_all_banner(self, show_obj):
        pass

    @staticmethod
    def get_episode_thumb_path(ep_obj):
        """
        Returns the path where the episode thumbnail should be stored. Defaults to
        the same path as the episode file but with a .metathumb extension.

        Returns None if the episode file does not exist on disk.

        ep_obj: a TVEpisode instance for which to create the thumbnail
        """
        if ek(os.path.isfile, ep_obj.location):
            tbn_filename = replace_extension(ep_obj.location, 'metathumb')
        else:
            return None

        return tbn_filename

    @staticmethod
    def get_season_poster_path(show_obj, season):
        """
        Season thumbs for WDTV go in Show Dir/Season X/folder.jpg

        If no season folder exists, None is returned
        """

        # Only consider subdirectories of the show directory.
        dir_list = [x for x in ek(os.listdir, show_obj.location) if
                    ek(os.path.isdir, ek(os.path.join, show_obj.location, x))]

        season_dir_regex = r'^Season\s+(\d+)$'

        season_dir = None

        for cur_dir in dir_list:
            # Season 0 episodes live in a "Specials" directory instead of a
            # numbered "Season ##" directory.
            if season == 0 and cur_dir == "Specials":
                season_dir = cur_dir
                break

            match = re.match(season_dir_regex, cur_dir, re.I)
            if not match:
                continue

            cur_season = int(match.group(1))

            if cur_season == season:
                season_dir = cur_dir
                break

        if not season_dir:
            logger.log("Unable to find a season dir for season " + str(season), logger.DEBUG)
            return None

        logger.log("Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)

        return ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')

    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for a WDTV style episode.xml
        and returns the resulting data object.

        Returns None if the indexer lookup fails or required fields
        (episode name, first-aired date) are missing.

        ep_obj: a TVEpisode instance to create the XML for
        """

        # Multi-episode files produce one <details> node per episode.
        eps_to_write = [ep_obj] + ep_obj.relatedEps

        myShow = ep_obj.idxr.series_from_episode(ep_obj)

        rootNode = etree.Element("details")

        # write an WDTV XML containing info for all matching episodes
        for curEpToWrite in eps_to_write:

            myEp = curEpToWrite.idxr.episode(curEpToWrite)
            if not myEp:
                logger.log("Metadata writer is unable to find episode {0:d}x{1:d} of {2} on {3}..."
                           "has it been removed? Should I delete from db?".format(
                    curEpToWrite.season, curEpToWrite.episode, curEpToWrite.show.name, ep_obj.idxr.name))
                return None

            # Fall back to the locally stored air date when the indexer has
            # no firstAired value.
            if str(ep_obj.airdate) != str(datetime.date.fromordinal(1)) and not myEp.get('firstAired'):
                myEp["firstAired"] = str(ep_obj.airdate)

            # Without both an episode name and an air date, skip writing
            # metadata entirely.
            if not (myEp.get('episodeName') and myEp.get('firstAired')):
                return None

            if len(eps_to_write) > 1:
                episode = etree.SubElement(rootNode, "details")
            else:
                episode = rootNode

            if myEp.get('id'):
                episodeID = etree.SubElement(episode, "id")
                episodeID.text = str(myEp['id'])

            title = etree.SubElement(episode, "title")
            title.text = ep_obj.pretty_name()

            if getattr(myShow, 'seriesName', None):
                seriesName = etree.SubElement(episode, "series_name")
                seriesName.text = myShow.seriesName

            if curEpToWrite.name:
                episodeName = etree.SubElement(episode, "episode_name")
                episodeName.text = curEpToWrite.name

            seasonNumber = etree.SubElement(episode, "season_number")
            seasonNumber.text = str(curEpToWrite.season)

            episodeNum = etree.SubElement(episode, "episode_number")
            episodeNum.text = str(curEpToWrite.episode)

            # The firstAired element is always created; its text is only set
            # when the air date is known.
            firstAired = etree.SubElement(episode, "firstAired")

            if curEpToWrite.airdate != datetime.date.fromordinal(1):
                firstAired.text = str(curEpToWrite.airdate)

            if getattr(myShow, 'firstAired', None):
                try:
                    year_text = str(datetime.datetime.strptime(myShow.firstAired, dateFormat).year)
                    if year_text:
                        year = etree.SubElement(episode, "year")
                        year.text = year_text
                except Exception:
                    pass

            if curEpToWrite.season != 0 and getattr(myShow, 'runtime', None):
                runtime = etree.SubElement(episode, "runtime")
                runtime.text = myShow.runtime

            if getattr(myShow, 'genre', None):
                genre = etree.SubElement(episode, "genre")
                genre.text = " / ".join(myShow.genre)

            if myEp.get('directors') and isinstance(myEp['directors'], list):
                for director in myEp['directors']:
                    director_element = etree.SubElement(episode, "director")
                    director_element.text = director

            # Actor data comes from the indexer's series record, not from
            # the episode itself.
            data = ep_obj.idxr.actors(myShow)
            for actor in data:
                if not ('name' in actor and actor['name'].strip()):
                    continue

                cur_actor = etree.SubElement(episode, "actor")

                cur_actor_name = etree.SubElement(cur_actor, "name")
                cur_actor_name.text = actor['name']

                if 'role' in actor and actor['role'].strip():
                    cur_actor_role = etree.SubElement(cur_actor, "role")
                    cur_actor_role.text = actor['role'].strip()

            if curEpToWrite.description:
                overview = etree.SubElement(episode, "overview")
                overview.text = curEpToWrite.description

        # Make it purdy
        helpers.indentXML(rootNode)
        data = etree.ElementTree(rootNode)

        return data
# present a standard "interface" from the module: callers import the module
# and instantiate ``metadata_class`` without caring which metadata provider
# backs it.
metadata_class = WDTVMetadata
| coderbone/SickRage-alt | sickbeard/metadata/wdtv.py | Python | gpl-3.0 | 10,008 |
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata."""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of chunksize characters separated by a space.
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of chunksize characters separated by a space.
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')


def _escapify(qstring):
    """Escape the characters in a quoted string which need it."""

    # Normalize the input to a bytearray so iteration yields integers.
    if isinstance(qstring, text_type):
        qstring = qstring.encode()
    if not isinstance(qstring, bytearray):
        qstring = bytearray(qstring)

    pieces = []
    for byte in qstring:
        if byte in __escaped:
            # Quote and backslash must be backslash-escaped.
            pieces.append('\\' + chr(byte))
        elif 0x20 <= byte < 0x7F:
            # Printable ASCII passes through unchanged.
            pieces.append(chr(byte))
        else:
            # Everything else becomes a decimal \DDD escape.
            pieces.append('\\%03d' % byte)
    return ''.join(pieces)
def _truncate_bitmap(what):
    """Determine the index of greatest byte that isn't all zeros, and
    return the bitmap that contains all the bytes less than that index.
    """

    # Default to keeping just the first byte when everything is zero.
    last_nonzero = 0
    for idx in xrange(len(what) - 1, -1, -1):
        if what[idx] != 0:
            last_nonzero = idx
            break
    return what[0: last_nonzero + 1]
class Rdata(object):
    """Base class for all DNS rdata types."""

    # __slots__ keeps instances small; rdata objects are created in bulk
    # when parsing zones and messages.
    __slots__ = ['rdclass', 'rdtype']

    def __init__(self, rdclass, rdtype):
        """Initialize an rdata.

        *rdclass*, an ``int`` is the rdataclass of the Rdata.

        *rdtype*, an ``int`` is the rdatatype of the Rdata.
        """

        self.rdclass = rdclass
        self.rdtype = rdtype

    def covers(self):
        """Return the type a Rdata covers.

        DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function. If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).

        Returns an ``int``.
        """

        return dns.rdatatype.NONE

    def extended_rdatatype(self):
        """Return a 32-bit type value, the least significant 16 bits of
        which are the ordinary DNS type, and the upper 16 bits of which are
        the "covered" type, if any.

        Returns an ``int``.
        """

        return self.covers() << 16 | self.rdtype

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.

        Returns a ``text``.
        """

        # Abstract; each rdata type's subclass provides the implementation.
        raise NotImplementedError

    def to_wire(self, file, compress=None, origin=None):
        """Convert an rdata to wire format.

        Returns a ``binary``.
        """

        # Abstract; each rdata type's subclass provides the implementation.
        raise NotImplementedError

    def to_digestable(self, origin=None):
        """Convert rdata to a format suitable for digesting in hashes. This
        is also the DNSSEC canonical form.

        Returns a ``binary``.
        """

        f = BytesIO()
        # compress=None yields uncompressed names, as the canonical form
        # requires.
        self.to_wire(f, None, origin)
        return f.getvalue()

    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid.

        If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.

        Raises various exceptions if there are problems.

        Returns ``None``.
        """

        # Round-trip through the text parser; it raises if any field value
        # is invalid.
        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())

    def __repr__(self):
        # Include the covered type, e.g. "(NS)" for an RRSIG(NS), when
        # applicable.
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
               str(self) + '>'

    def __str__(self):
        # The str() form is the zone-file text representation.
        return self.to_text()

    def _cmp(self, other):
        """Compare an rdata with another rdata of the same rdtype and
        rdclass.

        Return < 0 if self < other in the DNSSEC ordering, 0 if self
        == other, and > 0 if self > other.
        """

        # DNSSEC ordering is defined over the canonical wire form.
        our = self.to_digestable(dns.name.root)
        their = other.to_digestable(dns.name.root)
        if our == their:
            return 0
        elif our > their:
            return 1
        else:
            return -1

    def __eq__(self, other):
        # Rdata of different class or type are simply unequal (not an
        # error), so equality never raises.
        if not isinstance(other, Rdata):
            return False
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        if not isinstance(other, Rdata):
            return True
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return True
        return self._cmp(other) != 0

    def __lt__(self, other):
        # Ordering comparisons are only defined between rdata of the same
        # class and type; otherwise defer via NotImplemented.
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:

            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) > 0

    def __hash__(self):
        # Hash the DNSSEC canonical wire form so rdata that compare equal
        # hash equally.
        return hash(self.to_digestable(dns.name.root))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        # Abstract; each rdata type's subclass provides the implementation.
        raise NotImplementedError

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # Abstract; each rdata type's subclass provides the implementation.
        raise NotImplementedError

    def choose_relativity(self, origin=None, relativize=True):
        """Convert any domain names in the rdata to the specified
        relativization.
        """

        # The base class holds no names, so there is nothing to do.
        pass
class GenericRdata(Rdata):

    """Generic Rdata Class

    This class is used for rdata types for which we have no better
    implementation. It implements the DNS "unknown RRs" scheme:
    the text form is ``\\# <length> <hex data>``.
    """

    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        """*data*, a ``binary``, is the raw wire-format rdata."""
        super(GenericRdata, self).__init__(rdclass, rdtype)
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # Unknown-RR generic syntax: backslash-hash, length, hex dump.
        return r'\# %d ' % len(self.data) + _hexify(self.data)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse the generic ``\\# <length> <hex>`` syntax from *tok*.

        Raises ``dns.exception.SyntaxError`` if the input does not start
        with ``\\#`` or the hex data does not match the declared length.
        """
        token = tok.get()
        # Fix: use a raw string for the backslash-hash literal; '\#' is an
        # invalid escape sequence and raises a DeprecationWarning on
        # modern Pythons.
        if not token.is_identifier() or token.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        length = tok.get_int()
        chunks = []
        while True:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            chunks.append(token.value.encode())
        # The hex data may be split across several whitespace-separated
        # tokens; reassemble before decoding.  (Renamed from 'hex' to avoid
        # shadowing the builtin.)
        hex_data = b''.join(chunks)
        data = binascii.unhexlify(hex_data)
        if len(data) != length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)

    def to_wire(self, file, compress=None, origin=None):
        # The stored data is already in wire format.
        file.write(self.data)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # The raw bytes are kept verbatim; no further parsing is possible.
        return cls(rdclass, rdtype, wire[current: current + rdlen])
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'


def get_rdata_class(rdclass, rdtype):
    """Return the class implementing rdata of the given class and type.

    The implementation module is imported dynamically from the
    ``dns.rdtypes`` package (first the class-specific package, then the
    class-independent ``ANY`` package) and cached in ``_rdata_modules``.
    Falls back to ``GenericRdata`` when no implementation exists.
    """

    def import_module(name):
        # __import__ returns the top-level package; walk down the dotted
        # path to reach the leaf module.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Fix: the class-independent fallback entry is stored below under
        # (dns.rdataclass.ANY, rdtype), so look it up under the same key.
        # The original used dns.rdatatype.ANY here, which only worked
        # because both ANY constants happen to equal 255.
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.

    Dynamically locates the class implementing the specified rdata class
    and type (falling back to ``GenericRdata``) and delegates to its
    ``from_text()`` class method.

    If *tok* is a ``text``, then a tokenizer is created and the string
    is used as its input.

    *rdclass*, an ``int``, the rdataclass.

    *rdtype*, an ``int``, the rdatatype.

    *tok*, a ``dns.tokenizer.Tokenizer`` or a ``text``.

    *origin*, a ``dns.name.Name`` (or ``None``), the
    origin to use for relative names.

    *relativize*, a ``bool``. If true, name will be relativized to
    the specified origin.

    Returns an instance of the chosen Rdata subclass.
    """

    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # Peek at the first token: a known type may still be written in
        # the generic "unknown RR" syntax (\# <length> <hex>).  If so,
        # extract the raw wire form from the generic syntax and reparse it
        # with the real implementation.
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and token.value == r'\#':
            generic_rdata = GenericRdata.from_text(rdclass, rdtype, tok,
                                                   origin, relativize)
            raw = generic_rdata.data
            return from_wire(rdclass, rdtype, raw, 0, len(raw), origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format.

    Dynamically locates the class implementing the specified rdata class
    and type (falling back to ``GenericRdata``) and delegates to its
    ``from_wire()`` class method.

    *rdclass*, an ``int``, the rdataclass.

    *rdtype*, an ``int``, the rdatatype.

    *wire*, a ``binary``, the wire-format message.

    *current*, an ``int``, the offset in wire of the beginning of
    the rdata.

    *rdlen*, an ``int``, the length of the wire-format rdata

    *origin*, a ``dns.name.Name`` (or ``None``). If not ``None``,
    then names will be relativized to this origin.

    Returns an instance of the chosen Rdata subclass.
    """

    wrapped = dns.wiredata.maybe_wrap(wire)
    implementation = get_rdata_class(rdclass, rdtype)
    return implementation.from_wire(rdclass, rdtype, wrapped, current,
                                    rdlen, origin)
| pbaesse/Sissens | lib/python2.7/site-packages/eventlet/support/dns/rdata.py | Python | gpl-3.0 | 13,071 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class LoginConfig(AppConfig):
    """Django application configuration for the ``login`` app."""

    # Full Python path to the application package.
    name = 'login'
| InspectorIncognito/visualization | login/apps.py | Python | gpl-3.0 | 150 |
"""
make_base_installers.py --
Build the installers for one or more of the supported operating systems,
depending on options given. Runs on Linux systems only.
Usage: python make_base_installers.py m|l|w|i|d|a|t path/to/trunk/ pubkey
privkey output/dir/ [version of seattle]
[--wg path/to/Windows/GUI/builder/makensis.exe]
Flags: m,l,w,i,a,d,t represent the OS for which the base installer is being
created. m = Macintosh, l = Linux, w = Windows, i = Windows Mobile,
d = Android, a = all systems. t = include tests in installer.
NOTE: The Windows GUI installer will ONLY be built if the 'w' or 'a' options
are passed ALONG WITH the '--wg' option.
Example of usage on command line:
python ./Seattle/trunk/dist/make_base_installers.py a ./Seattle/trunk/
user.publickey user.privatekey ./Installers/ 1.0a
"""
import os
import sys
import shutil
import subprocess
import tempfile
import zipfile
import tarfile
import clean_folder
# The name of the base directory in each installer.
BASE_INSTALL_DIR = "seattle"
BASE_PROGRAM_FILES_DIR = "seattle/seattle_repy"
# The base name of each installer = for instance, "seattle_win.zip"
INSTALLER_NAME = "seattle"

# The path to the directory, relative the trunk, of the OS-specific files.
WINDOWS_GUI_PATH = "/dist/win_gui"
WINDOWS_PATH = "/dist/win/scripts"
WINMOB_PATH = "/dist/winmob/scripts"
LINUX_PATH = "/dist/linux/scripts"
MAC_PATH = "/dist/mac/scripts"

# The path to the directory, relative the trunk, of the OS-specific script
# wrappers.
WINDOWS_SCRIPT_WRAPPERS_PATH = "/dist/script_wrappers/win"
LINUX_SCRIPT_WRAPPERS_PATH = "/dist/script_wrappers/linux"
MAC_SCRIPT_WRAPPERS_PATH = "/dist/script_wrappers/mac"

# The path to the Windows GUI builder.
WINDOWS_GUI_BUILDER_PATH = ""


def get_inst_name(dist, version):
    """
    <Purpose>
      Given the OS and the version, returns what the name of the installer
      will be.

    <Arguments>
      dist:
        The OS that the installer is intended for, should be Windows,
        Macintosh, Linux, Winmob, or Android.
      version:
        A string to be appended between the dist and the extension - for
        instance, if version is "0.1d", then the Linux installer name will
        be "seattle_0.1d_linux.tgz".  May be empty, in which case it (and
        its surrounding underscore) is omitted.

    <Exceptions>
      None.

    <Side Effects>
      None.

    <Returns>
      A string of the installer name for the specified OS and version.
    """
    if version:
        base_name = INSTALLER_NAME + "_" + version + "_" + dist
    else:
        base_name = INSTALLER_NAME + "_" + dist

    # Fix: normalize case once instead of hand-enumerating "win"/"Win"/"WIN"
    # (which missed mixed-case spellings, and left the "android" and "gui"
    # checks inconsistently case-sensitive).
    dist_lower = dist.lower()
    if "win" in dist_lower:
        # The Windows GUI installer is an NSIS executable; every other
        # Windows flavor (including Windows Mobile) ships as a zipfile.
        if "gui" in dist_lower:
            base_name += ".exe"
        else:
            base_name += ".zip"
    elif "android" in dist_lower:
        base_name += ".zip"
    else:
        # Linux and Macintosh installers are gzipped tarballs.
        base_name += ".tgz"

    return base_name
def check_flags(flags):
    """
    <Purpose>
      Checks that each character in 'flags' is a valid flag and that there
      is at least one valid flag (i.e., m,w,l,i,d,a).

    <Arguments>
      flags:
        String containing the flags passed in by the user.

    <Exceptions>
      None.

    <Side Effects>
      None.

    <Returns>
      A tuple (ok, bad):  'ok' is True only when every flag is recognized
      and at least one required (OS-selecting) flag appears; 'bad' holds
      the unrecognized flag characters, de-duplicated, in first-seen order
      (empty when all flags are recognized).
    """
    valid_flags = "mwlidat"
    required_flags = "mwliad"

    invalid_seen = ""
    has_required_flag = False

    for flag in flags:
        if flag in valid_flags:
            # 't' (tests) alone is not enough; an OS flag must be present.
            if flag in required_flags:
                has_required_flag = True
        elif flag not in invalid_seen:
            invalid_seen += flag

    success = has_required_flag and not invalid_seen
    return (bool(success), invalid_seen)
def prepare_gen_files(trunk_location, temp_install_dir, include_tests, pubkey, privkey, finalfiles):
    """
    <Purpose>
      Prepare the general non-installer-specific files (needed for all
      installers) and deposit them into the temporary folder designated to
      hold the files that will be present in the base installer(s),
      including the metainfo file.

    <Arguments>
      trunk_location:
        The path to the trunk of the repository, used to find all the
        requisite files that appear in the installer.
      temp_install_dir:
        The temporary directory where the general files to be included in
        the installer will be placed.
      include_tests:
        Boolean variable specifying whether or not to include tests in the
        installer.
      pubkey:
        The path to a public key that will be used to generate the
        metainfo file.
      privkey:
        The path to a private key that will be used to generate the
        metainfo file.
      finalfiles:
        Boolean variable specifying whether or not to prepare the final
        files after the metafile has been written.

    <Exceptions>
      IOError on bad file paths.

    <Side Effects>
      All general non-installer-specific files placed into the specified
      temporary installation directory.

    <Returns>
      List of all the files in the temporary installation directory, which
      will be added to the installer tarball.
    """
    # Run preparetest to generate and place all the general installation
    # files in the temporary installation directory.

    # To run /trunk/preparetest.py, we must be in that directory (probably
    # a bug in preparetest.py?)
    original_dir = os.getcwd()
    preparetest_dir = trunk_location + os.sep + "dist"
    os.chdir(preparetest_dir)
    try:
        # Build the command once instead of duplicating the Popen call for
        # the with-tests and without-tests cases.
        command = [sys.executable, preparetest_dir + os.sep + "preparetest.py"]
        if include_tests:
            command.append("-t")
        command.append(temp_install_dir)
        p = subprocess.Popen(command)
        p.wait()
    finally:
        # Fix: always restore the caller's working directory, even if
        # preparetest fails, so an error here cannot corrupt the cwd for
        # the rest of the build.
        os.chdir(original_dir)

    # Copy the benchmarking scripts to the installer directory.
    shutil.copy2(trunk_location + "/resource/benchmark_resources.py",
                 temp_install_dir)
    shutil.copy2(trunk_location + "/resource/Mac_BSD_resources.py",
                 temp_install_dir)
    shutil.copy2(trunk_location + "/resource/create_installer_state.py",
                 temp_install_dir)
    shutil.copy2(trunk_location + "/resource/measuredisk.py", temp_install_dir)
    shutil.copy2(trunk_location + "/resource/vessel.restrictions",
                 temp_install_dir)
    shutil.copy2(trunk_location + "/resource/Linux_resources.py",
                 temp_install_dir)
    shutil.copy2(trunk_location + "/resource/measure_random.py",
                 temp_install_dir)
    shutil.copy2(trunk_location + "/resource/Win_WinCE_resources.py",
                 temp_install_dir)

    # Copy the universal installer and uninstaller to the program directory.
    shutil.copy2(trunk_location + "/dist/seattleinstaller.py", temp_install_dir)
    shutil.copy2(trunk_location + "/dist/seattleuninstaller.py", temp_install_dir)

    # Copy the script that stops all running seattle processes.
    shutil.copy2(trunk_location + "/dist/stop_all_seattle_processes.py",
                 temp_install_dir)

    # Copy the script that will update old crontab entries on Linux and
    # Darwin systems to the new 2009 seattle crontab entry.  This must
    # remain in the installer indefinitely (or at least for a while) in the
    # event that a user installed seattle with the previous, old crontab
    # entry, then lost permission to modify his crontab.  In the event that
    # he regains permission to modify his crontab, the previously installed
    # crontab entry must be updated.
    shutil.copy2(trunk_location + "/dist/update_crontab_entry.py",
                 temp_install_dir)

    # Clean the folder of unnecessary files before generating the metafile.
    clean_folder.clean_folder(trunk_location + "/dist/initial_files.fi",
                              temp_install_dir)

    # To run writemetainfo.py, we must be in that directory (probably a bug
    # in writemetainfo.py?)
    os.chdir(temp_install_dir)
    try:
        # Generate the metainfo file.
        p = subprocess.Popen([sys.executable, temp_install_dir + os.sep +
                              "writemetainfo.py", privkey, pubkey, "-n"])
        p.wait()
    finally:
        os.chdir(original_dir)

    # If specified, copy remaining files that should not be included in the
    # metafile into the temporary installation directory.
    if finalfiles:
        # Copy the static files to the program directory.
        shutil.copy2(trunk_location + "/dist/nodeman.cfg", temp_install_dir)
        shutil.copy2(trunk_location + "/dist/resources.offcut", temp_install_dir)

        # Run clean_folder a final time to ensure the final directory
        # contains all the necessary files now that the last files have
        # been added.
        clean_folder.clean_folder(trunk_location + "/dist/final_files.fi",
                                  temp_install_dir)

    return os.listdir(temp_install_dir)
def package_win_gui(trunk_location, temp_tarball_dir, zip_inst_name, gui_inst_name):
    """
    <Purpose>
      Packages the installation files for Windows into a GUI executable
      file and adds the specific installation scripts for this OS.

      This function extracts the contents of the already-created Windows
      zipfile installer because the zipfile installer contains special
      Windows files that are not located anywhere else in the trunk.

    <Arguments>
      trunk_location:
        The location of the repository trunk.
      temp_tarball_dir:
        The path to the directory in which the installer executable will
        be stored.
      zip_inst_name:
        The name of the Windows zipfile installer.
      gui_inst_name:
        The name that the Windows GUI executable file will have.

    <Exceptions>
      IOError on bad file paths.

    <Side Effects>
      Puts the final executable in the temporary tarball directory.

    <Returns>
      None.
    """
    # Create a subdirectory where the GUI installer will be created, and
    # copy all necessary files there.
    win_gui_location = tempfile.mkdtemp()
    try:
        shutil.copy(trunk_location + os.sep + WINDOWS_GUI_PATH + os.sep +
                    "seattle_gui_creator.nsi", win_gui_location)

        # Extract the zipfile to the win_gui_location to get all the
        # contents that will be compressed into the Windows gui installer.
        # Fix: close the zipfile when done (it was previously leaked).
        installer_zipfile = zipfile.ZipFile(
            temp_tarball_dir + os.sep + zip_inst_name, 'r',
            zipfile.ZIP_DEFLATED)
        try:
            installer_zipfile.extractall(win_gui_location)
        finally:
            installer_zipfile.close()

        shutil.copy(trunk_location + os.sep + "dist" + os.sep +
                    "extract_custom_info.py", win_gui_location + os.sep +
                    "seattle" + os.sep + "seattle_repy")

        # Change directories to win_gui_location because the Windows gui
        # creator will not work when full file paths are passed in as
        # arguments for some reason.
        original_dir = os.getcwd()
        os.chdir(win_gui_location)
        try:
            # Create the Win GUI executable with the Windows GUI builder
            # (makensis.exe) via subprocess.
            gui_creator = subprocess.Popen(
                [WINDOWS_GUI_BUILDER_PATH, "seattle_gui_creator.nsi"],
                stdout=subprocess.PIPE)
            # The communicate() function must be called to prevent the
            # subprocess call above from deadlocking.
            gui_creator.communicate()
            gui_creator.wait()

            # The Windows GUI builder script has a built-in name that it
            # gives to the installer (seattle_win_gui.exe), so rename this
            # file to gui_inst_name.
            os.rename("seattle_win_gui.exe", gui_inst_name)
        finally:
            # Fix: restore the original working directory even if the GUI
            # builder fails.
            os.chdir(original_dir)

        # Put the new GUI installer into the temp_tarball_dir with the
        # other installers.
        shutil.copy(win_gui_location + os.sep + gui_inst_name, temp_tarball_dir)
    finally:
        # Fix: remove the temporary GUI installer directory even on failure.
        shutil.rmtree(win_gui_location)
def package_win_or_winmob(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files):
    """
    <Purpose>
      Packages the installation files for Windows or Windows Mobile into a zipfile
      and adds the specific installation scripts for this OS.
    <Arguments>
      trunk_location:
        The location of the repository trunk.
      temp_install_dir:
        The path to the temporary installation directory.
      temp_tarball_dir:
        The path to the directory in which the installer zipfile(s) is stored.
      inst_name:
        The name that the final installer should have.
      gen_files:
        A list of the general non-installer-specific files located in the
        temporary installer directory.
    <Exceptions>
      IOError on bad file paths.
    <Side Effects>
      Puts the final zipfile in the temporary tarball directory.
    <Returns>
      None.
    """
    # Open the Windows zipfile for appending (it starts from a pre-built
    # partial zip containing the Windows-only binaries), or create a fresh
    # zipfile for Windows Mobile.
    if not "winmob" in inst_name:
        shutil.copy2(trunk_location + "/dist/win/partial_win.zip",
                     temp_tarball_dir + os.sep + inst_name)
        installer_zipfile = zipfile.ZipFile(temp_tarball_dir + os.sep + inst_name,
                                            "a", zipfile.ZIP_DEFLATED)
    else:
        installer_zipfile = zipfile.ZipFile(temp_tarball_dir + os.sep + inst_name,
                                            "w", zipfile.ZIP_DEFLATED)
    # Put all general program files into zipfile.
    for fname in gen_files:
        if os.path.isdir(temp_install_dir + os.sep + fname):
            write_files_in_dir_to_zipfile(temp_install_dir + os.sep + fname,
                BASE_PROGRAM_FILES_DIR + os.sep + fname + os.sep, installer_zipfile)
        else:
            installer_zipfile.write(temp_install_dir + os.sep + fname,
                                    BASE_PROGRAM_FILES_DIR + os.sep + fname)
    # Put all files specific to this installer into zipfile.
    # First, copy all scripts that belong in the BASE_PROGRAM_FILES_DIR.
    if not "winmob" in inst_name:
        specific_installer_dir = trunk_location + os.sep + WINDOWS_PATH
    else:
        specific_installer_dir = trunk_location + os.sep + WINMOB_PATH
    specific_files = os.listdir(specific_installer_dir)
    # Add OS-specific files to the zipfile.
    for fname in specific_files:
        if not fname.startswith(".") and fname != "manifest.txt":
            # Add the README and LICENSE files to the highest-level directory
            # (BASE_INSTALL_DIR).
            if "LICENSE" in fname or "README" in fname:
                installer_zipfile.write(specific_installer_dir + os.sep + fname,
                                        BASE_INSTALL_DIR + os.sep + fname)
            else:
                installer_zipfile.write(specific_installer_dir + os.sep + fname,
                                        BASE_PROGRAM_FILES_DIR + os.sep + fname)
    # Second, copy all script wrappers (which call those in the
    # BASE_PROGRAM_FILES_DIR) to the BASE_INSTALL_DIR.  Windows Mobile has no
    # script wrappers, so this step only applies to the desktop installer.
    if not "winmob" in inst_name:
        script_wrappers_dir = trunk_location + os.sep + WINDOWS_SCRIPT_WRAPPERS_PATH
        script_wrappers = os.listdir(script_wrappers_dir)
        # Add script wrappers to the zipfile.
        for fname in script_wrappers:
            if not fname.startswith("."):
                installer_zipfile.write(script_wrappers_dir + os.sep + fname,
                                        BASE_INSTALL_DIR + os.sep + fname)
    # BUG FIX: the archive must be closed on *every* path.  The original code
    # returned early for Windows Mobile without calling close(), which leaves
    # the zipfile without its central directory (i.e. a corrupt archive).
    installer_zipfile.close()
def write_files_in_dir_to_zipfile(sourcepath, arcpath, zipfile, files=None, skipfiles=None):
    """
    <Purpose>
      Inserts the files in the given directory (recursively) into the
      specified zipfile.
    <Arguments>
      sourcepath:
        The source path of the files to add.
      arcpath:
        The zip file's internal destination path to write to.
      zipfile:
        The zip file to write to.  NOTE: this parameter name shadows the
        'zipfile' module; only the ZipFile instance's write() method is used
        inside this function, so the shadowing is harmless here.
      files:
        If specified, only these files are copied. Only files in the immediate
        directory can be specified.
      skipfiles:
        If specified, these files will be skipped. Only files in the immediate
        directory can be skipped.
    <Side Effects>
      Copies the files that are in sourcepath to arcpath in the zipfile. If files
      is specified, then only those files are copied.
    <Exceptions>
      None
    <Return>
      None
    """
    # BUG FIX: the docstring always documented 'files' and 'skipfiles', but
    # the parameters did not exist.  They are now implemented as optional,
    # backward-compatible arguments (omitting both preserves the old
    # behavior exactly).
    if files is None:
        files = os.listdir(sourcepath)
    if skipfiles is None:
        skipfiles = []
    for fname in files:
        if fname in skipfiles:
            continue
        sourcefilepath = sourcepath + os.sep + fname
        targetfilepath = arcpath + os.sep + fname
        if os.path.isfile(sourcefilepath):
            zipfile.write(sourcefilepath, targetfilepath)
        else:
            # Recurse into subdirectories.  The files/skipfiles filters apply
            # only to the immediate directory, so they are not propagated.
            write_files_in_dir_to_zipfile(sourcefilepath, targetfilepath, zipfile)
def package_linux_or_mac(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files):
    """
    <Purpose>
      Packages the installation files specific to Linux or Macintosh into a
      tarball and adds the specific installation scripts for this OS.
    <Arguments>
      trunk_location:
        The location of the repository trunk.
      temp_install_dir:
        The path to the temporary installation directory.
      temp_tarball_dir:
        The path to the directory in which the installer tarball(s) is stored.
      inst_name:
        The name that the final installer should have.
      gen_files:
        A list of the general non-installer-specific files located in the
        temporary installer directory.
    <Exceptions>
      IOError on bad file paths.
    <Side Effects>
      Puts the final tarball in the temporary tarball directory.
    <Returns>
      None.
    """
    archive = tarfile.open(temp_tarball_dir + os.sep + inst_name, "w:gz")
    # General (OS-independent) program files, added recursively.  pyreadline
    # is a Windows-only dependency and is left out of the POSIX installers.
    for entry in gen_files:
        if entry not in ['pyreadline']:
            archive.add(temp_install_dir + os.sep + entry,
                        BASE_PROGRAM_FILES_DIR + os.sep + entry, True)
    # OS-specific scripts.  README/LICENSE go to the top-level install
    # directory; everything else lands in the program files directory.
    if "linux" in inst_name:
        os_specific_dir = trunk_location + os.sep + LINUX_PATH
    else:
        os_specific_dir = trunk_location + os.sep + MAC_PATH
    for entry in os.listdir(os_specific_dir):
        if entry.startswith(".") or entry == "manifest.txt":
            continue
        if "README" in entry or "LICENSE" in entry:
            archive.add(os_specific_dir + os.sep + entry,
                        BASE_INSTALL_DIR + os.sep + entry, False)
        else:
            archive.add(os_specific_dir + os.sep + entry,
                        BASE_PROGRAM_FILES_DIR + os.sep + entry, False)
    # Script wrappers (thin launchers for the scripts above) are placed in
    # the top-level install directory.
    if "linux" in inst_name:
        wrappers_dir = trunk_location + os.sep + LINUX_SCRIPT_WRAPPERS_PATH
    else:
        wrappers_dir = trunk_location + os.sep + MAC_SCRIPT_WRAPPERS_PATH
    for entry in os.listdir(wrappers_dir):
        if not entry.startswith("."):
            archive.add(wrappers_dir + os.sep + entry,
                        BASE_INSTALL_DIR + os.sep + entry, False)
    archive.close()
def package_android(trunk_location, temp_install_dir, temp_tarball_dir, inst_name, gen_files):
    """
    <Purpose>
      Packages the installation files specific to Android into a zipfile and
      adds the specific installation scripts for this OS.
      THIS IS CUT AND PASTED FROM ABOVE WITH ONLY MINOR CHANGES. NEEDS REFACTOR!
    <Arguments>
      trunk_location:
        The location of the repository trunk.
      temp_install_dir:
        The path to the temporary installation directory.
      temp_tarball_dir:
        The path to the directory in which the installer zipfile(s) is stored.
      inst_name:
        The name that the final installer should have.
      gen_files:
        A list of the general non-installer-specific files located in the
        temporary installer directory.
    <Exceptions>
      IOError on bad file paths.
    <Side Effects>
      Puts the final zipfile in the temporary tarball directory.
    <Returns>
      None.
    """
    archive = zipfile.ZipFile(temp_tarball_dir + os.sep + inst_name,
                              "w", zipfile.ZIP_DEFLATED)
    # General program files (directories are copied recursively).  pyreadline
    # is Windows-only and is excluded from the Android installer.
    for entry in gen_files:
        if os.path.isdir(temp_install_dir + os.sep + entry):
            if entry not in ['pyreadline']:
                write_files_in_dir_to_zipfile(
                    temp_install_dir + os.sep + entry,
                    BASE_PROGRAM_FILES_DIR + os.sep + entry + os.sep,
                    archive)
        else:
            archive.write(temp_install_dir + os.sep + entry,
                          BASE_PROGRAM_FILES_DIR + os.sep + entry)
    # Android reuses the Linux-specific scripts.  README/LICENSE go to the
    # top-level install directory; the rest to the program files directory.
    os_specific_dir = trunk_location + os.sep + LINUX_PATH
    for entry in os.listdir(os_specific_dir):
        if entry.startswith(".") or entry == "manifest.txt":
            continue
        if "README" in entry or "LICENSE" in entry:
            archive.write(os_specific_dir + os.sep + entry,
                          BASE_INSTALL_DIR + os.sep + entry)
        else:
            archive.write(os_specific_dir + os.sep + entry,
                          BASE_PROGRAM_FILES_DIR + os.sep + entry)
    # Script wrappers (also shared with Linux) go to BASE_INSTALL_DIR.
    wrappers_dir = trunk_location + os.sep + LINUX_SCRIPT_WRAPPERS_PATH
    for entry in os.listdir(wrappers_dir):
        if not entry.startswith("."):
            archive.write(wrappers_dir + os.sep + entry,
                          BASE_INSTALL_DIR + os.sep + entry)
    archive.close()
def test_arguments(arguments):
"""
Check that the arguments supplied on the command line make sense.
"""
# Test argument flags
if len(arguments) < 6:
print "Too few arguments."
return False
elif len(arguments) > 9:
print "Too many arguments."
return False
flags = arguments[1]
passed, offenses = check_flags(flags)
if not passed:
if offenses == "":
print "Requires at least one of these flags: m,l,w,i,d,a"
else:
print "Invalid flag(s): " + offenses
return False
# Validate the existence of argument's paths and files
trunkdir, pubkey, privkey, outdir = arguments[2:6]
if not os.path.exists(trunkdir):
raise IOError("Trunk not found at " + trunkdir)
if not os.path.exists(outdir):
raise IOError("Output directory does not exist at " + outdir)
if not os.path.exists(pubkey):
raise IOError("Public key not found at " + pubkey)
if not os.path.exists(privkey):
raise IOError("Private key not found at " + privkey)
# All arguments are valid.
return True
def usage():
print """
USAGE: python make_base_installer.py m|l|w|i|d|a|t path/to/trunk/
pubkey privkey output/dir/ [version of seattle]
[--wg path/to/Windows/GUI/builder/makensis.exe]
FLAGS:
m,l,w,i,d,a,t represent the OS for which the base installer
is being created. m = Macintosh, l = Linux, w = Windows,
i = Windows Mobile, d = Android, a = all systems;
t = include tests in installer.
NOTE: The Windows GUI installer will ONLY be built if the 'w' or
'a' options are passed ALONG WITH the '--wg' option."
"""
def main():
    """
    Entry point: validate the command line, build the requested base
    installer(s) in temporary directories, and copy the results into the
    output directory.  See usage() for the expected sys.argv layout:
    [script, flags, trunk, pubkey, privkey, outdir, (version),
    (--wg, makensis-path)].
    """
    # Prepare to create installer(s).
    # Test arguments and find full pathnames.
    arguments_valid = test_arguments(sys.argv)
    if not arguments_valid:
        usage()
        return
    # Reaching this point means all arguments are valid, so set the variables and
    # get full pathnames when necessary.
    # NOTE: IF MORE OPTIONS ARE EVER ADDED TO THIS PROGRAM, CONSIDER USING THE
    # PYTHON MODULE getopt TO PARSE THE OPTIONS SINCE THE BELOW LOGIC WILL
    # START TO GET REALLY COMPLICATED.
    installer_type = sys.argv[1]
    trunk_location = os.path.realpath(sys.argv[2])
    output_dir = os.path.realpath(sys.argv[5])
    pubkey = os.path.realpath(sys.argv[3])
    privkey = os.path.realpath(sys.argv[4])
    version = ""
    # Figure out if the optional version number or the path to the Windows GUI
    # builder was passed in.
    if len(sys.argv) > 6:
        if len(sys.argv) == 7:
            # Only one extra option was passed, so it must be the version number.
            version = sys.argv[6]
            if version == "--wg":
                print "Windows GUI builder path not specified"
                usage()
                return
        else:
            # The GUI builder path is kept in a module-level global so the
            # packaging helpers can read it.
            global WINDOWS_GUI_BUILDER_PATH
            if sys.argv[6] == "--wg":
                # The path to the Windows GUI builder was passed in.
                if len(sys.argv) == 7:
                    # The path was not given with the "--wg" option.
                    # NOTE(review): this branch is unreachable -- we only get
                    # here when len(sys.argv) > 7 (outer else of the == 7 test).
                    usage()
                    return
                elif len(sys.argv) > 8:
                    # The version number was also given.
                    version = sys.argv[8]
                WINDOWS_GUI_BUILDER_PATH = sys.argv[7]
            else:
                # The version must have been given before the path to the Windows GUI
                # builder if the path was given at all.
                version = sys.argv[6]
                if sys.argv[7] != "--wg":
                    # An extraneous option must have been given.
                    usage()
                    return
                else:
                    WINDOWS_GUI_BUILDER_PATH = sys.argv[8]
    if WINDOWS_GUI_BUILDER_PATH:
        # Confirm that the path exists.
        if not os.path.lexists(WINDOWS_GUI_BUILDER_PATH):
            print "Invalid path to the Windows GUI builder: ",
            print WINDOWS_GUI_BUILDER_PATH
            print "Failed to build installers."
            return
        else:
            # Get full file path.
            WINDOWS_GUI_BUILDER_PATH = os.path.realpath(WINDOWS_GUI_BUILDER_PATH)
    # Begin creating base installers.
    print "Creating installer(s) - this may take a few moments...."
    # Create temporary directory for the files to go into the installer.
    temp_install_dir = tempfile.mkdtemp()
    # Create temporary directory for creating the tarball(s) / zipfile(s).
    temp_tarball_dir = tempfile.mkdtemp()
    # Prepare all general non-installer-specific files to go into installer.
    print "Preparing all general non-OS-specific files...."
    include_tests = False
    if "t" in installer_type:
        include_tests = True
    gen_files = prepare_gen_files(trunk_location, temp_install_dir,
                                  include_tests, pubkey, privkey, True)
    print "Complete."
    # Build individual installer(s).
    print "Customizing installer(s) for the specified operating system(s)...."
    created_installers = []
    # Package the Windows installer.
    if "w" in installer_type or "a" in installer_type:
        inst_name = get_inst_name("win", version)
        package_win_or_winmob(trunk_location, temp_install_dir, temp_tarball_dir,
                              inst_name, gen_files)
        created_installers.append(inst_name)
        # See if we need to create the Windows GUI installer
        # (it is derived from the zip installer built just above).
        if WINDOWS_GUI_BUILDER_PATH:
            inst_name_gui = get_inst_name("win_gui", version)
            package_win_gui(trunk_location, temp_tarball_dir, inst_name,
                            inst_name_gui)
            created_installers.append(inst_name_gui)
    # Package the Linux installer.
    if "l" in installer_type or "a" in installer_type:
        inst_name = get_inst_name("linux", version)
        package_linux_or_mac(trunk_location, temp_install_dir, temp_tarball_dir,
                             inst_name, gen_files)
        created_installers.append(inst_name)
    # Package the Mac installer.
    if "m" in installer_type or "a" in installer_type:
        inst_name = get_inst_name("mac", version)
        package_linux_or_mac(trunk_location, temp_install_dir, temp_tarball_dir,
                             inst_name, gen_files)
        created_installers.append(inst_name)
    # Package the Windows Mobile installer.
    if "i" in installer_type or "a" in installer_type:
        inst_name = get_inst_name("winmob", version)
        package_win_or_winmob(trunk_location, temp_install_dir, temp_tarball_dir,
                              inst_name, gen_files)
        created_installers.append(inst_name)
    # Package the Android installer.
    if "d" in installer_type or "a" in installer_type:
        inst_name = get_inst_name("android", version)
        package_android(trunk_location, temp_install_dir, temp_tarball_dir,
                        inst_name, gen_files)
        created_installers.append(inst_name)
    # Move the installer tarball(s) / zipfile(s) to the specified output
    # directory.
    for tarball in os.listdir(temp_tarball_dir):
        shutil.copy2(temp_tarball_dir + os.sep + tarball, output_dir)
    # Remove the temporary directories
    shutil.rmtree(temp_install_dir)
    shutil.rmtree(temp_tarball_dir)
    print
    print "Finished."
    print
    print "The following base installers have been placed in " + output_dir + ":"
    for installer in created_installers:
        print installer
# Script entry point.
if __name__ == "__main__":
    main()
| SeattleTestbed/dist | make_base_installers.py | Python | mit | 28,125 |
from django import forms
from cyder.cydns.forms import DNSForm
from cyder.cydns.ptr.models import PTR
from cyder.cydhcp.forms import RangeWizard
from cyder.base.mixins import UsabilityFormMixin
class PTRForm(DNSForm, RangeWizard, UsabilityFormMixin):
    """Form for creating and editing PTR (reverse DNS) records."""

    def __init__(self, *args, **kwargs):
        super(PTRForm, self).__init__(*args, **kwargs)
        # Present the fields in a fixed, user-friendly order.
        self.fields.keyOrder = [
            'fqdn', 'vrf', 'site', 'range', 'ip_type', 'next_ip',
            'ip_str', 'views', 'ttl', 'description', 'ctnr',
        ]

    def delete_instance(self, instance):
        instance.delete()

    class Meta:
        model = PTR
        exclude = ('ip', 'reverse_domain', 'ip_upper', 'ip_lower')
        widgets = {
            'ip_type': forms.RadioSelect,
            'views': forms.CheckboxSelectMultiple,
        }
| OSU-Net/cyder | cyder/cydns/ptr/forms.py | Python | bsd-3-clause | 848 |
# -*- coding: utf-8 -*-
# (c) 2015 Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
class TestSaleOrderPriceRecalculation(common.TransactionCase):
    """Checks that recalculate_prices() refreshes the unit price of an
    order line while leaving the ordered quantities untouched."""

    def setUp(self):
        super(TestSaleOrderPriceRecalculation, self).setUp()
        self.sale_order_model = self.env['sale.order']
        self.sale_order_line_model = self.env['sale.order.line']
        self.partner = self.env.ref('base.res_partner_3')
        self.product = self.env.ref('product.product_product_4')
        # Seed the order header from the partner onchange defaults.
        order_vals = self.sale_order_model.onchange_partner_id(
            self.partner.id)['value']
        order_vals['partner_id'] = self.partner.id
        self.sale_order = self.sale_order_model.create(order_vals)
        # Give the product a secondary unit of sale (12 kg per unit).
        self.product.uos_id = self.env.ref('product.product_uom_kgm')
        self.product.uos_coeff = 12.0
        self.sale_order_line = self.sale_order_line_model.create({
            'product_id': self.product.id,
            'name': self.product.name,
            'product_uom_qty': 1.0,
            'product_uom': self.product.uom_id.id,
            'product_uos_qty': 12.0,
            'product_uos': self.product.uos_id.id,
            'price_unit': self.product.lst_price,
            'order_id': self.sale_order.id,
        })

    def test_price_recalculation(self):
        line = self.sale_order_line
        # The line starts at the current list price.
        self.assertEqual(line.price_unit, self.product.lst_price)
        # Change the list price, then recompute the order.
        self.product.lst_price = 500
        self.sale_order.recalculate_prices()
        # The line must pick up the new price...
        self.assertEqual(line.price_unit, self.product.lst_price)
        # ...without touching the ordered quantities.
        self.assertEqual(line.product_uom_qty, 1.0)
        self.assertEqual(line.product_uos_qty, 12.0)
| Rona111/sale-workflow | sale_order_price_recalculation/tests/test_sale_order_price_recalculation.py | Python | agpl-3.0 | 1,988 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This is rating counter
# author: Alex Shteinikov
import __init__
import settings
import time
import random
# Module-level settings singletons, created once at import time.
# NOTE(review): these two globals appear unused below (ratingsCounter builds
# its own core/balance class attributes) -- confirm before removing.
tweenk_core = settings.core()
tweenk_balance = settings.balance()
import db
import logger
# NOTE(review): 'time' is already imported above; this re-import is redundant.
import time
class ratingsCounter:
    """Batch job that recomputes player, guild, and game-wide ratings.

    Loads players and guilds from MongoDB, computes ranking positions
    (by experience, pvp score, achievement points) plus a jittered
    "trending" score, and writes the results back.  Meant to be run as
    a standalone script (see the __main__ block at the end of the file).
    """
    # Tuning constants for the trending-score formula.
    K1 = 0.02
    K2 = 1.5
    K_kill = 100
    # Maximum random jitter, in percent, added to a trending score.
    RD = 25

    # Shared adapters/configuration (class attributes, built at import time).
    mongo = db.mongoAdapter()
    balance = settings.balance()
    core = settings.core()
    log = logger.logger('logs/system_events.log')

    def __init__(self):
        pass

    def countUserRatings(self):
        """Compute trending scores and ranking positions for every player
        in self.players and persist them to the 'players' collection."""

        def countTheUserRating(sort_field, result_field):
            # Rank players by sort_field, descending; banned players get
            # the sentinel place 100500 (effectively off the chart).
            self.players.sort(key=lambda x: x[sort_field])
            self.players.reverse()
            place = 1
            for player in self.players:
                if 'banned' in player and player['banned']:
                    player.update({result_field: 100500})
                else:
                    player.update({result_field: place})
                place += 1

        starttime = time.time()
        for player in self.players:
            # Multiply the level by 100 million and add the experience so a
            # single combined value can be compared, instead of comparing
            # twice (first by level, then by experience).
            player.update({'rating':player['lvl']*100000000+player['exp']})
            # If there is no record of how many of the player's tweets were
            # parsed today, assume 0.
            if not 'today_parsed_tweets' in player:
                player.update({'today_parsed_tweets': 0})
            # If there is no record of yesterday's pvp score, treat the whole
            # current score as gained today (i.e. yesterday it was 0).
            if not 'prev_day_pvp' in player:
                player.update({'pvp_per_day': player['pvp_score']})
            else:
                player.update({'pvp_per_day': player['pvp_score'] - player['prev_day_pvp']})
            # Compute the player's trending score from the kill metrics.
            global_metric = 0
            if player['lvl'] == 1:
                global_metric = 0
            else:
                if 'metrics' in player:
                    if 'monster_kill' in player['metrics']:
                        # Kills of monsters far below the player's own level
                        # contribute less (K2 scales the level penalty).
                        for hour in player['metrics']['monster_kill']:
                            global_metric += (self.balance.max_lvl-player['metrics']['monster_kill'][hour]['lvl']*self.K2)*self.K1*self.K_kill*player['metrics']['monster_kill'][hour]['value']
                else:
                    global_metric = 0
                # Dampen the score of players already in the trending top 10
                # so the leaderboard rotates over time.
                try:
                    if player['ratings']['trending_position'] <= 10:
                        if player['ratings']['trending_position'] <= 3:
                            global_metric = global_metric * 0.7
                        elif player['ratings']['trending_position'] <= 7:
                            global_metric = global_metric * 0.8
                        else:
                            global_metric = global_metric * 0.9
                except Exception:
                    pass
                # Add up to RD percent of random jitter.
                global_metric = global_metric + global_metric/100 * random.randint(0,self.RD)
            player.update({'trending_score': global_metric})
        # Player's place in the global rating by experience; with equal
        # levels, the player with more experience ranks higher.
        countTheUserRating('rating', 'rating_by_exp')
        # ... in the global player rating by pvp points.
        countTheUserRating('pvp_score', 'rating_by_pvp')
        # ... in the global player rating by achievement points.
        countTheUserRating('achv_points', 'rating_by_achv_points')
        # ... trending players.
        countTheUserRating('trending_score', 'trending_position')
        for player in self.players:
            record = {
                'rating_by_exp': player['rating_by_exp'],
                'rating_by_pvp': player['rating_by_pvp'],
                'rating_by_achv_points': player['rating_by_achv_points'],
                'trending_position': player['trending_position'],
                'trending_score': player['trending_score']
            }
            self.mongo.update('players', {'_id':player['_id']}, {'ratings':record})
        message = 'Player ratings was counted by '+str(time.time()-starttime)+' seconds'
        self.log.write(message)
        print message

    def countGuildRatings(self):
        """Aggregate member stats for each guild in self.guilds, derive
        ranking positions, and persist them to the 'guilds' collection."""

        def countGuildRating(field):
            # NOTE: sorts by `field` (descending) and then overwrites that
            # same field with the guild's 1-based place in the ranking.
            self.guilds.sort(key=lambda x: x[field])
            self.guilds.reverse()
            place = 1
            for guild in self.guilds:
                guild.update({field: place})
                place += 1

        starttime = time.time()
        for guild in self.guilds:
            guild.update({
                'buff_global_metric': 0,
                'buff_rating': 0,
                'buff_pvp': 0,
                'pvp_score': 0,
            })
            query = []
            for id in guild['people']:
                query.append({'_id':id})
            members = self.mongo.getu('players', search = {'$or':query}, fields = {'lvl':1, 'pvp_score':1, 'ratings':1})
            # Sum up member stats; members missing any field are skipped.
            for player in members:
                try:
                    guild['buff_global_metric'] += player['ratings']['trending_score']
                    guild['buff_rating'] += player['lvl']
                    guild['buff_pvp'] += player['pvp_score']
                except Exception:
                    pass
            # Guilds with fewer than 5 members are excluded from trending.
            if len(members)<5:
                guild['buff_global_metric'] = 0
            guild['pvp_score'] = int(guild['buff_pvp'])
        # Guild's place in the global guild rating by the sum of member levels.
        countGuildRating('buff_rating')
        # ... by the sum of the members' trending scores.
        countGuildRating('buff_global_metric')
        # ... by the members' total pvp_score.
        countGuildRating('buff_pvp')
        for guild in self.guilds:
            record = {
                'rating_place_members_lvl': guild['buff_rating'],
                'rating_place_members_pvp': guild['buff_pvp'],
                'trending_position': guild['buff_global_metric'],
                'pvp_score': guild['pvp_score']
            }
            self.mongo.update('guilds',{'_id':guild['_id']}, {'ratings':record})
        message = 'Guilds ratings was counted by '+str(time.time()-starttime)+' seconds'
        self.log.write(message)
        print message

    def countAll(self):
        """Load players (split into active and banned) and guilds, run both
        rating passes, pin banned players to the sentinel place, and exit."""
        self.players = self.mongo.getu('players', {'banned':{'$exists':False}}, {'_id':1, 'lvl':1, 'exp':1, 'achv_points': 1, 'pvp_score':1, 'metrics':1, 'ratings':1})
        self.banned_players = self.mongo.getu('players', {'banned':{'$exists':True}}, {'_id':1, 'lvl':1, 'exp':1, 'achv_points': 1, 'pvp_score':1, 'metrics':1})
        self.guilds = self.mongo.getu('guilds',{},{'id':1, 'name':1, 'people':1})
        self.countUserRatings()
        self.countGuildRatings()
        # Banned players are pinned to the sentinel place 100500 everywhere.
        for player in self.banned_players:
            record = {
                'rating_by_exp': 100500,
                'rating_by_pvp': 100500,
                'rating_by_achv_points': 100500,
                'trending_position': 100500,
                'trending_score': 0
            }
            self.mongo.update('players', {'_id':player['_id']}, record)
        self.exit()

    def countGameStatistics(self):
        """Recompute per-faction player counts and average levels and store
        them in 'game_statistics', plus a once-a-day snapshot refreshed
        after 20:00."""
        count_players = []
        for index in range(0, len(self.balance.faction)):
            # Only players of race 0 or 1 count towards the faction total.
            query = {'faction': index, '$or': [{'race': 0}, {'race':1}]}
            count_players.append(self.mongo.count('players', query))
        # NOTE(review): hardcodes 3 factions -- assumes
        # len(self.balance.faction) <= 3; confirm against settings.
        count_avg_level = [0,0,0]
        players = self.mongo.getu('players', {}, {'lvl':1, 'faction':1})
        for player in players:
            count_avg_level[player['faction']] += player['lvl']
        for index in range(0, len(self.balance.faction)):
            # Average level truncated to one decimal; factions with zero
            # players fall back to 0.0 via the exception path.
            try:
                count_avg_level[index] = float(int(float(count_avg_level[index]) / count_players[index] * 10))/10
            except Exception:
                count_avg_level[index] = 0.0
        # Hash key identifying the current day of the current year.
        current_time = time.localtime()
        hashkey = str(current_time.tm_year) + str(current_time.tm_yday)
        lastday_stat = self.mongo.find('game_statistics', {'type': 'lastday_avg_level'})
        # Refresh the daily snapshot when none exists yet, or after 20:00 on
        # a day with a new hashkey (note: `and` binds tighter than `or`).
        if not lastday_stat or time.localtime().tm_hour > 20 and not lastday_stat['hashkey'] == hashkey:
            self.mongo.update('game_statistics', {'type': 'lastday_avg_level'}, {'type': 'lastday_avg_level', 'data': count_avg_level, 'hashkey': hashkey}, True)
            self.mongo.update('game_statistics', {'type': 'lastday_count'}, {'type': 'lastday_count', 'data': count_players, 'hashkey': hashkey}, True)
        self.mongo.update('game_statistics', {'type': 'players_count'}, {'type': 'players_count', 'data': count_players}, True)
        self.mongo.update('game_statistics', {'type': 'players_avg_level'}, {'type': 'players_avg_level', 'data': count_avg_level}, True)

    def exit(self):
        """Flush and close the system-events log file."""
        self.log.closeFile()
if __name__ == "__main__":
    # Run a full ratings pass when executed as a script.
    counter = ratingsCounter()
    counter.countGameStatistics()
    counter.countAll()
| MicroWorldwide/tweeria | tools/ratings.py | Python | mit | 8,313 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
int_or_none,
parse_duration,
try_get,
url_or_none,
)
from .dailymotion import DailymotionIE
class FranceTVBaseInfoExtractor(InfoExtractor):
    def _make_url_result(self, video_or_full_id, catalog=None):
        # Delegate extraction to FranceTVIE via a "francetv:" pseudo-URL,
        # appending the catalog only when the id does not already carry one.
        video_id = video_or_full_id.split('@')[0]
        full_id = 'francetv:%s' % video_or_full_id
        if catalog and '@' not in video_or_full_id:
            full_id = '%s@%s' % (full_id, catalog)
        return self.url_result(
            full_id, ie=FranceTVIE.ie_key(), video_id=video_id)
class FranceTVIE(InfoExtractor):
    """Core France TV extractor: resolves a diffusion id (optionally
    qualified with a catalog name) into playable formats via the
    sivideo.webservices.francetelevisions.fr JSON API."""
    _VALID_URL = r'''(?x)
                    (?:
                        https?://
                            sivideo\.webservices\.francetelevisions\.fr/tools/getInfosOeuvre/v2/\?
                            .*?\bidDiffusion=[^&]+|
                        (?:
                            https?://videos\.francetv\.fr/video/|
                            francetv:
                        )
                        (?P<id>[^@]+)(?:@(?P<catalog>.+))?
                    )
                    '''
    _TESTS = [{
        # without catalog
        'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=162311093&callback=_jsonp_loader_callback_request_0',
        'md5': 'c2248a8de38c4e65ea8fae7b5df2d84f',
        'info_dict': {
            'id': '162311093',
            'ext': 'mp4',
            'title': '13h15, le dimanche... - Les mystères de Jésus',
            'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
            'timestamp': 1502623500,
            'upload_date': '20170813',
        },
    }, {
        # with catalog
        'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=NI_1004933&catalogue=Zouzous&callback=_jsonp_loader_callback_request_4',
        'only_matching': True,
    }, {
        'url': 'http://videos.francetv.fr/video/NI_657393@Regions',
        'only_matching': True,
    }, {
        'url': 'francetv:162311093',
        'only_matching': True,
    }, {
        'url': 'francetv:NI_1004933@Zouzous',
        'only_matching': True,
    }, {
        'url': 'francetv:NI_983319@Info-web',
        'only_matching': True,
    }, {
        'url': 'francetv:NI_983319',
        'only_matching': True,
    }, {
        'url': 'francetv:NI_657393@Regions',
        'only_matching': True,
    }, {
        # france-3 live
        'url': 'francetv:SIM_France3',
        'only_matching': True,
    }]

    def _extract_video(self, video_id, catalogue=None):
        """Query the getInfosOeuvre API for `video_id` and build the
        info dict (formats, subtitles, metadata)."""
        # Videos are identified by idDiffusion so catalogue part is optional.
        # However when provided, some extra formats may be returned so we pass
        # it if available.
        info = self._download_json(
            'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/',
            video_id, 'Downloading video JSON', query={
                'idDiffusion': video_id,
                'catalogue': catalogue or '',
            })
        if info.get('status') == 'NOK':
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, info['message']),
                expected=True)
        # Geo-restriction: the API lists allowed country codes; compare them
        # against the country reported by the edgescape geo service.
        allowed_countries = info['videos'][0].get('geoblocage')
        if allowed_countries:
            georestricted = True
            geo_info = self._download_json(
                'http://geo.francetv.fr/ws/edgescape.json', video_id,
                'Downloading geo restriction info')
            country = geo_info['reponse']['geo_info']['country_code']
            if country not in allowed_countries:
                raise ExtractorError(
                    'The video is not available from your location',
                    expected=True)
        else:
            georestricted = False

        def sign(manifest_url, manifest_id):
            # Ask the Akamai auth hosts to sign the manifest URL; fall back
            # to the unsigned URL if neither host answers.
            for host in ('hdfauthftv-a.akamaihd.net', 'hdfauth.francetv.fr'):
                signed_url = url_or_none(self._download_webpage(
                    'https://%s/esi/TA' % host, video_id,
                    'Downloading signed %s manifest URL' % manifest_id,
                    fatal=False, query={
                        'url': manifest_url,
                    }))
                if signed_url:
                    return signed_url
            return manifest_url

        is_live = None
        formats = []
        # Each entry in info['videos'] is one delivery variant (HDS, HLS,
        # RTMP or plain HTTP), dispatched on the URL's extension below.
        for video in info['videos']:
            if video['statut'] != 'ONLINE':
                continue
            video_url = video['url']
            if not video_url:
                continue
            if is_live is None:
                is_live = (try_get(
                    video, lambda x: x['plages_ouverture'][0]['direct'],
                    bool) is True) or '/live.francetv.fr/' in video_url
            format_id = video['format']
            ext = determine_ext(video_url)
            if ext == 'f4m':
                if georestricted:
                    # See https://github.com/rg3/youtube-dl/issues/3963
                    # m3u8 urls work fine
                    continue
                formats.extend(self._extract_f4m_formats(
                    sign(video_url, format_id) + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
                    video_id, f4m_id=format_id, fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    sign(video_url, format_id), video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id=format_id,
                    fatal=False))
            elif video_url.startswith('rtmp'):
                formats.append({
                    'url': video_url,
                    'format_id': 'rtmp-%s' % format_id,
                    'ext': 'flv',
                })
            else:
                if self._is_valid_url(video_url, video_id, format_id):
                    formats.append({
                        'url': video_url,
                        'format_id': format_id,
                    })
        self._sort_formats(formats)
        title = info['titre']
        subtitle = info.get('sous_titre')
        if subtitle:
            title += ' - %s' % subtitle
        title = title.strip()
        subtitles = {}
        subtitles_list = [{
            'url': subformat['url'],
            'ext': subformat.get('format'),
        } for subformat in info.get('subtitles', []) if subformat.get('url')]
        if subtitles_list:
            subtitles['fr'] = subtitles_list
        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'description': clean_html(info['synopsis']),
            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
            'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
            'timestamp': int_or_none(info['diffusion']['timestamp']),
            'is_live': is_live,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        catalog = mobj.group('catalog')
        # API-style URLs carry the id/catalog in the query string instead of
        # the path, so fall back to parsing the query.
        if not video_id:
            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
            video_id = qs.get('idDiffusion', [None])[0]
            catalog = qs.get('catalogue', [None])[0]
            if not video_id:
                raise ExtractorError('Invalid URL', expected=True)
        return self._extract_video(video_id, catalog)
class FranceTVSiteIE(FranceTVBaseInfoExtractor):
    """Extractor for france.tv web pages: scrapes the diffusion id out of
    the page and hands off to FranceTVIE."""
    _VALID_URL = r'https?://(?:(?:www\.)?france\.tv|mobile\.france\.tv)/(?:[^/]+/)*(?P<id>[^/]+)\.html'
    _TESTS = [{
        'url': 'https://www.france.tv/france-2/13h15-le-dimanche/140921-les-mysteres-de-jesus.html',
        'info_dict': {
            'id': '162311093',
            'ext': 'mp4',
            'title': '13h15, le dimanche... - Les mystères de Jésus',
            'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
            'timestamp': 1502623500,
            'upload_date': '20170813',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }, {
        # france3
        'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html',
        'only_matching': True,
    }, {
        # france4
        'url': 'https://www.france.tv/france-4/hero-corp/saison-1/134151-apres-le-calme.html',
        'only_matching': True,
    }, {
        # france5
        'url': 'https://www.france.tv/france-5/c-a-dire/saison-10/137013-c-a-dire.html',
        'only_matching': True,
    }, {
        # franceo
        'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html',
        'only_matching': True,
    }, {
        # france2 live
        'url': 'https://www.france.tv/france-2/direct.html',
        'only_matching': True,
    }, {
        'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html',
        'only_matching': True,
    }, {
        'url': 'https://www.france.tv/jeux-et-divertissements/divertissements/133965-le-web-contre-attaque.html',
        'only_matching': True,
    }, {
        'url': 'https://mobile.france.tv/france-5/c-dans-l-air/137347-emission-du-vendredi-12-mai-2017.html',
        'only_matching': True,
    }, {
        'url': 'https://www.france.tv/142749-rouge-sang.html',
        'only_matching': True,
    }, {
        # france-3 live
        'url': 'https://www.france.tv/france-3/direct.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Modern pages embed the bare diffusion id; legacy pages only have
        # the old "id@catalog" video URL, handled by the fallback below.
        video_id = self._search_regex(
            r'(?:data-main-video\s*=|videoId\s*:)\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
            webpage, 'video id', default=None, group='id')
        if video_id:
            catalogue = None
        else:
            video_id, catalogue = self._html_search_regex(
                r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
                webpage, 'video ID').split('@')
        return self._make_url_result(video_id, catalogue)
class FranceTVEmbedIE(FranceTVBaseInfoExtractor):
    """Extractor for embed.francetv.fr iframes.

    The embed URL only carries an opaque key (the ``ue`` query parameter);
    a JSON web service maps that key to the real video id and catalogue.
    """
    _VALID_URL = r'https?://embed\.francetv\.fr/*\?.*?\bue=(?P<id>[^&]+)'

    _TESTS = [{
        'url': 'http://embed.francetv.fr/?ue=7fd581a2ccf59d2fc5719c5c13cf6961',
        'info_dict': {
            'id': 'NI_983319',
            'ext': 'mp4',
            'title': 'Le Pen Reims',
            'upload_date': '20170505',
            'timestamp': 1493981780,
            'duration': 16,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }]

    def _real_extract(self, url):
        embed_key = self._match_id(url)

        # Resolve the opaque embed key into real video metadata.
        meta = self._download_json(
            'http://api-embed.webservices.francetelevisions.fr/key/%s' % embed_key,
            embed_key)

        return self._make_url_result(meta['video_id'], meta.get('catalog'))
class FranceTVInfoIE(FranceTVBaseInfoExtractor):
    """Extractor for francetvinfo.fr article and replay pages.

    Pages may either embed Dailymotion players (delegated to the
    Dailymotion extractor) or reference a native france.tv video through
    an ``<id>@<catalogue>`` token.
    """
    IE_NAME = 'francetvinfo.fr'
    _VALID_URL = r'https?://(?:www|mobile|france3-regions)\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&.]+)'

    _TESTS = [{
        'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
        'info_dict': {
            'id': '84981923',
            'ext': 'mp4',
            'title': 'Soir 3',
            'upload_date': '20130826',
            'timestamp': 1377548400,
            'subtitles': {
                'fr': 'mincount:2',
            },
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }, {
        'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
        'only_matching': True,
    }, {
        'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
        'only_matching': True,
    }, {
        'url': 'http://france3-regions.francetvinfo.fr/bretagne/cotes-d-armor/thalassa-echappee-breizh-ce-venredi-dans-les-cotes-d-armor-954961.html',
        'only_matching': True,
    }, {
        # Dailymotion embed
        'url': 'http://www.francetvinfo.fr/politique/notre-dame-des-landes/video-sur-france-inter-cecile-duflot-denonce-le-regard-meprisant-de-patrick-cohen_1520091.html',
        'md5': 'ee7f1828f25a648addc90cb2687b1f12',
        'info_dict': {
            'id': 'x4iiko0',
            'ext': 'mp4',
            'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
            'description': 'Au lendemain de la victoire du "oui" au référendum sur l\'aéroport de Notre-Dame-des-Landes, l\'ancienne ministre écologiste est l\'invitée de Patrick Cohen. Plus d\'info : https://www.franceinter.fr/emissions/le-7-9/le-7-9-27-juin-2016',
            'timestamp': 1467011958,
            'upload_date': '20160627',
            'uploader': 'France Inter',
            'uploader_id': 'x2q2ez',
        },
        'add_ie': ['Dailymotion'],
    }, {
        'url': 'http://france3-regions.francetvinfo.fr/limousin/emissions/jt-1213-limousin',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        # Embedded Dailymotion players take precedence over native video.
        dm_urls = DailymotionIE._extract_urls(page)
        if dm_urls:
            dm_entries = []
            for dm_url in dm_urls:
                dm_entries.append(
                    self.url_result(dm_url, DailymotionIE.ie_key()))
            return self.playlist_result(dm_entries)

        # Native video: "<video_id>@<catalogue>" token in the page.
        native_id, native_catalogue = self._search_regex(
            (r'id-video=([^@]+@[^"]+)',
             r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"'),
            page, 'video id').split('@')
        return self._make_url_result(native_id, native_catalogue)
class FranceTVInfoSportIE(FranceTVBaseInfoExtractor):
    """Extractor for sport.francetvinfo.fr pages.

    The video id sits in a ``data-video`` attribute and is always served
    from the 'Sport-web' catalogue.
    """
    IE_NAME = 'sport.francetvinfo.fr'
    _VALID_URL = r'https?://sport\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://sport.francetvinfo.fr/les-jeux-olympiques/retour-sur-les-meilleurs-moments-de-pyeongchang-2018',
        'info_dict': {
            'id': '6e49080e-3f45-11e8-b459-000d3a2439ea',
            'ext': 'mp4',
            'title': 'Retour sur les meilleurs moments de Pyeongchang 2018',
            'timestamp': 1523639962,
            'upload_date': '20180413',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)
        sport_video_id = self._search_regex(
            r'data-video="([^"]+)"', page, 'video_id')
        return self._make_url_result(sport_video_id, 'Sport-web')
class GenerationWhatIE(InfoExtractor):
    """Extractor for generation-what.francetv.fr.

    The site hosts its videos on YouTube; the page stores the 11-character
    YouTube id in a ``window.videoURL`` JS assignment, so extraction is
    delegated to the YouTube extractor.
    """
    IE_NAME = 'france2.fr:generation-what'
    _VALID_URL = r'https?://generation-what\.francetv\.fr/[^/]+/video/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://generation-what.francetv.fr/portrait/video/present-arms',
        'info_dict': {
            'id': 'wtvKYUG45iw',
            'ext': 'mp4',
            'title': 'Generation What - Garde à vous - FRA',
            'uploader': 'Generation What',
            'uploader_id': 'UCHH9p1eetWCgt4kXBYCb3_w',
            'upload_date': '20160411',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'http://generation-what.francetv.fr/europe/video/present-arms',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        # The page inlines the YouTube id as: window.videoURL = '<id>';
        yt_id = self._search_regex(
            r"window\.videoURL\s*=\s*'([0-9A-Za-z_-]{11})';",
            page, 'youtube id')

        return self.url_result(yt_id, ie='Youtube', video_id=yt_id)
class CultureboxIE(FranceTVBaseInfoExtractor):
    """Extractor for culturebox.francetvinfo.fr replay pages.

    Finds the ``<video_id>@<catalogue>`` token in a videos.francetv.fr
    URL embedded in the page; expired live replays raise an explicit
    error instead of failing obscurely downstream.
    """
    _VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://culturebox.francetvinfo.fr/opera-classique/musique-classique/c-est-baroque/concerts/cantates-bwv-4-106-et-131-de-bach-par-raphael-pichon-57-268689',
        'info_dict': {
            'id': 'EV_134885',
            'ext': 'mp4',
            'title': 'Cantates BWV 4, 106 et 131 de Bach par Raphaël Pichon 5/7',
            'description': 'md5:19c44af004b88219f4daa50fa9a351d4',
            'upload_date': '20180206',
            'timestamp': 1517945220,
            'duration': 5981,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [FranceTVIE.ie_key()],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        # Guard: an expired live replay page carries this marker text.
        if ">Ce live n'est plus disponible en replay<" in page:
            raise ExtractorError(
                'Video %s is not available' % display_id, expected=True)

        cb_video_id, cb_catalogue = self._search_regex(
            r'["\'>]https?://videos\.francetv\.fr/video/([^@]+@.+?)["\'<]',
            page, 'video id').split('@')

        return self._make_url_result(cb_video_id, cb_catalogue)
class FranceTVJeunesseIE(FranceTVBaseInfoExtractor):
    """Extractor for zouzous.fr / ludo.fr children's hero pages.

    Each hero page exposes a JSON playlist endpoint at ``<page_url>/playlist``;
    every item's ``identity`` field is a france.tv video id resolved via
    the base class helper.
    """
    _VALID_URL = r'(?P<url>https?://(?:www\.)?(?:zouzous|ludo)\.fr/heros/(?P<id>[^/?#&]+))'

    _TESTS = [{
        'url': 'https://www.zouzous.fr/heros/simon',
        'info_dict': {
            'id': 'simon',
        },
        'playlist_count': 9,
    }, {
        'url': 'https://www.ludo.fr/heros/ninjago',
        'info_dict': {
            'id': 'ninjago',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://www.zouzous.fr/heros/simon?abc',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Return a playlist of all episodes for the matched hero.

        Raises:
            ExtractorError: when the playlist is empty or unavailable.
        """
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')

        playlist = self._download_json(
            '%s/%s' % (mobj.group('url'), 'playlist'), playlist_id)

        if not playlist.get('count'):
            raise ExtractorError(
                '%s is not available' % playlist_id, expected=True)

        entries = []
        # Robustness fix: the original indexed playlist['items'] directly,
        # which raises KeyError if the service reports a truthy 'count' but
        # omits (or nulls) the 'items' list; fall back to an empty sequence.
        for item in playlist.get('items') or []:
            identity = item.get('identity')
            # Only non-empty string identities are valid video references.
            if identity and isinstance(identity, compat_str):
                entries.append(self._make_url_result(identity))

        return self.playlist_result(entries, playlist_id)
| kidburglar/youtube-dl | youtube_dl/extractor/francetv.py | Python | unlicense | 19,069 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.