code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 10:52:38 2017
@author: Andrew Ruba
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import random
import csv
import os
from numpy.random import choice
import numpy as np
from scipy.optimize import curve_fit
import time
import matplotlib.pyplot as plt
from scipy import stats
## below arrays are for saving simulation data for statistical analysis
# NOTE(review): a `global` statement at module scope is a no-op in Python;
# these declarations only document intent. The lists themselves are ordinary
# module-level objects that sim() appends to across invocations.
global gausslist
gausslist = []
global bimodallist
bimodallist = []
global bimodalmean
bimodalmean = []
global bimodalsd
bimodalsd = []
global bimodalheight
bimodalheight = []
global bimodalauc
bimodalauc = []
def sim(gui, PTNUM, RADIUS, PREC, ITER, BINSIZE, PERCERROR, A, ROTATION):
    """Run paired Monte-Carlo simulations of ring-shaped 2D localizations and
    return the difference between the fitted peak radius of an "abnormal" run
    (angular coverage A, rotated by ROTATION degrees) and a full-circle
    reference run, formatted with three decimals.

    gui      -- GUI handle exposing gui.message.set(...) and gui.update();
                used only for progress reporting during bin-size calibration.
    PTNUM    -- sentinel point count: simulation(num_points == PTNUM + 1, ...)
                selects the bin-size-calibration mode.
    RADIUS   -- ring radius (same length unit as PREC).
    PREC     -- localization precision (Gaussian sigma).
    ITER, BINSIZE, PERCERROR -- read into locals below but never consumed
                afterwards; presumably kept for interface compatibility with
                other simulation variants — TODO confirm.
    A        -- fraction of the full 360-degree ring that is populated (1.0 = full ring).
    ROTATION -- rotation (degrees) applied to the simulated ring.
    """
    def simulation(num_points, radius, dr, ss, mm, aa, rotation):
        # Simulate num_points localizations on a ring of radius mm with
        # localization error ss, histogram them, deconvolve the 2D projection
        # into a radial density, and fit Gaussian/bimodal models to it.
        def area_fn(X):
            # Area of the annulus of width dr whose outer radius is X
            # (pi*X^2 - pi*(X-dr)^2 expanded: linear in X).
            X = float(X)
            A = -(dr**2)*np.pi
            B = dr*2*np.pi
            return X*B+A
        def gauss_fn(x, s, m):
            # Gaussian centered at m with sigma s, scaled by the annulus area at m.
            a = area_fn(m)
            x = float(x)
            s = float(s)
            m = float(m)
            return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
        def combine(x):
            # Product of annulus area and area-scaled Gaussian (unused below).
            s = ss
            m = mm
            return (area_fn(x) * gauss_fn(x, s, m))
        ##starting with perfect x,y and adding error
        xydata = []
        # Tiny offset avoids degenerate zero-radius trig below.
        mm = mm + 0.00001
        while len(xydata) < num_points:
            # Sample an angle covering only the fraction `aa` of the circle.
            theta = np.radians((np.random.random()*360.0*aa)-0.5*(1.0-aa)*360.0)
            ## precision distribution sampling
            # ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
            # ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
            y_prec = np.random.normal(0.0, ss)
            z_prec = np.random.normal(0.0, ss)
            yy = mm*np.cos(theta)
            zz = mm*np.sin(theta)
            # Rotate the point by `rotation` degrees before adding noise.
            yyy = yy*np.cos(np.radians(rotation))+zz*np.sin(np.radians(rotation))
            zzz = -yy*np.sin(np.radians(rotation))+zz*np.cos(np.radians(rotation))
            xydata.append((yyy+y_prec, zzz+z_prec))
        def gen_matrix(r, d_r):
            # Build the lower-triangular geometry matrix whose entry [i][j]
            # is the area of ring j's slice projected into radial bin i;
            # used to deconvolve the 1D projection into a radial density.
            ##'be' is short for bin edges
            if r%d_r > 0:
                be = range(0, r+r%d_r, d_r)
            else:
                be = range(0, r+d_r, d_r)
            matrix = []
            for i in range(len(be)-1):
                matrix.append([])
            x = 0
            for i in range(len(matrix)):
                for j in range(x):
                    matrix[i].append(0)
                x += 1
            ##generate areas of sections closest to x axis
            for i in range(len(matrix)):
                theta = np.arccos(float(be[len(be)-2-i])/float(be[len(be)-1-i]))
                arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
                tri_area = 0.5 * float(be[len(be)-2-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
                matrix[i].append(4 * (arc_area - tri_area))
            ##skipping factor
            x = 2
            ##generate areas of layers going further out from x axis
            while len(matrix[0]) < len(matrix):
                for i in range(len(matrix) - len(matrix[0])):
                    # Subtract areas already attributed to inner layers.
                    num = 0
                    for j in range(len(matrix)):
                        for k in range(len(matrix[i]) + 1):
                            if j == i and k < len(matrix[i]):
                                num += matrix[j][k]
                            elif j > i:
                                num += matrix[j][k]
                    theta = np.arccos(float(be[len(be)-1-x-i])/float(be[len(be)-1-i]))
                    arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
                    tri_area = 0.5 * float(be[len(be)-1-x-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
                    matrix[i].append(4 * (arc_area - tri_area) - num)
                x += 1
            return matrix
        def smoothdata(data, r, d_r):
            """smoothds data with 3 moving window and takes abs value average"""
            # NOTE(review): despite the docstring, the moving-window smoothing
            # is currently disabled (the copy loop below is a pass-through);
            # only the +/- bin folding at the end is applied.
            smooth_data = []
            r += 1
            ##comment out for smoothing
            smooth_data = []
            for i in range(len(data)):
                smooth_data.append(data[i])
            ##adds + and - bins
            final_smooth_data = []
            for i in range(int(r/d_r)):
                final_smooth_data.append(smooth_data[i] + smooth_data[len(smooth_data)-1-i])
            return list(reversed(final_smooth_data))
        def deconvolution(hv, be, r, d_r):
            """hv = hist_values, be = bin_edges"""
            # Back-substitution from the outermost bin inward, dividing by the
            # geometry matrix diagonal; also mutates/truncates `hv` in place.
            density = []
            matrix = gen_matrix(r, d_r)
            while len(hv) > len(matrix):
                hv.pop()
            while len(matrix) > len(hv):
                matrix.pop()
            rev_hv = list(reversed(hv))
            x = 0
            for i in range(len(rev_hv)):
                ##calculate how much to subtract from bin
                density_sub = 0
                y = 0
                for j in range(x):
                    density_sub += density[y] * matrix[j][i]
                    y += 1
                ##calculate final bin value
                density.append((rev_hv[i] - density_sub) / matrix[i][i])
                x += 1
            unrev_hv = list(reversed(density))
            # 3-point moving average (endpoints kept as-is).
            smooth_data = []
            for i in range(len(unrev_hv)):
                if i == 0 or i == (len(unrev_hv) - 1):
                    smooth_data.append(unrev_hv[i])
                else:
                    smooth_data.append(np.average([unrev_hv[i-1], unrev_hv[i], unrev_hv[i+1]]))
            return unrev_hv, smooth_data, hv
        def make_hist(data, r, d_r):
            # Histogram over (-r, r), then fold negative bins onto positive ones.
            hist_values, bin_edges = np.histogram(data, bins = 2 * int(r/d_r), range = (-r, r))
            new_bin_edges = []
            for i in bin_edges:
                if i >= 0:
                    new_bin_edges.append(i)
            new_hist_values = smoothdata(hist_values, r, d_r)
            return new_hist_values, new_bin_edges
        def csv_read(path):
            # Read column 1 of a CSV as floats (unused in this code path).
            with open(path, 'rb') as csvfile:
                reader = csv.reader(csvfile, delimiter = ',')
                holdlist = []
                for row in reader:
                    holdlist.append(float(row[1]))
                return holdlist
        # Keep only the y coordinate: the fit works on the 1D projection.
        jkl = []
        for y,z in xydata:
            jkl.append(y)
        # Round the radius down to a multiple of the bin size.
        radius = int(np.floor(radius/dr))*dr
        if num_points == PTNUM + 1:
            ## decide the proper bin size
            # Calibration mode: repeat 10 trials per candidate bin size,
            # score each by a chi-square between raw histogram and the
            # re-convolved deconvolution result, then pick the smallest
            # bin size whose averaged p-value is >= 0.95.
            minbinsize = 2
            binsizes = []
            binsizesdata = [[] for variable in range(1, int(PREC)+1)]
            gui.message.set('0% done calculating ideal bin size...')
            gui.update()
            for binoptimization in range(10):
                for binsize in range(1, int(PREC)+1):
                    if binsize >= minbinsize:
                        error = 0
                        # print ('binsize ' + str(binsize))
                        jkl = []
                        mm = mm + 0.00001
                        while len(jkl) < num_points-1:
                            theta = np.random.random()*360.0
                            ## precision distribution sampling
                            # ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
                            # ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
                            y_prec = np.random.normal(0.0, ss)
                            jkl.append(mm*np.cos(theta)+y_prec)
                        a,b = make_hist(jkl, radius, binsize)
                        final_unsmooth, final_smooth, final_2d = deconvolution(a, b, radius, binsize)
                        # Zero out everything inward of the first negative bin.
                        holdlist = []
                        addZero = False
                        for val in list(reversed(final_unsmooth)):
                            if not addZero:
                                if val >= 0.0:
                                    holdlist.append(val)
                                else:
                                    addZero = True
                                    holdlist.append(0.0)
                            else:
                                holdlist.append(0.0)
                        final_unsmooth = list(reversed(holdlist))
                        ##rescale ideal data
                        matrix = gen_matrix(radius, binsize)
                        newmatrix = []
                        for i in matrix:
                            newmatrix.append(list(reversed(i)))
                        matrix = list(reversed(newmatrix))
                        # print (a)
                        # print (final_unsmooth)
                        while len(a) > len(matrix):
                            a.pop()
                        while len(matrix) > len(a):
                            matrix.pop()
                        # Chi-square of observed histogram vs re-convolved density.
                        for ncol in range(len(matrix[0])):
                            binsub = 0.0
                            for mcol in range(len(matrix)):
                                binsub += float(matrix[mcol][ncol]*final_unsmooth[mcol])
                            try:
                                if a[ncol] != 0.0:
                                    # print (binsub)
                                    error += np.square(a[ncol] - binsub) / a[ncol]
                            except:
                                pass
                        # Drop trailing zero bins before counting degrees of freedom.
                        popped = a.pop()
                        while popped == 0:
                            popped = a.pop()
                        binsizesdata[binsize-1].append((error, len(a)+1,1-stats.chi2.cdf(error, len(a)+1),binsize))
                    else:
                        # Bin sizes below the minimum get a sentinel "bad" score.
                        binsizesdata[binsize-1].append((1000000.0,1,0.0,binsize))
                gui.message.set(str((binoptimization*10) + 10) + ' % done calculating ideal bin size...')
                gui.update()
            # Average the 10 trials for each candidate bin size.
            finalbinsizes = []
            for bintrial in range(len(binsizesdata)):
                errhold = []
                dfhold = []
                pvalhold = []
                binhold = []
                for trial in range(len(binsizesdata[bintrial])):
                    chisq, df, pval, binsize = binsizesdata[bintrial][trial]
                    errhold.append(chisq)
                    dfhold.append(df)
                    pvalhold.append(pval)
                    binhold.append(binsize)
                chisq = np.average(errhold)
                df = np.round(np.average(dfhold))
                pval = 1-stats.chi2.cdf(chisq,df)
                binsize = binhold[0]
                finalbinsizes.append((chisq,df,pval,binsize))
            # print (finalbinsizes)
            # First bin size whose p-value passes; fall back to PREC (for-else).
            for binsizedata in finalbinsizes:
                chisq, df, pval, binsize = binsizedata
                if pval >= 0.95:
                    dr = binsize
                    break
            else:
                dr = int(PREC)
        a,b = make_hist(jkl, radius, dr)
        final = deconvolution(a,b,radius,dr)
        if num_points != PTNUM + 1:
            # Fitting mode: fit a single Gaussian and a symmetric bimodal
            # model to the mirrored radial density; the bimodal parameters
            # are the ones actually returned (see assignment below).
            def gauss_fn(x, a, s, m):
                # Shadows the outer gauss_fn: free-amplitude Gaussian for curve_fit.
                return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
            def bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):
                return gauss_fn(x, A1, sigma1, mu1)+gauss_fn(x, A2, sigma2, mu2)
            try:
                guess = [np.max(final[0]), ss, mm]
                tempbins = list(range(int(dr/2), radius+int(dr/2), dr))
                tempdensity = final[0]
                # Zero out everything inward of the first negative bin.
                holdlist = []
                addZero = False
                for val in list(reversed(tempdensity)):
                    if not addZero:
                        if val >= 0.0:
                            holdlist.append(val)
                        else:
                            addZero = True
                            holdlist.append(0.0)
                    else:
                        holdlist.append(0.0)
                tempdensity = list(reversed(holdlist))
                while len(tempdensity) > len(tempbins):
                    tempdensity.pop()
                while len(tempbins) > len(tempdensity):
                    tempbins.pop()
                # Mirror the density about zero so the fit sees both peaks.
                revtempbins = list(np.negative(list(reversed(tempbins))))
                revtempdensity = list(reversed(tempdensity))
                bins = revtempbins + tempbins
                density = revtempdensity + tempdensity
                params, var = curve_fit(gauss_fn, bins, density, p0 = guess)
                params_gauss = np.abs(params)
                ## computes 1 SD errors
                var_gauss = np.sqrt(np.diag(var))
                def frange(beg, end, step):
                    # Float-step range; stops half a step early to avoid overshoot.
                    f_range = []
                    while beg < end - (step/2.0):
                        f_range.append(beg)
                        beg += step
                    return f_range
                guess = [-mm, ss, np.max(final[0]), mm, ss, np.max(final[0])]
                tempbins = frange(dr/2.0, radius, dr)
                tempdensity = final[0]
                holdlist = []
                addZero = False
                for val in list(reversed(tempdensity)):
                    if not addZero:
                        if val >= 0.0:
                            holdlist.append(val)
                        else:
                            addZero = True
                            holdlist.append(0.0)
                    else:
                        holdlist.append(0.0)
                tempdensity = list(reversed(holdlist))
                while len(tempdensity) > len(tempbins):
                    tempdensity.pop()
                while len(tempbins) > len(tempdensity):
                    tempbins.pop()
                revtempbins = list(np.negative(list(reversed(tempbins))))
                revtempdensity = list(reversed(tempdensity))
                bins = revtempbins + tempbins
                density = revtempdensity + tempdensity
                params, var = curve_fit(bimodal, bins, density, p0 = guess)
                params = np.abs(params)
                ## computes 1 SD errors
                var = np.sqrt(np.diag(var))
                ## average paramters
                stdev = np.average((params[1], params[4]))
                mean = np.average((params[0], params[3]))
                height = np.average((params[2], params[5]))
                stdev_e = np.average((var[1], var[4]))
                mean_e = np.average((var[0], var[3]))
                height_e = np.average((var[2], var[5]))
                params_bimodal = [height, stdev, mean]
                var_bimodal = [height_e, stdev_e, mean_e]
                ## uncomment following for comparing central vs. peripheral peak fitting errors
                # bimodalmean.append(params_gauss[0])
                bimodalmean.append(mean)
                # bimodalmean.append(tempdensity)
                bimodalsd.append(stdev)
                bimodalheight.append(height)
                # Numerically integrate the fitted Gaussian over +/- 5 sigma.
                auc = 0.0
                step = mean - 5.0*stdev
                while step < mean + 5.0*stdev:
                    auc+=0.01*gauss_fn(step,height,stdev,mean)
                    step += 0.01
                bimodalauc.append(auc)
                # bimodallist.append(var_bimodal[1])
                gausslist.append(var_gauss[1])
                # if np.sum(var_bimodal) < np.sum(var_gauss):
                # NOTE(review): the comparison above is commented out, so the
                # bimodal fit is always the one returned.
                params = params_bimodal
                var = var_bimodal
                # else:
                #     params = params_gauss
                #     var = var_gauss
            except RuntimeError:
                # curve_fit failed to converge; return empty fit results.
                params = []
                var = []
            hist_mids = []
            for i in range(len(b)-1):
                hist_mids.append(np.average((b[i],b[i+1])))
            norm_values = []
            for i in final[1]:
                norm_values.append(i/np.max(final[1]))
            return params, var, norm_values, hist_mids, dr
        else:
            # Calibration mode returns only the chosen bin size.
            return dr
    pt_min = PTNUM
    pt_max = PTNUM
    rt_min = RADIUS
    rt_max = RADIUS
    prec_min = PREC
    prec_max = PREC
    iterations = ITER
    PREC = float(PREC)
    one_diff = []
    perc_err = PERCERROR*0.01
    def roundup(x):
        # Round up to the next multiple of 10, with a floor of 30.
        val = int(math.ceil(x / 10.0)) * 10
        if val >= 30:
            return val
        else:
            return 30
    ptlist = range(pt_min, pt_max+100, 100)
    # NOTE(review): all three ranges are single-valued (min == max) and the
    # `return` below exits on the first iteration.
    for pt in ptlist:
        for rt in range(rt_min, rt_max+1, 1):
            for prec in range(prec_min, prec_max+1, 1):
                prec = prec+0.000001
                xrng = roundup(rt + prec*5.0)
                # DR = simulation(pt+1, xrng, BINSIZE, float(prec), float(rt))
                ## uncomment below to manually set bin size
                DR = PTNUM
                # print ('ideal bin size: '+ str(DR))
                p_ab, v, d, h_m, DR = simulation(1000000, xrng, DR, float(prec), float(rt), A, ROTATION)
                print ('density')
                for i in d:
                    print (i)
                p_normal, v, d, h_m, DR = simulation(1000000, xrng, DR, float(prec), float(rt), 1.0, 0)
                # print (p)
                a, s, m_ab = p_ab
                a, s, m_normal = p_normal
                return '%.3f'%(m_ab-m_normal)
'''Space of random variables over which stochastic collocation is performed'''
__copyright__ = 'Copyright (C) 2011 Aravind Alwan'
__license__ = '''
This file is part of UnyQuE.
UnyQuE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
UnyQuE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
For a copy of the GNU General Public License, please see
<http://www.gnu.org/licenses/>.
'''
import itertools as it
import operator as op
from .rdimension import RandomDimension
import unyque._internals as internals
from . import logmanager
class RandomDomain(object):
    '''Basic random domain object that consists of a set of random dimensions.

    NOTE(review): this module is Python-2 code (uses ``xrange``, ``it.imap``
    and bare ``reduce``); it will not run unmodified on Python 3.
    '''

    _log = logmanager.getLogger('unyque.rdom')
    ndim = 0 # Number of random dimensions
    q = -1 # Interpolation level used in stochastic collocation
    _grid_indices = None # Grid points used in stochastic collocation
    NUM_MOMENTS = 2 # Number of moments to calculate

    def __init__(self, smodel, bounds):
        '''Create a domain over stochastic model *smodel* with per-dimension
        *bounds* (one (lo, hi) tuple per dimension).

        Raises StateError if the class-level ndim has not been set to match
        smodel, and ValueError if bounds has the wrong length.
        '''
        self._log.info('Initializing %d-dimensional random domain',
                       smodel.ndim)
        if self.ndim != smodel.ndim:
            raise StateError(
                'Dimensionality of smodel is different from RandomDomain.ndim '+
                'attribute. Set number of dimensions using ' +
                'RandomDomain.set_number_of_dimensions() before creating any ' +
                'RandomDomain object.')
        if len(bounds) != smodel.ndim:
            raise ValueError(
                'Number of tuples in bounds must equal the dimensionality of ' +
                'smodel')
        self.bounds = bounds
        self.dims = [RandomDimension(bnd) for bnd in bounds]
        self.stochastic_model = smodel
        self.hsurpluses = None  # set by update_hierarchical_surpluses()

    @classmethod
    def set_number_of_dimensions(cls, value):
        '''Set the dimensionality shared by all RandomDomain instances.'''
        cls.ndim = value

    @classmethod
    def set_interpolant_order(cls, value):
        '''Set the interpolation level q and (re)build the sparse grid.'''
        cls.q = value
        # Maximum level of 1D interpolant will be q+1
        RandomDimension.set_maximum_interp_level(cls.q+1)
        # Initialize grid
        cls._init_grid()
        cls._log.info('Initialized grid with %d nodes', len(cls._grid_indices))

    @classmethod
    def _init_grid(cls):
        '''Initialize the grid points using the Smolyak algorithm. The actual
        node locations are not computed. For each grid point, we only store the
        indices of the corresponding 1D nodal set for each dimension as a
        tuple of the 1D grid level and the index of the point at that level.
        '''
        cls._grid_indices = list()
        # Store indices of 1D nodes at each level
        node_idx_1d = [range(len(i)) for i in RandomDimension.nodes]
        # Generate combos as list of sublists, with each sublist containing the
        # 1D interpolation levels for that combination
        combos = it.chain( *(cls._get_index_set(sumk, cls.ndim)
                             for sumk in xrange(cls.ndim, cls.ndim + cls.q + 1)) )
        # Generate indices for each gridpoint
        gridpoints = it.chain( *(it.product( *(
            # Product of level and indices of 1D nodes
            it.product([ki], node_idx_1d[ki-1]) for ki in combo
            ) ) for combo in combos) )
        cls._grid_indices.extend(gridpoints)

    @staticmethod
    def _get_index_set(sumk, d):
        '''Return the index set containing 1D interpolation level combinations
        that sum up to sumk. The equivalent combinatorial problem is one of
        placing d-1 separators in the sumk-1 spaces between sumk values.
        Idea: http://wordaligned.org/articles/partitioning-with-python
        '''
        splits = it.combinations(range(1,sumk),d-1)
        return (list(map(op.sub, it.chain(s, [sumk]), it.chain([0], s)))
                for s in splits)

    def compute_grid(self):
        '''Return the grid points for this domain
        '''
        return [ [self.dims[dim].get_node(*node) for dim, node in enumerate(gp)]
                 for gp in self._grid_indices ]

    def update_hierarchical_surpluses(self, fValues):
        '''Update the hierarchical supluses using the list of function
        evaluations given in fValues
        '''
        if len(fValues) != len(self._grid_indices):
            raise ValueError('Number of function evaluations given does not ' +
                             'match the number of grid points.')
        gridLevels = [[gi[0] for gi in g] for g in self._grid_indices]
        gridCoords = [[self.dims[dim].get_node(*node, normalized = True)
                       for dim, node in enumerate(g)]
                      for g in self._grid_indices]
        # Heavy lifting is delegated to the compiled extension module.
        self.hsurpluses = internals.ComputeHierarchicalSurpluses(
            gridLevels, gridCoords, fValues, self.NUM_MOMENTS)

    def interpolate(self, eval_points):
        '''Evaluate the interpolant at the points given as a list of tuples,
        eval_points, where each tuple contains the coordinates of a point in
        the random space
        '''
        if len(self.hsurpluses) != len(self._grid_indices):
            raise StateError(
                'Hierarchical surpluses are missing or outdated.' +
                'Call the update_hierarchical_surpluses() method first.')
        interp = list()
        for pt in eval_points:
            # Evaluate basis function as product of unvariate basis functions
            basis_funcs = [ reduce(op.mul, (self.dims[dim].interpolate(
                node, pt[dim]) for dim, node in enumerate(gp)))
                            for gp in self._grid_indices ]
            # Interpolant is sum of basis functions weighted by hierarchical
            # surpluses
            interp.append(sum(it.imap(
                op.mul, (h[0] for h in self.hsurpluses), basis_funcs)))
        return interp

    def compute_moments(self, moments):
        '''Compute the moments of the interpolant with respect to the given
        stochastic model
        '''
        for m in moments:
            if (m < 1) or (m > self.NUM_MOMENTS):
                raise ValueError(
                    ('Cannot compute moment of order {moment}. RandomDomain ' +
                     'only computes moments up to order {maximum}. For higher '+
                     'moments set RandomDomain.NUM_MOMENTS appropriately and ' +
                     're-run update_hierarchical_surpluses().').format(
                        moment = str(m), maximum = str(self.NUM_MOMENTS)))
        if ((not self.hsurpluses) or
            (len(self.hsurpluses) != len(self._grid_indices))):
            raise StateError(
                'Hierarchical surpluses are missing or outdated. ' +
                'Call the update_hierarchical_surpluses() method first.')
        # Evaluate expectation of basis function corresponding to each gridpoint
        # where the multivariate basis function is expressed as a product of
        # univariate basis functions
        basis_func_integrals = [ self.stochastic_model.expectation(
            [self.dims[dim].get_basis_function(node)
             for dim, node in enumerate(gp)] )
                                 for gp in self._grid_indices ]
        # Moment is the sum of the basis function expectations weighted by
        # hierarchical surpluses
        return [sum(it.imap(op.mul, (h[m-1] for h in self.hsurpluses),
                            basis_func_integrals)) for m in moments]

    def compute_error_indicator(self, modified = False):
        '''Compute the error indicator for this domain, which is the maximum
        value among the hierarchical surpluses of the gridpoints added at the
        highest level. If modified is True, then the same is computed for the
        products of the hierarchical surpluses with the expectations of the
        corresponding basis functions
        '''
        # Global interpolation level of each gridpoint
        interp_level = [ reduce(op.add, (gi[0] for gi in g))
                         for g in self._grid_indices ]
        if modified:
            return max( (self.hsurpluses[i][0]*
                         reduce(op.mul, (self.dims[dim].integrate(node)
                                         for dim, node in enumerate(gp)))
                         for i, gp in enumerate(self._grid_indices)
                         if interp_level[i] == (self.q + self.ndim)) )
        else:
            return max( (self.hsurpluses[i][0]
                         for i in xrange(len(self._grid_indices))
                         if interp_level[i] == (self.q + self.ndim)) )
class StateError(Exception):
    '''Raised when a RandomDomain method is invoked while the object is in an
    invalid state -- for example, evaluating the interpolant or requesting
    moments before the hierarchical surpluses have been computed.
    '''

    def __init__(self, message):
        # Keep the message on the instance so callers can inspect it directly.
        self.message = message

    def __str__(self):
        return self.message
| aravindalwan/unyque | unyque/rdomain.py | Python | gpl-3.0 | 9,368 |
import unittest
import numpy as np
from kona.linalg.memory import KonaMemory
from dummy_solver import DummySolver
from kona.linalg.vectors.composite import ReducedKKTVector
class ReducedKKTVectorTestCase(unittest.TestCase):
    """Unit tests for ReducedKKTVector: composite arithmetic over a primal
    vector (length 10, filled with 2s) and a dual vector (length 5)."""

    def setUp(self):
        # Solver with 10 design variables, 0 state, 5 dual variables.
        solver = DummySolver(10, 0, 5)
        self.km = km = KonaMemory(solver)
        km.primal_factory.request_num_vectors(3)
        km.dual_factory.request_num_vectors(3)
        km.allocate_memory()
        # can't create bare KonaVectors because the memory manager doesn't
        # like them, so I'll just use the PrimalVector to test the
        # KonaVectorMethods
        self.pv1 = km.primal_factory.generate()
        self.dv1 = km.dual_factory.generate()
        self.pv1._data.data = 2*np.ones(10)
        self.dv1._data.data = 3*np.ones(5)
        self.pv2 = km.primal_factory.generate()
        self.dv2 = km.dual_factory.generate()
        self.pv2._data.data = 2*np.ones(10)
        self.dv2._data.data = 2*np.ones(5)
        self.rkkt_vec1 = ReducedKKTVector(self.pv1, self.dv1)
        self.rkkt_vec2 = ReducedKKTVector(self.pv2, self.dv2)

    def test_check_type(self):
        # _check_type must reject a non-ReducedKKTVector argument.
        try:
            self.rkkt_vec1._check_type(self.pv1)
        except TypeError as err:
            self.assertEqual(
                str(err),
                "CompositeVector() >> Wrong vector type. Must be " +
                "<class 'kona.linalg.vectors.composite.ReducedKKTVector'>")
        else:
            self.fail('TypeError expected')

    def test_bad_init_args(self):
        # Constructor must reject a dual vector in the primal slot...
        try:
            ReducedKKTVector(self.dv1, self.dv1)
        except TypeError as err:
            self.assertEqual(
                str(err),
                'CompositeVector() >> Unidentified primal vector.')
        else:
            self.fail('TypeError expected')
        # ...and a primal vector in the dual slot.
        try:
            ReducedKKTVector(self.pv1, self.pv1)
        except TypeError as err:
            self.assertEqual(
                str(err),
                'CompositeVector() >> Unidentified dual vector.')
        else:
            self.fail('TypeError expected')

    def test_equals(self):
        # equals() copies both component vectors.
        self.rkkt_vec2.equals(self.rkkt_vec1)
        err = self.dv2._data.data - self.dv1._data.data
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.pv2._data.data - self.pv1._data.data
        self.assertEqual(np.linalg.norm(err), 0)

    def test_plus(self):
        # In-place component-wise addition.
        self.rkkt_vec2.plus(self.rkkt_vec1)
        err = self.pv2._data.data - 4*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv2._data.data - 5*np.ones(5)
        self.assertEqual(np.linalg.norm(err), 0)

    def test_minus(self):
        # In-place component-wise subtraction.
        self.rkkt_vec2.minus(self.rkkt_vec1)
        err = self.pv2._data.data - 0*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv2._data.data - -1*np.ones(5)
        self.assertEqual(np.linalg.norm(err), 0)

    def test_times_vector(self):
        # Element-wise product: (2*2)^2*10 + (3*2)^2*5 = 160 + 180 = 340.
        self.rkkt_vec2.times(self.rkkt_vec1)
        self.assertEqual(self.rkkt_vec2.inner(self.rkkt_vec2), 340.)

    def test_times_scalar(self):
        # Scalar scaling works for both int and float factors.
        self.rkkt_vec2.times(3)
        err = self.pv2._data.data - 6*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv2._data.data - 6*np.ones(5)
        self.assertEqual(np.linalg.norm(err), 0)
        self.rkkt_vec1.times(3.0)
        err = self.pv1._data.data - 6*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv1._data.data - 9*np.ones(5)
        self.assertEqual(np.linalg.norm(err), 0)

    def test_divide_by(self):
        # In-place scalar division.
        self.rkkt_vec2.divide_by(2)
        err = self.pv2._data.data - 1*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv2._data.data - 1*np.ones(5)
        self.assertEqual(np.linalg.norm(err), 0)

    def test_equals_ax_p_by(self):
        # Linear combination: 2*vec1 + 2*vec2.
        self.rkkt_vec2.equals_ax_p_by(2, self.rkkt_vec1, 2, self.rkkt_vec2)
        err = self.pv2._data.data - 8*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv2._data.data - 10*np.ones(5)
        self.assertEqual(np.linalg.norm(err), 0)

    def test_inner(self):
        # Inner product: 2*2*10 + 3*2*5 = 70.
        ip = self.rkkt_vec2.inner(self.rkkt_vec1)
        self.assertEqual(ip, 70)

    def test_norm2(self):
        # L2 norm: sqrt(2^2*10 + 2^2*5) = sqrt(60).
        ip = self.rkkt_vec2.norm2
        self.assertEqual(ip, 60**.5)

    def test_equals_initial_guess(self):
        # Initial guess fills primal with 10s and dual with 1s.
        self.rkkt_vec2.equals_init_guess()
        err = self.pv2._data.data - 10*np.ones(10)
        self.assertEqual(np.linalg.norm(err), 0)
        err = self.dv2._data.data - (np.ones(5))
        self.assertEqual(np.linalg.norm(err), 0)
if __name__ == "__main__":
unittest.main()
| JustinSGray/Kona | src/kona/test/test_reduced_kkt_vector.py | Python | lgpl-3.0 | 4,681 |
import logging
from monitor.core.utils import tr
class Colors:
    # Integer color-pair identifiers used by the monitor's rendering layer.
    # NOTE(review): values look like curses color-pair indices — confirm
    # against the code that initializes the pairs.
    STANDART = 1       # default text color (spelling kept: part of public API)
    HEALTH_POINTS = 2  # health-point display
    POWER_POINTS = 3   # power-point display
    ATTENTION = 4      # highlighted/alert text
    MONEY = 5          # currency display
    HEALING = 6        # healing-related text
class TextEntry:
    """A single fixed-width monitor line combining a static label with a
    dynamic value looked up from a state dict (or computed by a callable)."""

    def __init__(self, predefined_text, key, width, color = Colors.STANDART):
        # predefined_text: static label prefix
        # key: either a state-dict key (str) or a callable taking the state
        # width: total rendered width in characters
        self.predefined_text = predefined_text
        self.key = key
        self.width = width
        self.color = color
        self.attribute = None  # reserved; never set elsewhere in this class
        self.text = ''

    def update(self, state, attribute = None):
        """Rebuild self.text from *state*.

        Alignment rules: value right-aligned after the label by default;
        value centered when there is no label; label centered when there is
        no key. Missing keys render as the translated 'N/A'.
        """
        logging.debug('%s: Updating entry \'%s\'',
                      self.update.__name__,
                      self.predefined_text)
        # Width left for the value after the label and a 2-char margin.
        key_width = self.width - len(self.predefined_text) - 2
        custom_text = ''
        text_format = '{0}{1:>{2}}'
        if isinstance(self.key, str) and self.key == '' and self.predefined_text == '':
            # Neither label nor key: render an empty line.
            self.text = ''
            return
        # In case of empty predefined text use center alignment
        if self.predefined_text == '':
            text_format = '{0}{1:^{2}}'
        elif isinstance(self.key, str) and self.key == '':
            # Label only: center the label itself (argument 1 unused).
            text_format = '{0:^{2}}'
        if not isinstance(self.key, str):
            # Callable key: compute the value from the state.
            custom_text = self.key(state)
            if custom_text is None:
                custom_text = tr('N/A')
        elif self.key != '':
            try:
                custom_text = '{0}'.format(state[self.key])
            except KeyError:
                logging.warning('%s: Key not found \'%s\'',
                                self.update.__name__,
                                self.key)
                custom_text = tr('N/A')
        if key_width < 0:
            # Label alone is already wider than the allotted width.
            self.text = tr("{0} text doesn't fit").format(self.key)
            return
        self.text = text_format.format(self.predefined_text,
                                       custom_text,
                                       key_width)
class ListEntry:
    """A monitor entry whose content is a list of (text, color) rows produced
    by a generator callable applied to the current state."""

    def __init__(self, list_generator, width, color = Colors.STANDART):
        # list_generator: callable(state) yielding (item, color-or-None) pairs
        self.generator = list_generator
        self.width = width
        self.color = color
        self.text = []

    def update(self, state):
        """Regenerate the rows; rows with no explicit color get the
        standard color."""
        self.text = [
            (entry, row_color if row_color is not None else Colors.STANDART)
            for entry, row_color in self.generator(state)
        ]
| aserebryakov/godville-monitor-console | monitor/core/text_entry.py | Python | gpl-2.0 | 2,446 |
##############################################################
## PKI settings
##############################################################
import os
from django.conf import settings
from django.core.urlresolvers import get_script_prefix
# Directory containing this app; base for default storage paths.
PKI_APP_DIR = os.path.abspath(os.path.dirname(__file__))
# blacklisted CA names (reserved internal names that users may not take)
PKI_CA_NAME_BLACKLIST = ('_SELF_SIGNED_CERTIFICATES',)
# base directory for pki storage (should be writable), defaults to PKI_APP_DIR/PKI
PKI_DIR = getattr(settings, 'PKI_DIR', os.path.join(PKI_APP_DIR, 'PKI'))
# path to openssl executable
PKI_OPENSSL_BIN = getattr(settings, 'PKI_OPENSSL_BIN', '/usr/bin/openssl')
# path to generated openssl.conf
PKI_OPENSSL_CONF = getattr(settings, 'PKI_OPENSSL_CONF', os.path.join(PKI_DIR, 'openssl.conf'))
# template name for openssl.conf
PKI_OPENSSL_TEMPLATE = getattr(settings, 'PKI_OPENSSL_TEMPLATE', 'pki/openssl.conf.in')
# jquery url (defaults to pki/jquery-1.3.2.min.js)
JQUERY_URL = getattr(settings, 'JQUERY_URL', 'pki/js/jquery-1.5.min.js')
# logging (TODO: syslog, handlers and formatters)
PKI_LOG = getattr(settings, 'PKI_LOG', os.path.join(PKI_DIR, 'pki.log'))
PKI_LOGLEVEL = getattr(settings, 'PKI_LOGLEVEL', 'debug')
# get other settings directly from settings.py (no default: raises if unset):
ADMIN_MEDIA_PREFIX = getattr(settings, 'ADMIN_MEDIA_PREFIX')
# media url
MEDIA_URL = getattr(settings, 'MEDIA_URL')
# base url: Automatically determined from the WSGI script prefix
PKI_BASE_URL = getattr(settings, 'PKI_BASE_URL', get_script_prefix())
# self_signed_serial; The serial a self signed CA starts with. Set to 0 or 0x0 for a random number
PKI_SELF_SIGNED_SERIAL = getattr(settings, 'PKI_SELF_SIGNED_SERIAL', 0x0)
# default key length: The pre-selected key length (bits)
PKI_DEFAULT_KEY_LENGTH = getattr(settings, 'PKI_DEFAULT_KEY_LENGTH', 1024)
# default_country: The default country selected (2-letter country code)
PKI_DEFAULT_COUNTRY = getattr(settings, 'PKI_DEFAULT_COUNTRY', 'DE')
# passphrase_min_length: The minimum passphrase length
PKI_PASSPHRASE_MIN_LENGTH = getattr(settings, 'PKI_PASSPHRASE_MIN_LENGTH', 8)
# enable graphviz_support: When True django-pki will render Graphviz PNG's to show relations
PKI_ENABLE_GRAPHVIZ = getattr(settings, 'PKI_ENABLE_GRAPHVIZ', False)
# graphviz direction: From left to right (LR) or top down (TD)
PKI_GRAPHVIZ_DIRECTION = getattr(settings, 'PKI_GRAPHVIZ_DIRECTION', 'LR')
# enable email delivery: Certificates with defined email address can be sent via email
PKI_ENABLE_EMAIL = getattr(settings, 'PKI_ENABLE_EMAIL', False)
| dkerwin/django-pki | pki/settings.py | Python | gpl-2.0 | 2,505 |
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
"""Cooperative low-level networking interface.
This module provides socket operations and some related functions.
The API of the functions and classes matches the API of the corresponding
items in the standard :mod:`socket` module exactly, but the synchronous functions
in this module only block the current greenlet and let the others run.
For convenience, exceptions (like :class:`error <socket.error>` and :class:`timeout <socket.timeout>`)
as well as the constants from the :mod:`socket` module are imported into this module.
"""
# Our import magic sadly makes this warning useless
# pylint: disable=undefined-variable
import sys
from gevent._compat import PY3
from gevent._util import copy_globals
if PY3:
from gevent import _socket3 as _source # python 2: pylint:disable=no-name-in-module
else:
from gevent import _socket2 as _source
# define some things we're expecting to overwrite; each module
# needs to define these
__implements__ = __dns__ = __all__ = __extensions__ = __imports__ = ()
class error(Exception):
    # Placeholder definition; the real socket.error is copied over this name
    # by the copy_globals() call below.
    errno = None
def getfqdn(*args):
    """Stub that is overwritten at import time by the real implementation
    copied from the backing socket module; calling this directly is a bug."""
    # pylint:disable=unused-argument
    raise NotImplementedError()
copy_globals(_source, globals(),
dunder_names_to_keep=('__implements__', '__dns__', '__all__',
'__extensions__', '__imports__', '__socket__'),
cleanup_globs=False)
# The _socket2 and _socket3 don't import things defined in
# __extensions__, to help avoid confusing reference cycles in the
# documentation and to prevent importing from the wrong place, but we
# *do* need to expose them here. (NOTE: This may lead to some sphinx
# warnings like:
# WARNING: missing attribute mentioned in :members: or __all__:
# module gevent._socket2, attribute cancel_wait
# These can be ignored.)
from gevent import _socketcommon
copy_globals(_socketcommon, globals(),
only_names=_socketcommon.__extensions__)
try:
_GLOBAL_DEFAULT_TIMEOUT = __socket__._GLOBAL_DEFAULT_TIMEOUT
except AttributeError:
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
    """
    # NOTE: getaddrinfo/socket/AF_INET/has_ipv6 etc. are injected into this
    # module's namespace by copy_globals() above, not imported directly.
    host, port = address
    err = None
    # Try every resolved address family/sockaddr until one connects.
    for res in getaddrinfo(host, port, 0 if has_ipv6 else AF_INET, SOCK_STREAM):
        af, socktype, proto, _, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # Success: hand the connected socket to the caller.
            return sock
        except error as ex:
            # without exc_clear(), if connect() fails once, the socket is referenced by the frame in exc_info
            # and the next bind() fails (see test__socket.TestCreateConnection)
            # that does not happen with regular sockets though, because _socket.socket.connect() is a built-in.
            # this is similar to "getnameinfo loses a reference" failure in test_socket.py
            if not PY3:
                sys.exc_clear() # pylint:disable=no-member,useless-suppression
            if sock is not None:
                sock.close()
            # Remember the last failure; re-raised if every candidate fails.
            err = ex
    if err is not None:
        raise err # pylint:disable=raising-bad-type
    else:
        # getaddrinfo returned no candidates at all.
        raise error("getaddrinfo returns an empty list")
# This is promised to be in the __all__ of the _source, but, for circularity reasons,
# we implement it in this module. Mostly for documentation purposes, put it
# in the _source too.
_source.create_connection = create_connection
| burzillibus/RobHome | venv/lib/python2.7/site-packages/gevent/socket.py | Python | mit | 4,311 |
"""Categorisation related implementations."""
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from django.conf import settings
from ctrack import models
class Categoriser:
    """Abstract interface for transaction categorisers.

    Concrete subclasses must implement ``fit``, ``predict``,
    ``to_bytes`` and ``from_bytes``.
    """

    def fit(self):
        """Train the underlying model; must be overridden."""
        raise NotImplementedError("Must be subclassed.")

    def predict(self, text):
        """Return category suggestions for ``text``; must be overridden."""
        raise NotImplementedError("Must be subclassed.")

    def to_bytes(self):
        """Return a serialised form of this categoriser; must be overridden."""
        raise NotImplementedError("Must be subclassed.")

    @staticmethod
    def from_bytes(data):
        """Rebuild a categoriser from ``data``; must be overridden."""
        raise NotImplementedError("Must be subclassed.")
class SklearnCategoriser(Categoriser):
    """
    A Scikit Learn based categoriser.

    Trains a bag-of-words + TF-IDF + SGD (logistic loss) pipeline on the
    descriptions of already-categorised transactions, then uses predicted
    class probabilities to suggest categories for new descriptions.
    """
    #: The threshold at which to determine only a single category.
    #: If prob for a category is less than this then we give multiple
    #: suggestions.
    THRESH = 0.2

    def __init__(self, clf=None):
        # clf: an already-fitted sklearn Pipeline, or None to fit lazily
        # on first predict().
        self._clf = clf

    def fit(self):
        """Train a model using existing (already categorised) records."""
        data = models.Transaction.objects.filter(category__isnull=False).values_list('description', 'category__name')
        data = np.array(data)
        text_clf = Pipeline([('vect', CountVectorizer()),
                             ('tfidf', TfidfTransformer()),
                             ('clf', SGDClassifier(loss='log', penalty='l2',
                                                   alpha=1e-3,
                                                   random_state=42)),
                             ])
        # Column 0 holds descriptions, column 1 the category labels.
        text_clf = text_clf.fit(data[:, 0], data[:, 1])
        self._clf = text_clf

    def predict(self, text):
        """Use the model to predict categories.

        Returns a descending-probability ``pandas.Series`` of category
        names: a single entry when the top probability exceeds
        ``THRESH``, otherwise the leading entries whose cumulative
        probability stays below ``THRESH``.
        """
        if self._clf is None:
            self.fit()
        try:
            dumped_file = settings.CTRACK_CATEGORISER_FILE
        except AttributeError:
            dumped_file = None
        if dumped_file:
            # Persist the (possibly freshly-fitted) model so later
            # processes can reload it instead of re-training.
            with open(dumped_file, 'wb') as fobj:
                fobj.write(self.to_bytes())
        probs = self._clf.predict_proba([text])
        probs = pd.Series(probs[0], index=self._clf.classes_).sort_values()[::-1]
        if probs.iloc[0] > self.THRESH:
            suggest = probs.iloc[:1]
        else:
            # Bug fix: ``Series.ix`` was deprecated in pandas 0.20 and
            # removed in 1.0; boolean selection via ``.loc`` is the
            # supported equivalent.
            suggest = probs.loc[probs.cumsum() < self.THRESH]
        return suggest

    def to_bytes(self):
        """Serialise the model as a byte sequence."""
        return pickle.dumps(self._clf)

    @staticmethod
    def from_bytes(data):
        """Load object from bytes."""
        return SklearnCategoriser(pickle.loads(data))
def _init():
    """Build the module-level categoriser from Django settings.

    Honours ``CTRACK_CATEGORISER`` (class name, defaults to
    ``SklearnCategoriser``) and ``CTRACK_CATEGORISER_FILE`` (optional
    serialised-model cache); loads from the cache file when it exists.
    """
    dumped_file = getattr(settings, 'CTRACK_CATEGORISER_FILE', None)
    clsname = getattr(settings, 'CTRACK_CATEGORISER', 'SklearnCategoriser')
    cls = globals()[clsname]
    if dumped_file and os.path.isfile(dumped_file):
        with open(dumped_file, 'rb') as fobj:
            return cls.from_bytes(fobj.read())
    return cls()


# Module-level categoriser instance used by the rest of the app.
categoriser = _init()
| dmkent/cattrack | ctrack/categories.py | Python | mit | 3,425 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
import os
import traceback
from pysollib.mygettext import _
from pysollib.settings import TITLE
from pysollib.settings import VERSION
from pysollib.settings import TOOLKIT, USE_TILE
from pysollib.settings import DEBUG
from pysollib.mfxutil import print_err
if TOOLKIT == 'tk':
if USE_TILE:
from pysollib.tile import ttk
def init_tile(app, top):
    """Register all Ttk themes shipped in the app's ``themes`` data dir."""
    themes_dir = os.path.join(app.dataloader.dir, 'themes')
    if not os.path.isdir(themes_dir):
        return
    # Make the directory visible to Tcl's package auto-loader.
    top.tk.eval('global auto_path; lappend auto_path {%s}' % themes_dir)
    for name in os.listdir(themes_dir):
        # Only real theme packages carry a pkgIndex.tcl file.
        if not os.path.exists(os.path.join(themes_dir, name, 'pkgIndex.tcl')):
            continue
        try:
            top.tk.eval('package require ttk::theme::' + name)
        except Exception:
            # A broken theme should not abort start-up; just report it.
            traceback.print_exc()
def set_theme(app, top, theme):
    """Activate ``theme`` on ``top``, falling back to the configured default."""
    ttk_style = ttk.Style(top)
    try:
        ttk_style.theme_use(theme)
    except Exception:
        # Unknown theme name: warn and revert to the application default.
        print_err(_('invalid theme name: ') + theme)
        ttk_style.theme_use(app.opt.default_tile_theme)
def get_font_name(font):
    """Normalise a font spec to a (family, size, slant, weight) tuple.

    E.g. "helvetica 12" -> ("helvetica", 12, "roman", "normal").
    Returns None when the font spec cannot be parsed.
    """
    if (TOOLKIT == 'kivy'):
        # Kivy has no Tk font machinery; use a fixed default.
        return "helvetica 12"
    from six.moves.tkinter_font import Font
    try:
        parsed = Font(font=font)
    except Exception:
        print_err(_('invalid font name: ') + font)
        if DEBUG:
            traceback.print_exc()
        return None
    actual = parsed.actual()
    return (actual['family'],
            actual['size'],
            actual['slant'],
            actual['weight'])
def base_init_root_window(root, app):
    """Apply common root-window setup: title, icon name, minimum size,
    and (for Ttk-based toolkits) the configured tile theme."""
    # root.wm_group(root)
    window_title = TITLE + ' ' + VERSION
    root.wm_title(window_title)
    root.wm_iconname(window_title)
    # Pick a minimum size that still fits small screens.
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    if screen_w < 640 or screen_h < 480:
        root.wm_minsize(400, 300)
    else:
        root.wm_minsize(520, 360)
    if TOOLKIT == 'gtk':
        pass
    if TOOLKIT == 'kivy':
        # Kivy manages its own theming.
        pass
    elif USE_TILE:
        init_tile(app, root)
        set_theme(app, root, app.opt.tile_theme)
    else:
        pass
class BaseTkSettings:
    # Shared visual settings for Tk-based window systems; the platform
    # modules subclass this and may override individual values.
    canvas_padding = (0, 0)
    horizontal_toolbar_padding = (0, 0)
    vertical_toolbar_padding = (0, 1)
    toolbar_button_padding = (2, 2)
    toolbar_label_padding = (4, 4)
    # Ttk widgets draw their own borders, so the toolbar stays flat;
    # classic Tk widgets need explicit relief/borderwidth settings.
    if USE_TILE:
        toolbar_relief = 'flat'
        toolbar_borderwidth = 0
    else:
        toolbar_relief = 'raised'
        toolbar_button_relief = 'flat'
        toolbar_separator_relief = 'sunken'
        toolbar_borderwidth = 1
        toolbar_button_borderwidth = 1
| jimsize/PySolFC | pysollib/winsystems/common.py | Python | gpl-3.0 | 3,751 |
import copy
from django.conf import settings
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed, KafkaCheckpointEventHandler
from corehq.elastic import get_es_new
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_INDEX_INFO
from pillowtop.checkpoints.manager import get_checkpoint_for_elasticsearch_pillow
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors import ElasticProcessor
from pillowtop.reindexer.change_providers.case import get_domain_case_change_provider
from pillowtop.reindexer.reindexer import ElasticPillowReindexer, ReindexerFactory
from .base import convert_property_dict, is_couch_change_for_sql_domain
def report_case_filter(doc_dict):
    """
    :return: True to filter out doc
    """
    # Full indexing is opt-in: any domain outside the configured list
    # is skipped.
    enabled_domains = getattr(settings, 'ES_CASE_FULL_INDEX_DOMAINS', [])
    return doc_dict.get('domain') not in enabled_domains
def transform_case_to_report_es(doc_dict):
    """Deep-copy ``doc_dict`` and coerce its properties to match the
    report-case mapping before indexing (root keys listed below are
    kept as-is)."""
    transformed = copy.deepcopy(doc_dict)
    convert_property_dict(
        transformed,
        REPORT_CASE_INDEX_INFO.mapping,
        override_root_keys=['_id', 'doc_type', '_rev', '#export_tag']
    )
    return transformed
def get_case_to_report_es_processor():
    """Build the Elastic processor that writes report cases to ES.

    NOTE(review): unlike the processor built inside
    get_report_case_to_elasticsearch_pillow, this one also sets
    ``change_filter_fn`` — presumably intentional; confirm before reuse.
    """
    return ElasticProcessor(
        elasticsearch=get_es_new(),
        index_info=REPORT_CASE_INDEX_INFO,
        doc_prep_fn=transform_case_to_report_es,
        doc_filter_fn=report_case_filter,
        change_filter_fn=is_couch_change_for_sql_domain
    )
def get_report_case_to_elasticsearch_pillow(pillow_id='ReportCaseToElasticsearchPillow',
                                            num_processes=1, process_num=0, **kwargs):
    """Construct the Kafka-fed pillow that mirrors cases into the
    report-case Elasticsearch index.

    :param pillow_id: must stay fixed; checkpoints are keyed on it.
    :param num_processes: total number of parallel consumer processes.
    :param process_num: index of this process within ``num_processes``.
    """
    # todo; To remove after full rollout of https://github.com/dimagi/commcare-hq/pull/21329/
    assert pillow_id == 'ReportCaseToElasticsearchPillow', 'Pillow ID is not allowed to change'
    checkpoint = get_checkpoint_for_elasticsearch_pillow(pillow_id, REPORT_CASE_INDEX_INFO, topics.CASE_TOPICS)
    form_processor = ElasticProcessor(
        elasticsearch=get_es_new(),
        index_info=REPORT_CASE_INDEX_INFO,
        doc_prep_fn=transform_case_to_report_es,
        doc_filter_fn=report_case_filter,
    )
    kafka_change_feed = KafkaChangeFeed(
        topics=topics.CASE_TOPICS, client_id='report-cases-to-es', num_processes=num_processes,
        process_num=process_num
    )
    return ConstructedPillow(
        name=pillow_id,
        checkpoint=checkpoint,
        change_feed=kafka_change_feed,
        processor=form_processor,
        # Commit a Kafka checkpoint every 100 processed changes.
        change_processed_event_handler=KafkaCheckpointEventHandler(
            checkpoint=checkpoint, checkpoint_frequency=100, change_feed=kafka_change_feed
        ),
    )
class ReportCaseReindexerFactory(ReindexerFactory):
    # Registered under the ``report-case`` management-command slug.
    slug = 'report-case'
    arg_contributors = [
        ReindexerFactory.elastic_reindexer_args,
    ]

    def build(self):
        """Returns a reindexer that will only reindex data from enabled domains
        """
        # Same opt-in domain list used by report_case_filter above.
        domains = getattr(settings, 'ES_CASE_FULL_INDEX_DOMAINS', [])
        change_provider = get_domain_case_change_provider(domains=domains)
        return ElasticPillowReindexer(
            pillow_or_processor=get_report_case_to_elasticsearch_pillow(),
            change_provider=change_provider,
            elasticsearch=get_es_new(),
            index_info=REPORT_CASE_INDEX_INFO,
            **self.options
        )
| dimagi/commcare-hq | corehq/pillows/reportcase.py | Python | bsd-3-clause | 3,461 |
# Copyright (C) 2012 University of Southern California
# This software is licensed under the GPLv3 license, included in
# ./GPLv3-LICENSE.txt in the source distribution
from base import Testbed
import logging
import socket
log = logging.getLogger(__name__)
class DesktopExperiment(Testbed):
    """Testbed implementation for a stand-alone desktop machine.

    All topology/control information is derived from the local host
    (or from the node name given via the ``node`` hint).
    """
    def __init__(self, **hint):
        Testbed.__init__(self)
        self._store = {}
        if 'node' in hint:
            self._store['node'] = hint['node']

    def setNodeName(self, nodename):
        """Set the node name and (re)load all derived information."""
        self._store['node'] = nodename
        self.loadTopoGraph()
        self.loadControlInfo()
        self.loadIfConfig()

    def getExperimentDir(self):
        # Desktop runs have no per-experiment directory; use /tmp.
        return "/tmp"

    def getFQDN(self):
        return self.nodename

    def getServer(self, FQDN=False):
        # NOTE(review): the FQDN argument is ignored and False is always
        # forwarded to the base class -- confirm this is intentional.
        return Testbed.getServer(self, FQDN=False)

    def toControlPlaneNodeName(self, nodename):
        # On a desktop the control-plane name is the node name itself.
        return nodename

    """ Functions that actually load the data into our _store """
    def loadEID(self):
        """ Load the nickname file to get the node, experiment and project names """
        try:
            if 'node' not in self._store:
                self._store['node'] = socket.gethostname()
        except Exception:
            log.exception("Can't load host information")

    def loadControlInfo(self):
        """ Load the control IP address and IF name """
        try:
            self._store.update(controlip='?', controlif='?')
            self._store['controlip'] = socket.gethostbyname(self.nodename)
            self._store['controlif'] = self.getInterfaceInfo(self.controlip).name
        # Bug fix: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            log.exception("Can't load control interface info")

    def loadIfConfig(self):
        """ Load all of the interface info """
        try:
            iflist = []
            iflist.append(self.getIfconfigData(self.controlip))
            self._store['iflist'] = iflist
        # Bug fix: was a bare ``except:``.
        except Exception:
            log.exception("Can't load interface config data")

    def loadTopoGraph(self):
        """Build a one-node topology graph for this host."""
        import networkx as nx
        graph = nx.Graph()
        graph.add_node(self.nodename, links={})
        self._store['topograph'] = graph
# Small test if running this file directly
# (Python 2 syntax, matching the rest of this module.)
if __name__ == "__main__":
    logging.basicConfig()
    # Default construction: node name taken from the local machine.
    x = DesktopExperiment()
    print 'Node Name:', x.nodename
    print 'FQDN:', x.fqdn
    print 'Control IP:', x.controlip
    print 'Control IF:', x.controlif
    print 'Server Node:', x.getServer()
    iplist = x.getLocalIPList()
    print 'Exp. Addresses: %s' % iplist
    print 'Exp. Interface info:'
    for ip in iplist:
        print '\t%s: %s' % (ip, x.getInterfaceInfo(ip))
    # Construction with an explicitly named node.
    y = DesktopExperiment(node='tau')
    print 'Configured Node Name:', y.nodename
| deter-project/magi | magi/testbed/desktop.py | Python | gpl-2.0 | 2,768 |
# Generated by Django 2.2.10 on 2020-02-15 22:05
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: re-binds the default manager of the
    # ``diventiuser`` model to DiventiUserManager. Do not edit by hand.

    dependencies = [
        ('accounts', '0247_auto_20200215_1909'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='diventiuser',
            managers=[
                ('objects', diventi.accounts.models.DiventiUserManager()),
            ],
        ),
    ]
| flavoi/diventi | diventi/accounts/migrations/0248_auto_20200215_2305.py | Python | apache-2.0 | 453 |
"""
Module that enables the use of neural networks with NowTrade.
"""
import cPickle
import numpy as np
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from nowtrade import logger
# Networks
FEED_FORWARD_NETWORK = 0
RECURRENT_NETWORK = 1
# Datasets
SUPERVISED_DATASET = 0
SEQUENTIAL_DATASET = 1
CLASSIFICATION_DATASET = 2
SEQUENTIAL_CLASSIFICATION_DATASET = 3
IMPORTANCE_DATASET = 4
# Trainers
BACKPROP_TRAINER = 0
RPROP_TRAINER = 1
def load(network, dataset=None):
    """Deserialise a pickled NeuralNetwork.

    If *dataset* is supplied, the pybrain network and trainer are
    rebuilt against it (without creating a new network).
    """
    restored = cPickle.loads(network)
    if dataset:
        restored.build_network(dataset, new=False)
    return restored
def load_from_file(filename, dataset=None):
    """
    Load a neural network from a previous one saved to file.

    Uses a context manager so the file handle is closed even when
    unpickling raises (the original leaked the handle on error).
    """
    with open(filename, 'rb') as file_handler:
        network = cPickle.load(file_handler)
    if dataset:
        network.build_network(dataset, new=False)
    return network
class InvalidNetworkType(Exception):
    """Raised when an invalid network type is specified."""


class InvalidTrainerType(Exception):
    """Raised when an invalid trainer type is specified."""


class InvalidNetworkDatasetType(Exception):
    """Raised when an invalid network dataset type is specified."""


class InvalidDataset(Exception):
    """Raised when an invalid dataset is specified."""
class NeuralNetwork(object):
    """
    The neural network class does all the heavy lifting to incorporate pybrain
    neural networks into the NowTrade ecosystem.

    Typical flow: construct with the training/prediction column names,
    call build_network() with a dataset, train(), then activate_all()
    on a dataframe to get predictions.
    """
    def __init__(self, train_data, prediction_data, network_type=FEED_FORWARD_NETWORK,
                 network_dataset_type=SUPERVISED_DATASET,
                 trainer_type=BACKPROP_TRAINER):
        # train_data: list of input column names; prediction_data: the
        # target column(s) to predict.
        self.train_data = train_data
        self.prediction_data = prediction_data
        self.network_type = network_type
        self.network_dataset_type = network_dataset_type
        self.trainer_type = trainer_type
        # The pybrain objects are created lazily in build_network().
        self.network = None
        self.network_dataset = None
        self.dataset = None
        self.trainer = None
        self.trained_iterations = 0
        # Hyperparameters are set in build_network() via kwargs.
        self.momentum = None
        self.learning_rate = None
        self.hidden_layers = None
        self.prediction_window = None
        self.logger = logger.Logger(self.__class__.__name__)
        self.logger.info('train_data: %s prediction_data: %s, network_type: %s, \
                         network_dataset_type: %s, trainer_type: %s'
                         %(train_data, prediction_data, network_type, \
                         network_dataset_type, trainer_type))

    def save(self):
        """
        Returns the pickled trained/tested neural network as a string.
        """
        return cPickle.dumps(self)

    def save_to_file(self, filename):
        """
        Saves a neural network to file for later use.
        Look into pybrain.datasets.supervised.SupervisedDataSet.saveToFile()
        http://pybrain.org/docs/api/datasets/superviseddataset.html
        """
        file_handler = open(filename, 'wb')
        cPickle.dump(self, file_handler)
        file_handler.close()

    def build_network(self, dataset, new=True, **kwargs):
        """
        Builds a neural network using the dataset provided.
        Expected keyword args:
        - 'hidden_layers'
        - 'prediction_window'
        - 'learning_rate'
        - 'momentum'

        With new=False, re-sorts the modules of an existing (unpickled)
        network instead of creating a fresh one.
        """
        self.hidden_layers = kwargs.get('hidden_layers', 3)
        self.prediction_window = kwargs.get('prediction_window', 1)
        self.learning_rate = kwargs.get('learning_rate', 0.1)
        self.momentum = kwargs.get('momentum', 0.01)
        if not new:
            # Re-activate a deserialised network.
            self.network.sorted = False
            self.network.sortModules()
            if self.network_dataset_type == SUPERVISED_DATASET:
                self.ready_supervised_dataset(dataset)
            else: raise InvalidNetworkDatasetType()
        else:
            if self.network_type == FEED_FORWARD_NETWORK:
                # One input neuron per training column, a single output.
                self.network = buildNetwork(len(self.train_data), self.hidden_layers, 1)
            else: raise InvalidNetworkType()
            if self.network_dataset_type == SUPERVISED_DATASET:
                self.ready_supervised_dataset(dataset)
            else: raise InvalidNetworkDatasetType()
        if self.trainer_type == BACKPROP_TRAINER:
            self.trainer = BackpropTrainer(self.network,
                                           learningrate=self.learning_rate,
                                           momentum=self.momentum,
                                           verbose=True)
            self.trainer.setData(self.network_dataset)
        else: raise InvalidTrainerType()

    def ready_supervised_dataset(self, dataset):
        """
        Ready the supervised dataset for training.
        @TODO: Need to randomize the data being fed to the network.
        See randomBatches() here: http://pybrain.org/docs/api/datasets/superviseddataset.html
        """
        self.network_dataset = SupervisedDataSet(len(self.train_data), 1)
        # Currently only supports log function for normalizing data
        training_values = np.log(dataset.data_frame[self.train_data])
        # Target is the prediction column shifted back by the window so
        # each sample is paired with its future value.
        results = np.log(dataset.data_frame[self.prediction_data].shift(-self.prediction_window))
        training_values['PREDICTION_%s' %self.prediction_data[0]] = results
        training_values = training_values.dropna()
        for _, row_data in enumerate(training_values.iterrows()):
            _, data = row_data
            # All columns except the last are inputs; the last is the target.
            sample = list(data[:-1])
            result = [data[-1]]
            self.network_dataset.addSample(sample, result)

    def train(self, cycles=1):
        """
        Trains the network the number of iteration specified in the cycles parameter.
        Returns the error of the last training cycle.
        """
        for _ in range(cycles):
            res = self.trainer.train()
            self.trained_iterations += 1
        return res

    def train_until_convergence(self, max_cycles=1000, continue_cycles=10,
                                validation_proportion=0.25):
        """
        Wrapper around the pybrain BackpropTrainer trainUntilConvergence method.
        @see: http://pybrain.org/docs/api/supervised/trainers.html

        NOTE(review): trainUntilConvergence returns the error curves, not
        a trainer, yet the result is assigned to self.trainer -- confirm
        whether further training after this call is expected to work.
        """
        self.trainer = \
            self.trainer.trainUntilConvergence(maxEpochs=max_cycles,
                                               continueEpochs=continue_cycles,
                                               validationProportion=validation_proportion)

    def _activate(self, data):
        """
        Activates the network using the data specified.
        Returns the network's prediction.
        """
        return self.network.activate(data)[0]

    def activate_all(self, data_frame):
        """
        Activates the network for all values in the dataframe specified.
        Inputs are log-normalized to match training; predictions are
        mapped back with exp().
        """
        dataframe = np.log(data_frame[self.train_data])
        res = []
        for _, row_data in enumerate(dataframe.iterrows()):
            _, data = row_data
            sample = list(data)
            res.append(self._activate(sample))
        return np.exp(res)
| edouardpoitras/NowTrade | nowtrade/neural_network.py | Python | mit | 7,380 |
import datetime
import pytz
import requests
from flask import flash
from fernet import app
API_URL = app.config['TEKNOLOGKORENSE_API_URL']
AUTH = requests.auth.HTTPBasicAuth(
app.config['TEKNOLOGKORENSE_API_USERNAME'],
app.config['TEKNOLOGKORENSE_API_PASSWORD']
)
def make_request(requests_func, url, data=None, files=None):
    """Call *requests_func* on *url* with the shared API credentials.

    Returns the response when the connection succeeded and the status
    is OK; otherwise flashes a user-facing error and returns None.
    """
    try:
        response = requests_func(url, json=data, files=files, auth=AUTH)
    except requests.exceptions.ConnectionError:
        flash('Failed to connect to teknologkoren.se.', 'error')
        return None
    except requests.exceptions.Timeout:
        flash('Connection to teknologkoren.se timed out.', 'error')
        return None
    if response.ok:
        return response
    if response.status_code == 404:
        flash('Whatever you were trying access does not exist.', 'error')
        return None
    flash('Something went wrong, try again or ask webmaster for help.',
          'error')
    # log error
    print(('API error: request = {}, url = {}, data = {}, files = {}'
           ', status_code = {}, response = {}'
           ).format(requests_func, url, data, files, response.status_code,
                    response.json()))
    return None
def make_get(url):
    """GET *url* via the shared error-handling wrapper."""
    return make_request(requests.get, url)


def make_post(url, data=None, files=None):
    """POST to *url* via the shared error-handling wrapper."""
    return make_request(requests.post, url, data, files)


def make_put(url, data=None, files=None):
    """PUT to *url* via the shared error-handling wrapper."""
    return make_request(requests.put, url, data, files)


def make_delete(url):
    """DELETE *url* via the shared error-handling wrapper."""
    return make_request(requests.delete, url)
def get_all_posts():
    """Fetch every blog post."""
    return make_get("{}/posts".format(API_URL))


def get_post(post_id):
    """Fetch a single blog post by id."""
    return make_get("{}/posts/{}".format(API_URL, post_id))


def _post_json(title, content_sv, content_en, readmore_sv, readmore_en,
               published, image):
    """Build the JSON payload shared by post create/update calls."""
    return {
        'title': title,
        'content_sv': content_sv,
        'content_en': content_en or None,
        'readmore_sv': readmore_sv or None,
        'readmore_en': readmore_en or None,
        'published': published,
        'image': image,
    }


def new_post(title, content_sv, content_en, readmore_sv, readmore_en,
             published, image=None):
    """Upload a new post.
    `image` is not the actual image, it is the filename returned by an
    upload via `/api/images`.
    """
    data = _post_json(title, content_sv, content_en, readmore_sv,
                      readmore_en, published, image)
    return make_post("{}/posts".format(API_URL), data)


def update_post(post_id, title, content_sv, content_en, readmore_sv,
                readmore_en, published, image=None):
    """Update a post.
    `image` is not the actual image, it is the filename returned by an
    upload via `/api/images`.
    """
    data = _post_json(title, content_sv, content_en, readmore_sv,
                      readmore_en, published, image)
    return make_put("{}/posts/{}".format(API_URL, post_id), data)


def delete_post(post_id):
    """Delete a post by id."""
    return make_delete("{}/posts/{}".format(API_URL, post_id))
def get_all_events():
    """Fetch every event."""
    return make_get("{}/events".format(API_URL))


def get_event(event_id):
    """Get an event.

    Returns the event dict with its iso-format ``start_time`` string
    converted to a timezone-aware Stockholm-local datetime, or the
    falsy response value when the request failed.
    """
    response = make_get("{}/events/{}".format(API_URL, event_id))
    if not response:
        return response
    event = response.json()
    # The API stores start times in UTC; present them in local time.
    stockholm = pytz.timezone('Europe/Stockholm')
    naive_utc = datetime.datetime.strptime(event['start_time'],
                                           '%Y-%m-%dT%H:%M')
    event['start_time'] = pytz.utc.localize(
        naive_utc, is_dst=None).astimezone(stockholm)
    return event
def _event_json(title, content_sv, content_en, readmore_sv, readmore_en,
                published, start_time, location, image):
    """Build the JSON payload shared by event create/update calls.

    *start_time* is entered in local (Stockholm) time and is converted
    to a UTC iso-format string for the API.
    """
    stockholm = pytz.timezone('Europe/Stockholm')
    utc_start = stockholm.localize(start_time, is_dst=None).astimezone(pytz.utc)
    return {
        'title': title,
        'content_sv': content_sv,
        'content_en': content_en or None,
        'readmore_sv': readmore_sv or None,
        'readmore_en': readmore_en or None,
        'published': published,
        'start_time': utc_start.strftime('%Y-%m-%dT%H:%M'),
        'location': location,
        'image': image,
    }


def new_event(title, content_sv, content_en, readmore_sv, readmore_en,
              published, start_time, location, image=None):
    """Upload a new event.
    `image` is not the actual image, it is the filename returned by an
    upload via `/api/images`.
    """
    data = _event_json(title, content_sv, content_en, readmore_sv,
                       readmore_en, published, start_time, location, image)
    return make_post("{}/events".format(API_URL), data)


def update_event(event_id, title, content_sv, content_en, readmore_sv,
                 readmore_en, published, start_time, location, image=None):
    """Update an event.
    `image` is not the actual image, it is the filename returned by an
    upload via `/api/images`.
    """
    data = _event_json(title, content_sv, content_en, readmore_sv,
                       readmore_en, published, start_time, location, image)
    return make_put("{}/events/{}".format(API_URL, event_id), data)


def delete_event(event_id):
    """Delete an event by id."""
    return make_delete("{}/events/{}".format(API_URL, event_id))
def upload_image(image):
    """Upload an image file; the API responds with its stored filename."""
    files = {'image': (image.filename, image.read())}
    return make_post("{}/images".format(API_URL), files=files)


def get_all_contacts():
    """Fetch every contact."""
    return make_get("{}/contact".format(API_URL))


def get_contact(contact_id):
    """Fetch a single contact by id.

    Bug fix: the format string has two placeholders but ``contact_id``
    was never passed, so every call raised IndexError.
    """
    return make_get("{}/contact/{}".format(API_URL, contact_id))
def new_contact(title, first_name, last_name, email, phone, weight):
    """Create a new contact entry."""
    payload = {
        'title': title,
        'first_name': first_name,
        'last_name': last_name,
        'email': email,
        'phone': phone,
        'weight': weight,
    }
    return make_post("{}/contact".format(API_URL), payload)


def delete_contact(contact_id):
    """Delete a contact by id."""
    return make_delete("{}/contact/{}".format(API_URL, contact_id))
| teknologkoren/fernet | fernet/teknologkoren_se.py | Python | mpl-2.0 | 6,430 |
# Copyright (C) 2008-2010 INRIA - EDF R&D
# Author: Damien Garaud
#
# This file is part of the PuppetMaster project. It checks the module
# 'network'.
#
# This script is free; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
import sys
import socket
import unittest
from puppetmaster import network
# Names of the test methods in this module, in intended run order;
# used by external runners that build the suite by hand.
test_method_name = ['testInit', 'testGetValue', 'testUsedMemory',
                    'testAvailableHost', 'testLaunchCommand']
class NetworkTestCase(unittest.TestCase):
    """Exercises puppetmaster.network.Network against the local host
    and, when a host file is supplied, against a list of remote hosts."""

    def __init__(self, methodName='runTest', host_file = None,
                 forced_ssh_config = False):
        unittest.TestCase.__init__(self, methodName)
        self.host_file = host_file
        self.forced_ssh_config = forced_ssh_config
        # If there is file.
        # (PEP 8 fix: compare to None with ``is``, not ``==``.)
        if self.host_file is None:
            self.is_file = False
        else:
            self.is_file = True

    def setUp(self):
        if self.is_file:
            self.net = network.Network(self.host_file, self.forced_ssh_config)
        # Just local host.
        self.net_local = network.Network()
        # The command which will be launched.
        self.command = "echo 'Hello World!'"

    def tearDown(self):
        pass

    def testInit(self):
        # Checks the name and the number of cpu.
        # For the local host.
        self.assertTrue(self.net_local.hosts[0].name == socket.gethostname())
        self.assertTrue(self.net_local.GetNhost() == 1)
        self.assertTrue(self.net_local.hosts[0].connection)
        # Is there a file?
        if self.is_file:
            self.assertTrue(self.net.GetNhost() > 0)
            self.assertTrue(self.net.GetConnectedHostNumber() > 0)
        # Wrong argument.
        # An 'network' instance takes a list 'host' instance, list of string
        # or a file.
        self.assertRaises(ValueError, network.Network, 1)
        self.assertRaises(ValueError, network.Network, [])
        self.assertRaises(ValueError, network.Network, [1,2])
        self.assertRaises(ValueError, network.Network, 'no_file')

    def testGetValue(self):
        # For the local host.
        host_name = self.net_local.GetHostNames()
        proc_num = self.net_local.GetProcessorNumber()
        connected_num = self.net_local.GetConnectedHostNumber()
        # 'host_name' must be a list of string.
        self.assertTrue(isinstance(host_name, list))
        self.assertTrue(isinstance(host_name[0], str))
        # 'proc_num' must be a list of tuples (hostname, Nproc)
        self.assertTrue(isinstance(proc_num, list))
        self.assertTrue(isinstance(proc_num[0], tuple))
        self.assertTrue(isinstance(proc_num[0][0], str))
        self.assertTrue(isinstance(proc_num[0][1], int))
        # 'connected_num' must be an integer greater than 0.
        self.assertTrue(isinstance(connected_num, int))
        # Checks size.
        self.assertTrue(len(host_name) > 0)
        self.assertTrue(len(proc_num[0]) == 2)
        self.assertTrue(connected_num > 0)
        # For a list of hosts.
        if self.is_file:
            host_name = self.net.GetHostNames()
            proc_num = self.net.GetProcessorNumber()
            connected_num = self.net.GetConnectedHostNumber()
            # 'host_name' must be a list of string.
            self.assertTrue(isinstance(host_name, list))
            self.assertTrue(isinstance(host_name[0], str))
            # 'proc_num' must be a list of tuples (hostname, Nproc)
            self.assertTrue(isinstance(proc_num, list))
            self.assertTrue(isinstance(proc_num[0], tuple))
            self.assertTrue(isinstance(proc_num[0][0], str))
            self.assertTrue(isinstance(proc_num[0][1], int))
            # 'connected_num' must be an integer greater than 0.
            self.assertTrue(isinstance(connected_num, int))
            # Checks size.
            self.assertTrue(len(host_name) > 0)
            self.assertTrue(len(proc_num[0]) == 2)
            self.assertTrue(connected_num > 0)

    def testUsedMemory(self):
        # Gets used memory ('free' Unix command).
        # For the local host.
        used_mem = self.net_local.GetUsedMemory()
        # 'used_mem' must be a list of tuple (hostname, value).
        self.assertTrue(isinstance(used_mem, list))
        self.assertTrue(isinstance(used_mem[0], tuple))
        self.assertTrue(isinstance(used_mem[0][0], str))
        # Checks size.
        self.assertTrue(len(used_mem) == 1)
        self.assertTrue(len(used_mem[0]) == 2)
        # For a list of hosts.
        if self.is_file:
            used_mem = self.net.GetUsedMemory()
            # 'used_mem' must be a list of tuple (hostname, value).
            self.assertTrue(isinstance(used_mem, list))
            self.assertTrue(isinstance(used_mem[0], tuple))
            self.assertTrue(isinstance(used_mem[0][0], str))
            # Checks size.
            self.assertTrue(len(used_mem) >= 1)
            self.assertTrue(len(used_mem[0]) == 2)

    def testAvailableHost(self):
        # Gets available hosts (used 'uptime' Unix command).
        # For the local host.
        available_host = self.net_local.GetAvailableHosts()
        # 'available_host' must be a list of tuple (hostname, available_cpu).
        self.assertTrue(isinstance(available_host, list))
        if len(available_host) > 0:
            self.assertTrue(isinstance(available_host[0], tuple))
            self.assertTrue(isinstance(available_host[0][0], str))
            self.assertTrue(isinstance(available_host[0][1], int))
        # For a list of hosts.
        if self.is_file:
            available_host = self.net.GetAvailableHosts()
            # 'available_host' must be a list of tuple
            # (hostname, available_cpu).
            self.assertTrue(isinstance(available_host, list))
            if len(available_host) > 0:
                self.assertTrue(isinstance(available_host[0], tuple))
                self.assertTrue(isinstance(available_host[0][0], str))
                self.assertTrue(isinstance(available_host[0][1], int))

    def testLaunchCommand(self):
        import random
        # For the local host.
        status = self.net_local.LaunchInt(self.command)
        statusout = self.net_local.LaunchFG(self.command)
        popen4_instance = self.net_local.LaunchBG(self.command)
        subproc = self.net_local.LaunchSubProcess(self.command)
        wait_return = self.net_local.LaunchWait(self.command, 2., 0.2)
        # Checks type.
        self.assertTrue(isinstance(status, int))
        self.assertTrue(isinstance(statusout, tuple))
        self.assertTrue(isinstance(statusout[0], int))
        self.assertTrue(isinstance(wait_return, tuple))
        # The status must be '0'.
        self.assertTrue(status == 0)
        self.assertTrue(statusout[0] == 0)
        self.assertTrue(popen4_instance.wait() == 0)
        self.assertTrue(subproc.wait() == 0)
        self.assertTrue(wait_return[0] == 0)
        # For a random host.
        if self.is_file:
            index = random.randint(0, self.net.GetNhost() - 1)
            random_host = self.net.hosts[index]
            # Launches the command.
            status = self.net.LaunchInt(self.command + ' 2>/dev/null',
                                        random_host)
            statusout = self.net.LaunchFG(self.command, random_host)
            popen4_instance = self.net.LaunchBG(self.command, random_host)
            subproc = self.net.LaunchSubProcess(self.command, random_host)
            wait_return = self.net.LaunchWait(self.command, 2., 0.2,
                                              random_host)
            # Checks type.
            self.assertTrue(isinstance(status, int))
            self.assertTrue(isinstance(statusout, tuple))
            self.assertTrue(isinstance(statusout[0], int))
            self.assertTrue(isinstance(wait_return, tuple))
            # The status must be '0' if the connection dit not fail.
            if random_host.connection:
                self.assertTrue(status == 0)
                self.assertTrue(statusout[0] == 0)
                self.assertTrue(popen4_instance.wait() == 0)
                self.assertTrue(subproc.wait() == 0)
                self.assertTrue(wait_return[0] == 0)
            else:
                self.assertTrue(status != 0)
                self.assertTrue(statusout[0] != 0)
                self.assertTrue(popen4_instance.wait() != 0)
                self.assertTrue(subproc.wait() != 0)
                self.assertTrue(wait_return[0] != 0)
if __name__ == '__main__':
    # Run the default unittest discovery for this module.
    unittest.main()
| garaud/puppetmaster | test/network_test.py | Python | gpl-2.0 | 8,973 |
import functools
from . import (
constants,
utils,
)
class Card():
    """Base card type (special cards and the root of SimpleCard).

    Ordering: a card with a concrete ``strength`` sorts above one
    without; two strengths compare numerically; two strength-less cards
    compare by ``kind``.
    """
    def __init__(self, kind=None, strength=None, value=None, verbose=None, **kwargs):
        # kind is the mandatory short identifier (e.g. 'R' for Dragon).
        if kind is None:
            raise(TypeError("Missing required 'kind' argument."))
        self.kind = kind
        self.strength = strength
        self.value = value
        # verbose defaults to kind when no display name is given.
        self.verbose = verbose if verbose is not None else kind
        super().__init__(**kwargs)

    def __valid_comparision(self, arg):
        # True when arg looks card-like enough to be ordered against.
        return hasattr(arg, "kind") and hasattr(arg, "strength")

    # Single-underscore alias so subclasses can reach this despite the
    # double-underscore name mangling (SimpleCard calls
    # super()._valid_comparision and re-binds its own alias).
    _valid_comparision = __valid_comparision

    def __lt__(self, value):
        if not self.__valid_comparision(value):
            return NotImplemented
        if self.strength is not None:
            if value.strength is not None:
                return self.strength < value.strength
            else:
                # A ranked card never sorts below an unranked one.
                return False
        elif value.strength is not None:
            return True
        # Neither card has a strength: fall back to kind.
        return self.kind < value.kind

    def __str__(self):
        return self.kind
class SimpleCard(Card):
    """A coloured, numbered card (the bulk of the deck).

    Ordering compares strength first, then colour; equality requires
    both strength and colour to match.
    """
    def __init__(self, colour=None, kind=None, strength=None, **kwargs):
        if colour is None:
            raise(TypeError("Missing required 'colour' argument."))
        self.colour = colour
        # Default the kind to the printed strength when not given.
        if kind is None:
            if strength is not None:
                kind = str(strength)
        super().__init__(kind=kind, strength=strength, **kwargs)

    def __valid_comparision(self, arg):
        # Comparable as a SimpleCard only when arg also carries a colour
        # and a concrete strength (on top of the base requirements).
        if super()._valid_comparision(arg):
            if hasattr(arg, "colour") and (arg.colour is not None):
                if arg.strength is not None:
                    return True
        return False

    # Re-bind the mangled helper under the shared single-underscore name
    # (mirrors the alias trick used in Card).
    _valid_comparision = __valid_comparision

    def __lt__(self, value):
        if not self.__valid_comparision(value):
            # Fall back to the base-class ordering for special cards.
            return super().__lt__(value)
        if self.strength < value.strength:
            return True
        if self.strength == value.strength:
            return self.colour < value.colour
        return False

    def __eq__(self, value):
        if not self._valid_comparision(value):
            return False
        # Bug fix: previously fell through and returned None (falsy but
        # not a bool) when the cards differed; now always returns bool.
        return (self.strength == value.strength) and (self.colour == value.colour)

    def __str__(self):
        return self.kind + self.colour[0]
class MahJongg(Card):
    """The Mah Jongg special card: kind '1' with strength 1."""

    def __init__(self):
        attrs = {'kind': '1', 'strength': 1}
        super().__init__(**attrs)
class Dragon(Card):
    """The Dragon special card, worth +25 points, no strength."""

    def __init__(self):
        attrs = {'kind': 'R', 'value': 25, 'verbose': "Dragon"}
        super().__init__(**attrs)
class Pheonix(Card):
    """The Phoenix special card, worth -25 points, no strength.

    NOTE: the class name keeps the original misspelling ("Pheonix")
    because renaming it would break existing callers.
    """

    def __init__(self):
        attrs = {'kind': 'P', 'value': -25, 'verbose': "Pheonix"}
        super().__init__(**attrs)
class Dog(Card):
    """The Dog special card: no strength and no point value."""

    def __init__(self):
        attrs = {'kind': "D", 'verbose': "Dog"}
        super().__init__(**attrs)
| julka2010/games | games/tichu/cards.py | Python | mpl-2.0 | 2,702 |
from cloudinit import cloud
from cloudinit.config import cc_ca_certs
from cloudinit import helpers
from cloudinit import util
from ..helpers import TestCase
import logging
import shutil
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
class TestNoConfig(unittest.TestCase):
    """Behaviour of the ca-certs handler when no configuration is given."""

    def setUp(self):
        super(TestNoConfig, self).setUp()
        self.name = "ca-certs"
        self.cloud_init = None
        self.log = logging.getLogger("TestNoConfig")
        self.args = []

    def test_no_config(self):
        """Nothing is done if no ca-certs configuration is provided."""
        config = util.get_builtin_cfg()
        with mock.patch.object(util, 'write_file') as write_file_mock, \
                mock.patch.object(cc_ca_certs,
                                  'update_ca_certs') as update_mock:
            cc_ca_certs.handle(self.name, config, self.cloud_init, self.log,
                               self.args)
            self.assertEqual(0, write_file_mock.call_count)
            self.assertEqual(0, update_mock.call_count)
class TestConfig(TestCase):
    """Tests for cc_ca_certs.handle() with various ca-certs configurations.

    The three system-modifying functions (add/update/remove) are mocked
    in setUp so each test only checks which of them gets invoked.
    """
    def setUp(self):
        super(TestConfig, self).setUp()
        self.name = "ca-certs"
        self.paths = None
        self.cloud = cloud.Cloud(None, self.paths, None, None, None)
        self.log = logging.getLogger("TestNoConfig")
        self.args = []
        self.mocks = ExitStack()
        self.addCleanup(self.mocks.close)
        # Mock out the functions that actually modify the system
        self.mock_add = self.mocks.enter_context(
            mock.patch.object(cc_ca_certs, 'add_ca_certs'))
        self.mock_update = self.mocks.enter_context(
            mock.patch.object(cc_ca_certs, 'update_ca_certs'))
        self.mock_remove = self.mocks.enter_context(
            mock.patch.object(cc_ca_certs, 'remove_default_ca_certs'))
    def test_no_trusted_list(self):
        """
        Test that no certificates are written if the 'trusted' key is not
        present.
        """
        config = {"ca-certs": {}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.assertEqual(self.mock_add.call_count, 0)
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 0)
    def test_empty_trusted_list(self):
        """Test that no certificate are written if 'trusted' list is empty."""
        config = {"ca-certs": {"trusted": []}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.assertEqual(self.mock_add.call_count, 0)
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 0)
    def test_single_trusted(self):
        """Test that a single cert gets passed to add_ca_certs."""
        config = {"ca-certs": {"trusted": ["CERT1"]}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.mock_add.assert_called_once_with(['CERT1'])
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 0)
    def test_multiple_trusted(self):
        """Test that multiple certs get passed to add_ca_certs."""
        config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.mock_add.assert_called_once_with(['CERT1', 'CERT2'])
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 0)
    def test_remove_default_ca_certs(self):
        """Test remove_defaults works as expected."""
        config = {"ca-certs": {"remove-defaults": True}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.assertEqual(self.mock_add.call_count, 0)
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 1)
    def test_no_remove_defaults_if_false(self):
        """Test remove_defaults is not called when config value is False."""
        config = {"ca-certs": {"remove-defaults": False}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.assertEqual(self.mock_add.call_count, 0)
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 0)
    def test_correct_order_for_remove_then_add(self):
        """Test that defaults are removed and trusted certs still added
        when both options are given together."""
        config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
        cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
        self.mock_add.assert_called_once_with(['CERT1'])
        self.assertEqual(self.mock_update.call_count, 1)
        self.assertEqual(self.mock_remove.call_count, 1)
class TestAddCaCerts(TestCase):
    """Tests for cc_ca_certs.add_ca_certs and its file side effects.

    util.write_file / util.load_file are mocked so no real files under
    /usr/share/ca-certificates or /etc are touched.
    """
    def setUp(self):
        super(TestAddCaCerts, self).setUp()
        # NOTE(review): self.paths is set up here but not used by the
        # tests below — presumably kept for parity with other suites.
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        self.paths = helpers.Paths({
            'cloud_dir': tmpdir,
        })
    def test_no_certs_in_list(self):
        """Test that no certificate are written if not provided."""
        with mock.patch.object(util, 'write_file') as mockobj:
            cc_ca_certs.add_ca_certs([])
            self.assertEqual(mockobj.call_count, 0)
    def test_single_cert_trailing_cr(self):
        """Test adding a single certificate to the trusted CAs
        when existing ca-certificates has trailing newline"""
        cert = "CERT1\nLINE2\nLINE3"
        ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
        expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
        with ExitStack() as mocks:
            mock_write = mocks.enter_context(
                mock.patch.object(util, 'write_file'))
            mock_load = mocks.enter_context(
                mock.patch.object(util, 'load_file',
                                  return_value=ca_certs_content))
            cc_ca_certs.add_ca_certs([cert])
            # The cert file is written and the conf is rewritten with the
            # cloud-init entry moved to the end.
            mock_write.assert_has_calls([
                mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
                          cert, mode=0o644),
                mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
            mock_load.assert_called_once_with("/etc/ca-certificates.conf")
    def test_single_cert_no_trailing_cr(self):
        """Test adding a single certificate to the trusted CAs
        when existing ca-certificates has no trailing newline"""
        cert = "CERT1\nLINE2\nLINE3"
        ca_certs_content = "line1\nline2\nline3"
        with ExitStack() as mocks:
            mock_write = mocks.enter_context(
                mock.patch.object(util, 'write_file'))
            mock_load = mocks.enter_context(
                mock.patch.object(util, 'load_file',
                                  return_value=ca_certs_content))
            cc_ca_certs.add_ca_certs([cert])
            mock_write.assert_has_calls([
                mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
                          cert, mode=0o644),
                mock.call("/etc/ca-certificates.conf",
                          "%s\n%s\n" % (ca_certs_content,
                                        "cloud-init-ca-certs.crt"),
                          omode="wb")])
            mock_load.assert_called_once_with("/etc/ca-certificates.conf")
    def test_multiple_certs(self):
        """Test adding multiple certificates to the trusted CAs."""
        certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
        expected_cert_file = "\n".join(certs)
        ca_certs_content = "line1\nline2\nline3"
        with ExitStack() as mocks:
            mock_write = mocks.enter_context(
                mock.patch.object(util, 'write_file'))
            mock_load = mocks.enter_context(
                mock.patch.object(util, 'load_file',
                                  return_value=ca_certs_content))
            cc_ca_certs.add_ca_certs(certs)
            mock_write.assert_has_calls([
                mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
                          expected_cert_file, mode=0o644),
                mock.call("/etc/ca-certificates.conf",
                          "%s\n%s\n" % (ca_certs_content,
                                        "cloud-init-ca-certs.crt"),
                          omode='wb')])
            mock_load.assert_called_once_with("/etc/ca-certificates.conf")
class TestUpdateCaCerts(unittest.TestCase):
    """Behaviour of cc_ca_certs.update_ca_certs."""

    def test_commands(self):
        """The system update-ca-certificates command is run exactly once."""
        with mock.patch.object(util, 'subp') as subp_mock:
            cc_ca_certs.update_ca_certs()
            subp_mock.assert_called_once_with(
                ["update-ca-certificates"], capture=False)
class TestRemoveDefaultCaCerts(TestCase):
    """Behaviour of cc_ca_certs.remove_default_ca_certs."""

    def setUp(self):
        super(TestRemoveDefaultCaCerts, self).setUp()
        work_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, work_dir)
        self.paths = helpers.Paths({'cloud_dir': work_dir})

    def test_commands(self):
        """Cert dirs are emptied, the conf blanked and debconf updated."""
        with mock.patch.object(util, 'delete_dir_contents') as mock_delete, \
                mock.patch.object(util, 'write_file') as mock_write, \
                mock.patch.object(util, 'subp') as mock_subp:
            cc_ca_certs.remove_default_ca_certs()
            mock_delete.assert_has_calls([
                mock.call("/usr/share/ca-certificates/"),
                mock.call("/etc/ssl/certs/")])
            mock_write.assert_called_once_with(
                "/etc/ca-certificates.conf", "", mode=0o644)
            mock_subp.assert_called_once_with(
                ('debconf-set-selections', '-'),
                "ca-certificates ca-certificates/trust_new_crts select no")
| prometheanfire/cloud-init | tests/unittests/test_handler/test_handler_ca_certs.py | Python | gpl-3.0 | 10,161 |
"""
NOTE: this code is a legacy from the early days of the application,
and currently not used.
"""
from __future__ import annotations
import json
from flask import Blueprint, make_response, request
from flask_login import current_user, login_required
from abilian.core.extensions import db
from abilian.core.models.subjects import Group, User
from abilian.core.util import get_params
from .models import Message
__all__ = ["restapi"]
restapi = Blueprint("restapi", __name__, url_prefix="/api")
# Util
def make_json_response(obj, response_code=200):
    """Serialize *obj* to JSON and wrap it in a Flask response.

    List items and objects exposing ``to_dict``/``to_json`` are converted
    via those hooks; anything else goes straight through ``json.dumps``.
    """
    if isinstance(obj, list):
        obj = [x.to_dict() if hasattr(x, "to_dict") else x for x in obj]
    if hasattr(obj, "to_json"):
        payload = obj.to_json()
    elif hasattr(obj, "to_dict"):
        payload = json.dumps(obj.to_dict())
    else:
        payload = json.dumps(obj)
    response = make_response(payload, response_code)
    response.mimetype = "application/json"
    return response
#
# Users
#
# [POST] /api/users/USER_ID Create User Profile
@restapi.route("/users", methods=["POST"])
@login_required
def create_user():
    """Create a new user from the editable request parameters (201)."""
    d = get_params(User.__editable__)
    user = User(**d)
    db.session.add(user)
    db.session.commit()
    return make_json_response(user, 201)
# [GET] /api/users List Users in Your Organization
@restapi.route("/users")
@login_required
def list_users():
    """Return all users in the organization as JSON."""
    # l = [ u.to_dict() for u in User.query.all() ]
    users = list(User.query.all())
    return make_json_response(users)
# [GET] /api/users/USER_ID View User Profile
@restapi.route("/users/<int:user_id>")
@login_required
def get_user(user_id):
    """Return a single user's profile as JSON."""
    return make_json_response(User.query.get(user_id))
# [GET] /api/users/USER_ID/messages View Stream of Messages by User
@restapi.route("/users/<int:user_id>/messages")
@login_required
def user_stream(user_id):
    """Return the stream of messages created by the given user."""
    user = User.query.get(user_id)
    messages = Message.query.by_creator(user).all()
    # messages = list(user.messages)
    return make_json_response(messages)
# [PUT] /api/users/USER_ID Update User Profile
@restapi.route("/users/<int:user_id>", methods=["PUT"])
@login_required
def update_user(user_id):
    """Update a user's editable profile fields from request parameters."""
    user = User.query.get(user_id)
    d = get_params(User.__editable__)
    user.update(d)
    db.session.commit()
    return make_json_response(user)
# [DELETE] /api/users/USER_ID Deactivate a User
@restapi.route("/users/<int:user_id>", methods=["DELETE"])
@login_required
def delete_user(user_id):
    """Delete the given user and return an empty 204 response."""
    user = User.query.get(user_id)
    db.session.delete(user)
    db.session.commit()
    return make_response("", 204)
#
# Social graph: following
#
# [GET] /api/users/USER_ID/followers View Followers of User
@restapi.route("/users/<int:user_id>/followers")
@login_required
def get_followers(user_id):
    """Return the users following the given user as JSON."""
    followers = list(User.query.get(user_id).followers)
    return make_json_response(followers)
# [GET] /api/users/USER_ID/followees View List of Users Being Followed
@restapi.route("/users/<int:user_id>/followees")
@login_required
def get_followees(user_id):
    """Return the users that the given user follows as JSON."""
    followees = list(User.query.get(user_id).followees)
    return make_json_response(followees)
# [POST] /api/users/USER_ID/followers Follow a User
@restapi.route("/users/<int:user_id>/followers", methods=["POST"])
@login_required
def follow(user_id):
    """Make the current user follow the given user."""
    user = User.query.get(user_id)
    current_user.follow(user)
    db.session.commit()
    return make_json_response("", 204)
# [DELETE] /api/users/USER_ID/followers/CONTACT_USER_ID Unfollow a User
@restapi.route(
    "/users/<int:user_id>/followers/<int:contact_user_id>", methods=["DELETE"]
)
@login_required
def unfollow(user_id, contact_user_id):
    """Make the current user stop following the user given by user_id."""
    # NOTE(review): contact_user_id is accepted by the route but never
    # used; the handler always unfollows `user_id` on behalf of
    # current_user. Verify this matches the intended REST semantics.
    user = User.query.get(user_id)
    current_user.unfollow(user)
    db.session.commit()
    return make_json_response("", 204)
#
# Social graph: groups
#
# [GET] /api/groups Listing All Groups
@restapi.route("/groups")
@login_required
def list_groups():
    """Return all groups as JSON."""
    return make_json_response(list(Group.query.all()))
# [GET] /api/groups/GROUP_ID Show a Single Group
@restapi.route("/groups/<int:group_id>")
@login_required
def get_group(group_id):
    """Return a single group as JSON."""
    return make_json_response(Group.query.get(group_id))
# [GET] /api/groups/GROUP_ID/members Listing Members of a Group
@restapi.route("/groups/<int:group_id>/members")
@login_required
def get_group_members(group_id):
    """Return the members of a group as JSON."""
    return make_json_response(Group.query.get(group_id).members)
# [GET] /api/group_memberships Listing Group Memberships
# [POST] /api/groups Create a Group
@restapi.route("/groups", methods=["POST"])
@login_required
def create_group():
    """Create a new group from the editable request parameters (201)."""
    d = get_params(Group.__editable__)
    group = Group(**d)
    db.session.add(group)
    db.session.commit()
    return make_json_response(group, 201)
# [PUT] /api/groups/GROUP_ID Updating Existing Group
# [DELETE] /api/groups/GROUP_ID/archive Archiving a Group
# [DELETE] /api/groups/GROUP_ID Destroy an Archived Message
#
# Messages
#
# [POST] /api/messages Creating New Messages
@restapi.route("/messages", methods=["POST"])
@login_required
def create_message():
    """Create a new stream message authored by the current user (201)."""
    d = get_params(Message.__editable__)
    message = Message(creator_id=current_user.id, **d)
    db.session.add(message)
    db.session.commit()
    return make_json_response(message, 201)
# [GET] /api/messages Reading Stream Messages
@restapi.route("/messages")
@login_required
def get_messages():
    """Return all stream messages as JSON."""
    return make_json_response(list(Message.query.all()))
# [GET] /api/messages/MESSAGE_ID Read a Single Stream Message
@restapi.route("/messages/<int:message_id>")
@login_required
def get_message(message_id):
    """Return a single stream message as JSON."""
    return make_json_response(Message.query.get(message_id))
# [PUT] /api/messages/MESSAGE_ID Updating Existing Messages
@restapi.route("/messages/<int:message_id>", methods=["PUT"])
@login_required
def update_message(message_id):
    """Update an existing message's content from request parameters."""
    message = Message.query.get(message_id)
    d = get_params(["content"])
    message.update(d)
    db.session.commit()
    return make_json_response(message)
# [DELETE] /api/messages/MESSAGE_ID Destroy an existing message
@restapi.route("/messages/<int:message_id>", methods=["DELETE"])
@login_required
def delete_message(message_id):
    """Delete an existing message and return an empty 204 response."""
    message = Message.query.get(message_id)
    db.session.delete(message)
    db.session.commit()
    return make_response("", 204)
#
# Likes
#
# TODO: use an "objects" namespace instead to make it generic?
# [POST] /api/messages/MESSAGE_ID/likes Liking a Message
@restapi.route("/messages/<int:message_id>/likes", methods=["POST"])
@login_required
def like_message(message_id):
    """Like a message. Not implemented: only registers the route."""
    pass
# [POST] /api/comments/COMMENT_ID/likes/LIKES_ID Liking a Comment
@restapi.route("/comments/<int:comment_id>/likes", methods=["POST"])
@login_required
def like_comment(comment_id):
    """Like a comment. Not implemented: only registers the route."""
    pass
# [DELETE] /api/messages/MESSAGE_ID/likes/LIKES_ID Un-liking a Message
@restapi.route("/messages/<int:message_id>/likes/<int:like_id>", methods=["DELETE"])
@login_required
def unlike_message(message_id, like_id):
    """Un-like a message. Not implemented: only registers the route."""
    pass
# [DELETE] /api/comments/COMMENT_ID/likes/LIKES_ID
@restapi.route("/comments/<int:comment_id>/likes/<int:like_id>", methods=["DELETE"])
@login_required
def unlike_comment(comment_id, like_id):
    """Un-like a comment. Not implemented: only registers the route."""
    pass
#
# Search
#
# [GET] /api/messages/search Searching Messages
@restapi.route("/search/messages")
@login_required
def search_messages():
    """Full-text search over messages; empty query yields an empty list."""
    query = request.args.get("q")
    if query:
        hits = list(Message.search_query(query).all())
    else:
        hits = []
    return make_json_response(hits)
# [GET] /api/users/search Search Users in Your Company
@restapi.route("/search/users")
@login_required
def search_users():
    """Full-text search over users; empty query yields an empty list."""
    query = request.args.get("q")
    if query:
        hits = list(User.search_query(query).all())
    else:
        hits = []
    return make_json_response(hits)
#
# Activity Streams
#
@restapi.route("/feed")
@login_required
def get_feed():
    """Return the current user's activity feed. Not implemented yet."""
    pass
| abilian/abilian-sbe | src/abilian/sbe/apps/social/restapi.py | Python | lgpl-2.1 | 7,998 |
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class hr(osv.osv):
    # Extend hr.employee (old OpenERP v7 osv API) with website fields.
    _inherit = 'hr.employee'
    _columns = {
        # Whether the employee profile is published on the website.
        'website_published': fields.boolean('Available in the website', copy=False),
        # Free-form text shown publicly for this employee.
        'public_info': fields.text('Public Info'),
    }
    _defaults = {
        'website_published': False
    }
| mycodeday/crm-platform | website_hr/models/hr.py | Python | gpl-3.0 | 329 |
import pytest
from pages.treeherder import TreeherderPage
@pytest.mark.nondestructive
def test_load_next_results(base_url, selenium):
    """Paging controls extend the result-set list: 10 -> 20 -> 40 -> 90."""
    page = TreeherderPage(selenium, base_url).open()
    assert len(page.result_sets) == 10
    page.get_next_ten_results()
    assert len(page.result_sets) == 20
    page.get_next_twenty_results()
    # NOTE(review): an explicit wait only happens after the +20 step;
    # presumably the other steps wait internally — confirm in the page
    # object if this test flakes.
    page.wait_for_page_to_load()
    assert len(page.result_sets) == 40
    page.get_next_fifty_results()
    assert len(page.result_sets) == 90
| tojonmz/treeherder | tests/jenkins/tests/test_load_page_results.py | Python | mpl-2.0 | 483 |
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.contrib.auth.models import User
from boards.models import Board, List, ListEntry, Comment
from django.http import Http404
@login_required
def boards(request):
    """Render the board overview: boards the user created vs. joined."""
    return render(request, 'boards/boards.html',
                  {"created_boards": request.user.get_created_boards(),
                   "other_boards": request.user.get_other_boards()})
@login_required
def board(request, board_id):
    """Render a single board; non-members are bounced to the overview."""
    board = get_object_or_404(Board, id=board_id)
    if request.user not in board.members.all():
        return redirect('boards')
    return render(request, 'boards/board.html',
                  {"board": board.get_for_rendering(),
                   "other_boards": request.user.boards.all(),
                   "is_admin": request.user.is_admin(board)})
@login_required
def new_board(request):
    """Create a board titled from the POSTed form and open it."""
    created = Board.new(request.user, request.POST['board_title'])
    return redirect("board", created.id)
@login_required
def delete_board(request):
    """Delete a board; only its creator may do so (404 otherwise)."""
    board_id = int(request.POST['board_id'])
    board = get_object_or_404(Board, id=board_id)
    if board.creator != request.user:
        raise Http404
    board.delete()
    return redirect("boards")
@login_required
def new_list(request):
    """Create a new list on a board the current user is a member of."""
    title = request.POST['list_title']
    board_id = int(request.POST['board_id'])
    board = get_object_or_404(Board, id=board_id)
    # 404 unless the requesting user is a member of the board.
    get_object_or_404(board.members.all(), id=request.user.id)
    # The created object is not needed afterwards (the original bound it
    # to an unused local variable).
    List.objects.create(title=title, board=board)
    return redirect("board", board.id)
@login_required
def delete_list(request):
    """Delete a list; the user must be a member of its board."""
    list_id = int(request.POST['list_id'])
    board_list = get_object_or_404(List, id=list_id)
    # 404 unless the requesting user is a member of the list's board.
    get_object_or_404(board_list.board.members.all(), id=request.user.id)
    board_list.delete()
    return redirect("board", board_list.board.id)
@login_required
def move_list(request):
    """Move a list to another board; user must be a member of both."""
    list_id = int(request.POST['list_id'])
    target_board_id = int(request.POST['move_target_id'])
    board_list = get_object_or_404(List, id=list_id)
    # Membership checks on both source and destination boards.
    get_object_or_404(board_list.board.members.all(), id=request.user.id)
    other_board = get_object_or_404(Board, id=target_board_id)
    get_object_or_404(other_board.members.all(), id=request.user.id)
    board_list.board = other_board
    board_list.save()
    return redirect("board", other_board.id)
@login_required
def new_list_item(request):
    """Add an entry to a list on a board the user is a member of."""
    title = request.POST['list_item_title']
    description = request.POST['list_item_description']
    list_id = int(request.POST['list_id'])
    board_list = get_object_or_404(List, id=list_id)
    get_object_or_404(board_list.board.members.all(), id=request.user.id)
    ListEntry.objects.create(title=title,
                             description=description,
                             parent_list=board_list)
    return redirect("board", board_list.board.id)
@login_required
def delete_list_item(request):
    """Delete a list entry; the user must be a member of its board."""
    list_entry_id = int(request.POST['list_entry_id'])
    list_entry = get_object_or_404(ListEntry, id=list_entry_id)
    get_object_or_404(list_entry.parent_list.board.members.all(),
                      id=request.user.id)
    list_entry.delete()
    return redirect("board", list_entry.parent_list.board.id)
@login_required
def move_list_item(request):
    """Move a list entry to another list.

    Mirrors move_list: the user must be a member of both the source
    board and the target list's board.
    """
    list_entry_id = int(request.POST['list_entry_id'])
    target_list_id = int(request.POST['move_target_id'])
    list_entry = get_object_or_404(ListEntry, id=list_entry_id)
    get_object_or_404(list_entry.parent_list.board.members.all(),
                      id=request.user.id)
    other_list = get_object_or_404(List, id=target_list_id)
    # Bug fix: also require membership of the destination board, as
    # move_list does; previously any member of the source board could
    # move entries onto boards they do not belong to.
    get_object_or_404(other_list.board.members.all(), id=request.user.id)
    list_entry.parent_list = other_list
    list_entry.save()
    return redirect("board", list_entry.parent_list.board.id)
@login_required
def change_list_item(request):
    """Update a list entry's title and description."""
    title = request.POST['list_entry_title']
    description = request.POST['list_entry_description']
    list_id = int(request.POST['list_entry_id'])
    list_entry = get_object_or_404(ListEntry, id=list_id)
    get_object_or_404(list_entry.parent_list.board.members.all(),
                      id=request.user.id)
    list_entry.title = title
    list_entry.description = description
    list_entry.save()
    return redirect("board", list_entry.parent_list.board.id)
@login_required
def post_comment(request):
    """Attach a comment to a list entry on a board the user belongs to."""
    text = request.POST['comment']
    entry_id = int(request.POST['list_entry_id'])
    entry = get_object_or_404(ListEntry, id=entry_id)
    board = entry.parent_list.board
    # 404 unless the requesting user is a member of the entry's board.
    get_object_or_404(board.members.all(), id=request.user.id)
    Comment(user=request.user, list_entry=entry, text=text).save()
    return redirect("board", board.id)
@login_required
def new_member(request):
    """Add a user to a board's members; requires board admin rights."""
    username = request.POST['member_username']
    board_id = int(request.POST['board_id'])
    board = get_object_or_404(Board, id=board_id)
    # 404 unless the requesting user is an admin of the board.
    get_object_or_404(board.admins.all(), id=request.user.id)
    user = get_object_or_404(User, username=username)
    board.members.add(user)
    board.save()
    return redirect("board", board.id)
@login_required
def new_admin(request):
    """Grant a user admin rights (adding as member too); requires admin."""
    username = request.POST['member_username']
    board_id = int(request.POST['board_id'])
    board = get_object_or_404(Board, id=board_id)
    # 404 unless the requesting user is an admin of the board.
    get_object_or_404(board.admins.all(), id=request.user.id)
    user = get_object_or_404(User, username=username)
    board.members.add(user)
    board.admins.add(user)
    board.save()
    return redirect("board", board.id)
@login_required
def remove_admin(request):
    """Revoke a user's admin rights on a board; requires admin rights."""
    user_id = int(request.POST['user_id'])
    board_id = int(request.POST['board_id'])
    board = get_object_or_404(Board, id=board_id)
    get_object_or_404(board.admins.all(), id=request.user.id)
    user = get_object_or_404(User, id=user_id)
    board.admins.remove(user)
    board.save()
    return redirect("board", board.id)
@login_required
def remove_member(request):
    """Remove a user from a board.

    Users may remove themselves; removing anyone else requires admin
    rights. Admin status is dropped along with membership.
    """
    user_id = int(request.POST['user_id'])
    board_id = int(request.POST['board_id'])
    board = get_object_or_404(Board, id=board_id)
    user = get_object_or_404(User, id=user_id)
    if user != request.user:
        get_object_or_404(board.admins.all(), id=request.user.id)
    board.members.remove(user)
    board.admins.remove(user)
    board.save()
    return redirect("board", board.id)
| KristianTashkov/prello | boards/views.py | Python | gpl-2.0 | 6,498 |
# Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details.
"""
Tests for L{wokkel.disco}.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.trial import unittest
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.xmlstream import toResponse
from twisted.words.xish import domish
from wokkel import data_form, disco
from wokkel.generic import parseXml
from wokkel.subprotocols import XMPPHandler
from wokkel.test.helpers import TestableRequestHandlerMixin, XmlStreamStub
NS_DISCO_INFO = 'http://jabber.org/protocol/disco#info'
NS_DISCO_ITEMS = 'http://jabber.org/protocol/disco#items'
class DiscoFeatureTest(unittest.TestCase):
    """
    Tests for L{disco.DiscoFeature}.
    """

    def test_init(self):
        """
        A DiscoFeature initialized from a namespace URI equals that URI.
        """
        feat = disco.DiscoFeature(u'testns')
        self.assertEqual(u'testns', feat)

    def test_toElement(self):
        """
        Rendering yields a disco#info C{feature} element whose C{var}
        attribute holds the feature namespace URI.
        """
        dom = disco.DiscoFeature(u'testns').toElement()
        self.assertEqual(NS_DISCO_INFO, dom.uri)
        self.assertEqual(u'feature', dom.name)
        self.assertTrue(dom.hasAttribute(u'var'))
        self.assertEqual(u'testns', dom[u'var'])

    def test_fromElement(self):
        """
        A DiscoFeature can be parsed back from its DOM representation.
        """
        dom = domish.Element((NS_DISCO_INFO, u'feature'))
        dom['var'] = u'testns'
        feat = disco.DiscoFeature.fromElement(dom)
        self.assertEqual(u'testns', feat)
class DiscoIdentityTest(unittest.TestCase):
    """
    Tests for L{disco.DiscoIdentity}.

    Covers construction, DOM rendering and parsing, with and without the
    optional C{name} attribute.
    """
    def test_init(self):
        """
        Test initialization with a category, type and name.
        """
        identity = disco.DiscoIdentity(u'conference', u'text', u'The chatroom')
        self.assertEqual(u'conference', identity.category)
        self.assertEqual(u'text', identity.type)
        self.assertEqual(u'The chatroom', identity.name)
    def test_toElement(self):
        """
        Test proper rendering to a DOM representation.
        The returned element should be properly named and have C{conference},
        C{type}, and C{name} attributes.
        """
        identity = disco.DiscoIdentity(u'conference', u'text', u'The chatroom')
        element = identity.toElement()
        self.assertEqual(NS_DISCO_INFO, element.uri)
        self.assertEqual(u'identity', element.name)
        self.assertEqual(u'conference', element.getAttribute(u'category'))
        self.assertEqual(u'text', element.getAttribute(u'type'))
        self.assertEqual(u'The chatroom', element.getAttribute(u'name'))
    def test_toElementWithoutName(self):
        """
        Test proper rendering to a DOM representation without a name.
        The returned element should be properly named and have C{conference},
        C{type} attributes, no C{name} attribute.
        """
        identity = disco.DiscoIdentity(u'conference', u'text')
        element = identity.toElement()
        self.assertEqual(NS_DISCO_INFO, element.uri)
        self.assertEqual(u'identity', element.name)
        self.assertEqual(u'conference', element.getAttribute(u'category'))
        self.assertEqual(u'text', element.getAttribute(u'type'))
        self.assertFalse(element.hasAttribute(u'name'))
    def test_fromElement(self):
        """
        Test creating L{disco.DiscoIdentity} from L{domish.Element}.
        """
        element = domish.Element((NS_DISCO_INFO, u'identity'))
        element['category'] = u'conference'
        element['type'] = u'text'
        element['name'] = u'The chatroom'
        identity = disco.DiscoIdentity.fromElement(element)
        self.assertEqual(u'conference', identity.category)
        self.assertEqual(u'text', identity.type)
        self.assertEqual(u'The chatroom', identity.name)
    def test_fromElementWithoutName(self):
        """
        Test creating L{disco.DiscoIdentity} from L{domish.Element}, no name.
        """
        element = domish.Element((NS_DISCO_INFO, u'identity'))
        element['category'] = u'conference'
        element['type'] = u'text'
        identity = disco.DiscoIdentity.fromElement(element)
        self.assertEqual(u'conference', identity.category)
        self.assertEqual(u'text', identity.type)
        self.assertEqual(None, identity.name)
class DiscoInfoTest(unittest.TestCase):
    """
    Tests for L{disco.DiscoInfo}.

    Covers DOM rendering and parsing of disco#info query payloads,
    including identities, features and data-form extensions.
    """
    def test_toElement(self):
        """
        Test C{toElement} creates a correctly namespaced element, no node.
        """
        info = disco.DiscoInfo()
        element = info.toElement()
        self.assertEqual(NS_DISCO_INFO, element.uri)
        self.assertEqual(u'query', element.name)
        self.assertFalse(element.hasAttribute(u'node'))
    def test_toElementNode(self):
        """
        Test C{toElement} with a node.
        """
        info = disco.DiscoInfo()
        info.nodeIdentifier = u'test'
        element = info.toElement()
        self.assertEqual(u'test', element.getAttribute(u'node'))
    def test_toElementChildren(self):
        """
        Test C{toElement} creates a DOM with proper childs.
        """
        info = disco.DiscoInfo()
        info.append(disco.DiscoFeature(u'jabber:iq:register'))
        info.append(disco.DiscoIdentity(u'conference', u'text'))
        info.append(data_form.Form(u'result'))
        element = info.toElement()
        featureElements = domish.generateElementsQNamed(element.children,
                                                        u'feature',
                                                        NS_DISCO_INFO)
        self.assertEqual(1, len(list(featureElements)))
        identityElements = domish.generateElementsQNamed(element.children,
                                                         u'identity',
                                                         NS_DISCO_INFO)
        self.assertEqual(1, len(list(identityElements)))
        extensionElements = domish.generateElementsQNamed(element.children,
                                                          u'x',
                                                          data_form.NS_X_DATA)
        self.assertEqual(1, len(list(extensionElements)))
    def test_fromElement(self):
        """
        Test properties when creating L{disco.DiscoInfo} from L{domish.Element}.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#info'>
                   <identity category='conference'
                             type='text'
                             name='A Dark Cave'/>
                   <feature var='http://jabber.org/protocol/muc'/>
                   <feature var='jabber:iq:register'/>
                   <x xmlns='jabber:x:data' type='result'>
                     <field var='FORM_TYPE' type='hidden'>
                       <value>http://jabber.org/protocol/muc#roominfo</value>
                     </field>
                   </x>
                 </query>"""
        element = parseXml(xml)
        info = disco.DiscoInfo.fromElement(element)
        self.assertIn(u'http://jabber.org/protocol/muc', info.features)
        self.assertIn(u'jabber:iq:register', info.features)
        self.assertIn((u'conference', u'text'), info.identities)
        self.assertEqual(u'A Dark Cave',
                         info.identities[(u'conference', u'text')])
        self.assertIn(u'http://jabber.org/protocol/muc#roominfo',
                      info.extensions)
    def test_fromElementItems(self):
        """
        Test items when creating L{disco.DiscoInfo} from L{domish.Element}.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#info'>
                   <identity category='conference'
                             type='text'
                             name='A Dark Cave'/>
                   <feature var='http://jabber.org/protocol/muc'/>
                   <feature var='jabber:iq:register'/>
                   <x xmlns='jabber:x:data' type='result'>
                     <field var='FORM_TYPE' type='hidden'>
                       <value>http://jabber.org/protocol/muc#roominfo</value>
                     </field>
                   </x>
                 </query>"""
        element = parseXml(xml)
        info = disco.DiscoInfo.fromElement(element)
        # Iterating a DiscoInfo yields its children in document order.
        info = list(info)
        self.assertEqual(4, len(info))
        identity = info[0]
        self.assertEqual(u'conference', identity.category)
        self.assertEqual(u'http://jabber.org/protocol/muc', info[1])
        self.assertEqual(u'jabber:iq:register', info[2])
        extension = info[3]
        self.assertEqual(u'http://jabber.org/protocol/muc#roominfo',
                         extension.formNamespace)
    def test_fromElementNoNode(self):
        """
        Test creating L{disco.DiscoInfo} from L{domish.Element}, no node.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#info'/>"""
        element = parseXml(xml)
        info = disco.DiscoInfo.fromElement(element)
        self.assertEqual(u'', info.nodeIdentifier)
    def test_fromElementNode(self):
        """
        Test creating L{disco.DiscoInfo} from L{domish.Element}, with node.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#info'
                        node='test'>
                 </query>"""
        element = parseXml(xml)
        info = disco.DiscoInfo.fromElement(element)
        self.assertEqual(u'test', info.nodeIdentifier)
class DiscoItemTest(unittest.TestCase):
    """
    Tests for L{disco.DiscoItem}.

    Covers construction, DOM rendering and parsing, including absent
    optional attributes and malformed JIDs.
    """
    def test_init(self):
        """
        Test initialization with a category, type and name.
        """
        item = disco.DiscoItem(JID(u'example.org'), u'test', u'The node')
        self.assertEqual(JID(u'example.org'), item.entity)
        self.assertEqual(u'test', item.nodeIdentifier)
        self.assertEqual(u'The node', item.name)
    def test_toElement(self):
        """
        Test proper rendering to a DOM representation.
        The returned element should be properly named and have C{jid}, C{node},
        and C{name} attributes.
        """
        item = disco.DiscoItem(JID(u'example.org'), u'test', u'The node')
        element = item.toElement()
        self.assertEqual(NS_DISCO_ITEMS, element.uri)
        self.assertEqual(u'item', element.name)
        self.assertEqual(u'example.org', element.getAttribute(u'jid'))
        self.assertEqual(u'test', element.getAttribute(u'node'))
        self.assertEqual(u'The node', element.getAttribute(u'name'))
    def test_toElementWithoutName(self):
        """
        Test proper rendering to a DOM representation without a name.
        The returned element should be properly named and have C{jid}, C{node}
        attributes, no C{name} attribute.
        """
        item = disco.DiscoItem(JID(u'example.org'), u'test')
        element = item.toElement()
        self.assertEqual(NS_DISCO_ITEMS, element.uri)
        self.assertEqual(u'item', element.name)
        self.assertEqual(u'example.org', element.getAttribute(u'jid'))
        self.assertEqual(u'test', element.getAttribute(u'node'))
        self.assertFalse(element.hasAttribute(u'name'))
    def test_fromElement(self):
        """
        Test creating L{disco.DiscoItem} from L{domish.Element}.
        """
        element = domish.Element((NS_DISCO_ITEMS, u'item'))
        element[u'jid'] = u'example.org'
        element[u'node'] = u'test'
        element[u'name'] = u'The node'
        item = disco.DiscoItem.fromElement(element)
        self.assertEqual(JID(u'example.org'), item.entity)
        self.assertEqual(u'test', item.nodeIdentifier)
        self.assertEqual(u'The node', item.name)
    def test_fromElementNoNode(self):
        """
        Test creating L{disco.DiscoItem} from L{domish.Element}, no node.
        """
        element = domish.Element((NS_DISCO_ITEMS, u'item'))
        element[u'jid'] = u'example.org'
        element[u'name'] = u'The node'
        item = disco.DiscoItem.fromElement(element)
        self.assertEqual(JID(u'example.org'), item.entity)
        self.assertEqual(u'', item.nodeIdentifier)
        self.assertEqual(u'The node', item.name)
    def test_fromElementNoName(self):
        """
        Test creating L{disco.DiscoItem} from L{domish.Element}, no name.
        """
        element = domish.Element((NS_DISCO_ITEMS, u'item'))
        element[u'jid'] = u'example.org'
        element[u'node'] = u'test'
        item = disco.DiscoItem.fromElement(element)
        self.assertEqual(JID(u'example.org'), item.entity)
        self.assertEqual(u'test', item.nodeIdentifier)
        self.assertEqual(None, item.name)
    def test_fromElementBadJID(self):
        """
        Test creating L{disco.DiscoItem} from L{domish.Element}, bad JID.
        """
        element = domish.Element((NS_DISCO_ITEMS, u'item'))
        element[u'jid'] = u'ex@@@ample.org'
        item = disco.DiscoItem.fromElement(element)
        # An unparseable JID results in entity being None, not an error.
        self.assertIdentical(None, item.entity)
class DiscoItemsTest(unittest.TestCase):
    """
    Tests for L{disco.DiscoItems}.
    """

    def test_toElement(self):
        """
        Without a node identifier, rendering yields a bare, correctly
        namespaced C{query} element.
        """
        rendered = disco.DiscoItems().toElement()
        self.assertEqual(NS_DISCO_ITEMS, rendered.uri)
        self.assertEqual(u'query', rendered.name)
        self.assertFalse(rendered.hasAttribute(u'node'))

    def test_toElementNode(self):
        """
        A set node identifier is rendered as the C{node} attribute.
        """
        discoItems = disco.DiscoItems()
        discoItems.nodeIdentifier = u'test'
        rendered = discoItems.toElement()
        self.assertEqual(u'test', rendered.getAttribute(u'node'))

    def test_toElementChildren(self):
        """
        Each appended item appears as an C{item} child element.
        """
        discoItems = disco.DiscoItems()
        discoItems.append(disco.DiscoItem(JID(u'example.org'), u'test',
                                          u'A node'))
        rendered = discoItems.toElement()
        itemElements = domish.generateElementsQNamed(rendered.children,
                                                     u'item',
                                                     NS_DISCO_ITEMS)
        self.assertEqual(1, len(list(itemElements)))

    def test_fromElement(self):
        """
        Parsing a query with one item yields a single, fully populated
        L{disco.DiscoItem}.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#items'>
                    <item jid='example.org' node='test' name='A node'/>
                 </query>"""
        parsed = list(disco.DiscoItems.fromElement(parseXml(xml)))
        self.assertEqual(1, len(parsed))
        firstItem = parsed[0]
        self.assertEqual(JID(u'example.org'), firstItem.entity)
        self.assertEqual(u'test', firstItem.nodeIdentifier)
        self.assertEqual(u'A node', firstItem.name)

    def test_fromElementNoNode(self):
        """
        Parsing without a C{node} attribute yields an empty node identifier.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#items'/>"""
        parsed = disco.DiscoItems.fromElement(parseXml(xml))
        self.assertEqual(u'', parsed.nodeIdentifier)

    def test_fromElementNode(self):
        """
        Parsing with a C{node} attribute stores it as the node identifier.
        """
        xml = """<query xmlns='http://jabber.org/protocol/disco#items'
                        node='test'>
                 </query>"""
        parsed = disco.DiscoItems.fromElement(parseXml(xml))
        self.assertEqual(u'test', parsed.nodeIdentifier)
class DiscoClientProtocolTest(unittest.TestCase):
    """
    Tests for L{disco.DiscoClientProtocol}.
    """

    def setUp(self):
        """
        Set up stub and protocol for testing.
        """
        # XmlStreamStub records outgoing stanzas in self.stub.output and
        # lets the test inject a response via self.stub.send().
        self.stub = XmlStreamStub()
        self.protocol = disco.DiscoClientProtocol()
        self.protocol.xmlstream = self.stub.xmlstream
        self.protocol.connectionInitialized()

    def test_requestItems(self):
        """
        Test request sent out by C{requestItems} and parsing of response.
        """
        def cb(items):
            # Both item elements of the faked response must be parsed.
            items = list(items)
            self.assertEqual(2, len(items))
            self.assertEqual(JID(u'test.example.org'), items[0].entity)

        d = self.protocol.requestItems(JID(u'example.org'),u"foo")
        d.addCallback(cb)

        # The outgoing stanza must be an IQ get carrying a disco#items
        # query addressed to the target entity and node.
        iq = self.stub.output[-1]
        self.assertEqual(u'example.org', iq.getAttribute(u'to'))
        self.assertEqual(u'get', iq.getAttribute(u'type'))
        self.assertEqual(u'foo', iq.query.getAttribute(u'node'))
        self.assertEqual(NS_DISCO_ITEMS, iq.query.uri)

        # Fake a result with two items: one fully attributed, one bare.
        response = toResponse(iq, u'result')
        query = response.addElement((NS_DISCO_ITEMS, u'query'))

        element = query.addElement(u'item')
        element[u'jid'] = u'test.example.org'
        element[u'node'] = u'music'
        element[u'name'] = u'Music from the time of Shakespeare'

        element = query.addElement(u'item')
        element[u'jid'] = u"test2.example.org"

        self.stub.send(response)
        return d

    def test_requestItemsFrom(self):
        """
        A disco items request can be sent with an explicit sender address.
        """
        d = self.protocol.requestItems(JID(u'example.org'),
                                       sender=JID(u'test.example.org'))

        # The explicit sender must end up in the 'from' attribute.
        iq = self.stub.output[-1]
        self.assertEqual(u'test.example.org', iq.getAttribute(u'from'))

        # An empty result suffices to fire the deferred.
        response = toResponse(iq, u'result')
        response.addElement((NS_DISCO_ITEMS, u'query'))
        self.stub.send(response)

        return d

    def test_requestInfo(self):
        """
        Test request sent out by C{requestInfo} and parsing of response.
        """
        def cb(info):
            # The identity and both features of the faked response must
            # have been parsed into the DiscoInfo result.
            self.assertIn((u'conference', u'text'), info.identities)
            self.assertIn(u'http://jabber.org/protocol/disco#info',
                          info.features)
            self.assertIn(u'http://jabber.org/protocol/muc',
                          info.features)

        d = self.protocol.requestInfo(JID(u'example.org'),'foo')
        d.addCallback(cb)

        # The outgoing stanza must be an IQ get carrying a disco#info
        # query addressed to the target entity and node.
        iq = self.stub.output[-1]
        self.assertEqual(u'example.org', iq.getAttribute(u'to'))
        self.assertEqual(u'get', iq.getAttribute(u'type'))
        self.assertEqual(u'foo', iq.query.getAttribute(u'node'))
        self.assertEqual(NS_DISCO_INFO, iq.query.uri)

        # Fake a result carrying one identity and two features.
        response = toResponse(iq, u'result')
        query = response.addElement((NS_DISCO_INFO, u'query'))

        element = query.addElement(u"identity")
        element[u'category'] = u'conference' # required
        element[u'type'] = u'text' # required
        element[u"name"] = u'Romeo and Juliet, Act II, Scene II' # optional

        element = query.addElement("feature")
        element[u'var'] = u'http://jabber.org/protocol/disco#info' # required

        element = query.addElement(u"feature")
        element[u'var'] = u'http://jabber.org/protocol/muc'

        self.stub.send(response)
        return d

    def test_requestInfoFrom(self):
        """
        A disco info request can be sent with an explicit sender address.
        """
        d = self.protocol.requestInfo(JID(u'example.org'),
                                      sender=JID(u'test.example.org'))

        # The explicit sender must end up in the 'from' attribute.
        iq = self.stub.output[-1]
        self.assertEqual(u'test.example.org', iq.getAttribute(u'from'))

        response = toResponse(iq, u'result')
        response.addElement((NS_DISCO_INFO, u'query'))
        self.stub.send(response)

        return d
class DiscoHandlerTest(unittest.TestCase, TestableRequestHandlerMixin):
    """
    Tests for L{disco.DiscoHandler}.
    """

    def setUp(self):
        # A fresh handler per test; individual tests replace its
        # info()/items() callbacks or its parent handler list.
        self.service = disco.DiscoHandler()

    def test_onDiscoInfo(self):
        """
        C{onDiscoInfo} should process an info request and return a response.

        The request should be parsed, C{info} called with the extracted
        parameters, and then the result should be formatted into a proper
        response element.
        """
        xml = """<iq from='test@example.com' to='example.com'
                     type='get'>
                   <query xmlns='%s'/>
                 </iq>""" % NS_DISCO_INFO

        def cb(element):
            # The response payload must be a disco#info query holding the
            # identity and feature returned by info() below.
            self.assertEqual('query', element.name)
            self.assertEqual(NS_DISCO_INFO, element.uri)
            self.assertEqual(NS_DISCO_INFO, element.identity.uri)
            self.assertEqual('dummy', element.identity['category'])
            self.assertEqual('generic', element.identity['type'])
            self.assertEqual('Generic Dummy Entity', element.identity['name'])
            self.assertEqual(NS_DISCO_INFO, element.feature.uri)
            self.assertEqual('jabber:iq:version', element.feature['var'])

        def info(requestor, target, nodeIdentifier):
            # The handler must pass the parsed addressing and (empty) node.
            self.assertEqual(JID('test@example.com'), requestor)
            self.assertEqual(JID('example.com'), target)
            self.assertEqual('', nodeIdentifier)

            return defer.succeed([
                disco.DiscoIdentity('dummy', 'generic', 'Generic Dummy Entity'),
                disco.DiscoFeature('jabber:iq:version')
            ])

        self.service.info = info
        d = self.handleRequest(xml)
        d.addCallback(cb)
        return d

    def test_onDiscoInfoWithNode(self):
        """
        An info request for a node should return it in the response.
        """
        xml = """<iq from='test@example.com' to='example.com'
                     type='get'>
                   <query xmlns='%s' node='test'/>
                 </iq>""" % NS_DISCO_INFO

        def cb(element):
            # The requested node must be echoed on the response query.
            self.assertTrue(element.hasAttribute('node'))
            self.assertEqual('test', element['node'])

        def info(requestor, target, nodeIdentifier):
            self.assertEqual('test', nodeIdentifier)

            return defer.succeed([
                disco.DiscoFeature('jabber:iq:version')
            ])

        self.service.info = info
        d = self.handleRequest(xml)
        d.addCallback(cb)
        return d

    def test_onDiscoItems(self):
        """
        C{onDiscoItems} should process an items request and return a response.

        The request should be parsed, C{items} called with the extracted
        parameters, and then the result should be formatted into a proper
        response element.
        """
        xml = """<iq from='test@example.com' to='example.com'
                     type='get'>
                   <query xmlns='%s'/>
                 </iq>""" % NS_DISCO_ITEMS

        def cb(element):
            # The response payload must be a disco#items query holding the
            # single item returned by items() below.
            self.assertEqual('query', element.name)
            self.assertEqual(NS_DISCO_ITEMS, element.uri)
            self.assertEqual(NS_DISCO_ITEMS, element.item.uri)
            self.assertEqual('example.com', element.item['jid'])
            self.assertEqual('test', element.item['node'])
            self.assertEqual('Test node', element.item['name'])

        def items(requestor, target, nodeIdentifier):
            self.assertEqual(JID('test@example.com'), requestor)
            self.assertEqual(JID('example.com'), target)
            self.assertEqual('', nodeIdentifier)

            return defer.succeed([
                disco.DiscoItem(JID('example.com'), 'test', 'Test node'),
            ])

        self.service.items = items
        d = self.handleRequest(xml)
        d.addCallback(cb)
        return d

    def test_onDiscoItemsWithNode(self):
        """
        An items request for a node should return it in the response.
        """
        xml = """<iq from='test@example.com' to='example.com'
                     type='get'>
                   <query xmlns='%s' node='test'/>
                 </iq>""" % NS_DISCO_ITEMS

        def cb(element):
            # The requested node must be echoed on the response query.
            self.assertTrue(element.hasAttribute('node'))
            self.assertEqual('test', element['node'])

        def items(requestor, target, nodeIdentifier):
            self.assertEqual('test', nodeIdentifier)

            return defer.succeed([
                disco.DiscoFeature('jabber:iq:version')
            ])

        self.service.items = items
        d = self.handleRequest(xml)
        d.addCallback(cb)
        return d

    def test_info(self):
        """
        C{info} should gather disco info from sibling handlers.
        """
        discoItems = [disco.DiscoIdentity('dummy', 'generic',
                                          'Generic Dummy Entity'),
                      disco.DiscoFeature('jabber:iq:version')
                      ]

        class DiscoResponder(XMPPHandler):
            implements(disco.IDisco)

            def getDiscoInfo(self, requestor, target, nodeIdentifier):
                if not nodeIdentifier:
                    return defer.succeed(discoItems)
                else:
                    return defer.succeed([])

        def cb(result):
            self.assertEquals(discoItems, result)

        # A plain list works as 'parent' because the handler only iterates
        # its siblings looking for IDisco providers.
        self.service.parent = [self.service, DiscoResponder()]
        d = self.service.info(JID('test@example.com'), JID('example.com'), '')
        d.addCallback(cb)
        return d

    def test_infoNotDeferred(self):
        """
        C{info} should gather disco info from sibling handlers.
        """
        discoItems = [disco.DiscoIdentity('dummy', 'generic',
                                          'Generic Dummy Entity'),
                      disco.DiscoFeature('jabber:iq:version')
                      ]

        class DiscoResponder(XMPPHandler):
            implements(disco.IDisco)

            # Returns a plain list instead of a Deferred; the handler must
            # cope with both.
            def getDiscoInfo(self, requestor, target, nodeIdentifier):
                if not nodeIdentifier:
                    return discoItems
                else:
                    return []

        def cb(result):
            self.assertEquals(discoItems, result)

        self.service.parent = [self.service, DiscoResponder()]
        d = self.service.info(JID('test@example.com'), JID('example.com'), '')
        d.addCallback(cb)
        return d

    def test_items(self):
        """
        C{info} should gather disco items from sibling handlers.
        """
        discoItems = [disco.DiscoItem(JID('example.com'), 'test', 'Test node')]

        class DiscoResponder(XMPPHandler):
            implements(disco.IDisco)

            def getDiscoItems(self, requestor, target, nodeIdentifier):
                if not nodeIdentifier:
                    return defer.succeed(discoItems)
                else:
                    return defer.succeed([])

        def cb(result):
            self.assertEquals(discoItems, result)

        self.service.parent = [self.service, DiscoResponder()]
        d = self.service.items(JID('test@example.com'), JID('example.com'), '')
        d.addCallback(cb)
        return d

    def test_itemsNotDeferred(self):
        """
        C{info} should also collect results not returned via a deferred.
        """
        discoItems = [disco.DiscoItem(JID('example.com'), 'test', 'Test node')]

        class DiscoResponder(XMPPHandler):
            implements(disco.IDisco)

            # Returns a plain list instead of a Deferred; the handler must
            # cope with both.
            def getDiscoItems(self, requestor, target, nodeIdentifier):
                if not nodeIdentifier:
                    return discoItems
                else:
                    return []

        def cb(result):
            self.assertEquals(discoItems, result)

        self.service.parent = [self.service, DiscoResponder()]
        d = self.service.items(JID('test@example.com'), JID('example.com'), '')
        d.addCallback(cb)
        return d
| dustin/wokkel | wokkel/test/test_disco.py | Python | mit | 28,055 |
""" Utilities for matplotlib examples """
import pylab
from numpy import ones, array, double, meshgrid, reshape, linspace, \
concatenate, ravel, pi, sinc
from numpy.random import randn, rand
from modshogun import BinaryLabels, RegressionLabels, RealFeatures, SparseRealFeatures
# Key press that closes the active matplotlib figure (see quit()).
QUITKEY='q'
# Number of examples generated per sample set.
NUM_EXAMPLES=100
# Offset used to separate the positive and negative example clouds.
DISTANCE=2
def quit(event):
    """Close the current pylab figure when the quit key is pressed."""
    if event.key in (QUITKEY, QUITKEY.upper()):
        pylab.close()
def set_title(title):
    """Set the figure window caption to *title* plus a quit-key hint."""
    full_title = title + " (press '" + QUITKEY + "' to quit)"
    manager = pylab.get_current_fig_manager()
    if not hasattr(manager, 'window'):
        return
    window = manager.window
    # Each GUI toolkit exposes a different method for setting the caption.
    if hasattr(window, 'setCaption'):  # QT
        window.setCaption(full_title)
    if hasattr(window, 'set_title'):  # GTK
        window.set_title(full_title)
    elif hasattr(window, 'title'):  # TK
        window.title(full_title)
def get_realdata(positive=True):
    """Return a 2 x NUM_EXAMPLES Gaussian cloud shifted by +/- DISTANCE."""
    offset = DISTANCE if positive else -DISTANCE
    return randn(2, NUM_EXAMPLES) + offset
def get_realfeatures(pos, neg):
    """Stack positive and negative examples side by side as RealFeatures."""
    stacked = concatenate(array((pos, neg)), axis=1)
    return RealFeatures(stacked)
def get_labels(raw=False, type='binary'):
    """Return -1/+1 labels, either raw or wrapped in a shogun label object.

    With raw=True the bare numpy array is returned; otherwise the array is
    wrapped according to *type* ('binary' or 'regression'), or None for an
    unknown type.
    """
    negatives = -ones(NUM_EXAMPLES, dtype=double)
    positives = ones(NUM_EXAMPLES, dtype=double)
    data = concatenate(array((negatives, positives)))
    if raw:
        return data
    if type == 'binary':
        return BinaryLabels(data)
    if type == 'regression':
        return RegressionLabels(data)
    return None
def compute_output_plot_isolines(classifier, kernel=None, train=None, sparse=False, pos=None, neg=None, regression=False):
    """Evaluate *classifier* on a regular grid and return x, y, z for isolines.

    The grid range is derived from the pos/neg training clouds when given
    (padded by 20%), otherwise [-5, 5] is used on both axes.
    """
    size = 100
    if pos is not None and neg is not None:
        grid_x = linspace(min(1.2 * neg[0, :]), max(1.2 * pos[0, :]), size)
        grid_y = linspace(min(1.2 * neg[1, :]), max(1.2 * pos[1, :]), size)
    else:
        grid_x = linspace(-5, 5, size)
        grid_y = linspace(-5, 5, size)
    x, y = meshgrid(grid_x, grid_y)

    dense = RealFeatures(array((ravel(x), ravel(y))))
    if sparse:
        test = SparseRealFeatures()
        test.obtain_from_simple(dense)
    else:
        test = dense

    # Kernel machines need the kernel re-initialized on the grid features;
    # linear machines take the features directly.
    if kernel and train:
        kernel.init(train, test)
    else:
        classifier.set_features(test)

    if regression:
        labels = classifier.apply().get_labels()
    else:
        labels = classifier.apply().get_values()
    z = labels.reshape((size, size))
    return x, y, z
def get_sinedata():
    """Return sorted x samples and noisy sinc(pi*x) targets."""
    x = 4 * rand(1, NUM_EXAMPLES) - DISTANCE
    x.sort()
    noise = 0.1 * randn(1, NUM_EXAMPLES)
    y = sinc(pi * x) + noise
    return x, y
def compute_output_plot_isolines_sine(classifier, kernel, train, regression=False):
    """Evaluate *classifier* along a dense sorted 1-D grid for sine plots."""
    x = 4 * rand(1, 500) - 2
    x.sort()
    kernel.init(train, RealFeatures(x))
    if regression:
        y = classifier.apply().get_labels()
    else:
        y = classifier.apply().get_values()
    return x, y
| AzamYahya/shogun | examples/undocumented/python_modular/graphical/util.py | Python | gpl-3.0 | 2,670 |
import factory
import datetime
from factory.django import DjangoModelFactory
from django.test import TestCase
from django.utils import timezone
from events.models import Tournament, Participant, Event
from members.models import Person, InvoiceItem, ItemType, Subscription
import logging
# Quiet factory_boy's internal logging during tests, keeping a usable
# reference to the logger. (Previously `logger` was bound to the result of
# setLevel(), which returns None.)
logger = logging.getLogger("factory")
logger.setLevel(logging.WARNING)
class EventFactory(DjangoModelFactory):
    """Factory for events.Event: sequential names, cost 10, active."""
    class Meta:
        model = Event

    name = factory.Sequence(lambda n: f"event_name_{n}")
    cost = 10
    active = True
class PersonFactory(DjangoModelFactory):
    """Factory for members.Person with sequential first names."""
    class Meta:
        model = Person

    first_name = factory.Sequence(lambda n: f"first_name_{n}")
class SubscriptionFactory(DjangoModelFactory):
    """Factory for members.Subscription using model defaults only."""
    class Meta:
        model = Subscription
class TournamentFactory(DjangoModelFactory):
    """Factory for events.Tournament with event_cost 5.

    NOTE(review): draw_date/finals_date are evaluated once at class
    definition time, not per created instance — presumably acceptable for
    these tests; confirm (factory.LazyFunction would evaluate per instance).
    """
    class Meta:
        model = Tournament

    event_cost = 5
    draw_date = timezone.now()
    finals_date = draw_date
class ItemTypeFactory(DjangoModelFactory):
    """Factory for members.ItemType, defaulting to the SOCIAL item type."""
    class Meta:
        model = ItemType

    id = ItemType.SOCIAL
class TournamentItemTypeFactory(ItemTypeFactory):
    """ItemTypeFactory variant creating the TOURNAMENT item type."""
    id = ItemType.TOURNAMENT
class ParticipantFactory(DjangoModelFactory):
    """Factory for events.Participant using model defaults only."""
    class Meta:
        model = Participant
class EventTestCase(TestCase):
    """Billing behaviour of social events and tournaments.

    Fixes versus the previous revision: removed a stray trailing comma that
    turned an assertion statement into a pointless one-tuple expression, and
    corrected a comment typo.
    """

    def test_bill_social_event(self):
        # An inactive social event with 10 participants at cost 10 each.
        social = ItemTypeFactory()
        event = EventFactory(active=False, item_type=social)
        event.description = "My event"
        for i in range(10):
            event.add_person(PersonFactory())
        self.assertEqual(Person.objects.count(), 10)
        # Unbilled total for the event should be 100
        self.assertEqual(event.unbilled_total(), 100)
        # Unbilled total for a person is 10
        person1 = Person.objects.all()[0]
        self.assertEqual(event.unbilled_total(person1), 10)
        # billing should generate 10 invoice items
        bill_data = event.billing_data()
        bill_data.process()
        items = InvoiceItem.objects.all().order_by("id")
        self.assertEqual(len(items), 10)
        # validate generated invoice items
        for i in range(10):
            self.assertEqual(items[i].amount, 10)
            self.assertEqual(items[i].item_type_id, ItemType.SOCIAL)
            self.assertEqual(items[i].description, "event_name_0")
            self.assertEqual(items[i].person.first_name, f"first_name_{i}")

    def test_tournament_active_manager(self):
        # Three tournaments with standard events; all start active.
        TournamentItemTypeFactory()
        tournament1 = TournamentFactory()
        tournament1.add_standard_events()
        tournament2 = TournamentFactory()
        tournament2.add_standard_events()
        tournament3 = TournamentFactory()
        tournament3.add_standard_events()
        self.assertEqual(Tournament.subset.active().count(), 3)
        self.assertEqual(Tournament.subset.inactive().count(), 0)
        # Deactivating one moves it to the inactive subset.
        tournament1.make_active(False)
        self.assertEqual(Tournament.subset.active().count(), 2)
        self.assertEqual(Tournament.subset.inactive().count(), 1)

    def test_bill_tournament(self):
        TournamentItemTypeFactory()
        tournament = TournamentFactory()
        tournament.add_standard_events()
        main_events = Event.objects.filter(cost__gt=0)
        self.assertEqual(main_events.count(), 5)
        plate_events = Event.objects.filter(cost=0)
        self.assertEqual(plate_events.count(), 3)
        # should be 5 active events
        self.assertEqual(Event.objects.filter(active=True).count(), 5)
        # test make all events active
        tournament.make_active(True)
        self.assertEqual(Event.objects.filter(active=True).count(), 8)
        # main is 2 singles events, + 3 doubles = 8 participants
        for event in main_events:
            if event.with_partner():
                event.add_person(PersonFactory(), partner=PersonFactory())
            else:
                event.add_person(PersonFactory())
        self.assertEqual(Participant.objects.count(), 5)
        # Each plate event has 2 entrants so 6 more people
        for event in plate_events:
            event.add_person(PersonFactory(), partner=PersonFactory())
        self.assertEqual(Participant.objects.count(), 8)
        # active tournament starts as unbilled
        self.assertEqual(tournament.billed, False)
        # generate bills should return 0 because tournament is active
        count, value = tournament.generate_bills()
        self.assertEqual(count, 0)
        self.assertEqual(value, 0)
        # deactivate tournament
        tournament.make_active(False)
        # there should be no active events in the tournament
        self.assertEqual(Event.objects.filter(active=True).count(), 0)
        # there should be 8 bills with value 40
        count, value = tournament.generate_bills()
        self.assertEqual(count, 8)
        self.assertEqual(value, 40)
        self.assertEqual(InvoiceItem.objects.filter(amount=5).count(), 8)
| ianastewart/cwltc-admin | events/tests/test_billing.py | Python | mit | 4,895 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import numpy.ma as ma
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import Gaussian2DKernel
from astropy.utils.exceptions import AstropyUserWarning
from astropy import units as u
from astropy.utils.compat.context import nullcontext
from numpy.testing import (assert_array_almost_equal_nulp,
assert_array_almost_equal,
assert_allclose)
import itertools
# Float dtypes (both endiannesses) that convolution must accept, and all
# array/kernel dtype pairings built from them.
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))

# Parameter spaces shared by the parametrized tests below.
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
NANHANDLING_OPTIONS = ['interpolate', 'fill']
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]

# Pair every boundary mode with direct convolution; convolve_fft is only
# exercised with the boundaries it supports here ('wrap' and 'fill').
BOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),
                                        BOUNDARY_OPTIONS)) + [(convolve_fft,
                                                               'wrap'),
                                                              (convolve_fft,
                                                               'fill')])

# Optional-dependency feature flags used to skip dependent tests.
HAS_SCIPY = True
try:
    import scipy
except ImportError:
    HAS_SCIPY = False

HAS_PANDAS = True
try:
    import pandas
except ImportError:
    HAS_PANDAS = False
class TestConvolve1D:
    """Tests of 1-D direct convolution across boundary/NaN/dtype options."""

    def test_list(self):
        """
        Test that convolve works correctly when inputs are lists
        """
        x = [1, 4, 5, 6, 5, 7, 8]
        y = [0.2, 0.6, 0.2]
        z = convolve(x, y, boundary=None)
        # boundary=None zeroes the edges where the kernel overhangs.
        assert_array_almost_equal_nulp(z,
                                       np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)

    def test_tuple(self):
        """
        Test that convolve works correctly when inputs are tuples
        """
        x = (1, 4, 5, 6, 5, 7, 8)
        y = (0.2, 0.6, 0.2)
        z = convolve(x, y, boundary=None)
        assert_array_almost_equal_nulp(z,
                                       np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan', 'dtype'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS,
                                               VALID_DTYPES))
    def test_quantity(self, boundary, nan_treatment,
                      normalize_kernel, preserve_nan, dtype):
        """
        Test that convolve works correctly when input array is a Quantity
        """
        x = np.array([1, 4, 5, 6, 5, 7, 8], dtype=dtype) * u.ph
        y = np.array([0.2, 0.6, 0.2], dtype=dtype)
        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     normalize_kernel=normalize_kernel,
                     preserve_nan=preserve_nan)

        # The unit of the input Quantity must survive convolution.
        assert x.unit == z.unit

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan', 'dtype'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS,
                                               VALID_DTYPES))
    def test_input_unmodified(self, boundary, nan_treatment,
                              normalize_kernel, preserve_nan, dtype):
        """
        Test that convolve works correctly when inputs are lists
        """
        array = [1., 4., 5., 6., 5., 7., 8.]
        kernel = [0.2, 0.6, 0.2]
        x = np.array(array, dtype=dtype)
        y = np.array(kernel, dtype=dtype)

        # Make pseudoimmutable
        x.flags.writeable = False
        y.flags.writeable = False

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     normalize_kernel=normalize_kernel,
                     preserve_nan=preserve_nan)

        assert np.all(np.array(array, dtype=dtype) == x)
        assert np.all(np.array(kernel, dtype=dtype) == y)

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan', 'dtype'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS,
                                               VALID_DTYPES))
    def test_input_unmodified_with_nan(self, boundary, nan_treatment,
                                       normalize_kernel, preserve_nan, dtype):
        """
        Test that convolve doesn't modify the input data
        """
        array = [1., 4., 5., np.nan, 5., 7., 8.]
        kernel = [0.2, 0.6, 0.2]
        x = np.array(array, dtype=dtype)
        y = np.array(kernel, dtype=dtype)

        # Make pseudoimmutable
        x.flags.writeable = False
        y.flags.writeable = False

        # make copies for post call comparison
        x_copy = x.copy()
        y_copy = y.copy()

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     normalize_kernel=normalize_kernel,
                     preserve_nan=preserve_nan)

        # ( NaN == NaN ) = False
        # Only compare non NaN values for canonical equivalance
        # and then check NaN explicitly with np.isnan()
        array_is_nan = np.isnan(array)
        kernel_is_nan = np.isnan(kernel)
        array_not_nan = ~array_is_nan
        kernel_not_nan = ~kernel_is_nan
        assert np.all(x_copy[array_not_nan] == x[array_not_nan])
        assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
        assert np.all(np.isnan(x[array_is_nan]))
        assert np.all(np.isnan(y[kernel_is_nan]))

    @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
    def test_dtype(self, dtype_array, dtype_kernel):
        '''
        Test that 32- and 64-bit floats are correctly handled
        '''
        x = np.array([1., 2., 3.], dtype=dtype_array)
        y = np.array([0., 1., 0.], dtype=dtype_kernel)
        z = convolve(x, y)
        # The result keeps the dtype of the input array, not the kernel.
        assert x.dtype == z.dtype

    @pytest.mark.parametrize(('convfunc', 'boundary',), BOUNDARIES_AND_CONVOLUTIONS)
    def test_unity_1_none(self, boundary, convfunc):
        '''
        Test that a unit kernel with a single element returns the same array
        '''
        x = np.array([1., 2., 3.], dtype='>f8')
        y = np.array([1.], dtype='>f8')
        z = convfunc(x, y, boundary=boundary)
        np.testing.assert_allclose(z, x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3(self, boundary):
        '''
        Test that a unit kernel with three elements returns the same array
        (except when boundary is None).
        '''
        x = np.array([1., 2., 3.], dtype='>f8')
        y = np.array([0., 1., 0.], dtype='>f8')
        z = convolve(x, y, boundary=boundary)
        if boundary is None:
            assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a uniform kernel with three elements
        '''
        x = np.array([1., 0., 3.], dtype='>f8')
        y = np.array([1., 1., 1.], dtype='>f8')
        z = convolve(x, y, boundary=boundary, normalize_kernel=False)
        if boundary is None:
            assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))
        elif boundary == 'fill':
            assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))
        elif boundary == 'wrap':
            assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))
        else:
            assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS))
    def test_unity_3_withnan(self, boundary, nan_treatment,
                             normalize_kernel, preserve_nan):
        '''
        Test that a unit kernel with three elements returns the same array
        (except when boundary is None). This version includes a NaN value in
        the original array.
        '''
        x = np.array([1., np.nan, 3.], dtype='>f8')
        y = np.array([0., 1., 0.], dtype='>f8')

        # Since the kernel is actually only one pixel wide (because of the
        # zeros) the NaN value doesn't get interpolated over so a warning is
        # expected.
        if nan_treatment == 'interpolate' and not preserve_nan:
            ctx = pytest.warns(AstropyUserWarning,
                               match="nan_treatment='interpolate', however, "
                                     "NaN values detected")
        else:
            ctx = nullcontext()

        with ctx:
            z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                         normalize_kernel=normalize_kernel,
                         preserve_nan=preserve_nan)

        if preserve_nan:
            assert np.isnan(z[1])

        x = np.nan_to_num(z)
        z = np.nan_to_num(z)

        if boundary is None:
            assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary', 'nan_treatment',
                              'normalize_kernel', 'preserve_nan'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS,
                                               NORMALIZE_OPTIONS,
                                               PRESERVE_NAN_OPTIONS))
    def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel,
                               preserve_nan):
        '''
        Test that the different modes are producing the correct results using
        a uniform kernel with three elements. This version includes a NaN
        value in the original array.
        '''
        x = np.array([1., np.nan, 3.], dtype='>f8')
        y = np.array([1., 1., 1.], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     normalize_kernel=normalize_kernel,
                     preserve_nan=preserve_nan)

        if preserve_nan:
            assert np.isnan(z[1])

        z = np.nan_to_num(z)

        # Expected results keyed by (boundary, nan_treatment,
        # normalize_kernel).
        rslt = {
            (None, 'interpolate', True): [0, 2, 0],
            (None, 'interpolate', False): [0, 6, 0],
            (None, 'fill', True): [0, 4/3., 0],
            (None, 'fill', False): [0, 4, 0],
            ('fill', 'interpolate', True): [1/2., 2, 3/2.],
            ('fill', 'interpolate', False): [3/2., 6, 9/2.],
            ('fill', 'fill', True): [1/3., 4/3., 3/3.],
            ('fill', 'fill', False): [1, 4, 3],
            ('wrap', 'interpolate', True): [2, 2, 2],
            ('wrap', 'interpolate', False): [6, 6, 6],
            ('wrap', 'fill', True): [4/3., 4/3., 4/3.],
            ('wrap', 'fill', False): [4, 4, 4],
            ('extend', 'interpolate', True): [1, 2, 3],
            ('extend', 'interpolate', False): [3, 6, 9],
            ('extend', 'fill', True): [2/3., 4/3., 6/3.],
            ('extend', 'fill', False): [2, 4, 6],
        }[boundary, nan_treatment, normalize_kernel]

        if preserve_nan:
            rslt[1] = 0

        assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)

    @pytest.mark.parametrize(('boundary', 'normalize_kernel'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NORMALIZE_OPTIONS))
    def test_zero_sum_kernel(self, boundary, normalize_kernel):
        """
        Test that convolve works correctly with zero sum kernels.
        """

        if normalize_kernel:
            pytest.xfail("You can't normalize by a zero sum kernel")

        x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        y = [-1, -1, -1, -1, 8, -1, -1, -1, -1]
        assert(np.isclose(sum(y), 0, atol=1e-8))

        z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)

        # boundary, normalize_kernel == False
        rslt = {
            (None): [0., 0., 0., 0., 0., 0., 0., 0., 0.],
            ('fill'): [-6., -3., -1., 0., 0., 10., 21., 33., 46.],
            ('wrap'): [-36., -27., -18., -9., 0., 9., 18., 27., 36.],
            ('extend'): [-10., -6., -3., -1., 0., 1., 3., 6., 10.]
        }[boundary]

        assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)

    @pytest.mark.parametrize(('boundary', 'normalize_kernel'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NORMALIZE_OPTIONS))
    def test_int_masked_kernel(self, boundary, normalize_kernel):
        """
        Test that convolve works correctly with integer masked kernels.
        """

        if normalize_kernel:
            pytest.xfail("You can't normalize by a zero sum kernel")

        x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        # The first kernel element is masked out; fill_value replaces it.
        y = ma.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.)

        z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)

        # boundary, normalize_kernel == False
        rslt = {
            (None): [0., 0., 0., 0., 9., 0., 0., 0., 0.],
            ('fill'): [-1., 3., 6., 8., 9., 10., 21., 33., 46.],
            ('wrap'): [-31., -21., -11., -1., 9., 10., 20., 30., 40.],
            ('extend'): [-5., 0., 4., 7., 9., 10., 12., 15., 19.]
        }[boundary]

        assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)

    @pytest.mark.parametrize('preserve_nan', PRESERVE_NAN_OPTIONS)
    def test_int_masked_array(self, preserve_nan):
        """
        Test that convolve works correctly with integer masked arrays.
        """
        # The third array element is masked; it is treated like a NaN.
        x = ma.array([3, 5, 7, 11, 13], mask=[0, 0, 1, 0, 0], fill_value=0.)
        y = np.array([1., 1., 1.], dtype='>f8')

        z = convolve(x, y, preserve_nan=preserve_nan)

        if preserve_nan:
            assert np.isnan(z[2])
            z[2] = 8

        assert_array_almost_equal_nulp(z, (8/3., 4, 8, 12, 8), 10)
class TestConvolve2D:
    """Tests of convolve() with 2-dimensional arrays and kernels."""

    def test_list(self):
        """
        Test that convolve works correctly when inputs are lists
        """
        x = [[1, 1, 1],
             [1, 1, 1],
             [1, 1, 1]]

        z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)
        assert_array_almost_equal_nulp(z, x, 10)
        z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
        # An unnormalized 3x3 ones kernel sums 9 cells of ones.
        assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)

    @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
    def test_dtype(self, dtype_array, dtype_kernel):
        '''
        Test that 32- and 64-bit floats are correctly handled
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype=dtype_array)

        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype=dtype_kernel)

        z = convolve(x, y)

        assert x.dtype == z.dtype

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_1x1_none(self, boundary):
        '''
        Test that a 1x1 unit kernel returns the same array
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype='>f8')

        y = np.array([[1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3x3(self, boundary):
        '''
        Test that a 3x3 unit kernel returns the same array (except when
        boundary is None).
        '''
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype='>f8')

        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        if boundary is None:
            # boundary=None zeroes every edge pixel, leaving only the centre.
            assert np.all(z == np.array([[0., 0., 0.],
                                         [0., 5., 0.],
                                         [0., 0., 0.]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel.
        '''
        x = np.array([[0., 0., 3.],
                      [1., 0., 0.],
                      [0., 2., 0.]], dtype='>f8')

        y = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [1., 1., 1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 6., 0.],
                                                        [0., 0., 0.]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],
                                                        [3., 6., 5.],
                                                        [3., 3., 2.]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],
                                                        [6., 6., 6.],
                                                        [6., 6., 6.]], dtype='>f8'), 10)
        else:
            assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],
                                                        [4., 6., 8.],
                                                        [6., 5., 4.]], dtype='>f8'), 10)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3x3_withnan(self, boundary):
        '''
        Test that a 3x3 unit kernel returns the same array (except when
        boundary is None). This version includes a NaN value in the original
        array.
        '''
        x = np.array([[1., 2., 3.],
                      [4., np.nan, 6.],
                      [7., 8., 9.]], dtype='>f8')

        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     preserve_nan=True)

        assert np.isnan(z[1, 1])
        # Zero out the preserved NaN before the element-wise comparison.
        x = np.nan_to_num(z)
        z = np.nan_to_num(z)

        if boundary is None:
            assert np.all(z == np.array([[0., 0., 0.],
                                         [0., 0., 0.],
                                         [0., 0., 0.]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3_withnanfilled(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[0., 0., 4.],
                      [1., np.nan, 0.],
                      [0., 3., 0.]], dtype='>f8')

        y = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [1., 1., 1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 8., 0.],
                                                        [0., 0., 0.]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],
                                                        [4., 8., 7.],
                                                        [4., 4., 3.]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],
                                                        [8., 8., 8.],
                                                        [8., 8., 8.]], dtype='>f8'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],
                                                        [5., 8., 11.],
                                                        [8., 7., 6.]], dtype='>f8'), 10)
        else:
            raise ValueError("Invalid boundary specification")

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3_withnaninterped(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[0., 0., 4.],
                      [1., np.nan, 0.],
                      [0., 3., 0.]], dtype='>f8')

        y = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [1., 1., 1.]], dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
                     normalize_kernel=True)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 1., 0.],
                                                        [0., 0., 0.]], dtype='>f8'), 10)
        elif boundary == 'fill':
            # Denominator 8 = number of non-NaN kernel-weighted cells.
            assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],
                                                        [4./8, 8./8, 7./8],
                                                        [4./8, 4./8, 3./8]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],
                                                        [1., 1., 1.],
                                                        [1., 1., 1.]], dtype='>f8'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],
                                                        [5./8, 8./8, 11./8],
                                                        [8./8, 7./8, 6./8]], dtype='>f8'), 10)
        else:
            raise ValueError("Invalid boundary specification")

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_non_normalized_kernel_2D(self, boundary):
        # Kernel with zero sum: must not be normalized implicitly.
        x = np.array([[0., 0., 4.],
                      [1., 2., 0.],
                      [0., 3., 0.]], dtype='float')

        y = np.array([[1., -1., 1.],
                      [-1., 0., -1.],
                      [1., -1., 1.]], dtype='float')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
                                                        [0., 0., 0.],
                                                        [0., 0., 0.]], dtype='float'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],
                                                        [1., 0., -3.],
                                                        [-2., -1., -1.]], dtype='float'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],
                                                        [5., 0., -4.],
                                                        [2., 3., -4.]], dtype='float'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],
                                                        [0., 0., 1.],
                                                        [2., -4., 2.]], dtype='float'), 10)
        else:
            raise ValueError("Invalid boundary specification")
class TestConvolve3D:
    """Tests of convolve() with 3-dimensional arrays and kernels."""

    def test_list(self):
        """
        Test that convolve works correctly when inputs are lists
        """
        x = [[[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]],
             [[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]],
             [[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]]]

        z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
        # An unnormalized 3x3x3 ones kernel sums 27 cells of ones.
        assert_array_almost_equal_nulp(z / 27, x, 10)

    @pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
    def test_dtype(self, dtype_array, dtype_kernel):
        '''
        Test that 32- and 64-bit floats are correctly handled
        '''
        # NOTE(review): this uses 2-D arrays even though the class covers the
        # 3-D case; dtype propagation is dimension-independent, so it passes.
        x = np.array([[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9.]], dtype=dtype_array)

        y = np.array([[0., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 0.]], dtype=dtype_kernel)

        z = convolve(x, y)

        assert x.dtype == z.dtype

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_1x1x1_none(self, boundary):
        '''
        Test that a 1x1x1 unit kernel returns the same array
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')

        y = np.array([[[1.]]], dtype='>f8')

        z = convolve(x, y, boundary=boundary)

        assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_unity_3x3x3(self, boundary):
        '''
        Test that a 3x3x3 unit kernel returns the same array (except when
        boundary is None).
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')

        # Delta kernel: 1 at the centre, 0 elsewhere.
        y = np.zeros((3, 3, 3), dtype='>f8')
        y[1, 1, 1] = 1.

        z = convolve(x, y, boundary=boundary)

        if boundary is None:
            assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3x3(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')

        y = np.ones((3, 3, 3), dtype='>f8')

        z = convolve(x, y, boundary=boundary, normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                                        [[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],
                                                        [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],
                                                        [[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],
                                                        [[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
                                                        [[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
                                                        [[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)
        else:
            assert_array_almost_equal_nulp(z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],
                                                        [[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],
                                                        [[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)

    @pytest.mark.parametrize(('boundary', 'nan_treatment'),
                             itertools.product(BOUNDARY_OPTIONS,
                                               NANHANDLING_OPTIONS))
    def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
        '''
        Test that a 3x3x3 unit kernel returns the same array (except when
        boundary is None). This version includes a NaN value in the original
        array.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')

        y = np.zeros((3, 3, 3), dtype='>f8')
        y[1, 1, 1] = 1.

        z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                     preserve_nan=True)

        assert np.isnan(z[1, 1, 1])
        # Zero out the preserved NaN before the element-wise comparison.
        x = np.nan_to_num(z)
        z = np.nan_to_num(z)

        if boundary is None:
            assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                         [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
        else:
            assert np.all(z == x)

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3x3_withnan_filled(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')

        y = np.ones((3, 3, 3), dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                     normalize_kernel=False)

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                                        [[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
                                                        [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
                                                         [32., 43., 22.],
                                                         [22., 31., 15.]],
                                                        [[37., 47., 20.],
                                                         [60., 78., 33.],
                                                         [43., 57., 24.]],
                                                        [[29., 37., 13.],
                                                         [47., 58., 19.],
                                                         [33., 41., 13.]]], dtype='>f8'), 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
                                                        [[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
                                                        [[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
                                                         [72., 63., 54.],
                                                         [82., 75., 68.]],
                                                        [[93., 68., 43.],
                                                         [105., 78., 51.],
                                                         [117., 88., 59.]],
                                                        [[124., 85., 46.],
                                                         [138., 93., 48.],
                                                         [152., 101., 50.]]],
                                                       dtype='>f8'), 10)
        else:
            raise ValueError("Invalid Boundary Option")

    @pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
    def test_uniform_3x3x3_withnan_interped(self, boundary):
        '''
        Test that the different modes are producing the correct results using
        a 3x3 uniform kernel. This version includes a NaN value in the
        original array.
        '''
        x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                      [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                      [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')

        y = np.ones((3, 3, 3), dtype='>f8')

        z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
                     normalize_kernel=True)

        kernsum = y.sum() - 1  # one nan is missing
        mid = x[np.isfinite(x)].sum() / kernsum

        if boundary is None:
            assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                                        [[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
                                                        [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
                                                       dtype='>f8')/kernsum, 10)
        elif boundary == 'fill':
            assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
                                                         [32., 43., 22.],
                                                         [22., 31., 15.]],
                                                        [[37., 47., 20.],
                                                         [60., 78., 33.],
                                                         [43., 57., 24.]],
                                                        [[29., 37., 13.],
                                                         [47., 58., 19.],
                                                         [33., 41., 13.]]],
                                                       dtype='>f8')/kernsum, 10)
        elif boundary == 'wrap':
            assert_array_almost_equal_nulp(z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)
        elif boundary == 'extend':
            assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
                                                         [72., 63., 54.],
                                                         [82., 75., 68.]],
                                                        [[93., 68., 43.],
                                                         [105., 78., 51.],
                                                         [117., 88., 59.]],
                                                        [[124., 85., 46.],
                                                         [138., 93., 48.],
                                                         [152., 101., 50.]]],
                                                       dtype='>f8')/kernsum, 10)
        else:
            raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
    '''
    Regression test for #6264: make sure that asymmetric convolution
    functions go the right direction
    '''
    # The kernel [1, 2, 3] is not symmetric, so a flipped-orientation bug
    # would change the expected values below.
    x = np.array([3., 0., 1.], dtype='>f8')

    y = np.array([1, 2, 3], dtype='>f8')

    z = convolve(x, y, boundary=boundary, normalize_kernel=False)

    if boundary == 'fill':
        assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
    elif boundary is None:
        assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)
    elif boundary == 'extend':
        assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)
    elif boundary == 'wrap':
        assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
@pytest.mark.parametrize('ndims', (1, 2, 3))
def test_convolution_consistency(ndims):
    """Direct and FFT convolution must agree in 1, 2 and 3 dimensions."""
    # Reseed before each draw so array and kernel are reproducible.
    np.random.seed(0)
    array = np.random.randn(*([3]*ndims))
    np.random.seed(0)
    kernel = np.random.rand(*([3]*ndims))

    conv_f = convolve_fft(array, kernel, boundary='fill')
    conv_d = convolve(array, kernel, boundary='fill')

    assert_array_almost_equal_nulp(conv_f, conv_d, 30)
def test_astropy_convolution_against_numpy():
    """Both astropy implementations should match numpy 'same'-mode output."""
    x = np.array([1, 2, 3])
    y = np.array([5, 4, 3, 2, 1])

    # normalize_kernel=False makes the comparison with np.convolve exact.
    assert_array_almost_equal(np.convolve(y, x, 'same'),
                              convolve(y, x, normalize_kernel=False))
    assert_array_almost_equal(np.convolve(y, x, 'same'),
                              convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_SCIPY')
def test_astropy_convolution_against_scipy():
    """Both astropy implementations should match scipy's fftconvolve."""
    from scipy.signal import fftconvolve
    x = np.array([1, 2, 3])
    y = np.array([5, 4, 3, 2, 1])

    assert_array_almost_equal(fftconvolve(y, x, 'same'),
                              convolve(y, x, normalize_kernel=False))
    assert_array_almost_equal(fftconvolve(y, x, 'same'),
                              convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_PANDAS')
def test_regression_6099():
    """Issue #6099: a pandas.Series input must convolve like its ndarray."""
    wave = np.array(np.linspace(5000, 5100, 10))
    boxcar = 3
    nonseries_result = convolve(wave, np.ones((boxcar,))/boxcar)

    wave_series = pandas.Series(wave)
    series_result = convolve(wave_series, np.ones((boxcar,))/boxcar)

    assert_array_almost_equal(nonseries_result, series_result)
def test_invalid_array_convolve():
    """A non-array first argument must be rejected with TypeError."""
    kernel = np.ones(3)/3.

    with pytest.raises(TypeError):
        convolve('glork', kernel)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_square_kernel_asymmetric(boundary):
    # Regression test for a bug that occurred when using non-square kernels in
    # 2D when using boundary=None
    kernel = np.array([[1, 2, 3, 2, 1], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]])
    image = np.zeros((13, 13))
    image[6, 6] = 1
    result = convolve(image, kernel, normalize_kernel=False, boundary=boundary)
    # Convolving a unit impulse must reproduce the kernel around the impulse.
    assert_allclose(result[5:8, 4:9], kernel)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
                         itertools.product(BOUNDARY_OPTIONS,
                                           NORMALIZE_OPTIONS))
def test_uninterpolated_nan_regions(boundary, normalize_kernel):
    # Issue #8086
    # Test NaN interpolation of contiguous NaN regions with kernels of size
    # identical and greater than that of the region of NaN values.

    # Test case: kernel.shape == NaN_region.shape
    # A NaN region as large as the kernel cannot be interpolated over, so a
    # warning must be emitted and NaNs survive in the result.
    kernel = Gaussian2DKernel(1, 5, 5)
    nan_centroid = np.full(kernel.shape, np.nan)
    image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
                   constant_values=1)
    with pytest.warns(AstropyUserWarning,
                      match=r"nan_treatment='interpolate', however, NaN values detected "
                      r"post convolution. A contiguous region of NaN values, larger "
                      r"than the kernel size, are present in the input array. "
                      r"Increase the kernel size to avoid this."):
        result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
                          normalize_kernel=normalize_kernel)
        assert(np.any(np.isnan(result)))

    # Test case: kernel.shape > NaN_region.shape
    # A slightly smaller NaN region must be fully interpolated over.
    nan_centroid = np.full((kernel.shape[0]-1, kernel.shape[1]-1), np.nan)  # 1 smaller than kernel
    image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
                   constant_values=1)
    result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
                      normalize_kernel=normalize_kernel)
    assert(~np.any(np.isnan(result)))  # Note: negation
def test_regressiontest_issue9168():
    """
    Issue #9168 pointed out that kernels can be (unitless) quantities, which
    leads to crashes when inplace modifications are made to arrays in
    convolve/convolve_fft, so we now strip the quantity aspects off of kernels.
    """

    x = np.array([[1., 2., 3.],
                  [4., 5., 6.],
                  [7., 8., 9.]],)

    # A quantity ratio yields a dimensionless Quantity kernel width.
    kernel_fwhm = 1*u.arcsec
    pixel_size = 1*u.arcsec

    kernel = Gaussian2DKernel(x_stddev=kernel_fwhm/pixel_size)

    # Both code paths used to crash; each call only needs to complete.
    result = convolve_fft(x, kernel, boundary='fill', fill_value=np.nan,
                          preserve_nan=True)
    result = convolve(x, kernel, boundary='fill', fill_value=np.nan,
                      preserve_nan=True)
| StuartLittlefair/astropy | astropy/convolution/tests/test_convolve.py | Python | bsd-3-clause | 42,432 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import nltk.sem.logic as nll
from .condition import Condition
# Module-wide typed logic parser used by SemRepRule.assignSemRep.
tlp = nll.LogicParser(type_check=True)
class SemRepRule:
    """A rule assigning a semantic representation if conditions are met.

    A SemRepRule object consists of conditions that can be applied to
    the nodes in a nltk.parse.DependencyGraph object. For each node,
    each condition can be met (True) or not met (False). If all
    conditions are met, the node should be assigned the semantic
    representation built from semRepPat.

    Attributes:
        conditions: A list of Condition objects.
        semRepPat: A string holding a template for a semantic
            representation. It can be formatted using the .format
            method with the correct node as an argument.
        semSig: A dictionary mapping expressions from the semRepPat to
            a string representation of their types.
    """

    def __init__(self, conditions, semRepPat, semSig):
        """Initialize SemRepRule with the given values.

        Each element of conditions may be a Condition object (used
        as-is) or a string (parsed via Condition.fromstring). Every
        element is validated individually, so a list mixing valid and
        invalid entries is rejected instead of being silently accepted
        based on its first element alone.

        Args:
            conditions: An iterable of Condition objects and/or strings
                that can be used to initialize Condition objects.
            semRepPat: A string holding a template for a semantic
                representation.
            semSig: A dictionary mapping expressions from the semRepPat
                to a string representation of their types.

        Returns:
            The initialized SemRepRule.

        Raises:
            TypeError: An element of conditions is neither a str nor a
                Condition.
        """
        self.semRepPat = semRepPat
        self.semSig = semSig
        self.conditions = []
        for cond in conditions:
            if isinstance(cond, Condition):
                self.conditions.append(cond)
            elif isinstance(cond, str):
                self.conditions.append(Condition.fromstring(cond))
            else:
                raise TypeError(
                    'conditions is not a list of str or Condition objects')

    @classmethod
    def fromstring(cls, s):
        """Read a SemRepRule from a string and return it."""
        # TODO: Implement this.
        pass

    def testConditions(self, depGraph, address):
        """Test if a node satisfies all conditions of this SemRepRule.

        Args:
            depGraph: An nltk.parse.DependencyGraph object.
            address: An integer denoting the address of a node in the
                depGraph.

        Returns:
            True, if all conditions are satisfied (trivially True for
            an empty condition list).
            False, if one or more conditions are not satisfied.
        """
        return all(cond(depGraph, address) for cond in self.conditions)

    def assignSemRep(self, node, ascii=False):
        """Assign the semantic representation of this rule to a node.

        Format this rule's semRepPat with the given node, do the same
        with the expressions in this rule's semSig. Then parse the
        formatted semRepPat into a logic expression using the formatted
        semSig as a signature. Add the resulting logic representation
        to the node's dictionary under the new key 'semrep'.

        Args:
            node: A node of an nltk.parse.DependencyGraph object.
            ascii: A boolean indicating whether the assigned
                representation should be ascii-compatible.
                Default: False
        """
        expr = self.semRepPat.format(node)
        exprSig = {k.format(node): v for k, v in self.semSig.items()}
        if ascii:
            # Transliterate both the expression and its signature keys and
            # values so parser input is pure ASCII.
            expr = _toASCII(expr)
            exprSig = {_toASCII(k): _toASCII(v) for k, v in exprSig.items()}
        node['semrep'] = tlp.parse(expr, signature=exprSig)

    def __str__(self):
        s = '<SemRepRule: conditions={0.conditions}, semRepPat={0.semRepPat}>'
        return s.format(self)
class SemRepAssigner:
    """An object assigning semantic representations to sentences.

    A SemRepAssigner can assign semantic representations to nodes of an
    nltk.parse.DependencyGraph object. Foundation for the decision on a
    representation is the list of rules.

    Attributes:
        rules: A list of SemRepRule objects (order matters: the first
            matching rule wins).
        ascii: A boolean indicating whether assigned representations
            should be ascii-compatible.
    """

    def __init__(self, rules, ascii=False):
        """Initialize SemRepAssigner with the given rules.

        The rules should be a sorted iterable, e. g. a list.

        Args:
            rules: A list of SemRepRule objects.
            ascii: A boolean indicating whether the assigned
                representations should be ascii-compatible.
                Default: False

        Returns:
            The initialized SemRepAssigner.
        """
        self.rules = rules
        self.ascii = ascii

    @classmethod
    def fromfile(cls, filename, ascii=False):
        """Read a SemRepAssigner from a json file and return it.

        The json file has to hold an array of objects (the rules).
        Each rule object has to contain the keys 'conditions' (array of
        condition strings), 'semRepPat' (template string) and 'semSig'
        (object), meeting the requirements imposed by SemRepRule.

        Args:
            filename: The name of the json file.
            ascii: A boolean indicating whether the assigned
                representations should be ascii-compatible.
                Default: False

        Returns:
            A SemRepAssigner object with the rules from the json file.

        Raises:
            ValueError: filename does not denote a valid json file.
        """
        # json.load itself raises ValueError on invalid JSON; the previous
        # `try: ... except ValueError: raise` wrapper was a no-op.
        with open(filename) as f:
            json_rules = json.load(f)
        rules = [SemRepRule(r['conditions'], r['semRepPat'], r['semSig'])
                 for r in json_rules]
        return cls(rules, ascii)

    def assignToDependencyGraph(self, depGraph):
        """Assign semantic representations to DependencyGraph nodes.

        The dictionaries corresponding to the nodes in the depGraph are
        extended with a 'semrep' key under which the representation is
        stored (only for nodes matched by some rule).

        Args:
            depGraph: An nltk.parse.DependencyGraph object.
        """
        for address in depGraph.nodes:
            self.assignToNode(depGraph, address)

    def assignToNode(self, depGraph, address):
        """Assign a semantic representation to a DependencyGraph node.

        The first rule whose conditions are all satisfied is applied;
        later rules are not tried.

        Args:
            depGraph: An nltk.parse.DependencyGraph object.
            address: An integer denoting a node in the depGraph.
        """
        for r in self.rules:
            if r.testConditions(depGraph, address):
                r.assignSemRep(depGraph.get_by_address(address), self.ascii)
                break
        else:
            # No rule's conditions are satisfied.
            # Assign default semrep here.
            pass
def _toASCII(s):
"""Replace non-ascii characters to make a string ascii-compatible."""
# XXX: This is an ugly hack. There has to be a proper way to do this.
transDict = {chr(n):chr(n) for n in range(0,128)}
germanLower = {'ß':'ss', 'ä': 'ae', 'ö': 'oe', 'ü': 'ue'}
germanUpper = {'ẞ':'SS', 'Ä': 'AE', 'Ö': 'OE', 'Ü': 'UE'}
transDict.update(germanLower)
transDict.update(germanUpper)
# Replace all characters that are not in the transDict with '?'.
sList = list(s)
for i in range(0, len(sList)):
if sList[i] not in transDict:
sList[i] = '?'
s = ''.join(sList)
t = str.maketrans(transDict)
return s.translate(t)
def demo():
    """Assign representations to two bundled example parses and print them."""
    import nltk.parse as nlp
    ass = SemRepAssigner.fromfile('rules/heuristic_rules.json')
    # NOTE(review): the example files are opened without explicit encoding
    # and never closed; acceptable for a demo, not for library code.
    sTaube = open('test/conll/beissende_taube.conll').read()
    dgTaube = nlp.DependencyGraph(sTaube)
    sHase = open('test/conll/schenkender_hase.conll').read()
    dgHase = nlp.DependencyGraph(sHase)
    ass.assignToDependencyGraph(dgTaube)
    print(dgTaube)
    #print(dgTaube.get_by_address(3)['semrep'].type)
    #print(dgTaube.get_by_address(2)['semrep'].type)
    ass.assignToDependencyGraph(dgHase)
    print(dgHase)

if __name__ == '__main__':
    demo()
| Simon-Will/montesniere | montesniere/assign.py | Python | gpl-3.0 | 8,858 |
import zlib
import httplib
import urllib
import urllib2
import gzip
import StringIO
import json
from urlparse import urlparse
# POST
def post(host, url, params):
    """POST form-encoded *params* to http://<host>:80<url> (Python 2).

    Advertises gzip support and transparently decompresses the response
    body when the server compressed it. Returns the response body.
    """
    parameters = urllib.urlencode(params)
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain",
               "Accept-Encoding": "gzip"}
    connection = httplib.HTTPConnection("%s:80" % host)
    connection.request("POST", url, parameters, headers)
    response = connection.getresponse()
    # Compressed (gzip) response...
    if response.getheader("content-encoding") == "gzip":
        html_gzipped_data = response.read()
        string_io = StringIO.StringIO(html_gzipped_data)
        gzipper = gzip.GzipFile(fileobj=string_io)
        html_data = gzipper.read()
    # Plain text response...
    else:
        html_data = response.read()
    # Cleanup
    connection.close()
    # Return value
    return html_data
def post_json(url, payload):
    """POST *payload* serialized as JSON to *url* (Python 2).

    Mimics an XMLHttpRequest JSON call; prints request and response for
    debugging. Decompresses gzip responses before returning the body.
    """
    json_payload = json.dumps(payload)
    headers = {"Content-type": "application/json; charset=UTF-8",
               "Accept": "application/json, text/javascript, */*; q=0.01",
               "Accept-Encoding": "gzip", "X-Requested-With": "XMLHttpRequest",
               "Content-Length": len(json_payload)}
    # print headers
    print "post_json: %s" % url
    print "payload: %s" % json_payload
    req = urllib2.Request(url, json_payload, headers)
    response = urllib2.urlopen(req)
    resp_info = response.info()
    # Compressed (gzip) response...
    if "Content-Encoding" in resp_info and resp_info["Content-Encoding"] == "gzip":
        html_gzipped_data = response.read()
        string_io = StringIO.StringIO(html_gzipped_data)
        gzipper = gzip.GzipFile(fileobj=string_io)
        response_data = gzipper.read()
    else:
        response_data = response.read()
    response.close()
    print response_data
    return response_data
# GET
def get(url):
    """GET *url* and return the response body (Python 2).

    Advertises gzip support and transparently decompresses the response
    when the server compressed it.
    """
    h = urllib2.HTTPHandler(debuglevel=0)
    print "get: %s" % url
    request = urllib2.Request(url)
    request.add_header("Accept-Encoding", "gzip")
    opener = urllib2.build_opener(h)
    f = opener.open(request)
    # Compressed (gzip) response...
    if f.headers.get("content-encoding") == "gzip":
        html_gzipped_data = f.read()
        string_io = StringIO.StringIO(html_gzipped_data)
        gzipper = gzip.GzipFile(fileobj=string_io)
        html_data = gzipper.read()
    # Plain text response...
    else:
        html_data = f.read()
    # Cleanup
    f.close()
    # Return value
    return html_data
| camalot/plugin.video.microsoftvirtualacademy | resources/lib/http_request.py | Python | apache-2.0 | 2,566 |
from mod_base import *
class AutoReJoin(Listener):
    """Automatically rejoin a channel after kick."""

    def init(self):
        # Subscribe only to channel-kick events.
        self.events = [IRC_EVT_CHAN_KICK]

    def event(self, event):
        # NOTE(review): the prints below look like leftover debug output.
        print event
        print self.bot.me
        print event.user
        # Rejoin only when the bot itself is the one that was kicked.
        if event.user.GetNick() == self.bot.me.GetNick():
            print "REJOIN"
            self.bot.Join(event.win.GetName())
# Module registration record consumed by the bot's module loader.
module = {
    "class": AutoReJoin,
    "type": MOD_LISTENER,
}
import argparse
import glob
import collections
import itertools
import random
import matplotlib
# Select the non-interactive AGG backend before pyplot is imported so
# figures can be rendered without a display.
matplotlib.use("AGG")
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os

import pipeline

# Name of the OpenCV preview window used in --show mode.
_WINDOW_NAME = "Pipeline"
def add_and_return(s, x):
    """Record *x* in the set *s*, then hand *x* back to the caller."""
    s.add(x)
    return x
def advance(pos, npos):
    """Step *pos* forward by one, wrapping back to 0 after npos - 1."""
    nxt = pos + 1
    return 0 if nxt == npos else nxt
def reverse(pos, npos):
    """Step *pos* backward by one, wrapping to npos - 1 below 0."""
    prev = pos - 1
    return npos - 1 if prev < 0 else prev
def bgr(img):
    """Convert an RGB image array to OpenCV's BGR channel order."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)


def rgb(img):
    """Convert an OpenCV BGR image array to RGB channel order."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Registry of valid --stage values; add_and_return records each stage
# name in _CHOICES as a side effect while binding the module constant.
_CHOICES = set()
_CROP_STAGE = add_and_return(_CHOICES, "crop")
_H_CHANNEL_STAGE = add_and_return(_CHOICES, "h_channel")
_S_CHANNEL_STAGE = add_and_return(_CHOICES, "s_channel")
_WARP_STAGE = add_and_return(_CHOICES, "warp")
_DEBUG = add_and_return(_CHOICES, "debug")
def simple_fig(args, img1, img2):
    """Return a 1x2 matplotlib figure showing *img1* and *img2* side by side."""
    fig, axarr = plt.subplots(nrows=1, ncols=2, figsize=(11, 8.5))
    fig.tight_layout()
    axarr[0].imshow(img1)
    axarr[1].imshow(img2)
    return fig
# returns fig
def generate_debug(args, processor, bgr_image, result):
    """Build a 4x4 grid figure visualising the pipeline's intermediate stages."""
    fig, axarr = plt.subplots(nrows=4, ncols=4, figsize=(11, 8.5))
    fig.tight_layout()
    # Row 0: input, yellow mask (with ground polygon overlay), binary, warped.
    axarr[0, 0].imshow(rgb(bgr_image))
    axarr[0, 1].set_title("yellow mask")
    yellow_mask = rgb(cv2.bitwise_and(result.cropped, result.cropped, result.yellow_mask))
    cv2.polylines(yellow_mask, [pipeline.get_ground_polygon()], True, [255, 0, 0], thickness=2)
    axarr[0, 1].imshow(yellow_mask)
    axarr[0, 2].set_title("binary image")
    axarr[0, 2].imshow(result.binary)
    axarr[0, 3].set_title("warped result")
    axarr[0, 3].imshow(generate_warped(result))
    axarr[2, 3].set_title("inverse warped result")
    axarr[2, 3].imshow(generate_inv_warped(processor, result))
    # Row 1: H-channel processing chain.
    axarr[1, 0].set_title("h channel")
    axarr[1, 0].imshow(result.hls[:, :, 0])
    axarr[1, 1].set_title("h blurred")
    axarr[1, 1].imshow(result.blurred_h)
    axarr[1, 2].set_title("sobel h mag scaled")
    axarr[1, 2].imshow(result.sobel_h_mag_scaled)
    axarr[1, 3].set_title("sobel h thresholded")
    axarr[1, 3].imshow(result.sobel_h_thresholded)
    # Row 2: L-channel and the warped raw input.
    axarr[2, 0].set_title("l channel")
    axarr[2, 0].imshow(result.hls[:, :, 1])
    axarr[2, 1].set_title("l threshold mask")
    axarr[2, 1].imshow(cv2.bitwise_and(result.hls[:, :, 1], result.hls[:, :, 1], result.l_threshold_mask))
    axarr[2, 2].set_title("warped input")
    axarr[2, 2].imshow(rgb(result.warped_input))
    # Row 3: S-channel processing chain.
    axarr[3, 0].set_title("s channel")
    axarr[3, 0].imshow(result.hls[:, :, 2])
    axarr[3, 1].set_title("s blurred")
    axarr[3, 1].imshow(result.blurred_s)
    axarr[3, 2].set_title("sobel s mag scaled")
    axarr[3, 2].imshow(result.sobel_s_mag_scaled)
    axarr[3, 3].set_title("sobel s thresholded")
    axarr[3, 3].imshow(result.sobel_s_thresholded)
    return fig
def generate_inv_warped(processor, result):
    """Return the annotated warped image mapped back to camera perspective."""
    return processor.inv_warp(generate_warped(result))


def generate_warped(result):
    """Return the warped image, annotated with the fitted line if one exists.

    When result.line_fit is set, draws the first-order fit (x = a*y + b)
    in red and paints the detected line pixels green on an RGB composite
    of the warped and search images; otherwise returns the plain warped
    image.
    """
    if result.line_fit is not None:
        assert len(result.line_fit) == 2
        out_img = np.dstack((result.warped, np.zeros_like(result.warped), result.search_img))
        miny = 0
        maxy = 40
        # Evaluate the linear fit at both ends of the drawn segment.
        minx = np.int32(result.line_fit[0] * miny + result.line_fit[1])
        maxx = np.int32(result.line_fit[0] * maxy + result.line_fit[1])
        cv2.line(out_img, (minx, miny), (maxx, maxy), (255, 0, 0), thickness=2)
        out_img[result.liney, result.linex] = [0, 255, 0]
        return out_img
    else:
        return result.warped
def convert_file(args, bgr_image):
    """Run the pipeline on *bgr_image* and build the figure for args.stage."""
    processor = pipeline.Processor(None, None)
    result = processor.process(bgr_image, debug=True)
    if args.stage == _CROP_STAGE:
        fig = simple_fig(args, bgr_image, rgb(result.cropped))
    elif args.stage == _S_CHANNEL_STAGE:
        fig = simple_fig(args, bgr_image, result.hls[:, :, 2])
    elif args.stage == _H_CHANNEL_STAGE:
        fig = simple_fig(args, bgr_image, result.hls[:, :, 0])
    elif args.stage == _WARP_STAGE:
        fig = simple_fig(args, bgr_image, generate_warped(result))
    elif args.stage == _DEBUG:
        fig = generate_debug(args, processor, bgr_image, result)
    else:
        # argparse restricts --stage to _CHOICES, so this is unreachable
        # unless a new stage is registered without a branch here.
        raise Exception("unrecognized stage")
    return fig
def change_ext(args, filename, ext):
    """Return '<basename-root>.<stage>.<ext>' built from *filename*."""
    root, _ = os.path.splitext(os.path.basename(filename))
    return "{}.{}.{}".format(root, args.stage, ext)
def take(n, iterable):
    """Return the first n items of the iterable as a list."""
    # range(n) is the first zip argument, so exactly n items are consumed
    # from the iterable (matching itertools.islice semantics).
    return [item for _, item in zip(range(n), iterable)]
def render_result(args, inpath, fig):
    """Rasterize *fig*, then display it (--show) or write it to args.outdir.

    Returns the cv2.waitKey() key code in --show mode, otherwise None.
    """
    fig.canvas.draw()
    buf = fig.canvas.tostring_rgb()
    ncols, nrows = fig.canvas.get_width_height()
    plt.close(fig)
    # np.frombuffer replaces the deprecated np.fromstring. It returns a
    # read-only view, which is fine here: bgr() (cv2.cvtColor) allocates a
    # new output array and does not write to its input.
    rgb_image = np.frombuffer(buf, dtype=np.uint8).reshape(nrows, ncols, 3)
    bgr_image = bgr(rgb_image)
    if args.show:
        cv2.imshow(_WINDOW_NAME, bgr_image)
        return cv2.waitKey()
    else:
        if not os.path.isdir(args.outdir):
            os.makedirs(args.outdir)
        outpath = os.path.join(args.outdir, change_ext(args, inpath, "jpg"))
        cv2.imwrite(outpath, bgr_image)
def run(args):
    """Drive the tool over a directory (interactive browsing) or one file.

    In --show mode the arrow keys step through the images, 'q' quits and
    'r' reloads the pipeline module; in --outdir mode each figure is
    written to disk and the loop simply advances.
    """
    if args.indir is not None:
        filepaths = glob.glob(os.path.join(args.indir, "*.%s" % args.format))
        if args.ntake is not None:
            filepaths = take(args.ntake, filepaths)
        if args.shuffle:
            random.shuffle(filepaths)
        values = dict(enumerate(filepaths))
        idx = 0
        while idx < len(values):
            inpath = values[idx]
            # Raw .bgr dumps need the project loader; png/jpg go via OpenCV.
            if args.format == "bgr":
                bgr_image = pipeline.load_bgr_image(inpath)
            else:
                bgr_image = cv2.imread(inpath)
            fig = convert_file(args, bgr_image)
            result = render_result(args, inpath, fig)
            print idx
            if result is None:
                # Batch mode (no key code returned): just move on.
                idx += 1
            elif chr(result & 255) == 'q':
                break
            elif chr(result & 255) == 'r':
                # Re-import the pipeline so code edits take effect live.
                reload(pipeline)
            elif result == 1113939:
                # right arrow
                idx = advance(idx, len(values))
            elif result == 1113937:
                # left arrow
                idx = reverse(idx, len(values))
            else:
                idx += 1
    elif args.infile is not None:
        bgr_image = cv2.imread(args.infile)
        fig = convert_file(args, bgr_image)
        render_result(args, args.infile, fig)
    else:
        raise Exception("expected either infile or indir to be defined")
# /mnt/disk2/run10-png/camera.2017_06_17_12_43_29_473634.png
def parse_args():
    """Parse command-line options.

    Exactly one of --outdir/--show and exactly one of --indir/--infile
    must be supplied (mutually exclusive, required groups).  --stage is
    limited to the module-level _CHOICES list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--stage", choices=_CHOICES, required=True)
    parser.add_argument("--ntake", type=int)
    parser.add_argument("--shuffle", action="store_true")
    parser.add_argument("--format", choices=["png", "jpg", "bgr"], default="bgr")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--outdir")
    group.add_argument("--show", action="store_true")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--indir")
    group.add_argument("--infile")
    args = parser.parse_args()
    return args
def main():
    """CLI entry point: parse arguments, then run the conversion loop."""
    run(parse_args())


if __name__ == "__main__":
    main()
| danblick/robocar | lines/inspect_stage.py | Python | mit | 6,898 |
from mock import Mock
import numpy
import pytest
import theano
class TestLayer:
    """Tests for the base Layer class: output propagation and parameter creation."""
    @pytest.fixture
    def layer(self):
        # Layer wired to a Mock input layer so collaborator calls can be asserted.
        from nntools.layers.base import Layer
        return Layer(Mock())
    def test_get_output_shape(self, layer):
        # The base Layer passes the input layer's shape straight through.
        assert layer.get_output_shape() == layer.input_layer.get_output_shape()
    def test_get_output_without_arguments(self, layer):
        layer.get_output_for = Mock()
        output = layer.get_output()
        assert output is layer.get_output_for.return_value
        layer.get_output_for.assert_called_with(
            layer.input_layer.get_output.return_value)
        layer.input_layer.get_output.assert_called_with(None)
    def test_get_output_passes_on_arguments_to_input_layer(self, layer):
        input, arg, kwarg = object(), object(), object()
        layer.get_output_for = Mock()
        output = layer.get_output(input, arg, kwarg=kwarg)
        assert output is layer.get_output_for.return_value
        layer.get_output_for.assert_called_with(
            layer.input_layer.get_output.return_value, arg, kwarg=kwarg)
        layer.input_layer.get_output.assert_called_with(
            input, arg, kwarg=kwarg)
    def test_get_output_input_is_a_mapping(self, layer):
        # A dict input acts as a precomputed-output cache keyed by layer.
        input = {layer: object()}
        assert layer.get_output(input) is input[layer]
    def test_get_output_input_is_a_mapping_no_key(self, layer):
        layer.get_output_for = Mock()
        output = layer.get_output({})
        assert output is layer.get_output_for.return_value
    def test_create_param_numpy_bad_shape_raises_error(self, layer):
        param = numpy.array([[1, 2, 3], [4, 5, 6]])
        with pytest.raises(RuntimeError):
            layer.create_param(param, (3, 2))
    def test_create_param_numpy_returns_shared(self, layer):
        param = numpy.array([[1, 2, 3], [4, 5, 6]])
        result = layer.create_param(param, (2, 3))
        assert (result.get_value() == param).all()
        assert isinstance(result, type(theano.shared(param)))
        assert (result.get_value() == param).all()
    def test_create_param_shared_returns_same(self, layer):
        # Pre-existing shared variables must be used as-is, not copied.
        param = theano.shared(numpy.array([[1, 2, 3], [4, 5, 6]]))
        result = layer.create_param(param, (2, 3))
        assert result is param
    def test_create_param_callable_returns_return_value(self, layer):
        # A callable initializer is invoked with the target shape.
        array = numpy.array([[1, 2, 3], [4, 5, 6]])
        factory = Mock()
        factory.return_value = array
        result = layer.create_param(factory, (2, 3))
        assert (result.get_value() == array).all()
        factory.assert_called_with((2, 3))
class TestMultipleInputsLayer:
    """Tests for MultipleInputsLayer: shape/output calls fan out to every input layer."""
    @pytest.fixture
    def layer(self):
        from nntools.layers.base import MultipleInputsLayer
        return MultipleInputsLayer([Mock(), Mock()])
    def test_get_output_shape(self, layer):
        layer.get_output_shape_for = Mock()
        result = layer.get_output_shape()
        assert result is layer.get_output_shape_for.return_value
        layer.get_output_shape_for.assert_called_with([
            layer.input_layers[0].get_output_shape.return_value,
            layer.input_layers[1].get_output_shape.return_value,
            ])
    def test_get_output_without_arguments(self, layer):
        layer.get_output_for = Mock()
        output = layer.get_output()
        assert output is layer.get_output_for.return_value
        layer.get_output_for.assert_called_with([
            layer.input_layers[0].get_output.return_value,
            layer.input_layers[1].get_output.return_value,
            ])
        layer.input_layers[0].get_output.assert_called_with(None)
        layer.input_layers[1].get_output.assert_called_with(None)
    def test_get_output_passes_on_arguments_to_input_layer(self, layer):
        input, arg, kwarg = object(), object(), object()
        layer.get_output_for = Mock()
        output = layer.get_output(input, arg, kwarg=kwarg)
        assert output is layer.get_output_for.return_value
        layer.get_output_for.assert_called_with([
            layer.input_layers[0].get_output.return_value,
            layer.input_layers[1].get_output.return_value,
            ], arg, kwarg=kwarg)
        layer.input_layers[0].get_output.assert_called_with(
            input, arg, kwarg=kwarg)
        layer.input_layers[1].get_output.assert_called_with(
            input, arg, kwarg=kwarg)
    def test_get_output_input_is_a_mapping(self, layer):
        # A dict input acts as a precomputed-output cache keyed by layer.
        input = {layer: object()}
        assert layer.get_output(input) is input[layer]
    def test_get_output_input_is_a_mapping_no_key(self, layer):
        layer.get_output_for = Mock()
        output = layer.get_output({})
        assert output is layer.get_output_for.return_value
class TestInputLayer:
    """Tests for InputLayer: a source layer backed by a theano input variable."""
    @pytest.fixture
    def layer(self):
        from nntools.layers.base import InputLayer
        return InputLayer((3, 2))
    def test_input_var(self, layer):
        # A 2-tuple shape yields a 2-dimensional input variable.
        assert layer.input_var.ndim == 2
    def test_get_output_shape(self, layer):
        assert layer.get_output_shape() == (3, 2)
    def test_get_output_without_arguments(self, layer):
        assert layer.get_output() is layer.input_var
    def test_get_output_input_is_variable(self, layer):
        # An explicit theano variable replaces the layer's own input var.
        variable = theano.Variable("myvariable")
        assert layer.get_output(variable) is variable
    def test_get_output_input_is_a_mapping(self, layer):
        input = {layer: object()}
        assert layer.get_output(input) is input[layer]
class TestDenseLayer:
    """Tests for DenseLayer: parameter init, shapes, and the W.x + b computation."""
    @pytest.fixture
    def layer_vars(self):
        # Build a DenseLayer with mocked initializers and nonlinearity so
        # each collaborator call can be inspected; input shape (2, 3, 4)
        # flattens to 12 features, mapped onto 3 output units.
        from nntools.layers.base import DenseLayer
        input_layer = Mock()
        W = Mock()
        b = Mock()
        nonlinearity = Mock()
        input_layer.get_output_shape.return_value = (2, 3, 4)
        W.return_value = numpy.ones((12, 3))
        b.return_value = numpy.ones((3,)) * 3
        layer = DenseLayer(
            input_layer=input_layer,
            num_units=3,
            W=W,
            b=b,
            nonlinearity=nonlinearity,
            )
        return {
            'W': W,
            'b': b,
            'nonlinearity': nonlinearity,
            'layer': layer,
            }
    @pytest.fixture
    def layer(self, layer_vars):
        return layer_vars['layer']
    def test_init(self, layer_vars):
        # Initializers must be called with the flattened-input shapes.
        layer = layer_vars['layer']
        assert (layer.W.get_value() == layer_vars['W'].return_value).all()
        assert (layer.b.get_value() == layer_vars['b'].return_value).all()
        layer_vars['W'].assert_called_with((12, 3))
        layer_vars['b'].assert_called_with((3,))
    def test_get_params(self, layer):
        assert layer.get_params() == [layer.W, layer.b]
    def test_get_bias_params(self, layer):
        assert layer.get_bias_params() == [layer.b]
    def test_get_output_shape_for(self, layer):
        # Output keeps the batch dimension and uses num_units columns.
        assert layer.get_output_shape_for((5, 6, 7)) == (5, 3)
    def test_get_output_for(self, layer_vars):
        layer = layer_vars['layer']
        nonlinearity = layer_vars['nonlinearity']
        W = layer_vars['W']()
        b = layer_vars['b']()
        input = theano.shared(numpy.ones((2, 12)))
        result = layer.get_output_for(input)
        assert result is nonlinearity.return_value
        # Check that the input to the nonlinearity was what we expect
        # from dense layer, i.e. the dot product plus bias
        nonlinearity_arg = nonlinearity.call_args[0][0]
        assert (nonlinearity_arg.eval() ==
                numpy.dot(input.get_value(), W) + b).all()
    def test_get_output_for_flattens_input(self, layer_vars):
        layer = layer_vars['layer']
        nonlinearity = layer_vars['nonlinearity']
        W = layer_vars['W']()
        b = layer_vars['b']()
        input = theano.shared(numpy.ones((2, 3, 4)))
        result = layer.get_output_for(input)
        assert result is nonlinearity.return_value
        # Check that the input to the nonlinearity was what we expect
        # from dense layer, i.e. the dot product plus bias
        nonlinearity_arg = nonlinearity.call_args[0][0]
        assert (nonlinearity_arg.eval() ==
                numpy.dot(input.get_value().reshape(2, -1), W) + b).all()
class TestDropoutLayer:
    """Tests for DropoutLayer: masking, rescaling, and deterministic pass-through."""
    @pytest.fixture
    def layer(self):
        from nntools.layers.base import DropoutLayer
        return DropoutLayer(Mock())
    @pytest.fixture
    def layer_no_rescale(self):
        from nntools.layers.base import DropoutLayer
        return DropoutLayer(Mock(), rescale=False)
    @pytest.fixture
    def layer_p_02(self):
        from nntools.layers.base import DropoutLayer
        return DropoutLayer(Mock(), p=0.2)
    def test_get_output_for_non_deterministic(self, layer):
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input)
        result_eval = result.eval()
        # Default p=0.5 with rescaling: survivors become 2.0, mean stays ~1.
        assert 0.99 < result_eval.mean() < 1.01
        assert (numpy.unique(result_eval) == [0., 2.]).all()
    def test_get_output_for_deterministic(self, layer):
        # deterministic=True disables dropout entirely.
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input, deterministic=True)
        result_eval = result.eval()
        assert (result_eval == input.get_value()).all()
    def test_get_output_for_no_rescale(self, layer_no_rescale):
        input = theano.shared(numpy.ones((100, 100)))
        result = layer_no_rescale.get_output_for(input)
        result_eval = result.eval()
        # Without rescaling, survivors stay at 1.0 so the mean drops to ~0.5.
        assert 0.49 < result_eval.mean() < 0.51
        assert (numpy.unique(result_eval) == [0., 1.]).all()
    def test_get_output_for_p_02(self, layer_p_02):
        input = theano.shared(numpy.ones((100, 100)))
        result = layer_p_02.get_output_for(input)
        result_eval = result.eval()
        # p=0.2 rescales survivors by 1/(1-p) = 1.25; the mean stays ~1.
        assert 0.99 < result_eval.mean() < 1.01
        assert (numpy.round(numpy.unique(result_eval), 2) == [0., 1.25]).all()
class TestGaussianNoiseLayer:
    """Tests for GaussianNoiseLayer: additive noise vs deterministic identity."""
    @pytest.fixture
    def layer(self):
        from nntools.layers.base import GaussianNoiseLayer
        return GaussianNoiseLayer(Mock())
    def test_get_output_for_non_deterministic(self, layer):
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input, deterministic=False)
        result_eval = result.eval()
        # Zero-mean noise: every element perturbed, mean still rounds to 1.
        assert (result_eval != input.eval()).all()
        assert result_eval.mean() != 1.0
        assert numpy.round(result_eval.mean()) == 1.0
    def test_get_output_for_deterministic(self, layer):
        # deterministic=True disables the noise entirely.
        input = theano.shared(numpy.ones((3, 3)))
        result = layer.get_output_for(input, deterministic=True)
        result_eval = result.eval()
        assert (result_eval == input.eval()).all()
class TestConcatLayer:
    """Tests for ConcatLayer: concatenation of inputs along a given axis."""
    @pytest.fixture
    def layer(self):
        from nntools.layers.base import ConcatLayer
        return ConcatLayer([Mock(), Mock()], axis=1)
    def test_get_output_for(self, layer):
        inputs = [theano.shared(numpy.ones((3, 3))),
                  theano.shared(numpy.ones((3, 2)))]
        result = layer.get_output_for(inputs)
        result_eval = result.eval()
        # axis=1 concatenation must match numpy.hstack of the raw values.
        desired_result = numpy.hstack([input.get_value() for input in inputs])
        assert (result_eval == desired_result).all()
| 317070/nntools | nntools/tests/test_layers.py | Python | mit | 11,170 |
def VtkDefineIdFilter(nmbIdFilter):
    """Create, configure and return a vtkIdFilter.

    The *nmbIdFilter* argument is effectively ignored: the original code
    rebound it immediately, so it only acts as a placeholder name.  The
    original also fell off the end and returned None, so callers could
    never obtain the filter it built -- it now returns the configured
    filter (backward compatible; callers ignoring the return value are
    unaffected).

    NOTE(review): relies on module-level ``vtk`` and ``ugrid`` globals
    that are not visible in this file chunk -- confirm they are defined
    before this is called.
    """
    # vtkIdFilter genera escalares a partir del identificador
    # (vtkIdFilter generates scalars from the identifier)
    nmbIdFilter = vtk.vtkIdFilter()
    nmbIdFilter.SetInput(ugrid)
    # Both id kinds are switched off here; the filter is created but
    # produces no cell/point id scalars until a caller enables them.
    nmbIdFilter.CellIdsOff()
    nmbIdFilter.PointIdsOff()
    return nmbIdFilter
| lcpt/xc | python_modules/postprocess/xcVtk/vtk_define_id_filter.py | Python | gpl-3.0 | 215 |
"""empty message
Revision ID: 29d499553de3
Revises: 4b5f6069c5df
Create Date: 2015-03-11 10:34:38.801595
"""
# revision identifiers, used by Alembic.
revision = '29d499553de3'
down_revision = '4b5f6069c5df'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: intentionally a no-op (empty auto-generated migration)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: intentionally a no-op (empty auto-generated migration)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
| jglamine/phamdb | webphamerator/migrations/versions/29d499553de3_.py | Python | gpl-3.0 | 506 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.apps import AppConfig as BaseAppConfig
def run_setup_hooks(*args, **kwargs):
    """Register the Celery application in Django's INSTALLED_APPS.

    Imports are deferred so they run only once Django settings are ready.
    NOTE(review): the membership test checks for the *string*
    'celery_app' but appends the Celery ``app`` object itself -- confirm
    this mismatch is intentional.
    """
    from django.conf import settings
    from .celery_app import app as celery_app
    if 'celery_app' not in settings.INSTALLED_APPS:
        settings.INSTALLED_APPS += (celery_app, )
class AppConfig(BaseAppConfig):
    """Django app configuration for the top-level ``geonode`` application."""
    name = "geonode"
    label = "geonode"
    def ready(self):
        # Called once Django has loaded all apps; a safe point to run the
        # Celery registration hook.
        super(AppConfig, self).ready()
        run_setup_hooks()
| timlinux/geonode | geonode/apps.py | Python | gpl-3.0 | 1,315 |
# Documentation example: grouped bar chart of two columns per animal.
# NOTE(review): assumes ``import pandas as pd`` is provided by the doc
# build harness -- pd is not imported in this snippet.
speed = [0.1, 17.5, 40, 48, 52, 69, 88]
lifespan = [2, 8, 70, 1.5, 25, 12, 28]
index = ['snail', 'pig', 'elephant',
         'rabbit', 'giraffe', 'coyote', 'horse']
df = pd.DataFrame({'speed': speed,
                   'lifespan': lifespan}, index=index)
# rot=0 keeps the x-axis tick labels horizontal.
ax = df.plot.bar(rot=0)
| datapythonista/datapythonista.github.io | docs/new-pandas-doc/generated/pandas-DataFrame-plot-bar-2.py | Python | apache-2.0 | 279 |
# Task 2. Variant 8.
# Write a program that prints your favourite saying by Lao Tzu;
# remember that the author must be credited on a separate line.
# Ionova A. K.
# 30.04.2016
print("Нельзя обожествлять бесов.\n\t\t\t\t\t\t\t\tЛао-цзы")
input("Нажмите ENTER для выхода.")
#!/usr/bin/env python
# Launch the WSGI frontend application as a FastCGI server, binding to
# the socket path configured in the app's FCGI_SOCKET setting.
from flup.server.fcgi import WSGIServer
from frontend import app
WSGIServer(app, bindAddress=app.config['FCGI_SOCKET']).run()
| jkossen/imposter | examples/frontend_fcgi.py | Python | bsd-2-clause | 149 |
"""adhoc filters
Revision ID: bddc498dd179
Revises: afb7730f6a9c
Create Date: 2018-06-13 14:54:47.086507
"""
# revision identifiers, used by Alembic.
revision = 'bddc498dd179'
down_revision = '80a67c5192fa'
from collections import defaultdict
import json
import uuid
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text
from superset import db
from superset import utils
Base = declarative_base()
class Slice(Base):
    """Minimal mapped view of the ``slices`` table -- only the columns
    this migration reads and writes."""
    __tablename__ = 'slices'
    id = Column(Integer, primary_key=True)
    params = Column(Text)
def upgrade():
    """Fold legacy ``where``/``having`` SQL snippets and structured
    ``filters``/``having_filters`` lists into the unified
    ``adhoc_filters`` list inside every slice's params JSON."""
    bind = op.get_bind()
    session = db.Session(bind=bind)
    # clause name -> params key holding its structured filter list
    mapping = {'having': 'having_filters', 'where': 'filters'}
    for slc in session.query(Slice).all():
        try:
            params = json.loads(slc.params)
            # Only convert slices that have not been migrated yet.
            if not 'adhoc_filters' in params:
                params['adhoc_filters'] = []
                for clause, filters in mapping.items():
                    # Free-form SQL expression for this clause.
                    if clause in params and params[clause] != '':
                        params['adhoc_filters'].append({
                            'clause': clause.upper(),
                            'expressionType': 'SQL',
                            'filterOptionName': str(uuid.uuid4()),
                            'sqlExpression': params[clause],
                        })
                    # Structured col/op/val filters for this clause.
                    if filters in params:
                        for filt in params[filters]:
                            params['adhoc_filters'].append({
                                'clause': clause.upper(),
                                'comparator': filt['val'],
                                'expressionType': 'SIMPLE',
                                'filterOptionName': str(uuid.uuid4()),
                                'operator': filt['op'],
                                'subject': filt['col'],
                            })
            # Drop the legacy keys whether or not conversion happened.
            for key in ('filters', 'having', 'having_filters', 'where'):
                if key in params:
                    del params[key]
            slc.params = json.dumps(params, sort_keys=True)
        except Exception:
            # Best-effort migration: slices with unparsable params are
            # deliberately left untouched.
            pass
    session.commit()
    session.close()
def downgrade():
    """Reverse the migration: split ``adhoc_filters`` back into the
    legacy ``where``/``having``/``filters``/``having_filters`` keys."""
    bind = op.get_bind()
    session = db.Session(bind=bind)
    for slc in session.query(Slice).all():
        try:
            params = json.loads(slc.params)
            utils.split_adhoc_filters_into_base_filters(params)
            if 'adhoc_filters' in params:
                del params['adhoc_filters']
            slc.params = json.dumps(params, sort_keys=True)
        except Exception:
            # Best-effort: leave slices with unparsable params untouched.
            pass
    session.commit()
    session.close()
| dmigo/incubator-superset | superset/migrations/versions/bddc498dd179_adhoc_filters.py | Python | apache-2.0 | 2,689 |
def dump_as_js_literal(src_path, dst_path, var_name):
    """Serialize the text file *src_path* into *dst_path* as a JavaScript
    string variable named *var_name*.

    Each input line becomes one backslash-continued segment of a single
    single-quoted JS string literal, reproducing the original script's
    output byte for byte.  Replaces two copy-pasted blocks (one for the
    XML file, one for the XSL file) and closes files via context managers
    instead of manual close().

    NOTE(review): content containing a single quote or a backslash would
    produce invalid JavaScript -- confirm the XML/XSL inputs are free of
    those characters.
    """
    with open(dst_path, 'w', encoding="utf-8") as out_file:
        out_file.write("var " + var_name + " = '\\" + "\n")
        with open(src_path, 'r', encoding="utf-8") as in_file:
            for line in in_file:
                out_file.write(line.replace("\n", "") + "\\\n")
        out_file.write("';")


if __name__ == "__main__":
    # The guard keeps behavior identical when run as a script while
    # making the helper importable and testable.
    dump_as_js_literal("demo.xml", "demo.xml.js", "xml")
    dump_as_js_literal("demo.xsl", "demo.xsl.js", "xsl")
#=======================================================================
# Author: Donovan Parks
#
# Copyright 2009 Donovan Parks
#
# This file is part of GenGIS.
#
# GenGIS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GenGIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GenGIS. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
from LocationTableLayout import LocationTableLayout
import GenGIS
import wx
class LocationTable( LocationTableLayout ):
    """Dialog that lists the metadata of location layers in a wx grid."""

    # Accepted spellings of the site-id metadata field, checked in order.
    _SITE_ID_FIELDS = ('siteid', 'SiteId', 'site id', 'Site Id', 'Site ID')

    def __init__(self, parent=None):
        LocationTableLayout.__init__ ( self, parent )
        self.SetIcon(wx.Icon(GenGIS.mainWindow.GetExeDir() + "images/CrazyEye.ico", wx.BITMAP_TYPE_ICO))

        # check required data has been loaded
        if GenGIS.layerTree.GetNumLocationSetLayers() == 0:
            wx.MessageBox("This plugin requires location data to be loaded.", "Additional data required.")
            self.Close()
            return

        locs = GenGIS.layerTree.GetLocationSetLayer(0).GetAllActiveLocationLayers()
        self.UpdateTable(locs)

    def UpdateTable(self, locs):
        """Populate the grid with one row per location and one column per
        metadata field, site id first."""
        wx.BeginBusyCursor()

        # setup table
        self.table.Freeze()
        self.table.SetSelectionMode(wx.grid.Grid.wxGridSelectRows)
        self.table.EnableScrolling(True, True)
        self.table.SetRowLabelSize(0)

        # set field (column) labels
        locationSetLayer = GenGIS.layerTree.GetLocationSetLayer(0)
        fields = sorted(locationSetLayer.GetController().GetMetadataFields())

        # Move the site-id column (whatever its spelling) to the front.
        # BUGFIX: the original elif chain raised a NameError when no
        # recognised spelling was present; now the column order is simply
        # left unchanged in that case.
        siteId = None
        for candidate in self._SITE_ID_FIELDS:
            if fields.count(candidate) != 0:
                fields.remove(candidate)
                siteId = candidate
                break
        if siteId is not None:
            fields = [siteId] + fields

        self.table.DeleteCols(0, self.table.GetNumberCols());
        self.table.AppendCols(len(fields))
        for i in xrange(0, len(fields)):
            self.table.SetColLabelValue(i, fields[i])

        # set cell values
        self.table.DeleteRows(0, self.table.GetNumberRows());
        self.table.AppendRows(len(locs))
        for i in xrange(0, len(locs)):
            data = locs[i].GetController().GetData()
            for j in xrange(0, len(fields)):
                self.table.SetCellValue(i, j, data[fields[j]])

        self.table.AutoSizeColumns()
        self.table.Thaw()

        wx.EndBusyCursor()

    def OnClose(self, event):
        event.Skip()

    def OnOK( self, event ):
        self.Close()

    def OnHelp( self, event ):
        wx.LaunchDefaultBrowser( 'http://kiwi.cs.dal.ca/GenGIS/index.php/Description_of_GenGIS_plugins#Location_Table_Viewer' )

    def OnShowAll( self, event ):
        # Toggle between showing all locations and only the active ones.
        if self.chkShowAll.IsChecked():
            locs = GenGIS.layerTree.GetLocationSetLayer(0).GetAllLocationLayers()
        else:
            locs = GenGIS.layerTree.GetLocationSetLayer(0).GetAllActiveLocationLayers()
        self.UpdateTable(locs)
| beiko-lab/gengis | plugins/LocationTable/LocationTable.py | Python | gpl-3.0 | 3,479 |
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from tuskar_ui import api
from tuskar_ui.utils import utils
def matching_deployment_mode():
    """Return True when the configured DEPLOYMENT_MODE is 'scale'.

    Falls back to 'scale' when the Django setting is absent; the
    comparison is case-insensitive.
    """
    deployment_mode = getattr(settings, 'DEPLOYMENT_MODE', 'scale')
    return deployment_mode.lower() == 'scale'
def _get_unmatched_suggestions(request):
    """Return a FlavorSuggestion for every node no existing flavor matches.

    A suggestion is built from each registered flavor and from each node;
    a node is unmatched when its suggestion equals none of the flavor
    suggestions (equality compares the generated name, i.e. vcpus, arch,
    RAM and disk).  The result may contain duplicates; callers
    de-duplicate via get_flavor_suggestions().
    """
    unmatched_suggestions = []
    flavor_suggestions = [FlavorSuggestion.from_flavor(flavor)
                          for flavor in api.flavor.Flavor.list(request)]
    for node in api.node.Node.list(request):
        node_suggestion = FlavorSuggestion.from_node(node)
        for flavor_suggestion in flavor_suggestions:
            if flavor_suggestion == node_suggestion:
                break
        else:
            # for/else: no flavor suggestion matched this node.
            unmatched_suggestions.append(node_suggestion)
    return unmatched_suggestions
def get_flavor_suggestions(request):
    """Return the de-duplicated set of flavor suggestions for unmatched nodes."""
    return set(_get_unmatched_suggestions(request))
class FlavorSuggestion(object):
    """Describe node parameters in a way that is easy to compare.

    Two suggestions are equal (and hash equal) iff they produce the same
    generated ``name``, i.e. identical vcpus, cpu architecture, RAM and
    disk sizes.
    """

    def __init__(self, vcpus=None, ram=None, disk=None, cpu_arch=None,
                 ram_bytes=None, disk_bytes=None, node_id=None):
        self.vcpus = vcpus
        # Explicit byte counts win over the MB/GB arguments.  A missing
        # ram/disk defaults to 0.  BUGFIX: previously a call with neither
        # ``ram`` nor ``ram_bytes`` raised TypeError because
        # ``None * 1024 * 1024`` was evaluated (disk already carried the
        # ``or 0`` guard).
        self.ram_bytes = ram_bytes or (ram or 0) * 1024 * 1024
        self.disk_bytes = disk_bytes or (disk or 0) * 1024 * 1024 * 1024
        self.cpu_arch = cpu_arch
        self.id = node_id

    @classmethod
    def from_node(cls, node):
        """Build a suggestion from a node's reported hardware fields."""
        return cls(
            node_id=node.uuid,
            vcpus=utils.safe_int_cast(node.cpus),
            ram=utils.safe_int_cast(node.memory_mb),
            disk=utils.safe_int_cast(node.local_gb),
            cpu_arch=node.cpu_arch
        )

    @classmethod
    def from_flavor(cls, flavor):
        """Build a suggestion from an existing flavor."""
        return cls(
            vcpus=flavor.vcpus,
            ram_bytes=flavor.ram_bytes,
            disk_bytes=flavor.disk_bytes,
            cpu_arch=flavor.cpu_arch
        )

    @property
    def name(self):
        """Generated flavor name; also serves as the equality/hash key."""
        return 'Flavor-%scpu-%s-%sMB-%sGB' % (
            self.vcpus or '0',
            self.cpu_arch or '',
            self.ram or '0',
            self.disk or '0',
        )

    @property
    def ram(self):
        # RAM in MB.  Floor division keeps the integer semantics the
        # original code had under Python 2.
        return self.ram_bytes // 1024 // 1024

    @property
    def disk(self):
        # Disk in GB (floor division, as above).
        return self.disk_bytes // 1024 // 1024 // 1024

    def __hash__(self):
        return self.name.__hash__()

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return (
            '%s(vcpus=%r, ram_bytes=%r, disk_bytes=%r, '
            'cpu_arch=%r, node_id=%r)' % (
                self.__class__.__name__,
                self.vcpus,
                self.ram_bytes,
                self.disk_bytes,
                self.cpu_arch,
                self.id,
            )
        )

    def create_flavor(self, request):
        """Create and return a real flavor from this suggestion."""
        return api.flavor.Flavor.create(
            request,
            name=self.name,
            memory=self.ram,
            vcpus=self.vcpus,
            disk=self.disk,
            cpu_arch=self.cpu_arch,
        )
| rdo-management/tuskar-ui | tuskar_ui/infrastructure/flavors/utils.py | Python | apache-2.0 | 3,662 |
#
# Copyright (c) 2015 Intel Corporation
#
# Author: Alberto Murillo <alberto.murillo.silva@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from modules.horizon import Horizon
# Fetch the Horizon component wrapper, install it, and start its web
# server as part of the deployment run.
horizon = Horizon.get()
horizon.install()
horizon.start_server()
| clearlinux/clearstack | clearstack/templates/horizon.py | Python | apache-2.0 | 756 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path, getcwd
from collections import defaultdict
config = defaultdict(defaultdict)
config["importer"] = "osm2pgsql" # either 'imposm' or 'osm2pgsql'
# The name given to the style. This is the name it will have in the TileMill
# project list, and a sanitized version will be used as the directory name
# in which the project is stored
config["name"] = "OSM Bright"
# The absolute path to your MapBox projects directory. You should
# not need to change this unless you have configured TileMill specially
config["path"] = path.expanduser(".")
# PostGIS connection setup
# Leave empty for Mapnik defaults. The only required parameter is dbname.
config["postgis"]["host"] = "gis-db"
config["postgis"]["port"] = ""
config["postgis"]["dbname"] = "gis"
config["postgis"]["user"] = "maposmatic"
config["postgis"]["password"] = "secret"
# Increase performance if you are only rendering a particular area by
# specifying a bounding box to restrict queries. Format is "XMIN,YMIN,XMAX,YMAX"
# in the same units as the database (probably spherical mercator meters). The
# whole world is "-20037508.34 -20037508.34 20037508.34 20037508.34".
# Leave blank to let Mapnik estimate.
config["postgis"]["extent"] = "-20037508.34,-20037508.34,20037508.34,20037508.34"
# Land shapefiles required for the style. If you have already downloaded
# these or wish to use different versions, specify their paths here.
# You will need to unzip these files before running make.py
# These OSM land shapefiles are updated daily and can be downloaded at:
# - http://data.openstreetmapdata.com/simplified-land-polygons-complete-3857.zip
# - http://data.openstreetmapdata.com/land-polygons-split-3857.zip
config["land-high"] = "/home/maposmatic/shapefiles/land-polygons-split-3857/land_polygons.shp"
config["land-low"] = "/home/maposmatic/shapefiles/simplified-land-polygons-complete-3857/simplified_land_polygons.shp"
# Places shapefile required for the osm2pgsql style
# - http://mapbox-geodata.s3.amazonaws.com/natural-earth-1.4.0/cultural/10m-populated-places-simple.zip
# or
# - http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/cultural/ne_10m_populated_places.zip
config["ne_places"] = "/home/maposmatic/shapefiles/ne_10m_populated_places_simple/ne_10m_populated_places_simple.shp"
| hholzgra/maposmatic-vagrant | files/config-files/osmbright-configure.py | Python | unlicense | 2,366 |
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
import re
from time import strptime, mktime
from module.plugins.Account import Account
class FreakshareCom(Account):
    """Account plugin for freakshare.com: logs in and scrapes the account
    page (German HTML) for premium validity and remaining traffic."""
    __name__ = "FreakshareCom"
    __version__ = "0.1"
    __type__ = "account"
    __description__ = """freakshare.com account plugin"""
    __author_name__ = ("RaNaN")
    __author_mail__ = ("RaNaN@pyload.org")
    def loadAccountInfo(self, user, req):
        # Load the landing page of an already logged-in session.
        page = req.load("http://freakshare.com/")
        # 'ltig bis' matches the tail of German 'gültig bis' (valid until).
        validuntil = r"ltig bis:</td>\s*<td><b>([0-9 \-:.]+)</b></td>"
        validuntil = re.search(validuntil, page, re.MULTILINE)
        validuntil = validuntil.group(1).strip()
        # Convert 'dd.mm.YYYY - HH:MM' into a unix timestamp.
        validuntil = mktime(strptime(validuntil, "%d.%m.%Y - %H:%M"))
        # 'Traffic verbleibend' = remaining traffic.
        traffic = r"Traffic verbleibend:</td>\s*<td>([^<]+)"
        traffic = re.search(traffic, page, re.MULTILINE)
        traffic = traffic.group(1).strip()
        traffic = self.parseTraffic(traffic)
        return {"validuntil": validuntil, "trafficleft": traffic}
    def login(self, user, data, req):
        # POST the login form; cookies keep the session for later requests.
        page = req.load("http://freakshare.com/login.html", None, { "submit" : "Login", "user" : user, "pass" : data['password']}, cookies=True)
        # Error message appears in German or English depending on locale.
        if "Falsche Logindaten!" in page or "Wrong Username or Password!" in page:
            self.wrongPassword()
| fener06/pyload | module/plugins/accounts/FreakshareCom.py | Python | gpl-3.0 | 1,957 |
import os
# Django settings for oscar project.
# Directory containing this settings module.
PROJECT_DIR = os.path.dirname(__file__)


def location(x):
    """Return the absolute path of *x*, resolved relative to this file.

    Replaces the previous lambda assignment (PEP 8 E731) with a proper
    function definition; all call sites are unchanged.
    """
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), x)
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': location('db.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
import django
django16 = django.VERSION[1] >= 6
if django16:
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
#ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (location('static/'),)
STATIC_ROOT = location('public/static')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
]
if not django16:
MIDDLEWARE_CLASSES.append('django.middleware.transaction.TransactionMiddleware')
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'urls'
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
location('templates'),
OSCAR_MAIN_TEMPLATE_DIR,
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'verbose'
},
'oscar_stripe_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/tmp/oscar_stripe.log',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django': {
'handlers':['null'],
'propagate': True,
'level':'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'oscar.checkout': {
'handlers': ['console'],
'propagate': True,
'level':'INFO',
},
'django.db.backends': {
'handlers':['null'],
'propagate': False,
'level':'DEBUG',
},
'oscar_stripe': {
'handlers': ['console', 'oscar_stripe_file'],
'propagate': True,
'level':'DEBUG',
},
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
# External apps
'django_extensions',
'debug_toolbar',
'haystack',
'sorl.thumbnail',
'oscar_stripe',
'compressor',
'south',
]
from oscar import get_core_apps
INSTALLED_APPS += get_core_apps()
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.Emailbackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# Oscar settings
from oscar.defaults import *
OSCAR_ALLOW_ANON_CHECKOUT = True
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
OSCAR_SHOP_TAGLINE = 'Stripe sandbox'
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
# =================
# Stripe settings
# =================
# NOTE(review): test-mode keys ("sk_test_"/"pk_test_") are hard-coded for this
# sandbox project; a real deployment should load secrets from the environment
# rather than committing them to source control.
STRIPE_SECRET_KEY = 'sk_test_iyXSkU6XayNItIIhF8rdrqm0'
STRIPE_PUBLISHABLE_KEY = 'pk_test_LAIdr6RxwTeebDA7OBDTgHao'
STRIPE_CURRENCY = 'USD'
| britco/django-oscar-stripe | sandbox/settings.py | Python | bsd-3-clause | 7,734 |
from underthesea import word_sent
from underthesea.pos_tag.model import CRFModel
def pos_tag(sentence, format=None):
    """
    part of speech tagging

    Tokenizes the raw sentence with ``word_sent`` and runs the singleton
    CRF model over the tokens.

    :param unicode|str sentence: raw sentence
    :param format: output format forwarded to ``CRFModel.predict``
        (``None`` uses the model's default; exact values are defined by
        the model -- see ``underthesea.pos_tag.model``)
    :return: tagged sentence
    :rtype: list
    """
    # NOTE: ``format`` shadows the builtin of the same name; kept as-is
    # because the parameter name is part of the public API.
    sentence = word_sent(sentence)
    crf_model = CRFModel.Instance()  # singleton: model is loaded once
    result = crf_model.predict(sentence, format)
    return result
| rain1024/underthesea | underthesea/pos_tag/pos_tag.py | Python | gpl-3.0 | 400 |
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.html import escape
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth import authenticate, login, logout
import json
# Create your views here.
@ensure_csrf_cookie
def index(request):
    """Render the site landing page (decorator sets the CSRF cookie so
    client-side JavaScript can make POST requests later)."""
    return render(request, 'fin/index.html', {})
def table(request, ticker):
    """Render the per-ticker table template ``fin/table_<ticker>.html``.

    NOTE(review): ``ticker`` is interpolated straight into the template
    path -- assumed to be constrained by the URLconf pattern; confirm
    before trusting it with arbitrary input.
    """
    template_name = 'fin/table_' + ticker + '.html'
    # Fill the type of user programmatically - TBD
    return render(request, template_name, {'user_profile': 'anonymous'})
    #return render(request, template_name, {'user_profile':'nameduser'})
    #return render(request, template_name, {'user_profile':'premiumuser'})
def jspractice(request):
    """Render the JavaScript practice/sandbox page."""
    return render(request, 'fin/js.html', {})
def dfcf_input_modify(request):
    """Debug endpoint: echo the posted DCF form data back as an HTML dump.

    Sections of the response, in order:
      1. every raw POST key/value pair,
      2. the parsed ``dfcf_ip_params`` JSON (raw, then per-key revenue and
         earnings growth rates as percentages),
      3. the parsed ``dfcf_ip_changeset`` JSON,
      4. ``repr(request)`` (HTML-escaped).

    NOTE(review): apart from the final repr, values are concatenated into
    HTML without escaping -- development/inspection use only.
    """
    txt = ""
    for key in request.POST:
        value = request.POST[key]
        txt += str(key) + "::" + str(value) + "<br>"
    txt += "<br><br>"

    # raises KeyError / ValueError if the field is missing or not JSON --
    # surfaced as a 500 in this debug view
    dat = request.POST['dfcf_ip_params']
    jdat = json.loads(dat)
    for key in jdat:
        value = jdat[key]
        txt += str(key) + "::" + str(value) + "<br>"
    txt += "<br><br>"

    for key in jdat:
        # presumably keys are period identifiers mapping to growth dicts --
        # verify against the client-side form (TODO confirm)
        rev_growth = float(jdat[key]['rev_growth'])
        ear_growth = float(jdat[key]['earnings_growth'])
        txt += str(key) + "::" + "revenue grows at" + str(100*rev_growth) + "% <br>"
        txt += str(key) + "::" + "Earnings grow at" + str(100*ear_growth) + "% <br>"

    txt += "<br><br>Changeset details<br><br>"
    changeset = request.POST['dfcf_ip_changeset']
    jchangeset = json.loads(changeset)
    for key in jchangeset:
        value = jchangeset[key]
        txt += str(key) + "::" + str(value) + "<br>"
    txt += "<br><br>"

    txt += escape(repr(request))
    return HttpResponse(txt)
# return HttpResponse(request.POST['fname'])
# caller should ensure it is a POST etc.
def fin_auth(request):
    """Authenticate the credentials posted in ``request.POST``.

    Returns True only when the username/password pair matches an active
    account, in which case a session is started via ``login``; returns
    False otherwise.  Caller is expected to have verified this is a POST.
    """
    user = authenticate(username=request.POST.get('username'),
                        password=request.POST.get('password'))
    if user is None or not user.is_active:
        return False
    login(request, user)
    return True
@ensure_csrf_cookie
def dfcf_input(request, action="none"):
    """Serve the DCF input-parameters page, with per-user templates.

    Flow, first match wins:
      * ``action == "logout"``: log the user out, serve the shared page.
      * already authenticated: serve ``fin/<username>/dfcf_input_parameters.html``.
      * plain GET: serve the shared anonymous page.
      * POST with valid credentials (``fin_auth``): log in and serve the
        user-specific template.
      * POST with bad credentials: serve the shared anonymous page.
        (Bugfix: this case previously fell off the end of the function and
        returned ``None``, which makes Django raise
        "The view didn't return an HttpResponse object".)

    NOTE(review): the template path embeds the raw POSTed username --
    confirm usernames are restricted to path-safe characters upstream.
    """
    template_name = 'fin/dfcf_input_parameters.html'
    u = request.user
    if action == "logout":
        logout(request)
        return render(request, template_name, {'user_profile': 'anonymous'})
    if u.is_authenticated():
        template_name = 'fin/' + u.username + '/dfcf_input_parameters.html'
        return render(request, template_name, {'user_profile': 'anonymous'})
    if request.method != 'POST':
        return render(request, template_name, {'user_profile': 'anonymous'})
    if fin_auth(request):
        template_name = 'fin/' + request.POST.get('username') + '/dfcf_input_parameters.html'
        return render(request, template_name, {'user_profile': 'anonymous'})
    # Failed authentication: fall back to the shared anonymous page.
    return render(request, template_name, {'user_profile': 'anonymous'})
    #return render(request, template_name, {'user_profile':'nameduser'})
    #return render(request, template_name, {'user_profile':'premiumuser'})
| saigkrish/finance | PY/views.py | Python | apache-2.0 | 3,119 |
#!/usr/bin/python3
class Person:
    """Minimal tutorial class demonstrating an instance method."""

    def sayHi(self):
        """Print a fixed greeting to stdout; returns None."""
        print('Hello, how are you?')
p = Person()
p.sayHi()
| louistin/thinkstation | a_byte_of_python/unit_12_object_oriented_programming/method.py | Python | mit | 116 |
# test_num = 1406357289
import time
def is_pandigital(num):
    """Return True if *num* is 0-to-9 pandigital.

    A number qualifies when its decimal representation contains each of
    the ten digits 0-9 exactly once -- equivalently, it is exactly ten
    digits long and its digit set is {0,...,9}.
    """
    digits = str(num)
    # len check rules out repeats; set equality rules out missing digits.
    return len(digits) == 10 and set(digits) == set('0123456789')
# Project Euler 43: sum every 0-9 pandigital number d1..d10 whose 3-digit
# substrings d2d3d4, d3d4d5, ..., d8d9d10 are divisible by 2,3,5,7,11,13,17
# respectively.  Candidates for the tail d5..d10 are built by chaining the
# 17/13/11/7 constraints on overlapping digits, then the head is brute-forced.
start_time = time.time()

# Zero-padded 3-digit multiples for each divisor, so overlapping digits can
# be compared as string slices.
multiples_of_17 = [str(i*17).zfill(3) for i in range(1, 59) if i*17 < 999]
multiples_of_13 = [str(i*13).zfill(3) for i in range(1, 77) if i*13 < 999]
# First char restricted to '0'/'5': d6 must be 0 or 5 because d4d5d6 is
# divisible by 5, and d6 is the leading digit of the 11-multiple d6d7d8.
multiples_of_11 = [str(i*11).zfill(3) for i in range(10, 91) if i*11 < 999 and (str(i*11).zfill(3)[0] == '0' or str(i*11).zfill(3)[0] == '5')]
multiples_of_7 = [str(i*7).zfill(3) for i in range(1, 143) if i*7 < 999]

last_part = []
for i in multiples_of_17:
    for j in multiples_of_13:
        for k in multiples_of_11:
            for l in multiples_of_7:
                # print(l, k, j, i)
                if i[0:2] == j[1:] and k[1:] == j[0:2] and l[1:3] == k[0:2]:  # combine the constraints that overlap in the last digits
                    last_part.append(l + k[2] + i[1:])  # assembled d5..d10

sum_all = 0
for i in range(1000, 10000, 2):  # fourth digit must be even (d2d3d4 divisible by 2)
    for j in last_part:
        current = (str(i) + j)
        if is_pandigital(int(current)) and int(current[2:5]) % 3 == 0:  # here we consider the last constraint: the divisibility by 3
            sum_all += int(current)
print(sum_all)
print("--- %s seconds ---" % (time.time() - start_time))
| BinDigit1/EulerProjects | Problem 43/substring_divisibility.py | Python | gpl-2.0 | 1,358 |
import job
import ci
import response
| vincent-psarga/job-sensors | src/jobsensors/jobs/__init__.py | Python | gpl-2.0 | 37 |
from openpds.core.models import *
from django.contrib import admin
def resetDays(modeladmin, request, queryset):
    """Admin action: recompute each selected row's ``days`` field.

    For every object with both ``start`` and ``end`` set, derives the whole
    number of days between them and saves the row only when the stored value
    differs (avoids needless writes).  ``modeladmin`` and ``request`` are
    required by the admin-action signature but unused.
    """
    for q in queryset:
        if q.start and q.end:
            # timedelta.days is portable; the previous strftime("%s")-based
            # arithmetic relied on a platform-specific format code (absent on
            # Windows).  Identical result assuming end >= start.
            days = (q.end - q.start).days
            if q.days != days:
                q.days = days
                q.save()
resetDays.short_description = "Reset Days based on start and end"
class Emoji2Admin(admin.ModelAdmin):
    """List/search config shared by the Emoji and Emoji2 registrations."""
    list_display = ['profile', 'emoji', 'created']
    search_fields = ['profile__uuid', 'emoji', 'created']
class ProfileStartEndAdmin(admin.ModelAdmin):
    """Admin for ProfileStartEnd rows; exposes the resetDays bulk action."""
    list_display = ['profile', 'start', 'end', 'days']
    search_fields = ['profile__uuid', 'start', 'end', 'days']
    actions = [resetDays, ]
class FluQuestionsAdmin(admin.ModelAdmin):
    """Admin list view for flu-survey answers, searchable by profile UUID."""
    list_display = ['profile', 'fluThisSeason', 'fluLastSeason', 'vaccineThisSeason']
    search_fields = ['profile__uuid', ]
class ProfileAdmin(admin.ModelAdmin):
    """Admin list view for Profile rows, searchable by UUID."""
    list_display = ['uuid', 'created']
    search_fields = ['uuid']
class BaselineAdmin(admin.ModelAdmin):
    """Admin list view for Baseline rows, searchable by profile UUID or IP."""
    list_display = ['profile', 'ip']
    search_fields = ['profile__uuid', 'ip']
admin.site.register(Profile, ProfileAdmin)
admin.site.register(FB_Connection)
admin.site.register(Emoji, Emoji2Admin)
admin.site.register(Emoji2, Emoji2Admin)
admin.site.register(Device)
admin.site.register(QuestionInstance)
admin.site.register(QuestionType)
admin.site.register(FirebaseToken)
admin.site.register(IPReferral)
admin.site.register(AuditEntry)
admin.site.register(ProfileStartEnd, ProfileStartEndAdmin)
admin.site.register(FluQuestions, FluQuestionsAdmin)
admin.site.register(Baseline, BaselineAdmin)
| eschloss/FluFuture | openpds/core/admin.py | Python | mit | 1,670 |
# -*- coding: utf-8 -*-
# © 2015 Oihane Crucelaegui
# © 2015 Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
"name": "Partner multi-company",
"summary": "Select individually the partner visibility on each company",
"version": "8.0.1.0.1",
"license": "AGPL-3",
"depends": [
"base",
"base_suspend_security",
],
"author": "Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Odoo Community Association (OCA)",
"contributors": [
"Oihane Crucelaegui <oihanecruce@avanzosc.es>",
],
"category": "Partner Management",
"data": [
"views/res_partner_view.xml",
],
"installable": True,
'post_init_hook': 'post_init_hook',
'uninstall_hook': 'uninstall_hook',
}
| acsone/multi-company | partner_multi_company/__openerp__.py | Python | agpl-3.0 | 817 |
#!/usr/bin/env python
import warnings
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas_ml.imbaccessors as imbaccessors
import pandas_ml.skaccessors as skaccessors
import pandas_ml.smaccessors as smaccessors
import pandas_ml.snsaccessors as snsaccessors
import pandas_ml.util as util
import pandas_ml.xgboost as xgboost
from pandas_ml.compat import is_list_like, Appender, cache_readonly
from pandas_ml.core.accessor import _AccessorMethods
from pandas_ml.core.generic import ModelPredictor, _shared_docs
from pandas_ml.core.series import ModelSeries
class ModelFrame(ModelPredictor, pd.DataFrame):
"""
Data structure subclassing ``pandas.DataFrame`` to define a metadata to
specify target (response variable) and data (explanatory variable / features).
Parameters
----------
data : same as ``pandas.DataFrame``
target : str or array-like
Column name or values to be used as target
args : arguments passed to ``pandas.DataFrame``
kwargs : keyword arguments passed to ``pandas.DataFrame``
"""
_internal_caches = ['_estimator', '_predicted', '_proba', '_log_proba', '_decision']
_internal_names = (pd.core.generic.NDFrame._internal_names + _internal_caches)
_internal_names_set = set(_internal_names)
_metadata = ['_target_name']
_method_mapper = dict(fit={}, transform={}, predict={})
for cls in [skaccessors.CrossDecompositionMethods,
skaccessors.GaussianProcessMethods]:
_method_mapper = cls._update_method_mapper(_method_mapper)
@property
def _constructor(self):
return ModelFrame
_constructor_sliced = ModelSeries
_TARGET_NAME = '.target'
_DATA_NAME = '.data'
def __init__(self, data, target=None,
*args, **kwargs):
if data is None and target is None:
msg = '{0} must have either data or target'
raise ValueError(msg.format(self.__class__.__name__))
elif data is None and not is_list_like(target):
msg = 'target must be list-like when data is None'
raise ValueError(msg)
data, target = skaccessors._maybe_sklearn_data(data, target)
data, target = smaccessors._maybe_statsmodels_data(data, target)
# retrieve target_name
if isinstance(data, ModelFrame):
target_name = data.target_name
data, target = self._maybe_convert_data(data, target, *args, **kwargs)
if target is not None and not is_list_like(target):
if target in data.columns:
target_name = target
df = data
else:
msg = "Specified target '{0}' is not included in data"
raise ValueError(msg.format(target))
self._target_name = target_name
else:
df, target = self._concat_target(data, target)
if isinstance(target, pd.Series):
self._target_name = target.name
elif isinstance(target, pd.DataFrame):
if len(target.columns) > 1:
self._target_name = target.columns
else:
self._target_name = target.columns[0]
else:
# target may be None
self._target_name = self._TARGET_NAME
pd.DataFrame.__init__(self, df)
def _maybe_convert_data(self, data, target,
*args, **kwargs):
"""
Internal function to instanciate data and target
Parameters
----------
data : instance converted to ``pandas.DataFrame``
target : instance converted to ``pandas.Series``
args : argument passed from ``__init__``
kwargs : argument passed from ``__init__``
"""
init_df = isinstance(data, pd.DataFrame)
init_target = isinstance(target, (pd.Series, pd.DataFrame))
def _maybe_convert_target(data, target, index=None):
if data is not None:
index = data.index
target = np.array(target)
if len(target.shape) == 1:
target = pd.Series(target, index=index)
else:
target = pd.DataFrame(target, index=index)
return target
if not init_df and not init_target:
if data is not None:
data = pd.DataFrame(data, *args, **kwargs)
if is_list_like(target):
target = _maybe_convert_target(data, target)
elif not init_df:
if data is not None:
index = kwargs.pop('index', target.index)
data = pd.DataFrame(data, index=index, *args, **kwargs)
elif not init_target:
if is_list_like(target):
target = _maybe_convert_target(data, target)
else:
# no conversion required
pass
if isinstance(target, pd.Series) and target.name is None:
target = pd.Series(target, name=self._TARGET_NAME)
return data, target
def _concat_target(self, data, target):
if data is None and target is None:
msg = '{0} must have either data or target'
raise ValueError(msg.format(self.__class__.__name__))
elif data is None:
return target, target
elif target is None:
return data, None
if len(data) != len(target):
raise ValueError('data and target must have same length')
if not data.index.equals(target.index):
raise ValueError('data and target must have equal index')
def _add_meta_columns(df, meta_name):
df = df.copy()
if not is_list_like(meta_name):
meta_name = [meta_name]
df.columns = pd.MultiIndex.from_product([meta_name, df.columns])
return df
if isinstance(target, pd.DataFrame):
if len(target.columns.intersection(data.columns)) > 0:
target = _add_meta_columns(target, self._TARGET_NAME)
data = _add_meta_columns(data, self._DATA_NAME)
# overwrite target_name
self._target_name = target.columns
elif isinstance(target, pd.Series):
if target.name in data.columns:
raise ValueError('data and target must have unique names')
else:
raise ValueError('target cannot be converted to ModelSeries or ModelFrame')
return pd.concat([target, data], axis=1), target
def has_data(self):
"""
Return whether ``ModelFrame`` has data
Returns
-------
has_data : bool
"""
return len(self._data_columns) > 0
@property
def _data_columns(self):
# Index.difference results in sorted difference set
if self.has_multi_targets():
return self.columns[~self.columns.isin(self.target_name)]
else:
# This doesn't work for DatetimeIndex
# return self.columns[~(self.columns == self.target_name)]
return pd.Index([c for c in self.columns if c != self.target_name])
@property
def data(self):
"""
Return data (explanatory variable / features)
Returns
-------
data : ``ModelFrame``
"""
if self.has_data():
return self.loc[:, self._data_columns]
else:
return None
@data.setter
def data(self, data):
if data is None:
del self.data
return
if isinstance(data, ModelFrame):
if data.has_target():
msg = 'Cannot update with {0} which has target attribute'
raise ValueError(msg.format(self.__class__.__name__))
elif isinstance(data, pd.Series):
data = data.to_frame()
elif isinstance(data, pd.DataFrame):
pass
else:
msg = 'data must be ModelFrame, ModelSeries, DataFrame or Series, {0} passed'
raise TypeError(msg.format(data.__class__.__name__))
data, _ = self._maybe_convert_data(data, self.target, self.target_name)
if self.has_multi_targets():
if len(self.target_name.intersection(data.columns)) > 0:
msg = "Passed data has the same column name as the target '{0}'"
raise ValueError(msg.format(self.target_name))
else:
if self.target_name in data.columns:
msg = "Passed data has the same column name as the target '{0}'"
raise ValueError(msg.format(self.target_name))
if self.has_target():
data, _ = self._concat_target(data, self.target)
self._update_inplace(data)
@data.deleter
def data(self):
if self.has_target():
self._update_inplace(self.target.to_frame())
else:
msg = '{0} must have either data or target'
raise ValueError(msg.format(self.__class__.__name__))
def has_target(self):
"""
Return whether ``ModelFrame`` has target
Returns
-------
has_target : bool
"""
if self.has_multi_targets():
return len(self.target_name.intersection(self.columns)) > 0
return self.target_name in self.columns
def has_multi_targets(self):
"""
Return whether ``ModelFrame`` has multiple target columns
Returns
-------
has_multi_targets : bool
"""
return isinstance(self.target_name, pd.Index)
@property
def target_name(self):
"""
Return target column name
Returns
-------
target : object
"""
return self._target_name
@target_name.setter
def target_name(self, value):
self._target_name = value
@property
def target(self):
"""
Return target (response variable)
Returns
-------
target : ``ModelSeries``
"""
if self.has_target():
return self.loc[:, self.target_name]
else:
return None
@target.setter
def target(self, target):
if target is None:
del self.target
return
if not self.has_target():
# allow to update target_name only when target attibute doesn't exist
if isinstance(target, pd.Series):
# Series.name may be blank
if target.name is not None:
self.target_name = target.name
elif isinstance(target, pd.DataFrame):
# DataFrame.columns should have values
self.target_name = target.columns
if not is_list_like(target):
if target in self.columns:
self.target_name = target
else:
msg = "Specified target '{0}' is not included in data"
raise ValueError(msg.format(target))
return
if isinstance(target, pd.Series):
if target.name != self.target_name:
msg = "Passed data is being renamed to '{0}'".format(self.target_name)
warnings.warn(msg)
target = pd.Series(target, name=self.target_name)
elif isinstance(target, pd.DataFrame):
if not target.columns.equals(self.target_name):
if len(target.columns) == len(self.target_name):
msg = "Passed data is being renamed to '{0}'".format(self.target_name)
warnings.warn(msg)
target = target.copy()
target.columns = self.target_name
else:
msg = 'target and target_name are unmatched, target_name will be updated'
warnings.warn(msg)
data = self.data # hack
self.target_name = target.columns
self.data = data
else:
_, target = self._maybe_convert_data(self.data, target, self.target_name)
df, _ = self._concat_target(self.data, target)
self._update_inplace(df)
@target.deleter
def target(self):
if self.has_data():
self._update_inplace(self.data)
else:
msg = '{0} must have either data or target'
raise ValueError(msg.format(self.__class__.__name__))
def _get_method_mapper(self, estimator, method_name):
if method_name in self._method_mapper:
mapper = self._method_mapper[method_name]
return mapper.get(estimator.__class__.__name__, None)
    def _call(self, estimator, method_name, *args, **kwargs):
        """Invoke ``estimator.<method_name>`` on this frame's raw values.

        When the frame has a target it is passed as ``y``; estimators whose
        method does not accept ``y`` raise ``TypeError`` and the call is
        retried with data only.  The estimator is cached on
        ``self.estimator`` before the raw result is returned.
        """
        method = self._check_attr(estimator, method_name)

        data = self.data.values
        if self.has_target():
            target = self.target.values
            try:
                result = method(data, y=target, *args, **kwargs)
            except TypeError:
                # method signature has no ``y`` (e.g. unsupervised
                # estimators); retry without the target
                result = method(data, *args, **kwargs)
        else:
            # not try to pass target if it doesn't exists
            # to catch ValueError from estimator
            result = method(data, *args, **kwargs)

        self.estimator = estimator
        return result
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='fit_predict', returned='returned : predicted result'))
def fit_predict(self, estimator, *args, **kwargs):
predicted = self._call(estimator, 'fit_predict', *args, **kwargs)
return self._wrap_predicted(predicted, estimator)
    def _wrap_predicted(self, predicted, estimator):
        """
        Wrapper for predict methods

        Converts raw estimator output to ``ModelSeries`` (1-d array) or
        ``ModelFrame`` (otherwise), indexed like this frame.  Tuple results
        are wrapped element-wise via recursion.  The wrapped result is
        cached on ``self._predicted``.
        """
        if isinstance(predicted, tuple):
            # some estimators return several arrays; wrap each one
            return tuple(self._wrap_predicted(p, estimator) for p in predicted)

        if util._is_1d_varray(predicted):
            predicted = self._constructor_sliced(predicted, index=self.index)
        else:
            predicted = self._constructor(predicted, index=self.index)
        self._predicted = predicted
        return self._predicted
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='fit_sample', returned='returned : sampling result'))
def fit_sample(self, estimator, *args, **kwargs):
# for imblearn
msg = ".fit_sample is deprecated. Use .fit_resample instead"
warnings.warn(msg, DeprecationWarning)
return self.fit_resample(estimator, *args, **kwargs)
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='fit_resample', returned='returned : resampling result'))
def fit_resample(self, estimator, *args, **kwargs):
# for imblearn
sampled_X, sampled_y = self._call(estimator, 'fit_resample', *args, **kwargs)
return self._wrap_sampled(sampled_X, sampled_y)
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='sample', returned='returned : sampling result'))
def sample(self, estimator, *args, **kwargs):
# for imblearn
msg = ".sample is deprecated. Use .fit_resample instead"
warnings.warn(msg, DeprecationWarning)
return self.fit_resample(estimator, *args, **kwargs)
def _wrap_sampled(self, sampled_X, sampled_y):
# revert sampled results to ModelFrame, index is being reset
def _wrap(x, y):
y = self._constructor_sliced(y, name=self.target.name)
result = self._constructor(data=x, target=y,
columns=self.data.columns)
return result
if sampled_X.ndim == 3 or sampled_X.ndim == 1:
# ensemble
# ndim=3 for EasyEnsemble
# ndim=1 for BalanceCascade
results = []
for x, y in zip(sampled_X, sampled_y):
result = _wrap(x, y)
results.append(result)
else:
results = _wrap(sampled_X, sampled_y)
return results
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='transform', returned='returned : transformed result'))
def transform(self, estimator, *args, **kwargs):
try:
transformed = super(ModelFrame, self).transform(estimator, *args, **kwargs)
if not isinstance(estimator, compat.string_types):
# set inverse columns
estimator._pdml_original_columns = self.data.columns
return transformed
except ImportError:
# raise patsy error
raise
except Exception as e: # noqa
return pd.DataFrame.transform(self, estimator, *args, **kwargs)
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='fit_transform', returned='returned : transformed result'))
def fit_transform(self, estimator, *args, **kwargs):
transformed = super(ModelFrame, self).fit_transform(estimator, *args, **kwargs)
if not isinstance(estimator, compat.string_types):
# set inverse columns
estimator._pdml_original_columns = self.data.columns
return transformed
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='inverse_transform', returned='returned : transformed result'))
def inverse_transform(self, estimator, *args, **kwargs):
transformed = self._call(estimator, 'inverse_transform', *args, **kwargs)
original_columns = getattr(estimator, '_pdml_original_columns', None)
transformed = self._wrap_transform(transformed, columns=original_columns)
return transformed
def _wrap_transform(self, transformed, columns=None):
"""
Wrapper for transform methods
"""
if self.pp._keep_existing_columns(self.estimator):
columns = self.data.columns
if self.has_target():
return self._constructor(transformed, target=self.target,
index=self.index, columns=columns)
else:
return self._constructor(transformed, index=self.index,
columns=columns)
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='predict_proba', returned='returned : probabilities'))
def predict_proba(self, estimator, *args, **kwargs):
probability = self._call(estimator, 'predict_proba', *args, **kwargs)
self._proba = self._wrap_probability(probability, estimator)
return self._proba
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='predict_log_proba', returned='returned : probabilities'))
def predict_log_proba(self, estimator, *args, **kwargs):
probability = self._call(estimator, 'predict_log_proba', *args, **kwargs)
self._log_proba = self._wrap_probability(probability, estimator)
return self._log_proba
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='decision_function', returned='returned : decisions'))
def decision_function(self, estimator, *args, **kwargs):
decision = self._call(estimator, 'decision_function', *args, **kwargs)
self._decision = self._wrap_probability(decision, estimator)
return self._decision
    def _wrap_probability(self, probability, estimator):
        """
        Wrapper for probability methods

        Wraps a raw probability/decision array in a ``ModelFrame`` indexed
        like this frame; for 2-d output the columns come from
        ``estimator.classes_``.  If construction fails the raw array is
        returned unchanged, with a warning.
        """
        try:
            if util._is_1d_varray(probability):
                # 2 class
                probability = self._constructor(probability, index=self.index)
            else:
                probability = self._constructor(probability, index=self.index,
                                                columns=estimator.classes_)
        except ValueError:
            msg = "Unable to instantiate ModelFrame for '{0}'"
            warnings.warn(msg.format(estimator.__class__.__name__))
        return probability
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='score', returned='returned : score'))
def score(self, estimator, *args, **kwargs):
score = self._call(estimator, 'score', *args, **kwargs)
return score
# accessors
@property
@Appender(_shared_docs['skaccessor_nolink'] %
dict(module='calibration'))
def calibration(self):
return self._calibration
@cache_readonly
def _calibration(self):
attrs = ['CalibratedClassifierCV']
return _AccessorMethods(self, module_name='sklearn.calibration',
attrs=attrs)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='cluster'))
def cluster(self):
return self._cluster
@cache_readonly
def _cluster(self):
return skaccessors.ClusterMethods(self)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='covariance'))
def covariance(self):
return self._covariance
@cache_readonly
def _covariance(self):
return skaccessors.CovarianceMethods(self)
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='cross_decomposition'))
def cross_decomposition(self):
return self._cross_decomposition
@cache_readonly
def _cross_decomposition(self):
attrs = ['PLSRegression', 'PLSCanonical', 'CCA', 'PLSSVD']
return _AccessorMethods(self, module_name='sklearn.cross_decomposition',
attrs=attrs)
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='decomposition'))
def decomposition(self):
return self._decomposition
@cache_readonly
def _decomposition(self):
return skaccessors.DecompositionMethods(self)
@property
@Appender(_shared_docs['skaccessor_nolink'] %
dict(module='discriminant_analysis'))
def discriminant_analysis(self):
return self._da
@property
@Appender(_shared_docs['skaccessor_nolink'] %
dict(module='discriminant_analysis'))
def da(self):
return self._da
@cache_readonly
def _da(self):
return _AccessorMethods(self,
module_name='sklearn.discriminant_analysis')
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='dummy'))
def dummy(self):
return self._dummy
@cache_readonly
def _dummy(self):
attrs = ['DummyClassifier', 'DummyRegressor']
return _AccessorMethods(self, module_name='sklearn.dummy', attrs=attrs)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='ensemble'))
def ensemble(self):
return self._ensemble
@cache_readonly
def _ensemble(self):
return skaccessors.EnsembleMethods(self)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='feature_extraction'))
def feature_extraction(self):
return self._feature_extraction
@cache_readonly
def _feature_extraction(self):
return skaccessors.FeatureExtractionMethods(self)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='feature_selection'))
def feature_selection(self):
return self._feature_selection
@cache_readonly
def _feature_selection(self):
return skaccessors.FeatureSelectionMethods(self)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='gaussian_process'))
def gaussian_process(self):
return self._gaussian_process
@property
@Appender(_shared_docs['skaccessor'] % dict(module='gaussian_process'))
def gp(self):
return self._gaussian_process
@cache_readonly
def _gaussian_process(self):
return skaccessors.GaussianProcessMethods(self)
@property
def imbalance(self):
""" Property to access ``imblearn``"""
return self._imbalance
@cache_readonly
def _imbalance(self):
return imbaccessors.ImbalanceMethods(self)
@property
@Appender(_shared_docs['skaccessor'] % dict(module='isotonic'))
def isotonic(self):
return self._isotonic
@cache_readonly
def _isotonic(self):
return skaccessors.IsotonicMethods(self)
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='kernel_approximation'))
def kernel_approximation(self):
return self._kernel_approximation
@cache_readonly
def _kernel_approximation(self):
attrs = ['AdditiveChi2Sampler', 'Nystroem', 'RBFSampler', 'SkewedChi2Sampler']
return _AccessorMethods(self, module_name='sklearn.kernel_approximation',
attrs=attrs)
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='kernel_ridge'))
def kernel_ridge(self):
return self._kernel_ridge
@cache_readonly
def _kernel_ridge(self):
attrs = ['KernelRidge']
return _AccessorMethods(self, module_name='sklearn.kernel_ridge',
attrs=attrs)
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='lda'))
def lda(self):
msg = '.lda is deprecated. Use .da or .diccriminant_analysis'
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._da
@property
@Appender(_shared_docs['skaccessor'] % dict(module='linear_model'))
def linear_model(self):
return self._linear_model
# --- scikit-learn accessor properties -------------------------------------
# Each public property below lazily exposes a namespace of sklearn wrappers.
# The pattern is always the same: a public @property (docstring injected via
# @Appender from _shared_docs) that delegates to a @cache_readonly private
# attribute, which builds the accessor object exactly once per frame.
@property
@Appender(_shared_docs['skaccessor'] % dict(module='linear_model'))
def lm(self):
    # Short alias for the ``linear_model`` accessor.
    return self._linear_model

@cache_readonly
def _linear_model(self):
    return skaccessors.LinearModelMethods(self)

@property
@Appender(_shared_docs['skaccessor'] % dict(module='manifold'))
def manifold(self):
    return self._manifold

@cache_readonly
def _manifold(self):
    return skaccessors.ManifoldMethods(self)

@property
@Appender(_shared_docs['skaccessor'] % dict(module='metrics'))
def metrics(self):
    return self._metrics

@cache_readonly
def _metrics(self):
    return skaccessors.MetricsMethods(self)

@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='mixture'))
def mixture(self):
    return self._mixture

@cache_readonly
def _mixture(self):
    # No dedicated wrapper class; generic accessor over sklearn.mixture.
    return _AccessorMethods(self, module_name='sklearn.mixture')

@property
@Appender(_shared_docs['skaccessor'] % dict(module='model_selection'))
def model_selection(self):
    return self._model_selection

@property
@Appender(_shared_docs['skaccessor'] % dict(module='model_selection'))
def ms(self):
    # Short alias for the ``model_selection`` accessor.
    return self._model_selection

@cache_readonly
def _model_selection(self):
    return skaccessors.ModelSelectionMethods(self)

@property
@Appender(_shared_docs['skaccessor'] % dict(module='multiclass'))
def multiclass(self):
    return self._multiclass

@cache_readonly
def _multiclass(self):
    return _AccessorMethods(self, module_name='sklearn.multiclass')

@property
@Appender(_shared_docs['skaccessor'] % dict(module='multioutput'))
def multioutput(self):
    return self._multioutput

@cache_readonly
def _multioutput(self):
    return _AccessorMethods(self, module_name='sklearn.multioutput')

@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='naive_bayes'))
def naive_bayes(self):
    return self._naive_bayes

@cache_readonly
def _naive_bayes(self):
    return _AccessorMethods(self, module_name='sklearn.naive_bayes')

@property
@Appender(_shared_docs['skaccessor'] % dict(module='neighbors'))
def neighbors(self):
    return self._neighbors

@cache_readonly
def _neighbors(self):
    return skaccessors.NeighborsMethods(self)

@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='neural_network'))
def neural_network(self):
    return self._neural_network

@cache_readonly
def _neural_network(self):
    return _AccessorMethods(self, module_name='sklearn.neural_network')

@property
@Appender(_shared_docs['skaccessor'] % dict(module='pipeline'))
def pipeline(self):
    return self._pipeline

@cache_readonly
def _pipeline(self):
    return skaccessors.PipelineMethods(self)

@property
@Appender(_shared_docs['skaccessor'] % dict(module='preprocessing'))
def preprocessing(self):
    return self._preprocessing

@property
@Appender(_shared_docs['skaccessor'] % dict(module='preprocessing'))
def pp(self):
    # Alias; note it goes through the public property rather than
    # ``self._preprocessing`` directly (unlike ``lm``/``ms``) — equivalent.
    return self.preprocessing

@cache_readonly
def _preprocessing(self):
    return skaccessors.PreprocessingMethods(self)
@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='qda'))
def qda(self):
    """Deprecated alias for the discriminant-analysis accessor."""
    # Fixed typo in the user-facing deprecation message
    # ("diccriminant_analysis" -> "discriminant_analysis").
    msg = '.qda is deprecated. Use .da or .discriminant_analysis'
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return self._da
# --- remaining accessor properties (sklearn, seaborn, xgboost) ------------
# Same lazy-delegation pattern as the accessors above.
@property
@Appender(_shared_docs['skaccessor'] % dict(module='random_projection'))
def random_projection(self):
    return self._random_projection

@cache_readonly
def _random_projection(self):
    return _AccessorMethods(self, module_name='sklearn.random_projection')

@property
@Appender(_shared_docs['skaccessor'] % dict(module='semi_supervised'))
def semi_supervised(self):
    return self._semi_supervised

@cache_readonly
def _semi_supervised(self):
    return _AccessorMethods(self, module_name='sklearn.semi_supervised')

@property
@Appender(_shared_docs['skaccessor'] % dict(module='svm'))
def svm(self):
    return self._svm

@cache_readonly
def _svm(self):
    return skaccessors.SVMMethods(self)

@property
@Appender(_shared_docs['skaccessor_nolink'] % dict(module='tree'))
def tree(self):
    return self._tree

@cache_readonly
def _tree(self):
    return _AccessorMethods(self, module_name='sklearn.tree')

@property
def sns(self):
    """Property to access ``seaborn`` API"""
    return self._seaborn

@property
def seaborn(self):
    """Property to access ``seaborn`` API"""
    return self._seaborn

@cache_readonly
def _seaborn(self):
    return snsaccessors.SeabornMethods(self)

@property
def xgb(self):
    """Property to access ``xgboost.sklearn`` API"""
    return self._xgboost

@property
def xgboost(self):
    """Property to access ``xgboost.sklearn`` API"""
    return self._xgboost

@cache_readonly
def _xgboost(self):
    return xgboost.XGBoostMethods(self)
@Appender(pd.core.generic.NDFrame.groupby.__doc__)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
            group_keys=True, squeeze=False):
    # Delegate to the pandas_ml-aware groupby implementation (imported
    # locally to avoid a circular import at module load time).
    from pandas_ml.core.groupby import groupby
    # Same precondition pandas itself enforces: grouping needs a key source.
    if level is None and by is None:
        raise TypeError("You have to supply one of 'by' and 'level'")
    # Normalize axis aliases (e.g. 'index'/'columns') to integer 0/1.
    axis = self._get_axis_number(axis)
    return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
                   sort=sort, group_keys=group_keys, squeeze=squeeze)
| sinhrks/expandas | pandas_ml/core/frame.py | Python | bsd-3-clause | 32,117 |
# MolMod is a collection of molecular modelling tools for python.
# Copyright (C) 2007 - 2008 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of MolMod.
#
# MolMod is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# MolMod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from molmod.binning import InterAnalyseNeighboringObjects, \
IntraAnalyseNeighboringObjects, PositionedObject, SparseBinnedObjects
from molmod.unit_cell import UnitCell
from molmod.units import angstrom, degree
from molmod.data.periodic import periodic
from molmod.io.xyz import XYZFile
import math, numpy
import unittest
__all__ = ["BinningTestCase"]
class BinningTestCase(unittest.TestCase):
    """Validate the binned neighbour searches against brute-force pair loops.

    Each test builds the pair/distance mapping with the fast binned
    algorithm and then re-derives every pair with an O(N^2) scan, failing
    on missing pairs, wrong distances or duplicates.
    """

    # Every pair closer than this must be reported by the binned search;
    # twice the largest covalent radius covers all distances of interest.
    gridsize = periodic.max_radius*2

    def load_binned_atoms(self, filename):
        """Read ``input/<filename>`` and return (molecule, binned atoms)."""
        m = XYZFile("input/"+filename).get_molecule()
        def yield_positioned_atoms():
            for index in xrange(len(m.numbers)):
                yield PositionedObject((m, index), m.coordinates[index])
        return m, SparseBinnedObjects(yield_positioned_atoms(), self.gridsize)

    def verify(self, yield_pairs, distances, unit_cell=None):
        """Cross-check brute-force distances against the binned results.

        ``distances`` maps frozenset([id1, id2]) -> distance as produced by
        the binned search. Matched entries are deleted as they are found,
        so anything left at the end is a duplicate/spurious pair.
        """
        missing_pairs = []
        wrong_distances = []
        for (id1, coord1), (id2, coord2) in yield_pairs():
            delta = coord2 - coord1
            if unit_cell is not None:
                # Minimum image convention for periodic systems.
                delta = unit_cell.shortest_vector(delta)
            distance = math.sqrt(numpy.dot(delta, delta))
            if distance < self.gridsize:
                identifier = frozenset([id1, id2])
                fast_distance = distances.get(identifier)
                # Fixed "== None" to the idiomatic identity test (PEP 8).
                if fast_distance is None:
                    missing_pairs.append(tuple(identifier) + (distance,))
                elif fast_distance != distance:
                    # Exact float equality is intentional: both sides are
                    # computed with the same operations on the same inputs.
                    wrong_distances.append(tuple(identifier) + (fast_distance, distance))
                else:
                    del distances[identifier]
        message = "-"*50+"\n"
        message += "MISSING PAIRS: %i\n" % len(missing_pairs)
        for missing_pair in missing_pairs:
            message += "%10s %10s: \t % 10.7f\n" % missing_pair
        message += "WRONG DISTANCES: %i\n" % len(wrong_distances)
        for wrong_distance in wrong_distances:
            message += "%10s %10s: \t % 10.7f != % 10.7f\n" % wrong_distance
        message += "DUPLICATE PAIRS: %i\n" % len(distances)
        for identifier, fast_distance in distances.iteritems():
            message += "%10s %10s: \t % 10.7f\n" % (tuple(identifier) + (fast_distance,))
        message += "-"*50+"\n"
        self.assertEqual(len(missing_pairs), 0, message)
        self.assertEqual(len(wrong_distances), 0, message)
        self.assertEqual(len(distances), 0, message)

    def verify_intra(self, molecule, distances, unit_cell=None):
        """Brute-force all unique atom pairs within one molecule."""
        def yield_atom_pairs():
            for index1, coord1 in enumerate(molecule.coordinates):
                for index2, coord2 in enumerate(molecule.coordinates[:index1]):
                    yield ((molecule, index1), coord1), ((molecule, index2), coord2)
        self.verify(yield_atom_pairs, distances, unit_cell)

    def verify_inter(self, molecule1, molecule2, distances, unit_cell=None):
        """Brute-force all atom pairs across two molecules."""
        def yield_atom_pairs():
            for index1, coord1 in enumerate(molecule1.coordinates):
                for index2, coord2 in enumerate(molecule2.coordinates):
                    yield ((molecule1, index1), coord1), ((molecule2, index2), coord2)
        self.verify(yield_atom_pairs, distances, unit_cell)

    def compare_function(self, positioned1, positioned2):
        """Distance between two positioned objects, or None when out of range."""
        delta = positioned2.coordinate - positioned1.coordinate
        distance = math.sqrt(numpy.dot(delta, delta))
        if distance < self.gridsize:
            return distance

    def test_distances_intra(self):
        molecule, binned_atoms = self.load_binned_atoms("precursor.xyz")
        distances = dict(
            (frozenset([positioned1.id, positioned2.id]), result)
            for (positioned1, positioned2), result
            in IntraAnalyseNeighboringObjects(binned_atoms, self.compare_function)()
        )
        self.verify_intra(molecule, distances)

    def test_distances_intra_periodic(self):
        molecule, binned_atoms = self.load_binned_atoms("lau.xyz")
        unit_cell = UnitCell()
        unit_cell.set_parameters(
            numpy.array([14.59, 12.88, 7.61])*angstrom,
            numpy.array([ 90.0, 111.0, 90.0])*degree,
        )
        distances = dict(
            (frozenset([positioned1.id, positioned2.id]), result)
            for (positioned1, positioned2), result
            in IntraAnalyseNeighboringObjects(binned_atoms, self.compare_function)(unit_cell)
        )
        self.verify_intra(molecule, distances, unit_cell)

    def test_distances_inter(self):
        molecule1, binned_atoms1 = self.load_binned_atoms("precursor.xyz")
        molecule2, binned_atoms2 = self.load_binned_atoms("precursor.xyz")
        distances = dict(
            (frozenset([positioned1.id, positioned2.id]), result)
            for (positioned1, positioned2), result
            in InterAnalyseNeighboringObjects(binned_atoms1, binned_atoms2, self.compare_function)()
        )
        self.verify_inter(molecule1, molecule2, distances)

    def test_distances_inter_periodic(self):
        molecule1, binned_atoms1 = self.load_binned_atoms("lau.xyz")
        molecule2, binned_atoms2 = self.load_binned_atoms("lau.xyz")
        unit_cell = UnitCell()
        unit_cell.set_parameters(
            numpy.array([14.59, 12.88, 7.61])*angstrom,
            numpy.array([ 90.0, 111.0, 90.0])*degree,
        )
        distances = dict(
            (frozenset([positioned1.id, positioned2.id]), result)
            for (positioned1, positioned2), result
            in InterAnalyseNeighboringObjects(binned_atoms1, binned_atoms2, self.compare_function)(unit_cell)
        )
        self.verify_inter(molecule1, molecule2, distances, unit_cell)
| woutersmet/Molmodsummer | test/binning.py | Python | gpl-3.0 | 6,466 |
"""
WSGI config for miniature project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Point Django at the project settings before the application is created.
# NOTE(review): "django-cielo-exemplo.settings" contains hyphens, which are
# not importable as a dotted Python module path -- confirm against the
# project layout how this settings module is actually resolved.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django-cielo-exemplo.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| vitorh45/django-ecommerce-exemplo1 | django-cielo-exemplo/wsgi.py | Python | apache-2.0 | 1,151 |
from service import *
# Ad-hoc report: count users per web-account domain.
# Python 2 script (uses the print statement); PymongoManger comes from the
# project-local ``service`` module imported above.
db = PymongoManger().getNew()
# All distinct domains found across every user's web accounts.
distinct_domain = db.users.find().distinct("web_accounts.domain");
for domain in distinct_domain:
    # Number of user documents holding at least one account on this domain.
    account_count = db.users.find({"web_accounts.domain":domain}).count()
    print "%s: %d" % (domain,account_count)
| kittolau/selepy | db_users_stat.py | Python | mit | 270 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'MppAllowEnum' : _MetaInfoEnum('MppAllowEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper',
{
'ssh':'ssh',
'telnet':'telnet',
'snmp':'snmp',
'tftp':'tftp',
'http':'http',
'xr-xml':'xr_xml',
'netconf':'netconf',
'all':'all',
}, 'Cisco-IOS-XR-lib-mpp-oper', _yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper']),
'MppAfIdBaseIdentity' : {
'meta_info' : _MetaInfoClass('MppAfIdBaseIdentity',
False,
[
],
'Cisco-IOS-XR-lib-mpp-oper',
'Mpp-af-id-base',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Outband.Vrf' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Outband.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Outband VRF name
''',
'vrf_name',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol.PeerAddress' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol.PeerAddress',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_IDENTITY_CLASS, 'MppAfIdBaseIdentity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'MppAfIdBaseIdentity',
[], [],
''' AFName
''',
'af_name',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'peer-address',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol',
False,
[
_MetaInfoClassMember('allow', REFERENCE_ENUM_CLASS, 'MppAllowEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'MppAllowEnum',
[], [],
''' MPP allow
''',
'allow',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('is-all-peers-allowed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, all peers are allowed
''',
'is_all_peers_allowed',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('peer-address', REFERENCE_LIST, 'PeerAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol.PeerAddress',
[], [],
''' List of peer addresses
''',
'peer_address',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'protocol',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Outband.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Outband.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface name, specify 'all' for all
interfaces
''',
'interface_name',
'Cisco-IOS-XR-lib-mpp-oper', True),
_MetaInfoClassMember('protocol', REFERENCE_LIST, 'Protocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol',
[], [],
''' MPP Interface protocols
''',
'protocol',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Outband.Interfaces' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Outband.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Outband.Interfaces.Interface',
[], [],
''' MPP interface information
''',
'interface',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Outband' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Outband',
False,
[
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Outband.Interfaces',
[], [],
''' List of inband/outband interfaces
''',
'interfaces',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('vrf', REFERENCE_CLASS, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Outband.Vrf',
[], [],
''' Outband VRF information
''',
'vrf',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'outband',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol.PeerAddress' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol.PeerAddress',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_IDENTITY_CLASS, 'MppAfIdBaseIdentity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'MppAfIdBaseIdentity',
[], [],
''' AFName
''',
'af_name',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'ipv6_address',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'peer-address',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol',
False,
[
_MetaInfoClassMember('allow', REFERENCE_ENUM_CLASS, 'MppAllowEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'MppAllowEnum',
[], [],
''' MPP allow
''',
'allow',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('is-all-peers-allowed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' If TRUE, all peers are allowed
''',
'is_all_peers_allowed',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('peer-address', REFERENCE_LIST, 'PeerAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol.PeerAddress',
[], [],
''' List of peer addresses
''',
'peer_address',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'protocol',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Inband.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Inband.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface name, specify 'all' for all
interfaces
''',
'interface_name',
'Cisco-IOS-XR-lib-mpp-oper', True),
_MetaInfoClassMember('protocol', REFERENCE_LIST, 'Protocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol',
[], [],
''' MPP Interface protocols
''',
'protocol',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Inband.Interfaces' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Inband.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Inband.Interfaces.Interface',
[], [],
''' MPP interface information
''',
'interface',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection.Inband' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection.Inband',
False,
[
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Inband.Interfaces',
[], [],
''' List of inband/outband interfaces
''',
'interfaces',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'inband',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'ManagementPlaneProtection' : {
'meta_info' : _MetaInfoClass('ManagementPlaneProtection',
False,
[
_MetaInfoClassMember('inband', REFERENCE_CLASS, 'Inband' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Inband',
[], [],
''' Management Plane Protection (MPP) inband
interface data
''',
'inband',
'Cisco-IOS-XR-lib-mpp-oper', False),
_MetaInfoClassMember('outband', REFERENCE_CLASS, 'Outband' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper', 'ManagementPlaneProtection.Outband',
[], [],
''' Management Plane Protection (MPP) outband
interface data
''',
'outband',
'Cisco-IOS-XR-lib-mpp-oper', False),
],
'Cisco-IOS-XR-lib-mpp-oper',
'management-plane-protection',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'Ipv4Identity' : {
'meta_info' : _MetaInfoClass('Ipv4Identity',
False,
[
],
'Cisco-IOS-XR-lib-mpp-oper',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
'Ipv6Identity' : {
'meta_info' : _MetaInfoClass('Ipv6Identity',
False,
[
],
'Cisco-IOS-XR-lib-mpp-oper',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-lib-mpp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_mpp_oper'
),
},
}
# Wire up parent links so each nested meta-info class knows its container,
# mirroring the YANG data-tree hierarchy. This file is auto-generated by
# ydk-gen; do not edit by hand.
_meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol.PeerAddress']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol']['meta_info']
_meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface.Protocol']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface']['meta_info']
_meta_table['ManagementPlaneProtection.Outband.Interfaces.Interface']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Outband.Interfaces']['meta_info']
_meta_table['ManagementPlaneProtection.Outband.Vrf']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Outband']['meta_info']
_meta_table['ManagementPlaneProtection.Outband.Interfaces']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Outband']['meta_info']
_meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol.PeerAddress']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol']['meta_info']
_meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface.Protocol']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface']['meta_info']
_meta_table['ManagementPlaneProtection.Inband.Interfaces.Interface']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Inband.Interfaces']['meta_info']
_meta_table['ManagementPlaneProtection.Inband.Interfaces']['meta_info'].parent =_meta_table['ManagementPlaneProtection.Inband']['meta_info']
_meta_table['ManagementPlaneProtection.Outband']['meta_info'].parent =_meta_table['ManagementPlaneProtection']['meta_info']
_meta_table['ManagementPlaneProtection.Inband']['meta_info'].parent =_meta_table['ManagementPlaneProtection']['meta_info']
| 111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_lib_mpp_oper.py | Python | apache-2.0 | 17,334 |
# Dict-lookup exercise: map names to ages, then print a few entries.
age = {"tom": 23, "jane": 32, "mike": 27, "linda": 25}
# Print the ages in the same order as the original exercise.
for person in ("mike", "linda", "jane", "tom"):
    print(age[person])
| codermoji-contrib/python | start/Intro to Dicts/printdict/printval4.py | Python | mit | 123 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# jeepr documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 27 14:05:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import re
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'jeepr'
copyright = '2017, Agile Scientific'
author = 'Agile Scientific'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Read the package version out of jeepr/_version.py so the docs always
# match the installed code (avoids importing the package at build time).
verstr = 'unknown'
VERSIONFILE = "../jeepr/_version.py"
with open(VERSIONFILE, "r") as f:
    verstrline = f.read().strip()
pattern = re.compile(r"__version__ = ['\"](.*)['\"]")
mo = pattern.search(verstrline)
if mo:
    verstr = mo.group(1)
    print("Version " + verstr)
else:
    # Fail the docs build loudly rather than publishing "unknown".
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
# The short X.Y version. (Was ``verstr[:3]``, which breaks for versions
# with multi-digit components such as "0.10.1" -> "0.1".)
version = '.'.join(verstr.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = verstr
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'jeeprdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'jeepr.tex', 'jeepr Documentation',
'Agile Scientific', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jeepr', 'jeepr Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'jeepr', 'jeepr Documentation',
author, 'jeepr', 'One line description of project.',
'Miscellaneous'),
]
| agile-geoscience/jeepr | docs/conf.py | Python | apache-2.0 | 5,577 |
'''
Created on May 17, 2013
@author: Yubin Bai
'''
class Solution:
    """In-place next lexicographic permutation; duplicate elements allowed.

    When the sequence is already the highest permutation, it wraps around
    to the lowest (sorted) order instead of raising.
    """

    # @param num, a list of integer (rearranged in place)
    # @return the same list, holding the next permutation
    def nextPermutation(self, num):
        n = len(num)
        if n <= 1:
            return num
        # 1. Scan from the right for the pivot: the last index whose value
        #    is smaller than its successor.
        pivot = n - 2
        while pivot >= 0 and num[pivot] >= num[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Entire sequence is non-increasing: wrap to the smallest order.
            num.sort()
            return num
        # 2. Find the rightmost element strictly greater than the pivot
        #    value (guaranteed to exist) and swap the two.
        succ = n - 1
        while num[succ] <= num[pivot]:
            succ -= 1
        num[pivot], num[succ] = num[succ], num[pivot]
        # 3. The suffix after the pivot is non-increasing; reversing it
        #    yields the minimal tail, completing the next permutation.
        num[pivot + 1:] = num[:pivot:-1]
        return num
if __name__ == '__main__':
    # Ad-hoc driver: cycle through the permutations of a multiset.
    # NOTE: Python 2 script (bare ``print`` statement below).
    seq = [1, 2, 2, 3]
    print(seq)
    s = Solution()
    for i in range(29):
        print s.nextPermutation(seq)
| asraf209/leetcode | src/nextPermutation/main.py | Python | gpl-3.0 | 1,127 |
"""
Classes to support [encrypted] database export and import.
Classes in this module do not write out any files to the filesystem; it is the
responsibility of the calling code to handle the persistence of any file streams
(and make sure it's done securely, etc.).
"""
import os
import abc
from cStringIO import StringIO
import tempfile
import pexpect
import gnupg
from sqlalchemy import and_
import yaml
from ensconce import model, exc
from ensconce.model import meta
from ensconce.config import config
from ensconce.autolog import log
from ensconce.dao import passwords, resources, groups
from sqlalchemy.orm.exc import MultipleResultsFound
class GpgAes256(gnupg.GPG):
    """
    By default we'd like to use AES256 instead of CAST5, since this will include
    MDC integrity validation.
    """
    def __init__(self, *args, **kwargs):
        """Force --cipher-algo=AES256 onto the GPG command-line options."""
        opts = kwargs.get('options')
        # Build a fresh list rather than appending in place, so we never
        # mutate a caller-owned options list (the original appended to it).
        if opts is not None:
            kwargs['options'] = list(opts) + ['--cipher-algo=AES256']
        else:
            kwargs['options'] = ['--cipher-algo=AES256']
        super(GpgAes256, self).__init__(*args, **kwargs)
class Exporter(object):
    """Abstract base class for database exporters.

    Subclasses implement :meth:`export` to serialize the (optionally
    filtered) database contents onto a file-like stream.
    """
    __metaclass__ = abc.ABCMeta

    # Whether key-metadata rows are included in the export.
    include_key_metadata = False
    # Filter clauses restricting which resources / passwords are exported.
    resource_filters = None
    password_filters = None

    def __init__(self, resource_filters=None, password_filters=None, include_key_metadata=False):
        # Default the optional filter arguments to empty lists; a
        # caller-supplied sequence is kept as-is (not copied).
        self.resource_filters = [] if resource_filters is None else resource_filters
        self.password_filters = [] if password_filters is None else password_filters
        self.include_key_metadata = include_key_metadata

    @abc.abstractmethod
    def export(self, stream):
        """
        Export database contents to specified stream (file-like object).
        """
# This is expected to expand as we probably want to be able to pass our exporters
# some standard options (about which entities to include, etc.)
class DictExporter(Exporter):
    """
    Common functionality for exporting the model as a python dict.
    """
    def build_structure(self):
        """
        Builds a python dictionary of the entire database structure we want to export.

        Passwords and resources are decrypted in the returned structure, so
        the caller is responsible for handling the output securely.
        """
        session = meta.Session()
        content = {}
        # TODO key metadata?
        if self.include_key_metadata:
            content['key_metadata'] = [kmd.to_dict(encode=False) for kmd in session.query(model.KeyMetadata).all()]
        content['resources'] = []
        rsrc_t = model.resources_table
        pass_t = model.passwords_table
        grp_t = model.groups_table
        # Order by name so exports are deterministic / diff-friendly.
        q = session.query(model.Resource).order_by(rsrc_t.c.name)
        if self.resource_filters:
            # Filters reference group membership, hence the join.
            q = q.join(model.GroupResource)
            q = q.filter(and_(*self.resource_filters))
            q = q.order_by(rsrc_t.c.name)
        for resource in q.all():
            # decrypt=True: plaintext values go into the export structure.
            rdict = resource.to_dict(decrypt=True)
            pw_q = resource.passwords
            if self.password_filters:
                pw_q = pw_q.filter(and_(*self.password_filters))
            pw_q = pw_q.order_by(pass_t.c.username)
            rdict['passwords'] = [pw.to_dict(decrypt=True) for pw in pw_q.all()]
            rdict['groups'] = [g.name for g in resource.groups.order_by(grp_t.c.name).all()]
            content['resources'].append(rdict)
        return content
class YamlExporter(DictExporter):
    """Exporter that serializes the database structure as a YAML document."""

    def __init__(self, use_tags=True, resource_filters=None, password_filters=None,
                 include_key_metadata=False):
        super(YamlExporter, self).__init__(resource_filters=resource_filters,
                                           password_filters=password_filters,
                                           include_key_metadata=include_key_metadata)
        self.use_tags = use_tags

    def export(self, stream):
        """Dump the database structure to `stream` as YAML."""
        if not hasattr(stream, 'write'):
            raise TypeError("stream must be a file-like object.")
        # Tagged output uses the full Dumper (python object tags); untagged
        # output sticks to plain YAML via the SafeDumper.
        dumper = yaml.Dumper if self.use_tags else yaml.SafeDumper
        yaml.dump(self.build_structure(), stream=stream, Dumper=dumper)
class GpgYamlExporter(YamlExporter):
    """YAML exporter that symmetrically encrypts its output with GPG."""

    def __init__(self, passphrase, use_tags=True, resource_filters=None,
                 password_filters=None, include_key_metadata=False):
        super(GpgYamlExporter, self).__init__(use_tags=use_tags,
                                              resource_filters=resource_filters,
                                              password_filters=password_filters,
                                              include_key_metadata=include_key_metadata)
        self.passphrase = passphrase

    def export(self, stream):
        """Write GPG-encrypted YAML to `stream` and rewind it for the caller."""
        # Render the plaintext YAML into an in-memory buffer first ...
        plaintext = StringIO()
        super(GpgYamlExporter, self).export(plaintext)
        plaintext.seek(0)
        # ... then symmetrically encrypt that buffer onto the caller's stream.
        gpg = GpgAes256()
        encrypted = gpg.encrypt_file(plaintext, recipients="user@example.com",
                                     passphrase=self.passphrase, symmetric=True)
        stream.write(str(encrypted))
        stream.seek(0)
class KeepassExporter(GpgYamlExporter):
    # Exports the database to a KeePass 1.x (.kdb) file by producing an
    # encrypted YAML export and handing it to an external converter binary
    # configured at export.keepass.exe_path.

    def __init__(self, passphrase, resource_filters=None, password_filters=None,
                 include_key_metadata=False):
        # use_tags=False: the intermediate YAML must be plain (safe-dumped)
        # so the external converter can parse it without python-type tags.
        super(KeepassExporter, self).__init__(use_tags=False,
                                              passphrase=passphrase,
                                              resource_filters=resource_filters,
                                              password_filters=password_filters,
                                              include_key_metadata=include_key_metadata)

    def export(self, stream):
        """
        Write the converted KeePass database bytes to `stream` and rewind it.

        Temporary files are always removed in the finally block, even when
        the external converter fails.
        """
        gpg_filename = None
        kdb_filename = None
        try:
            with tempfile.NamedTemporaryFile(suffix='.yaml.gpg', prefix='export', delete=False) as gpg_fp:
                super(KeepassExporter, self).export(gpg_fp)
                gpg_filename = gpg_fp.name
            kdb_fp = tempfile.NamedTemporaryFile(suffix='.kdb', prefix='export', delete=False) # We have to manually delete this one
            kdb_filename = kdb_fp.name
            kdb_fp.close()
            cmd_exe = config['export.keepass.exe_path']
            args = ['-i', gpg_filename, '-o', kdb_filename]
            log.info("Executing command: {0} {1}".format(cmd_exe, ' '.join(args)))
            child = pexpect.spawn(cmd_exe, args)
            # 'ssphrase' matches the tail of the converter's passphrase
            # prompt regardless of its exact capitalization/wording.
            child.expect('ssphrase')
            child.sendline(self.passphrase)
            child.expect(pexpect.EOF)
            log.debug(child.before)
            # NOTE(review): opens the binary .kdb file in text mode; harmless
            # on POSIX Python 2, but 'rb' would be safer -- confirm before
            # porting this code.
            with open(kdb_filename) as read_fp:
                # Read contents of file into our own stream
                kdb_bytes = read_fp.read()
                stream.write(kdb_bytes)
                log.debug("Read {0} bytes from kdb file stream".format(len(kdb_bytes)))
            stream.seek(0)
        finally:
            if gpg_filename:
                os.remove(gpg_filename)
            if kdb_filename:
                os.remove(kdb_filename)
# ----------------------------------------------------------------------------
# IMPORTER CLASSES
# ----------------------------------------------------------------------------
class Importer(object):
    """Abstract base class for database importers."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, force=False):
        # When true, the import may overwrite attributes of existing rows.
        self.force = force

    @abc.abstractmethod
    def execute(self, stream):
        """
        Import database contents from specified stream (file-like object).
        """
        pass
class DictImporter(Importer):
    """
    Common functionality for importing the model from a python dict.
    """
    def from_structure(self, structure):
        """
        Populates the SQLAlchemy model from a python dictionary of the database structure.

        Groups named in the structure are created on demand.  Existing
        resources/passwords are updated; updating any attribute other than
        group membership (for resources) or the password value itself (for
        passwords) raises RuntimeError unless ``self.force`` is set.
        The session is rolled back on any error and the exception re-raised.
        """
        session = meta.Session()
        try:
            for resource_s in structure['resources']:
                log.debug("Importing: {0!r}".format(resource_s))

                # First build up a list of group_ids for this resource that will correspond to groups
                # in *this* database.
                group_ids = []
                for gname in resource_s['groups']:
                    group = groups.get_by_name(gname, assert_exists=False)
                    if not group:
                        group = groups.create(gname)
                        log.info("Created group: {0!r}".format(group))
                    else:
                        log.info("Found existing group: {0!r}".format(group))
                    group_ids.append(group.id)

                # First we should see if there is a match for the id and name; we can't rely on name alone since
                # there is no guarantee of name uniqueness (even with a group)
                resource = None
                resource_candidate = resources.get(resource_s['id'], assert_exists=False)
                if resource_candidate and resource_candidate.name == resource_s['name']:
                    resource = resource_candidate
                else:
                    # If we find a matching resource (by name) and there is only one then we'll use that.
                    try:
                        resource = resources.get_by_name(resource_s['name'], assert_single=True, assert_exists=True)
                    except MultipleResultsFound:
                        log.info("Multiple resource matched name {0!r}, will create a new one.".format(resource_s['name']))
                    except exc.NoSuchEntity:
                        log.debug("No resource found matching name: {0!r}".format(resource_s['name']))
                        pass

                # Only these whitelisted keys are copied from the structure.
                resource_attribs = ('name', 'addr', 'description', 'notes', 'tags')
                resource_attribs_update = dict([(k,v) for (k,v) in resource_s.items() if k in resource_attribs])

                if resource:
                    (resource, modified) = resources.modify(resource.id, group_ids=group_ids, **resource_attribs_update)
                    # (yes, we are overwriting 'resource' var with new copy returned from this method)
                    log.info("Updating existing resource: {0!r} (modified: {1!r})".format(resource, modified))
                    # Group membership changes alone are always allowed; any
                    # other attribute change requires force.
                    if modified and modified != ['group_ids']:
                        if not self.force:
                            raise RuntimeError("Refusing to modify existing resource attributes {0!r} on {1!r} (use 'force' to override this).".format(modified, resource))
                        else:
                            log.warning("Overwriting resource attributes {0!r} on {1!r}".format(modified, resource))
                else:
                    # We will just assume that we need to create the resource.  Yes, it's possible it'll match an existing
                    # one, but better to build a merge tool than end up silently merging things that are not the same.
                    resource = resources.create(group_ids=group_ids, **resource_attribs_update)
                    log.info("Created new resource: {0!r}".format(resource))

                # Add the passwords
                for password_s in resource_s['passwords']:
                    password_attribs = ('username', 'description', 'password', 'tags')
                    password_attribs_update = dict([(k,v) for (k,v) in password_s.items() if k in password_attribs])

                    # Look for a matching password. We do know that this is unique.
                    password = passwords.get_for_resource(password_s['username'], password_s['resource_id'], assert_exists=False)
                    if password:
                        (password, modified) = passwords.modify(password_id=password.id, **password_attribs_update)
                        # (Yeah, we overwrite password object.)
                        log.info("Updating existing password: {0!r} (modified: {1!r})".format(password, modified))
                        non_pw_modified = set(modified) - set(['password'])
                        if not modified:
                            log.debug("Password row not modified.")
                        else:
                            log.debug("Password modified: {0!r}".format(modified))

                        # If anything changed other than password, we need to ensure that force=true
                        if non_pw_modified:
                            if not self.force:
                                raise RuntimeError("Refusing to modify existing password attributes {0!r} on {1!r} (use 'force' to override this).".format(non_pw_modified, password))
                            else:
                                log.warning("Overwriting password attributes {0!r} on {1!r}".format(non_pw_modified, password))
                    else:
                        password = passwords.create(resource_id=resource.id, **password_attribs_update)
                        log.info("Creating new password: {0!r}".format(password))

            # This probably isn't necessary as all the DAO methods should also flush session, but might as well.
            session.flush()
        except:
            # Bare except is deliberate here: roll back on *any* failure
            # (including KeyboardInterrupt) and re-raise unchanged.
            session.rollback()
            raise
class YamlImporter(DictImporter):
    """Importer that reads the database structure from a YAML stream."""

    def __init__(self, use_tags=True, force=False):
        super(YamlImporter, self).__init__(force=force)
        self.use_tags = use_tags

    def execute(self, stream):
        """Parse YAML from `stream` and load it into the database."""
        if not hasattr(stream, 'read'):
            raise TypeError("stream must be a file-like object.")
        # Tagged documents need the full Loader; untagged documents are
        # parsed with the SafeLoader.
        loader = yaml.Loader if self.use_tags else yaml.SafeLoader
        self.from_structure(yaml.load(stream=stream, Loader=loader))
class GpgYamlImporter(YamlImporter):
    """Importer that first decrypts a GPG-encrypted YAML stream."""

    def __init__(self, passphrase, use_tags=True, force=False):
        super(GpgYamlImporter, self).__init__(use_tags=use_tags,
                                              force=force)
        self.passphrase = passphrase

    def execute(self, stream):
        """Decrypt `stream` and hand the plaintext YAML to the parent importer."""
        gpg = GpgAes256()
        decrypted = gpg.decrypt_file(stream, passphrase=self.passphrase)
        if not decrypted.ok:
            raise ValueError("Failed to decrypt stream contents. stderr: {0}".format(decrypted.stderr))
        # Replay the decrypted plaintext through an in-memory buffer.
        plaintext = StringIO()
        plaintext.write(str(decrypted))
        plaintext.seek(0)
        super(GpgYamlImporter, self).execute(plaintext)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see the file header); hand edits
# will be lost when the client is regenerated.
class SecurityRulesOperations:
    """SecurityRulesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2017_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Class-level alias so callers can reach the model types via the
    # operation group.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the raw DELETE request once; long-running polling is
        # handled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200, 202 and 204 are the accepted status codes for this delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial DELETE.  cls=lambda
            # keeps the raw pipeline response for the poller to consume.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                security_rule_name=security_rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        **kwargs: Any
    ) -> "_models.SecurityRule":
        """Get the specified network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecurityRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_10_01.models.SecurityRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('SecurityRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        security_rule_parameters: "_models.SecurityRule",
        **kwargs: Any
    ) -> "_models.SecurityRule":
        # Issues the raw PUT request once; long-running polling is handled
        # by begin_create_or_update.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry a SecurityRule payload.
        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('SecurityRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        security_rule_name: str,
        security_rule_parameters: "_models.SecurityRule",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.SecurityRule"]:
        """Creates or updates a security rule in the specified network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :param security_rule_parameters: Parameters supplied to the create or update network security
         rule operation.
        :type security_rule_parameters: ~azure.mgmt.network.v2017_10_01.models.SecurityRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.SecurityRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial PUT.  cls=lambda keeps
            # the raw pipeline response for the poller to consume.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                security_rule_name=security_rule_name,
                security_rule_parameters=security_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('SecurityRule', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.SecurityRuleListResult"]:
        """Gets all security rules in a network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.SecurityRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Builds either the first-page request or a follow-up request to
            # the server-provided next_link URL.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Returns (next page link or None, async iterator of page items).
            deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_security_rules_operations.py | Python | mit | 22,275 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2019 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import os.path
from abc import ABCMeta, abstractmethod
from datetime import datetime
from trac.admin import AdminCommandError, IAdminCommandProvider, get_dir_list
from trac.config import ConfigSection, Option
from trac.core import *
from trac.resource import IResourceManager, Resource, ResourceNotFound
from trac.util import as_bool, native_path
from trac.util.concurrency import get_thread_id, threading
from trac.util.datefmt import time_now, utc
from trac.util.text import exception_to_unicode, printout, to_unicode
from trac.util.translation import _
from trac.web.api import IRequestFilter
from trac.web.chrome import Chrome, ITemplateProvider, add_warning
def is_default(reponame):
    """Check whether `reponame` is the default repository."""
    # An empty/None name always means the default repository; otherwise the
    # name must be the literal (or translated) "(default)" marker.
    if not reponame:
        return True
    return reponame in ('(default)', _('(default)'))
class InvalidRepository(TracError):
    """Raised when a repository is invalid."""
class InvalidConnector(TracError):
    """Raised when a repository connector is invalid."""
class IRepositoryConnector(Interface):
    """Provide support for a specific version control system."""

    # NOTE: trac Interface methods are declared without `self`; they only
    # document the contract and are never invoked on the interface itself.

    error = None  # place holder for storing relevant error message

    def get_supported_types():
        """Return the types of version control systems that are supported.

        Yields `(repotype, priority)` pairs, where `repotype` is used to
        match against the repository's `type` attribute.

        If multiple provider match a given type, the `priority` is used to
        choose between them (highest number is highest priority).

        If the `priority` returned is negative, this indicates that the
        connector for the given `repotype` indeed exists but can't be
        used for some reason. The `error` property can then be used to
        store an error message or exception relevant to the problem detected.
        """

    def get_repository(repos_type, repos_dir, params):
        """Return a Repository instance for the given repository type and dir.
        """
class IRepositoryProvider(Interface):
    """Provide known named instances of Repository."""

    def get_repositories():
        """Generate repository information for known repositories.

        Repository information is a key,value pair, where the value is
        a dictionary which must contain at the very least either of
        the following entries:

         - `'dir'`: the repository directory which can be used by the
                    connector to create a `Repository` instance. This
                    defines a "real" repository.

         - `'alias'`: the name of another repository. This defines an
                      alias to another (real) repository.

        Optional entries:

         - `'type'`: the type of the repository (if not given, the
                     default repository type will be used).

         - `'description'`: a description of the repository (can
                            contain WikiFormatting).

         - `'hidden'`: if set to `'true'`, the repository is hidden
                       from the repository index (default: `'false'`).

         - `'sync_per_request'`: if set to `'true'`, the repository will be
                                 synchronized on every request (default:
                                 `'false'`).

         - `'url'`: the base URL for checking out the repository.
        """
class IRepositoryChangeListener(Interface):
    """Listen for changes in repositories."""

    def changeset_added(repos, changeset):
        """Called after a changeset has been added to a repository."""

    def changeset_modified(repos, changeset, old_changeset):
        """Called after a changeset has been modified in a repository.

        The `old_changeset` argument contains the metadata of the changeset
        prior to the modification. It is `None` if the old metadata cannot
        be retrieved.
        """
class DbRepositoryProvider(Component):
    """Component providing repositories registered in the DB."""

    implements(IRepositoryProvider, IAdminCommandProvider)

    # The only attribute names that may be stored per repository in the
    # `repository` table; `_do_set` / `modify_repository` reject other keys.
    repository_attrs = ('alias', 'description', 'dir', 'hidden', 'name',
                        'sync_per_request', 'type', 'url')

    # IRepositoryProvider methods

    def get_repositories(self):
        """Retrieve repositories specified in the repository DB table.

        Yields `(reponame, info)` pairs, one for each `repository` table id
        that has a `'name'` and at least one of `'dir'` or `'alias'`.
        """
        repos = {}
        # Gather all known attributes, grouped by repository id.  The IN
        # list is built from the fixed `repository_attrs` tuple, so the
        # string interpolation cannot inject user data.
        for id, name, value in self.env.db_query(
                "SELECT id, name, value FROM repository WHERE name IN (%s)"
                % ",".join("'%s'" % each for each in self.repository_attrs)):
            if value is not None:
                repos.setdefault(id, {})[name] = value
        reponames = {}
        for id, info in repos.iteritems():
            # Only rows defining both a name and a target (dir or alias)
            # describe a usable repository.
            if 'name' in info and ('dir' in info or 'alias' in info):
                info['id'] = id
                reponames[info['name']] = info
            # Normalize to a real boolean (applied to every entry, even
            # those filtered out above).
            info['sync_per_request'] = as_bool(info.get('sync_per_request'))
        return reponames.iteritems()

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        """Yield `trac-admin` command descriptors for managing repositories."""
        yield ('repository add', '<repos> <dir> [type]',
               "Add a source repository",
               self._complete_add, self._do_add)
        yield ('repository alias', '<name> <target>',
               "Create an alias for a repository",
               self._complete_alias, self._do_alias)
        yield ('repository remove', '<repos>',
               "Remove a source repository",
               self._complete_repos, self._do_remove)
        yield ('repository set', '<repos> <key> <value>',
               """Set an attribute of a repository
               The following keys are supported: %s
               """ % ', '.join(self.repository_attrs),
               self._complete_set, self._do_set)

    def get_reponames(self):
        # All known repository names, with '' displayed as '(default)'.
        rm = RepositoryManager(self.env)
        return [reponame or '(default)' for reponame
                in rm.get_all_repositories()]

    def _complete_add(self, args):
        # Shell completion: a directory for the 2nd arg, a connector type
        # for the 3rd.
        if len(args) == 2:
            return get_dir_list(args[-1], True)
        elif len(args) == 3:
            return RepositoryManager(self.env).get_supported_types()

    def _complete_alias(self, args):
        # Shell completion: the alias target is an existing repository name.
        if len(args) == 2:
            return self.get_reponames()

    def _complete_repos(self, args):
        if len(args) == 1:
            return self.get_reponames()

    def _complete_set(self, args):
        if len(args) == 1:
            return self.get_reponames()
        elif len(args) == 2:
            return self.repository_attrs

    def _do_add(self, reponame, dir, type_=None):
        self.add_repository(reponame, os.path.abspath(dir), type_)

    def _do_alias(self, reponame, target):
        self.add_alias(reponame, target)

    def _do_remove(self, reponame):
        self.remove_repository(reponame)

    def _do_set(self, reponame, key, value):
        """`repository set` command: change one attribute of a repository."""
        if key not in self.repository_attrs:
            raise AdminCommandError(_('Invalid key "%(key)s"', key=key))
        if key == 'dir':
            value = os.path.abspath(value)
        self.modify_repository(reponame, {key: value})
        if not reponame:
            reponame = '(default)'
        # A changed directory definitely invalidates the cached revisions;
        # a changed type may -- advise the administrator accordingly.
        if key == 'dir':
            printout(_('You should now run "repository resync %(name)s".',
                       name=reponame))
        elif key == 'type':
            printout(_('You may have to run "repository resync %(name)s".',
                       name=reponame))

    # Public interface

    def add_repository(self, reponame, dir, type_=None):
        """Add a repository."""
        if not os.path.isabs(dir):
            raise TracError(_("The repository directory must be absolute"))
        if is_default(reponame):
            reponame = ''
        rm = RepositoryManager(self.env)
        if type_ and type_ not in rm.get_supported_types():
            raise TracError(_("The repository type '%(type)s' is not "
                              "supported", type=type_))
        with self.env.db_transaction as db:
            id = rm.get_repository_id(reponame)
            db.executemany(
                "INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
                [(id, 'dir', dir),
                 (id, 'type', type_ or '')])
        rm.reload_repositories()

    def add_alias(self, reponame, target):
        """Create an alias repository."""
        if is_default(reponame):
            reponame = ''
        if is_default(target):
            target = ''
        rm = RepositoryManager(self.env)
        repositories = rm.get_all_repositories()
        if target not in repositories:
            raise TracError(_("Repository \"%(repo)s\" doesn't exist",
                              repo=target or '(default)'))
        # Chained aliases (alias of an alias) are not supported.
        if 'alias' in repositories[target]:
            raise TracError(_('Cannot create an alias to the alias "%(repo)s"',
                              repo=target or '(default)'))
        with self.env.db_transaction as db:
            id = rm.get_repository_id(reponame)
            db.executemany(
                "INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
                [(id, 'dir', None),
                 (id, 'alias', target)])
        rm.reload_repositories()

    def remove_repository(self, reponame):
        """Remove a repository."""
        if is_default(reponame):
            reponame = ''
        rm = RepositoryManager(self.env)
        repositories = rm.get_all_repositories()
        # Refuse removal while other repositories still alias this one.
        if any(reponame == repos.get('alias')
               for repos in repositories.itervalues()):
            raise TracError(_('Cannot remove the repository "%(repos)s" used '
                              'in aliases', repos=reponame or '(default)'))
        with self.env.db_transaction as db:
            id = rm.get_repository_id(reponame)
            # Also drop the cached changesets and node changes for this id.
            db("DELETE FROM repository WHERE id=%s", (id,))
            db("DELETE FROM revision WHERE repos=%s", (id,))
            db("DELETE FROM node_change WHERE repos=%s", (id,))
        rm.reload_repositories()

    def modify_repository(self, reponame, changes):
        """Modify attributes of a repository."""
        if is_default(reponame):
            reponame = ''
        new_reponame = changes.get('name', reponame)
        if is_default(new_reponame):
            new_reponame = ''
        rm = RepositoryManager(self.env)
        if reponame != new_reponame:
            # Renaming a repository that aliases point to would break them.
            repositories = rm.get_all_repositories()
            if any(reponame == repos.get('alias')
                   for repos in repositories.itervalues()):
                raise TracError(_('Cannot rename the repository "%(repos)s" '
                                  'used in aliases',
                                  repos=reponame or '(default)'))
        with self.env.db_transaction as db:
            id = rm.get_repository_id(reponame)
            if reponame != new_reponame:
                # The new name must not collide with an existing repository.
                if db("""SELECT id FROM repository WHERE name='name' AND
                         value=%s""", (new_reponame,)):
                    raise TracError(_('The repository "%(name)s" already '
                                      'exists.',
                                      name=new_reponame or '(default)'))
            for (k, v) in changes.iteritems():
                if k not in self.repository_attrs:
                    continue  # silently ignore unknown keys
                if k in ('alias', 'name') and is_default(v):
                    v = ''
                if k in ('hidden', 'sync_per_request'):
                    v = '1' if as_bool(v) else None
                if k == 'dir' and not os.path.isabs(native_path(v)):
                    raise TracError(_("The repository directory must be "
                                      "absolute"))
                # Upsert: try an UPDATE first, INSERT if no row existed.
                db("UPDATE repository SET value=%s WHERE id=%s AND name=%s",
                   (v, id, k))
                if not db(
                        "SELECT value FROM repository WHERE id=%s AND name=%s",
                        (id, k)):
                    db("""INSERT INTO repository (id, name, value)
                          VALUES (%s, %s, %s)
                          """, (id, k, v))
        rm.reload_repositories()
class RepositoryManager(Component):
    """Version control system manager."""

    implements(IRequestFilter, IResourceManager, IRepositoryProvider,
               ITemplateProvider)

    # Resource realm names managed by this component.
    changeset_realm = 'changeset'
    source_realm = 'source'
    repository_realm = 'repository'

    connectors = ExtensionPoint(IRepositoryConnector)
    providers = ExtensionPoint(IRepositoryProvider)
    change_listeners = ExtensionPoint(IRepositoryChangeListener)

    repositories_section = ConfigSection('repositories',
        """One of the methods for registering repositories is to
        populate the `[repositories]` section of `trac.ini`.
        This is especially suited for setting up aliases, using a
        [TracIni#GlobalConfiguration shared configuration], or specifying
        repositories at the time of environment creation.
        See [TracRepositoryAdmin#ReposTracIni TracRepositoryAdmin] for
        details on the format of this section, and look elsewhere on the
        page for information on other repository providers.
        """)

    default_repository_type = Option('versioncontrol',
        'default_repository_type', 'svn',
        """Default repository connector type.
        This is used as the default repository type for repositories
        defined in the [TracIni#repositories-section repositories] section
        or using the "Repositories" admin panel.
        """)

    def __init__(self):
        # _cache maps thread id -> {reponame: Repository}; Repository
        # instances are not shared across threads (see `get_repository`).
        self._cache = {}
        self._lock = threading.Lock()
        # Lazily built environment-level caches, reset by
        # `reload_repositories`.
        self._connectors = None
        self._all_repositories = None

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        """Synchronize, before handling the request, every repository that
        has `sync_per_request` enabled.  Chrome requests are skipped.
        """
        if handler is not Chrome(self.env):
            for repo_info in self.get_all_repositories().values():
                if not as_bool(repo_info.get('sync_per_request')):
                    continue
                start = time_now()
                repo_name = repo_info['name'] or '(default)'
                try:
                    repo = self.get_repository(repo_info['name'])
                    repo.sync()
                except InvalidConnector:
                    continue
                except TracError as e:
                    # Sync failures must not break request handling; warn
                    # the user and keep going.
                    add_warning(req,
                        _("Can't synchronize with repository \"%(name)s\" "
                          "(%(error)s). Look in the Trac log for more "
                          "information.", name=repo_name,
                          error=to_unicode(e)))
                except Exception as e:
                    add_warning(req,
                        _("Failed to sync with repository \"%(name)s\": "
                          "%(error)s; repository information may be out of "
                          "date. Look in the Trac log for more information "
                          "including mitigation strategies.",
                          name=repo_name, error=to_unicode(e)))
                    self.log.error(
                        "Failed to sync with repository \"%s\"; You may be "
                        "able to reduce the impact of this issue by "
                        "configuring the sync_per_request option; see "
                        "http://trac.edgewall.org/wiki/TracRepositoryAdmin"
                        "#ExplicitSync for more detail: %s", repo_name,
                        exception_to_unicode(e, traceback=True))
                self.log.info("Synchronized '%s' repository in %0.2f seconds",
                              repo_name, time_now() - start)
        return handler

    def post_process_request(self, req, template, data, metadata):
        # Nothing to post-process; pass everything through unchanged.
        return template, data, metadata

    # IResourceManager methods

    def get_resource_realms(self):
        yield self.changeset_realm
        yield self.source_realm
        yield self.repository_realm

    def get_resource_description(self, resource, format=None, **kwargs):
        """Return a human-readable description for changeset, source and
        repository resources.
        """
        if resource.realm == self.changeset_realm:
            parent = resource.parent
            reponame = parent and parent.id
            id = resource.id
            if reponame:
                return _("Changeset %(rev)s in %(repo)s", rev=id, repo=reponame)
            else:
                return _("Changeset %(rev)s", rev=id)
        elif resource.realm == self.source_realm:
            parent = resource.parent
            reponame = parent and parent.id
            id = resource.id
            version = ''
            if format == 'summary':
                # Look the node up to qualify it as directory/file.
                repos = self.get_repository(reponame)
                node = repos.get_node(resource.id, resource.version)
                if node.isdir:
                    kind = _("directory")
                elif node.isfile:
                    kind = _("file")
                    if resource.version:
                        version = _(" at version %(rev)s", rev=resource.version)
            else:
                kind = _("path")
                if resource.version:
                    version = '@%s' % resource.version
            in_repo = _(" in %(repo)s", repo=reponame) if reponame else ''
            # TRANSLATOR: file /path/to/file.py at version 13 in reponame
            return _('%(kind)s %(id)s%(at_version)s%(in_repo)s',
                     kind=kind, id=id, at_version=version, in_repo=in_repo)
        elif resource.realm == self.repository_realm:
            if not resource.id:
                return _("Default repository")
            return _("Repository %(repo)s", repo=resource.id)

    def get_resource_url(self, resource, href, **kwargs):
        """Map changeset/source/repository resources to browser URLs."""
        if resource.realm == self.changeset_realm:
            parent = resource.parent
            return href.changeset(resource.id, parent and parent.id or None)
        elif resource.realm == self.source_realm:
            parent = resource.parent
            return href.browser(parent and parent.id or None, resource.id,
                                rev=resource.version or None)
        elif resource.realm == self.repository_realm:
            return href.browser(resource.id or None)

    def resource_exists(self, resource):
        """Check existence by actually retrieving the changeset or node."""
        if resource.realm == self.repository_realm:
            reponame = resource.id
        else:
            reponame = resource.parent.id
        repos = RepositoryManager(self.env).get_repository(reponame)
        if not repos:
            return False
        if resource.realm == self.changeset_realm:
            try:
                repos.get_changeset(resource.id)
                return True
            except NoSuchChangeset:
                return False
        elif resource.realm == self.source_realm:
            try:
                repos.get_node(resource.id, resource.version)
                return True
            except NoSuchNode:
                return False
        elif resource.realm == self.repository_realm:
            return True

    # IRepositoryProvider methods

    def get_repositories(self):
        """Retrieve repositories specified in TracIni.

        The `[repositories]` section can be used to specify a list
        of repositories.
        """
        repositories = self.repositories_section
        reponames = {}
        # first pass to gather the <name>.dir entries
        for option in repositories:
            if option.endswith('.dir') and repositories.get(option):
                reponames[option[:-4]] = {'sync_per_request': False}
        # second pass to gather aliases
        for option in repositories:
            alias = repositories.get(option)
            if '.' not in option:   # Support <alias> = <repo> syntax
                option += '.alias'
            if option.endswith('.alias') and alias in reponames:
                reponames.setdefault(option[:-6], {})['alias'] = alias
        # third pass to gather the <name>.<detail> entries
        for option in repositories:
            if '.' in option:
                name, detail = option.rsplit('.', 1)
                if name in reponames and detail != 'alias':
                    reponames[name][detail] = repositories.get(option)
        for reponame, info in reponames.iteritems():
            yield (reponame, info)

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        from pkg_resources import resource_filename
        return [resource_filename('trac.versioncontrol', 'templates')]

    # Public API methods

    def get_supported_types(self):
        """Return the list of supported repository types."""
        # Negative priorities flag connectors that exist but are unusable.
        types = {type_
                 for connector in self.connectors
                 for (type_, prio) in connector.get_supported_types() or []
                 if prio >= 0}
        return list(types)

    def get_repositories_by_dir(self, directory):
        """Retrieve the repositories based on the given directory.

        :param directory: the key for identifying the repositories.
        :return: list of `Repository` instances.
        """
        # Normalize with a trailing separator so that "/repo" doesn't
        # match "/repo2".
        directory = os.path.join(os.path.normcase(native_path(directory)), '')
        repositories = []
        for reponame, repoinfo in self.get_all_repositories().iteritems():
            dir = native_path(repoinfo.get('dir'))
            if dir:
                dir = os.path.join(os.path.normcase(dir), '')
                if dir.startswith(directory):
                    repos = self.get_repository(reponame)
                    if repos:
                        repositories.append(repos)
        return repositories

    def get_repository_id(self, reponame):
        """Return a unique id for the given repository name.

        This will create and save a new id if none is found.

        Note: this should probably be renamed as we're dealing
        exclusively with *db* repository ids here.
        """
        with self.env.db_transaction as db:
            for id, in db(
                    "SELECT id FROM repository WHERE name='name' AND value=%s",
                    (reponame,)):
                return id
            # No row yet: allocate the next id and register the name.
            id = db("SELECT COALESCE(MAX(id), 0) FROM repository")[0][0] + 1
            db("INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
               (id, 'name', reponame))
            return id

    def get_repository(self, reponame):
        """Retrieve the appropriate `Repository` for the given
        repository name.

        :param reponame: the key for specifying the repository.
                         If no name is given, take the default
                         repository.
        :return: if no corresponding repository was defined,
                 simply return `None`.

        :raises InvalidConnector: if the repository connector cannot be
                                  opened.
        :raises InvalidRepository: if the repository cannot be opened.
        """
        reponame = reponame or ''
        repoinfo = self.get_all_repositories().get(reponame, {})
        if 'alias' in repoinfo:
            # One level of alias indirection only (aliases to aliases are
            # rejected at registration time).
            reponame = repoinfo['alias']
            repoinfo = self.get_all_repositories().get(reponame, {})
        rdir = native_path(repoinfo.get('dir'))
        if not rdir:
            return None
        rtype = repoinfo.get('type') or self.default_repository_type

        # get a Repository for the reponame (use a thread-level cache)
        with self.env.db_transaction:  # prevent possible deadlock, see #4465
            with self._lock:
                tid = get_thread_id()
                if tid in self._cache:
                    repositories = self._cache[tid]
                else:
                    repositories = self._cache[tid] = {}
                repos = repositories.get(reponame)
                if not repos:
                    if not os.path.isabs(rdir):
                        # Relative paths are resolved against the env dir.
                        rdir = os.path.join(self.env.path, rdir)
                    connector = self._get_connector(rtype)
                    repos = connector.get_repository(rtype, rdir,
                                                     repoinfo.copy())
                    repositories[reponame] = repos
                return repos

    def get_repository_by_path(self, path):
        """Retrieve a matching `Repository` for the given `path`.

        :param path: the eventually scoped repository-scoped path
        :return: a `(reponame, repos, path)` triple, where `path` is
                 the remaining part of `path` once the `reponame` has
                 been truncated, if needed.
        """
        matches = []
        path = path.strip('/') + '/' if path else '/'
        for reponame in self.get_all_repositories():
            stripped_reponame = reponame.strip('/') + '/'
            if path.startswith(stripped_reponame):
                matches.append((len(stripped_reponame), reponame))
        if matches:
            # Longest prefix wins.
            matches.sort()
            length, reponame = matches[-1]
            path = path[length:]
        else:
            reponame = ''
        return (reponame, self.get_repository(reponame),
                path.rstrip('/') or '/')

    def get_default_repository(self, context):
        """Recover the appropriate repository from the current context.

        Lookup the closest source or changeset resource in the context
        hierarchy and return the name of its associated repository.
        """
        while context:
            if context.resource.realm in (self.source_realm,
                                          self.changeset_realm) and \
                    context.resource.parent:
                return context.resource.parent.id
            context = context.parent

    def get_all_repositories(self):
        """Return a dictionary of repository information, indexed by name."""
        if not self._all_repositories:
            all_repositories = {}
            for provider in self.providers:
                for reponame, info in provider.get_repositories() or []:
                    if reponame in all_repositories:
                        # First provider wins for a given name.
                        self.log.warning("Discarding duplicate repository "
                                         "'%s'", reponame)
                    else:
                        info['name'] = reponame
                        if 'id' not in info:
                            info['id'] = self.get_repository_id(reponame)
                        all_repositories[reponame] = info
            self._all_repositories = all_repositories
        return self._all_repositories

    def get_real_repositories(self):
        """Return a sorted list of all real repositories (i.e. excluding
        aliases).
        """
        repositories = set()
        for reponame in self.get_all_repositories():
            try:
                repos = self.get_repository(reponame)
            except TracError:
                pass  # Skip invalid repositories
            else:
                if repos is not None:
                    repositories.add(repos)
        return sorted(repositories, key=lambda r: r.reponame)

    def reload_repositories(self):
        """Reload the repositories from the providers."""
        with self._lock:
            # FIXME: trac-admin doesn't reload the environment
            self._cache = {}
            self._all_repositories = None
        self.config.touch()     # Force environment reload

    def notify(self, event, reponame, revs):
        """Notify repositories and change listeners about repository events.

        The supported events are the names of the methods defined in the
        `IRepositoryChangeListener` interface.
        """
        self.log.debug("Event %s on repository '%s' for changesets %r",
                       event, reponame or '(default)', revs)

        # Notify a repository by name, and all repositories with the same
        # base, or all repositories by base or by repository dir
        repos = self.get_repository(reponame)
        repositories = []
        if repos:
            base = repos.get_base()
        else:
            # `reponame` may actually be a directory path.
            dir = os.path.abspath(reponame)
            repositories = self.get_repositories_by_dir(dir)
            if repositories:
                base = None
            else:
                base = reponame
        if base:
            repositories = [r for r in self.get_real_repositories()
                            if r.get_base() == base]
        if not repositories:
            self.log.warning("Found no repositories matching '%s' base.",
                             base or reponame)
            return [_("Repository '%(repo)s' not found",
                      repo=reponame or _("(default)"))]
        errors = []
        for repos in sorted(repositories, key=lambda r: r.reponame):
            reponame = repos.reponame or '(default)'
            repos.sync()
            for rev in revs:
                args = []
                if event == 'changeset_modified':
                    # Listeners receive the pre-modification metadata as an
                    # extra argument.
                    try:
                        old_changeset = repos.sync_changeset(rev)
                    except NoSuchChangeset as e:
                        errors.append(exception_to_unicode(e))
                        self.log.warning(
                            "No changeset '%s' found in repository '%s'. "
                            "Skipping subscribers for event %s",
                            rev, reponame, event)
                        continue
                    else:
                        args.append(old_changeset)
                try:
                    changeset = repos.get_changeset(rev)
                except NoSuchChangeset:
                    # The cache may be stale: resync once and retry.
                    try:
                        repos.sync_changeset(rev)
                        changeset = repos.get_changeset(rev)
                    except NoSuchChangeset as e:
                        errors.append(exception_to_unicode(e))
                        self.log.warning(
                            "No changeset '%s' found in repository '%s'. "
                            "Skipping subscribers for event %s",
                            rev, reponame, event)
                        continue
                self.log.debug("Event %s on repository '%s' for revision '%s'",
                               event, reponame, rev)
                for listener in self.change_listeners:
                    getattr(listener, event)(repos, changeset, *args)
        return errors

    def shutdown(self, tid=None):
        """Free `Repository` instances bound to a given thread identifier"""
        if tid:
            assert tid == get_thread_id()
            with self._lock:
                repositories = self._cache.pop(tid, {})
                for reponame, repos in repositories.iteritems():
                    repos.close()

    def read_file_by_path(self, path):
        """Read the file specified by `path`

        :param path: the repository-scoped path. The repository revision may
                     specified by appending `@` followed by the revision,
                     otherwise the HEAD revision is assumed.
        :return: the file content as a unicode string. `None` is returned if
                 the file is not found.

        :since: 1.2.2
        """
        repos, path = self.get_repository_by_path(path)[1:]
        if not repos:
            return None
        rev = None
        if '@' in path:
            # NOTE: splits at the first '@'; paths containing '@' must not
            # also carry a revision specifier.
            path, rev = path.split('@', 1)
        try:
            node = repos.get_node(path, rev)
        except (NoSuchChangeset, NoSuchNode):
            return None
        content = node.get_content()
        if content:
            return to_unicode(content.read())

    # private methods

    def _get_connector(self, rtype):
        """Retrieve the appropriate connector for the given repository type.

        Note that the self._lock must be held when calling this method.
        """
        if self._connectors is None:
            # build an environment-level cache for the preferred connectors
            self._connectors = {}
            for connector in self.connectors:
                for type_, prio in connector.get_supported_types() or []:
                    keep = (connector, prio)
                    if type_ in self._connectors and \
                            prio <= self._connectors[type_][1]:
                        keep = None
                    if keep:
                        self._connectors[type_] = keep
        if rtype in self._connectors:
            connector, prio = self._connectors[rtype]
            if prio >= 0:   # no error condition
                return connector
            else:
                raise InvalidConnector(
                    _('Unsupported version control system "%(name)s"'
                      ': %(error)s', name=rtype,
                      error=to_unicode(connector.error)))
        else:
            raise InvalidConnector(
                _('Unsupported version control system "%(name)s": '
                  'Can\'t find an appropriate component, maybe the '
                  'corresponding plugin was not enabled? ', name=rtype))
class NoSuchChangeset(ResourceNotFound):
    """Raised when a revision cannot be found in the repository."""

    def __init__(self, rev):
        message = _('No changeset %(rev)s in the repository', rev=rev)
        title = _('No such changeset')
        ResourceNotFound.__init__(self, message, title)
class NoSuchNode(ResourceNotFound):
    """Raised when no node exists at a given path and revision."""

    def __init__(self, path, rev, msg=None):
        # Prefix the standard message with the caller-supplied detail,
        # when one was given.
        if msg is None:
            detail = _("No node %(path)s at revision %(rev)s",
                       path=path, rev=rev)
        else:
            detail = _("%(msg)s: No node %(path)s at revision %(rev)s",
                       msg=msg, path=path, rev=rev)
        ResourceNotFound.__init__(self, detail, _('No such node'))
class Repository(object):
    """Base class for a repository provided by a version control system."""

    __metaclass__ = ABCMeta

    # Whether revisions form a single linear sequence (no branching).
    has_linear_changesets = False

    # Path prefix within the backend this repository is restricted to.
    scope = '/'

    realm = RepositoryManager.repository_realm

    @property
    def resource(self):
        # Resource identifying this repository in the permission system.
        return Resource(self.realm, self.reponame)

    def __init__(self, name, params, log):
        """Initialize a repository.

        :param name: a unique name identifying the repository, usually a
                     type-specific prefix followed by the path to the
                     repository.
        :param params: a `dict` of parameters for the repository. Contains
                       the name of the repository under the key "name" and
                       the surrogate key that identifies the repository in
                       the database under the key "id".
        :param log: a logger instance.

        :raises InvalidRepository: if the repository cannot be opened.
        """
        self.name = name
        self.params = params
        self.reponame = params['name']
        self.id = params['id']
        self.log = log

    def __repr__(self):
        return '<%s %r %r %r>' % (self.__class__.__name__,
                                  self.id, self.name, self.scope)

    @abstractmethod
    def close(self):
        """Close the connection to the repository."""
        pass

    def get_base(self):
        """Return the name of the base repository for this repository.

        This function returns the name of the base repository to which scoped
        repositories belong. For non-scoped repositories, it returns the
        repository name.
        """
        return self.name

    def clear(self, youngest_rev=None):
        """Clear any data that may have been cached in instance properties.

        `youngest_rev` can be specified as a way to force the value
        of the `youngest_rev` property (''will change in 0.12'').
        """
        pass

    def sync(self, rev_callback=None, clean=False):
        """Perform a sync of the repository cache, if relevant.

        If given, `rev_callback` must be a callable taking a `rev` parameter.
        The backend will call this function for each `rev` it decided to
        synchronize, once the synchronization changes are committed to the
        cache. When `clean` is `True`, the cache is cleaned first.
        """
        pass

    def sync_changeset(self, rev):
        """Resync the repository cache for the given `rev`, if relevant.

        Returns a "metadata-only" changeset containing the metadata prior to
        the resync, or `None` if the old values cannot be retrieved (typically
        when the repository is not cached).
        """
        return None

    def get_quickjump_entries(self, rev):
        """Generate a list of interesting places in the repository.

        `rev` might be used to restrict the list of available locations,
        but in general it's best to produce all known locations.

        The generated results must be of the form (category, name, path, rev).
        """
        return []

    def get_path_url(self, path, rev):
        """Return the repository URL for the given path and revision.

        The returned URL can be `None`, meaning that no URL has been specified
        for the repository, an absolute URL, or a scheme-relative URL starting
        with `//`, in which case the scheme of the request should be prepended.
        """
        return None

    @abstractmethod
    def get_changeset(self, rev):
        """Retrieve a Changeset corresponding to the given revision `rev`."""
        pass

    def get_changeset_uid(self, rev):
        """Return a globally unique identifier for the ''rev'' changeset.

        Two changesets from different repositories can sometimes refer to
        the ''very same'' changeset (e.g. the repositories are clones).
        """

    def get_changesets(self, start, stop):
        """Generate Changeset belonging to the given time period (start, stop).
        """
        # Walk backwards from the youngest revision; revisions are assumed
        # to be ordered by date, so stop at the first one older than
        # `start`.
        rev = self.youngest_rev
        while rev:
            chgset = self.get_changeset(rev)
            if chgset.date < start:
                return
            if chgset.date < stop:
                yield chgset
            rev = self.previous_rev(rev)

    def has_node(self, path, rev=None):
        """Tell if there's a node at the specified (path,rev) combination.

        When `rev` is `None`, the latest revision is implied.
        """
        try:
            self.get_node(path, rev)
            return True
        except TracError:
            return False

    @abstractmethod
    def get_node(self, path, rev=None):
        """Retrieve a Node from the repository at the given path.

        A Node represents a directory or a file at a given revision in the
        repository.
        If the `rev` parameter is specified, the Node corresponding to that
        revision is returned, otherwise the Node corresponding to the youngest
        revision is returned.
        """
        pass

    @abstractmethod
    def get_oldest_rev(self):
        """Return the oldest revision stored in the repository."""
        pass
    oldest_rev = property(lambda self: self.get_oldest_rev())

    @abstractmethod
    def get_youngest_rev(self):
        """Return the youngest revision in the repository."""
        pass
    youngest_rev = property(lambda self: self.get_youngest_rev())

    @abstractmethod
    def previous_rev(self, rev, path=''):
        """Return the revision immediately preceding the specified revision.

        If `path` is given, filter out ancestor revisions having no changes
        below `path`.

        In presence of multiple parents, this follows the first parent.
        """
        pass

    @abstractmethod
    def next_rev(self, rev, path=''):
        """Return the revision immediately following the specified revision.

        If `path` is given, filter out descendant revisions having no changes
        below `path`.

        In presence of multiple children, this follows the first child.
        """
        pass

    def parent_revs(self, rev):
        """Return a list of parents of the specified revision."""
        # Default implementation for linear histories: at most one parent.
        parent = self.previous_rev(rev)
        return [parent] if parent is not None else []

    @abstractmethod
    def rev_older_than(self, rev1, rev2):
        """Provides a total order over revisions.

        Return `True` if `rev1` is an ancestor of `rev2`.
        """
        pass

    @abstractmethod
    def get_path_history(self, path, rev=None, limit=None):
        """Retrieve all the revisions containing this path.

        If given, `rev` is used as a starting point (i.e. no revision
        ''newer'' than `rev` should be returned).
        The result format should be the same as the one of Node.get_history()
        """
        pass

    @abstractmethod
    def normalize_path(self, path):
        """Return a canonical representation of path in the repos."""
        pass

    @abstractmethod
    def normalize_rev(self, rev):
        """Return a (unique) canonical representation of a revision.

        It's up to the backend to decide which string values of `rev`
        (usually provided by the user) should be accepted, and how they
        should be normalized. Some backends may for instance want to match
        against known tags or branch names.

        In addition, if `rev` is `None` or '', the youngest revision should
        be returned.

        :raise NoSuchChangeset: If the given `rev` isn't found.
        """
        pass

    def short_rev(self, rev):
        """Return a compact string representation of a revision in the
        repos.

        :raise NoSuchChangeset: If the given `rev` isn't found.

        :since 1.2: Always returns a string or `None`.
        """
        norm_rev = self.normalize_rev(rev)
        return str(norm_rev) if norm_rev is not None else norm_rev

    def display_rev(self, rev):
        """Return a string representation of a revision in the repos for
        displaying to the user.

        This can be a shortened revision string, e.g. for repositories
        using long hashes.

        :raise NoSuchChangeset: If the given `rev` isn't found.

        :since 1.2: Always returns a string or `None`.
        """
        norm_rev = self.normalize_rev(rev)
        return str(norm_rev) if norm_rev is not None else norm_rev

    @abstractmethod
    def get_changes(self, old_path, old_rev, new_path, new_rev,
                    ignore_ancestry=1):
        """Generates changes corresponding to generalized diffs.

        Generator that yields change tuples (old_node, new_node, kind, change)
        for each node change between the two arbitrary (path,rev) pairs.

        The old_node is assumed to be None when the change is an ADD,
        the new_node is assumed to be None when the change is a DELETE.
        """
        pass

    def is_viewable(self, perm):
        """Return True if view permission is granted on the repository."""
        return 'BROWSER_VIEW' in perm(self.resource.child('source', '/'))

    can_view = is_viewable  # 0.12 compatibility
class Node(object):
"""Represents a directory or file in the repository at a given revision."""
__metaclass__ = ABCMeta
DIRECTORY = "dir"
FILE = "file"
realm = RepositoryManager.source_realm
@property
def resource(self):
return Resource(self.realm, self.path, self.rev, self.repos.resource)
# created_path and created_rev properties refer to the Node "creation"
# in the Subversion meaning of a Node in a versioned tree (see #3340).
#
# Those properties must be set by subclasses.
#
created_rev = None
created_path = None
def __init__(self, repos, path, rev, kind):
assert kind in (Node.DIRECTORY, Node.FILE), \
"Unknown node kind %s" % kind
self.repos = repos
self.path = to_unicode(path)
self.rev = rev
self.kind = kind
def __repr__(self):
name = u'%s:%s' % (self.repos.name, self.path)
if self.rev is not None:
name += '@' + unicode(self.rev)
return '<%s %r>' % (self.__class__.__name__, name)
@abstractmethod
def get_content(self):
"""Return a stream for reading the content of the node.
This method will return `None` for directories.
The returned object must support a `read([len])` method.
"""
pass
def get_processed_content(self, keyword_substitution=True, eol_hint=None):
"""Return a stream for reading the content of the node, with some
standard processing applied.
:param keyword_substitution: if `True`, meta-data keywords
present in the content like ``$Rev$`` are substituted
(which keyword are substituted and how they are
substituted is backend specific)
:param eol_hint: which style of line ending is expected if
`None` was explicitly specified for the file itself in
the version control backend (for example in Subversion,
if it was set to ``'native'``). It can be `None`,
``'LF'``, ``'CR'`` or ``'CRLF'``.
"""
return self.get_content()
@abstractmethod
def get_entries(self):
"""Generator that yields the immediate child entries of a directory.
The entries are returned in no particular order.
If the node is a file, this method returns `None`.
"""
pass
@abstractmethod
def get_history(self, limit=None):
"""Provide backward history for this Node.
Generator that yields `(path, rev, chg)` tuples, one for each revision
in which the node was changed. This generator will follow copies and
moves of a node (if the underlying version control system supports
that), which will be indicated by the first element of the tuple
(i.e. the path) changing.
Starts with an entry for the current revision.
:param limit: if given, yield at most ``limit`` results.
"""
pass
def get_previous(self):
    """Return the change event corresponding to the previous revision.

    This returns a `(path, rev, chg)` tuple, or `None` implicitly when
    the node has no previous revision.
    """
    history = iter(self.get_history(2))
    # The first entry always describes the current revision; drop it.
    next(history, None)
    return next(history, None)
@abstractmethod
def get_annotations(self):
    """Provide detailed backward history for the content of this Node.

    Retrieve an array of revisions, one `rev` for each line of content
    for that node.

    Only expected to work on (text) FILE nodes, of course.
    """
    pass
@abstractmethod
def get_properties(self):
    """Returns the properties (meta-data) of the node, as a dictionary.

    The set of properties depends on the version control system.
    """
    pass
@abstractmethod
def get_content_length(self):
    """The length in bytes of the content.

    Will be `None` for a directory.
    """
    pass

# Attribute-style access to get_content_length().
content_length = property(lambda self: self.get_content_length())
@abstractmethod
def get_content_type(self):
    """The MIME type corresponding to the content, if known.

    Will be `None` for a directory.
    """
    pass

# Attribute-style access to get_content_type().
content_type = property(lambda self: self.get_content_type())
def get_name(self):
    """Return the base name of the node, i.e. the last path segment."""
    return self.path.rsplit('/', 1)[-1]

# Attribute-style access to get_name().
name = property(lambda self: self.get_name())
@abstractmethod
def get_last_modified(self):
    """Return the last modification time of the node (backend-specific)."""
    pass

last_modified = property(lambda self: self.get_last_modified())

# Kind predicates, for convenience.
isdir = property(lambda self: self.kind == Node.DIRECTORY)
isfile = property(lambda self: self.kind == Node.FILE)
def is_viewable(self, perm):
    """Return True if view permission is granted on the node."""
    # Directories are guarded by BROWSER_VIEW, files by FILE_VIEW.
    return ('BROWSER_VIEW' if self.isdir else 'FILE_VIEW') \
        in perm(self.resource)

can_view = is_viewable  # 0.12 compatibility
class Changeset(object):
    """Represents a set of changes committed at once in a repository."""

    __metaclass__ = ABCMeta

    # Change type constants, as yielded by `get_changes`.
    ADD = 'add'
    COPY = 'copy'
    DELETE = 'delete'
    EDIT = 'edit'
    MOVE = 'move'

    # change types which can have diff associated to them
    DIFF_CHANGES = (EDIT, COPY, MOVE)  # MERGE
    OTHER_CHANGES = (ADD, DELETE)
    ALL_CHANGES = DIFF_CHANGES + OTHER_CHANGES

    realm = RepositoryManager.changeset_realm

    @property
    def resource(self):
        # Resource descriptor used for permission checks, keyed on `rev`.
        return Resource(self.realm, self.rev, parent=self.repos.resource)

    def __init__(self, repos, rev, message, author, date):
        """Initialize the changeset for revision *rev* of *repos*.

        *message* and *author* fall back to the empty string when falsy.
        """
        self.repos = repos
        self.rev = rev
        self.message = message or ''
        self.author = author or ''
        self.date = date

    def __repr__(self):
        # Debug representation, e.g. <Changeset u'reponame@123'>.
        name = u'%s@%s' % (self.repos.name, self.rev)
        return '<%s %r>' % (self.__class__.__name__, name)

    def get_properties(self):
        """Returns the properties (meta-data) of the node, as a dictionary.

        The set of properties depends on the version control system.

        Warning: this used to yield 4-elements tuple (besides `name` and
        `text`, there were `wikiflag` and `htmlclass` values).
        This is now replaced by the usage of IPropertyRenderer (see #1601).
        """
        return []

    @abstractmethod
    def get_changes(self):
        """Generator that produces a tuple for every change in the changeset.

        The tuple will contain `(path, kind, change, base_path, base_rev)`,
        where `change` can be one of Changeset.ADD, Changeset.COPY,
        Changeset.DELETE, Changeset.EDIT or Changeset.MOVE,
        and `kind` is one of Node.FILE or Node.DIRECTORY.
        The `path` is the targeted path for the `change` (which is
        the ''deleted'' path for a DELETE change).
        The `base_path` and `base_rev` are the source path and rev for the
        action (`None` and `-1` in the case of an ADD change).
        """
        pass

    def get_branches(self):
        """Yield branches to which this changeset belong.

        Each branch is given as a pair `(name, head)`, where `name` is
        the branch name and `head` a flag set if the changeset is a head
        for this branch (i.e. if it has no children changeset).
        """
        # Default: backend does not expose branch information.
        return []

    def get_tags(self):
        """Yield tags associated with this changeset.

        .. versionadded :: 1.0
        """
        return []

    def get_bookmarks(self):
        """Yield bookmarks associated with this changeset.

        .. versionadded :: 1.1.5
        """
        return []

    def is_viewable(self, perm):
        """Return True if view permission is granted on the changeset."""
        return 'CHANGESET_VIEW' in perm(self.resource)

    can_view = is_viewable  # 0.12 compatibility
class EmptyChangeset(Changeset):
    """Changeset that contains no changes. This is typically used when the
    changeset can't be retrieved."""

    def __init__(self, repos, rev, message=None, author=None, date=None):
        # Fall back to the Unix epoch when no date is available.
        fallback = datetime(1970, 1, 1, tzinfo=utc)
        super(EmptyChangeset, self).__init__(
            repos, rev, message, author,
            date if date is not None else fallback)

    def get_changes(self):
        # An empty changeset has, by definition, nothing to yield.
        return iter(())
# Note: Since Trac 0.12, Exception PermissionDenied class is gone,
# and class Authorizer is gone as well.
#
# Fine-grained permissions are now handled via normal permission policies.
| rbaumg/trac | trac/versioncontrol/api.py | Python | bsd-3-clause | 52,009 |
# coding=latin-1
from flask import request, g
from flask import abort, flash
from functools import wraps
def checa_permissao(permissao):
    """Decorator factory: gate a Flask view behind *permissao*.

    When the logged-in user lacks the permission, a notice is flashed
    and the request is aborted with HTTP 401.
    """
    def decorator(f):
        @wraps(f)
        def inner(*args, **kwargs):
            autorizado = g.user and g.user.checa_permissao(permissao)
            if not autorizado:
                flash(u'Atenção você não possui a permissão: %s. Se isto não estiver correto, entre em contato solicitando esta permissão.' % permissao.upper(), u'notice')
                # abort() raises, so the view body is never reached.
                abort(401)
            return f(*args, **kwargs)
        return inner
    return decorator
| dedeco/cnddh-denuncias | cnddh/decorators.py | Python | apache-2.0 | 593 |
from testbase import *
import simpleobsoletestests
# Obsolete for conflict
class ComplicatedTests(OperationsTests):
    """Obsoletes interacting with package conflicts."""

    @staticmethod
    def buildPkgs(pkgs, *args):
        # Reuse the standard obsoletes fixture, then add a conflicting package.
        simpleobsoletestests.SimpleObsoletesTests.buildPkgs(pkgs)
        # conflicts
        pkgs.conflicts = FakePackage('super-zippy', '0.3', '1', '0', 'i386')
        pkgs.conflicts.addConflicts('zsh', 'EQ', ('0', '1', '1'))

    def testObsoleteForConflict(self):
        """Installing a conflicting package should pull in the obsoleter
        of the installed package it conflicts with."""
        p = self.pkgs
        res, msg = self.runOperation(['install', 'super-zippy'],
                                     [p.installed_i386],
                                     [p.obsoletes_i386, p.obsoletes_x86_64, p.conflicts])
        if new_behavior:
            self.assert_(res == 'ok', msg)
            self.assertResult((p.obsoletes_i386, p.conflicts))
class CombinedUpdateObsoletesTest(OperationsTests):
    """Updates where the newer packages also obsolete the older ones."""

    @staticmethod
    def buildPkgs(pkgs, *args):
        # Version 1 of a k/k-devel/klibs package family.
        pkgs.k_1 = FakePackage('k', '3.5')
        pkgs.kdevel_1 = FakePackage('k-devel', '3.5')
        pkgs.kdevel_1.addRequires('k')
        pkgs.klibs_1_i386 = FakePackage('klibs', '3.5', arch='i386')
        pkgs.klibs_1_x86_64 = FakePackage('klibs', '3.5', arch='x86_64')
        # Version 2: klibs obsoletes both older klibs and older k.
        pkgs.k_2 = FakePackage('k', '3.5', '2')
        pkgs.kdevel_2 = FakePackage('k-devel', '3.5', '2')
        pkgs.kdevel_2.addRequires('k')
        pkgs.klibs_2_i386 = FakePackage('klibs', '3.5', '2', arch='i386')
        pkgs.klibs_2_i386.addObsoletes('klibs', 'LT', (None, '3.5', '2'))
        pkgs.klibs_2_i386.addObsoletes('k', 'LT', (None, '3.5', '2'))
        pkgs.klibs_2_x86_64 = FakePackage('klibs', '3.5', '2', arch='x86_64')
        pkgs.klibs_2_x86_64.addObsoletes('klibs', 'LT', (None, '3.5', '2'))
        pkgs.klibs_2_x86_64.addObsoletes('k', 'LT', (None, '3.5', '2'))

    def testSelfObsolete(self):
        """A package obsoleting its own older name keeps the installed arch."""
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.klibs_1_x86_64],
                                     [p.klibs_2_i386, p.klibs_2_x86_64])
        self.assert_(res == 'ok', msg)
        self.assertResult((p.klibs_2_x86_64,))

    def testPackageSplitWithObsoleteAndRequiresForUpdate(self):
        """Updating a split package family must keep requires satisfied."""
        p = self.pkgs
        res, msg = self.runOperation(['update'],
                                     [p.k_1, p.kdevel_1, p.klibs_1_x86_64],
                                     [p.k_2, p.kdevel_2, p.klibs_2_x86_64])
        self.assert_(res == 'ok', msg)
        self.assertResult((p.k_2, p.kdevel_2, p.klibs_2_x86_64,))
class ComplicatedObsoletesTests(OperationsTests):
    """Chained and circular obsoletes: foo -> bar -> baz, and foo -> baz."""

    @staticmethod
    def buildPkgs(pkgs, *args):
        pkgs.installed = FakePackage('foo', '1.4', '1')
        # obsoletecircle closes the loop: foo obsoletes baz.
        pkgs.obsoletecircle = FakePackage('foo', '1.4', '1')
        pkgs.obsoletecircle.addObsoletes('baz')
        pkgs.obsoletes = FakePackage('bar', '1.2', '1')
        pkgs.obsoletes.addObsoletes('foo')
        pkgs.obsoletes2 = FakePackage('baz', '1.8', '1')
        pkgs.obsoletes2.addObsoletes('bar')

    def testObsoleteChain(self):
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.installed],
                                     [p.obsoletes, p.obsoletes2])
        self.assert_(res == 'ok', msg)
        # NOTE(review): 'True or' forces the first branch unconditionally;
        # this looks like a deliberate pin of the expected behavior -- confirm.
        if True or new_behavior:
            self.assertResult((p.obsoletes2,))
        else:
            self.assertResult((p.obsoletes,))

    def testObsoleteChainNext(self):
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.obsoletes], [p.obsoletes2])
        self.assert_(res == 'ok', msg)
        self.assertResult((p.obsoletes2,))

    def testObsoleteCircle(self):
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.obsoletecircle],
                                     [p.obsoletes, p.obsoletes2])
        self.assert_(res == 'ok', msg)
        if new_behavior:
            self.assertResult((p.obsoletecircle,))
        else:
            self.assertResult((p.obsoletes2,))

    def testObsoleteCircleNext(self):
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.obsoletes],
                                     [p.obsoletecircle, p.obsoletes, p.obsoletes2])
        self.assert_(res == 'ok', msg)
        if new_behavior:
            self.assertResult((p.obsoletes,))
        else:
            self.assertResult((p.obsoletes2,))

    def testObsoleteCircleNextNext(self):
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.obsoletes2],
                                     [p.obsoletecircle, p.obsoletes, p.obsoletes2])
        self.assert_(res == 'ok', msg)
        if new_behavior:
            self.assertResult((p.obsoletes2,))
        else:
            self.assertResult((p.obsoletecircle,))

    def testObsoleteCircleNextNextNext(self):
        p = self.pkgs
        res, msg = self.runOperation(['update'], [p.obsoletecircle],
                                     [p.obsoletes, p.obsoletes2])
        self.assert_(res == 'ok', msg)
        if new_behavior:
            self.assertResult((p.obsoletecircle,))
        else:
            self.assertResult((p.obsoletes2,))
    # continue endlessly
class KernelTests(OperationsTests):
    """Installing kernels: installonly packages, multiple versions/arches."""

    @staticmethod
    def buildPkgs(pkgs, *args):
        pkgs.inst = []
        pkgs.inst.append(FakePackage('kernel', '2.6.23.8', '63', arch='i686'))
        pkgs.inst.append(FakePackage('kernel', '2.6.23.1', '49', arch='i686'))
        pkgs.avail = []
        pkgs.avail.append(FakePackage('kernel', '2.6.23.8', '63', arch='i686'))
        pkgs.avail.append(FakePackage('kernel', '2.6.23.8', '63', arch='i586'))
        pkgs.avail.append(FakePackage('kernel', '2.6.23.1', '49', arch='i686'))
        pkgs.avail.append(FakePackage('kernel', '2.6.23.1', '49', arch='i586'))
        pkgs.avail.append(FakePackage('kernel', '2.6.23.1', '42', arch='i686'))
        pkgs.avail.append(FakePackage('kernel', '2.6.23.1', '42', arch='i586'))

    # NOTE(review): these tests check only the resulting package set and
    # deliberately ignore `res`/`msg` (a no-op install is acceptable) -- confirm.
    def testKernelInstall1(self):
        """Installing 'kernel' when the newest is installed changes nothing."""
        p = self.pkgs
        res, msg = self.runOperation(['install', 'kernel'], p.inst, p.avail)
        self.assertResult(p.inst)

    def testKernelInstall2(self):
        """Installing an older explicit version adds it alongside (installonly)."""
        p = self.pkgs
        res, msg = self.runOperation(['install', 'kernel-2.6.23.1-42'], p.inst, p.avail)
        self.assertResult(p.inst + [p.avail[4]])

    def testKernelInstall3(self):
        """Installing an already-installed version changes nothing."""
        p = self.pkgs
        res, msg = self.runOperation(['install', 'kernel-2.6.23.8'], p.inst, p.avail)
        self.assertResult(p.inst)
class MultiLibTests(OperationsTests):
    """Multilib install/downgrade policies ('best' vs 'all') across arches."""

    @staticmethod
    def buildPkgs(pkgs, *args):
        pkgs.inst = []
        pkgs.i_foo_1_12_x = FakePackage('foo', '1', '12', arch='x86_64')
        pkgs.i_wbar_1_12_i = FakePackage('wbar', '1', '12', arch='i586')
        pkgs.inst.append(pkgs.i_foo_1_12_x)
        pkgs.inst.append(pkgs.i_wbar_1_12_i)

        pkgs.avail = []
        pkgs.a_foo_0_2_x = FakePackage('foo', '0', '2', arch='x86_64')
        pkgs.a_foo_0_2_i = FakePackage('foo', '0', '2', arch='i686')
        pkgs.a_foo_1_12_x = FakePackage('foo', '1', '12', arch='x86_64')
        pkgs.a_foo_1_12_i = FakePackage('foo', '1', '12', arch='i686')
        pkgs.a_foo_2_22_x = FakePackage('foo', '2', '22', arch='x86_64')
        pkgs.a_foo_2_22_i = FakePackage('foo', '2', '22', arch='i686')
        pkgs.a_bar_1_12_x = FakePackage('bar', '1', '12', arch='x86_64')
        pkgs.a_bar_1_12_i = FakePackage('bar', '1', '12', arch='i686')
        pkgs.a_bar_2_22_x = FakePackage('bar', '2', '22', arch='x86_64')
        pkgs.a_bar_2_22_i = FakePackage('bar', '2', '22', arch='i686')
        # ibar is .i?86 older
        pkgs.a_ibar_2_22_x = FakePackage('ibar', '2', '22', arch='x86_64')
        pkgs.a_ibar_1_12_i = FakePackage('ibar', '1', '12', arch='i686')
        # xbar is .x86_64 older
        pkgs.a_xbar_1_12_x = FakePackage('xbar', '1', '12', arch='x86_64')
        pkgs.a_xbar_2_22_i = FakePackage('xbar', '2', '22', arch='i686')
        # wbar is arch changing update/downgrade
        pkgs.a_wbar_0_2_i = FakePackage('wbar', '0', '2', arch='i386')
        pkgs.a_wbar_2_22_i = FakePackage('wbar', '2', '22', arch='i686')

        for i in ('a_foo_0_2', 'a_foo_1_12', 'a_foo_2_22',
                  'a_bar_1_12', 'a_bar_2_22'):
            pkgs.avail.append(getattr(pkgs, i + '_x'))
            pkgs.avail.append(getattr(pkgs, i + '_i'))
        pkgs.avail.append(pkgs.a_ibar_2_22_x)
        pkgs.avail.append(pkgs.a_ibar_1_12_i)
        pkgs.avail.append(pkgs.a_xbar_1_12_x)
        pkgs.avail.append(pkgs.a_xbar_2_22_i)
        pkgs.avail.append(pkgs.a_wbar_0_2_i)
        pkgs.avail.append(pkgs.a_wbar_2_22_i)

    def testBestInstall1(self):
        """Default ('best') policy installs only the native-arch package."""
        p = self.pkgs
        ninst = p.inst[:]
        ninst.append(p.a_bar_2_22_x)
        res, msg = self.runOperation(['install', 'bar'], p.inst, p.avail)
        self.assertResult(ninst)

    def testBestInstall2(self):
        p = self.pkgs
        ninst = p.inst[:]
        ninst.append(p.a_bar_1_12_x)
        res, msg = self.runOperation(['install', 'bar-1'], p.inst, p.avail)
        self.assertResult(ninst)

    def testAllInstall1(self):
        """'all' policy installs every compatible arch of the package."""
        p = self.pkgs
        ninst = p.inst[:]
        ninst.append(p.a_bar_2_22_x)
        ninst.append(p.a_bar_2_22_i)
        res, msg = self.runOperation(['install', 'bar'], p.inst, p.avail,
                                     {'multilib_policy': 'all'})
        self.assertResult(ninst)

    def testAllInstall2(self):
        p = self.pkgs
        ninst = p.inst[:]
        ninst.append(p.a_bar_1_12_x)
        ninst.append(p.a_bar_1_12_i)
        res, msg = self.runOperation(['install', 'bar-1'], p.inst, p.avail,
                                     {'multilib_policy': 'all'})
        self.assertResult(ninst)

    def testAllInstall3(self):
        """'all' policy still picks only the newest version per name."""
        p = self.pkgs
        ninst = p.inst[:]
        ninst.append(p.a_ibar_2_22_x)
        res, msg = self.runOperation(['install', 'ibar'], p.inst, p.avail,
                                     {'multilib_policy': 'all'})
        self.assertResult(ninst)

    def testAllInstall4(self):
        p = self.pkgs
        ninst = p.inst[:]
        ninst.append(p.a_xbar_2_22_i)
        res, msg = self.runOperation(['install', 'xbar'], p.inst, p.avail,
                                     {'multilib_policy': 'all'})
        self.assertResult(ninst)

    def testDowngrade1(self):
        """Downgrade may change arch (i586 -> i386) when only that is older."""
        p = self.pkgs
        ninst = [p.i_foo_1_12_x, p.a_wbar_0_2_i]
        res, msg = self.runOperation(['downgrade', 'wbar'], p.inst, p.avail)
        self.assertResult(ninst)

    def testDowngrade2(self):
        p = self.pkgs
        oinst = [p.i_foo_1_12_x, p.a_wbar_2_22_i]
        ninst = [p.i_foo_1_12_x, p.i_wbar_1_12_i]
        p.avail.append(p.i_wbar_1_12_i)
        res, msg = self.runOperation(['downgrade', 'wbar'], oinst, p.avail)
        self.assertResult(ninst)

    def testDowngrade3(self):
        p = self.pkgs
        oinst = [p.i_foo_1_12_x, p.a_wbar_2_22_i]
        ninst = [p.i_foo_1_12_x, p.a_wbar_0_2_i]
        res, msg = self.runOperation(['downgrade', 'wbar'], oinst, p.avail)
        self.assertResult(ninst)

    def testDowngrade4(self):
        """Downgrade to a noarch replacement of an arch-specific package."""
        p = self.pkgs
        oinst = p.inst[:] + [p.a_ibar_2_22_x]
        p.a_ibar_1_12_i.arch = 'noarch'
        ninst = p.inst[:] + [p.a_ibar_1_12_i]
        res, msg = self.runOperation(['downgrade', 'ibar'], oinst, p.avail)
        self.assertResult(ninst)

    def testDowngrade5(self):
        """Downgrade from a noarch package back to an arch-specific one."""
        p = self.pkgs
        ninst = p.inst[:] + [p.a_xbar_1_12_x]
        p.a_xbar_2_22_i.arch = 'noarch'
        oinst = p.inst[:] + [p.a_xbar_2_22_i]
        res, msg = self.runOperation(['downgrade', 'xbar'], oinst, p.avail)
        self.assertResult(ninst)
| ErwanAliasr1/yum | test/operationstests.py | Python | gpl-2.0 | 11,202 |
'''
sipShell2Abq.py: procedure part of the 2DImage2Mesh project
Create a 2D quadrangular mesh or a 3D hexahedral mesh (extrusion mesh) from one image using scanIP (Simpleware Ltd) and Abaqus (Dassault Systeme)
----------------------------------------------
INSTRUCTIONS: see readme.md
----------------------------------------------
Author: MMENGONI, Jun 2015
----------------------------------------------
v 0.2 (06/2015): add 3D mesh by extrusion, no set/surfaces reconstruction
v 0.1 (01/2014): first release, 2D quad mesh
'''
## default abaqus modules
from abaqus import *
backwardCompatibility.setValues(reportDeprecated=False)
import mesh
from Image2MeshToolbox import *
#-----------------------------------------------------
def shellTo2DGeo(myModel, seedSize=0.5, elementType='CPS'):
    '''
    shellTo2DGeo is the function to be called from abaqus cae

    Rebuilds each scanIP shell set as a new 2D geometrical part, meshes it
    with quadrangles, and reconstructs the scanIP assembly sets as part
    sets/surfaces on the new parts.

    mandatory arguments:
        myModel: an abaqus model (model produced with scanIP and imported in abaqus)
    optional arguments:
        seedSize: the relative size of the new mesh points (0.5 means there will be two seeds per geometrical line ie one new element edge per old element edge at the boundaries)
        elementType: choose type of 2d element, anything else than 'CPS' will produce a plane strain mesh, default keeps plane stress
    '''
    ## load needed abaqus constants
    from abaqusConstants import TWO_D_PLANAR, DEFORMABLE_BODY, QUAD, CPS3, CPS4R, CPE3, CPE4R, STANDARD, ENHANCED
    ## ELEMENT INTEGRATION (2D elements: CPE=plane strain, CPS=plane stress)
    # defines abaqus elements - hourglass controlled standard quadrangles (abaqus asks for the definition of the triangular elements even if they a not used!!)
    if elementType == 'CPS':
        elemType1 = mesh.ElemType(elemCode=CPS4R, elemLibrary=STANDARD, hourglassControl=ENHANCED)
        elemType2 = mesh.ElemType(elemCode=CPS3, elemLibrary=STANDARD)
    else:
        elemType1 = mesh.ElemType(elemCode=CPE4R, elemLibrary=STANDARD, hourglassControl=ENHANCED)
        elemType2 = mesh.ElemType(elemCode=CPE3, elemLibrary=STANDARD)
    myAssembly = myModel.rootAssembly
    clearUnwantedNodes(myModel)
    myAssembly.regenerate()  # needed to still have access to assembly node sets
    ## for each set that makes a shell, create a new part (by getting the external edges as Lines) and mesh it
    # those sets are assembly sets defined by scanIP --> loop over assembly instances and over sets in those instances
    for myInstance in myAssembly.instances.values():
        for myISet in myInstance.sets.keys():
            # find set which is a shell (each set will form a new part)
            if myISet.endswith('WITH_ZMIN'):
                # create a new sketch to create a new geometrical part
                mySketch = createSketch(myModel, myInstance.sets[myISet])
                # create new part whose name will be 'nameOfTheCorrespondingScanIPMask_Geo'
                myNewPart = myModel.Part(name=myISet.split('_')[1] + '_Geo', dimensionality=TWO_D_PLANAR, type=DEFORMABLE_BODY)
                myNewPart.BaseShell(sketch=mySketch)
                # assign mesh controls and seeds
                myNewPart.setMeshControls(regions=(myNewPart.faces[0],), elemShape=QUAD)
                myNewPart.seedPart(size=seedSize)  # , deviationFactor=0.1, minSizeFactor=0.1)
                # create new mesh
                myNewPart.generateMesh()
                myNewPart.setElementType(regions=(myNewPart.faces[0],), elemTypes=(elemType1, elemType2))
                del mySketch
    # rebuild assembly sets as part sets and surfaces
    # loop over a snapshot of the assembly set names: the loop body deletes
    # entries from myAssembly.sets, which would break iteration over a live view
    for setName in list(myAssembly.sets.keys()):
        partName = setName.split('_')[1] + '_Geo'
        # if the set belongs to a newly created part (found with their name as they all start with the name of the scanIP mask they are built on)
        # then reads all nodes coordinates and build corresponding part set and surface
        # (membership test instead of the deprecated dict.has_key())
        if partName in myModel.parts:
            part = myModel.parts[partName]
            # build a list of the assembly set node coord
            nodeCoord = tuple(node.coordinates for node in myAssembly.sets[setName].nodes)
            # find the part edges at those coord
            myEdgeList = list(part.edges.findAt((nC,)) for nC in nodeCoord)
            # for what ever reason when parts.edges[0] should be in the list it is not...
            # so here is a long way to add it if needed...
            # 1/ build a list of all vertices in the edge list
            listVertices = list()
            for edge in myEdgeList:
                # myEdgeList is a list of edge entities, the edge itself is the first element of that entity --> edge[0]
                listVertices.extend([v for v in edge[0].getVertices() if v not in listVertices])
            # 2/ parts.edges[0] has vertices (0,1) --> if those two vertices are in the list of all vertices, add part.edges[0] to myEdgeList
            if all(x in listVertices for x in [0, 1]):
                pt0 = part.edges[0].getNodes()[0].coordinates
                pt1 = part.edges[0].getNodes()[1].coordinates
                newPt = ((pt0[0] + pt1[0]) / 2., (pt0[1] + pt1[1]) / 2., (pt0[2] + pt1[2]) / 2.)
                myEdgeList.append(part.edges.findAt((newPt,)))
            # create set and surface
            part.Set(edges=myEdgeList, name=setName)
            part.Surface(side1Edges=myEdgeList, name=setName)
            del myAssembly.sets[setName]
    deleteOldFeatures(myModel)
    addPartsToAssembly(myModel)
#-----------------------------------------------------
#-----------------------------------------------------
def shellTo3DExtruGeo(myModel, extrusionDepth=5., seedSize=0.5):
    '''
    shellTo3DExtruGeo is the function to be called from abaqus cae

    Rebuilds each scanIP shell set as a new 3D part extruded to
    *extrusionDepth* and meshes it with swept hexahedra (C3D8R).

    mandatory arguments:
        myModel: an abaqus model (model produced with scanIP and imported in abaqus)
    optional arguments:
        extrusionDepth: the extrusion depth (default is 5.)
        seedSize: the relative size of the new mesh points (0.5 means there will be two seeds per geometrical line ie one new element edge per old element edge at the boundaries)
    '''
    ## load needed abaqus constants
    from abaqusConstants import THREE_D, DEFORMABLE_BODY, C3D8R, STANDARD, ENHANCED, SWEEP
    ## ELEMENT INTEGRATION
    # hourglass-controlled reduced-integration hexahedra
    elemType1 = mesh.ElemType(elemCode=C3D8R, elemLibrary=STANDARD, hourglassControl=ENHANCED)
    myAssembly = myModel.rootAssembly
    clearUnwantedNodes(myModel)
    myAssembly.regenerate()  # needed to still have access to assembly node sets
    ## for each set that makes a shell, create a new part (by getting the external edges as Lines) and mesh it
    # those sets are assembly sets defined by scanIP --> loop over assembly instances and over sets in those instances
    for myInstance in myAssembly.instances.values():
        for myISet in myInstance.sets.keys():
            # find set which is a shell (each set will form a new part)
            if myISet.endswith('WITH_ZMIN'):
                # create a new sketch to create a new geometrical part
                mySketch = createSketch(myModel, myInstance.sets[myISet])
                # create new part whose name will be 'nameOfTheCorrespondingScanIPMask_Geo'
                myNewPart = myModel.Part(name=myISet.split('_')[1] + '_Geo', dimensionality=THREE_D, type=DEFORMABLE_BODY)
                myNewPart.BaseSolidExtrude(sketch=mySketch, depth=extrusionDepth)
                # assign mesh controls and seeds
                myNewPart.setMeshControls(regions=(myNewPart.cells[0],), technique=SWEEP)
                myNewPart.seedPart(size=seedSize)  # , deviationFactor=0.1, minSizeFactor=0.1)
                # create new mesh
                myNewPart.generateMesh()
                myNewPart.setElementType(regions=(myNewPart.cells[0],), elemTypes=(elemType1, ))
                del mySketch
    # NOTE(review): unlike shellTo2DGeo, assembly sets are not rebuilt as
    # part sets/surfaces here (see module docstring: "no set/surfaces
    # reconstruction" for the 3D path).
    deleteOldFeatures(myModel)
    addPartsToAssembly(myModel)
| mengomarlene/2DImage2Mesh | sipShell2Abq.py | Python | gpl-2.0 | 7,935 |
#!/usr/bin/env python
"""
Manipulates MacBinary files.
"""
from __future__ import absolute_import
from classicbox.io import BytesIO
from classicbox.io import print_structure_format
from classicbox.macbinary import _MACBINARY_HEADER_MEMBERS
from classicbox.macbinary import print_macbinary
from classicbox.macbinary import read_macbinary
from classicbox.macbinary import write_macbinary
import sys
# ------------------------------------------------------------------------------
_VERBOSE_HEADER_FORMAT = False
def main(args):
    """Dispatch on *args* = (command, macbinary_filepath).

    Commands:
      info              -- print the parsed MacBinary structure
      test_read_write   -- round-trip the file and compare against the original bytes
      test_write_custom -- write a small hand-built MacBinary (input file unused)
    """
    (command, macbinary_filepath, ) = args

    if _VERBOSE_HEADER_FORMAT:
        print_structure_format(_MACBINARY_HEADER_MEMBERS, 'MacBinary Header Format')

    if macbinary_filepath == '-':
        # NOTE(review): '-' skips reading entirely and leaves macbinary None;
        # only 'test_write_custom' appears safe with it -- confirm the other
        # commands are never invoked with '-'.
        macbinary = None
    else:
        with open(macbinary_filepath, 'rb') as input:
            macbinary = read_macbinary(input)

    if command == 'info':
        print_macbinary(macbinary)

    elif command == 'test_read_write':
        # Re-serialize the parsed structure and require byte-identical output.
        output_macbinary = BytesIO()
        write_macbinary(output_macbinary, macbinary)

        with open(macbinary_filepath, 'rb') as file:
            expected_output = file.read()
        actual_output = output_macbinary.getvalue()

        matches = (actual_output == expected_output)
        if matches:
            print 'OK'
        else:
            print '  Expected: ' + repr(expected_output)
            print '  Actual:   ' + repr(actual_output)
            print

    elif command == 'test_write_custom':
        output_macbinary = BytesIO()
        write_macbinary(output_macbinary, {
            'filename': 'Greetings.txt',
            'file_type': 'TEXT',
            'file_creator': 'ttxt',
            'data_fork': b'Hello World!',
        })

    else:
        sys.exit('Unrecognized command: %s' % command)
    return
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Script entry point: forward CLI arguments, minus the program name.
    main(sys.argv[1:])
| davidfstr/ClassicBox | macbinary_file.py | Python | gpl-2.0 | 1,986 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Package metadata for the c2c.recipe.cssmin buildout recipe.
setup(
    name='c2c.recipe.cssmin',
    version='0.6',
    license='MIT License',
    author='Frederic Junod',
    author_email='frederic.junod@camptocamp.com',
    url='https://github.com/camptocamp/c2c.recipe.cssmin',
    description='A buildout recipe to merge and compress css files',
    long_description=open('README.rst').read(),
    classifiers=[
        'Framework :: Buildout',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
    ],
    install_requires=['cssmin'],
    packages=find_packages(),
    namespace_packages=['c2c', 'c2c.recipe'],
    entry_points={
        'zc.buildout': ['default = c2c.recipe.cssmin.buildout:CssMin'],
    },
)
| camptocamp/c2c.recipe.cssmin | setup.py | Python | mit | 933 |
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import PathScripts.PathLog as PathLog
import PathScripts.PathPreferences as PathPreferences
import sys
PathLog.setLevel(PathLog.Level.INFO, PathLog.thisModule())
class PostProcessor:
    """Wrapper around a Path post processor script.

    `load()` imports a ``<name>_post`` script from the configured post
    processor search paths and exposes the script's optional metadata
    (units, machine name, corner limits, tooltip) as attributes.
    """

    @classmethod
    def exists(cls, processor):
        """Return True if *processor* is among the available post processors."""
        return processor in PathPreferences.allAvailablePostProcessors()

    @classmethod
    def load(cls, processor):
        """Import the ``<processor>_post`` script and return a PostProcessor.

        The post processor search paths are temporarily prepended to
        ``sys.path`` while the script is imported and reloaded.
        """
        PathLog.track(processor)
        syspath = sys.path
        paths = PathPreferences.searchPathsPost()
        paths.extend(sys.path)
        postname = processor + "_post"
        namespace = {}
        sys.path = paths
        # Restore sys.path even if the import or reload below raises,
        # otherwise a failing script would leave sys.path modified.
        try:
            # can't modify function local scope with exec in python3
            exec("import %s as current_post" % postname, namespace) # pylint: disable=exec-used
            current_post = namespace['current_post']

            # make sure the script is reloaded if it was previously loaded
            # should the script have been imported for the first time above
            # then the initialization code of the script gets executed twice
            # resulting in 2 load messages if the script outputs one of those.
            try:
                # Python 2.7
                exec("reload(%s)" % 'current_post') # pylint: disable=exec-used
            except NameError:
                # Python 3.4+
                from importlib import reload # pylint: disable=redefined-builtin,unused-import
                exec("reload(%s)" % 'current_post') # pylint: disable=exec-used
        finally:
            sys.path = syspath

        instance = PostProcessor(current_post)
        # Mirror the script's optional module-level metadata onto the instance.
        if hasattr(current_post, "UNITS"):
            if current_post.UNITS == "G21":
                instance.units = "Metric"
            else:
                instance.units = "Inch"
        if hasattr(current_post, "MACHINE_NAME"):
            instance.machineName = current_post.MACHINE_NAME
        if hasattr(current_post, "CORNER_MAX"):
            instance.cornerMax = {'x': current_post.CORNER_MAX['x'],
                                  'y': current_post.CORNER_MAX['y'],
                                  'z': current_post.CORNER_MAX['z']}
        if hasattr(current_post, "CORNER_MIN"):
            instance.cornerMin = {'x': current_post.CORNER_MIN['x'],
                                  'y': current_post.CORNER_MIN['y'],
                                  'z': current_post.CORNER_MIN['z']}
        if hasattr(current_post, "TOOLTIP"):
            instance.tooltip = current_post.TOOLTIP
        if hasattr(current_post, "TOOLTIP_ARGS"):
            instance.tooltipArgs = current_post.TOOLTIP_ARGS
        return instance

    def __init__(self, script):
        """Wrap *script*; metadata attributes default to None until load()."""
        self.script = script
        self.tooltip = None
        self.tooltipArgs = None
        self.cornerMax = None
        self.cornerMin = None
        self.units = None
        self.machineName = None

    def export(self, obj, filename, args):
        """Delegate G-code export of *obj* to the wrapped script."""
        return self.script.export(obj, filename, args)
| sanguinariojoe/FreeCAD | src/Mod/Path/PathScripts/PathPostProcessor.py | Python | lgpl-2.1 | 4,419 |
"""
Currency exchange rate support that comes from fixer.io.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fixer/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['fixerio==0.1.1']

_LOGGER = logging.getLogger(__name__)

# Keys used in the sensor's state attributes dictionary.
ATTR_BASE = 'Base currency'
ATTR_EXCHANGE_RATE = 'Exchange rate'
ATTR_TARGET = 'Target currency'

CONF_ATTRIBUTION = "Data provided by the European Central Bank (ECB)"
CONF_BASE = 'base'
CONF_TARGET = 'target'

DEFAULT_BASE = 'USD'
DEFAULT_NAME = 'Exchange rate'

ICON = 'mdi:currency'

# Throttle interval for ExchangeData.update().
MIN_TIME_BETWEEN_UPDATES = timedelta(days=1)

# Platform configuration: target currency is required, base/name optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_TARGET): cv.string,
    vol.Optional(CONF_BASE, default=DEFAULT_BASE): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Fixer.io sensor."""
    from fixerio import (Fixerio, exceptions)

    name = config.get(CONF_NAME)
    base = config.get(CONF_BASE)
    target = config.get(CONF_TARGET)

    try:
        # Probe the API once so an unsupported currency pair fails fast
        # during platform setup rather than at first update.
        Fixerio(base=base, symbols=[target], secure=True).latest()
    except exceptions.FixerioException:
        _LOGGER.error('One of the given currencies is not supported')
        return False

    data = ExchangeData(base, target)
    add_devices([ExchangeRateSensor(data, name, target)])
# pylint: disable=too-few-public-methods
class ExchangeRateSensor(Entity):
    """Representation of a Exchange sensor."""

    def __init__(self, data, name, target):
        """Initialize the sensor."""
        self.data = data
        self._target = target
        self._name = name
        self._state = None
        # Fetch an initial rate so the sensor has a state right away.
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        # The state is a rate expressed in the target currency.
        return self._target

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Implicitly returns None until a rate has been fetched.
        if self.data.rate is not None:
            return {
                ATTR_BASE: self.data.rate['base'],
                ATTR_TARGET: self._target,
                ATTR_EXCHANGE_RATE: self.data.rate['rates'][self._target],
                ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
            }

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the latest data and updates the states."""
        self.data.update()
        # NOTE(review): assumes data.update() left a usable rate dict;
        # rate could still be None if the API call failed -- confirm.
        self._state = round(self.data.rate['rates'][self._target], 3)
class ExchangeData(object):
    """Get the latest data and update the states."""

    def __init__(self, base_currency, target_currency):
        """Initialize the data object."""
        from fixerio import Fixerio

        self.rate = None   # last API response dict, or None before first fetch
        self.base_currency = base_currency
        self.target_currency = target_currency
        self.exchange = Fixerio(base=self.base_currency,
                                symbols=[self.target_currency],
                                secure=True)

    # Throttled: at most one API call per MIN_TIME_BETWEEN_UPDATES.
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from Fixer.io."""
        self.rate = self.exchange.latest()
| Smart-Torvy/torvy-home-assistant | homeassistant/components/sensor/fixer.py | Python | mit | 3,748 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import quota
# Module-level quota engine handle and a soft authorizer for the
# 'used_limits' extension of the 'limits' resource.
QUOTAS = quota.QUOTAS

authorize = extensions.soft_extension_authorizer('limits', 'used_limits')
class UsedLimitsController(wsgi.Controller):
    """Extends the core limits response with per-project usage totals."""

    @wsgi.extends
    def index(self, req, resp_obj):
        context = req.environ['cinder.context']
        if authorize(context):
            quotas = QUOTAS.get_project_quotas(context, context.project_id,
                                               usages=True)

            # Maps response attribute names to quota resource names.
            quota_map = {
                'totalVolumesUsed': 'volumes',
                'totalGigabytesUsed': 'gigabytes',
                'totalSnapshotsUsed': 'snapshots',
                'totalBackupsUsed': 'backups',
                'totalBackupGigabytesUsed': 'backup_gigabytes'
            }

            used_limits = {}
            for display_name, single_quota in quota_map.items():
                if single_quota in quotas:
                    used_limits[display_name] = quotas[single_quota]['in_use']

            resp_obj.obj['limits']['absolute'].update(used_limits)
class Used_limits(extensions.ExtensionDescriptor):
    """Provide data on limited resources that are being used."""

    name = "UsedLimits"
    alias = 'os-used-limits'
    updated = "2013-10-03T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the used-limits controller to the core 'limits' resource."""
        ext = extensions.ControllerExtension(
            self, 'limits', UsedLimitsController())
        return [ext]
| Hybrid-Cloud/cinder | cinder/api/contrib/used_limits.py | Python | apache-2.0 | 2,116 |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# pylint: disable=invalid-name
# This file includes all modules from the native module.
from __future__ import print_function
import sys
from os.path import dirname, realpath
from direct.stdpy.file import join, isfile
from rpcore.rpobject import RPObject
# Store a global flag, indicating whether the C++ modules were loaded or the python
# implementation of them
NATIVE_CXX_LOADED = False
# Read the configuration from the flag-file written by setup.py; it contains
# "1" when the compiled C++ core should be used.
current_path = dirname(realpath(__file__))
cxx_flag_path = join(current_path, "use_cxx.flag")
if not isfile(cxx_flag_path):
    RPObject.global_error("CORE", "Could not find cxx flag, please run the setup.py!")
    sys.exit(1)
else:
    with open(join(current_path, "use_cxx.flag"), "r") as handle:
        NATIVE_CXX_LOADED = handle.read().strip() == "1"
# The native module should only be imported once, and that by the internal pipeline code
assert __package__ == "rpcore.native", "You have included the pipeline in the wrong way!"
# Classes which should get imported under their original name
classes_to_import = [
    "GPUCommand",
    "GPUCommandList",
    "ShadowManager",
    "InternalLightManager",
    "PSSMCameraRig",
    "IESDataset",
    "TagStateManager",
]
# Classes which should get imported and renamed (native name -> exported name)
classes_to_import_and_rename = {
    "RPPointLight": "PointLight",
    "RPSpotLight": "SpotLight"
}
native_module = None
# If the module was built, use it, otherwise use the python wrappers
if NATIVE_CXX_LOADED:
    RPObject.global_debug("CORE", "Using native core module")
    from rpcore.native import native_ as _native_module  # pylint: disable=wrong-import-position
else:
    from rpcore import pynative as _native_module  # pylint: disable=wrong-import-position
    RPObject.global_debug("CORE", "Using simulated python-wrapper module")
# Import all classes into this module's namespace, applying the rename map
for v in classes_to_import + list(classes_to_import_and_rename.keys()):
    if hasattr(_native_module, v):
        v_name = classes_to_import_and_rename[v] if v in classes_to_import_and_rename else v
        globals()[v_name] = getattr(_native_module, v)
    else:
        print("ERROR: could not import class", v, "from", _native_module.__name__)
# Don't export all variables, only the required ones
__all__ = classes_to_import + list(classes_to_import_and_rename.values()) + ["NATIVE_CXX_LOADED"]
| eswartz/RenderPipeline | rpcore/native/__init__.py | Python | mit | 3,413 |
import redis
from os import chdir
from subprocess import check_call as call
import traceback
# NOTE: Python 2 script (print statements, `except Exception, e` syntax).
# Rebuilds the Rosetta Elasticsearch index from scratch and reports stats.
rd = redis.StrictRedis(host='localhost', port=6379, db=0)
try:
    # Drop all cached rosetta-mapping-* keys so the reindex starts clean.
    mapping_keys = rd.keys("rosetta-mapping-*")
    for key in mapping_keys:
        rd.delete(key)
    # Delete the ES index, restore the shared dump, then reindex.
    call(["curl", "-XDELETE", "http://localhost:9200/throwtable"])
    chdir("../elasticsearch_dumps/shared/")
    call(["bash", "restore_shared.sh"])
    chdir("../../AlgorithmNames")
    # check this script
    call(["python2", "index_elasticsearch_rosetta_using_crosswikis.py"])
    chdir("../elasticsearch_dumps/")
    # check this script
    call(["bash", "backup_elasticsearch.sh"])
    chdir("../AlgorithmNames")
    # Report document counts per dump file (sed prints the line count).
    for doc_type in ['category', 'algorithm', 'implementation']:
        print '# of lines in %s: (remember to substract 2)' % doc_type
        call(["sed", "-n", "$=",
              "../elasticsearch_dumps/elasticsearch_%s_v4.4.json" % doc_type])
    # Report how many entries each success-tracking redis set holds.
    for key in ['rosetta-mapping-similars-success',
                'rosetta-mapping-success-wikipedia-autosuggest',
                'rosetta-mapping-success-crosswikis',
                'rosetta-mapping-success-all-algo-links']:
        print key,
        print rd.scard(key)
except Exception, e:
    print e
    print(traceback.format_exc())
# finally:
#     call(["vlc", "/home/minjingzhu/Dropbox/MUSIC/copy.mp3"])
| xkxx/algodb | AlgorithmNames/reindex_rosetta.py | Python | mit | 1,311 |
import re
import yaml
from . import gcloud
from .cluster_config import ClusterConfig
# Default Spark/HDFS/Dataproc properties applied to every cluster; users can
# override or extend these via --properties.
DEFAULT_PROPERTIES = {
    "spark:spark.task.maxFailures": "20",
    "spark:spark.driver.extraJavaOptions": "-Xss4M",
    "spark:spark.executor.extraJavaOptions": "-Xss4M",
    'spark:spark.speculation': 'true',
    "hdfs:dfs.replication": "1",
    'dataproc:dataproc.logging.stackdriver.enable': 'false',
    'dataproc:dataproc.monitoring.stackdriver.enable': 'false'
}
# leader (master) machine type to memory map (values in GB), used for setting
# spark.driver.memory property
MACHINE_MEM = {
    'n1-standard-1': 3.75,
    'n1-standard-2': 7.5,
    'n1-standard-4': 15,
    'n1-standard-8': 30,
    'n1-standard-16': 60,
    'n1-standard-32': 120,
    'n1-standard-64': 240,
    'n1-highmem-2': 13,
    'n1-highmem-4': 26,
    'n1-highmem-8': 52,
    'n1-highmem-16': 104,
    'n1-highmem-32': 208,
    'n1-highmem-64': 416,
    'n1-highcpu-2': 1.8,
    'n1-highcpu-4': 3.6,
    'n1-highcpu-8': 7.2,
    'n1-highcpu-16': 14.4,
    'n1-highcpu-32': 28.8,
    'n1-highcpu-64': 57.6,
    'n2-standard-2': 8,
    'n2-standard-4': 16,
    'n2-standard-8': 32,
    'n2-standard-16': 64,
    'n2-standard-32': 128,
    'n2-standard-48': 192,
    'n2-standard-64': 256,
    'n2-standard-80': 320,
    'n2-highmem-2': 16,
    'n2-highmem-4': 32,
    'n2-highmem-8': 64,
    'n2-highmem-16': 128,
    'n2-highmem-32': 256,
    'n2-highmem-48': 384,
    'n2-highmem-64': 512,
    'n2-highmem-80': 640,
    'n2-highcpu-2': 2,
    'n2-highcpu-4': 4,
    'n2-highcpu-8': 8,
    'n2-highcpu-16': 16,
    'n2-highcpu-32': 32,
    'n2-highcpu-48': 48,
    'n2-highcpu-64': 64,
    'n2-highcpu-80': 80,
    'n2d-standard-2': 8,
    'n2d-standard-4': 16,
    'n2d-standard-8': 32,
    'n2d-standard-16': 64,
    'n2d-standard-32': 128,
    'n2d-standard-48': 192,
    'n2d-standard-64': 256,
    'n2d-standard-80': 320,
    'n2d-standard-96': 384,
    'n2d-standard-128': 512,
    'n2d-standard-224': 896,
    'n2d-highmem-2': 16,
    'n2d-highmem-4': 32,
    'n2d-highmem-8': 64,
    'n2d-highmem-16': 128,
    'n2d-highmem-32': 256,
    'n2d-highmem-48': 384,
    'n2d-highmem-64': 512,
    'n2d-highmem-80': 640,
    'n2d-highmem-96': 786,
    'n2d-highcpu-2': 2,
    'n2d-highcpu-4': 4,
    'n2d-highcpu-8': 8,
    'n2d-highcpu-16': 16,
    'n2d-highcpu-32': 32,
    'n2d-highcpu-48': 48,
    'n2d-highcpu-64': 64,
    'n2d-highcpu-80': 80,
    'n2d-highcpu-96': 96,
    'n2d-highcpu-128': 128,
    'n2d-highcpu-224': 224,
    'e2-standard-2': 8,
    'e2-standard-4': 16,
    'e2-standard-8': 32,
    'e2-standard-16': 64,
    'e2-highmem-2': 16,
    'e2-highmem-4': 32,
    'e2-highmem-8': 64,
    'e2-highmem-16': 128,
    'e2-highcpu-2': 2,
    'e2-highcpu-4': 4,
    'e2-highcpu-8': 8,
    'e2-highcpu-16': 16,
    'm1-ultramem-40': 961,
    'm1-ultramem-80': 1922,
    'm1-ultramem-160': 3844,
    'm1-megamem-96': 1433,
    'm2-ultramem-2084': 5888,
    'm2-ultramem-4164': 11776,
    'c2-standard-4': 16,
    'c2-standard-8': 32,
    'c2-standard-16': 64,
    'c2-standard-30': 120,
    'c2-standard-60': 240,
}
# Compute region -> VEP data bucket replicate. VEP reference data is mirrored
# in several locations to avoid cross-region egress charges.
REGION_TO_REPLICATE_MAPPING = {
    'us-central1': 'us',
    'us-east1': 'us',
    'us-east4': 'us',
    'us-west1': 'us',
    'us-west2': 'us',
    'us-west3': 'us',
    # Europe != EU
    'europe-north1': 'eu',
    'europe-west1': 'eu',
    'europe-west2': 'uk',
    'europe-west3': 'eu',
    'europe-west4': 'eu',
    'australia-southeast1': 'aus-sydney'
}
# Requester-pays buckets backing the Hail annotation database.
ANNOTATION_DB_BUCKETS = ["hail-datasets-us", "hail-datasets-eu"]
# Dataproc image version the cluster is created with.
IMAGE_VERSION = '2.0.29-debian10'
def init_parser(parser):
    """Register all `hailctl dataproc start` arguments on *parser*.

    Only declares CLI flags; all interpretation happens in main().
    """
    parser.add_argument('name', type=str, help='Cluster name.')
    # arguments with default parameters
    parser.add_argument('--master-machine-type', '--master', '-m', default='n1-highmem-8', type=str,
                        help='Master machine type (default: %(default)s).')
    parser.add_argument('--master-memory-fraction', default=0.8, type=float,
                        help='Fraction of master memory allocated to the JVM. '
                             'Use a smaller value to reserve more memory '
                             'for Python. (default: %(default)s)')
    parser.add_argument('--master-boot-disk-size', default=100, type=int,
                        help='Disk size of master machine, in GB (default: %(default)s).')
    parser.add_argument('--num-master-local-ssds', default=0, type=int,
                        help='Number of local SSDs to attach to the master machine (default: %(default)s).')
    parser.add_argument('--num-secondary-workers', '--num-preemptible-workers', '--n-pre-workers', '-p', default=0, type=int,
                        help='Number of secondary (preemptible) worker machines (default: %(default)s).')
    parser.add_argument('--num-worker-local-ssds', default=0, type=int,
                        help='Number of local SSDs to attach to each worker machine (default: %(default)s).')
    parser.add_argument('--num-workers', '--n-workers', '-w', default=2, type=int,
                        help='Number of worker machines (default: %(default)s).')
    parser.add_argument('--secondary-worker-boot-disk-size', '--preemptible-worker-boot-disk-size', default=40, type=int,
                        help='Disk size of secondary (preemptible) worker machines, in GB (default: %(default)s).')
    parser.add_argument('--worker-boot-disk-size', default=40, type=int,
                        help='Disk size of worker machines, in GB (default: %(default)s).')
    parser.add_argument('--worker-machine-type', '--worker',
                        help='Worker machine type (default: n1-standard-8, or n1-highmem-8 with --vep).')
    parser.add_argument('--region',
                        help='Compute region for the cluster.')
    parser.add_argument('--zone',
                        help='Compute zone for the cluster.')
    parser.add_argument('--properties',
                        help='Additional configuration properties for the cluster')
    parser.add_argument('--metadata',
                        help='Comma-separated list of metadata to add: KEY1=VALUE1,KEY2=VALUE2...')
    parser.add_argument('--packages', '--pkgs',
                        help='Comma-separated list of Python packages to be installed on the master node.')
    parser.add_argument('--project', help='Google Cloud project to start cluster (defaults to currently set project).')
    parser.add_argument('--configuration',
                        help='Google Cloud configuration to start cluster (defaults to currently set configuration).')
    parser.add_argument('--max-idle', type=str, help='If specified, maximum idle time before shutdown (e.g. 60m).')
    # --expiration-time and --max-age both bound cluster lifetime; only one may be given.
    max_age_group = parser.add_mutually_exclusive_group()
    max_age_group.add_argument('--expiration-time', type=str, help='If specified, time at which cluster is shutdown (e.g. 2020-01-01T00:00:00Z).')
    max_age_group.add_argument('--max-age', type=str, help='If specified, maximum age before shutdown (e.g. 60m).')
    parser.add_argument('--bucket', type=str,
                        help='The Google Cloud Storage bucket to use for cluster staging (just the bucket name, no gs:// prefix).')
    parser.add_argument('--network', type=str, help='the network for all nodes in this cluster')
    parser.add_argument('--service-account', type=str, help='The Google Service Account to use for cluster creation (default to the Compute Engine service account).')
    parser.add_argument('--master-tags', type=str, help='comma-separated list of instance tags to apply to the mastern node')
    parser.add_argument('--scopes', help='Specifies access scopes for the node instances')
    parser.add_argument('--wheel', help='Non-default Hail installation. Warning: experimental.')
    # initialization action flags
    parser.add_argument('--init', default='', help='Comma-separated list of init scripts to run.')
    parser.add_argument('--init_timeout', default='20m',
                        help='Flag to specify a timeout period for the initialization action')
    parser.add_argument('--vep',
                        help='Install VEP for the specified reference genome.',
                        required=False,
                        choices=['GRCh37', 'GRCh38'])
    parser.add_argument('--dry-run', action='store_true', help="Print gcloud dataproc command, but don't run it.")
    # memory-layout tuning flags, consumed by the off-heap logic in main()
    parser.add_argument('--no-off-heap-memory', action='store_true',
                        help="If true, allocate all executor memory to the JVM heap.")
    parser.add_argument('--big-executors', action='store_true',
                        help="If true, double memory allocated per executor, using half the cores of the cluster with an extra large memory allotment per core.")
    parser.add_argument('--off-heap-memory-fraction', type=float, default=0.6,
                        help="Fraction of worker memory dedicated to off-heap Hail values.")
    parser.add_argument('--yarn-memory-fraction', type=float,
                        help="Fraction of machine memory to allocate to the yarn container scheduler.",
                        default=0.95)
    # requester pays
    parser.add_argument('--requester-pays-allow-all',
                        help="Allow reading from all requester-pays buckets.",
                        action='store_true',
                        required=False)
    parser.add_argument('--requester-pays-allow-buckets',
                        help="Comma-separated list of requester-pays buckets to allow reading from.")
    parser.add_argument('--requester-pays-allow-annotation-db',
                        action='store_true',
                        help="Allows reading from any of the requester-pays buckets that hold data for the annotation database.")
    parser.add_argument('--debug-mode',
                        action='store_true',
                        help="Enable debug features on created cluster (heap dump on out-of-memory error)")
async def main(args, pass_through_args):
    """Assemble and run the ``gcloud dataproc clusters create`` command.

    Builds a ClusterConfig from the parsed CLI arguments (cluster shape,
    Spark/YARN memory properties, init actions, VEP, requester-pays, ...),
    prints the resulting gcloud invocation, and executes it unless
    --dry-run was given.  Extra args are passed through to gcloud verbatim.
    """
    import pkg_resources  # pylint: disable=import-outside-toplevel
    conf = ClusterConfig()
    conf.extend_flag('image-version', IMAGE_VERSION)
    if not pkg_resources.resource_exists('hailtop.hailctl', "deploy.yaml"):
        raise RuntimeError("package has no 'deploy.yaml' file")
    deploy_metadata = yaml.safe_load(
        pkg_resources.resource_stream('hailtop.hailctl', "deploy.yaml"))['dataproc']
    conf.extend_flag('properties', DEFAULT_PROPERTIES)
    if args.properties:
        conf.parse_and_extend('properties', args.properties)
    if args.debug_mode:
        # Heap dumps plus full stack traces help diagnose executor OOMs.
        conf.extend_flag('properties', {
            "spark:spark.driver.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow",
            "spark:spark.executor.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow",
        })
    # default to highmem machines if using VEP
    if not args.worker_machine_type:
        args.worker_machine_type = 'n1-highmem-8' if args.vep else 'n1-standard-8'
    # default initialization script to start up cluster with
    conf.extend_flag('initialization-actions',
                     [deploy_metadata['init_notebook.py']])
    # requester pays support
    if args.requester_pays_allow_all or args.requester_pays_allow_buckets or args.requester_pays_allow_annotation_db:
        if args.requester_pays_allow_all and args.requester_pays_allow_buckets:
            raise RuntimeError("Cannot specify both 'requester_pays_allow_all' and 'requester_pays_allow_buckets")
        if args.requester_pays_allow_all:
            requester_pays_mode = "AUTO"
        else:
            requester_pays_mode = "CUSTOM"
            requester_pays_bucket_sources = []
            if args.requester_pays_allow_buckets:
                requester_pays_bucket_sources.append(args.requester_pays_allow_buckets)
            if args.requester_pays_allow_annotation_db:
                requester_pays_bucket_sources.extend(ANNOTATION_DB_BUCKETS)
            conf.extend_flag("properties", {"spark:spark.hadoop.fs.gs.requester.pays.buckets": ",".join(requester_pays_bucket_sources)})
        # Need to pick requester pays project.
        requester_pays_project = args.project if args.project else gcloud.get_config("project")
        conf.extend_flag("properties", {"spark:spark.hadoop.fs.gs.requester.pays.mode": requester_pays_mode,
                                        "spark:spark.hadoop.fs.gs.requester.pays.project.id": requester_pays_project})
    # gcloud version 277 and onwards requires you to specify a region. Let's just require it for all hailctl users for consistency.
    if args.region:
        project_region = args.region
    else:
        project_region = gcloud.get_config("dataproc/region")
    if not project_region:
        raise RuntimeError("Could not determine dataproc region. Use --region argument to hailctl, or use `gcloud config set dataproc/region <my-region>` to set a default.")
    # add VEP init script
    if args.vep:
        # VEP is too expensive if you have to pay egress charges. We must choose the right replicate.
        replicate = REGION_TO_REPLICATE_MAPPING.get(project_region)
        if replicate is None:
            raise RuntimeError(f"The --vep argument is not currently provided in your region.\n"
                               f"  Please contact the Hail team on https://discuss.hail.is for support.\n"
                               f"  Your region: {project_region}\n"
                               f"  Supported regions: {', '.join(REGION_TO_REPLICATE_MAPPING.keys())}")
        print(f"Pulling VEP data from bucket in {replicate}.")
        conf.extend_flag('metadata', {"VEP_REPLICATE": replicate})
        vep_config_path = "/vep_data/vep-gcloud.json"
        conf.extend_flag('metadata', {"VEP_CONFIG_PATH": vep_config_path, "VEP_CONFIG_URI": f"file://{vep_config_path}"})
        conf.extend_flag('initialization-actions', [deploy_metadata[f'vep-{args.vep}.sh']])
    # add custom init scripts
    if args.init:
        conf.extend_flag('initialization-actions', args.init.split(','))
    if args.metadata:
        conf.parse_and_extend('metadata', args.metadata)
    wheel = args.wheel or deploy_metadata['wheel']
    conf.extend_flag('metadata', {'WHEEL': wheel})
    # if Python packages requested, add metadata variable
    packages = deploy_metadata['pip_dependencies'].strip('|').split('|||')
    metadata_pkgs = conf.flags['metadata'].get('PKGS')
    split_regex = r'[|,]'
    if metadata_pkgs:
        packages.extend(re.split(split_regex, metadata_pkgs))
    if args.packages:
        packages.extend(re.split(split_regex, args.packages))
    conf.extend_flag('metadata', {'PKGS': '|'.join(set(packages))})

    def disk_size(size):
        # VEP caches are large; enforce a floor of 200 GB when VEP is enabled.
        if args.vep:
            size = max(size, 200)
        return str(size) + 'GB'

    conf.extend_flag('properties',
                     {"spark:spark.driver.memory": "{driver_memory}g".format(
                         driver_memory=str(int(MACHINE_MEM[args.master_machine_type] * args.master_memory_fraction)))})
    conf.flags['master-machine-type'] = args.master_machine_type
    conf.flags['master-boot-disk-size'] = '{}GB'.format(args.master_boot_disk_size)
    conf.flags['num-master-local-ssds'] = args.num_master_local_ssds
    conf.flags['num-secondary-workers'] = args.num_secondary_workers
    conf.flags['num-worker-local-ssds'] = args.num_worker_local_ssds
    conf.flags['num-workers'] = args.num_workers
    conf.flags['secondary-worker-boot-disk-size'] = disk_size(args.secondary_worker_boot_disk_size)
    conf.flags['worker-boot-disk-size'] = disk_size(args.worker_boot_disk_size)
    conf.flags['worker-machine-type'] = args.worker_machine_type
    if not args.no_off_heap_memory:
        worker_memory = MACHINE_MEM[args.worker_machine_type]
        # A Google support engineer recommended the strategy of passing the YARN
        # config params, and the default value of 95% of machine memory to give to YARN.
        # yarn.nodemanager.resource.memory-mb - total memory per machine
        # yarn.scheduler.maximum-allocation-mb - max memory to allocate to each container
        available_memory_fraction = args.yarn_memory_fraction
        available_memory_mb = int(worker_memory * available_memory_fraction * 1024)
        cores_per_machine = int(args.worker_machine_type.split('-')[-1])
        executor_cores = min(cores_per_machine, 4)
        available_memory_per_core_mb = available_memory_mb // cores_per_machine
        memory_per_executor_mb = int(available_memory_per_core_mb * executor_cores)
        off_heap_mb = int(memory_per_executor_mb * args.off_heap_memory_fraction)
        on_heap_mb = memory_per_executor_mb - off_heap_mb
        off_heap_memory_per_core = off_heap_mb // executor_cores
        print(f"hailctl dataproc: Creating a cluster with workers of machine type {args.worker_machine_type}.\n"
              f"  Allocating {memory_per_executor_mb} MB of memory per executor ({executor_cores} cores),\n"
              f"  with {off_heap_mb} MB for Hail off-heap values and {on_heap_mb} MB for the JVM.\n"
              f"  Using a maximum Hail memory reservation of {off_heap_memory_per_core} MB per core.")
        conf.extend_flag('properties',
                         {
                             'yarn:yarn.nodemanager.resource.memory-mb': f'{available_memory_mb}',
                             'yarn:yarn.scheduler.maximum-allocation-mb': f'{executor_cores * available_memory_per_core_mb}',
                             'spark:spark.executor.cores': f'{executor_cores}',
                             'spark:spark.executor.memory': f'{on_heap_mb}m',
                             'spark:spark.executor.memoryOverhead': f'{off_heap_mb}m',
                             'spark:spark.memory.storageFraction': '0.2',
                             'spark:spark.executorEnv.HAIL_WORKER_OFF_HEAP_MEMORY_PER_CORE_MB': str(
                                 off_heap_memory_per_core),
                         }
                         )
    if args.region:
        conf.flags['region'] = args.region
    if args.zone:
        conf.flags['zone'] = args.zone
    conf.flags['initialization-action-timeout'] = args.init_timeout
    if args.network:
        conf.flags['network'] = args.network
    if args.configuration:
        conf.flags['configuration'] = args.configuration
    if args.project:
        conf.flags['project'] = args.project
    if args.bucket:
        conf.flags['bucket'] = args.bucket
    if args.scopes:
        conf.flags['scopes'] = args.scopes
    account = gcloud.get_config("account")
    if account:
        conf.flags['labels'] = 'creator=' + re.sub(r'[^0-9a-z_\-]', '_', account.lower())[:63]
    # rewrite metadata and properties to escape them
    conf.flags['metadata'] = '^|||^' + '|||'.join(f'{k}={v}' for k, v in conf.flags['metadata'].items())
    conf.flags['properties'] = '^|||^' + '|||'.join(f'{k}={v}' for k, v in conf.flags['properties'].items())
    # command to start cluster
    cmd = conf.get_command(args.name)
    # `--beta` is registered by the enclosing hailctl CLI, not init_parser
    # above; tolerate its absence. -- TODO confirm where the flag is defined
    if getattr(args, 'beta', False):
        cmd.insert(1, 'beta')
    if args.max_idle:
        cmd.append('--max-idle={}'.format(args.max_idle))
    if args.max_age:
        cmd.append('--max-age={}'.format(args.max_age))
    if args.expiration_time:
        # BUGFIX: gcloud flag names are dash-separated ('--expiration-time');
        # the previous underscore form is not a recognized gcloud flag.
        cmd.append('--expiration-time={}'.format(args.expiration_time))
    if args.service_account:
        cmd.append('--service-account={}'.format(args.service_account))
    cmd.extend(pass_through_args)
    # print underlying gcloud command
    print(' '.join(cmd[:5]) + ' \\\n    ' + ' \\\n    '.join(cmd[5:]))
    # spin up cluster
    if not args.dry_run:
        print("Starting cluster '{}'...".format(args.name))
        gcloud.run(cmd[1:])
    # optionally tag the master node after creation
    if args.master_tags:
        add_tags_command = ['compute', 'instances', 'add-tags', args.name + '-m', '--tags', args.master_tags]
        if args.project:
            add_tags_command.append(f"--project={args.project}")
        if args.zone:
            add_tags_command.append(f"--zone={args.zone}")
        print('gcloud ' + ' '.join(add_tags_command))
        if not args.dry_run:
            gcloud.run(add_tags_command)
| hail-is/hail | hail/python/hailtop/hailctl/dataproc/start.py | Python | mit | 20,216 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# channeltools - Herramientas para trabajar con canales
# ------------------------------------------------------------
import os
import re
import config
import jsontools
import logger
import scrapertools
import jsontools
def is_adult(channel_name):
    """Return True when the channel's XML marks it as adult content."""
    logger.info("pelisalacarta.core.channeltools is_adult channel_name="+channel_name)
    params = get_channel_parameters(channel_name)
    # The flag is stored as the literal string "true" in the channel XML.
    return params["adult"] == "true"
def get_channel_parameters(channel_name):
    """Read ``channels/<channel_name>.xml`` and return its metadata as a dict.

    Returns a dict with the channel's title, id, flags, artwork URLs and
    category list.  When the XML file does not exist, returns a minimal dict
    with only ``adult`` set to "false" so callers like is_adult() keep working.
    """
    #logger.info("pelisalacarta.core.channeltools get_channel_parameters channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name+".xml")
    if os.path.exists(channel_xml):
        #logger.info("pelisalacarta.core.channeltools get_channel_parameters "+channel_name+".xml found")
        # `with` guarantees the file is closed even if reading raises
        # (the original open/read/close leaked the handle on error).
        with open(channel_xml, "rb") as infile:
            data = infile.read()
        # TODO: Pendiente del json :)
        channel_parameters = {}
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")
        # find_multiple_matches already returns a list of the captured values;
        # no need to copy it element by element.
        channel_parameters["categories"] = list(
            scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>"))
        logger.info("pelisalacarta.core.channeltools get_channel_parameters "+channel_name+" -> "+repr(channel_parameters) )
    else:
        logger.info("pelisalacarta.core.channeltools get_channel_parameters "+channel_name+".xml NOT found")
        channel_parameters = dict()
        channel_parameters["adult"] = "false"
    return channel_parameters
def get_channel_json(channel_name):
    """Parse the channel's XML definition and return its 'channel' node."""
    #logger.info("pelisalacarta.core.channeltools get_channel_json channel_name="+channel_name)
    xml_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
    return jsontools.xmlTojson(xml_path)['channel']
def get_channel_controls_settings(channel_name):
    """Return (list_controls, dict_settings) for a channel's <settings> node.

    list_controls is the normalized list of setting controls from the XML;
    dict_settings maps each control id to its (type-converted) default value.
    NOTE: Python 2 code (has_key, unicode).
    """
    #logger.info("pelisalacarta.core.channeltools get_channel_controls_settings channel_name="+channel_name)
    dict_settings= {}
    list_controls=[]
    settings= get_channel_json(channel_name)['settings']
    # A single <settings> entry deserializes to a dict; wrap it in a list.
    if type(settings) == list:
        list_controls = settings
    else:
        list_controls.append(settings)
    # Convert string fields to bool/int, normalizing each control in place.
    for c in list_controls:
        if not c.has_key('id') or not c.has_key('type') or not c.has_key('default'):
            # Ignore any control that lacks id, type or default.
            continue
        # 'enabled' defaults to True; otherwise parse "true"/"false" strings.
        if not c.has_key('enabled') or c['enabled'] is None:
            c['enabled']= True
        else:
            if c['enabled'].lower() == "true":
                c['enabled'] = True
            elif c['enabled'].lower() == "false":
                c['enabled'] = False
        # 'visible' follows the same rule as 'enabled'.
        if not c.has_key('visible') or c['visible'] is None:
            c['visible']= True
        else:
            if c['visible'].lower() == "true":
                c['visible'] = True
            elif c['visible'].lower() == "false":
                c['visible'] = False
        if c['type'] == 'bool':
            c['default'] = (c['default'].lower() == "true")
        # Numeric defaults become ints (runs after the bool conversion, so
        # unicode(True)/"False" are never numeric — order matters here).
        if unicode(c['default']).isnumeric():
            c['default'] = int(c['default'])
        dict_settings[c['id']] = c['default']
    return list_controls, dict_settings
def get_channel_setting(name, channel):
    """
    Return the value of the requested configuration parameter.

    Returns the value of parameter 'name' from the channel-specific
    configuration of 'channel'.
    Looks for channel_data.json in \addon_data\plugin.video.pelisalacarta\settings_channels
    and reads the value of parameter 'name'. If channel_data.json does not exist,
    it reads the channel.xml file from the channels folder and creates a
    channel_data.json file before returning the requested value.
    Parameters:
    name -- parameter name
    channel -- channel name
    Returns:
    value -- the value of parameter 'name', or None if it does not exist
    """
    # Create the settings folder if it does not exist.
    if not os.path.exists(os.path.join(config.get_data_path(), "settings_channels")):
        os.mkdir(os.path.join(config.get_data_path(), "settings_channels"))
    file_settings= os.path.join(config.get_data_path(), "settings_channels", channel+"_data.json")
    dict_settings ={}
    if os.path.exists(file_settings):
        # Load the stored configuration from ../settings_channels/channel_data.json
        try:
            dict_file = jsontools.load_json(open(file_settings, "r").read())
            if dict_file.has_key('settings'):
                dict_settings = dict_file['settings']
        except EnvironmentError:
            logger.info("ERROR al leer el archivo: {0}".format(file_settings))
    if len(dict_settings) == 0 or not dict_settings.has_key(name):
        # Fall back to the control defaults from ../channels/channel.xml
        from core import channeltools
        try:
            list_controls, default_settings = channeltools.get_channel_controls_settings(channel)
        except:
            default_settings = {}
        if default_settings.has_key(name):  # If the parameter exists in channel.xml, (re)create channel_data.json
            # Stored values win over XML defaults.
            default_settings.update(dict_settings)
            dict_settings = default_settings
            dict_file = {}
            dict_file['settings']= dict_settings
            # Write the merged settings back to ../settings_channels/channel_data.json
            json_data = jsontools.dump_json(dict_file)
            try:
                open(file_settings, "w").write(json_data)
            except EnvironmentError:
                logger.info("[config.py] ERROR al salvar el archivo: {0}".format(file_settings))
    # Return the local parameter 'name' if it exists.
    if dict_settings.has_key(name):
        return dict_settings[name]
    else:
        return None
def set_channel_setting(name, value, channel):
    """
    Set the value of the indicated configuration parameter.

    Stores 'value' as the value of parameter 'name' in the channel-specific
    configuration of 'channel'.
    Returns the stored value, or None if the assignment could not be completed.
    Looks for channel_data.json in \addon_data\plugin.video.pelisalacarta\settings_channels
    and sets parameter 'name' to the value given by 'value'.
    If parameter 'name' does not exist, it is added, with its value, to the
    corresponding file.
    @param name: parameter name
    @type name: str
    @param value: parameter value
    @type value: str
    @param channel: channel name
    @type channel: str
    @return: 'value' if the value could be stored, None otherwise
    @rtype: str, None
    """
    # Create the settings folder if it does not exist.
    if not os.path.exists(os.path.join(config.get_data_path(), "settings_channels")):
        os.mkdir(os.path.join(config.get_data_path(), "settings_channels"))
    file_settings = os.path.join(config.get_data_path(), "settings_channels", channel+"_data.json")
    dict_settings = {}
    dict_file = None
    if os.path.exists(file_settings):
        # Load the stored configuration from ../settings_channels/channel_data.json
        try:
            dict_file = jsontools.load_json(open(file_settings, "r").read())
            dict_settings = dict_file.get('settings', {})
        except EnvironmentError:
            logger.info("ERROR al leer el archivo: {0}".format(file_settings))
    dict_settings[name] = value
    # Make sure dict_file exists and is a dict; otherwise create it.
    if dict_file is None or not dict_file:
        dict_file = {}
    dict_file['settings'] = dict_settings
    # Write the settings back to ../settings_channels/channel_data.json
    try:
        open(file_settings, "w").write(jsontools.dump_json(dict_file))
    except EnvironmentError:
        logger.info("[config.py] ERROR al salvar el archivo: {0}".format(file_settings))
        return None
    return value
def get_channel_module(channel_name, package = "channels"):
    """Import and return the module for a channel.

    Replaces the helper in servertools.py, but additionally allows importing
    from a package other than "channels".
    """
    if not package.endswith('.'):
        package += '.'
    qualified_name = package + channel_name
    logger.info("pelisalacarta.core.channeltools Importando " + qualified_name)
    # __import__ returns the top-level package; fetch the submodule from it.
    package_module = __import__(qualified_name)
    channel_module = getattr(package_module, channel_name)
    logger.info("pelisalacarta.core.channeltools Importado " + qualified_name)
    return channel_module
| ChopChopKodi/pelisalacarta | python/main-classic/core/channeltools.py | Python | gpl-3.0 | 10,879 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
from lxml import html
import re
from lltk.scraping import DictScraper, register
class LeoIt(DictScraper):
    """Scraper for Italian entries from the dict.leo.org IT<->DE dictionary."""

    def __init__(self, word):
        super(LeoIt, self).__init__(word)
        self.name = 'Leo.org'
        self.url = 'http://dict.leo.org/dictQuery/m-vocab/itde/de.html?lp=itde&search=%s' % self.word
        self.baseurl = 'http://dict.leo.org/itde/'
        self.language = 'it'

    def _normalize(self, string):
        ''' Returns a sanitized string. '''
        # Collapse non-breaking spaces, then trim surrounding whitespace.
        return string.replace(u'\xa0', ' ').strip()

    @DictScraper._needs_download
    def getelements(self):
        self.elements = []
        # Nouns: cells hold pipe-separated variants; keep those matching the word.
        for cell in self.tree.xpath('//div[contains(@id, "section-subst")]//td[contains(@lang, "it")]'):
            variants = [part.strip() for part in self._normalize(cell.text_content()).split('|')]
            self.elements += [part for part in variants if part.startswith(self.word)]
        # Adjectives/adverbs: keep every normalized cell as-is.
        for cell in self.tree.xpath('//div[contains(@id, "section-adjadv")]//td[contains(@lang, "it")]'):
            self.elements.append(self._normalize(cell.text_content()))
        # Verbs: tag matching cells so pos() can recognise them later.
        for cell in self.tree.xpath('//div[contains(@id, "section-verb")]//td[contains(@lang, "it")]'):
            entry = self._normalize(cell.text_content())
            if entry.startswith(self.word):
                self.elements.append(entry + ' [VERB]')

    @DictScraper._needs_elements
    def pos(self, element = None):
        ''' Tries to decide about the part of speech. '''
        tags = []
        if not element:
            # No element given: aggregate tags over all elements for this word.
            for candidate in self.elements:
                if candidate.startswith(self.word):
                    tags += self.pos(candidate)
        else:
            # NOTE(review): '[\w|\s]' is a character class, so '|' is matched
            # literally rather than acting as alternation — confirm intended.
            if re.search('[\w|\s]+ [m|f]\.', element, re.U):
                tags.append('NN')
            if '[VERB]' in element:
                tags.append('VB')
            if 'adj.' in element and re.search('([\w|\s]+, [\w|\s]+)', element, re.U):
                tags.append('JJ')
        return list(set(tags))

    @DictScraper._needs_elements
    def gender(self):
        ''' Tries to scrape the gender for a given noun from leo.org. '''
        noun = self._first('NN')
        # NOTE(review): the class '[m|f|n)]' also matches literal '|' and ')' —
        # looks unintended; verify against real page content before changing.
        if noun and re.search('([m|f|n)])\.', noun, re.U):
            return re.findall('([m|f|n)])\.', noun, re.U)[0]
register(LeoIt)
| lltk/lltk | lltk/it/scrapers/leo.py | Python | lgpl-3.0 | 2,175 |
from functools import wraps
import numpy
from theano import scalar as scal, Constant
from theano.gof import local_optimizer
from theano.tensor import (DimShuffle, get_scalar_constant_value,
NotScalarConstantError)
from .basic_ops import GpuFromHost, HostFromGpu, GpuAllocEmpty
from .elemwise import GpuDimShuffle, GpuElemwise
_one = scal.constant(numpy.asarray(1.0, dtype='float64'))
def grab_cpu_scalar(v, nd):
    """
    Get a scalar variable value from the tree at `v`.

    This function will dig through transfers and dimshuffles to get
    the constant value. If no such constant is found, it returns None.

    Parameters
    ----------
    v : variable
        Theano variable to extract the constant value from.
    nd : int
        Expected number of dimensions for the variable (for
        broadcasted constants).

    Returns
    -------
    variable or None
        A 0-d (scalar) variable, or None when no broadcasted constant
        could be found under the transfers/dimshuffles.
    """
    if v.owner is not None:
        n = v.owner
        # A dimshuffle that only adds broadcast dims: look at its input.
        if (isinstance(n.op, (GpuDimShuffle, DimShuffle)) and
                n.op.new_order == ('x',) * nd):
            return grab_cpu_scalar(n.inputs[0], n.inputs[0].ndim)
        # A host<->gpu transfer is transparent for this search.
        elif isinstance(n.op, (GpuFromHost, HostFromGpu)):
            return grab_cpu_scalar(n.inputs[0], nd)
        else:
            return None
    else:
        # Leaf variable: accept only a fully broadcastable constant,
        # collapsed to a scalar. Otherwise fall through (returns None).
        if (isinstance(v, Constant) and
                v.broadcastable == (True,) * nd):
            return v.dimshuffle(())
def find_node(v, cls, ignore_clients=False):
    """
    Find the node that has an op of type `cls` in `v`.

    This digs through possibly redundant transfers to find the node
    that has the type `cls`. If `ignore_clients` is False (the
    default) it will only dig through nodes that have a single client
    to avoid duplicating computations.

    Parameters
    ----------
    v : variable
        The variable to dig through
    cls : Op class
        The type of the node we are looking for
    ignore_clients : bool, optional
        Whether to ignore multiple clients or not.

    Returns
    -------
    apply node or None
    """
    if v.owner is not None and (ignore_clients or len(v.clients) == 1):
        if isinstance(v.owner.op, cls):
            return v.owner
        # Skip a gpu->host->gpu round trip (GpuFromHost(HostFromGpu(x)))
        # and continue the search from the original GPU variable.
        elif (isinstance(v.owner.op, GpuFromHost) and
              v.owner.inputs[0].owner is not None and
              (ignore_clients or len(v.owner.inputs[0].clients) == 1) and
              isinstance(v.owner.inputs[0].owner.op, HostFromGpu)):
            # NOTE(review): the recursive call does not forward
            # `ignore_clients` (it falls back to False) — confirm intended.
            return find_node(v.owner.inputs[0].owner.inputs[0], cls)
        else:
            return None
def is_equal(var, val):
    """
    Return True only if `var` is guaranteed to always equal `val`.

    When the variable cannot be proven constant, this conservatively
    answers False.

    Parameters
    ----------
    var : variable
        Variable to compare
    val : value
        Python value
    """
    try:
        # Constant-fold the variable; comparison only makes sense then.
        return get_scalar_constant_value(var) == val
    except NotScalarConstantError:
        return False
def alpha_merge(cls, alpha_in, beta_in):
    """
    Decorator to merge multiplication by a scalar on the output.

    This will find a pattern of scal * <yourop>(some, params, alpha,
    beta) and update it so that the scalar multiplication happens as
    part of your op.

    The op needs to accept an alpha and a beta scalar which act this way:

      out = Op() * alpha + out_like * beta

    Where out_like is a buffer that has the same size as the output
    and gets added to the "real" output of the operation. An example
    of an operation that respects this pattern is GEMM from blas.

    The decorated function must have this signature:

        maker(node, *inputs)

    The `node` argument you receive is the original apply node that
    contains your op.  You should use it to grab relevant properties
    for your op so that the new version performs the same computation.
    The `*inputs` parameters contains the new inputs for your op.  You
    MUST use those inputs instead of the ones on `node`.  Note that
    this function can be as simple as:

        def maker(node, *inputs):
            return node.op(*inputs)

    Parameters
    ----------
    cls : op class
        The class of the op you want to merge
    alpha_in : int
        The input index for the alpha scalar for your op (in node.inputs).
    beta_in : int
        The input index for the beta scalar for your op (in node.inputs).

    Returns
    -------
    This returns an unregistered local optimizer that has the same
    name as the decorated function.

    Notes
    -----
    This was factored out since the code to deal with intervening
    transfers and correctness in the presence of different values of
    alpha and beta scaling factors is not trivial.
    """
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            # Only rewrite a two-input elementwise multiplication.
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.mul and
                    node.nin == 2):
                # The target op may be on either side of the multiplication;
                # the scalar is then grabbed from the other side.
                targ = find_node(node.inputs[0], cls)
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    if targ is None:
                        return
                    lr = grab_cpu_scalar(node.inputs[0],
                                         nd=targ.outputs[0].ndim)
                else:
                    lr = grab_cpu_scalar(node.inputs[1],
                                         nd=targ.outputs[0].ndim)
                # Bail out when no scalar was found or dtypes differ.
                if lr is None or lr.dtype != targ.outputs[0].dtype:
                    return None
                inputs = list(targ.inputs)
                try:
                    # Special-case constant scalars 0 and 1 to keep the
                    # graph simple (avoid multiplying by a known value).
                    c = get_scalar_constant_value(lr)
                    if c == 0:
                        inputs[alpha_in] = lr
                        inputs[beta_in] = lr
                    elif c == 1:
                        inputs[alpha_in] = targ.inputs[alpha_in]
                        inputs[beta_in] = targ.inputs[beta_in]
                    else:
                        inputs[alpha_in] = lr * targ.inputs[alpha_in]
                        inputs[beta_in] = lr * targ.inputs[beta_in]
                except NotScalarConstantError:
                    # Non-constant scalar: scale both alpha and beta.
                    inputs[alpha_in] = lr * targ.inputs[alpha_in]
                    inputs[beta_in] = lr * targ.inputs[beta_in]
                return maker(targ, *inputs)
        return opt
    return wrapper
def output_merge(cls, alpha_in, beta_in, out_in):
    """
    Decorator to merge addition by a value on the output.

    This will find a pattern of val * <yourop>(some, params, alpha,
    beta, out_like) and update it so that the addition happens as
    part of your op.

    The op needs to accept an alpha and a beta scalar which act this way:

      out = Op() * alpha + out_like * beta

    Where out_like is a buffer that has the same size as the output
    and gets added to the "real" output of the operation. An example
    of an operation that respects this pattern is GEMM from blas.

    The decorated function must have this signature:

        maker(node, *inputs)

    The `node` argument you receive is the original apply node that
    contains your op.  You should use it to grab relevant properties
    for your op so that the new version performs the same computation.
    The `*inputs` parameters contains the new inputs for your op.  You
    MUST use those inputs instead of the ones on `node`.  Note that
    this function can be as simple as:

        def maker(node, *inputs):
            return node.op(*inputs)

    Parameters
    ----------
    cls : op class
        The class of the op you want to merge
    alpha_in : int
        The input index for the alpha scalar for your op (in node.inputs).
    beta_in : int
        The input index for the beta scalar for your op (in node.inputs).
    out_in : int
        The input index for the out_like input for your op (in node.inputs).

    Returns
    -------
    This returns an unregistered local optimizer that has the same
    name as the decorated function.

    Notes
    -----
    This was factored out since the code to deal with intervening
    transfers and correctness in the presence of different values of
    alpha and beta scaling factors is not trivial.

    This also correctly handles the case where the added value is
    broadcasted (by not performing the replacement).
    """
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            # Only rewrite a two-input elementwise addition.
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.add and
                    node.nin == 2):
                # The target op may be on either side; W is the other side.
                targ = find_node(node.inputs[0], cls)
                W = node.inputs[1]
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    W = node.inputs[0]
                if targ is None:
                    return None
                if W.dtype != targ.outputs[0].dtype:
                    return None
                if not is_equal(targ.inputs[beta_in], 0.0):
                    # other cases are too complex for now
                    return None
                if W.broadcastable != targ.inputs[out_in].broadcastable:
                    # Would need to explicitly tile the output to fill
                    # the full shape here.  Disable for now.
                    return None
                # Use W as the out_like buffer and set beta to 1 so the
                # addition is folded into the op itself.
                inputs = list(targ.inputs)
                inputs[out_in] = W
                inputs[beta_in] = _one.clone()
                return maker(targ, *inputs)
        return opt
    return wrapper
def inplace_allocempty(op, idx):
    """
    Wrapper to make an inplace optimization that deals with AllocEmpty

    This will duplicate the alloc input if it has more than one client
    to allow the op to work on it inplace.

    The decorated function must have this signature:

        maker(node, inputs)

    The `node` argument you receive is the original apply node that
    contains your op.  You should use it to grab relevant properties
    for your op so that the new version performs the same computation.
    You should also switch the op to work inplace.  The `*inputs`
    parameters contains the new inputs for your op.  You MUST use
    those inputs instead of the ones on `node`.  Note that this
    function can be as simple as:

        def maker(node, inputs):
            return [node.op.__class__(inplace=True)(*inputs)]

    Parameters
    ----------
    op : op class
        The op class to look for to make inplace
    idx : int
        The index of the (possibly) AllocEmpty input (in node.inputs).

    Returns
    -------
    This returns an unregistered inplace local optimizer that has the
    same name as the decorated function.
    """
    def wrapper(maker):
        @local_optimizer([op], inplace=True)
        @wraps(maker)
        def opt(node):
            # Exact type match only, and skip ops already made inplace.
            if type(node.op) != op or node.op.inplace:
                return
            inputs = list(node.inputs)
            alloc = inputs[idx]
            # If the AllocEmpty output is shared with other clients,
            # duplicate it so destroying it inplace stays safe.
            if (alloc.owner and
                    isinstance(alloc.owner.op, GpuAllocEmpty) and
                    len(alloc.clients) > 1):
                alloc_op = GpuAllocEmpty(alloc.owner.op.dtype,
                                         alloc.owner.op.context_name)
                inputs[idx] = alloc_op(*alloc.owner.inputs)
            return maker(node, inputs)
        return opt
    return wrapper
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/sandbox/gpuarray/opt_util.py | Python | gpl-2.0 | 11,439 |
#!/usr/bin/env python3
#
# Copyright (C) 2015 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import icebox
from icebox import re_match_cached
import getopt, sys, re, os
mode = None
def usage():
    """Print the list of supported -m modes and exit with status 0."""
    program_short_name = os.path.basename(sys.argv[0])
    for line in ("Usage:",
                 "  %s -m bitmaps" % program_short_name,
                 "  %s -m io_tile_nets_l" % program_short_name,
                 "  %s -m io_tile_nets_r" % program_short_name,
                 "  %s -m io_tile_nets_t" % program_short_name,
                 "  %s -m io_tile_nets_b" % program_short_name,
                 "  %s -m logic_tile_nets" % program_short_name,
                 "  %s -m ramb_tile_nets" % program_short_name,
                 "  %s -m ramt_tile_nets" % program_short_name):
        print(line)
    sys.exit(0)
# Parse the command line; the only recognised switch is "-m <mode>".
try:
    opts, args = getopt.getopt(sys.argv[1:], "m:")
except getopt.GetoptError:
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Only an option-parsing error should show usage.
    usage()

for o, a in opts:
    if o == "-m":
        mode = a.strip()
    else:
        usage()

# Positional arguments are not accepted.
if len(args) != 0:
    usage()
def get_bit_group(x, y, db):
bit = "B%d[%d]" % (y, x)
nbit = "!B%d[%d]" % (y, x)
funcs = set()
for entry in db:
if bit in entry[0] or nbit in entry[0]:
if entry[1] in ("IOB_0", "IOB_1", "IoCtrl"):
funcs.add("i")
elif entry[1] == "routing":
funcs.add("r")
elif entry[1] == "buffer":
funcs.add("b")
elif re_match_cached("LC_", entry[1]):
funcs.add("l")
elif entry[1] == "NegClk":
funcs.add("N")
elif entry[1] == "ColBufCtrl":
funcs.add("o")
elif entry[1] == "CarryInSet":
funcs.add("C")
elif entry[1] == "Cascade":
funcs.add("a")
else:
funcs.add("?")
if len(funcs) == 1:
return funcs.pop()
if len(funcs) > 1:
return "X"
return "-"
def print_tilemap(stmt, db, n):
    """Print a 16-row bitmap legend for an n-column tile database."""
    print()
    print(stmt)
    for row in range(16):
        # One legend character per bit column, emitted as a single line.
        print("".join(get_bit_group(col, row, db) for col in range(n)))
def print_db_nets(stmt, db, pos):
    # Print a sorted, prefix-compressed list of the net names reachable at
    # tile position `pos` according to the routing/buffer entries in `db`.
    print()
    print(stmt, end="")
    netnames = set()
    for entry in db:
        if entry[1] in ("routing", "buffer"):
            # entry[2]/entry[3] are the source and destination net names.
            if icebox.pos_has_net(pos[0], entry[2]): netnames.add(entry[2])
            if icebox.pos_has_net(pos[0], entry[3]): netnames.add(entry[3])
    last_prefix = ""
    for net in sorted(netnames, key=icebox.key_netname):
        match = re_match_cached(r"(.*?)(\d+)$", net)
        if match:
            if last_prefix == match.group(1):
                # Same prefix as the previous net: emit only ",<number>".
                print(",%s" % match.group(2), end="")
            else:
                print()
                print(net, end="")
                last_prefix = match.group(1)
        else:
            # Net without a numeric suffix: never compress against it.
            print()
            print(net, end="")
            last_prefix = "*"
    print()
# Dispatch on the requested report mode.
if mode == "bitmaps":
    print_tilemap(".io_tile_bitmap_l", icebox.iotile_l_db, 18)
    print_tilemap(".io_tile_bitmap_r", icebox.iotile_r_db, 18)
    print_tilemap(".io_tile_bitmap_t", icebox.iotile_t_db, 18)
    print_tilemap(".io_tile_bitmap_b", icebox.iotile_b_db, 18)
    print_tilemap(".logic_tile_bitmap", icebox.logictile_db, 54)
    print_tilemap(".ramb_tile_bitmap", icebox.rambtile_db, 42)
    print_tilemap(".ramt_tile_bitmap", icebox.ramttile_db, 42)
    print()
    print(".bitmap_legend")
    print("- ... unknown bit")
    print("? ... unknown bit type")
    print("X ... database conflict")
    print("i ... IOB_0 IOB_1 IoCtrl")
    print("a ... Carry_In_Mux Cascade")
    print("r ... routing")
    print("b ... buffer")
    print("l ... logic bits")
    print("o ... ColBufCtrl")
    print("C ... CarryInSet")
    print("N ... NegClk")
    print()
elif mode == "io_tile_nets_l":
    print_db_nets(".io_tile_nets_l", icebox.iotile_l_db, "l")
elif mode == "io_tile_nets_r":
    print_db_nets(".io_tile_nets_r", icebox.iotile_r_db, "r")
elif mode == "io_tile_nets_t":
    print_db_nets(".io_tile_nets_t", icebox.iotile_t_db, "t")
elif mode == "io_tile_nets_b":
    print_db_nets(".io_tile_nets_b", icebox.iotile_b_db, "b")
elif mode == "logic_tile_nets":
    print_db_nets(".logic_tile_nets", icebox.logictile_db, "c")
elif mode == "ramb_tile_nets":
    # NOTE(review): both ram net modes use icebox.ramtile_db, while the
    # bitmap mode above uses rambtile_db/ramttile_db — confirm intended.
    print_db_nets(".ramb_tile_nets", icebox.ramtile_db, "c")
elif mode == "ramt_tile_nets":
    print_db_nets(".ramt_tile_nets", icebox.ramtile_db, "c")
else:
    usage()
| SymbiFlow/icestorm | icebox/icebox_maps.py | Python | isc | 5,109 |
import mock
from nose import tools as nt
from django.test import RequestFactory
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Permission
from tests.base import AdminTestCase
from tests.factories import AuthUserFactory, ProjectFactory
from osf.models import AdminLogEntry, OSFUser, Node, NodeLog
from admin_tests.utilities import setup_log_view, setup_view
from admin.nodes.views import (NodeDeleteView, NodeRemoveContributorView, NodeView)
class TestNodeView(AdminTestCase):
    """Tests for the admin NodeView detail page."""

    def test_no_guid(self):
        # Without a guid kwarg the view cannot resolve its object.
        request = RequestFactory().get('/fake_path')
        view = NodeView()
        view = setup_view(view, request)
        with nt.assert_raises(AttributeError):
            view.get_object()

    def test_load_data(self):
        # get_object() serializes the node into a plain dict.
        node = ProjectFactory()
        guid = node._id
        request = RequestFactory().get('/fake_path')
        view = NodeView()
        view = setup_view(view, request, guid=guid)
        res = view.get_object()
        nt.assert_is_instance(res, dict)

    def test_name_data(self):
        # The serialized object is exposed under the view's context name.
        node = ProjectFactory()
        guid = node._id
        request = RequestFactory().get('/fake_path')
        view = NodeView()
        view = setup_view(view, request, guid=guid)
        temp_object = view.get_object()
        view.object = temp_object
        res = view.get_context_data()
        nt.assert_equal(res[NodeView.context_object_name], temp_object)

    def test_no_user_permissions_raises_error(self):
        # A user without the view_node permission is rejected.
        user = AuthUserFactory()
        node = ProjectFactory()
        guid = node._id
        request = RequestFactory().get(reverse('nodes:node', kwargs={'guid': guid}))
        request.user = user
        with nt.assert_raises(PermissionDenied):
            NodeView.as_view()(request, guid=guid)

    def test_correct_view_permissions(self):
        # Granting view_node lets the request through (HTTP 200).
        user = AuthUserFactory()
        node = ProjectFactory()
        guid = node._id
        change_permission = Permission.objects.get(codename='view_node')
        user.user_permissions.add(change_permission)
        user.save()
        request = RequestFactory().get(reverse('nodes:node', kwargs={'guid': guid}))
        request.user = user
        response = NodeView.as_view()(request, guid=guid)
        nt.assert_equal(response.status_code, 200)
class TestNodeDeleteView(AdminTestCase):
    """Tests for the admin NodeDeleteView (soft-delete / restore toggle)."""

    def setUp(self):
        super(TestNodeDeleteView, self).setUp()
        self.node = ProjectFactory()
        self.request = RequestFactory().post('/fake_path')
        self.plain_view = NodeDeleteView
        self.view = setup_log_view(self.plain_view(), self.request,
                                   guid=self.node._id)
        self.url = reverse('nodes:remove', kwargs={'guid': self.node._id})

    def test_get_object(self):
        obj = self.view.get_object()
        nt.assert_is_instance(obj, Node)

    def test_get_context(self):
        res = self.view.get_context_data(object=self.node)
        nt.assert_in('guid', res)
        nt.assert_equal(res.get('guid'), self.node._id)

    def test_remove_node(self):
        # Deleting marks the node and writes one admin log entry.
        count = AdminLogEntry.objects.count()
        self.view.delete(self.request)
        self.node.refresh_from_db()
        nt.assert_true(self.node.is_deleted)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    def test_restore_node(self):
        # delete() on an already-deleted node restores it (toggle semantics).
        self.view.delete(self.request)
        self.node.refresh_from_db()
        nt.assert_true(self.node.is_deleted)
        count = AdminLogEntry.objects.count()
        self.view.delete(self.request)
        self.node.reload()
        nt.assert_false(self.node.is_deleted)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    def test_no_user_permissions_raises_error(self):
        user = AuthUserFactory()
        guid = self.node._id
        request = RequestFactory().get(self.url)
        request.user = user
        with nt.assert_raises(PermissionDenied):
            self.plain_view.as_view()(request, guid=guid)

    def test_correct_view_permissions(self):
        # delete_node AND view_node are both required to reach the page.
        user = AuthUserFactory()
        guid = self.node._id
        change_permission = Permission.objects.get(codename='delete_node')
        view_permission = Permission.objects.get(codename='view_node')
        user.user_permissions.add(change_permission)
        user.user_permissions.add(view_permission)
        user.save()
        request = RequestFactory().get(self.url)
        request.user = user
        response = self.plain_view.as_view()(request, guid=guid)
        nt.assert_equal(response.status_code, 200)
class TestRemoveContributor(AdminTestCase):
    """Tests for NodeRemoveContributorView (admin-side contributor removal)."""

    def setUp(self):
        super(TestRemoveContributor, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.user_2 = AuthUserFactory()
        self.node.add_contributor(self.user_2)
        self.node.save()
        self.view = NodeRemoveContributorView
        self.request = RequestFactory().post('/fake_path')
        self.url = reverse('nodes:remove_user', kwargs={'node_id': self.node._id, 'user_id': self.user._id})

    def test_get_object(self):
        # get_object() returns the (node, user) pair being acted on.
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user._id)
        node, user = view.get_object()
        nt.assert_is_instance(node, Node)
        nt.assert_is_instance(user, OSFUser)

    @mock.patch('admin.nodes.views.Node.remove_contributor')
    def test_remove_contributor(self, mock_remove_contributor):
        # The view delegates to Node.remove_contributor with log=False.
        user_id = self.user_2._id
        node_id = self.node._id
        view = setup_log_view(self.view(), self.request, node_id=node_id,
                              user_id=user_id)
        view.delete(self.request)
        mock_remove_contributor.assert_called_with(self.user_2, None, log=False)

    def test_integration_remove_contributor(self):
        # Unmocked: the contributor is removed and an admin log is written.
        nt.assert_in(self.user_2, self.node.contributors)
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user_2._id)
        count = AdminLogEntry.objects.count()
        view.delete(self.request)
        nt.assert_not_in(self.user_2, self.node.contributors)
        nt.assert_equal(AdminLogEntry.objects.count(), count + 1)

    def test_do_not_remove_last_admin(self):
        # Removing the only admin must be a no-op (no change, no log entry).
        nt.assert_equal(
            len(list(self.node.get_admin_contributors(self.node.contributors))),
            1
        )
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user._id)
        count = AdminLogEntry.objects.count()
        view.delete(self.request)
        self.node.reload()  # Reloads instance to show that nothing was removed
        nt.assert_equal(len(list(self.node.contributors)), 2)
        nt.assert_equal(
            len(list(self.node.get_admin_contributors(self.node.contributors))),
            1
        )
        nt.assert_equal(AdminLogEntry.objects.count(), count)

    def test_no_log(self):
        # Admin removal must not add a CONTRIB_REMOVED entry to the node log.
        view = setup_log_view(self.view(), self.request, node_id=self.node._id,
                              user_id=self.user_2._id)
        view.delete(self.request)
        nt.assert_not_equal(self.node.logs.latest().action, NodeLog.CONTRIB_REMOVED)

    def test_no_user_permissions_raises_error(self):
        guid = self.node._id
        request = RequestFactory().get(self.url)
        request.user = self.user
        with nt.assert_raises(PermissionDenied):
            self.view.as_view()(request, node_id=guid, user_id=self.user)

    def test_correct_view_permissions(self):
        # change_node AND view_node are both required to reach the page.
        change_permission = Permission.objects.get(codename='change_node')
        view_permission = Permission.objects.get(codename='view_node')
        self.user.user_permissions.add(change_permission)
        self.user.user_permissions.add(view_permission)
        self.user.save()
        request = RequestFactory().get(self.url)
        request.user = self.user
        response = self.view.as_view()(request, node_id=self.node._id, user_id=self.user._id)
        nt.assert_equal(response.status_code, 200)
| hmoco/osf.io | admin_tests/nodes/test_views.py | Python | apache-2.0 | 8,170 |
"""
Streamline plotting like Mathematica.
Copyright (c) 2011 Tom Flannaghan.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
version = '4'
import numpy
import pylab
import matplotlib
import matplotlib.patches as mpp
def streamplot(x, y, u, v, density=1, linewidth=1,
               color='k', cmap=None, norm=None, vmax=None, vmin=None,
               arrowsize=1, INTEGRATOR='RK4'):
    '''Draws streamlines of a vector flow.

    * x and y are 1d arrays defining an *evenly spaced* grid.
    * u and v are 2d arrays (shape [y,x]) giving velocities.
    * density controls the closeness of the streamlines. For different
      densities in each direction, use a tuple or list [densityx, densityy].
    * linewidth is either a number (uniform lines) or a 2d array
      (variable linewidth).
    * color is either a color code (of any kind) or a 2d array. This is
      then transformed into color by the cmap, norm, vmin and vmax args.
      A value of None gives the default for each.

    INTEGRATOR is experimental. Currently, RK4 should be used.

    NOTE(review): this module predates matplotlib's own streamplot and is
    written for Python 2 — several `/` divisions below rely on integer
    division (flagged inline); confirm before running under Python 3.
    '''
    ## Sanity checks.
    assert len(x.shape)==1
    assert len(y.shape)==1
    assert u.shape == (len(y), len(x))
    assert v.shape == (len(y), len(x))
    if type(linewidth) == numpy.ndarray:
        assert linewidth.shape == (len(y), len(x))
    if type(color) == numpy.ndarray:
        assert color.shape == (len(y), len(x))

    ## Set up some constants - size of the grid used.
    NGX = len(x)
    NGY = len(y)
    ## Constants used to convert between grid index coords and user coords.
    DX = x[1]-x[0]
    DY = y[1]-y[0]
    XOFF = x[0]
    YOFF = y[0]

    ## Now rescale velocity onto axes-coordinates
    u = u / (x[-1]-x[0])
    v = v / (y[-1]-y[0])
    speed = numpy.sqrt(u*u+v*v)
    ## s (path length) will now be in axes-coordinates, but we must
    ## rescale u for integrations.
    u *= NGX
    v *= NGY
    ## Now u and v in grid-coordinates.

    ## Blank array: This is the heart of the algorithm. It begins life
    ## zeroed, but is set to one when a streamline passes through each
    ## box. Then streamlines are only allowed to pass through zeroed
    ## boxes. The lower resolution of this grid determines the
    ## approximate spacing between trajectories.
    if type(density) == float or type(density) == int:
        assert density > 0
        NBX = int(30*density)
        NBY = int(30*density)
    else:
        assert len(density) > 0
        NBX = int(30*density[0])
        NBY = int(30*density[1])
    blank = numpy.zeros((NBY,NBX))

    ## Constants for conversion between grid-index space and
    ## blank-index space
    bx_spacing = NGX/float(NBX-1)
    by_spacing = NGY/float(NBY-1)

    def blank_pos(xi, yi):
        ## Takes grid space coords and returns nearest space in
        ## the blank array.
        return int((xi / bx_spacing) + 0.5), \
               int((yi / by_spacing) + 0.5)

    def value_at(a, xi, yi):
        ## Linear interpolation - nice and quick because we are
        ## working in grid-index coordinates.
        ## NOTE(review): numpy.int is removed in NumPy >= 1.24; this only
        ## runs on old NumPy versions as written.
        if type(xi) == numpy.ndarray:
            x = xi.astype(numpy.int)
            y = yi.astype(numpy.int)
        else:
            x = numpy.int(xi)
            y = numpy.int(yi)
        # Bilinear blend of the four surrounding grid samples.
        a00 = a[y,x]
        a01 = a[y,x+1]
        a10 = a[y+1,x]
        a11 = a[y+1,x+1]
        xt = xi - x
        yt = yi - y
        a0 = a00*(1-xt) + a01*xt
        a1 = a10*(1-xt) + a11*xt
        return a0*(1-yt) + a1*yt

    def rk4_integrate(x0, y0):
        ## This function does RK4 forward and back trajectories from
        ## the initial conditions, with the odd 'blank array'
        ## termination conditions. TODO tidy the integration loops.

        def f(xi, yi):
            # Unit-speed (arc-length parametrized) velocity field.
            dt_ds = 1./value_at(speed, xi, yi)
            ui = value_at(u, xi, yi)
            vi = value_at(v, xi, yi)
            return ui*dt_ds, vi*dt_ds

        def g(xi, yi):
            # Same field reversed, for the backward half of the trajectory.
            dt_ds = 1./value_at(speed, xi, yi)
            ui = value_at(u, xi, yi)
            vi = value_at(v, xi, yi)
            return -ui*dt_ds, -vi*dt_ds

        # True while (xi, yi) stays strictly inside the grid domain.
        check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1

        # Blank-array cells claimed by this trajectory; rolled back on reject.
        bx_changes = []
        by_changes = []

        ## Integrator function
        def rk4(x0, y0, f):
            ds = 0.01 #min(1./NGX, 1./NGY, 0.01)
            stotal = 0
            xi = x0
            yi = y0
            xb, yb = blank_pos(xi, yi)
            xf_traj = []
            yf_traj = []
            while check(xi, yi):
                # Time step. First save the point.
                xf_traj.append(xi)
                yf_traj.append(yi)
                # Next, advance one using RK4
                try:
                    k1x, k1y = f(xi, yi)
                    k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
                    k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
                    k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
                except IndexError:
                    # Out of the domain on one of the intermediate steps
                    break
                xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
                yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
                # Final position might be out of the domain
                if not check(xi, yi): break
                stotal += ds
                # Next, if s gets to thres, check blank.
                new_xb, new_yb = blank_pos(xi, yi)
                if new_xb != xb or new_yb != yb:
                    # New square, so check and colour. Quit if required.
                    if blank[new_yb,new_xb] == 0:
                        blank[new_yb,new_xb] = 1
                        bx_changes.append(new_xb)
                        by_changes.append(new_yb)
                        xb = new_xb
                        yb = new_yb
                    else:
                        break
                if stotal > 2:
                    break
            return stotal, xf_traj, yf_traj

        ## Alternative Integrator function
        ## RK45 does not really help in it's current state. The
        ## resulting trajectories are accurate but low-resolution in
        ## regions of high curvature and thus fairly ugly. Maybe a
        ## curvature based cap on the maximum ds permitted is the way
        ## forward.
        def rk45(x0, y0, f):
            maxerror = 0.001
            maxds = 0.03
            ds = 0.03
            stotal = 0
            xi = x0
            yi = y0
            xb, yb = blank_pos(xi, yi)
            xf_traj = []
            yf_traj = []
            while check(xi, yi):
                # Time step. First save the point.
                xf_traj.append(xi)
                yf_traj.append(yi)
                # Next, advance one using RK45
                try:
                    # Runge-Kutta-Fehlberg stage evaluations.
                    k1x, k1y = f(xi, yi)
                    k2x, k2y = f(xi + .25*ds*k1x,
                                 yi + .25*ds*k1y)
                    k3x, k3y = f(xi + 3./32*ds*k1x + 9./32*ds*k2x,
                                 yi + 3./32*ds*k1y + 9./32*ds*k2y)
                    k4x, k4y = f(xi + 1932./2197*ds*k1x - 7200./2197*ds*k2x + 7296./2197*ds*k3x,
                                 yi + 1932./2197*ds*k1y - 7200./2197*ds*k2y + 7296./2197*ds*k3y)
                    k5x, k5y = f(xi + 439./216*ds*k1x - 8*ds*k2x + 3680./513*ds*k3x - 845./4104*ds*k4x,
                                 yi + 439./216*ds*k1y - 8*ds*k2y + 3680./513*ds*k3y - 845./4104*ds*k4y)
                    k6x, k6y = f(xi - 8./27*ds*k1x + 2*ds*k2x - 3544./2565*ds*k3x + 1859./4104*ds*k4x - 11./40*ds*k5x,
                                 yi - 8./27*ds*k1y + 2*ds*k2y - 3544./2565*ds*k3y + 1859./4104*ds*k4y - 11./40*ds*k5y)
                except IndexError:
                    # Out of the domain on one of the intermediate steps
                    break
                # 4th- and 5th-order estimates; their gap is the local error.
                dx4 = ds*(25./216*k1x + 1408./2565*k3x + 2197./4104*k4x - 1./5*k5x)
                dy4 = ds*(25./216*k1y + 1408./2565*k3y + 2197./4104*k4y - 1./5*k5y)
                dx5 = ds*(16./135*k1x + 6656./12825*k3x + 28561./56430*k4x - 9./50*k5x + 2./55*k6x)
                dy5 = ds*(16./135*k1y + 6656./12825*k3y + 28561./56430*k4y - 9./50*k5y + 2./55*k6y)

                ## Error is normalized to the axes coordinates (it's a distance)
                error = numpy.sqrt(((dx5-dx4)/NGX)**2 + ((dy5-dy4)/NGY)**2)
                if error < maxerror:
                    # Step is within tolerance so continue
                    xi += dx5
                    yi += dy5
                    # Final position might be out of the domain
                    if not check(xi, yi): break
                    stotal += ds
                    # Next, if s gets to thres, check blank.
                    new_xb, new_yb = blank_pos(xi, yi)
                    if new_xb != xb or new_yb != yb:
                        # New square, so check and colour. Quit if required.
                        if blank[new_yb,new_xb] == 0:
                            blank[new_yb,new_xb] = 1
                            bx_changes.append(new_xb)
                            by_changes.append(new_yb)
                            xb = new_xb
                            yb = new_yb
                        else:
                            break
                    if stotal > 2:
                        break
                # Modify ds for the next iteration.
                if len(xf_traj) > 2:
                    ## hacky curvature dependance:
                    v1 = numpy.array((xf_traj[-1]-xf_traj[-2], yf_traj[-1]-yf_traj[-2]))
                    v2 = numpy.array((xf_traj[-2]-xf_traj[-3], yf_traj[-2]-yf_traj[-3]))
                    costheta = (v1/numpy.sqrt((v1**2).sum()) * v2/numpy.sqrt((v2**2).sum())).sum()
                    if costheta < .8:
                        ds = .01
                        continue
                ds = min(maxds, 0.85*ds*(maxerror/error)**.2)
            return stotal, xf_traj, yf_traj

        ## Forward and backward trajectories
        if INTEGRATOR == 'RK4':
            integrator = rk4
        elif INTEGRATOR == 'RK45':
            integrator = rk45

        sf, xf_traj, yf_traj = integrator(x0, y0, f)
        sb, xb_traj, yb_traj = integrator(x0, y0, g)
        stotal = sf + sb
        # Stitch backward (reversed) and forward halves at the seed point.
        x_traj = xb_traj[::-1] + xf_traj[1:]
        y_traj = yb_traj[::-1] + yf_traj[1:]

        ## Tests to check length of traj. Remember, s in units of axes.
        if len(x_traj) < 1: return None
        if stotal > .2:
            initxb, inityb = blank_pos(x0, y0)
            blank[inityb, initxb] = 1
            return x_traj, y_traj
        else:
            # Trajectory too short: release the blank cells it claimed.
            for xb, yb in zip(bx_changes, by_changes):
                blank[yb, xb] = 0
            return None

    ## A quick function for integrating trajectories if blank==0.
    trajectories = []
    def traj(xb, yb):
        if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
            return
        if blank[yb, xb] == 0:
            t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
            if t != None:
                trajectories.append(t)

    ## Now we build up the trajectory set. I've found it best to look
    ## for blank==0 along the edges first, and work inwards.
    ## NOTE(review): integer division — Python 2 only; under Python 3
    ## range() would receive a float here.
    for indent in range((max(NBX,NBY))/2):
        for xi in range(max(NBX,NBY)-2*indent):
            traj(xi+indent, indent)
            traj(xi+indent, NBY-1-indent)
            traj(indent, xi+indent)
            traj(NBX-1-indent, xi+indent)

    ## PLOTTING HERE.
    #pylab.pcolormesh(numpy.linspace(x.min(), x.max(), NBX+1),
    #                 numpy.linspace(y.min(), y.max(), NBY+1), blank)

    # Load up the defaults - needed to get the color right.
    if type(color) == numpy.ndarray:
        if vmin == None: vmin = color.min()
        if vmax == None: vmax = color.max()
        ## NOTE(review): matplotlib.colors.normalize (lowercase class) was
        ## removed in modern matplotlib; this only works on old releases.
        if norm == None: norm = matplotlib.colors.normalize
        if cmap == None: cmap = matplotlib.cm.get_cmap(
            matplotlib.rcParams['image.cmap'])

    for t in trajectories:
        # Finally apply the rescale to adjust back to user-coords from
        # grid-index coordinates.
        tx = numpy.array(t[0])*DX+XOFF
        ty = numpy.array(t[1])*DY+YOFF

        tgx = numpy.array(t[0])
        tgy = numpy.array(t[1])

        points = numpy.array([tx, ty]).T.reshape(-1,1,2)
        segments = numpy.concatenate([points[:-1], points[1:]], axis=1)

        args = {}
        # Per-segment linewidth/colour sampled along the trajectory; the
        # midpoint value is reused for the arrow head.
        # NOTE(review): len(tgx)/2 is integer division — Python 2 only.
        if type(linewidth) == numpy.ndarray:
            args['linewidth'] = value_at(linewidth, tgx, tgy)[:-1]
            arrowlinewidth = args['linewidth'][len(tgx)/2]
        else:
            args['linewidth'] = linewidth
            arrowlinewidth = linewidth

        if type(color) == numpy.ndarray:
            args['color'] = cmap(norm(vmin=vmin,vmax=vmax)
                                 (value_at(color, tgx, tgy)[:-1]))
            arrowcolor = args['color'][len(tgx)/2]
        else:
            args['color'] = color
            arrowcolor = color

        lc = matplotlib.collections.LineCollection\
             (segments, **args)
        pylab.gca().add_collection(lc)

        ## Add arrows half way along each trajectory.
        n = len(tx)/2
        p = mpp.FancyArrowPatch((tx[n],ty[n]), (tx[n+1],ty[n+1]),
                                arrowstyle='->', lw=arrowlinewidth,
                                mutation_scale=20*arrowsize, color=arrowcolor)
        pylab.gca().add_patch(p)

    pylab.xlim(x.min(), x.max())
    pylab.ylim(y.min(), y.max())
    return
def test():
    """Render the two demo stream plots from the original pylab example.

    Left panel: plain blue streamlines; right panel: streamlines coloured by
    u and scaled in width by local speed.
    """
    pylab.figure(1)
    xs = numpy.linspace(-3, 3, 100)
    ys = numpy.linspace(-3, 3, 100)
    u = -1 - xs**2 + ys[:, numpy.newaxis]
    v = 1 + xs - ys[:, numpy.newaxis]**2
    speed = numpy.sqrt(u * u + v * v)
    pylab.subplot(121)
    streamplot(xs, ys, u, v, density=1, INTEGRATOR='RK4', color='b')
    pylab.subplot(122)
    streamplot(xs, ys, u, v, density=(1, 1), INTEGRATOR='RK4', color=u,
               linewidth=5 * speed / speed.max())
    pylab.show()
if __name__ == '__main__':
    test()
| juselius/gimic | src/pygimic/streamplot.py | Python | gpl-2.0 | 15,026 |
"""Support for Axis devices."""
import logging
from homeassistant.const import CONF_DEVICE, EVENT_HOMEASSISTANT_STOP
from .const import DOMAIN as AXIS_DOMAIN
from .device import AxisNetworkDevice
LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """Legacy YAML setup hook; Axis is configured via config entries only."""
    return True
async def async_setup_entry(hass, config_entry):
    """Set up the Axis component.

    Creates an AxisNetworkDevice for the config entry, stores it in
    hass.data keyed by the entry's unique id, and registers a shutdown
    listener. Returns False if the device fails to set up.
    """
    hass.data.setdefault(AXIS_DOMAIN, {})
    device = AxisNetworkDevice(hass, config_entry)
    if not await device.async_setup():
        return False
    # 0.104 introduced config entry unique id, this makes upgrading possible
    if config_entry.unique_id is None:
        hass.config_entries.async_update_entry(
            config_entry, unique_id=device.api.vapix.serial_number
        )
    hass.data[AXIS_DOMAIN][config_entry.unique_id] = device
    await device.async_update_device_registry()
    # Ensure the device connection is closed when Home Assistant stops.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.shutdown)
    return True
async def async_unload_entry(hass, config_entry):
    """Tear down an Axis config entry and release its stored device."""
    domain_devices = hass.data[AXIS_DOMAIN]
    device = domain_devices.pop(config_entry.unique_id)
    return await device.async_reset()
async def async_migrate_entry(hass, config_entry):
    """Bring an old Axis config entry up to the current schema version."""
    LOGGER.debug("Migrating from version %s", config_entry.version)
    if config_entry.version == 1:
        # Flatten configuration but keep old data if user rollbacks HASS
        flattened = dict(config_entry.data)
        flattened.update(config_entry.data[CONF_DEVICE])
        config_entry.data = flattened
        config_entry.version = 2
    LOGGER.info("Migration to version %s successful", config_entry.version)
    return True
| tchellomello/home-assistant | homeassistant/components/axis/__init__.py | Python | apache-2.0 | 1,685 |
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from rest_framework.viewsets import ReadOnlyModelViewSet
from amcat.models import Sentence
from amcat.tools import sbd
from api.rest.mixins import DatatablesMixin
from api.rest.serializer import AmCATModelSerializer
from api.rest.viewset import AmCATViewSetMixin
__all__ = ("SentenceSerializer", "SentenceViewSetMixin", "SentenceViewSet")
class SentenceSerializer(AmCATModelSerializer):
    """Serializer exposing Sentence objects through the REST API."""
    model = Sentence
class SentenceViewSetMixin(AmCATViewSetMixin):
    """Mixin wiring the Sentence model and serializer into AmCAT viewsets."""
    model_serializer_class = SentenceSerializer
    model_key = "sentence"
    model = Sentence
class SentenceViewSet(SentenceViewSetMixin, DatatablesMixin, ReadOnlyModelViewSet):
    """Read-only viewset listing the sentences of a single article."""
    model = Sentence
    def filter_queryset(self, queryset):
        """Restrict the queryset to sentences belonging to self.article.

        NOTE(review): self.article is presumably provided by an enclosing
        article viewset mixin -- confirm. Sentences are created on demand
        via sbd.get_or_create_sentences if the article has none yet.
        """
        qs = super(SentenceViewSet, self).filter_queryset(queryset)
        return qs.filter(article=self.article, id__in=sbd.get_or_create_sentences(self.article))
| tschmorleiz/amcat | api/rest/viewsets/sentence.py | Python | agpl-3.0 | 2,235 |
from lib import action
class VaultIsInitializedAction(action.VaultBaseAction):
    """StackStorm action reporting whether the Vault server is initialized."""
    def run(self):
        # self.vault is the client prepared by VaultBaseAction; delegate
        # the initialization check to it and return its boolean result.
        return self.vault.is_initialized()
| pidah/st2contrib | packs/vault/actions/is_initialized.py | Python | apache-2.0 | 143 |
import zipfile
import io
from datetime import datetime
from util.ncconv.experimental.ocg_converter.subocg_converter import SubOcgConverter
#from xml.sax.saxutils import escape
class KmlConverter(SubOcgConverter):
    '''Converts data to a KML string'''
    def _convert_(self,request):
        """Render the subsetted data as a KML document string.

        Builds a metadata description table from the request's OCG metadata,
        assembles the KML document (styles + one Placemark per geometry with
        its time series embedded as CSV for the Dygraphs balloon), and
        returns the pretty-printed XML string.
        """
        from pykml.factory import KML_ElementMaker as KML
        from lxml import etree
        ## create the database
        if self.use_stat:
            raise(NotImplementedError)
        else:
            db = self.sub.to_db(wkt=True,to_disk=True)
        meta = request.ocg
        # Omit the port from generated URLs when serving on the default port.
        if request.environ['SERVER_PORT']=='80':
            portstr = ''
        else:
            portstr = ':{port}'.format(port=request.environ['SERVER_PORT'])
        url='{protocol}://{server}{port}{path}'.format(
            protocol='http',
            port=portstr,
            server=request.environ['SERVER_NAME'],
            path=request.environ['PATH_INFO'],
        )
        # HTML metadata table shown in the document-level balloon.
        # NOTE(review): 'simout' and 'operation' are passed to format() but
        # never referenced in the template -- harmless, but confirm intent.
        description = (
            '<table border="1">'
            '<tbody>'
            '<tr><th>Archive</th><td>{archive}</td></tr>'
            '<tr><th>Emissions Scenario</th><td>{scenario}</td></tr>'
            '<tr><th>Climate Model</th><td>{model}</td></tr>'
            '<tr><th>Run</th><td>{run}</td></tr>'
            '<tr><th>Output Variable</th><td>{variable}</td></tr>'
            '<tr><th>Units</th><td>{units}</td></tr>'
            '<tr><th>Start Time</th><td>{start}</td></tr>'
            '<tr><th>End Time</th><td>{end}</td></tr>'
            '<tr>'
            '<th>Request URL</th>'
            '<td><a href="{url}">{url}</a></td>'
            '</tr>'
            '<tr>'
            '<th>Other Available Formats</th>'
            '<td>'
            '<a href="{url}">KML</a> - Keyhole Markup Language<br/>'
            '<a href="{url_kmz}">KMZ</a> - Keyhole Markup Language (zipped)<br/>'
            '<a href="{url_shz}">Shapefile</a> - ESRI Shapefile<br/>'
            '<a href="{url_csv}">CSV</a> - Comma Separated Values (text file)<br/>'
            '<a href="{url_json}">JSON</a> - Javascript Object Notation'
            '</td>'
            '</tr>'
            '</tbody>'
            '</table>'
        ).format(
            archive=meta.archive.name,
            scenario=meta.scenario,
            model=meta.climate_model,
            run=meta.run,
            variable=meta.variable,
            units=meta.variable.units,
            simout=meta.simulation_output.netcdf_variable,
            start=meta.temporal[0],
            end=meta.temporal[-1],
            operation=meta.operation,
            url=url,
            url_kmz=url.replace('.kml', '.kmz'),
            url_shz=url.replace('.kml', '.shz'),
            url_csv=url.replace('.kml', '.csv'),
            url_json=url.replace('.kml', '.geojson'),
        )
        ##### TODO: build linked urls on the fly
        #from piston.emitters import Emitter
        #Emitter.EMITTERS.keys()
        #['xml', 'sqlite', 'nc', 'shz', 'kml', 'kcsv', 'django', 'json', 'html', 'meta', 'lshz', 'csv', 'pickle', 'kmz']
        doc = KML.kml(
            KML.Document(
                KML.name('Climate Simulation Output'),
                KML.open(1),
                KML.description(description),
                KML.snippet(
                    '<i>Click for metadata!</i>',
                    maxLines="2",
                ),
                KML.StyleMap(
                    KML.Pair(
                        KML.key('normal'),
                        KML.styleUrl('#style-normal'),
                    ),
                    KML.Pair(
                        KML.key('highlight'),
                        KML.styleUrl('#style-highlight'),
                    ),
                    id="smap",
                ),
                KML.Style(
                    KML.LineStyle(
                        KML.color('ff0000ff'),
                        KML.width('2'),
                    ),
                    KML.PolyStyle(
                        KML.color('400000ff'),
                    ),
                    id="style-normal",
                ),
                KML.Style(
                    KML.LineStyle(
                        KML.color('ff00ff00'),
                        KML.width('4'),
                    ),
                    KML.PolyStyle(
                        KML.color('400000ff'),
                    ),
                    # The highlight balloon renders the per-geometry time
                    # series with Dygraphs; '$[csv_data]' is substituted by
                    # the KML client from the Placemark's ExtendedData.
                    KML.BalloonStyle(
                        KML.text(('<script type="text/javascript" src="http://dygraphs.com/dygraph-combined.js">'
                            '</script>'
                            '<div id="graphdiv"></div>'
                            '<script type="text/javascript">'
                            'g = new Dygraph('
                            'document.getElementById("graphdiv"),'
                            '$[csv_data],'
                            '{{'
                            'ylabel: \'{param} [{units}]\','
                            'legend: \'always\''
                            '}}'
                            ');'
                            '</script>').format(
                            param=meta.variable.name,
                            units=meta.variable.units,
                        ))
                    ),
                    id="style-highlight",
                ),
                #Time Folders will be appended here
            ),
        )
        try:
            # NOTE(review): if db.Session() itself raises, 's' is unbound and
            # the finally clause raises NameError -- consider creating the
            # session before entering the try block.
            s = db.Session()
            # create a folder to hold the geometries
            geom_fld = KML.Folder(
                KML.name('Geometries'),
            )
            for geom in s.query(db.Geometry).all():
                coord_list = geom.as_kml_coords()
                multigeom_args = [
                    KML.Polygon(
                        KML.tessellate('1'),
                        KML.outerBoundaryIs(
                            KML.LinearRing(
                                KML.coordinates(coords.text),
                            ),
                        ),
                    ) for coords in coord_list
                ]
                # TODO: sort values by time to speed loading
                values = ['{0},{1}'.format(datetime.strftime(val.time, "%Y-%m-%d %H:%M:%S"),val.value) for val in geom.values]
                pm = KML.Placemark(
                    KML.name('Geometry'),
                    KML.ExtendedData(
                        KML.Data(
                            KML.value('"Date,{param}\\n{data}"'.format(
                                param=meta.variable.name,
                                data='\\n'.join(values))
                            ),
                            name="csv_data",
                        ),
                    ),
                    KML.description(''),
                    KML.styleUrl('#smap'),
                    KML.MultiGeometry(*multigeom_args),
                )
                geom_fld.append(pm)
            doc.Document.append(geom_fld)
            # for time in s.query(db.Time).all():
            #     # create a folder for the time
            #     timefld = KML.Folder(
            ##         KML.Style(
            ##             KML.ListStyle(
            ##                 KML.listItemType('checkHideChildren'),
            ##                 KML.bgColor('00ffffff'),
            ##                 KML.maxSnippetLines('2'),
            ##             ),
            ##         ),
            #         KML.name(time.as_xml_date()),
            #         # placemarks will be appended here
            #     )
            #     for val in time.values:
            #         poly_desc = (
            #             '<table border="1">'
            #             '<tbody>'
            #             '<tr><th>Variable</th><td>{variable}</td></tr>'
            #             '<tr><th>Date/Time (UTC)</th><td>{time}</td></tr>'
            #             '<tr><th>Value</th><td>{value:.{digits}f} {units}</td></tr>'
            #             '</tbody>'
            #             '</table>'
            #         ).format(
            #             variable=meta.variable.name,
            #             time=val.time_ref.as_xml_date(),
            #             value=val.value,
            #             digits=3,
            #             units=meta.variable.units,
            #         )
            #
            #         coords = val.geometry.as_kml_coords()
            #         timefld.append(
            #             KML.Placemark(
            #                 KML.name('Geometry'),
            #                 KML.description(poly_desc),
            #                 KML.styleUrl('#smap'),
            #                 KML.Polygon(
            #                     KML.tessellate('1'),
            #                     KML.outerBoundaryIs(
            #                         KML.LinearRing(
            #                             KML.coordinates(coords),
            #                         ),
            #                     ),
            #                 ),
            #             )
            #         )
            #     doc.Document.append(timefld)
            #     pass
        finally:
            s.close()
        # return the pretty print string
        output = etree.tostring(doc, pretty_print=True)
        # Unescape newline characters
        #return(output.replace('&#10;','\\n'))
        return(output)
class KmzConverter(KmlConverter):
    """KML converter that compresses the rendered document into a KMZ archive."""
    def _response_(self, payload):
        """Zip the KML payload as 'doc.kml' and return the archive bytes."""
        buf = io.BytesIO()
        archive = zipfile.ZipFile(
            buf,
            mode='w',
            compression=zipfile.ZIP_DEFLATED,
        )
        try:
            archive.writestr('doc.kml', payload)
        finally:
            archive.close()
        buf.flush()
        zipped = buf.getvalue()
        buf.close()
        return (zipped)
## features.py
##
## Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: features.py,v 1.24 2006/03/25 05:47:22 snakeru Exp $
"""
This module contains variable stuff that is not worth splitting into separate modules.
Here is:
DISCO client and agents-to-DISCO and browse-to-DISCO emulators.
IBR and password manager.
jabber:iq:privacy methods
All these methods takes 'disp' first argument that should be already connected
(and in most cases already authorised) dispatcher instance.
"""
from protocol import *
REGISTER_DATA_RECEIVED='REGISTER DATA RECEIVED'
### DISCO ### http://jabber.org/protocol/disco ### JEP-0030 ####################
### Browse ### jabber:iq:browse ### JEP-0030 ###################################
### Agents ### jabber:iq:agents ### JEP-0030 ###################################
def _discover(disp,ns,jid,node=None,fb2b=0,fb2a=1):
    """Query 'jid' with a disco request in namespace 'ns'.

    If the target does not answer with a result stanza, optionally fall back
    to the legacy jabber:iq:browse (fb2b) and jabber:iq:agents (fb2a)
    protocols. Returns the query payload (list of nodes) or []. Internal.
    """
    iq=Iq(to=jid,typ='get',queryNS=ns)
    if node: iq.setQuerynode(node)
    rep=disp.SendAndWaitForResponse(iq)
    if fb2b and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_BROWSE)) # Fallback to browse
    if fb2a and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_AGENTS)) # Fallback to agents
    if isResultNode(rep): return rep.getQueryPayload()
    return []
def discoverItems(disp,jid,node=None):
    """ Query remote object about any items that it contains. Return items list. """
    """ According to JEP-0030:
        query MAY have node attribute
        item: MUST HAVE jid attribute and MAY HAVE name, node, action attributes.
        action attribute of item can be either of remove or update value."""
    ret=[]
    disco_result = _discover(disp,NS_DISCO_ITEMS,jid,node)
    if disco_result is not None:
        for i in disco_result:
            # Legacy jabber:iq:agents replies carry <agent><name/></agent>;
            # lift the child <name> into an attribute so all results look alike.
            if i.getName()=='agent' and i.getTag('name'): i.setAttr('name',i.getTagData('name'))
            ret.append(i.attrs)
    return ret
def discoverInfo(disp,jid,node=None):
    """ Query remote object about info that it publishes. Returns identities and features lists."""
    """ According to JEP-0030:
        query MAY have node attribute
        identity: MUST HAVE category and name attributes and MAY HAVE type attribute.
        feature: MUST HAVE var attribute"""
    identities , features = [] , []
    disco_result = _discover(disp,NS_DISCO_INFO,jid,node)
    if disco_result is not None:
        for i in disco_result:
            try:
                if i.getName()=='identity': identities.append(i.attrs)
                elif i.getName()=='feature': features.append(i.getAttr('var'))
                elif i.getName()=='agent':
                    # Legacy jabber:iq:agents reply: map child elements onto
                    # disco-style identity attributes and feature namespaces.
                    if i.getTag('name'): i.setAttr('name',i.getTagData('name'))
                    if i.getTag('description'): i.setAttr('name',i.getTagData('description'))
                    identities.append(i.attrs)
                    if i.getTag('groupchat'): features.append(NS_GROUPCHAT)
                    if i.getTag('register'): features.append(NS_REGISTER)
                    if i.getTag('search'): features.append(NS_SEARCH)
            except AttributeError:
                # Payload item was not a Node (e.g. CDATA) -- ignore it.
                pass
    return identities , features
### Registration ### jabber:iq:register ### JEP-0077 ###########################
def getRegInfo(disp,host,info=None,sync=True):
    """ Gets registration form from remote host.
    You can pre-fill the info dictionary.
    F.e. if you are requesting info on registering user joey than specify
    info as {'username':'joey'}. See JEP-0077 for details.
    'disp' must be connected dispatcher instance.
    When sync is true, waits for and returns the server response; otherwise
    the reply is delivered asynchronously via _ReceivedRegInfo."""
    # Bug fix: the old signature used a mutable default argument (info={}),
    # which is shared across calls; use None as the sentinel instead.
    if info is None: info={}
    iq=Iq('get',NS_REGISTER,to=host)
    for i in info.keys(): iq.setTagData(i,info[i])
    if sync:
        resp=disp.SendAndWaitForResponse(iq)
        _ReceivedRegInfo(disp.Dispatcher,resp, host)
        return resp
    else: disp.SendAndCallForResponse(iq,_ReceivedRegInfo, {'agent': host})
def _ReceivedRegInfo(con, resp, agent):
    """Fire a REGISTER_DATA_RECEIVED event from a registration-form reply.

    Prefers an embedded jabber:x:data form; otherwise synthesizes a DataForm
    from the plain query fields. Internal helper for getRegInfo.
    """
    iq=Iq('get',NS_REGISTER,to=agent)
    if not isResultNode(resp): return
    df=resp.getTag('query',namespace=NS_REGISTER).getTag('x',namespace=NS_DATA)
    if df:
        con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, DataForm(node=df)))
        return
    df=DataForm(typ='form')
    for i in resp.getQueryPayload():
        # Python 2 '<>' inequality: skip payload items that are not Nodes.
        if type(i)<>type(iq): pass
        elif i.getName()=='instructions': df.addInstructions(i.getData())
        else: df.setField(i.getName()).setValue(i.getData())
    con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, df))
def register(disp, host, info):
    """ Perform registration on remote server with provided info.
    disp must be connected dispatcher instance.
    Returns true or false depending on registration result.
    If registration fails you can get additional info from the dispatcher's owner
    attributes lastErrNode, lastErr and lastErrCode.
    """
    iq = Iq('set', NS_REGISTER, to=host)
    # DataForm-like objects expose asDict(); plain dicts pass through as-is.
    if type(info) is not dict:
        info = info.asDict()
    query = iq.setTag('query')
    for field in info.keys():
        query.setTagData(field, info[field])
    resp = disp.SendAndWaitForResponse(iq)
    if isResultNode(resp):
        return 1
def unregister(disp, host):
    """ Unregisters with host (permanently removes account).
    disp must be connected and authorized dispatcher instance.
    Returns true on success."""
    iq = Iq('set', NS_REGISTER, to=host, payload=[Node('remove')])
    resp = disp.SendAndWaitForResponse(iq)
    if isResultNode(resp):
        return 1
def changePasswordTo(disp,newpassword,host=None):
    """ Changes password on specified or current (if not specified) server.
    disp must be connected and authorized dispatcher instance.
    Returns true on success."""
    if not host: host=disp._owner.Server
    # Bug fix: per XEP-0077 the <username/> element must carry the account
    # name (disp._owner.User), not the server name as it did previously.
    resp=disp.SendAndWaitForResponse(Iq('set',NS_REGISTER,to=host,payload=[Node('username',payload=[disp._owner.User]),Node('password',payload=[newpassword])]))
    if isResultNode(resp): return 1
### Privacy ### jabber:iq:privacy ### draft-ietf-xmpp-im-19 ####################
#type=[jid|group|subscription]
#action=[allow|deny]
def getPrivacyLists(disp):
    """ Requests privacy lists from connected server.
    Returns dictionary of existing lists on success, None on failure.
    The dictionary maps 'lists' to the list of list names and, when present,
    'active'/'default' to the corresponding list name."""
    try:
        result={'lists':[]}
        resp=disp.SendAndWaitForResponse(Iq('get',NS_PRIVACY))
        if not isResultNode(resp): return
        for node in resp.getQueryPayload():
            if node.getName()=='list': result['lists'].append(node.getAttr('name'))
            else: result[node.getName()]=node.getAttr('name')
        return result
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not
    # swallowed; any other failure still yields None as before. Also stopped
    # shadowing the builtins 'dict' and 'list'.
    except Exception: pass
def getPrivacyList(disp,listname):
    """ Requests specific privacy list listname. Returns list of XML nodes (rules)
    taken from the server responce, or None on failure."""
    try:
        resp=disp.SendAndWaitForResponse(Iq('get',NS_PRIVACY,payload=[Node('list',{'name':listname})]))
        if isResultNode(resp): return resp.getQueryPayload()[0]
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not
    # swallowed; any other failure still yields None as before.
    except Exception: pass
def setActivePrivacyList(disp, listname=None, typ='active'):
    """ Switches privacy list 'listname' to specified type.
    By default the type is 'active'. Returns true on success."""
    # Omitting the name attribute declines the active/default list entirely.
    attrs = {'name': listname} if listname else {}
    resp = disp.SendAndWaitForResponse(Iq('set', NS_PRIVACY, payload=[Node(typ, attrs)]))
    if isResultNode(resp):
        return 1
def setDefaultPrivacyList(disp, listname=None):
    """ Sets the default privacy list as 'listname'. Returns true on success."""
    return setActivePrivacyList(disp, listname, typ='default')
def setPrivacyList(disp, list):
    """ Set the ruleset. 'list' should be the simpleXML node formatted
    according to RFC 3921 (XMPP-IM) (I.e. Node('list',{'name':listname},payload=[...]) )
    Returns true on success."""
    resp = disp.SendAndWaitForResponse(Iq('set', NS_PRIVACY, payload=[list]))
    return 1 if isResultNode(resp) else None
def delPrivacyList(disp, listname):
    """ Deletes privacy list 'listname'. Returns true on success."""
    removal = Node('list', {'name': listname})
    resp = disp.SendAndWaitForResponse(Iq('set', NS_PRIVACY, payload=[removal]))
    return 1 if isResultNode(resp) else None
| jjculber/xmpp-server-scanner | pybot/include/xmpp/features.py | Python | gpl-2.0 | 8,833 |
"""A writable test, not specifically important what is tested"""
from django.db import transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from .models import Lead, User
class Test(TestCase):
    # Run against every configured database alias.
    databases = '__all__'
    def test_lead_owner_without_default(self):
        """A Lead without an owner must fail to save; with an owner it saves."""
        test_user = User.objects.create(username='user', last_name='a', email='a@a.au')
        test_lead = Lead(company='sf_test lead', last_name='name')
        try:
            with self.assertRaises(IntegrityError) as cm, transaction.atomic():
                # can't be saved without owner
                test_lead.save()
            self.assertIn('NOT NULL constraint failed', cm.exception.args[0])
            # can be saved with owner
            test_lead.owner = test_user
            test_lead.save()
        finally:
            # Clean up whatever was actually created, even on failure.
            if test_lead.id is not None:
                test_lead.delete()
            test_user.delete()
| django-salesforce/django-salesforce | tests/no_salesforce/tests.py | Python | mit | 939 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from keystone.common import dependency
from keystone.common import sql
from keystone import config
from keystone.i18n import _LW
from keystone.server import backends
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def configure(version=None, config_files=None,
              pre_setup_logging_fn=lambda: None):
    """Register options and initialize SQL and logging for the server.

    :param version: version string handed to oslo.config.
    :param config_files: explicit configuration file list, or None for the
        oslo.config defaults.
    :param pre_setup_logging_fn: hook invoked after CONF is populated but
        before logging is configured.
    """
    config.configure()
    sql.initialize()
    config.set_default_for_default_log_levels()
    CONF(project='keystone', version=version,
         default_config_files=config_files)
    pre_setup_logging_fn()
    config.setup_logging()
    if CONF.debug:
        # warn() is a deprecated alias of warning() in stdlib logging and
        # oslo.log; use the canonical name.
        LOG.warning(_LW(
            'debug is enabled so responses may include sensitive '
            'information.'))
def setup_backends(load_extra_backends_fn=lambda: {},
                   startup_application_fn=lambda: None):
    """Load driver backends, start the application, resolve dependencies.

    Returns a (drivers, application) tuple.
    """
    all_drivers = backends.load_backends()
    all_drivers.update(load_extra_backends_fn())
    application = startup_application_fn()
    all_drivers.update(dependency.resolve_future_dependencies())
    return all_drivers, application
| roopali8/keystone | keystone/server/common.py | Python | apache-2.0 | 1,635 |
import networkx as nx
from bokeh.io import output_file, show
from bokeh.plotting import figure, from_networkx
# Render Zachary's karate club graph with Bokeh's networkx integration.
network = nx.karate_club_graph()
plot = figure(title="Networkx Integration Demonstration",
              x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),
              tools="", toolbar_location=None)
graph_renderer = from_networkx(network, nx.spring_layout, scale=2, center=(0, 0))
plot.renderers.append(graph_renderer)
output_file("networkx_graph.html")
show(plot)
| ericmjl/bokeh | sphinx/source/docs/user_guide/examples/graph_networkx.py | Python | bsd-3-clause | 428 |
import hashlib
from io import StringIO
from datetime import timedelta
import re
import random
import string
import struct
import time
import sys
import shutil
import subprocess
import os
import numpy as np
import requests
import tempfile
import uuid
from studio.storage import storage_setup
from studio.storage.storage_type import StorageType
def event_reader(fileobj):
    """Yield TensorFlow ``Event`` protobufs from a TFRecord-style event file.

    ``fileobj`` may be a file path or an open binary file object; it is
    closed when iteration stops.
    """
    from tensorflow.core.util import event_pb2
    if isinstance(fileobj, str):
        fileobj = open(fileobj, 'rb')
    # TFRecord framing: 8-byte little-endian payload length, 4-byte length
    # CRC (together the 12-byte header), payload, 4-byte payload CRC
    # (footer). The CRCs are skipped, not validated.
    header_len = 12
    footer_len = 4
    size_len = 8
    while True:
        try:
            data_len = struct.unpack('Q', fileobj.read(size_len))[0]
            fileobj.read(header_len - size_len)  # skip the length CRC
            data = fileobj.read(data_len)
            event = None
            event = event_pb2.Event()
            event.ParseFromString(data)
            fileobj.read(footer_len)  # skip the payload CRC
            yield event
        except BaseException:
            # NOTE(review): check_for_kb_interrupt is not imported or defined
            # in this module's visible scope -- confirm it exists, otherwise
            # this handler raises NameError instead of ending iteration.
            check_for_kb_interrupt()
            break
    fileobj.close()
def get_experiment_metric(experiment):
    """Extract the tracked metric value from an experiment's tensorboard logs.

    experiment.metric is "<tag>" or "<tag>:<min|max>"; the tag is looked up
    in every event file of the 'tb' artifact. Returns a dict with a single
    'metric_value' key (None when no metric is configured or found).
    """
    info = dict()
    info['metric_value'] = None
    if experiment.metric is not None:
        metric_str = experiment.metric.split(':')
        metric_name = metric_str[0]
        metric_type = metric_str[1] if len(metric_str) > 1 else None
        tb_art = experiment.artifacts['tb']
        tbtar = tb_art.stream() if tb_art else None
        if metric_type == 'min':
            def metric_accum(x, y):
                # Bug fix: compare against None explicitly so a running
                # minimum/maximum of 0.0 (falsy) is not discarded.
                return min(x, y) if x is not None else y
        elif metric_type == 'max':
            def metric_accum(x, y):
                return max(x, y) if x is not None else y
        else:
            # Default: keep the most recent value.
            def metric_accum(x, y):
                return y
        metric_value = None
        if tbtar is not None:
            for f in tbtar:
                if f.isreg():
                    # Bug fix: event_reader is defined in this module; the
                    # old call went through an undefined 'util' name.
                    for e in event_reader(tbtar.extractfile(f)):
                        for v in e.summary.value:
                            if v.tag == metric_name:
                                metric_value = metric_accum(
                                    metric_value, v.simple_value)
        info['metric_value'] = metric_value
    return info
def rsync_cp(source, dest, ignore_arg='', logger=None):
    """Copy *source* to *dest*, replacing anything already at *dest*.

    With a non-empty *ignore_arg*, rsync is used (the argument is passed
    through, e.g. an --exclude flag); otherwise a plain 'cp -pR' is done.
    Non-zero exit codes are logged via *logger* when one is provided.
    """
    # Clear out any existing destination, then recreate it as a directory.
    try:
        if os.path.exists(dest):
            if os.path.isdir(dest):
                shutil.rmtree(dest)
            else:
                os.remove(dest)
        os.makedirs(dest)
    except OSError:
        pass
    if ignore_arg != '':
        cmd = ['rsync', ignore_arg, '-aHAXE', source + "/", dest]
    else:
        # cp -pR wants the destination absent so it creates it as a copy.
        try:
            os.rmdir(dest)
        except OSError:
            pass
        cmd = ['cp', '-pR', source, dest]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode != 0 and logger is not None:
        logger.info('%s returned non-zero exit code. Output:' % cmd[0])
        logger.info(out)
class Progbar(object):
    """Displays a progress bar.
    # Arguments
        target: Total number of steps expected, None if unknown.
        interval: Minimum visual progress update interval (in seconds).
    """
    def __init__(self, target, width=30, verbose=1, interval=0.05):
        self.width = width
        # -1 is the internal sentinel for "unknown target".
        if target is None:
            target = -1
        self.target = target
        # Per-metric [weighted sum, weight] pairs for running averages.
        self.sum_values = {}
        self.unique_values = []
        self.start = time.time()
        self.last_update = 0
        self.interval = interval
        self.total_width = 0
        self.seen_so_far = 0
        self.verbose = verbose
    def update(self, current, values=None, force=False):
        """Updates the progress bar.
        # Arguments
            current: Index of current step.
            values: List of tuples (name, value_for_last_step).
                The progress bar will display averages for these values.
            force: Whether to force visual progress update.
        """
        values = values or []
        for k, v in values:
            if k not in self.sum_values:
                # Weight each value by the number of steps since last update
                # so the displayed averages are per-step means.
                self.sum_values[k] = [v * (current - self.seen_so_far),
                                      current - self.seen_so_far]
                self.unique_values.append(k)
            else:
                self.sum_values[k][0] += v * (current - self.seen_so_far)
                self.sum_values[k][1] += (current - self.seen_so_far)
        self.seen_so_far = current
        now = time.time()
        if self.verbose == 1:
            # Throttle redraws to at most one per self.interval seconds.
            if not force and (now - self.last_update) < self.interval:
                return
            prev_total_width = self.total_width
            # Rewind the cursor over the previously drawn bar.
            sys.stdout.write('\b' * prev_total_width)
            sys.stdout.write('\r')
            if self.target != -1:
                numdigits = int(np.floor(np.log10(self.target))) + 1
                barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
                bar = barstr % (current, self.target)
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += ('=' * (prog_width - 1))
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
                sys.stdout.write(bar)
                self.total_width = len(bar)
            if current:
                time_per_unit = (now - self.start) / current
            else:
                time_per_unit = 0
            eta = time_per_unit * (self.target - current)
            info = ''
            if current < self.target and self.target != -1:
                info += ' - ETA: %ds' % eta
            else:
                info += ' - %ds' % (now - self.start)
            for k in self.unique_values:
                info += ' - %s:' % k
                if isinstance(self.sum_values[k], list):
                    avg = np.mean(
                        self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                    # Switch to scientific notation for very small magnitudes.
                    if abs(avg) > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                else:
                    info += ' %s' % self.sum_values[k]
            self.total_width += len(info)
            # Pad with spaces to erase leftovers from a longer previous line.
            if prev_total_width > self.total_width:
                info += ((prev_total_width - self.total_width) * ' ')
            sys.stdout.write(info)
            sys.stdout.flush()
            if current >= self.target:
                sys.stdout.write('\n')
        if self.verbose == 2:
            # Terse mode: print a single summary line at completion only.
            if current >= self.target:
                info = '%ds' % (now - self.start)
                for k in self.unique_values:
                    info += ' - %s:' % k
                    avg = np.mean(
                        self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                    if avg > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                sys.stdout.write(info + "\n")
        self.last_update = now
    def add(self, n, values=None):
        """Advance the bar by *n* steps, optionally recording metric values."""
        self.update(self.seen_so_far + n, values)
def has_aws_credentials():
    """Return True iff the configured artifact store is S3-backed and its
    client has credentials available."""
    artifact_store = storage_setup.get_storage_artifact_store()
    if artifact_store is None:
        return False
    handler = artifact_store.get_storage_handler()
    if handler.type != StorageType.storageS3:
        return False
    client = handler.get_client()
    return client._request_signer._credentials is not None
| studioml/studio | studio/extra_util.py | Python | apache-2.0 | 7,782 |
'''Jira client can be found at https://github.com/zebpalmer/JiraClient''' | Badger32d/JiraClient | jiraclient/__init__.py | Python | gpl-3.0 | 73 |
from __future__ import division
import operator
import os
import itertools
from django.template import loader
import olympia.core.logger
from olympia import amo
from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db
from olympia.amo.utils import extract_colors_from_image, pngcrush_image
from olympia.devhub.tasks import resize_image
from olympia.files.models import File
from olympia.files.utils import get_background_images
from olympia.versions.models import Version, VersionPreview
from .utils import (
AdditionalBackground, process_color_value,
encode_header, write_svg_to_png)
log = olympia.core.logger.getLogger('z.versions.task')
def _build_static_theme_preview_context(theme_manifest, file_):
    """Build the SVG-template context for a static theme's preview renders.

    Pulls colors, the header image and any additional background images out
    of the theme manifest and the uploaded file's archive.
    """
    # First build the context shared by both the main preview and the thumb
    context = {'amo': amo}
    context.update(dict(
        process_color_value(prop, color)
        for prop, color in theme_manifest.get('colors', {}).items()))
    images_dict = theme_manifest.get('images', {})
    # 'theme_frame' is the current manifest key; 'headerURL' is the legacy one.
    header_url = images_dict.get(
        'theme_frame', images_dict.get('headerURL', ''))
    file_ext = os.path.splitext(header_url)[1]
    backgrounds = get_background_images(file_, theme_manifest)
    header_src, header_width, header_height = encode_header(
        backgrounds.get(header_url), file_ext)
    context.update(
        header_src=header_src,
        header_src_height=header_height,
        header_width=header_width)
    # Limit the srcs rendered to 15 to ameliorate DOSing somewhat.
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1435191 for background.
    additional_srcs = images_dict.get('additional_backgrounds', [])[:15]
    additional_alignments = (theme_manifest.get('properties', {})
                             .get('additional_backgrounds_alignment', []))
    additional_tiling = (theme_manifest.get('properties', {})
                         .get('additional_backgrounds_tiling', []))
    # zip_longest: alignment/tiling lists may be shorter than the src list.
    additional_backgrounds = [
        AdditionalBackground(path, alignment, tiling, backgrounds.get(path))
        for (path, alignment, tiling) in itertools.zip_longest(
            additional_srcs, additional_alignments, additional_tiling)
        if path is not None]
    context.update(additional_backgrounds=additional_backgrounds)
    return context
@task
@use_primary_db
def generate_static_theme_preview(theme_manifest, version_pk):
    """Celery task: render preview PNGs for a static theme version.

    Renders the theme SVG template once per configured preview size, writes
    the full-size image and its thumbnail, extracts a color palette, and
    reindexes the add-on.
    """
    # Make sure we import `index_addons` late in the game to avoid having
    # a "copy" of it here that won't get mocked by our ESTestCase
    from olympia.addons.tasks import index_addons
    tmpl = loader.get_template(
        'devhub/addons/includes/static_theme_preview_svg.xml')
    file_ = File.objects.filter(version_id=version_pk).first()
    if not file_:
        return
    context = _build_static_theme_preview_context(theme_manifest, file_)
    sizes = sorted(
        amo.THEME_PREVIEW_SIZES.values(), key=operator.itemgetter('position'))
    colors = None
    for size in sizes:
        # Create a Preview for this size.
        preview = VersionPreview.objects.create(
            version_id=version_pk, position=size['position'])
        # Add the size to the context and render
        context.update(svg_render_size=size['full'])
        svg = tmpl.render(context).encode('utf-8')
        if write_svg_to_png(svg, preview.image_path):
            resize_image(
                preview.image_path, preview.thumbnail_path, size['thumbnail'])
            pngcrush_image(preview.image_path)
            # Extract colors once and store it for all previews.
            # Use the thumbnail for extra speed, we don't need to be super
            # accurate.
            if colors is None:
                colors = extract_colors_from_image(preview.thumbnail_path)
            data = {
                'sizes': {
                    'image': size['full'],
                    'thumbnail': size['thumbnail'],
                },
                'colors': colors,
            }
            preview.update(**data)
    addon_id = Version.objects.values_list(
        'addon_id', flat=True).get(id=version_pk)
    index_addons.delay([addon_id])
@task
def delete_preview_files(pk, **kw):
    """Delete the on-disk image files belonging to one VersionPreview."""
    preview = VersionPreview.objects.get(pk=pk)
    VersionPreview.delete_preview_files(sender=None, instance=preview)
| eviljeff/olympia | src/olympia/versions/tasks.py | Python | bsd-3-clause | 4,306 |
"""Primary methods that power time2relax."""
import json
from posixpath import join as urljoin
from requests import compat
from time2relax import utils # pylint: disable=import-self
# URL path segments for the three kinds of design-document functions.
_LIST = "_list"
_SHOW = "_show"
_VIEW = "_view"
def all_docs(**kwargs):
    """Fetch multiple documents.

    - http://docs.couchdb.org/en/stable/api/database/bulk-api.html#get--db-_all_docs
    - http://docs.couchdb.org/en/stable/api/database/bulk-api.html#post--db-_all_docs

    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # Pull any query parameters out of the kwargs and let the helper decide
    # whether they require a GET or a POST request.
    params = kwargs.pop("params", None)
    method, extra_kwargs = utils.query_method_kwargs(params)
    kwargs.update(extra_kwargs)
    return method, "_all_docs", kwargs
def bulk_docs(docs, **kwargs):
    """Create, update or delete multiple documents.

    http://docs.couchdb.org/en/stable/api/database/bulk-api.html#post--db-_bulk_docs

    :param list docs: The sequence of documents to be sent.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # Ensure a JSON body dict exists, then attach the documents to it.
    payload = kwargs.get("json")
    if not isinstance(payload, dict):
        payload = {}
        kwargs["json"] = payload
    payload["docs"] = docs
    return "POST", "_bulk_docs", kwargs
def compact(**kwargs):
    """Trigger a compaction operation.

    http://docs.couchdb.org/en/stable/api/database/compact.html#post--db-_compact

    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # CouchDB requires an explicit JSON content type on this endpoint.
    headers = kwargs.get("headers")
    if not isinstance(headers, dict):
        headers = {}
        kwargs["headers"] = headers
    headers["Content-Type"] = "application/json"
    return "POST", "_compact", kwargs
def ddoc_list(ddoc_id, func_id, view_id, other_id=None, **kwargs):
    """Apply a list function against a view.

    http://docs.couchdb.org/en/stable/api/ddoc/render.html#get--db-_design-ddoc-_list-func-view

    :param str ddoc_id: The design document name.
    :param str func_id: The list function name.
    :param str view_id: The view function name.
    :param str other_id: (optional) Other design document that holds the view function.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # A view living in another design document is addressed by prefixing the
    # view name with that (URL-encoded) document id.
    path = (urljoin(utils.encode_document_id(other_id), view_id)
            if other_id else view_id)
    return _ddoc("GET", ddoc_id, _LIST, func_id, path, **kwargs)
def ddoc_show(ddoc_id, func_id, doc_id=None, **kwargs):
    """Apply a show function against a document.

    http://docs.couchdb.org/en/stable/api/ddoc/render.html#get--db-_design-ddoc-_show-func

    :param str ddoc_id: The design document name.
    :param str func_id: The show function name.
    :param str doc_id: (optional) The document to execute the show function on.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    args = ["GET", ddoc_id, _SHOW, func_id]
    if doc_id:
        # Optionally target a specific document.
        args.append(utils.encode_document_id(doc_id))
    return _ddoc(*args, **kwargs)
def ddoc_view(ddoc_id, func_id, **kwargs):
    """Execute a view function.

    http://docs.couchdb.org/en/stable/api/ddoc/views.html#get--db-_design-ddoc-_view-view

    :param str ddoc_id: The design document name.
    :param str func_id: The view function name.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # Query parameters determine whether the view is fetched with GET or POST.
    params = kwargs.pop("params", None)
    method, extra_kwargs = utils.query_method_kwargs(params)
    kwargs.update(extra_kwargs)
    return _ddoc(method, ddoc_id, _VIEW, func_id, **kwargs)
def destroy(**kwargs):
    """Delete the database.

    http://docs.couchdb.org/en/stable/api/database/common.html#delete--db

    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # The database itself is addressed by the empty path.
    return ("DELETE", "", kwargs)
def get(doc_id, **kwargs):
    """Retrieve a document.

    http://docs.couchdb.org/en/stable/api/document/common.html#get--db-docid

    :param str doc_id: The document to retrieve.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    params = kwargs.get("params")
    if isinstance(params, dict) and "open_revs" in params:
        # 'open_revs' needs to be JSON encoded, except for the literal "all".
        if params["open_revs"] != "all":
            params["open_revs"] = json.dumps(params["open_revs"])
    return "GET", utils.encode_document_id(doc_id), kwargs
def get_att(doc_id, att_id, **kwargs):
    """Retrieve an attachment.

    http://docs.couchdb.org/en/stable/api/document/attachments.html#get--db-docid-attname

    :param str doc_id: The attachment document.
    :param str att_id: The attachment to retrieve.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    doc_part = utils.encode_document_id(doc_id)
    att_part = utils.encode_attachment_id(att_id)
    return "GET", urljoin(doc_part, att_part), kwargs
def info(**kwargs):
    """Get information about the database.

    http://docs.couchdb.org/en/stable/api/database/common.html#get--db

    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # Database metadata lives at the database root, hence the empty path.
    return ("GET", "", kwargs)
def insert(doc, **kwargs):
    """Create or update an existing document.

    - http://docs.couchdb.org/en/stable/api/document/common.html#put--db-docid
    - http://docs.couchdb.org/en/stable/api/database/common.html#post--db

    :param dict doc: The document to insert.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    kwargs["json"] = doc
    # Without an explicit _id the server assigns one (POST on the database);
    # with an _id the document is addressed directly (PUT).
    if "_id" not in doc:
        return "POST", "", kwargs
    return "PUT", utils.encode_document_id(doc["_id"]), kwargs
def insert_att(doc_id, doc_rev, att_id, att, att_type, **kwargs):
    """Create or update an existing attachment.

    http://docs.couchdb.org/en/stable/api/document/attachments.html#put--db-docid-attname

    :param str doc_id: The attachment document.
    :param doc_rev: (optional) The document revision.
    :param str att_id: The attachment name.
    :param att: The dictionary, bytes, or file-like object to insert.
    :param str att_type: The attachment MIME type.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    if doc_rev:
        # The revision travels as the `rev` query parameter.
        params = kwargs.get("params")
        if not isinstance(params, dict):
            params = {}
            kwargs["params"] = params
        params["rev"] = doc_rev
    headers = kwargs.get("headers")
    if not isinstance(headers, dict):
        headers = {}
        kwargs["headers"] = headers
    headers["Content-Type"] = att_type
    kwargs["data"] = att
    path = urljoin(utils.encode_document_id(doc_id), utils.encode_attachment_id(att_id))
    return "PUT", path, kwargs
def remove(doc_id, doc_rev, **kwargs):
    """Delete a document.

    http://docs.couchdb.org/en/stable/api/document/common.html#delete--db-docid

    :param str doc_id: The document to remove.
    :param str doc_rev: The document revision.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # The revision to delete travels as the `rev` query parameter.
    params = kwargs.get("params")
    if not isinstance(params, dict):
        params = {}
        kwargs["params"] = params
    params["rev"] = doc_rev
    return "DELETE", utils.encode_document_id(doc_id), kwargs
def remove_att(doc_id, doc_rev, att_id, **kwargs):
    """Delete an attachment.

    http://docs.couchdb.org/en/stable/api/document/attachments.html#delete--db-docid-attname

    :param str doc_id: The attachment document.
    :param str doc_rev: The document revision.
    :param str att_id: The attachment to remove.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    params = kwargs.get("params")
    if not isinstance(params, dict):
        params = {}
        kwargs["params"] = params
    params["rev"] = doc_rev
    path = urljoin(utils.encode_document_id(doc_id), utils.encode_attachment_id(att_id))
    return "DELETE", path, kwargs
def replicate_to(source, target, **kwargs):
    """Replicate data from source (this) to target.

    http://docs.couchdb.org/en/stable/api/server/common.html#replicate

    :param str source: The URL or name of the source database.
    :param str target: The URL or name of the target database.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    payload = kwargs.get("json")
    if not isinstance(payload, dict):
        payload = {}
        kwargs["json"] = payload
    payload["source"] = source
    payload["target"] = target
    # _replicate is a server-level endpoint, so an absolute URL is built
    # from the source's host.
    return "POST", urljoin(utils.get_database_host(source), "_replicate"), kwargs
def request(session, base_path, method, path, **kwargs):
    """Construct a :class:`requests.Request` object and send it.

    :param requests.Session session:
    :param str base_path:
    :param str method: Method for the :class:`requests.Request` object.
    :param str path: (optional) The path to join with :attr:`CouchDB.url`.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: requests.Response
    """
    params = kwargs.get("params")
    if isinstance(params, dict):
        # Work on a copy so the caller's dict is untouched, and JSON-encode
        # booleans so they serialize as 'true'/'false' rather than Python's
        # titlecase 'True'/'False'.
        encoded = {}
        for key, value in params.items():
            encoded[key] = json.dumps(value) if isinstance(value, bool) else value
        kwargs["params"] = encoded
    if compat.urlparse(path).scheme:
        # Support absolute URLs
        url = path
    else:
        url = urljoin(base_path, path).strip("/")
    resp = session.request(method, url, **kwargs)
    # Raise exception on a bad status code
    if not (200 <= resp.status_code < 300):
        utils.raise_http_exception(resp)
    return resp
def _ddoc(method, ddoc_id, func_type, func_id, _path=None, **kwargs):
    """Apply or execute a design document function.

    :param str method: Method for the :class:`requests.Request` object.
    :param str ddoc_id: The design document name.
    :param str func_type: The design function type.
    :param str func_id: The design function name.
    :param str _path: (internal)
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # Design documents live under the reserved `_design/` namespace.
    design_doc_id = urljoin("_design", ddoc_id)
    segments = [utils.encode_document_id(design_doc_id), func_type, func_id]
    if _path:
        segments.append(_path)
    return method, urljoin(*segments), kwargs
| rwanyoike/time2relax | time2relax/time2relax.py | Python | mit | 11,119 |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-info
# Author : Ricardo Graciani
########################################################################
"""
Retrieve available info about the given pilot
Example:
$ dirac-admin-get-pilot-info https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
{'https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw': {'AccountingSent': 'False',
'BenchMark': 0.0,
'Broker': 'marwms.in2p3.fr',
'DestinationSite': 'cclcgceli01.in2p3.fr',
'GridSite': 'LCG.IN2P3.fr',
'GridType': 'gLite',
'LastUpdateTime': datetime.datetime(2011, 2, 21, 12, 49, 14),
'OutputReady': 'False',
'OwnerDN': '/O=GRID/C=FR/O=CNRS/OU=LPC/CN=Sebastien Guizard',
'OwnerGroup': '/biomed',
'ParentID': 0L,
'PilotID': 2241L,
'PilotJobReference': 'https://marlb.in2p3.fr:9000/2KHFrQjkw',
'PilotStamp': '',
'Status': 'Done',
'SubmissionTime': datetime.datetime(2011, 2, 21, 12, 27, 52),
'TaskQueueID': 399L}}
"""
# pylint: disable=wrong-import-position
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
# Module-level flag toggled by the "-e/--extended" switch; when True the
# printout includes per-job details for each pilot.
extendedPrint = False


def setExtendedPrint(_arg):
    """Switch callback: enable the extended printout (argument is ignored)."""
    global extendedPrint
    extendedPrint = True
@Script()
def main():
    """Print available information about each pilot reference given on the
    command line; with -e/--extended also print the pilot's job attributes.
    Exits with code 2 if any lookup failed.
    """
    Script.registerSwitch("e", "extended", "Get extended printout", setExtendedPrint)
    _, args = Script.parseCommandLine(ignoreErrors=True)

    # Imported after parseCommandLine so DIRAC is configured first.
    from DIRAC import exit as DIRACExit
    from DIRAC.Interfaces.API.Dirac import Dirac
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin

    diracAdmin = DiracAdmin()
    dirac = Dirac()
    exitCode = 0
    errorList = []
    for gridID in args:
        result = diracAdmin.getPilotInfo(gridID)
        if not result["OK"]:
            # Collect errors; keep processing the remaining pilot IDs.
            errorList.append((gridID, result["Message"]))
            exitCode = 2
        else:
            res = result["Value"][gridID]
            if extendedPrint:
                # First line unindented, subsequent keys indented one level.
                tab = ""
                for key in [
                    "PilotJobReference",
                    "Status",
                    "OwnerDN",
                    "OwnerGroup",
                    "SubmissionTime",
                    "DestinationSite",
                    "GridSite",
                ]:
                    if key in res:
                        diracAdmin.log.notice("%s%s: %s" % (tab, key, res[key]))
                        if not tab:
                            tab = " "
                diracAdmin.log.notice("")
                # Then one block per job the pilot has run.
                for jobID in res["Jobs"]:
                    tab = " "
                    result = dirac.getJobAttributes(int(jobID))
                    if not result["OK"]:
                        errorList.append((gridID, result["Message"]))
                        exitCode = 2
                    else:
                        job = result["Value"]
                        diracAdmin.log.notice("%sJob ID: %s" % (tab, jobID))
                        tab += " "
                        for key in [
                            "OwnerDN",
                            "OwnerGroup",
                            "JobName",
                            "Status",
                            "StartExecTime",
                            "LastUpdateTime",
                            "EndExecTime",
                        ]:
                            if key in job:
                                diracAdmin.log.notice("%s%s:" % (tab, key), job[key])
                diracAdmin.log.notice("")
            else:
                # Default: pretty-print the raw pilot info dictionary.
                print(diracAdmin.pPrint.pformat({gridID: res}))
    for error in errorList:
        print("ERROR %s: %s" % error)
    DIRACExit(exitCode)
| ic-hep/DIRAC | src/DIRAC/Interfaces/scripts/dirac_admin_get_pilot_info.py | Python | gpl-3.0 | 4,570 |
#!/usr/bin/python3
# Tool to help searching the database.
#
# Fixes over the original version:
# - empty filter lists no longer produce invalid "()" groups in the SQL;
# - filter values and the search term are bound as parameters instead of
#   being spliced into the SQL string (no injection, no breakage on quotes);
# - the inconsistent trailing-separator trimming ([0:-3] vs [0:-4]) is gone.
import argparse
import sqlite3

# Arguments
parser = argparse.ArgumentParser(description='Interactive SQLite3 Search Tool')
parser.add_argument('fname', action='store', nargs=1, help='Database file',
                    metavar='FILE')
args = vars(parser.parse_args())
conn = sqlite3.connect(args['fname'][0])

src = input('List of allowed src: ').split()
forbidden_src = input('List of forbidden src: ').split()
dst = input('List of allowed dst: ').split()
forbidden_dst = input('List of forbidden dst: ').split()
search_term = input('Search string: ')

# Build the WHERE clause from the non-empty filter groups only.
groups = []
params = []


def _add_group(values, template, joiner):
    """Append one parenthesized condition group for a non-empty value list."""
    if values:
        groups.append('(' + joiner.join(template for _ in values) + ')')
        params.extend(values)


_add_group(src, "src=?", ' or ')
_add_group(forbidden_src, "not src=?", ' and ')
_add_group(dst, "dst=?", ' or ')
_add_group(forbidden_dst, "not dst=?", ' and ')
# The full-text match term is always present (possibly empty, as before).
_add_group([search_term], "raw match ?", ' and ')

query = ('select raw from email inner join email_text '
         'on email.mid=email_text.mid where ' + ' and '.join(groups))
print(query)
for item in conn.execute(query, params):
    print(item)
conn.close()
| henfredemars/python-personal-projects | PyArchive/Search.py | Python | mit | 1,291 |
from selenium import webdriver
from unittest import skip
import unittest
from django.test import LiveServerTestCase
from selenium.webdriver.common.keys import Keys
import time
class NewVisitorTest(LiveServerTestCase):
    """Functional test: a new visitor can start a to-do list."""

    def setUp(self):
        # A fresh browser per test so state never leaks between tests.
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(3)

    def tearDown(self):
        self.browser.quit()

    def check_if_item_in_table(self, send_message):
        """Assert that `send_message` appears as a row of the to-do table."""
        table = self.browser.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        # Bug fix: the original comprehension read `rows.text` (the list)
        # instead of `row.text` (each row element).
        self.assertIn(send_message, [row.text for row in rows])

    def test_can_start_a_list_and_retrieve_it_later(self):
        self.browser.get(self.live_server_url)
        self.assertIn('To-Do lists', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )

    @skip
    def test_cannot_add_empty_list_items(self):
        # Placeholder for a future test (typo 'wirte me' fixed).
        self.fail('write me')
# Allow running this file directly (outside the Django test runner).
if __name__=='__main__':
    unittest.main(warnings='ignore')
| HaoPatrick/tdd-pyhton | functional_tests/bse.py | Python | mit | 1,224 |
#
# python module SettingReader.py
#
# Version: 0.0.1
# Author: Nguyen Linh
# Contact: nvl1109@gmail.com
# License: MIT License
#
# Tested with python 2.7 and 3.2
#
# Copyright (c) 2013 Alejandro Lopez Correa
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function # just for test() function at the end
import xml.dom.minidom
# -------------------------------------------------------------------
# SettingReader: main class that deals with xml dom
# xml config file syntax
"""
<configs>
<name>Test station 1</name>
<compilers>
<compiler name='iar'>C:/iar</compiler>
<compiler name='uv4'>C:/uv4</compiler>
<compiler name='kds'>C:/kds</compiler>
</compilers>
<specific_config>
just a config
</specific_config>
</configs>
"""
class SettingReader( object ):
| nvl1109/testclient | lib/SettingReader.py | Python | gpl-2.0 | 1,916 |
# Copyright (C) 2015 Jeffrey Meyers
# This program is released under the "MIT License".
# Please see the file COPYING in the source
# distribution of this software for license terms.
import os
import sys
import time
import json
from decimal import Decimal
from flask import make_response, Blueprint, redirect
from flask import url_for,render_template, jsonify, request
from sqlalchemy import func
from models import Scans, OnOffPairs_Scans, OnOffPairs_Stops
from helper import Helper
from dashboard import debug, error
from dashboard import SessionONOFF as Session
from ..shared.helper import Helper as h
# Prefix joined onto template names by static() below.
STATIC_DIR = '/onoff'
# Flask blueprint for the on/off survey dashboard pages and JSON endpoints.
mod_onoff = Blueprint('onoff', __name__, url_prefix='/onoff', static_folder='static')
def static(html, static=STATIC_DIR):
    """returns correct path to static directory"""
    # `static` defaults to STATIC_DIR (bound at definition time); callers
    # may override it to target a different directory.
    return os.path.join(static, html)
@mod_onoff.route('/')
def index():
    """Landing page for the on/off dashboard."""
    template = static('index.html')
    return render_template(template)
#@mod_onoff.route('/overview')
#def overview():
# return render_template(static('base.html'))
@mod_onoff.route('/status')
def status():
    """Render the overall survey status page.

    Combines per-route status, hard-coded streetcar targets populated from
    the database, and a routes summary.
    """
    routes = [route['rte_desc'] for route in Helper.get_routes()]
    data = Helper.query_route_status()
    # hardcode streetcar targets, then populate the count
    streetcar = {
        "Portland Streetcar - NS Line": {'target': 2182, 'count': 0},
        "Portland Streetcar - CL Line": {'target': 766, 'count': 0}
    }
    web_session = Session()
    try:
        query = web_session.execute("""
            SELECT rte_desc, sum(count) AS count
            FROM v.records
            WHERE rte_desc LIKE 'Portland Streetcar%'
            GROUP by rte_desc;""")
        for record in query:
            debug(record)
            streetcar[record[0]]['count'] = int(record[1])
    finally:
        # Robustness fix: previously the session leaked if the query or row
        # processing raised an exception.
        web_session.close()
    summary = Helper.query_routes_summary()
    return render_template(static('status.html'),
                           streetcar=streetcar, routes=routes, data=data,
                           summary=summary)
@mod_onoff.route('/status/_details', methods=['GET'])
def status_details():
    """Return per-route status details plus a rendered chart, as JSON."""
    response = {'success': False}
    rte_desc = request.args.get('rte_desc')
    if rte_desc is not None:
        data = Helper.query_route_status(rte_desc=rte_desc)
        response['success'] = True
        response['data'] = data
        response['chart'] = Helper.single_chart(data)
    return jsonify(response)
@mod_onoff.route('/data')
def data():
    """Sets up table headers and dropdowns in template"""
    headers = ['Date', 'Time', 'User', 'Route', 'Direction', 'On Stop', 'Off Stop']
    route_names = [route['rte_desc'] for route in Helper.get_routes()]
    return render_template(
        static('data.html'),
        routes=route_names,
        directions=Helper.get_directions(),
        headers=headers,
        users=Helper.get_users(),
    )
@mod_onoff.route('/data/_query', methods=['GET'])
def data_query():
    """Return filtered on/off records, as JSON rows or as one CSV string.

    Query args: optional `user`, `rte_desc`, `dir_desc` filters; a truthy
    `csv` switches the payload to CSV text.
    """
    user = ""
    rte_desc = ""
    dir_desc = ""
    csv = False
    if 'rte_desc' in request.args:
        rte_desc = request.args['rte_desc'].strip()
    if 'dir_desc' in request.args:
        dir_desc = request.args['dir_desc'].strip()
    if 'user' in request.args:
        user = request.args['user'].strip()
        debug(user)
    if 'csv' in request.args:
        # NOTE(review): any non-empty value (even "false") enables CSV —
        # confirm the client only sends it when CSV is wanted.
        csv = request.args['csv']
    if csv:
        data = Helper.query_route_data(
            user=user, rte_desc=rte_desc, dir_desc=dir_desc, csv=csv
        )
        # Build the CSV payload in one pass instead of quadratic `+=`.
        response = ''.join(','.join(record) + '\n' for record in data)
    else:
        response = Helper.query_route_data(
            user=user, rte_desc=rte_desc, dir_desc=dir_desc
        )
    return jsonify(data=response)
@mod_onoff.route('/surveyors')
def surveyor_status():
    """Render the surveyor status page."""
    return render_template(static('surveyors.html'))
@mod_onoff.route('/surveyors/_summary', methods=['GET'])
def surveyor_summary_query():
    """Summarize surveyor activity for a date (defaults to today)."""
    # NOTE(review): the default uses day-month-year ordering — confirm the
    # client sends dates in the same format.
    date = request.args.get('date', time.strftime("%d-%m-%Y")).strip()
    users = Helper.current_users(date)
    debug(users)
    return jsonify(users=users)
@mod_onoff.route('/map')
def map():
    """Render the route map page with route/direction dropdown data."""
    route_options = [
        {'rte': route['rte'], 'rte_desc': route['rte_desc']}
        for route in h.get_routes()
    ]
    return render_template(
        static('map.html'),
        routes=route_options,
        directions=h.get_directions(),
    )
@mod_onoff.route('/map/_details', methods=['GET'])
def map_offs_details():
    """Return all map overlay data for one route as JSON: TAD polygons,
    stop markers, on/off counts (total and per time-of-day bucket), route
    geometry, and min/max stops — keyed by direction (0/1)."""
    response = {'success': False}
    if 'rte_desc' in request.args.keys():
        rte_desc = request.args['rte_desc'].strip()
        rte = h.rte_lookup(rte_desc)
        session = Session()
        fields = ['dir', 'tad', 'centroid', 'stops', 'ons', 'count']
        # Per-TAD stats broken down by time-of-day bucket.
        query_time = session.execute("""
            SELECT """ + ','.join(fields) + """, bucket
            FROM long.tad_time_stats
            WHERE rte = :rte;""", {'rte': rte})
        # Per-TAD totals used for the stop markers.
        query_markers = session.execute("""
            SELECT dir, tad, centroid, stops, ons, count
            FROM long.tad_stats
            WHERE rte = :rte;""", {'rte': rte})
        # TAD polygons (and centroids) intersecting the route, in WGS84.
        query_tads = session.execute("""
            SELECT
                r.dir,
                t.tadce10 AS tad,
                ST_AsGeoJson(ST_Transform(ST_Union(t.geom), 4326)) AS geom,
                ST_AsGeoJson(ST_Transform(ST_Centroid(ST_Union(t.geom)), 4326)) AS centroid
            FROM tad AS t
            JOIN tm_routes AS r
                ON ST_Intersects(t.geom, r.geom)
            WHERE r.rte = :rte
            GROUP BY r.dir, t.tadce10;""", {'rte': rte})
        # Total boardings per boarding TAD.
        query_data_summary = session.execute("""
            SELECT
                dir,
                on_tad,
                sum(count) AS ons
            FROM long.tad_onoff
            WHERE rte = :rte
            GROUP BY dir, on_tad;""", {'rte': rte})
        # Boarding/alighting TAD pairs with counts.
        query_data = session.execute("""
            SELECT
                dir,
                on_tad,
                off_tad,
                count
            FROM long.tad_onoff
            WHERE rte = :rte;""", {'rte': rte})
        query_routes = session.execute("""
            SELECT dir, ST_AsGeoJson(ST_Transform(ST_Union(geom), 4326))
            FROM tm_routes
            WHERE rte = :rte
            GROUP BY dir;""", {'rte': rte})
        query_minmax = session.execute("""
            SELECT dir, label, stop_name, ST_AsGeoJson(ST_Transform(geom, 4326))
            FROM long.stop_minmax
            WHERE rte = :rte;""", {'rte': rte})

        # NOTE(review): build_data is defined but never called in this
        # function — candidate for removal.
        def build_data(record):
            data = {}
            for index in range(1, len(fields)):
                field = record[index]
                if isinstance(field, Decimal):
                    field = int(field)
                data[fields[index]] = field
            return data

        def int_zero(value):
            # Coerce to int, treating NULL/invalid as 0.
            # NOTE(review): bare except also swallows unrelated errors.
            try:
                return int(value)
            except:
                return 0

        # Accumulators keyed by direction (0/1).
        tads = []
        stops = {}
        stops[0] = {}
        stops[1] = {}
        summary = {}
        summary[0] = []
        summary[1] = []
        data = {}
        data[0] = {}
        data[1] = {}
        routes_geom = {}
        minmax = {}
        time_data = {}
        time_data[0] = {}
        time_data[1] = {}

        for record in query_tads:
            dir_ = record[0]
            tad = record[1]
            geom = record[2]
            centroid = record[3]
            tads.append({'dir': dir_, 'tad': tad, 'geom': geom, 'centroid': centroid})
        for record in query_markers:
            dir_ = record[0]
            tad = record[1]
            centroid = json.loads(record[2])
            stops_geom = json.loads(record[3])
            ons = int_zero(record[4])
            count = int_zero(record[5])
            stops[dir_][tad] = {
                'tad': tad, 'centroid': centroid, 'count': count, 'stops': stops_geom, 'ons': ons
            }
        for record in query_data_summary:
            dir_ = record[0]
            tad_on = record[1]
            ons = int(record[2])
            summary[dir_].append({'tad': tad_on, 'ons': ons})
        for record in query_data:
            dir_ = record[0]
            tad_on = record[1]
            tad_off = record[2]
            offs = int(record[3])
            # Group alighting counts under each boarding TAD.
            if tad_on not in data[dir_]:
                data[dir_][tad_on] = {}
                data[dir_][tad_on]['offs'] = []
            data[dir_][tad_on]['offs'].append({'tad': tad_off, 'offs': offs})
        for record in query_routes:
            routes_geom[record[0]] = {
                'dir': record[0],
                'geom': json.loads(record[1])
            }
        for record in query_minmax:
            if record[0] not in minmax:
                minmax[record[0]] = {}
            minmax[record[0]][record[1]] = {
                'geom': record[3],
                'stop_name': record[2]
            }
        # time of day buckets
        for record in query_time:
            debug(record)
            tad = record[1]
            if tad not in time_data[record[0]]:
                time_data[record[0]][tad] = []
            ons = int_zero(record[4])
            count = int_zero(record[5])
            bucket = int_zero(record[6])
            # insert because data comes in sorted and needs to go out sorted?
            time_data[record[0]][tad].insert(bucket, {"count": count, "ons": ons})

        response['success'] = True
        response['stops'] = stops
        response['summary'] = summary
        response['data'] = data
        response['tads'] = tads
        response['routes'] = routes_geom
        response['minmax'] = minmax
        response['time_data'] = time_data
        session.close()
    return jsonify(response)
| TransitSurveyor/Dashboard | dashboard/mod_onoff/views.py | Python | mit | 9,739 |
#!/usr/bin/env python3
class ShellSort(object):
    """In-place shell sort over a caller-supplied gap sequence.

    The gap sequence is traversed from largest to smallest; each pass is a
    gapped insertion sort of the stored list.
    """

    def __init__(self, elements, gaps):
        # The list is sorted in place; the caller keeps its reference.
        self._elements = elements
        self._gaps = gaps

    def sort(self):
        """Sort the stored elements into ascending order, in place."""
        data = self._elements
        size = len(data)
        for gap in reversed(self._gaps):
            # Gaps larger than the list cannot move anything.
            if gap > size:
                continue
            for start in range(gap, size):
                pos = start
                # Sift the element left in strides of `gap` until it is no
                # smaller than its left neighbour.
                while pos >= gap and data[pos] < data[pos - gap]:
                    self._exchange(pos, pos - gap)
                    pos -= gap

    def _exchange(self, idx1, idx2):
        """Swap the elements at the two given indices."""
        elems = self._elements
        elems[idx1], elems[idx2] = elems[idx2], elems[idx1]
if __name__ == '__main__':
    # Self-test against pre-built fixtures of 100/1000/10000 elements.
    from sort_resources import hundred_random_unsorted, hundred_sorted, thousand_random_unsorted, thousand_sorted, ten_k_random_unsorted, ten_k_sorted
    # Well-known shell-sort gap sequences (ascending; sort() reverses them).
    tokuda = [1, 4, 9, 20, 46, 103, 233, 525, 1182, 2660, 5985, 13467, 30301, 68178, 153401, 345152, 776591,]
    s248 = [1, 3, 7, 16, 38, 94, 233, 577, 1431, 3549, 8801, 21826, 54128, 134237, 332908, 825611,]
    ciura = [1, 4, 10, 23, 57, 132, 301, 701,]
    elements = hundred_random_unsorted
    sorter = ShellSort(elements, ciura)
    sorter.sort()
    assert(elements == hundred_sorted)
    print("100 elements sorted as expected")
    elements = thousand_random_unsorted
    sorter = ShellSort(elements, ciura)
    sorter.sort()
    assert(elements == thousand_sorted)
    print("1000 elements sorted as expected")
    elements = ten_k_random_unsorted
    sorter = ShellSort(elements, ciura)
    sorter.sort()
    assert(elements == ten_k_sorted)
    print("10000 elements sorted as expected")
| bertothunder/coursera-algorithms | ShellSort.py | Python | gpl-3.0 | 1,843 |
#-*-coding:utf-8-*-
from pyramid.events import subscriber
from ..events.resources import (
ResourceCreated,
ResourceChanged,
ResourceDeleted,
ResourceAssigned,
)
from ..scheduler.notifications import add_notification
from ..bl.subscriptions import subscribe_resource
@subscriber(ResourceCreated)
def resource_created(event):
    """On resource creation: subscribe the requester and queue a notification."""
    subscribe_resource(event.request, event.obj.resource)
    add_notification(event.descr, event.obj.resource_id)
@subscriber(ResourceChanged)
def resource_changed(event):
    """Queue a notification when a resource is modified."""
    add_notification(event.descr, event.obj.resource_id)
@subscriber(ResourceDeleted)
def resource_deleted(event):
    """Queue a notification when a resource is deleted."""
    # NOTE(review): this event uses `.id` rather than `.resource_id` —
    # confirm the event's obj is the resource itself.
    add_notification(event.descr, event.obj.id)
@subscriber(ResourceAssigned)
def resource_assigned(event):
    """Queue a notification when a resource is assigned."""
    # NOTE(review): this event uses `.id` rather than `.resource_id` —
    # confirm the event's obj is the resource itself.
    add_notification(event.descr, event.obj.id)
| mazvv/travelcrm | travelcrm/lib/subscribers/resources.py | Python | gpl-3.0 | 796 |
# -*- coding:utf-8 -*-
import unittest
import mock
from ..models import JobPosting
class JobPostingTestCase(unittest.TestCase):
    """Unit tests for the JobPosting model."""

    def test_unicode_should_return_position_name(self):
        # setup
        posting = JobPosting()
        posting.position_name = 'Position Name'
        # action
        result = unicode(posting)
        # assert
        self.assertEqual(posting.position_name, result)
| hellhovnd/dentexchange | dentexchange/apps/employer/tests/test_job_posting.py | Python | bsd-3-clause | 398 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ... import units as u
from ..distances import Distance
from ..builtin_frames import (ICRS, FK5, FK4, FK4NoETerms, Galactic,
Supergalactic, Galactocentric, HCRS, GCRS, LSR)
from .. import SkyCoord
from ...tests.helper import (pytest, quantity_allclose as allclose,
assert_quantity_allclose as assert_allclose)
from .. import EarthLocation, CartesianRepresentation
from ...time import Time
from ...extern.six.moves import range
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (10.0004738, 40.9952444), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc

# Every ordered pair of distinct frames (i < j) with matching coordinates.
m31_params = [
    (m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j])
    for i in range(len(m31_sys))
    for j in range(i + 1, len(m31_sys))
]
@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
    """
    This tests a variety of coordinate conversions for the Chandra point-source
    catalog location of M31 from NED.

    Bug fix: the tolerance checks now compare the *absolute* difference; a
    signed comparison trivially passes whenever the difference is negative.
    """
    coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist)
    coo2 = coo1.transform_to(tosys)
    if tosys is FK4:
        coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950', scale='utc')))
        assert np.abs(coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision  # <1 arcsec
        assert np.abs(coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision
    else:
        assert np.abs(coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision  # <1 arcsec
        assert np.abs(coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision
    assert coo1.distance.unit == u.kpc
    assert coo2.distance.unit == u.kpc
    assert m31_dist.unit == u.kpc
    assert np.abs(coo2.distance - m31_dist) < dist_precision

    # check round-tripping
    coo1_2 = coo2.transform_to(fromsys)
    assert np.abs(coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision
    assert np.abs(coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision
    assert np.abs(coo1_2.distance - m31_dist) < dist_precision
def test_precession():
    """
    Ensures that FK4 and FK5 coordinates precess their equinoxes
    """
    j2000 = Time('J2000', scale='utc')
    b1950 = Time('B1950', scale='utc')
    j1975 = Time('J1975', scale='utc')
    b1975 = Time('B1975', scale='utc')

    fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian)
    assert fk4.equinox.byear == b1950.byear
    fk4_2 = fk4.transform_to(FK4(equinox=b1975))
    assert fk4_2.equinox.byear == b1975.byear

    fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian)
    assert fk5.equinox.jyear == j2000.jyear
    # Bug fix: precess within FK5 — the original transformed to FK4, which
    # does not exercise FK5 equinox precession at all.
    fk5_2 = fk5.transform_to(FK5(equinox=j1975))
    assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
    """
    Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
    """
    fk5 = FK5(ra=1*u.deg, dec=2*u.deg)
    direct = fk5.transform_to(Galactic)
    # Route through both FK4 variants; each must agree with the direct path
    # to well below numerical noise.
    for intermediate in (FK4, FK4NoETerms):
        indirect = fk5.transform_to(intermediate).transform_to(Galactic)
        assert direct.separation(indirect).degree < 1.e-10
def test_galactocentric():
    # when z_sun=0, transformation should be very similar to Galactic
    icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg,
                      dec=np.linspace(-90, 90, 10)*u.deg,
                      distance=1.*u.kpc)
    g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz
    gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz
    diff = np.abs(g_xyz - gc_xyz)
    # With z_sun=0 the frames differ only by a translation along x by the
    # Sun-Galactic-center distance (8.3 kpc is presumably the frame's
    # default galcen_distance -- confirm against the frame definition).
    assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc)
    assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc)
    # generate some test coordinates
    # Four points at distance sqrt(2) kpc chosen so that, with the Sun
    # placed 1 kpc from the center in the plane, they land on the unit
    # axes of the Galactocentric frame.
    g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg,
                 distance=[np.sqrt(2)]*4*u.kpc)
    xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz
    true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc
    assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc)
    # check that ND arrays work
    # from Galactocentric to Galactic
    # The same data as a flat (100,) array and reshaped to (100, 1, 1)
    # must transform to identical results.
    x = np.linspace(-10., 10., 100) * u.kpc
    y = np.linspace(-10., 10., 100) * u.kpc
    z = np.zeros_like(x)
    g1 = Galactocentric(x=x, y=y, z=z)
    g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1),
                        z=z.reshape(100, 1, 1))
    g1t = g1.transform_to(Galactic)
    g2t = g2.transform_to(Galactic)
    assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
    # from Galactic to Galactocentric
    # Same ND-shape invariance check in the opposite direction.
    l = np.linspace(15, 30., 100) * u.deg
    b = np.linspace(-10., 10., 100) * u.deg
    d = np.ones_like(l.value) * u.kpc
    g1 = Galactic(l=l, b=b, distance=d)
    g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1),
                  distance=d.reshape(100, 1, 1))
    g1t = g1.transform_to(Galactocentric)
    g2t = g2.transform_to(Galactocentric)
    np.testing.assert_almost_equal(g1t.cartesian.xyz.value,
                                   g2t.cartesian.xyz.value[:, :, 0, 0])
def test_supergalactic():
    """
    Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
    """
    # The supergalactic north pole, expressed in Galactic coordinates,
    # must transform to sgb = +90 deg.
    galactic_pole = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
    assert allclose(galactic_pole.transform_to(Supergalactic).sgb.deg,
                    +90, atol=1e-9)

    # The origin of supergalactic longitude maps to (l, b) = (137.37, 0) deg.
    sg_origin = Supergalactic(sgl=0*u.degree, sgb=0*u.degree)
    origin_gal = sg_origin.transform_to(Galactic)
    assert allclose(origin_gal.l.deg, 137.37, atol=1e-9)
    assert allclose(origin_gal.b.deg, 0, atol=1e-9)

    # Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
    # (http://adsabs.harvard.edu/abs/2008A%26A...484..143F)
    grb_positions = [
        (29.91, +73.72, '18h50m27s +31d57m17s'),    # GRB 021219
        (-174.44, +46.17, '17h51m36s -25d18m52s'),  # GRB 030320
    ]
    for sgl, sgb, icrs_string in grb_positions:
        sg_pos = Supergalactic(sgl=sgl*u.degree, sgb=sgb*u.degree)
        assert sg_pos.separation(SkyCoord(icrs_string)) < 0.005 * u.degree
class TestHCRS():
    """
    Check HCRS<->ICRS coordinate conversions.
    Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
    `tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
    coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
    """
    def setup(self):
        # Fixed epochs: a scalar time, a second time, and both as an array.
        self.t1 = Time("2013-02-02T23:00")
        self.t2 = Time("2013-08-02T23:00")
        self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
        # Precomputed ICRS position of the Sun at t1 (see class docstring
        # for how these reference values were generated).
        self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg,
                                    dec=-22.36943723*u.deg,
                                    distance=406615.66347377*u.km)
        # array of positions corresponds to times in `tarr`
        self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg,
                                 dec=[-22.36943605, -25.07431079]*u.deg,
                                 distance=[406615.66347377, 375484.13558956]*u.km)
        # corresponding HCRS positions
        # The Sun sits at the origin of the heliocentric frame, so its HCRS
        # position is (0, 0, 0) at every obstime.
        self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km),
                                obstime=self.t1)
        twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
        self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
        # Allowed 3D separation between transformed and reference positions.
        self.tolerance = 5*u.km
    def test_from_hcrs(self):
        # test scalar transform
        transformed = self.sun_hcrs_t1.transform_to(ICRS())
        separation = transformed.separation_3d(self.sun_icrs_scalar)
        assert_allclose(separation, 0*u.km, atol=self.tolerance)
        # test non-scalar positions and times
        transformed = self.sun_hcrs_tarr.transform_to(ICRS())
        separation = transformed.separation_3d(self.sun_icrs_arr)
        assert_allclose(separation, 0*u.km, atol=self.tolerance)
    def test_from_icrs(self):
        # scalar positions
        transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
        separation = transformed.separation_3d(self.sun_hcrs_t1)
        assert_allclose(separation, 0*u.km, atol=self.tolerance)
        # nonscalar positions
        transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
        separation = transformed.separation_3d(self.sun_hcrs_tarr)
        assert_allclose(separation, 0*u.km, atol=self.tolerance)
class TestHelioBaryCentric():
    """
    Check GCRS<->Heliocentric and Barycentric coordinate conversions.
    Uses the WHT observing site (information grabbed from data/sites.json).
    """
    def setup(self):
        # William Herschel Telescope site (lon, lat, height) at a fixed epoch.
        wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m)
        self.obstime = Time("2013-02-02T23:00")
        self.wht_itrs = wht.get_itrs(obstime=self.obstime)
    def test_heliocentric(self):
        gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
        helio = gcrs.transform_to(HCRS(obstime=self.obstime))
        # Check it doesn't change from previous times.
        # Regression values: heliocentric cartesian position of the WHT
        # computed by an earlier version of this code path.
        previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m
        assert_allclose(helio.cartesian.xyz, previous)
        # And that it agrees with SLALIB to within 14km
        helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
        assert np.sqrt(((helio.cartesian.xyz -
                         helio_slalib)**2).sum()) < 14. * u.km
    def test_barycentric(self):
        gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
        # ICRS is barycentric, so this is the GCRS->barycentric conversion.
        bary = gcrs.transform_to(ICRS())
        previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m
        assert_allclose(bary.cartesian.xyz, previous)
        # And that it agrees with SLALIB answer to within 14km
        bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
        assert np.sqrt(((bary.cartesian.xyz -
                         bary_slalib)**2).sum()) < 14. * u.km
def test_lsr_sanity():
    """Sanity-check ICRS<->LSR by round-tripping zero-velocity sources."""
    # A source at rest in ICRS should appear, in LSR, to move with the
    # barycentric solar velocity v_bary (random but fixed sky position).
    icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
                pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
                radial_velocity=0*u.km/u.s)
    lsr = icrs.transform_to(LSR)

    vel_cart = lsr.data.differentials['s'].represent_as(
        CartesianRepresentation, base=lsr.data)
    vel_gal_xyz = ICRS(vel_cart).transform_to(Galactic).cartesian.xyz
    assert allclose(vel_gal_xyz.to(u.km/u.s, u.dimensionless_angles()),
                    lsr.v_bary.d_xyz)

    # Conversely, a source at rest in LSR should appear, in ICRS, to move
    # with -v_bary.
    lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
              pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
              radial_velocity=0*u.km/u.s)
    icrs = lsr.transform_to(ICRS)

    vel_cart = icrs.data.differentials['s'].represent_as(
        CartesianRepresentation, base=icrs.data)
    vel_gal_xyz = ICRS(vel_cart).transform_to(Galactic).cartesian.xyz
    assert allclose(vel_gal_xyz.to(u.km/u.s, u.dimensionless_angles()),
                    -lsr.v_bary.d_xyz)
| AustereCuriosity/astropy | astropy/coordinates/tests/test_celestial_transformations.py | Python | bsd-3-clause | 11,657 |
# -*- coding: cp1252 -*-
from __future__ import division
from sys import argv
from math import floor, fabs, log10, pow
# codigo encontrado em:
# http://stackoverflow.com/questions/15733772/convert-float-number-to-string-with-engineering-notation-with-si-prefixe-in-py
def ToSI(d):
    """Format the number *d* in engineering notation with an SI prefix.

    Returns a string like ``"1.5 k"`` for 1500 or ``"2.0 m"`` for 0.002.
    Values with magnitude in [1, 1000), and exactly zero, come back with
    no prefix.  Magnitudes beyond the prefix tables are clamped to the
    largest/smallest known prefix.

    Adapted from:
    http://stackoverflow.com/questions/15733772/convert-float-number-to-string-with-engineering-notation-with-si-prefixe-in-py
    """
    incPrefixes = ['k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    decPrefixes = ['m', 'µ', 'n', 'p', 'f', 'a', 'z', 'y']
    # log10(0) raises ValueError -- zero needs no prefix anyway, so short-
    # circuit it here (previously this function crashed on d == 0).
    if d == 0:
        return "{d}".format(d=d)
    # One "degree" per factor of 1000, i.e. per SI prefix step.
    degree = int(floor(log10(fabs(d)) / 3))
    prefix = ''
    if degree != 0:
        ds = degree / fabs(degree)  # sign of degree: +1.0 or -1.0
        if ds == 1:
            if degree - 1 < len(incPrefixes):
                prefix = incPrefixes[degree - 1]
            else:
                # Out of range: clamp to the largest prefix (yotta).
                prefix = incPrefixes[-1]
                degree = len(incPrefixes)
        elif ds == -1:
            if -degree - 1 < len(decPrefixes):
                prefix = decPrefixes[-degree - 1]
            else:
                # Out of range: clamp to the smallest prefix (yocto).
                prefix = decPrefixes[-1]
                degree = -len(decPrefixes)
        # Rescale into [1, 1000) relative to the chosen prefix.
        scaled = float(d * pow(1000, -degree))
        s = "{scaled} {prefix}".format(scaled=scaled, prefix=prefix)
    else:
        s = "{d}".format(d=d)
    return s
if __name__ == '__main__':
if argv < 2:
print 'ERRO: SEM ARGUMENTOS'
exit(-1)
v = float(argv[1])
print 'VRMS = %sV' % (ToSI(v/(2**(1/2))))
print 'VPICO = %sV' % (ToSI(v*(2**(1/2))))
print 'VLINHA = %sV' % (ToSI(v*(2**(1/3))))
print 'VFASE = %sV' % (ToSI(v/(2**(1/3))))
exit(0)
| felipeband/ASP | source/vv.py | Python | apache-2.0 | 1,372 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.