| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
import pylab as pl
import scipy as sp
from serpentine import *
from elements import *
import visualize
class AtfExt :
def __init__(self) :
        print('AtfExt:__init__')
# set twiss parameters
mytwiss = Twiss()
mytwiss.betax = 6.85338806855804
mytwiss.alphax = 1.11230788371885
mytwiss.etax = 3.89188697330735e-012
mytwiss.etaxp = 63.1945125619190e-015
mytwiss.betay = 2.94129410712918
mytwiss.alphay = -1.91105724003646
mytwiss.etay = 0
mytwiss.etayp = 0
mytwiss.nemitx = 5.08807339588144e-006
mytwiss.nemity = 50.8807339588144e-009
mytwiss.sigz = 8.00000000000000e-003
mytwiss.sigP = 1.03999991965541e-003
mytwiss.pz_cor = 0
# load beam line
self.atfFull = Serpentine(line='newATF2lat.aml',twiss=mytwiss)
self.atfExt = Serpentine(line=beamline.Line(self.atfFull.beamline[947:]),twiss=mytwiss)
        # zero all correctors
self.atfExt.beamline.ZeroCors()
# Track
self.atfExt.Track()
readings = self.atfExt.GetBPMReadings()
# Visualisation
self.v = visualize.Visualize()
def moverCalibration(self, mag, bpms) :
pass
def correctorCalibration(self, corr, bpms) :
pass
def bba(self, mag, bpm) :
pass
def magMoverCalibration(self, mag, bpm) :
pass
def setMagnet(self,name, value) :
ei = self.atfExt.beamline.FindEleByName(name)
        print(ei)
e = self.atfExt.beamline[ei[0]]
e.B = value
def plotOrbit(self) :
self.v.PlotBPMReadings(self.atfExt)
def plotTwiss(self) :
self.v.PlotTwiss(self.atfExt)
def run(self) :
self.atfExt.Track()
def jitterBeam(self) :
r = 1+sp.random.standard_normal()
# self.s.beam_in.x[5,:] = (1+r/3e4)*self.nominalE
# print r,self.s.BeamIn.x[5,:]
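# --- hedged usage sketch (added for illustration, not part of the original file) ---
# Drives the class above; the magnet name 'QF1X' is an assumption and the
# lattice file 'newATF2lat.aml' must be on the path.
def _demo_atf_ext():
    atf = AtfExt()              # loads the lattice, zeroes correctors, tracks once
    atf.setMagnet('QF1X', 0.5)  # assumed magnet name; sets its B value
    atf.run()                   # re-track with the new setting
    atf.plotOrbit()             # plot the resulting BPM readings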
| OscarES/serpentinetracker | examples/atf/atfExt.py | Python | gpl-3.0 | 1,975 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 05 13:55:13 2014
@author: Alison Kirkby
"""
import mtpy.core.edi as mtedi
import os
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as si
import mtpy.utils.exceptions as MTex
import mtpy.utils.calculator as MTcc
import mtpy.analysis.geometry as MTg
import cmath
import math
class Control():
def __init__(self, **input_parameters):
self.run_input = [1, 0, 0.1, 40, 1.05, 1, 0]
# define control file parameters
self.iteration_max = 100 # max number of iterations
self.penalty_type_structure = 6
self.penalty_type_anisotropy = 2 # type of structure and anisotropy penalties
# values for the structure penalty weights
self.penalty_weight_structure = [0.1, 1.0, 10.0]
# values for the anisotropy penalty weights
self.penalty_weight_anisotropy = [0.1, 1.0, 10.0]
self.working_directory = '.'
for key in list(input_parameters.keys()):
setattr(self, key, input_parameters[key])
if not os.path.exists(self.working_directory):
os.mkdir(self.working_directory)
def write_ctlfile(self):
"""
write control file
"""
# create control file
# control file name is hardcoded into software!
        ctlfile = open(os.path.join(
            self.working_directory, 'inregulm.dat'), 'w')
# define number of weights
nw_struct = len(self.penalty_weight_structure)
nw_aniso = len(self.penalty_weight_anisotropy)
for thing in [(2, self.iteration_max), (nw_struct, nw_aniso), (self.penalty_type_structure, self.penalty_type_anisotropy)]:
ctlfile.write('%1i%6i\n' % thing)
for thing in [self.penalty_weight_structure, self.penalty_weight_anisotropy]:
ctlfile.write(' '.join([str(i) for i in thing]) + '\n')
ctlfile.close()
print("written control file to {}".format(self.working_directory))
inmodel_kwds = ['inmodel_dictionary']
class Inmodel():
    """
    Handles the inmodel file, which constrains the inversion with a
    pre-defined model, in the format top depth: [min res, max res, strike].
    """
def __init__(self, **input_parameters):
self.working_directory = '.'
self.inmodel_modeldir = None
self.inmodelfile = 'inmodel.dat'
# dictionary containing values for
self.inmodel_dictionary = {0: [100, 100, 0]}
# inmodel file, in format topdepth: [minres,maxres,strike]
for key in list(input_parameters.keys()):
setattr(self, key, input_parameters[key])
def build_inmodel(self):
"""
build an inmodel file to be used as a constraint
need to give it a dictionary containing values (list of rmin,rmax and strike) and bottom depths
depths are the keys, resistivities are the values
and a modeldir - needs to have the same steps as
the model planned to run.
"""
modelf = open(os.path.join(self.inmodel_modeldir, 'ai1mod.dat'))
modelf.readline()
flag = True
model = []
ii = 1
        while flag:
            line = modelf.readline()
            if not line:
                # EOF: the original looped forever here, because an empty
                # line parses to an empty list without raising
                break
            try:
                m = [float(i) for i in line.strip().split()]
                if len(m) > 0:
                    if ii % 2 == 0:
                        model.append(m)
                    ii += 1
            except ValueError:
                flag = False
model = np.array(model)
model[:, 2:] = 0.
mvals = model[:, 2:]
mi = model[:, 0]
mdepths = [0.] + list(model[:, 1])
mthick = np.array([mdepths[i + 1] - mdepths[i]
for i in range(len(mi))])
keys = list(self.inmodel_dictionary.keys())
keys.sort()
for key in keys:
cond = model[:, 1] >= key
mvals[cond] = np.array(self.inmodel_dictionary[key])
self.inmodel = np.vstack([mi, mthick, mvals.T]).T
def write_inmodel(self, wd=None):
"""
"""
if wd is not None:
self.working_directory = wd
if not hasattr(self, 'inmodel'):
self.build_inmodel()
np.savetxt(os.path.join(self.working_directory, 'inmodel.dat'),
self.inmodel,
fmt=['%5i', '%11.4e', '%11.4e', '%11.4e', '%11.4e'])
print("written inmodel file to {}".format(self.working_directory))
def read_inmodel(self):
"""
read the inmodel file
"""
# read in file
inmodel = np.loadtxt(os.path.join(
self.working_directory, self.inmodelfile))
# convert layer thicknesses to depths
depths = np.array([[sum(inmodel[:i, 1]), sum(inmodel[:i + 1, 1])]
for i in range(len(inmodel))]).flatten()
values = np.zeros((len(inmodel) * 2, 5))
ii = 0
for val in inmodel:
for i in range(2):
values[ii] = val
ii += 1
self.inmodel = np.vstack(
[values[:, 0], depths, values[:, 2], values[:, 3], values[:, 4]]).T
def get_boundaries(self):
"""
get points at which the resistivity changes in the inmodel file
"""
if not hasattr(self, 'inmodel'):
try:
self.read_inmodel()
except IOError:
print("please define working directory")
return
data = self.inmodel
bd = []
        for i in range(len(data) - 1):
            if data[i, 2] != data[i + 1, 2]:
                bd.append(data[i, 1])
            # the original repeated the first test here; comparing the
            # second resistivity column is the likely intent
            elif data[i, 3] != data[i + 1, 3]:
                bd.append(data[i, 1])
self.boundary_depths = bd
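# --- hedged usage sketch (added for illustration) ---
# A two-layer constraint: resistivity fixed at 100 ohm-m at the surface and
# allowed between 100 and 1000 ohm-m below 500 m. './reference_model' is an
# assumed directory holding an 'ai1mod.dat' with the same layer steps as the
# planned run.
def _demo_inmodel():
    inm = Inmodel(working_directory='./pek1d_test',
                  inmodel_modeldir='./reference_model',
                  inmodel_dictionary={0: [100, 100, 0],
                                      500: [100, 1000, 0]})
    inm.write_inmodel()  # builds the inmodel array first if needed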
class Data():
"""
deals with input data from 1d inversions, including creating a data file
and reading a data file afterwards to compare with inversion responses
"""
def __init__(self, working_directory, **input_parameters):
self.working_directory = working_directory
self.respfile = 'ai1dat.dat'
self.datafile = None
self.errorfloor = np.ones([2, 2]) * 0.1
self.errorfloor_type = 'relative' # relative, absolute or offdiagonals
self.edipath = None
self.mode = 'I'
for key in list(input_parameters.keys()):
if hasattr(self, key):
setattr(self, key, input_parameters[key])
# default working directory is epath if it is specified, otherwise
# current directory
if self.working_directory is None:
if self.edipath is not None:
self.working_directory = os.path.dirname(self.edipath)
else:
self.working_directory = '.'
def build_data(self):
"""
create data to write to an input file
"""
# read edi file to edi object
self.edi_object = mtedi.Edi(self.edipath)
# define z
zr = np.real(self.edi_object.Z.z)
# sign of imaginary component needs to be reversed for the pek1d
# inversion code
zi = -np.imag(self.edi_object.Z.z)
ze = self.edi_object.Z.z_err
z = zr + 1j * zi
# set errorfloors
if type(self.errorfloor) in [int, float]:
self.errorfloor = np.ones([2, 2]) * self.errorfloor
if self.errorfloor_type in ['relative', 'offdiagonals']:
zer = ze / np.abs(z)
for i in range(2):
for j in range(2):
zer[:, i, j][(zer[:, i, j] < self.errorfloor[
i, j])] = self.errorfloor[i, j]
ze = np.abs(z) * zer
if self.errorfloor_type == 'offdiagonals':
for i in range(2):
for iz in range(len(z)):
if ze[iz, i, i] < ze[iz, i, 1 - i]:
ze[iz, i, i] = ze[iz, i, 1 - i]
elif self.errorfloor_type == 'absolute':
for i in range(2):
for j in range(2):
ze[:, i, j][(ze[:, i, j] < self.errorfloor[
i, j])] = self.errorfloor[i, j]
# define header info for data file
header = '{:>5}\n{:>5}'.format(self.mode, len(self.edi_object.Z.resistivity))
# create data array
data_list = [1. / self.edi_object.Z.freq]
for i in range(2):
for j in range(2):
if self.mode == 'I':
dd = [zr, ze, zi, ze]
for d in dd:
data_list.append(d[:, i, j])
self.header = header
self.data = np.vstack(data_list).T
self.z = zr + 1j * zi
self.z_err = ze
def write_datafile(self, wd=None):
"""
write data to file
"""
if wd is not None:
self.working_directory = wd
self.build_data()
# define format list for writing data file
fmt = ['%14.5f'] + ['%12.5e'] * 16
# define file name and save data file
fname_bas = self.edi_object.station.split('_')[0]
self.datafile = fname_bas + '.dat'
fname = os.path.join(self.working_directory, self.datafile)
np.savetxt(fname, self.data, fmt=fmt, header=self.header, comments='')
def read_datafile(self):
"""
read data file into the data object.
calculate resistivity and phase
"""
if self.datafile is None:
default_files = ['ai1dat.dat', 'ai1mod.dat', 'ai1fit.dat',
'inmodel.dat', 'inregulm.dat']
dlst = [i for i in os.listdir(self.working_directory) if
(i[-4:] == '.dat') and (i not in default_files)]
if len(dlst) == 1:
self.datafile = dlst[0]
else:
print("please define datafile")
return
# define path to file
datafpath = os.path.join(self.working_directory, self.datafile)
self.mode = open(datafpath).readline().strip().split()[0]
data = np.loadtxt(datafpath, skiprows=2)
self.freq = 1. / data[:, 0]
if self.mode == 'I':
zr = np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 1) % 4 == 0])
ze = np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 2) % 4 == 0])
zi = -np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 3) % 4 == 0])
z = zr + 1j * zi
self.z = z.T.reshape(len(z[0]), 2, 2)
self.z_err = ze.T.reshape(len(z[0]), 2, 2)
# make a frequency array that has the same shape as z
freq2 = np.zeros(np.shape(self.z))
for i in range(len(freq2)):
freq2[i, :, :] = 1. / data[:, 0][i]
# calculate resistivity
self.resistivity = 0.2 * np.abs(self.z)**2 / freq2
q = np.zeros(np.shape(self.resistivity))
# q[(zr<0)&(zi<0)] = np.pi
# q[(zr<0)&(zi>0)] = -np.pi
phase = np.zeros([len(self.z), 2, 2])
res = np.zeros([len(self.z), 2, 2])
self.resistivity_err = np.zeros([len(self.z), 2, 2])
self.phase_err = np.zeros([len(self.z), 2, 2])
self.q = q
for iz in range(len(self.z)):
for i in range(2):
for j in range(2):
phase[iz, i, j] = np.rad2deg(
cmath.phase(self.z[iz, i, j]))
res[iz, i, j] = 0.2 * \
np.abs(self.z[iz, i, j])**2 / self.freq[iz]
r_err, phi_err = MTcc.z_error2r_phi_error(
np.real(self.z[iz, i, j]),
self.z_err[iz, i, j],
np.imag(self.z[iz, i, j]),
self.z_err[iz, i, j])
self.resistivity_err[iz, i, j] = \
0.4 * np.abs(self.z[iz, i, j]) /\
self.freq[iz] * r_err
self.phase_err[iz, i, j] = phi_err
phase[phase < -180] += 360
self.phase = phase
self.resistivity = res
elif self.mode == 'R':
res = np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 1) % 4 == 0])
self.resistivity = res.T.reshape(len(res[0]), 2, 2)
res_err = np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 2) % 4 == 0])
self.resistivity_err = res_err.T.reshape(len(res_err[0]), 2, 2)
phs = np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 3) % 4 == 0])
self.phase = phs.T.reshape(len(phs[0]), 2, 2)
phs_err = np.vstack([data[:, i]
for i in range(len(data[0])) if (i - 4) % 4 == 0])
self.phase_err = phs_err.T.reshape(len(phs_err[0]), 2, 2)
def rotate(self, rotation_angle):
"""
use mtpy.analysis.geometry to rotate a z array and recalculate res and phase
"""
from . import pek1dclasses as pek1dc
if not hasattr(self, 'z'):
self.read_datafile()
new_z = np.zeros_like(self.z)
new_ze = np.zeros_like(self.z_err, dtype=float)
# for iz,zarray in enumerate(self.z):
new_z, new_ze = MTg.MTz.rotate_z(
self.z, rotation_angle, z_err_array=self.z_err)
self.z = new_z
self.z_err = new_ze
self.resistivity, self.resistivity_err, self.phase, self.phase_err = \
pek1dc._compute_res_phase(self.z, self.z_err, self.freq)
self.rotation_angle = rotation_angle
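# --- hedged usage sketch (added for illustration) ---
# Building and writing a data file from an edi file with a 10% relative
# error floor; the edi path is an assumption.
def _demo_data():
    d = Data(working_directory='./pek1d_test',
             edipath='./edis/station1.edi',
             errorfloor=0.1,
             errorfloor_type='relative')
    d.write_datafile()  # calls build_data() and saves '<station>.dat'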
class Response():
"""
deals with responses from 1d inversions
"""
def __init__(self, wkdir, **input_parameters):
self.working_directory = wkdir
self.respfile = 'ai1dat.dat'
self.misfit_threshold = 1.1
self.station = None
for key in list(input_parameters.keys()):
if hasattr(self, key):
setattr(self, key, input_parameters[key])
self.read_respfile()
def read_respfile(self):
"""
read respfile into a data object
"""
# define path to file
respfpath = os.path.join(self.working_directory, self.respfile)
respf = open(respfpath)
# find out number of models
n = 0
for line in respf.readlines():
if 'REG' in line:
n += 1
# load model responses into an array
        resp = np.genfromtxt(respfpath, skip_header=1, invalid_raise=False)
resmod = np.vstack([resp[:, i]
for i in range(len(resp[0])) if (i - 1) % 2 == 0])
phsmod = np.vstack([resp[:, i] for i in range(
len(resp[0])) if i != 0 and (i - 2) % 2 == 0])
        period = resp[:len(resp) // n, 0]
        self.resistivity = resmod.T.reshape(n, len(resp) // n, 2, 2)
        self._phase = phsmod.T.reshape(n, len(resp) // n, 2, 2)
        self.freq = 1. / period
        zabs = np.zeros((n, len(resp) // n, 2, 2))
for m in range(n):
for f in range(len(self.freq)):
zabs[m, f] = (self.resistivity[m, f] * self.freq[f] / 0.2)**0.5
zr = zabs * np.cos(np.deg2rad(self._phase))
zi = -zabs * np.sin(np.deg2rad(self._phase))
self.z = zr + 1j * zi
self.phase = -self._phase
def rotate(self, rotation_angle):
"""
use mtpy.analysis.geometry to rotate a z array and recalculate res and phase
"""
from . import pek1dclasses as pek1dc
if not hasattr(self, 'z'):
self.read_respfile()
new_z = np.zeros_like(self.z)
z_err = np.zeros_like(self.z, dtype=float)
for iz, zarray in enumerate(self.z):
new_z[iz], ze = MTg.MTz.rotate_z(zarray, rotation_angle)
self.z = new_z
self.resistivity = np.zeros_like(self.z, dtype=float)
self.phase = np.zeros_like(self.z, dtype=float)
for iz in range(len(self.z)):
r, re, p, pe = pek1dc._compute_res_phase(
self.z[iz], z_err[iz], self.freq)
self.resistivity[iz] = r
# self.resistivity_err[iz] = re
self.phase[iz] = p
# self.phase_err[iz] = pe
self.rotation_angle = rotation_angle
class Fit():
"""
deals with outputs from 1d inversions
"""
def __init__(self, wkdir, **input_parameters):
self.working_directory = wkdir
self.fitfile = 'ai1fit.dat'
self.respfile = 'ai1dat.dat'
self.misfit_threshold = 1.1
self.station = None
for key in list(input_parameters.keys()):
if hasattr(self, key):
setattr(self, key, input_parameters[key])
self.read_fit()
def find_nperiods(self):
"""
find number of periods used in inversion
"""
# find out number of periods
respfpath = os.path.join(self.working_directory, self.respfile)
respf = open(respfpath)
respf.readline()
n = 0
line = respf.readline()
while 'REG' not in line:
line = respf.readline()
n += 1
self.n_periods = n - 1
def read_fit(self):
"""
read fit file to give structure and anisotropy penalties and penalty weights
"""
# load the file with fit values in it
fit = np.loadtxt(os.path.join(self.working_directory, self.fitfile))
# print os.path.join(self.working_directory,self.fitfile)
# print np.shape(fit)
# find number of periods
self.find_nperiods()
# total misfit
self.misfit_mean = (fit[:, 5] / (self.n_periods * 8.))**0.5
# structure and anisotropy penalty
self.penalty_structure = fit[:, 6]
self.penalty_anisotropy = fit[:, 7]
self.weight_structure = fit[:, 2]
self.weight_anisotropy = fit[:, 4]
self.modelno = fit[:, 0]
self.fit = fit
def find_bestmodel(self):
"""
find the smoothest model that fits the data within self.misfit_threshold
"""
self.read_fit()
fit = self.fit
# define parameters
mis = self.misfit_mean
s = self.penalty_structure / np.median(self.penalty_structure)
a = self.penalty_anisotropy / np.median(self.penalty_anisotropy)
# define function to minimise
f = a * s * np.abs(a - s) / (a + s)
# define the parameters relating to the best model
self.params_bestmodel = fit[
f == min(f[mis < min(mis) * self.misfit_threshold])][0]
self.params_fittingmodels = fit[mis < min(mis) * self.misfit_threshold]
class Model():
"""
deals with outputs from 1d inversions
"""
def __init__(self, wkdir, **input_parameters):
self.working_directory = wkdir
self.modelfile = 'ai1mod.dat'
self.respfile = 'ai1dat.dat'
self.fitfile = 'ai1fit.dat'
self.inmodelfile = 'inmodel.dat'
self.datafile = None
self.modelno = 1
self.models = None
self.misfit_threshold = 1.1
self.station = None
self.Fit = None
self.Resp = None
self.Data = None
self.x = 0.
self.y = 0.
self.input_parameters = input_parameters
for key in list(input_parameters.keys()):
if hasattr(self, key):
setattr(self, key, input_parameters[key])
if self.station is None:
self.station = os.path.basename(
self.working_directory).split('_')[0]
self.read_model()
self.read_fit()
self.read_response()
self.read_datafile()
self._calculate_fit_vs_freq()
def read_model(self):
"""
read all models into an array
"""
fpath = os.path.join(self.working_directory, self.modelfile)
# print fpath
nlayers = 0
flag = True
modelf = open(fpath)
modelf.readline()
while flag:
try:
nlayers = int(modelf.readline().strip().split()[0])
            except (ValueError, IndexError):
                flag = False
        models = np.genfromtxt(fpath, skip_header=1, invalid_raise=False)
        self.models = models.reshape(
            len(models) // (2 * nlayers), 2 * nlayers, 5)
def read_fit(self):
if self.Fit is None:
self.Fit = Fit(self.working_directory, **self.input_parameters)
def read_response(self):
if self.Resp is None:
self.Resp = Response(self.working_directory,
**self.input_parameters)
def read_datafile(self):
if self.Data is None:
self.Data = Data(working_directory=self.working_directory,
**self.input_parameters)
self.Data.read_datafile()
def _calculate_fit_vs_freq(self):
misfit_real = ((np.real(
self.Resp.z[self.modelno - 1]) - np.real(self.Data.z)) / self.Data.z_err)**2
misfit_imag = ((np.imag(
self.Resp.z[self.modelno - 1]) - np.imag(self.Data.z)) / self.Data.z_err)**2
self.Fit.misfit = misfit_real + 1j * misfit_imag
def check_consistent_strike(self, depth,
window=5,
threshold=15.):
"""
check if a particular depth point corresponds to a consistent
strike direction
"""
if self.models is None:
self.read_model()
# get model of interest
model = self.models[self.modelno - 1]
#
depths = model[:, 1]
closest_depth = depths[
np.abs(depths - depth) == np.amin(np.abs(depths - depth))][0]
cdi = list(depths).index(closest_depth)
i1 = max(0, cdi - int(window / 2) * 2 - 1)
i2 = min(len(model) - 2, cdi + int(window / 2) * 2 + 1)
strikes = model[:, -1][i1:i2]
return np.std(strikes) < threshold
def find_max_anisotropy(self, min_depth=0.,
max_depth=None,
strike_window=5,
strike_threshold=10.):
"""
find the point of maximum anisotropy in a model result within a given
depth range. Check that the strike is stable below defined threshold
"""
if self.models is None:
self.read_model()
print(self.station)
# get model of interest
model = self.models[self.modelno - 1]
if max_depth is None:
max_depth = np.amax(model[:, 1])
# get values only between min and max depth
model_filt = model[(model[:, 1] > min_depth) &
(model[:, 1] < max_depth)]
aniso = 1. * model_filt[:, 3] / model_filt[:, 2]
aniso_max = np.amax(aniso)
# define an initial aniso max depth
depth_aniso_max = model_filt[:, 1][aniso == aniso_max][0]
i = 0
while not self.check_consistent_strike(depth_aniso_max,
window=strike_window,
threshold=strike_threshold):
aniso[aniso == aniso_max] = 1.
aniso_max = np.amax(aniso)
depth_aniso_max = model_filt[:, 1][aniso == aniso_max][0]
i += 1
if i > len(model_filt):
print("can't get stable strike")
break
params = model_filt[aniso == aniso_max][0]
# params[-1] = params[-1]%180
self.anisotropy_max_parameters = params
def update_location_from_file(self, xyfile, indices=[0, 999]):
"""
updates x and y location from an xy file with format
station x y
can give indices to search on if the station name in the file
is not exactly the same as defined in the model.
"""
        # not implemented for single models; see
        # Model_suite.update_multiple_locations_from_file below
        return
class Model_suite():
"""
"""
def __init__(self,
working_directory,
**input_parameters):
self.working_directory = working_directory
self.model_list = []
self.inmodel_list = []
self.modelfile = 'ai1mod.dat'
self.respfile = 'ai1dat.dat'
self.fitfile = 'ai1fit.dat'
self.inmodelfile = 'inmodel.dat'
self.rotation_angle = 0
self.modelno = 1
self.station_list = []
self.station_listfile = None
self.station_search_indices = [0, 999]
self.station_xyfile = None
self.anisotropy_surface_file = 'model%03i_aniso_depth.dat'
for key in list(input_parameters.keys()):
setattr(self, key, input_parameters[key])
if self.station_listfile is not None:
try:
self.station_list = [i.strip() for i in open(
self.station_listfile).readlines()]
            except IOError:
print("can't open station list file")
if self.model_list == []:
self.inmodel_list = []
wd = self.working_directory
folder_list = [os.path.join(wd, f) for f in os.listdir(
wd) if os.path.isdir(os.path.join(wd, f))]
if len(self.station_list) > 0:
i1, i2 = self.station_search_indices
folder_list2 = []
for s in self.station_list:
for ff in folder_list:
if str.lower(os.path.basename(ff).split('_')[0][i1:i2]) == str.lower(s):
folder_list2.append(ff)
# print s
folder_list = folder_list2
for folder in folder_list:
try:
model = Model(folder)
model.read_model()
self.model_list.append(model)
except IOError:
print(folder, "model file not found")
try:
inmodel = Inmodel(working_directory=folder)
inmodel.read_inmodel()
self.inmodel_list.append(inmodel)
except IOError:
print("inmodel file not found")
if self.station_xyfile is not None:
self.update_multiple_locations_from_file()
def get_aniso_peak_depth(self,
min_depth=0,
max_depth=None,
strike_threshold=10.,
strike_window=5):
"""
get the min and max resistivities, depth and strike at point of maximum
anisotropy between min and max depth.
min and max depth can be float, integer or numpy array
the depth is only selected if the strike is stable within parameters
given by strike threshold and strike window.
"""
model_params = np.zeros([len(self.model_list), 6])
if type(min_depth) in [float, int]:
min_depth = np.zeros(len(self.model_list)) + min_depth
if type(max_depth) in [float, int]:
max_depth = np.zeros(len(self.model_list)) + max_depth
for i, model in enumerate(self.model_list):
model.modelno = self.modelno
model.find_max_anisotropy(min_depth=min_depth[i],
max_depth=max_depth[i],
strike_window=strike_window,
strike_threshold=strike_threshold)
x, y = model.x, model.y
depth, te, tm, strike = model.anisotropy_max_parameters[1:]
strike = strike + self.rotation_angle
model_params[i] = x, y, depth, te, tm, strike
self.anisotropy_max_parameters = model_params
if '%' in self.anisotropy_surface_file:
self.anisotropy_surface_file = self.anisotropy_surface_file % self.modelno
np.savetxt(os.path.join(self.working_directory,
self.anisotropy_surface_file),
model_params,
header=' '.join(
['x', 'y', 'z', 'resmin', 'resmax', 'strike']),
fmt=['%14.6f', '%14.6f', '%8.2f', '%8.2f', '%8.2f', '%8.2f'])
def update_multiple_locations_from_file(self):
"""
updates multiple x and y locations from an xy file with format
station x y
can give indices to search on if the station name in the file
is not exactly the same as defined in the model.
"""
xy = {}
i1, i2 = self.station_search_indices
for line in open(self.station_xyfile):
line = line.strip().split()
xy[str.lower(line[0])] = [float(line[1]), float(line[2])]
for model in self.model_list:
model.x, model.y = xy[str.lower(model.station[i1:i2])]
self.x = np.array([m.x for m in self.model_list])
self.y = np.array([m.y for m in self.model_list])
def get_median_misfit(self):
"""
"""
n = len(self.model_list)
model_misfits = np.zeros(n)
for m, model in enumerate(self.model_list):
fit = Fit(model.working_directory,
fitfile=self.fitfile,
respfile=self.respfile)
fit.read_fit()
            model_misfits[m] = fit.misfit_mean[self.modelno - 1]
self.model_misfits = model_misfits
self.median_misfit = np.median(model_misfits)
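# --- hedged usage sketch (added for illustration) ---
# Collating a directory of inversions and extracting the depth of maximum
# anisotropy for model number 3; the directory layout and xy file are assumed.
def _demo_model_suite():
    suite = Model_suite('./inversions',
                        modelno=3,
                        station_xyfile='./stations_xy.dat')
    suite.get_aniso_peak_depth(min_depth=100., max_depth=5000.)
    # results are also written to suite.anisotropy_surface_file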
def _compute_res_phase(z, z_err, freq):
"""
calculates *resistivity*, *phase*, *resistivity_err*, *phase_err*
values for resistivity are in in Ohm m and phase in degrees.
"""
resistivity_err = np.zeros_like(z_err)
phase_err = np.zeros_like(z_err)
resistivity = np.zeros_like(z, dtype='float')
phase = np.zeros_like(z, dtype='float')
# calculate resistivity and phase
for idx_f in range(len(z)):
for i in range(2):
for j in range(2):
resistivity[idx_f, i, j] = np.abs(z[idx_f, i, j])**2 /\
freq[idx_f] * 0.2
phase[idx_f, i, j] = math.degrees(cmath.phase(
z[idx_f, i, j]))
if z_err is not None:
r_err, phi_err = MTcc.z_error2r_phi_error(
np.real(z[idx_f, i, j]),
z_err[idx_f, i, j],
np.imag(z[idx_f, i, j]),
z_err[idx_f, i, j])
resistivity_err[idx_f, i, j] = \
0.4 * np.abs(z[idx_f, i, j]) /\
freq[idx_f] * r_err
phase_err[idx_f, i, j] = phi_err
return resistivity, resistivity_err, phase, phase_err
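# --- hedged worked example (added for illustration) ---
# The function implements rho = 0.2 * |Z|**2 / f and phase = arg(Z) in
# degrees. A quick self-contained check; z_err=None skips error propagation:
def _demo_compute_res_phase():
    z = np.array([[[0. + 0.j, 1. + 1.j], [-1. - 1.j, 0. + 0.j]]])
    freq = np.array([1.])
    res, _, phase, _ = _compute_res_phase(z, None, freq)
    # expect res[0, 0, 1] == 0.4 ohm-m and phase[0, 0, 1] == 45 degrees
    assert abs(res[0, 0, 1] - 0.4) < 1e-9
    assert abs(phase[0, 0, 1] - 45.) < 1e-9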
| MTgeophysics/mtpy | mtpy/modeling/pek1dclasses.py | Python | gpl-3.0 | 30,903 |
from django.core.urlresolvers import reverse
from tests.test_utils import create_generic_job
from treeherder.model.models import JobLog
def test_get_job_log_urls(test_repository, result_set_stored,
failure_classifications,
generic_reference_data, webapp):
job1 = create_generic_job('1234', test_repository, 1, 1,
generic_reference_data)
job2 = create_generic_job('5678', test_repository, 1, 2,
generic_reference_data)
JobLog.objects.create(job=job1,
name='test_log_1',
url='http://google.com',
status=JobLog.PENDING)
JobLog.objects.create(job=job1,
name='test_log_2',
url='http://yahoo.com',
status=JobLog.PARSED)
JobLog.objects.create(job=job2,
name='test_log_3',
url='http://yahoo.com',
status=JobLog.PARSED)
resp = webapp.get(reverse('job-log-url-list',
kwargs={"project": test_repository.name}) +
'?job_id=1')
assert resp.status_int == 200
assert len(resp.json) == 2
resp = webapp.get(reverse('job-log-url-list',
kwargs={"project": test_repository.name}) +
'?job_id=1&job_id=2')
assert resp.status_int == 200
assert len(resp.json) == 3
| kapy2010/treeherder | tests/webapp/api/test_job_log_url_api.py | Python | mpl-2.0 | 1,524 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import HTMLParser
import StringIO
import ast
import base64
import cgi
from collections import Mapping
import datetime
from decimal import Decimal
import gzip
import hashlib
from io import BytesIO
import json
import re
from tempfile import TemporaryFile
from pyLibrary import strings
from pyLibrary.dot import wrap, wrap_leaves, unwrap, unwraplist, split_field, join_field, coalesce
from pyLibrary.collections.multiset import Multiset
from pyLibrary.debugs.logs import Log, Except
from pyLibrary.env.big_data import FileString, safe_size
from pyLibrary.jsons import quote
from pyLibrary.jsons.encoder import json_encoder, pypy_json_encode
from pyLibrary.strings import expand_template
from pyLibrary.times.dates import Date
"""
DUE TO MY POOR MEMORY, THIS IS A LIST OF ALL CONVERSION ROUTINES
IN <from_type> "2" <to_type> FORMAT
"""
def value2json(obj, pretty=False):
try:
json = json_encoder(obj, pretty=pretty)
if json == None:
Log.note(str(type(obj)) + " is not valid{{type}}JSON", type= " (pretty) " if pretty else " ")
Log.error("Not valid JSON: " + str(obj) + " of type " + str(type(obj)))
return json
except Exception, e:
e = Except.wrap(e)
try:
json = pypy_json_encode(obj)
return json
except Exception:
pass
Log.error("Can not encode into JSON: {{value}}", value=repr(obj), cause=e)
def remove_line_comment(line):
mode = 0 # 0=code, 1=inside_string, 2=escaping
for i, c in enumerate(line):
if c == '"':
if mode == 0:
mode = 1
elif mode == 1:
mode = 0
else:
mode = 1
elif c == '\\':
if mode == 0:
mode = 0
elif mode == 1:
mode = 2
else:
mode = 1
elif mode == 2:
mode = 1
elif c == "#" and mode == 0:
return line[0:i]
elif c == "/" and mode == 0 and line[i + 1] == "/":
return line[0:i]
return line
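# --- hedged examples (added for illustration) ---
def _demo_remove_line_comment():
    # comments are stripped only outside of string literals
    assert remove_line_comment('{"a": 1} # trailing') == '{"a": 1} '
    assert remove_line_comment('{"url": "http://x"}') == '{"url": "http://x"}'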
def json2value(json_string, params={}, flexible=False, leaves=False):
"""
:param json_string: THE JSON
:param params: STANDARD JSON PARAMS
:param flexible: REMOVE COMMENTS
:param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
:return: Python value
"""
if isinstance(json_string, str):
Log.error("only unicode json accepted")
try:
if flexible:
# REMOVE """COMMENTS""", # COMMENTS, //COMMENTS, AND \n \r
# DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py# L58
json_string = re.sub(r"\"\"\".*?\"\"\"", r"\n", json_string, flags=re.MULTILINE)
json_string = "\n".join(remove_line_comment(l) for l in json_string.split("\n"))
# ALLOW DICTIONARY'S NAME:VALUE LIST TO END WITH COMMA
json_string = re.sub(r",\s*\}", r"}", json_string)
# ALLOW LISTS TO END WITH COMMA
json_string = re.sub(r",\s*\]", r"]", json_string)
if params:
json_string = expand_template(json_string, params)
# LOOKUP REFERENCES
value = wrap(json_decoder(json_string))
if leaves:
value = wrap_leaves(value)
return value
except Exception, e:
e = Except.wrap(e)
if "Expecting '" in e and "' delimiter: line" in e:
line_index = int(strings.between(e.message, " line ", " column ")) - 1
column = int(strings.between(e.message, " column ", " ")) - 1
line = json_string.split("\n")[line_index].replace("\t", " ")
if column > 20:
sample = "..." + line[column - 20:]
pointer = " " + (" " * 20) + "^"
else:
sample = line
pointer = (" " * column) + "^"
if len(sample) > 43:
sample = sample[:43] + "..."
Log.error("Can not decode JSON at:\n\t" + sample + "\n\t" + pointer + "\n")
base_str = unicode2utf8(strings.limit(json_string, 1000))
hexx_str = bytes2hex(base_str, " ")
try:
            char_str = " " + " ".join(c.decode("latin1") if ord(c) >= 32 else "." for c in base_str)
except Exception:
char_str = " "
Log.error("Can not decode JSON:\n" + char_str + "\n" + hexx_str + "\n", e)
def string2datetime(value, format=None):
return Date(value, format).value
def str2datetime(value, format=None):
return string2datetime(value, format)
def datetime2string(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2str(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2unix(d):
try:
if d == None:
return None
elif isinstance(d, datetime.datetime):
epoch = datetime.datetime(1970, 1, 1)
elif isinstance(d, datetime.date):
epoch = datetime.date(1970, 1, 1)
else:
Log.error("Can not convert {{value}} of type {{type}}", value= d, type= d.__class__)
diff = d - epoch
return Decimal(long(diff.total_seconds() * 1000000)) / 1000000
except Exception, e:
Log.error("Can not convert {{value}}", value= d, cause=e)
def datetime2milli(d):
return datetime2unix(d) * 1000
def timedelta2milli(v):
return v.total_seconds()
def unix2datetime(u):
try:
if u == None:
return None
if u == 9999999999: # PYPY BUG https://bugs.pypy.org/issue1697
return datetime.datetime(2286, 11, 20, 17, 46, 39)
return datetime.datetime.utcfromtimestamp(u)
except Exception, e:
Log.error("Can not convert {{value}} to datetime", value= u, cause=e)
def milli2datetime(u):
if u == None:
return None
return unix2datetime(u / 1000.0)
def dict2Multiset(dic):
if dic == None:
return None
output = Multiset()
output.dic = unwrap(dic).copy()
return output
def multiset2dict(value):
"""
    CONVERT MULTISET TO dict THAT MAPS KEYS TO KEY-COUNT
"""
if value == None:
return None
return dict(value.dic)
def table2list(
column_names, # tuple of columns names
rows # list of tuples
):
return wrap([dict(zip(column_names, r)) for r in rows])
def table2tab(
column_names, # tuple of columns names
rows # list of tuples
):
def row(r):
return "\t".join(map(value2json, r))
return row(column_names)+"\n"+("\n".join(row(r) for r in rows))
def list2tab(rows):
columns = set()
for r in wrap(rows):
columns |= set(k for k, v in r.leaves())
keys = list(columns)
output = []
for r in wrap(rows):
output.append("\t".join(value2json(r[k]) for k in keys))
return "\t".join(keys) + "\n" + "\n".join(output)
def list2table(rows, column_names=None):
if column_names:
keys = list(set(column_names))
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
output = [[unwraplist(r[k]) for k in keys] for r in rows]
return wrap({
"meta": {"format": "table"},
"header": keys,
"data": output
})
def list2cube(rows, column_names=None):
if column_names:
keys = column_names
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
data = {k: [] for k in keys}
output = wrap({
"meta": {"format": "cube"},
"edges": [
{
"name": "rownum",
"domain": {"type": "rownum", "min": 0, "max": len(rows), "interval": 1}
}
],
"data": data
})
for r in rows:
for k in keys:
data[k].append(r[k])
return output
def value2string(value):
# PROPER NULL HANDLING
if value == None:
return None
return unicode(value)
def value2quote(value):
# RETURN PRETTY PYTHON CODE FOR THE SAME
if isinstance(value, basestring):
return string2quote(value)
else:
return repr(value)
def string2quote(value):
if value == None:
return "None"
return quote(value)
string2regexp = re.escape
def string2url(value):
if isinstance(value, unicode):
return "".join([_map2url[c] for c in unicode2latin1(value)])
elif isinstance(value, str):
return "".join([_map2url[c] for c in value])
else:
Log.error("Expecting a string")
def value2url(value):
if value == None:
Log.error("")
if isinstance(value, Mapping):
output = "&".join([value2url(k) + "=" + (value2url(v) if isinstance(v, basestring) else value2url(value2json(v))) for k,v in value.items()])
elif isinstance(value, unicode):
output = "".join([_map2url[c] for c in unicode2latin1(value)])
elif isinstance(value, str):
output = "".join([_map2url[c] for c in value])
elif hasattr(value, "__iter__"):
output = ",".join(value2url(v) for v in value)
else:
output = unicode(value)
return output
def url_param2value(param):
"""
CONVERT URL QUERY PARAMETERS INTO DICT
"""
if isinstance(param, unicode):
param = param.encode("ascii")
def _decode(v):
output = []
i = 0
while i < len(v):
c = v[i]
if c == "%":
d = hex2bytes(v[i + 1:i + 3])
output.append(d)
i += 3
else:
output.append(c)
i += 1
output = (b"".join(output)).decode("latin1")
try:
return json2value(output)
except Exception:
pass
return output
query = {}
for p in param.split(b'&'):
if not p:
continue
if p.find(b"=") == -1:
k = p
v = True
else:
k, v = p.split(b"=")
v = _decode(v)
u = query.get(k)
if u is None:
query[k] = v
elif isinstance(u, list):
u += [v]
else:
query[k] = [u, v]
return query
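# --- hedged examples (added for illustration) ---
# repeated keys collect into a list; values pass through json2value, so
# numbers come back typed, and a bare key becomes True:
def _demo_url_param2value():
    q = url_param2value(b"a=1&a=2&flag")
    assert q == {b"a": [1, 2], b"flag": True}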
def html2unicode(value):
# http://stackoverflow.com/questions/57708/convert-xml-html-entities-into-unicode-string-in-python
return HTMLParser.HTMLParser().unescape(value)
def unicode2html(value):
return cgi.escape(value)
def unicode2latin1(value):
output = value.encode("latin1")
return output
def quote2string(value):
try:
return ast.literal_eval(value)
except Exception:
pass
# RETURN PYTHON CODE FOR THE SAME
def value2code(value):
return repr(value)
def DataFrame2string(df, columns=None):
output = StringIO.StringIO()
try:
df.to_csv(output, sep="\t", header=True, cols=columns, engine='python')
return output.getvalue()
finally:
output.close()
def ascii2char(ascii):
return chr(ascii)
def char2ascii(char):
return ord(char)
def ascii2unicode(value):
return value.decode("latin1")
def latin12hex(value):
return value.encode("hex")
def int2hex(value, size):
return (("0" * size) + hex(value)[2:])[-size:]
def hex2bytes(value):
return value.decode("hex")
def bytes2hex(value, separator=" "):
return separator.join("%02X" % ord(x) for x in value)
def base642bytearray(value):
return bytearray(base64.b64decode(value))
def base642bytes(value):
return base64.b64decode(value)
def bytes2base64(value):
return base64.b64encode(value).decode("utf8")
def bytes2sha1(value):
if isinstance(value, unicode):
Log.error("can not convert unicode to sha1")
sha = hashlib.sha1(value)
return sha.hexdigest()
def value2intlist(value):
if value == None:
return None
elif hasattr(value, '__iter__'):
output = [int(d) for d in value if d != "" and d != None]
return output
elif value.strip() == "":
return None
else:
return [int(value)]
def value2int(value):
if value == None:
return None
else:
return int(value)
def value2number(v):
try:
if isinstance(v, float) and round(v, 0) != v:
return v
# IF LOOKS LIKE AN INT, RETURN AN INT
return int(v)
except Exception:
try:
return float(v)
except Exception, e:
Log.error("Not a number ({{value}})", value= v, cause=e)
def utf82unicode(value):
return value.decode('utf8')
def unicode2utf8(value):
return value.encode('utf8')
def latin12unicode(value):
if isinstance(value, unicode):
Log.error("can not convert unicode from latin1")
try:
return unicode(value.decode('iso-8859-1'))
except Exception, e:
Log.error("Can not convert {{value|quote}} to unicode", value=value)
def pipe2value(value):
type = value[0]
if type == '0':
return None
if type == 'n':
return value2number(value[1::])
if type != 's' and type != 'a':
Log.error("unknown pipe type ({{type}}) in {{value}}", type= type, value= value)
# EXPECTING MOST STRINGS TO NOT HAVE ESCAPED CHARS
output = _unPipe(value)
if type == 's':
return output
return [pipe2value(v) for v in output.split("|")]
def zip2bytes(compressed):
"""
UNZIP DATA
"""
if hasattr(compressed, "read"):
return gzip.GzipFile(fileobj=compressed, mode='r')
buff = BytesIO(compressed)
archive = gzip.GzipFile(fileobj=buff, mode='r')
return safe_size(archive)
def bytes2zip(bytes):
"""
RETURN COMPRESSED BYTES
"""
if hasattr(bytes, "read"):
buff = TemporaryFile()
archive = gzip.GzipFile(fileobj=buff, mode='w')
for b in bytes:
archive.write(b)
archive.close()
buff.seek(0)
return FileString(buff)
buff = BytesIO()
archive = gzip.GzipFile(fileobj=buff, mode='w')
archive.write(bytes)
archive.close()
return buff.getvalue()
def ini2value(ini_content):
"""
INI FILE CONTENT TO Dict
"""
from ConfigParser import ConfigParser
buff = StringIO.StringIO(ini_content)
config = ConfigParser()
config._read(buff, "dummy")
output = {}
for section in config.sections():
        output[section] = s = {}
        for k, v in config.items(section):
            s[k] = v
return wrap(output)
_map2url = {chr(i): latin12unicode(chr(i)) for i in range(32, 256)}
for c in " {}<>;/?:@&=+$,":
_map2url[c] = "%" + int2hex(ord(c), 2)
def _unPipe(value):
s = value.find("\\", 1)
if s < 0:
return value[1::]
result = ""
e = 1
while True:
c = value[s + 1]
if c == 'p':
result = result + value[e:s] + '|'
s += 2
e = s
elif c == '\\':
result = result + value[e:s] + '\\'
s += 2
e = s
else:
s += 1
s = value.find("\\", s)
if s < 0:
break
return result + value[e::]
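# --- hedged examples (added for illustration) ---
# pipe2value decodes the '<type-char><payload>' format handled above:
def _demo_pipe2value():
    assert pipe2value('0') is None              # null
    assert pipe2value('n3') == 3                # number
    assert pipe2value('shello') == 'hello'      # string
    assert pipe2value('sa\\pb') == 'a|b'        # escaped pipe (\p)
    assert pipe2value('asx|sy') == ['x', 'y']   # array of piped values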
json_decoder = json.JSONDecoder().decode
def json_schema_to_markdown(schema):
from pyLibrary.queries import qb
def _md_code(code):
return "`"+code+"`"
def _md_italic(value):
return "*"+value+"*"
def _inner(schema, parent_name, indent):
more_lines = []
for k,v in schema.items():
full_name = join_field(split_field(parent_name)+[k])
details = indent+"* "+_md_code(full_name)
if v.type:
details += " - "+_md_italic(v.type)
else:
Log.error("{{full_name}} is missing type", full_name=full_name)
if v.description:
details += " " + v.description
more_lines.append(details)
if v.type in ["object", "array", "nested"]:
more_lines.extend(_inner(v.properties, full_name, indent+" "))
return more_lines
lines = []
if schema.title:
lines.append("#"+schema.title)
lines.append(schema.description)
lines.append("")
for k, v in qb.sort(schema.properties.items(), 0):
full_name = k
if v.type in ["object", "array", "nested"]:
lines.append("##"+_md_code(full_name)+" Property")
if v.description:
lines.append(v.description)
lines.append("")
if v.type in ["object", "array", "nested"]:
lines.extend(_inner(v.properties, full_name, " "))
else:
lines.append("##"+_md_code(full_name)+" ("+v.type+")")
if v.description:
lines.append(v.description)
return "\n".join(lines)
| klahnakoski/MoDevETL | pyLibrary/convert.py | Python | mpl-2.0 | 17,324 |
#!/usr/bin/env python3
# coding:utf-8
from __future__ import print_function
'PackagesHTTPD - stream folder content as .tar over http'
__author__ = 'Mathias Gumz <mgumz@tpip.net>'
__license__ = 'MPL2'
__version__ = ''
import sys
import os, os.path
import zipfile, tarfile
try:
    from StringIO import StringIO  # py2
except ImportError:  # py3
    from io import StringIO
import cgi
try:
from http.server import SimpleHTTPRequestHandler, HTTPServer
except ImportError: # assume py2
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class PackagesHTTPD(SimpleHTTPRequestHandler):
'''
    httpd-server to stream the contents of a given folder as
    /packages.tar when /packages.tar is accessed; otherwise
    it acts just like SimpleHTTPRequestHandler
'''
def do_GET(self):
'''
/packages.tar - serve the contents of the folder referenced in
        self.server.packages as a streamed .tar file
/packages/* - serve the files of the folder referenced in
self.server.packages (chrooting into it)
/* - serve the files of the folder referenced in
self.server.chroot
'''
if self.path == '/packages.tar':
self._serve_folder_as_tar(self.server.packages)
return
SimpleHTTPRequestHandler.do_GET(self)
def list_directory(self, path):
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
if path == self.server.chroot:
list.append("packages/")
list.append("packages.tar")
        list.sort(key=lambda a: a.lower())
f = StringIO()
f.write("<title>Directory listing for %s</title>\n" % self.path)
f.write("<h2>Directory listing for %s</h2>\n" % self.path)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name = cgi.escape(name)
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
f.write("</ul>\n<hr>\n")
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
return f
def translate_path(self, path):
'''
translates 'path' (the path-part of an uri) to a file-system based
path.
we assume self.server.folder to be the standard chroot-folder. if
the user tries to access /packages, the self.server.packages folder
is used as the chroot
'''
chroot = self.server.chroot
if path.find('/packages/') == 0:
chroot = self.server.packages
_, path = path.split('/packages/', 1)
if not os.path.isabs(chroot):
chroot = os.path.abspath(chroot)
result = SimpleHTTPRequestHandler.translate_path(self, path)
_, result = result.split(os.getcwd(), 1)
if len(result) > 0 and result[0] == '/':
result = result[1:]
result = os.path.join(chroot, result)
return result
def _serve_folder_as_tar(self, folder):
tfile = tarfile.open(name='packages.tar', mode='w|', fileobj=self.wfile)
self.send_response(200)
self.send_header('Content-type', 'application/x-tar')
self.end_headers()
tfile.add(folder, arcname='packages')
tfile.close()
    def _serve_zip_entry(self, name):
        try:
            entry = self.server.zipfile.open(name, 'r')
        except KeyError:
            self.send_response(404)
            self.end_headers()
            return
        # success path (missing in the original): stream the entry to the client
        self.send_response(200)
        self.send_header('Content-type', 'application/octet-stream')
        self.end_headers()
        self.wfile.write(entry.read())
        entry.close()
@staticmethod
def _create_zipfile(zname, zdir):
zfile = zipfile.ZipFile(zname, 'w', zipfile.ZIP_STORED, True)
for root, dirs, files in os.walk(zdir):
for f in files:
fname = os.path.join(root, f)
zfile.write(fname)
zfile.close()
if __name__ == '__main__':
def main():
if len(sys.argv) < 4:
print('usage: %s <port> <chroot> <packages_chroot>' % __file__)
return
port, chroot, packages_chroot = int(sys.argv[1]), sys.argv[2], sys.argv[3]
server_class = HTTPServer
httpd = server_class(('', port), PackagesHTTPD)
httpd.chroot = chroot
httpd.packages = packages_chroot
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
main()
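# --- hedged usage note (added for illustration) ---
# Typical invocation; the port and folders are assumed:
#   $ python result_httpd.py 8000 ./www ./packages
#   $ curl http://localhost:8000/packages.tar | tar -x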
| pokymobo/redomat | libredo/data/result_httpd.py | Python | mpl-2.0 | 4,908 |
# coding: utf-8
'''Common test fixtures
@author: Jesse Schwartzentruber (:truber)
@license:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
import logging
import os
import shutil
import subprocess
import tempfile
import pytest
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from covmanager.models import Collection, CollectionFile, Repository
from crashmanager.models import Client, Tool, User as cmUser
LOG = logging.getLogger("fm.covmanager.tests")
def _check_git():
try:
proc = subprocess.Popen(["git"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()
if output and proc.wait() == 1:
return True
except OSError: # FileNotFoundError
pass
return False
def _check_hg():
try:
proc = subprocess.Popen(["hg"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()
if output and proc.wait() == 0:
return True
except OSError: # FileNotFoundError
pass
return False
HAVE_GIT = _check_git()
HAVE_HG = _check_hg()
@pytest.fixture
def covmanager_test(db): # pylint: disable=invalid-name,unused-argument
"""Common setup/teardown tasks for all server unittests"""
user = User.objects.create_user('test', 'test@mozilla.com', 'test')
user.user_permissions.clear()
content_type = ContentType.objects.get_for_model(cmUser)
perm = Permission.objects.get(content_type=content_type, codename='view_covmanager')
user.user_permissions.add(perm)
user_np = User.objects.create_user('test-noperm', 'test@mozilla.com', 'test')
user_np.user_permissions.clear()
@pytest.fixture
def cm(request, settings, tmpdir):
class _result(object):
have_git = HAVE_GIT
have_hg = HAVE_HG
@classmethod
def create_repository(cls, repotype, name="testrepo"):
location = tempfile.mkdtemp(prefix='testrepo', dir=os.path.dirname(__file__))
request.addfinalizer(lambda: shutil.rmtree(location))
if repotype == "git":
if not HAVE_GIT:
pytest.skip("git is not available")
classname = "GITSourceCodeProvider"
elif repotype == "hg":
if not HAVE_HG:
pytest.skip("hg is not available")
classname = "HGSourceCodeProvider"
else:
raise Exception("unknown repository type: %s (expecting git or hg)" % repotype)
result = Repository.objects.create(classname=classname, name=name, location=location)
LOG.debug("Created Repository pk=%d", result.pk)
if repotype == "git":
cls.git(result, "init")
elif repotype == "hg":
cls.hg(result, "init")
return result
@staticmethod
def create_collection_file(data):
# Use a specific temporary directory to upload covmanager files
# This is required as Django now needs a path relative to that folder in FileField
location = str(tmpdir)
CollectionFile.file.field.storage.location = location
tmp_fd, path = tempfile.mkstemp(suffix=".data", dir=location)
os.close(tmp_fd)
with open(path, "w") as fp:
fp.write(data)
result = CollectionFile.objects.create(file=os.path.basename(path))
LOG.debug("Created CollectionFile pk=%d", result.pk)
return result
@classmethod
def create_collection(cls,
created=None,
description="",
repository=None,
revision="",
branch="",
tools=("testtool",),
client="testclient",
coverage='{"linesTotal":0,'
'"name":null,'
'"coveragePercent":0.0,'
'"children":{},'
'"linesMissed":0,'
'"linesCovered":0}'):
# create collectionfile
coverage = cls.create_collection_file(coverage)
# create client
client, created = Client.objects.get_or_create(name=client)
if created:
LOG.debug("Created Client pk=%d", client.pk)
# create repository
if repository is None:
repository = cls.create_repository("git")
result = Collection.objects.create(description=description,
repository=repository,
revision=revision,
branch=branch,
client=client,
coverage=coverage)
LOG.debug("Created Collection pk=%d", result.pk)
# create tools
for tool in tools:
tool, created = Tool.objects.get_or_create(name=tool)
if created:
LOG.debug("Created Tool pk=%d", tool.pk)
result.tools.add(tool)
return result
@staticmethod
def git(repo, *args):
path = os.getcwd()
try:
os.chdir(repo.location)
return subprocess.check_output(["git"] + list(args)).decode("utf-8")
finally:
os.chdir(path)
@staticmethod
def hg(repo, *args):
path = os.getcwd()
try:
os.chdir(repo.location)
return subprocess.check_output(["hg"] + list(args)).decode("utf-8")
finally:
os.chdir(path)
return _result()
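# --- hedged usage sketch (added for illustration) ---
# How a test module that sees this conftest might consume the fixtures;
# the test name and revision string are illustrative only.
def test_collection_roundtrip(covmanager_test, cm):
    repo = cm.create_repository("git")  # skipped automatically if git is missing
    collection = cm.create_collection(repository=repo, revision="abc123")
    assert collection.repository == repo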
| MozillaSecurity/FuzzManager | server/covmanager/tests/conftest.py | Python | mpl-2.0 | 6,179 |
#!/usr/bin/python
# vim:fileencoding=utf8
from __future__ import unicode_literals
import unittest
class TestUDPipe(unittest.TestCase):
def test_model(self):
import ufal.udpipe
model = ufal.udpipe.Model.load('test/data/test.model')
self.assertTrue(model)
tokenizer = model.newTokenizer(model.DEFAULT)
conlluOutput = ufal.udpipe.OutputFormat.newOutputFormat("conllu")
sentence = ufal.udpipe.Sentence()
        error = ufal.udpipe.ProcessingError()
        tokenizer.setText("Znamená to, že realitě nepodléhá. ")
self.assertTrue(tokenizer.nextSentence(sentence, error))
self.assertFalse(error.occurred())
self.assertTrue(model.tag(sentence, model.DEFAULT))
self.assertTrue(model.parse(sentence, model.DEFAULT))
self.assertEqual(conlluOutput.writeSentence(sentence), """\
# newdoc
# newpar
# sent_id = 1
# text = Znamená to, že realitě nepodléhá.
1 Znamená znamenat VERB VB-S---3P-AA--- Aspect=Imp|Mood=Ind|Negative=Pos|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act 0 root _ _
2 to ten PRON PDNS1---------- Case=Nom|Gender=Neut|Number=Sing|PronType=Dem 1 nsubj _ SpaceAfter=No
3 , , PUNCT Z:------------- _ 6 punct _ _
4 že že SCONJ J,------------- _ 6 mark _ _
5 realitě realita NOUN NNFS3-----A---- Case=Dat|Gender=Fem|Negative=Pos|Number=Sing 6 dobj _ _
6 nepodléhá podléhat VERB VB-S---3P-NA--- Aspect=Imp|Mood=Ind|Negative=Neg|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act 1 ccomp _ SpaceAfter=No
7 . . PUNCT Z:------------- _ 1 punct _ _
""")
self.assertFalse(tokenizer.nextSentence(sentence))
if __name__ == '__main__':
unittest.main()
| ufal/udpipe | releases/pypi/test/test_udpipe.py | Python | mpl-2.0 | 1,690 |
#!/usr/bin/env python
'''
Set of analytics based on ssdeep hash.
- compare
    Simple implementation of ssdeep comparisons using a few optimizations
described at the links below
https://www.virusbulletin.com/virusbulletin/2015/11/optimizing-ssdeep-use-scale
http://www.intezer.com/intezer-community-tip-ssdeep-comparisons-with-elasticsearch/
Designed to be run on a regular basis (e.g., nightly).
For each sample that has not run ssdeep analytic, search for samples where
ssdeep.compare > 0 based on chunksize, chunk 7grams, and double-chunk
7grams. Update sample with any matches and mark ssdeep analytic as having
run.
- group
Returns SHA256 hashes of samples grouped based on ssdeep hash.
'''
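# --- hedged background sketch (added for illustration) ---
# An ssdeep hash has the form 'chunksize:chunk:double_chunk'; the analytic
# below indexes these parts separately. A minimal parser (illustrative only):
def _split_ssdeep(ssdeep_hash):
    chunksize, chunk, double_chunk = ssdeep_hash.split(':')
    return int(chunksize), chunk, double_chunk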
import argparse
import configparser
import json
import os
import sys
from pprint import pprint
import ssdeep
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.join(MS_WD, 'storage') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'storage'))
if MS_WD not in sys.path:
sys.path.insert(0, os.path.join(MS_WD))
import common
import elasticsearch_storage
import multiscanner
class SSDeepAnalytic:
def __init__(self, debug=False):
storage_conf = multiscanner.common.get_config_path(multiscanner.CONFIG, 'storage')
config_object = configparser.SafeConfigParser()
config_object.optionxform = str
config_object.read(storage_conf)
conf = common.parse_config(config_object)
storage_handler = multiscanner.storage.StorageHandler(configfile=storage_conf)
es_handler = None
for handler in storage_handler.loaded_storage:
if isinstance(handler, elasticsearch_storage.ElasticSearchStorage):
es_handler = handler
break
        if not es_handler:
            print('[!] ERROR: This analytic only works with the ES storage module.')
            sys.exit(1)
# probably not ideal...
self.es = es_handler.es
self.index = conf['ElasticSearchStorage']['index']
self.doc_type = 'sample'
self.debug = debug
def ssdeep_compare(self):
# get all of the samples where ssdeep_compare has not been run
# e.g., ssdeepmeta.analyzed == false
query = {
'_source': ['ssdeep', 'SHA256'],
'query': {
'bool': {
'must': [
{'match': {'ssdeep.analyzed': 'false'}}
]
}
}
}
page = self.es.search(
self.index,
scroll='2m',
size=1000,
body=query)
records_list = []
while len(page['hits']['hits']) > 0:
for hit in page['hits']['hits']:
records_list.append(hit)
sid = page['_scroll_id']
page = self.es.scroll(scroll_id=sid, scroll='2m')
for new_ssdeep_hit in records_list:
new_ssdeep_hit_src = new_ssdeep_hit.get('_source')
chunksize = new_ssdeep_hit_src.get('ssdeep').get('chunksize')
chunk = new_ssdeep_hit_src.get('ssdeep').get('chunk')
double_chunk = new_ssdeep_hit_src.get('ssdeep').get('double_chunk')
new_sha256 = new_ssdeep_hit_src.get('SHA256')
# build new query for docs that match our optimizations
# https://github.com/intezer/ssdeep-elastic/blob/master/ssdeep_elastic/ssdeep_querying.py#L35
opti_query = {
'_source': ['ssdeep', 'SHA256'],
'query': {
'bool': {
'must': [
{
'terms': {
                            'ssdeep.chunksize': [chunksize, chunksize // 2, chunksize * 2]
}
},
{
'bool': {
'should': [
{
'match': {
'ssdeep.chunk': {
'query': chunk
}
}
},
{
'match': {
'ssdeep.double_chunk': {
'query': double_chunk
}
}
}
],
'minimum_should_match': 1
}
},
{
'bool': {
'must_not': {
'match': {
'SHA256': new_sha256
}
}
}
}
]
}
}
}
# this bool condition isn't working how I expect
# if we have already updated the match dictionary to
# include a hit, don't rerun it for the inverse
# {
# 'bool': {
# 'must_not': {
# 'exists': {
# 'field': 'ssdeep.matches.' + new_sha256
# }
# }
# }
# }
opti_page = self.es.search(
self.index,
scroll='2m',
size=1000,
body=opti_query)
while len(opti_page['hits']['hits']) > 0:
# for each hit, ssdeep.compare != 0; update the matches
for opti_hit in opti_page['hits']['hits']:
opti_hit_src = opti_hit.get('_source')
opti_sha256 = opti_hit_src.get('SHA256')
result = ssdeep.compare(
new_ssdeep_hit_src.get('ssdeep').get('ssdeep_hash'),
opti_hit_src.get('ssdeep').get('ssdeep_hash'))
if self.debug:
print(
new_ssdeep_hit_src.get('SHA256'),
opti_hit_src.get('SHA256'),
result)
msg = {'doc': {'ssdeep': {'matches': {opti_sha256: result}}}}
self.es.update(
index=self.index,
doc_type=self.doc_type,
id=new_ssdeep_hit.get('_id'),
body=json.dumps(msg))
msg = {'doc': {'ssdeep': {'matches': {new_sha256: result}}}}
self.es.update(
index=self.index,
doc_type=self.doc_type,
id=opti_hit.get('_id'),
body=json.dumps(msg))
opti_sid = opti_page['_scroll_id']
opti_page = self.es.scroll(scroll_id=opti_sid, scroll='2m')
# analytic has run against sample, set ssdeep.analyzed = true
msg = {'doc': {'ssdeep': {'analyzed': 'true'}}}
self.es.update(
index=self.index,
doc_type=self.doc_type,
id=new_ssdeep_hit.get('_id'),
body=json.dumps(msg))
def ssdeep_group(self):
# get all of the samples where ssdeep_compare has not been run
# e.g., ssdeepmeta.analyzed == false
query = {
'_source': ['ssdeep', 'SHA256'],
'query': {
'exists': {
'field': 'ssdeep.matches'
}
}
}
page = self.es.search(
self.index,
scroll='2m',
size=1000,
body=query)
records = {}
while len(page['hits']['hits']) > 0:
for hit in page['hits']['hits']:
hit_src = hit.get('_source')
records[hit_src.get('SHA256')] = hit_src.get('ssdeep', {}) \
.get('matches', {})
sid = page['_scroll_id']
page = self.es.scroll(scroll_id=sid, scroll='2m')
# inspired by ssdc
groups = []
for sha256_, matches_dict in records.items():
in_group = False
            for i in range(len(groups)):
                # membership must be checked against the group itself, not
                # the list of groups (bug in the original)
                if sha256_ in groups[i]:
                    in_group = True
                    continue
should_add = True
for match_hash in groups[i]:
if match_hash not in records.get(sha256_):
should_add = False
if should_add:
groups[i].append(sha256_)
in_group = True
if not in_group:
groups.append([sha256_])
return groups
def main():
parser = argparse.ArgumentParser(description='Script to interact with '
'Multiscanner\'s Elasticsearch datastore to run analytics based on '
'ssdeep hash.')
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Increase output to stdout')
group.add_argument('-c', '--compare', dest='compare', action='store_true',
help='Run ssdeep.compare using a few optimizations based on ssdeep'
' hash structure.')
group.add_argument('-g', '--group', dest='group', action='store_true',
help='Returns group of samples based on ssdeep hash.')
args = parser.parse_args()
ssdeep_analytic = SSDeepAnalytic(debug=args.verbose)
if args.compare:
ssdeep_analytic.ssdeep_compare()
print('[*] Success')
elif args.group:
pprint(ssdeep_analytic.ssdeep_group())
print('[*] Success')
if __name__ == '__main__':
main()
| jmlong1027/multiscanner | analytics/ssdeep_analytics.py | Python | mpl-2.0 | 10,551 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('home', '0023_remove_homepage_live_feed_intro'),
]
operations = [
migrations.CreateModel(
name='HomePageFeaturedPage',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('sort_order', models.IntegerField(blank=True, null=True, editable=False)),
('title', models.CharField(blank=True, max_length=255, help_text='Leave blank to use page title.')),
('subtitle', models.CharField(max_length=255)),
('featured_page', models.ForeignKey(verbose_name='page to feature', to='wagtailcore.Page', related_name='+')),
('home_page', modelcluster.fields.ParentalKey(to='home.HomePage', related_name='featured_pages')),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
),
]
|
ghostwords/localore
|
localore/home/migrations/0024_homepagefeaturedpage.py
|
Python
|
mpl-2.0
| 1,191
|
# Generated by Django 2.0.5 on 2018-05-10 22:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("studies", "0001_initial")]
operations = [migrations.AlterModelOptions(name="extension", options={"ordering": ("-id",)})]
|
mozilla/normandy
|
normandy/studies/migrations/0002_auto_20180510_2256.py
|
Python
|
mpl-2.0
| 272
|
"""
Serializers for Video Abstraction Layer
Serialization is usually sent through the VideoSerializer which uses the
EncodedVideoSerializer which uses the profile_name as its profile field.
"""
import json
from rest_framework import serializers
from django.core.exceptions import ValidationError
from edxval.models import Profile, Video, EncodedVideo, Subtitle, CourseVideo
class EncodedVideoSerializer(serializers.ModelSerializer):
"""
Serializer for EncodedVideo object.
    Uses the profile_name as its profile value instead of a Profile object.
"""
profile = serializers.SlugRelatedField(slug_field="profile_name")
class Meta: # pylint: disable=C1001, C0111
model = EncodedVideo
fields = (
"created",
"modified",
"url",
"file_size",
"bitrate",
"profile",
)
def get_identity(self, data):
"""
This hook is required for bulk update.
We need to override the default, to use the slug as the identity.
"""
return data.get('profile', None)
class SubtitleSerializer(serializers.ModelSerializer):
"""
Serializer for Subtitle objects
"""
content_url = serializers.CharField(source='get_absolute_url', read_only=True)
content = serializers.CharField(write_only=True)
def validate_content(self, attrs, source):
"""
Validate that the subtitle is in the correct format
"""
value = attrs[source]
if attrs.get('fmt') == 'sjson':
try:
loaded = json.loads(value)
except ValueError:
raise serializers.ValidationError("Not in JSON format")
else:
attrs[source] = json.dumps(loaded)
return attrs
class Meta: # pylint: disable=C1001, C0111
model = Subtitle
lookup_field = "id"
fields = (
"fmt",
"language",
"content_url",
"content",
)
class CourseSerializer(serializers.RelatedField):
"""
Field for CourseVideo
"""
def to_native(self, value):
return value.course_id
def from_native(self, data):
if data:
course_video = CourseVideo(course_id=data)
course_video.full_clean(exclude=["video"])
return course_video
class VideoSerializer(serializers.ModelSerializer):
"""
Serializer for Video object
    encoded_videos takes a list of dicts of EncodedVideo data.
"""
encoded_videos = EncodedVideoSerializer(many=True, allow_add_remove=True)
subtitles = SubtitleSerializer(many=True, allow_add_remove=True, required=False)
courses = CourseSerializer(many=True, read_only=False)
url = serializers.SerializerMethodField('get_url')
class Meta: # pylint: disable=C1001, C0111
model = Video
lookup_field = "edx_video_id"
exclude = ('id',)
def get_url(self, obj):
"""
Return relative url for the object
"""
return obj.get_absolute_url()
def restore_fields(self, data, files):
"""
Overridden function used to check against duplicate profile names.
Converts a dictionary of data into a dictionary of deserialized fields. Also
    checks if there are duplicate profile_name(s). If there are, the deserialization
is rejected.
"""
reverted_data = {}
if data is not None and not isinstance(data, dict):
self._errors['non_field_errors'] = ['Invalid data']
return None
try:
profiles = [ev["profile"] for ev in data.get("encoded_videos", [])]
if len(profiles) != len(set(profiles)):
self._errors['non_field_errors'] = ['Invalid data: duplicate profiles']
except KeyError:
raise ValidationError("profile required for deserializing")
except TypeError:
raise ValidationError("profile field needs to be a profile_name (str)")
for field_name, field in self.fields.items():
field.initialize(parent=self, field_name=field_name)
try:
field.field_from_native(data, files, field_name, reverted_data)
except ValidationError as err:
self._errors[field_name] = list(err.messages)
return reverted_data
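# Hypothetical sketch (not part of edx-val): the duplicate-profile guard in
# restore_fields() above reduces to the set-length comparison below; this
# helper mirrors it on a plain dict for illustration.
def _demo_has_duplicate_profiles(data):
    profiles = [ev["profile"] for ev in data.get("encoded_videos", [])]
    return len(profiles) != len(set(profiles))
# e.g. _demo_has_duplicate_profiles(
#     {"encoded_videos": [{"profile": "mobile"}, {"profile": "mobile"}]})
# returns True, and such a payload is rejected with a non_field_error.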
|
GbalsaC/bitnamiP
|
edx-val/edxval/serializers.py
|
Python
|
agpl-3.0
| 4,408
|
from tfidf import *
import psycopg2
import psycopg2.extensions
import math
def cos_sim(A,B):
def dot_product(a,b):
sum = 0.0
for key in a.keys():
if key in b:
sum += a[key]*b[key]
return sum
return dot_product(A,B)/(math.sqrt(dot_product(A,A)) * math.sqrt(dot_product(B,B)))
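# Quick worked example (illustrative comment, not part of the original
# script): with A = {'a': 2.0, 'b': 1.0} and B = {'a': 1.0, 'c': 3.0}, only
# the shared key 'a' contributes to the dot product, so
#   cos_sim(A, B) = 2.0 / (math.sqrt(5.0) * math.sqrt(10.0)) ~= 0.283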
conn = psycopg2.connect("host=localhost dbname=SOFTFile user=AUREA password=AUREA")
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
c = conn.cursor()
qry = "SELECT dataset_id, dataset_title, dataset_description \
FROM dataset"
#WHERE dataset_id < 20"
c.execute(qry)
documentList = []
docMap = []
for id,title, description in c.fetchall():
documentList.append(title + description)
docMap.append(id)
c.close()
vectors = []
print "gotDocs"
for x in range(len(documentList)):
    words = {}
    for word in documentList[x].split(None):
        words[word] = tfidf(word, documentList[x], documentList)
    #for item in sorted(words.items(), key=itemgetter(1), reverse=True):
    #    print "%f <= %s" % (item[1], item[0])
    vectors.append(words)
print "got vectors"
for i in range(len(vectors[:-1])):
for j in range(i+1, len(vectors)):
sim = cos_sim(vectors[i], vectors[j])
db_id1 = docMap[i]
db_id2 = docMap[j]
qry = "INSERT into cosine_similarity(id1, id2, score) VALUES (%s, %s, %s)"
c = conn.cursor()
c.execute(qry, (db_id1, db_id2, sim))
c.close()
|
JohnCEarls/AUREA
|
scripts/testScripts/testTFIDF.py
|
Python
|
agpl-3.0
| 1,566
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('ccx', '0008_auto_20170523_0630'),
]
operations = [
migrations.AlterField(
model_name='customcourseforedx',
name='delivery_mode',
field=models.CharField(default=b'IN_PERSON', max_length=255, choices=[(b'IN_PERSON', b'In Person'), (b'ONLINE_ONLY', b'Online')]),
),
migrations.AlterField(
model_name='customcourseforedx',
name='time',
field=models.DateTimeField(default=datetime.datetime(2017, 6, 8, 5, 24, 53, 908103)),
),
]
|
mbareta/edx-platform-ft
|
lms/djangoapps/ccx/migrations/0009_auto_20170608_0525.py
|
Python
|
agpl-3.0
| 735
|
import subprocess
import logging
import os, sys, traceback
import urllib
from fluxclient import check_platform
from .misc import BinaryUploadHelper, BinaryHelperMixin, OnTextMessageMixin
logger = logging.getLogger("API.SLICING")
StlSlicer = None
StlSlicerCura = None
def get_default_cura():
return os.environ["cura"]
def get_default_cura2():
return os.environ["cura2"]
def stl_slicing_parser_api_mixin(cls):
class StlSlicingParserApi(BinaryHelperMixin, OnTextMessageMixin, cls):
"""
        This websocket is used to slice an stl/obj model
"""
POOL_TIME = 30.0
def __init__(self, *args):
super().__init__(*args)
try:
global StlSlicer
global StlSlicerCura
if StlSlicer is None:
from fluxclient.printer.stl_slicer import (
StlSlicer as _StlSlicer,
StlSlicerCura as _StlSlicerCura)
StlSlicer = _StlSlicer
StlSlicerCura = _StlSlicerCura
except ImportError as e:
logger.exception("Slicer error")
self.send_fatal("LIBRARY_NOT_FOUND")
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stderr)
return
self.m_stl_slicer = StlSlicer('')
self._change_engine('slic3r', 'default')
# self.change_engine('cura default')
self.cmd_mapping = {
'upload': [self.begin_recv_stl, 'upload'],
'upload_image': [self.begin_recv_stl, 'upload_image'],
'load_stl_from_path': [self.load_stl_from_path],
'set': [self.set],
'go': [self.gcode_generate],
'delete': [self.delete],
'advanced_setting': [self.advanced_setting],
'get_path': [self.get_path],
'get_path_async': [self.get_path_async],
'duplicate': [self.duplicate],
'meta_option': [self.meta_option],
'begin_slicing': [self.begin_slicing],
'end_slicing': [self.end_slicing],
'report_slicing': [self.report_slicing],
'get_result': [self.get_result],
'change_engine': [self.change_engine],
'check_engine': [self.check_engine],
}
self.ext_metadata = {}
def begin_recv_stl(self, params, flag):
if flag == 'upload':
params = params.split()
if len(params) == 2:
name, file_length = params
buf_type = 'stl'
elif len(params) == 3:
name, file_length, buf_type = params
elif flag == 'upload_image':
# useless
name = ''
buf_type = ''
file_length = params
logger.debug('upload_image {}'.format(file_length))
if int(file_length) == 0:
self.send_error('12', info='empty file!')
else:
helper = BinaryUploadHelper(int(file_length), self.end_recv_stl, name, flag, buf_type)
self.set_binary_helper(helper)
self.send_continue()
def end_recv_stl(self, buf, *args):
if args[1] == 'upload':
logger.debug('upload ' + args[0] + args[2])
ret = self.m_stl_slicer.upload(args[0], buf, args[2])
if ret:
self.send_ok()
else:
self.send_error('15', info="File parsing fail")
return
elif args[1] == 'upload_image':
ret = self.m_stl_slicer.upload_image(buf)
self.send_ok()
def load_stl_from_path(self, params):
logger.debug('load_stl_from_path ' + params)
params = params.split()
if len(params) == 2:
name, path = params
buf_type = 'stl'
elif len(params) == 3:
name, path, buf_type = params
#buf_type could be stl or obj
ret = self.m_stl_slicer.upload(name, urllib.parse.unquote(path), buf_type)
if ret:
self.send_ok()
else:
self.send_error('15', info="File parsing fail")
return
def duplicate(self, params):
logger.debug('duplicate ' + params)
name_in, name_out = params.split()
flag = self.m_stl_slicer.duplicate(name_in, name_out)
if flag:
self.send_ok()
else:
self.send_error('13', info='{} not exist'.format(name_in))
def set(self, params):
params = params.split()
assert len(params) == 10, 'wrong number of parameters %d' % len(params)
name = params[0]
position_x = float(params[1])
position_y = float(params[2])
position_z = float(params[3])
rotation_x = float(params[4])
rotation_y = float(params[5])
rotation_z = float(params[6])
scale_x = float(params[7])
scale_y = float(params[8])
scale_z = float(params[9])
logger.debug('set {} {} {} {} {} {} {} {} {} {}'.format(name, position_x, position_y, position_z, rotation_x, rotation_y, rotation_z, scale_x, scale_y, scale_z))
set_result = self.m_stl_slicer.set(name, [position_x, position_y, position_z, rotation_x, rotation_y, rotation_z, scale_x, scale_y, scale_z])
if set_result == 'ok':
self.send_ok()
else:
self.send_error('14', info=set_result)
def advanced_setting(self, params):
bad_lines = self.m_stl_slicer.advanced_setting(params)
if bad_lines != []:
for line_num, err_msg in bad_lines:
self.send_error('7', info='line %d: %s' % (line_num, err_msg))
logger.debug('line %d: %s' % (line_num, err_msg))
self.send_ok()
def gcode_generate(self, params):
raise RuntimeError('is this still working?')
names = params.split()
if names[-1] == '-g':
output_type = '-g'
names = names[:-1]
elif names[-1] == '-f':
output_type = '-f'
names = names[:-1]
else:
output_type = '-f'
output, metadata = self.m_stl_slicer.gcode_generate(names, self, output_type)
# self.send_progress('finishing', 1.0)
if output:
self.send_text('{"slice_status": "complete", "length": %d, "time": %.3f, "filament_length": %.2f}' % (len(output), metadata[0], metadata[1]))
self.send_binary(output)
logger.debug('slicing finish')
else:
self.send_error(metadata)
logger.debug('slicing fail')
def begin_slicing(self, params):
names = params.split()
self.path_bytes = None
if names[-1] == '-g':
output_type = '-g'
names = names[:-1]
elif names[-1] == '-f':
output_type = '-f'
names = names[:-1]
else:
output_type = '-f'
ret, msg = self.m_stl_slicer.begin_slicing(names, self, output_type)
if ret:
self.send_ok()
else:
self.send_error('16', info=msg)
def end_slicing(self, *args):
self.m_stl_slicer.end_slicing()
self.send_ok()
def report_slicing(self, *args):
for m in self.m_stl_slicer.report_slicing():
self.send_text(m)
self.send_ok()
def get_result(self, *args):
if self.m_stl_slicer.output:
self.send_ok(info=str(len(self.m_stl_slicer.output)))
self.send_binary(self.m_stl_slicer.output)
else:
self.send_error('8', info='No result to send')
def get_path(self, *args):
path = self.m_stl_slicer.get_path()
if path:
self.send_text(path)
else:
self.send_error('9', info='No path data to send')
def get_path_async(self, params):
params = params.split()
per_layers = int(params[0])
if self.path_bytes is None:
self.path_bytes = self.m_stl_slicer.get_path().encode('ascii')
len_of_path = len(self.path_bytes)
i = 1
            left_bracket = 0
            right_bracket = 0
            cutting_index = i
layers = 0
while i < len_of_path:
                if self.path_bytes[i] == 91:  # ord('[')
                    left_bracket += 1
                elif self.path_bytes[i] == 93:  # ord(']')
                    right_bracket += 1
if left_bracket == right_bracket:
layers += 1
if layers >= per_layers:
layers = 0
                        cropped_path = '[%s]' % self.path_bytes[cutting_index:i].decode('ascii')
self.send_text(cropped_path)
cutting_index = i + 1
i += 1
if layers > 0:
                cropped_path = '[%s]' % self.path_bytes[cutting_index:len_of_path - 1].decode('ascii')
self.send_text(cropped_path)
if self.path_bytes:
self.send_text('{"end":0}');
else:
self.send_error('9', info='No path data to send')
def delete(self, params):
name = params.rstrip()
flag, message = self.m_stl_slicer.delete(name)
if flag:
self.send_ok()
else:
self.send_error('10', info=message)
def meta_option(self, params):
key, value = params.split()
self.m_stl_slicer.ext_metadata[key] = value
self.send_ok()
def change_engine(self, params):
"""
change_engine for front-end
"""
logger.debug('change_engine ' + params)
engine, engine_path = params.split()
if self._change_engine(engine, engine_path):
self.send_ok()
else:
self.send_error('11', info="wrong engine {}, should be 'cura' or 'slic3r'".format(engine))
def _change_engine(self, engine, engine_path):
"""
normal chaning engine
"""
if engine == 'slic3r':
logger.debug("Using slic3r()")
if engine_path == 'default':
if 'slic3r' in os.environ:
engine_path = os.environ["slic3r"]
else:
engine_path = "../Slic3r/slic3r.pl"
self.m_stl_slicer = StlSlicer(engine_path).from_other(self.m_stl_slicer)
elif engine == 'cura':
logger.debug("Using %s" % (engine))
if engine_path == 'default':
engine_path = get_default_cura()
self.m_stl_slicer = StlSlicerCura(engine_path).from_other(self.m_stl_slicer)
elif engine == 'cura2':
if engine_path == 'default':
engine_path = get_default_cura2()
self.m_stl_slicer = StlSlicerCura(engine_path, 2).from_other(self.m_stl_slicer)
else:
return False
return True
def check_engine(self, params):
engine = params
error_code, ret = self._check_engine(engine)
if error_code == 0:
self.send_ok()
elif error_code < 5:
self.send_error(str(error_code), info=ret)
else:
self.send_fatal(str(error_code), info=ret)
def _check_engine(self, params):
engine, engine_path = params.split()
if engine == 'cura':
if engine_path == 'default':
engine_path = get_default_cura()
if os.path.isfile(engine_path):
try:
out = subprocess.check_output(engine_path, stderr=subprocess.STDOUT, timeout=5)
except Exception:
logger.exception("Unknown error")
return 1, 'execution fail'
else:
out = out.split(b'\n')[0].rstrip()
if out.startswith(b'Cura_SteamEngine version'):
if out.endswith(b'15.04.5'):
return 0, 'ok'
else:
return 2, 'version error:{}'.format(out.split()[-1].decode())
else:
return 3, '{} is not cura'.format(engine_path)
else:
return 4, '{} not exist'.format(engine_path)
elif engine == 'slic3r':
return 0, 'ok'
else:
return 5, '{} check not supported'.format(engine)
return StlSlicingParserApi
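# Illustrative sketch (not part of fluxghost): get_path_async() above chunks a
# JSON array of per-layer arrays by counting matching brackets over the raw
# bytes (91 == ord('['), 93 == ord(']')). The hypothetical helper below shows
# the same chunking idea on a small payload, emitting one JSON array for every
# `per_layers` top-level elements.
def _demo_split_layers(path_bytes, per_layers):
    chunks = []
    i = 1  # skip the outermost '['
    depth = 0
    cut = 1
    layers = 0
    while i < len(path_bytes):
        if path_bytes[i:i + 1] == b'[':
            depth += 1
        elif path_bytes[i:i + 1] == b']':
            depth -= 1
            if depth == 0:
                layers += 1
                if layers >= per_layers:
                    layers = 0
                    chunks.append(b'[' + path_bytes[cut:i + 1] + b']')
                    cut = i + 2  # skip the ',' separating layers
        i += 1
    if layers:
        chunks.append(b'[' + path_bytes[cut:len(path_bytes) - 1] + b']')
    return chunks
# e.g. _demo_split_layers(b'[[1,2],[3,4],[5,6]]', 2)
#   -> [b'[[1,2],[3,4]]', b'[[5,6]]']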
|
flux3dp/fluxghost
|
fluxghost/api/stl_slicing_parser.py
|
Python
|
agpl-3.0
| 13,536
|
# -*- coding: utf-8 -*-
# © 2017 Cesar Barron Bautista
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import purchase
|
Gebesa-Dev/Addons-gebesa
|
purchase_order_o2o_procurement/models/__init__.py
|
Python
|
agpl-3.0
| 148
|
import time
import RPi.GPIO as GPIO
GPIO.VERSION
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(12,GPIO.OUT)
from smbus import SMBus
bus = SMBus(1)
def read_ain(i):
global bus
#bus.write_byte_data(0x48, 0x40 | ((i) & 0x03), 0)
    bus.write_byte(0x48, i)  # select ADC channel i (PCF8591-style ADC, judging by the 0x48 address and the control byte above)
    bus.read_byte(0x48)  # the first two reads return stale data from the
    bus.read_byte(0x48)  # previous conversion, so discard them
return bus.read_byte(0x48)
while(True):
alcohol = read_ain(2)*0.001
heartrate = read_ain(1)
print "-------------------------\n"
print("Alcohol Sensor: {0:.3f}%".format(alcohol))
if(heartrate<60) or (heartrate>100):
GPIO.output(11,0)
GPIO.output(12,1)
else:
GPIO.output(11,1)
GPIO.output(12,0)
print("Heart Rate Sensor: {0:.0f} BPM\n".format(heartrate))
time.sleep(1)#sec
|
EugeneHasJeans/EugeneHasJeans.github.io
|
documents/lifelines.py
|
Python
|
agpl-3.0
| 799
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Gerhard Lausser, gerhard.lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
# Jean Gabes, naparuba@gmail.com
# Romain Forlot, rforlot@yahoo.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# The resultmodulation class is used for in scheduler modulation of results
# like the return code or the output.
"""
This module provide Resultmodulation and Resultmodulations classes used to describe
the modulation of a check command. Modulation occurs on a modulation period (Timeperiod)
"""
import time
from alignak.objects.item import Item, Items
from alignak.property import StringProp, IntegerProp, IntListProp
class Resultmodulation(Item):
"""Resultmodulation class is simply a modulation of a check result exit code
during a modulation_period.
"""
_id = 1 # zero is always special in database, so we do not take risk here
my_type = 'resultmodulation'
properties = Item.properties.copy()
properties.update({
'resultmodulation_name': StringProp(),
'exit_codes_match': IntListProp(default=[]),
'exit_code_modulation': IntegerProp(default=None),
'modulation_period': StringProp(default=None),
})
def get_name(self):
"""Accessor to resultmodulation_name attribute
:return: result modulation name
:rtype: str
"""
return self.resultmodulation_name
def module_return(self, return_code):
"""Module the exit code if necessary ::
* modulation_period is legit
* exit_code_modulation
* return_code in exit_codes_match
:param return_code: actual code returned by the check
:type return_code: int
:return: return_code modulated if necessary (exit_code_modulation)
:rtype: int
"""
        # Only if in modulation_period or modulation_period == None
if self.modulation_period is None or self.modulation_period.is_time_valid(time.time()):
# Try to change the exit code only if a new one is defined
if self.exit_code_modulation is not None:
# First with the exit_code_match
if return_code in self.exit_codes_match:
return_code = self.exit_code_modulation
return return_code
def pythonize(self):
"""Pythonization function for Resultmodulation.
We override it because we need to convert exit code into integers
:return: None
"""
# First apply Item pythonize
super(Resultmodulation, self).pythonize()
# Then very special cases
# Intify the exit_codes_match, and make list
self.exit_codes_match = [int(ec) for ec in getattr(self, 'exit_codes_match', [])]
if hasattr(self, 'exit_code_modulation'):
self.exit_code_modulation = int(self.exit_code_modulation)
else:
self.exit_code_modulation = None
class Resultmodulations(Items):
"""Resultmodulations class allowed to handle easily several CheckModulation objects
"""
name_property = "resultmodulation_name"
inner_class = Resultmodulation
def linkify(self, timeperiods):
"""Wrapper for linkify_rm_by_tp
        Replace modulation_period by the real Timeperiod object in each Resultmodulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
self.linkify_rm_by_tp(timeperiods)
def linkify_rm_by_tp(self, timeperiods):
"""Replace check_period by real Timeperiod object into each Resultmodulation
:param timeperiods: timeperiods to link to
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
for resultmod in self:
mtp_name = resultmod.modulation_period.strip()
# The new member list, in id
mtp = timeperiods.find_by_name(mtp_name)
if mtp_name != '' and mtp is None:
err = "Error: the result modulation '%s' got an unknown modulation_period '%s'" % \
(resultmod.get_name(), mtp_name)
resultmod.configuration_errors.append(err)
resultmod.modulation_period = mtp
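# Illustrative sketch (not part of alignak): with the timeperiod check
# stripped away, module_return() above reduces to the mapping below -- e.g.
# remapping WARNING (1) results to OK (0) during a maintenance window.
def _demo_module_return(return_code, exit_codes_match, exit_code_modulation):
    if exit_code_modulation is not None and return_code in exit_codes_match:
        return exit_code_modulation
    return return_code
# _demo_module_return(1, [1, 2], 0) -> 0
# _demo_module_return(3, [1, 2], 0) -> 3 (unmatched codes pass through)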
|
gst/alignak
|
alignak/objects/resultmodulation.py
|
Python
|
agpl-3.0
| 5,934
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shoop.notify import Context
from shoop_tests.notify.fixtures import get_initialized_test_event
@pytest.mark.django_db
def test_log_entries():
event = get_initialized_test_event()
ctx = Context.from_event(event)
order = ctx.get("order")
n_log_entries = ctx.log_entry_queryset.count()
ctx.add_log_entry_on_log_target("blap", "blorr")
order.add_log_entry("blep")
assert ctx.log_entry_queryset.count() == n_log_entries + 2 # they got added
assert order.log_entries.last().message == "blep" # it's what we added
assert ctx.log_entry_queryset.last().message == "blep" # from this perspective too
@pytest.mark.django_db
@pytest.mark.parametrize("target_obj", (None, object()))
def test_log_entry_on_unloggable_object(target_obj):
event = get_initialized_test_event()
event.variable_values["order"] = target_obj # invalidate log target _before_ creating context
ctx = Context.from_event(event)
n_log_entries = ctx.log_entry_queryset.count()
ctx.add_log_entry_on_log_target("blap", "blorr")
assert ctx.log_entry_queryset.count() == n_log_entries # couldn't add :(
|
taedori81/shoop
|
shoop_tests/notify/test_log_entries.py
|
Python
|
agpl-3.0
| 1,388
|
import logging
import time
from django.conf import settings
from django.core.mail import get_connection as dj_get_connection
logger = logging.getLogger(__name__)
class BackendWrapper(object):
"""A wrapper around Django's Email Backend, providing hooks
for instrumentation and testing.
"""
def __init__(self, backend):
self._backend = backend
logger.info("initialized connection wrapper with email backend: %s", backend)
def send_messages(self, email_messages):
# check settings hook for rewriting email recipient, act accordingly
if settings.EMAIL_REWRITE_RECIPIENT:
for message in email_messages:
message.to = [settings.EMAIL_REWRITE_RECIPIENT]
# send the messages
t = time.time()
msg_count = self._backend.send_messages(email_messages)
elapsed = time.time() - t
if msg_count > 0:
logger.info('sent %s messages, elapsed: %.3fs' % (msg_count, elapsed))
for msg in email_messages:
hdrs = dict((k, v) for k, v in dict(msg.message()).iteritems()
if k.lower() not in ('date', 'from', 'subject', 'content-type', 'mime-version'))
logger.info("sent email: {}".format(repr(hdrs)))
if msg_count != len(email_messages):
logger.warn('send_messages() was called with %s messages but return value was %s',
len(email_messages), msg_count)
return msg_count
def close(self):
# never raise Exceptions on close().
try:
self._backend.close()
except Exception as e:
logger.debug("self._backend.close() failed: %s", e)
def __getattr__(self, a):
return getattr(self._backend, a)
def get_connection(*a, **kw):
return BackendWrapper(dj_get_connection(*a, **kw))
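# Hedged usage sketch (illustrative comment; the setting name is the one this
# module reads, the rest is a plain Django mail call):
#   from notifier.connection_wrapper import get_connection
#   conn = get_connection()       # wraps whichever backend Django is configured with
#   conn.send_messages(messages)  # timed and logged; recipients rewritten when
#                                 # settings.EMAIL_REWRITE_RECIPIENT is set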
|
EDUlib/notifier
|
notifier/connection_wrapper.py
|
Python
|
agpl-3.0
| 1,867
|
# Copyright 2016 - 2018 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
from pkg_resources import resource_filename
import marv_node.testing
from marv_node.testing import make_dataset, run_nodes, temporary_directory
from marv_robotics.detail import connections_section as node
from marv_store import Store
class TestCase(marv_node.testing.TestCase):
# TODO: Generate bags instead, but with connection info!
BAGS = [
resource_filename('marv_node.testing._robotics_tests', 'data/test_0.bag'),
resource_filename('marv_node.testing._robotics_tests', 'data/test_1.bag'),
]
async def test_node(self):
with temporary_directory() as storedir:
store = Store(storedir, {})
dataset = make_dataset(self.BAGS)
store.add_dataset(dataset)
streams = await run_nodes(dataset, [node], store)
self.assertNodeOutput(streams[0], node)
# TODO: test also header
|
ternaris/marv-robotics
|
code/marv/marv_node/testing/_robotics_tests/test_section_topics.py
|
Python
|
agpl-3.0
| 960
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.addons.report_aeroo import report_aeroo
from openerp.addons.at_base import util
from openerp.osv import fields, osv
from openerp.tools.translate import _
class invoice_attachment_wizard(osv.TransientModel):
_name = "account.invoice.attachment.wizard"
_description = "Invoice Attachment Wizard"
def action_import(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0])
invoice_id = util.active_id(context, "account.invoice")
if not invoice_id:
raise osv.except_osv(_("Error!"), _("No invoice found"))
report_obj = self.pool.get("ir.actions.report.xml")
        data = base64.decodestring(wizard.document)
data = report_aeroo.fixPdf(data)
if not data:
raise osv.except_osv(_("Error!"), _("PDF is corrupted and unable to fix!"))
if not report_obj.write_attachment(cr, uid, "account.invoice", invoice_id, report_name="account.report_invoice", datas=base64.encodestring(data), context=context, origin="account.invoice.attachment.wizard"):
raise osv.except_osv(_("Error!"), _("Unable to import document (check if invoice is validated)"))
return { "type" : "ir.actions.act_window_close" }
_columns = {
"document" : fields.binary("Document")
}
|
funkring/fdoo
|
addons-funkring/at_account/wizard/invoice_attachment_wizard.py
|
Python
|
agpl-3.0
| 2,242
|
"""StoreServer provides a number of plugins which can provide a store
service on a server.
There are currently 2 plugins available: ``webserver`` and ``gitdaemon``.
These can be used to simplify provision of a store, e.g. using the
``webserver`` StoreServer instead of installing a 3rd party webserver
such as Apache.
"""
|
certain/certain
|
certain/StoreServer/__init__.py
|
Python
|
agpl-3.0
| 325
|
from __future__ import division
import datetime
import re
import itertools
import random
from django.conf import settings
from django.core import exceptions
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.db import models
from markitup.fields import MarkupField
from django_date_extensions.fields import ApproximateDateField, ApproximateDate
from tasks.models import Task
from images.models import HasImageMixin, Image
from scorecards.models import ScorecardMixin
from mapit import models as mapit_models
# tell South how to handle the custom fields
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^django_date_extensions\.fields\.ApproximateDateField"])
add_introspection_rules([], ["^django.contrib\.gis\.db\.models\.fields\.PointField"])
date_help_text = "Format: '2011-12-31', '31 Jan 2011', 'Jan 2011' or '2011' or 'future'"
class ModelBase(models.Model):
    # pass the callable so the default is evaluated per save, not once at import time
    created = models.DateTimeField(auto_now_add=True, default=datetime.datetime.now)
    updated = models.DateTimeField(auto_now=True, default=datetime.datetime.now)
def css_class(self):
return self._meta.module_name
def get_admin_url(self):
url = reverse(
'admin:%s_%s_change' % ( self._meta.app_label, self._meta.module_name),
args=[self.id]
)
return url
class Meta:
abstract = True
class ManagerBase(models.GeoManager):
def update_or_create(self, filter_attrs, attrs):
"""Given unique look-up attributes, and extra data attributes, either
updates the entry referred to if it exists, or creates it if it doesn't.
Returns the object updated or created, having saved the changes.
"""
try:
obj = self.get(**filter_attrs)
changed = False
for k, v in attrs.items():
if obj.__dict__[k] != v:
changed = True
obj.__dict__[k] = v
if changed:
obj.save()
except exceptions.ObjectDoesNotExist:
attrs.update(filter_attrs)
obj = self.create(**attrs)
obj.save()
return obj
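# Illustrative analogue (plain Python, not Django): update_or_create() above
# follows the merge semantics sketched here -- look up by the unique attrs,
# write the extra attrs only when something actually changed, create otherwise.
def _demo_update_or_create(store, key, attrs):
    # store: dict mapping key -> attrs dict, standing in for a table
    if key in store:
        changed = dict((k, v) for k, v in attrs.items() if store[key].get(k) != v)
        store[key].update(changed)
    else:
        store[key] = dict(attrs)
    return store[key]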
class ContactKind(ModelBase):
name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True,
help_text="created from name")
objects = ManagerBase()
def __unicode__(self):
return self.name
class Meta:
ordering = ["slug"]
class Contact(ModelBase):
kind = models.ForeignKey('ContactKind')
value = models.TextField()
note = models.TextField(blank=True,
help_text="publicly visible, use to clarify contact detail")
source = models.CharField(max_length=500, blank=True, default='',
help_text="where did this contact detail come from")
# link to other objects using the ContentType system
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
objects = ManagerBase()
def __unicode__(self):
return "%s (%s for %s)" % (self.value, self.kind, self.content_object)
def generate_tasks(self):
"""generate tasks for ourselves, and for the foreign object"""
Task.call_generate_tasks_on_if_possible(self.content_object)
return []
class Meta:
ordering = ["content_type", "object_id", "kind"]
class InformationSource(ModelBase):
source = models.CharField(max_length=500)
note = models.TextField(blank=True)
entered = models.BooleanField(default=False,
help_text="has the information in this source been entered into this system?")
# link to other objects using the ContentType system
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
objects = ManagerBase()
def __unicode__(self):
return "%s: %s" % (self.source, self.content_object)
class Meta:
ordering = ["content_type", "object_id", "source"]
class PersonQuerySet(models.query.GeoQuerySet):
def is_politician(self, when=None):
# FIXME - Don't like the look of this, rather a big subquery.
return self.filter(position__in=Position.objects.all().current_politician_positions(when))
class PersonManager(ManagerBase):
def get_query_set(self):
return PersonQuerySet(self.model)
def loose_match_name(self, name):
"""Search for a loose match on a name. May not be too reliable"""
# import here to avoid creating an import loop
from haystack.query import SearchQuerySet
# Try matching all the bits
results = SearchQuerySet().filter_and(content=name).models(self.model)
# if that fails try matching all the bits in any order
if not len(results):
results = SearchQuerySet().models(Person)
for bit in re.split(r'\s+', name):
results = results.filter_and(content=bit)
# If we have exactly one result return that
if len(results) == 1:
return results[0].object
else:
return None
def get_next_featured(self, current_slug, want_previous=False):
""" Returns the next featured person, in slug order: using slug order because it's unique and easy to
exclude the current person.
If no slug is provided, returns a random person.
        If the slug is purely numeric (n), this consistently returns a person (actually the nth, wrapping around
where necessary): this allows js to generate random calls that can nonetheless be served from the cache."""
all_results = self.filter(can_be_featured=True)
if not all_results.exists():
return None
sort_order = 'slug'
if not current_slug:
return random.choice(all_results)
elif current_slug.isdigit():
all_results = all_results.order_by(sort_order)
return all_results[int(current_slug) % len(all_results)] # ignore direction: just provide a person
else:
all_results = all_results.exclude(slug=current_slug)
if len(all_results) == 0: # special case: return the excluded person if they are the only one or nothing
all_results = self.filter(can_be_featured=True)
if all_results.exists():
return all_results[0]
else:
return None
if want_previous:
sort_order = '-slug'
results = all_results.order_by(sort_order).filter(slug__lt=current_slug)[:1]
else:
results = all_results.order_by(sort_order).filter(slug__gt=current_slug)[:1]
if len(results) == 1:
return results[0]
else: # we're at the start/end of the list, wrap round to the other end
results = all_results.order_by(sort_order)[:1]
if len(results) == 1:
return results[0]
else:
return None
class Person(ModelBase, HasImageMixin, ScorecardMixin):
title = models.CharField(max_length=100, blank=True)
legal_name = models.CharField(max_length=300)
other_names = models.TextField(blank=True, default='', help_text="other names the person might be known by - one per line")
slug = models.SlugField(max_length=200, unique=True, help_text="auto-created from first name and last name")
gender = models.CharField(max_length=1, choices=(('m','Male'),('f','Female')) )
date_of_birth = ApproximateDateField(blank=True, help_text=date_help_text)
date_of_death = ApproximateDateField(blank=True, help_text=date_help_text)
original_id = models.PositiveIntegerField(blank=True, null=True, help_text='temporary - used to link to members in original mzalendo.com db')
# religion
# tribe
summary = MarkupField(blank=True, default='')
contacts = generic.GenericRelation(Contact)
images = generic.GenericRelation(Image)
objects = PersonManager()
can_be_featured = models.BooleanField(default=False, help_text="can this person be featured on the home page (e.g., is their data appropriate and extant)?")
def clean(self):
# strip other_names and flatten multiple newlines
self.other_names = re.sub(r"\n+", "\n", self.other_names).strip()
@property
def name(self):
if self.other_names:
return self.other_names.split("\n")[0]
else:
return self.legal_name
def additional_names(self):
if self.other_names:
return self.other_names.split("\n")[1:]
else:
return []
def aspirant_positions(self):
return self.position_set.all().current_aspirant_positions()
def is_aspirant(self):
return self.aspirant_positions().exists()
def politician_positions(self):
return self.position_set.all().current_politician_positions()
def is_politician(self):
return self.politician_positions().exists()
def parties(self):
"""Return list of parties that this person is currently a member of"""
party_memberships = self.position_set.all().currently_active().filter(title__slug='member').filter(organisation__kind__slug='party')
return Organisation.objects.filter(position__in=party_memberships)
def constituencies(self):
"""Return list of constituencies that this person is currently an politician for"""
return Place.objects.filter(position__in=self.politician_positions())
def __unicode__(self):
return self.legal_name
@models.permalink
def get_absolute_url(self):
return ('person', [self.slug])
def generate_tasks(self):
"""Generate tasks for missing contact details etc"""
task_slugs = []
wanted_contact_slugs = ['phone','email','address']
have_contact_slugs = [c.kind.slug for c in self.contacts.all()]
for wanted in wanted_contact_slugs:
if wanted not in have_contact_slugs:
task_slugs.append("find-missing-" + wanted)
return task_slugs
def scorecard_overall(self):
total_count = super(Person, self).active_scorecards().count()
total_score = super(Person, self).active_scorecards().aggregate(models.Sum('score'))['score__sum']
for constituency in self.constituencies():
constituency_count = constituency.active_scorecards().count()
if constituency_count:
total_count += constituency_count
total_score += constituency.active_scorecards().aggregate(models.Sum('score'))['score__sum']
return total_score / total_count
def scorecards(self):
"""This is the list of scorecards that will actually be displayed on the site."""
scorecard_lists = []
# We're only showing scorecards for current MPs
if self.is_politician():
scorecard_lists.append(super(Person, self).scorecards())
scorecard_lists.extend([x.scorecards() for x in self.constituencies()])
return itertools.chain(*scorecard_lists)
def has_scorecards(self):
# We're only showing scorecards for current MPs
if self.is_politician():
return super(Person, self).has_scorecards() or any([x.has_scorecards() for x in self.constituencies()])
class Meta:
ordering = ["slug"]
class OrganisationKind(ModelBase):
name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True, help_text="created from name")
summary = MarkupField(blank=True, default='')
objects = ManagerBase()
def __unicode__(self):
return self.name
class Meta:
ordering = ["slug"]
class OrganisationQuerySet(models.query.GeoQuerySet):
def parties(self):
return self.filter(kind__slug='party')
def active_parties(self):
# FIXME - What a lot of subqueries...
active_politician_positions = Position.objects.all().current_politician_positions()
active_member_positions = Position.objects.all().filter(title__slug='member').currently_active()
current_politicians = Person.objects.all().filter(position__in=active_politician_positions).distinct()
current_members = Person.objects.all().filter(position__in=active_member_positions).distinct()
return (
self
.parties()
.filter(position__person__in=current_politicians)
.filter(position__person__in=current_members)
.distinct()
)
class OrganisationManager(ManagerBase):
def get_query_set(self):
return OrganisationQuerySet(self.model)
class Organisation(ModelBase):
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True, help_text="created from name")
summary = MarkupField(blank=True, default='')
kind = models.ForeignKey('OrganisationKind')
started = ApproximateDateField(blank=True, help_text=date_help_text)
ended = ApproximateDateField(blank=True, help_text=date_help_text)
original_id = models.PositiveIntegerField(blank=True, null=True, help_text='temporary - used to link to parties in original mzalendo.com db')
objects = OrganisationManager()
contacts = generic.GenericRelation(Contact)
def __unicode__(self):
return "%s (%s)" % (self.name, self.kind)
@models.permalink
def get_absolute_url(self):
return ('organisation', [self.slug])
class Meta:
ordering = ["slug"]
class PlaceKind(ModelBase):
name = models.CharField(max_length=200, unique=True)
plural_name = models.CharField(max_length=200, blank=True)
slug = models.SlugField(max_length=200, unique=True, help_text="created from name")
summary = MarkupField(blank=True, default='')
objects = ManagerBase()
def __unicode__(self):
return self.name
class Meta:
ordering = ["slug"]
class PlaceQuerySet(models.query.GeoQuerySet):
def constituencies(self):
return self.filter(kind__slug='constituency')
def counties(self):
return self.filter(kind__slug='county')
class PlaceManager(ManagerBase):
def get_query_set(self):
return PlaceQuerySet(self.model)
class Place(ModelBase, ScorecardMixin):
name = models.CharField(max_length=200)
slug = models.SlugField(max_length=100, unique=True, help_text="created from name")
kind = models.ForeignKey('PlaceKind')
summary = MarkupField(blank=True, default='')
shape_url = models.URLField(verify_exists=True, blank=True )
location = models.PointField(null=True, blank=True)
organisation = models.ForeignKey('Organisation', null=True, blank=True, help_text="use if the place uniquely belongs to an organisation - eg a field office" )
original_id = models.PositiveIntegerField(blank=True, null=True, help_text='temporary - used to link to constituencies in original mzalendo.com db')
mapit_area = models.ForeignKey( mapit_models.Area, null=True, blank=True )
parent_place = models.ForeignKey('self', blank=True, null=True, related_name='child_places')
objects = PlaceManager()
is_overall_scorecard_score_applicable = False
@property
def position_with_organisation_set(self):
return self.position_set.filter(organisation__isnull=False)
def __unicode__(self):
return "%s (%s)" % (self.name, self.kind)
def is_constituency(self):
return self.kind.slug == 'constituency'
def current_politician_position(self):
"""Return the current politician position, or None"""
qs = self.position_set.all().current_politician_positions()
try:
return qs[0]
except IndexError:
return None
def related_people(self):
# Can't order by the sorting_end_date_high of position
# because that ruins the distinct.
return Person.objects.filter(position__place=self).distinct()#.order_by('-position__sorting_end_date_high')
@models.permalink
def get_absolute_url(self):
return ('place', [self.slug])
class Meta:
ordering = ["slug"]
class PositionTitle(ModelBase):
name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True, help_text="created from name")
summary = MarkupField(blank=True, default='')
original_id = models.PositiveIntegerField(blank=True, null=True,
help_text='temporary - used to link to data in original mzalendo.com db')
requires_place = models.BooleanField(default=False,
help_text="Does this job title require a place to complete the position?")
objects = ManagerBase()
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('position', [self.slug])
def organisations(self):
"""
Return a qs of organisations, with the most frequently related first.
Each organisation is also annotated with 'position_count' which might be
useful.
        This is intended as an alternative to assigning an org to each
        position_title. Instead we can deduce it from the positions.
"""
orgs = (
Organisation
.objects
.filter(position__title=self)
.annotate(position_count=models.Count('position'))
.order_by('-position_count')
)
return orgs
class Meta:
ordering = ["slug"]
class PositionQuerySet(models.query.GeoQuerySet):
def currently_active(self, when=None):
"""Filter on start and end dates to limit to currently active postitions"""
if when == None:
when = datetime.date.today()
now_approx = repr(ApproximateDate(year=when.year, month=when.month, day=when.day))
qs = (
self
.filter(start_date__lte=now_approx)
.filter(Q(sorting_end_date_high__gte=now_approx) | Q(end_date=''))
)
return qs
def currently_inactive(self, when=None):
"""Filter on start and end dates to limit to currently inactive postitions"""
if when == None:
when = datetime.date.today()
now_approx = repr(ApproximateDate(year=when.year, month=when.month, day=when.day))
start_criteria = Q(start_date__gt=now_approx)
end_criteria = Q(sorting_end_date_high__lt=now_approx) & ~Q(end_date='')
qs = self.filter(start_criteria | end_criteria)
return qs
def aspirant_positions(self):
"""
Filter down to only positions which are aspirant ones. This uses the
convention that the slugs always start with 'aspirant-'.
"""
return self.filter( title__slug__startswith='aspirant-' )
def current_aspirant_positions(self, when=None):
"""Filter down to only positions which are those of current aspirantsns."""
return self.aspirant_positions().currently_active(when)
def politician_positions(self):
"""Filter down to only positions which are one of the two kinds of
politician (those with constituencies, and nominated ones).
"""
return self.filter(title__slug__in=settings.POLITICIAN_TITLE_SLUGS)
def current_politician_positions(self, when=None):
"""Filter down to only positions which are those of current politicians."""
return self.politician_positions().currently_active(when)
def political(self):
"""Filter down to only the political category"""
return self.filter(category='political')
def education(self):
"""Filter down to only the education category"""
return self.filter(category='education')
def other(self):
"""Filter down to only the other category"""
return self.filter(category='other')
def order_by_place(self):
"""Sort by the place name"""
return self.order_by('place__name')
class PositionManager(ManagerBase):
def get_query_set(self):
return PositionQuerySet(self.model)
class Position(ModelBase):
category_choices = (
('political', 'Political'),
('education', 'Education (as a learner)'),
('other', 'Anything else'),
)
person = models.ForeignKey('Person')
organisation = models.ForeignKey('Organisation', null=True, blank=True)
place = models.ForeignKey('Place', null=True, blank=True, help_text="use if needed to identify the position - eg add constituency for a politician" )
title = models.ForeignKey('PositionTitle', null=True, blank=True)
subtitle = models.CharField(max_length=200, blank=True, default='')
category = models.CharField(max_length=20, choices=category_choices, default='other',
help_text="What sort of position was this?")
note = models.CharField(max_length=300, blank=True, default='')
start_date = ApproximateDateField(blank=True, help_text=date_help_text)
end_date = ApproximateDateField(blank=True, help_text=date_help_text, default="future")
# hidden fields that are only used to do sorting. Filled in by code.
    sorting_start_date = models.CharField(editable=False, default='', max_length=10)
    sorting_end_date = models.CharField(editable=False, default='', max_length=10)
    sorting_start_date_high = models.CharField(editable=False, default='', max_length=10)
    sorting_end_date_high = models.CharField(editable=False, default='', max_length=10)
objects = PositionManager()
def clean(self):
if not (self.organisation or self.title or self.place):
raise exceptions.ValidationError('Must have at least one of organisation, title or place.')
if self.title and self.title.requires_place and not self.place:
raise exceptions.ValidationError("The job title '%s' requires a place to be set" % self.title.name)
def display_dates(self):
"""Nice HTML for the display of dates"""
# no dates
if not (self.start_date or self.end_date):
return ''
# start but no end
if self.start_date and not self.end_date:
return "Started %s" % self.start_date
# both dates
if self.start_date and self.end_date:
if self.end_date.future:
return "Started %s" % self.start_date
else:
return "%s → %s" % (self.start_date, self.end_date)
# end but no start
if not self.start_date and self.end_date:
return 'ongoing'
def display_start_date(self):
"""Return text that represents the start date"""
if self.start_date:
return str(self.start_date)
return '?'
def display_end_date(self):
"""Return text that represents the end date"""
if self.end_date:
return str(self.end_date)
return '?'
def is_ongoing(self):
"""Return True or False for whether the position is currently ongoing"""
if not self.end_date:
return False
elif self.end_date.future:
return True
else:
# turn today's date into an ApproximateDate object and cmp to that
now = datetime.date.today()
now_approx = ApproximateDate(year=now.year, month=now.month, day=now.day)
return now_approx <= self.end_date
def has_known_dates(self):
"""Is there at least one known (not future) date?"""
return (self.start_date and not self.start_date.future) or \
(self.end_date and not self.end_date.future)
def _set_sorting_dates(self):
"""Set the sorting dates from the actual dates (does not call save())"""
# value can be yyyy-mm-dd, future or None
start = repr(self.start_date) if self.start_date else ''
end = repr(self.end_date) if self.end_date else ''
# set the value or default to something sane
sorting_start_date = start or '0000-00-00'
sorting_end_date = end or start or '0000-00-00'
# To make the sorting consistent special case some parts
if not end and start == 'future':
sorting_start_date = 'a-future' # come after 'future'
self.sorting_start_date = sorting_start_date
self.sorting_end_date = sorting_end_date
self.sorting_start_date_high = re.sub('-00', '-99', sorting_start_date)
self.sorting_end_date_high = re.sub('-00', '-99', sorting_end_date)
def is_nominated_politician(self):
return self.title.slug == 'nominated-member-parliament'
def save(self, *args, **kwargs):
self._set_sorting_dates()
super(Position, self).save(*args, **kwargs)
def __unicode__(self):
title = self.title or '???'
if self.organisation:
organisation = self.organisation.name
else:
organisation = '???'
return "%s (%s at %s)" % ( self.person.name, title, organisation)
class Meta:
ordering = ['-sorting_end_date', '-sorting_start_date']
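# Illustrative sketch (not part of the model): _set_sorting_dates() above makes
# ApproximateDate strings sortable by padding unknown parts, and derives a
# "high" bound by swapping the '-00' placeholders for '-99'. The helper below
# (hypothetical; re is already imported in this module) shows that substitution
# in isolation.
def _demo_high_bound(sorting_date):
    # e.g. _demo_high_bound('2010-00-00') -> '2010-99-99', so a position dated
    # just "2010" sorts after everything dated inside 2009 on the high bound.
    return re.sub('-00', '-99', sorting_date)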
|
Hutspace/odekro
|
mzalendo/core/models.py
|
Python
|
agpl-3.0
| 25,729
|
import requests
LRS = "http://cygnus.ic.uva.nl:8000/XAPI/statements"
u = raw_input("LRS username: ")
p = raw_input("LRS password: ")
r = requests.get(LRS,headers={"X-Experience-API-Version":"1.0"},auth=(u,p));
if r.status_code == 200:
print "Success"
else:
print "Server returns",r.status_code
|
ictofnwi/coach
|
test_lrs.py
|
Python
|
agpl-3.0
| 305
|
from config import cloudplatform
storage_adapter = None
if cloudplatform == "google":
import googlestorage
storage_adapter = googlestorage
elif cloudplatform == "aws":
import awsstorage
storage_adapter = awsstorage
elif cloudplatform == "azure":
from FlaskWebProject import azurestorage
storage_adapter = azurestorage
def create_container(bucketID):
""" Creates Container with given bucketID
:param string bucketID: container name
:return boolean: true if succeed
"""
return storage_adapter.create_container(bucketID)
def container_exists(bucketID):
""" Check if container with ID exists
:param string bucketID: container name
:return boolean: true if exists
"""
return storage_adapter.container_exists(bucketID)
def file_exists(bucketID, filename):
""" Checks if file in container exists
:param string bucketID: container name
:param string filename: file to search
:return boolean: true if exists
"""
return storage_adapter.file_exists(bucketID, filename)
def list_files(bucketID):
""" Lists files in specified bucket
:param string bucketID: container name
:return list: list of FileIDs
"""
return storage_adapter.list_files(bucketID)
def delete_file(bucketID, filename):
""" delete file from container
:param string bucketID: container name
:param string filename: file to delete
:return boolean: true if succeed
"""
return storage_adapter.delete_file(bucketID, filename)
def delete_container(bucketID):
""" delete container
:param string bucketID: container name
:return boolean: true if succeed
"""
return storage_adapter.delete_container(bucketID)
def upload_from_path(bucketID, path):
""" Uploads a local file from client to the cloud
:param string bucketID: container name
:param string path: local filepath
:return boolean: true if succeed
"""
return storage_adapter.upload_from_path(bucketID, path)
def upload_from_text(bucketID, filename, text):
""" Uploads text to container in specified file
:param string bucketID: container name
:param string filename: destination file
:param string text: text to upload
:return boolean: true if succeed
"""
return storage_adapter.upload_from_text(bucketID, filename, text)
def download_file_to_path(bucketID, filename, path):
""" Downloads file from container to local path
:param string bucketID: container name
:param string filename: file to download
:param string path: destination local filepath
:return boolean: true if succeed
"""
return storage_adapter.download_file_to_path(bucketID, filename, path)
def download_file_to_text(bucketID, filename):
""" Downloads file from container to text
:param string bucketID: container name
:param string filename: file to download
:return string: text that got downloaded
"""
return storage_adapter.download_file_to_text(bucketID, filename)
def get_download_url(bucketID, filename):
""" Returns a download for specified file in container
:param string bucketID: container name
:param string filename: file to download
:return string: the url to download the file from
"""
return storage_adapter.get_download_url(bucketID, filename)
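# Hedged usage sketch (bucket and file names are made up): because every
# function above delegates to whichever adapter matched config.cloudplatform,
# calling code stays provider-agnostic.
def _demo_roundtrip():
    bucket = 'demo-bucket'
    if not container_exists(bucket):
        create_container(bucket)
    upload_from_text(bucket, 'hello.txt', 'hello world')
    return download_file_to_text(bucket, 'hello.txt')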
|
ohaz/amos-ss15-proj1
|
FlaskWebProject/storageinterface.py
|
Python
|
agpl-3.0
| 3,348
|
# -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_companyweb_report_wizard
from . import partner_update_companyweb
|
QANSEE/l10n-belgium
|
account_companyweb/wizard/__init__.py
|
Python
|
agpl-3.0
| 1,052
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from item import Item, Items
from shinken.util import strip_and_uniq
from shinken.property import BoolProp, IntegerProp, StringProp
from shinken.log import logger, naglog_result
_special_properties = ('service_notification_commands', 'host_notification_commands',
'service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'host_notification_commands', 'contact_name')
_simple_way_parameters = ('service_notification_period', 'host_notification_period',
'service_notification_options', 'host_notification_options',
'service_notification_commands', 'host_notification_commands',
'min_business_impact')
class Contact(Item):
id = 1 # zero is always special in database, so we do not take risk here
my_type = 'contact'
properties = Item.properties.copy()
properties.update({
'contact_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(default='none', fill_brok=['full_status']),
'contactgroups': StringProp(default='', fill_brok=['full_status']),
'host_notifications_enabled': BoolProp(default='1', fill_brok=['full_status']),
'service_notifications_enabled': BoolProp(default='1', fill_brok=['full_status']),
'host_notification_period': StringProp(fill_brok=['full_status']),
'service_notification_period': StringProp(fill_brok=['full_status']),
'host_notification_options': StringProp(fill_brok=['full_status']),
'service_notification_options': StringProp(fill_brok=['full_status']),
'host_notification_commands': StringProp(fill_brok=['full_status']),
'service_notification_commands': StringProp(fill_brok=['full_status']),
'min_business_impact': IntegerProp(default='0', fill_brok=['full_status']),
'email': StringProp(default='none', fill_brok=['full_status']),
'pager': StringProp(default='none', fill_brok=['full_status']),
'address1': StringProp(default='none', fill_brok=['full_status']),
'address2': StringProp(default='none', fill_brok=['full_status']),
'address3': StringProp(default='none', fill_brok=['full_status']),
'address4': StringProp(default='none', fill_brok=['full_status']),
'address5': StringProp(default='none', fill_brok=['full_status']),
'address6': StringProp(default='none', fill_brok=['full_status']),
'can_submit_commands': BoolProp(default='0', fill_brok=['full_status']),
'is_admin': BoolProp(default='0', fill_brok=['full_status']),
'retain_status_information': BoolProp(default='1', fill_brok=['full_status']),
'notificationways': StringProp(default='', fill_brok=['full_status']),
'password': StringProp(default='NOPASSWORDSET', fill_brok=['full_status']),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'modified_attributes': IntegerProp(default=0L, fill_brok=['full_status'], retention=True),
'downtimes': StringProp(default=[], fill_brok=['full_status'], retention=True),
})
# This tab is used to transform old parameters name into new ones
# so from Nagios2 format, to Nagios3 ones.
# Or Shinken deprecated names like criticity
old_properties = {
'min_criticity': 'min_business_impact',
}
macros = {
'CONTACTNAME': 'contact_name',
'CONTACTALIAS': 'alias',
'CONTACTEMAIL': 'email',
'CONTACTPAGER': 'pager',
'CONTACTADDRESS1': 'address1',
'CONTACTADDRESS2': 'address2',
'CONTACTADDRESS3': 'address3',
'CONTACTADDRESS4': 'address4',
'CONTACTADDRESS5': 'address5',
'CONTACTADDRESS6': 'address6',
'CONTACTGROUPNAME': 'get_groupname',
'CONTACTGROUPNAMES': 'get_groupnames'
}
# For debugging purpose only (nice name)
def get_name(self):
try:
return self.contact_name
except AttributeError:
return 'UnnamedContact'
# Search for notification_options with state and if t is
# in service_notification_period
def want_service_notification(self, t, state, type, business_impact, cmd=None):
if not self.service_notifications_enabled:
return False
        # If we are in downtime, we do not want notifications
for dt in self.downtimes:
if dt.is_in_effect:
return False
# Now the rest is for sub notificationways. If one is OK, we are ok
# We will filter in another phase
for nw in self.notificationways:
nw_b = nw.want_service_notification(t, state, type, business_impact, cmd)
if nw_b:
return True
# Oh... no one is ok for it? so no, sorry
return False
# Search for notification_options with state and if t is in
# host_notification_period
def want_host_notification(self, t, state, type, business_impact, cmd=None):
if not self.host_notifications_enabled:
return False
        # If we are in downtime, we do not want notifications
for dt in self.downtimes:
if dt.is_in_effect:
return False
# Now it's all for sub notificationways. If one is OK, we are OK
# We will filter in another phase
for nw in self.notificationways:
nw_b = nw.want_host_notification(t, state, type, business_impact, cmd)
if nw_b:
return True
# Oh, nobody..so NO :)
return False
# Call to get our commands to launch a Notification
def get_notification_commands(self, type):
r = []
# service_notification_commands for service
notif_commands_prop = type + '_notification_commands'
for nw in self.notificationways:
r.extend(getattr(nw, notif_commands_prop))
return r
    # Check if required properties are set:
    # contacts OR contactgroups is needed
def is_correct(self):
state = True
cls = self.__class__
        # All of the special properties are checked in the notificationways part
for prop, entry in cls.properties.items():
if prop not in _special_properties:
if not hasattr(self, prop) and entry.required:
logger.error("[contact::%s] %s property not set", self.get_name(), prop)
state = False # Bad boy...
        # There is a case where there is no notification way: when no special_prop
        # is defined at all!
if self.notificationways == []:
for p in _special_properties:
if not hasattr(self, p):
logger.error("[contact::%s] %s property is missing", self.get_name(), p)
state = False
if hasattr(self, 'contact_name'):
for c in cls.illegal_object_name_chars:
if c in self.contact_name:
logger.error("[contact::%s] %s character not allowed in contact_name", self.get_name(), c)
state = False
else:
if hasattr(self, 'alias'): # take the alias if we miss the contact_name
self.contact_name = self.alias
return state
# Raise a log entry when a downtime begins
# CONTACT DOWNTIME ALERT: test_contact;STARTED; Contact has entered a period of scheduled downtime
def raise_enter_downtime_log_entry(self):
naglog_result('info', "CONTACT DOWNTIME ALERT: %s;STARTED; Contact has "
"entered a period of scheduled downtime"
% self.get_name())
# Raise a log entry when a downtime has finished
# CONTACT DOWNTIME ALERT: test_contact;STOPPED; Contact has exited from a period of scheduled downtime
def raise_exit_downtime_log_entry(self):
naglog_result('info', "CONTACT DOWNTIME ALERT: %s;STOPPED; Contact has "
"exited from a period of scheduled downtime"
% self.get_name())
# Raise a log entry when a downtime prematurely ends
# CONTACT DOWNTIME ALERT: test_contact;CANCELLED; Contact has entered a period of scheduled downtime
def raise_cancel_downtime_log_entry(self):
naglog_result('info', "CONTACT DOWNTIME ALERT: %s;CANCELLED; Scheduled "
"downtime for contact has been cancelled."
% self.get_name())
class Contacts(Items):
name_property = "contact_name"
inner_class = Contact
def linkify(self, timeperiods, commands, notificationways):
#self.linkify_with_timeperiods(timeperiods, 'service_notification_period')
#self.linkify_with_timeperiods(timeperiods, 'host_notification_period')
#self.linkify_command_list_with_commands(commands, 'service_notification_commands')
#self.linkify_command_list_with_commands(commands, 'host_notification_commands')
self.linkify_with_notificationways(notificationways)
    # We've got a notificationways property with comma-separated names,
    # and we want to have a list of NotificationWay objects instead
def linkify_with_notificationways(self, notificationways):
for i in self:
if not hasattr(i, 'notificationways'):
continue
new_notificationways = []
for nw_name in strip_and_uniq(i.notificationways.split(',')):
nw = notificationways.find_by_name(nw_name)
if nw is not None:
new_notificationways.append(nw)
else:
err = "The 'notificationways' of the %s '%s' named '%s' is unknown!" % (i.__class__.my_type, i.get_name(), nw_name)
i.configuration_errors.append(err)
# Get the list, but first make elements uniq
i.notificationways = list(set(new_notificationways))
def late_linkify_c_by_commands(self, commands):
for i in self:
for nw in i.notificationways:
nw.late_linkify_nw_by_commands(commands)
    # Explode the contacts: register them in their contactgroups and build inner notification ways
def explode(self, contactgroups, notificationways):
        # The contactgroups property needs to be filled in to get the information
self.apply_partial_inheritance('contactgroups')
        # _special_properties may come from a template, so
        # import them before processing ourselves
for prop in _special_properties:
if prop == 'contact_name':
continue
self.apply_partial_inheritance(prop)
        # Register ourselves in the contactgroups we belong to
for c in self:
if c.is_tpl() or not (hasattr(c, 'contact_name') and hasattr(c, 'contactgroups')):
continue
for cg in c.contactgroups.split(','):
contactgroups.add_member(c.contact_name, cg.strip())
        # Now create a notification way from the simple parameters of the
# contacts
for c in self:
if not c.is_tpl():
need_notificationway = False
params = {}
for p in _simple_way_parameters:
if hasattr(c, p):
need_notificationway = True
params[p] = getattr(c, p)
else: # put a default text value
# Remove the value and put a default value
setattr(c, p, '')
if need_notificationway:
#print "Create notif way with", params
cname = getattr(c, 'contact_name', getattr(c, 'alias', ''))
nw_name = cname + '_inner_notificationway'
notificationways.new_inner_member(nw_name, params)
if not hasattr(c, 'notificationways'):
c.notificationways = nw_name
else:
c.notificationways = c.notificationways + ',' + nw_name
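# --- Illustrative sketch (editor's addition, not part of Shinken itself).
# A standalone rendering of the delegation pattern used by
# Contact.want_service_notification() above: a contact accepts a notification
# as soon as one of its notification ways accepts it, and always refuses while
# a downtime is in effect. The stub classes are hypothetical stand-ins for the
# real Shinken objects.
class _StubNotificationWay(object):
    def __init__(self, accepts):
        self.accepts = accepts

class _StubDowntime(object):
    def __init__(self, active):
        self.is_in_effect = active

def _want_notification(enabled, downtimes, notificationways):
    if not enabled:
        return False
    for dt in downtimes:
        if dt.is_in_effect:
            return False
    return any(nw.accepts for nw in notificationways)

assert _want_notification(True, [], [_StubNotificationWay(False), _StubNotificationWay(True)])
assert not _want_notification(True, [_StubDowntime(True)], [_StubNotificationWay(True)])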
|
h4wkmoon/shinken
|
shinken/objects/contact.py
|
Python
|
agpl-3.0
| 13,143
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import time, os
import sys, fileinput
from bs4 import BeautifulSoup
class Grabber(object):
def use(self):
print ""
print "* This just Fucking whatever for grabbing."
print "* For license just fucking to change this. ^Summon Agus Created."
print "-------------------------------------------------------------------------------------"
print "[1] Add Note : ./notes.py addnote <file_name> <title> <content> <tag1, tag2>"
print "[2] List Note : ./notes.py listnote <file_name>"
print "[3] Delete Note : ./notes.py delnote <file_name> <numb_line>"
print "[4] Add Url to Grab : ./notes.py addurl <file_name> <url>"
print "-------------------------------------------------------------------------------------"
print ""
def addnote(self, args):
self.help = "./notes.py addnote <file_name> <title> <content> <tag1, tag2>"
        if len(sys.argv) < 6:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
f_note_out = sys.argv[2]
title = sys.argv[3]
content = sys.argv[4]
tags = sys.argv[5]
print "[+] Your args is: ./notes.py", args, f_note_out, title, content, tags
time.sleep(1)
print "[>] Waiting for save your note ..."
my_note = '"'+title+'": "'+content+'"'+ ' tag: '+ tags
""" [?] Trying if file was exists, so note will add in new line.
[?] But, if file is doesn't exists, this program will automatically write file with your first argument.
"""
try:
f_note = open(f_note_out, 'a')
my_note = my_note + '\n'
except IOError:
f_note = open(f_note_out, 'w')
my_note = '\n' + my_note
f_note.write(my_note)
f_note.close()
time.sleep(1)
print "[>] Your note was saved in <"+ f_note_out +">"
def listnote(self, args):
self.help = "./notes.py listnote <file_name>"
        if len(sys.argv) < 3:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
print "[+] Your args is: ./notes.py", args, sys.argv[2]
try:
with open(sys.argv[2], "r") as f:
print " -------------------------------------- "
for line in f:
print line.replace("\n", "")
time.sleep(0.3)
print " -------------------------------------- "
except IOError:
sys.exit("[-] File Doesn't exists!!"+\
"\n[?] This your path now: " +str(os.getcwd())+\
"\n[?] This files and folders in your path now: " + str(os.listdir('.')) )
def delnote(self, args):
self.help = "./notes.py delnote <file_name> <numb_line>"
        if len(sys.argv) < 4:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
f_note_out = str(sys.argv[2])
try:
for numb, line in enumerate(fileinput.input(f_note_out, inplace=True)): #start index from 0
if numb == int(sys.argv[3]):
continue
else:
sys.stdout.write(line)
sys.exit("[+] Success delete line <"+sys.argv[3]+"> in file of <"+ f_note_out +">")
        except (IOError, OSError):  # fileinput raises IOError when the file is missing
            sys.exit("[-] File doesn't exist!!"+\
                "\n[?] Your current path: " +str(os.getcwd())+\
                "\n[?] Files and folders in this path: " + str(os.listdir('.')) )
def addurl(self, args):
self.help = "./notes.py addurl <file_name> <url>"
        if len(sys.argv) < 4:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
url = str(sys.argv[3])
f_note_out = str(sys.argv[2])
print "[+] Your args is: ./notes.py", args, f_note_out, url
agent = {'User-Agent':'Mozilla/5.0'}
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
soup = BeautifulSoup(page)
title = soup.title.string.encode('utf-8')
descriptions = soup.findAll(attrs={"name":"description"})[0]['content'].encode('utf-8')
keywords = soup.findAll(attrs={"name":"keywords"})[0]['content'].encode('utf-8')
print "[>] Waiting for save your note ..."
time.sleep(1)
my_note = '"'+title+'": "'+descriptions+'"'+ ' tag: '+ keywords
try:
f_note = open(f_note_out, 'a')
my_note = my_note + '\n'
except IOError:
f_note = open(f_note_out, 'w')
my_note = '\n' + my_note
f_note.write(my_note)
f_note.close()
time.sleep(1)
print "[>] Your url note was saved in <"+ f_note_out +">"
if __name__ == "__main__":
mome = Grabber()
try:
args = str(sys.argv[1])
if args == 'addnote':
mome.addnote(args)
elif args == 'listnote':
mome.listnote(args)
elif args == 'delnote':
mome.delnote(args)
elif args == 'addurl':
mome.addurl(args)
else:
print "Funcking damn!, please checkout your input"
except IndexError:
mome.use()
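# --- Illustrative invocations (editor's addition); the file name and note
# contents below are hypothetical:
#   ./notes.py addnote mynotes.txt "todo" "buy milk" "home,errands"
#   ./notes.py listnote mynotes.txt
#   ./notes.py delnote mynotes.txt 2
#   ./notes.py addurl mynotes.txt http://example.com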
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
grabbing/notes.py
|
Python
|
agpl-3.0
| 5,474
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-04 16:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('meinberlin_organisations', '0006_update_orga_type_string'),
]
operations = [
migrations.RemoveField(
model_name='organisation',
name='type',
),
]
|
liqd/a4-meinberlin
|
meinberlin/apps/organisations/migrations/0007_remove_organisation_type.py
|
Python
|
agpl-3.0
| 420
|
"""
Forms and validation code for user registration.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from registration.models import RegistrationProfile
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = { 'class': 'required' }
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should either preserve the base ``save()`` or implement
a ``save()`` which accepts the ``profile_callback`` keyword
argument and passes it through to
``RegistrationProfile.objects.create_inactive_user()``.
"""
username = forms.RegexField(regex=r'^\w+$',
max_length=30,
widget=forms.TextInput(attrs=attrs_dict),
label=_(u'username'))
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_(u'email address'))
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_(u'password'))
password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_(u'password (again)'))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
try:
user = User.objects.get(username__iexact=self.cleaned_data['username'])
except User.DoesNotExist:
return self.cleaned_data['username']
raise forms.ValidationError(_(u'This username is already taken. Please choose another.'))
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_(u'You must type the same password each time'))
return self.cleaned_data
def save(self, request, profile_callback=None):
"""
Create the new ``User`` and ``RegistrationProfile``, and
returns the ``User``.
This is essentially a light wrapper around
``RegistrationProfile.objects.create_inactive_user()``,
feeding it the form data and a profile callback (see the
documentation on ``create_inactive_user()`` for details) if
supplied.
"""
new_user = RegistrationProfile.objects.create_inactive_user(request,
username=self.cleaned_data['username'],
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'],
profile_callback=profile_callback)
return new_user
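# --- Illustrative sketch (editor's addition, not part of the original module).
# A minimal view showing how the form above is typically driven; the template
# name and the 'registration_complete' URL name are hypothetical.
def _example_register_view(request):
    from django.shortcuts import render, redirect
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save(request)  # creates the inactive User via RegistrationProfile
            return redirect('registration_complete')
    else:
        form = RegistrationForm()
    return render(request, 'registration/registration_form.html', {'form': form})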
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
label=_(u'I have read and agree to the Terms of Service'))
def clean_tos(self):
"""
Validate that the user accepted the Terms of Service.
"""
if self.cleaned_data.get('tos', False):
return self.cleaned_data['tos']
raise forms.ValidationError(_(u'You must agree to the terms to register'))
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_(u'Registration using free email addresses is prohibited. Please supply a different email address.'))
return self.cleaned_data['email']
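# --- Illustrative sketch (editor's addition): tightening the blacklist is just
# a matter of overriding ``bad_domains`` in a subclass; the extra domains here
# are hypothetical examples.
class _ExampleStricterEmailForm(RegistrationFormNoFreeEmail):
    bad_domains = RegistrationFormNoFreeEmail.bad_domains + ['gmx.com', 'yandex.com']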
|
umitproject/openmonitor-aggregator
|
registration/forms.py
|
Python
|
agpl-3.0
| 6,034
|
"""
flask.ext.restless.views
~~~~~~~~~~~~~~~~~~~~~~~~
Provides the following view classes, subclasses of
:class:`flask.MethodView` which provide generic endpoints for interacting
with an entity of the database:
:class:`flask.ext.restless.views.API`
Provides the endpoints for each of the basic HTTP methods. This is the
main class used by the
:meth:`flask.ext.restless.manager.APIManager.create_api` method to create
endpoints.
:class:`flask.ext.restless.views.FunctionAPI`
Provides a :http:method:`get` endpoint which returns the result of
evaluating some function on the entire collection of a given model.
:copyright: 2011 by Lincoln de Sousa <lincoln@comum.org>
:copyright: 2012, 2013, 2014, 2015 Jeffrey Finkelstein
<jeffrey.finkelstein@gmail.com> and contributors.
:license: GNU AGPLv3+ or BSD
"""
from __future__ import division
from collections import defaultdict
from functools import wraps
import math
import warnings
from flask import current_app
from flask import json
from flask import jsonify as _jsonify
from flask import request
from flask.views import MethodView
from mimerender import FlaskMimeRender
from sqlalchemy import Column
from sqlalchemy.exc import DataError
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.query import Query
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import HTTPException
from werkzeug.urls import url_quote_plus
from .helpers import count
from .helpers import evaluate_functions
from .helpers import get_by
from .helpers import get_columns
from .helpers import get_or_create
from .helpers import get_related_model
from .helpers import get_relations
from .helpers import has_field
from .helpers import is_like_list
from .helpers import partition
from .helpers import primary_key_name
from .helpers import query_by_primary_key
from .helpers import session_query
from .helpers import strings_to_dates
from .helpers import to_dict
from .helpers import upper_keys
from .helpers import get_related_association_proxy_model
from .search import create_query
from .search import search
#: Format string for creating Link headers in paginated responses.
LINKTEMPLATE = '<{0}?page={1}&results_per_page={2}>; rel="{3}"'
#: String used internally as a dictionary key for passing header information
#: from view functions to the :func:`jsonpify` function.
_HEADERS = '__restless_headers'
#: String used internally as a dictionary key for passing status code
#: information from view functions to the :func:`jsonpify` function.
_STATUS = '__restless_status_code'
class ProcessingException(HTTPException):
"""Raised when a preprocessor or postprocessor encounters a problem.
This exception should be raised by functions supplied in the
``preprocessors`` and ``postprocessors`` keyword arguments to
:class:`APIManager.create_api`. When this exception is raised, all
preprocessing or postprocessing halts, so any processors appearing later in
the list will not be invoked.
`code` is the HTTP status code of the response supplied to the client in
the case that this exception is raised. `description` is an error message
describing the cause of this exception. This message will appear in the
JSON object in the body of the response to the client.
"""
def __init__(self, description='', code=400, *args, **kwargs):
super(ProcessingException, self).__init__(*args, **kwargs)
self.code = code
self.description = description
class NotAuthorizedException(HTTPException):
"""Raised whenever you want a 403.
"""
def __init__(self, description='Not Authorized', code=403, *args, **kwargs):
super(NotAuthorizedException, self).__init__(*args, **kwargs)
self.code = code
self.description = description
class ValidationError(Exception):
"""Raised when there is a problem deserializing a dictionary into an
instance of a SQLAlchemy model.
"""
pass
def _is_msie8or9():
"""Returns ``True`` if and only if the user agent of the client making the
request indicates that it is Microsoft Internet Explorer 8 or 9.
.. note::
We have no way of knowing if the user agent is lying, so we just make
our best guess based on the information provided.
"""
# request.user_agent.version comes as a string, so we have to parse it
version = lambda ua: tuple(int(d) for d in ua.version.split('.'))
return (request.user_agent is not None
and request.user_agent.version is not None
and request.user_agent.browser == 'msie'
and (8, 0) <= version(request.user_agent) < (10, 0))
def create_link_string(page, last_page, per_page):
"""Returns a string representing the value of the ``Link`` header.
`page` is the number of the current page, `last_page` is the last page in
the pagination, and `per_page` is the number of results per page.
"""
linkstring = ''
if page < last_page:
next_page = page + 1
linkstring = LINKTEMPLATE.format(request.base_url, next_page,
per_page, 'next') + ', '
linkstring += LINKTEMPLATE.format(request.base_url, last_page,
per_page, 'last')
return linkstring
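# --- Illustrative example (editor's addition): for page 2 of 5 with 10 results
# per page and a request base URL of http://example.com/api/person, the
# function above produces:
#   <http://example.com/api/person?page=3&results_per_page=10>; rel="next",
#   <http://example.com/api/person?page=5&results_per_page=10>; rel="last"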
def catch_processing_exceptions(func):
"""Decorator that catches :exc:`ProcessingException`s and subsequently
returns a JSON-ified error response.
"""
@wraps(func)
def decorator(*args, **kw):
try:
return func(*args, **kw)
except ProcessingException as exception:
status = exception.code
message = exception.description or str(exception)
return jsonify(message=message), status
return decorator
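# --- Illustrative sketch (editor's addition): a preprocessor that aborts a
# request by raising ProcessingException; the decorator above converts it into
# a JSON error response. The flask.g attribute checked here is hypothetical.
def _example_auth_preprocessor(**kw):
    from flask import g
    if getattr(g, 'current_user', None) is None:
        raise ProcessingException(description='Not authenticated', code=401)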
def catch_integrity_errors(session):
"""Returns a decorator that catches database integrity errors.
`session` is the SQLAlchemy session in which all database transactions will
be performed.
View methods can be wrapped like this::
@catch_integrity_errors(session)
def get(self, *args, **kw):
return '...'
Specifically, functions wrapped with the returned decorator catch
:exc:`IntegrityError`s, :exc:`DataError`s, and
:exc:`ProgrammingError`s. After the exceptions are caught, the session is
rolled back, the exception is logged on the current Flask application, and
an error response is returned to the client.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kw):
try:
return func(*args, **kw)
# TODO should `sqlalchemy.exc.InvalidRequestError`s also be caught?
except (DataError, IntegrityError, ProgrammingError) as exception:
session.rollback()
current_app.logger.exception(str(exception))
return dict(message=type(exception).__name__), 400
return wrapped
return decorator
def set_headers(response, headers):
"""Sets the specified headers on the specified response.
`response` is a Flask response object, and `headers` is a dictionary of
headers to set on the specified response. Any existing headers that
conflict with `headers` will be overwritten.
"""
for key, value in headers.items():
response.headers[key] = value
def jsonify(*args, **kw):
"""Same as :func:`flask.jsonify`, but sets response headers.
If ``headers`` is a keyword argument, this function will construct the JSON
response via :func:`flask.jsonify`, then set the specified ``headers`` on
the response. ``headers`` must be a dictionary mapping strings to strings.
"""
response = _jsonify(*args, **kw)
if 'headers' in kw:
set_headers(response, kw['headers'])
return response
# This code is (lightly) adapted from the ``requests`` library, in the
# ``requests.utils`` module. See <http://python-requests.org> for more
# information.
def _link_to_json(value):
"""Returns a list representation of the specified HTTP Link header
information.
`value` is a string containing the link header information. If the link
header information (the part of after ``Link:``) looked like this::
<url1>; rel="next", <url2>; rel="foo"; bar="baz"
then this function returns a list that looks like this::
[{"url": "url1", "rel": "next"},
{"url": "url2", "rel": "foo", "bar": "baz"}]
This example is adapted from the documentation of GitHub's API.
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
def _headers_to_json(headers):
"""Returns a dictionary representation of the specified dictionary of HTTP
headers ready for use as a JSON object.
Pre-condition: headers is not ``None``.
"""
link = headers.pop('Link', None)
# Shallow copy is fine here because the `headers` dictionary maps strings
# to strings to strings.
result = headers.copy()
if link:
result['Link'] = _link_to_json(link)
return result
def jsonpify(*args, **kw):
"""Passes the specified arguments directly to :func:`jsonify` with a status
code of 200, then wraps the response with the name of a JSON-P callback
function specified as a query parameter called ``'callback'`` (or does
nothing if no such callback function is specified in the request).
If the keyword arguments include the string specified by :data:`_HEADERS`,
its value must be a dictionary specifying headers to set before sending the
JSONified response to the client. Headers on the response will be
overwritten by headers specified in this dictionary.
If the keyword arguments include the string specified by :data:`_STATUS`,
its value must be an integer representing the status code of the response.
Otherwise, the status code of the response will be :http:status:`200`.
"""
# HACK In order to make the headers and status code available in the
# content of the response, we need to send it from the view function to
# this jsonpify function via its keyword arguments. This is a limitation of
# the mimerender library: it has no way of making the headers and status
# code known to the rendering functions.
headers = kw.pop(_HEADERS, {})
status_code = kw.pop(_STATUS, 200)
response = jsonify(*args, **kw)
callback = request.args.get('callback', False)
if callback:
# Reload the data from the constructed JSON string so we can wrap it in
# a JSONP function.
data = json.loads(response.data)
# Force the 'Content-Type' header to be 'application/javascript'.
#
# Note that this is different from the mimetype used in Flask for JSON
# responses; Flask uses 'application/json'. We use
# 'application/javascript' because a JSONP response is valid
# Javascript, but not valid JSON.
headers['Content-Type'] = 'application/javascript'
# Add the headers and status code as metadata to the JSONP response.
meta = _headers_to_json(headers) if headers is not None else {}
meta['status'] = status_code
inner = json.dumps(dict(meta=meta, data=data))
content = '{0}({1})'.format(callback, inner)
# Note that this is different from the mimetype used in Flask for JSON
# responses; Flask uses 'application/json'. We use
# 'application/javascript' because a JSONP response is not valid JSON.
mimetype = 'application/javascript'
response = current_app.response_class(content, mimetype=mimetype)
# Set the headers on the HTTP response as well.
if headers:
set_headers(response, headers)
response.status_code = status_code
return response
def _parse_includes(column_names):
"""Returns a pair, consisting of a list of column names to include on the
left and a dictionary mapping relation name to a list containing the names
of fields on the related model which should be included.
`column_names` must be a list of strings.
If the name of a relation appears as a key in the dictionary, then it will
not appear in the list.
"""
dotted_names, columns = partition(column_names, lambda name: '.' in name)
# Create a dictionary mapping relation names to fields on the related
# model.
relations = defaultdict(list)
for name in dotted_names:
relation, field = name.split('.', 1)
        # Only add the relation if its column has been specified.
if relation in columns:
relations[relation].append(field)
# Included relations need only be in the relations dictionary, not the
# columns list.
for relation in relations:
if relation in columns:
columns.remove(relation)
return columns, relations
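# --- Illustrative example (editor's addition) of the split performed above:
#   _parse_includes(['name', 'age', 'computers', 'computers.vendor'])
# returns (['name', 'age'], {'computers': ['vendor']}), since 'computers'
# names a relation for which only the 'vendor' field was requested.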
def _parse_excludes(column_names):
"""Returns a pair, consisting of a list of column names to exclude on the
left and a dictionary mapping relation name to a list containing the names
of fields on the related model which should be excluded.
`column_names` must be a list of strings.
If the name of a relation appears in the list then it will not appear in
the dictionary.
"""
dotted_names, columns = partition(column_names, lambda name: '.' in name)
# Create a dictionary mapping relation names to fields on the related
# model.
relations = defaultdict(list)
for name in dotted_names:
relation, field = name.split('.', 1)
        # Only add the relation if its column has not been specified.
if relation not in columns:
relations[relation].append(field)
# Relations which are to be excluded entirely need only be in the columns
# list, not the relations dictionary.
for column in columns:
if column in relations:
del relations[column]
return columns, relations
def extract_error_messages(exception):
"""Tries to extract a dictionary mapping field name to validation error
messages from `exception`, which is a validation exception as provided in
the ``validation_exceptions`` keyword argument in the constructor of this
class.
Since the type of the exception is provided by the user in the constructor
of this class, we don't know for sure where the validation error messages
live inside `exception`. Therefore this method simply attempts to access a
few likely attributes and returns the first one it finds (or ``None`` if no
error messages dictionary can be extracted).
"""
# 'errors' comes from sqlalchemy_elixir_validations
if hasattr(exception, 'errors'):
return exception.errors
# 'message' comes from savalidation
if hasattr(exception, 'message'):
# TODO this works only if there is one validation error
try:
left, right = str(exception).rsplit(':', 1)
left_bracket = left.rindex('[')
right_bracket = right.rindex(']')
except ValueError as exc:
current_app.logger.exception(str(exc))
# could not parse the string; we're not trying too hard here...
return None
msg = right[:right_bracket].strip(' "')
fieldname = left[left_bracket + 1:].strip()
return {fieldname: msg}
return None
#: Creates the mimerender object necessary for decorating responses with a
#: function that automatically formats the dictionary in the appropriate format
#: based on the ``Accept`` header.
#:
#: Technical details: the first pair of parentheses instantiates the
#: :class:`mimerender.FlaskMimeRender` class. The second pair of parentheses
#: creates the decorator, so that we can simply use the variable ``mimerender``
#: as a decorator.
# TODO fill in xml renderer
mimerender = FlaskMimeRender()(default='json', json=jsonpify)
class ModelView(MethodView):
"""Base class for :class:`flask.MethodView` classes which represent a view
of a SQLAlchemy model.
The model class for this view can be accessed from the :attr:`model`
attribute, and the session in which all database transactions will be
performed when dealing with this model can be accessed from the
:attr:`session` attribute.
When subclasses wish to make queries to the database model specified in the
constructor, they should access the ``self.query`` function, which
delegates to the appropriate SQLAlchemy query object or Flask-SQLAlchemy
query object, depending on how the model has been defined.
"""
#: List of decorators applied to every method of this class.
decorators = [mimerender]
def __init__(self, session, model, *args, **kw):
"""Calls the constructor of the superclass and specifies the model for
which this class provides a ReSTful API.
`session` is the SQLAlchemy session in which all database transactions
will be performed.
        `model` is the SQLAlchemy declarative model class of the database model
for which this instance of the class is an API.
"""
super(ModelView, self).__init__(*args, **kw)
self.session = session
self.model = model
def query(self, model=None):
"""Returns either a SQLAlchemy query or Flask-SQLAlchemy query object
(depending on the type of the model) on the specified `model`, or if
`model` is ``None``, the model specified in the constructor of this
class.
"""
return session_query(self.session, model or self.model)
class FunctionAPI(ModelView):
"""Provides method-based dispatching for :http:method:`get` requests which
wish to apply SQL functions to all instances of a model.
.. versionadded:: 0.4
"""
def get(self):
"""Returns the result of evaluating the SQL functions specified in the
body of the request.
For a description of the request and response formats, see
:ref:`functionevaluation`.
"""
if 'q' not in request.args or not request.args.get('q'):
return dict(message='Empty query parameter'), 400
# if parsing JSON fails, return a 400 error in JSON format
try:
data = json.loads(str(request.args.get('q'))) or {}
except (TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
try:
result = evaluate_functions(self.session, self.model,
data.get('functions', []))
if not result:
return {}, 204
return result
except AttributeError as exception:
current_app.logger.exception(str(exception))
message = 'No such field "{0}"'.format(exception.field)
return dict(message=message), 400
except OperationalError as exception:
current_app.logger.exception(str(exception))
message = 'No such function "{0}"'.format(exception.function)
return dict(message=message), 400
class API(ModelView):
"""Provides method-based dispatching for :http:method:`get`,
:http:method:`post`, :http:method:`patch`, :http:method:`put`, and
:http:method:`delete` requests, for both collections of models and
individual models.
"""
#: List of decorators applied to every method of this class.
decorators = ModelView.decorators + [catch_processing_exceptions]
def __init__(self, session, model, exclude_columns=None,
include_columns=None, include_methods=None,
validation_exceptions=None, results_per_page=10,
max_results_per_page=100, post_form_preprocessor=None,
preprocessors=None, postprocessors=None, primary_key=None,
serializer=None, deserializer=None, *args, **kw):
"""Instantiates this view with the specified attributes.
`session` is the SQLAlchemy session in which all database transactions
will be performed.
`model` is the SQLAlchemy model class for which this instance of the
class is an API. This model should live in `database`.
`validation_exceptions` is the tuple of exceptions raised by backend
        validation (if any exist). If exceptions are specified here, any such
        exceptions caught when writing to the database will be returned to the
        client as a :http:statuscode:`400` response with a
message specifying the validation error which occurred. For more
information, see :ref:`validation`.
If either `include_columns` or `exclude_columns` is not ``None``,
exactly one of them must be specified. If both are not ``None``, then
the behavior of this function is undefined. `exclude_columns` must be
an iterable of strings specifying the columns of `model` which will
*not* be present in the JSON representation of the model provided in
response to :http:method:`get` requests. Similarly, `include_columns`
specifies the *only* columns which will be present in the returned
dictionary. In other words, `exclude_columns` is a blacklist and
`include_columns` is a whitelist; you can only use one of them per API
endpoint. If either `include_columns` or `exclude_columns` contains a
string which does not name a column in `model`, it will be ignored.
If `include_columns` is an iterable of length zero (like the empty
tuple or the empty list), then the returned dictionary will be
empty. If `include_columns` is ``None``, then the returned dictionary
will include all columns not excluded by `exclude_columns`.
If `include_methods` is an iterable of strings, the methods with names
corresponding to those in this list will be called and their output
included in the response.
See :ref:`includes` for information on specifying included or excluded
columns on fields of related models.
`results_per_page` is a positive integer which represents the default
number of results which are returned per page. Requests made by clients
may override this default by specifying ``results_per_page`` as a query
argument. `max_results_per_page` is a positive integer which represents
the maximum number of results which are returned per page. This is a
"hard" upper bound in the sense that even if a client specifies that
greater than `max_results_per_page` should be returned, only
`max_results_per_page` results will be returned. For more information,
see :ref:`serverpagination`.
.. deprecated:: 0.9.2
The `post_form_preprocessor` keyword argument is deprecated in
version 0.9.2. It will be removed in version 1.0. Replace code that
looks like this::
manager.create_api(Person, post_form_preprocessor=foo)
with code that looks like this::
manager.create_api(Person, preprocessors=dict(POST=[foo]))
See :ref:`processors` for more information and examples.
`post_form_preprocessor` is a callback function which takes
POST input parameters loaded from JSON and enhances them with other
key/value pairs. The example use of this is when your ``model``
requires to store user identity and for security reasons the identity
is not read from the post parameters (where malicious user can tamper
with them) but from the session.
`preprocessors` is a dictionary mapping strings to lists of
functions. Each key is the name of an HTTP method (for example,
``'GET'`` or ``'POST'``). Each value is a list of functions, each of
which will be called before any other code is executed when this API
receives the corresponding HTTP request. The functions will be called
in the order given here. The `postprocessors` keyword argument is
essentially the same, except the given functions are called after all
other code. For more information on preprocessors and postprocessors,
see :ref:`processors`.
`primary_key` is a string specifying the name of the column of `model`
to use as the primary key for the purposes of creating URLs. If the
`model` has exactly one primary key, there is no need to provide a
value for this. If `model` has two or more primary keys, you must
specify which one to use.
`serializer` and `deserializer` are custom serialization functions. The
former function must take a single argument representing the instance
of the model to serialize, and must return a dictionary representation
of that instance. The latter function must take a single argument
representing the dictionary representation of an instance of the model
and must return an instance of `model` that has those attributes. For
more information, see :ref:`serialization`.
.. versionadded:: 0.17.0
Added the `serializer` and `deserializer` keyword arguments.
.. versionadded:: 0.13.0
Added the `primary_key` keyword argument.
.. versionadded:: 0.10.2
Added the `include_methods` keyword argument.
.. versionchanged:: 0.10.0
Removed `authentication_required_for` and `authentication_function`
keyword arguments.
            Use the `preprocessors` and `postprocessors` keyword arguments
instead. For more information, see :ref:`authentication`.
.. versionadded:: 0.9.2
Added the `preprocessors` and `postprocessors` keyword arguments.
.. versionadded:: 0.9.0
Added the `max_results_per_page` keyword argument.
.. versionadded:: 0.7
Added the `exclude_columns` keyword argument.
.. versionadded:: 0.6
Added the `results_per_page` keyword argument.
.. versionadded:: 0.5
Added the `include_columns`, and `validation_exceptions` keyword
arguments.
.. versionadded:: 0.4
Added the `authentication_required_for` and
`authentication_function` keyword arguments.
"""
super(API, self).__init__(session, model, *args, **kw)
if exclude_columns is None:
self.exclude_columns, self.exclude_relations = (None, None)
else:
self.exclude_columns, self.exclude_relations = _parse_excludes(
[self._get_column_name(column) for column in exclude_columns])
if include_columns is None:
self.include_columns, self.include_relations = (None, None)
else:
self.include_columns, self.include_relations = _parse_includes(
[self._get_column_name(column) for column in include_columns])
self.include_methods = include_methods
self.validation_exceptions = tuple(validation_exceptions or ())
self.results_per_page = results_per_page
self.max_results_per_page = max_results_per_page
self.primary_key = primary_key
# Use our default serializer and deserializer if none are specified.
if serializer is None:
self.serialize = self._inst_to_dict
else:
self.serialize = serializer
if deserializer is None:
self.deserialize = self._dict_to_inst
# And check for our own default ValidationErrors here
self.validation_exceptions = tuple(list(self.validation_exceptions)
+ [ValidationError])
else:
self.deserialize = deserializer
self.postprocessors = defaultdict(list)
self.preprocessors = defaultdict(list)
self.postprocessors.update(upper_keys(postprocessors or {}))
self.preprocessors.update(upper_keys(preprocessors or {}))
# move post_form_preprocessor to preprocessors['POST'] for backward
# compatibility
if post_form_preprocessor:
msg = ('post_form_preprocessor is deprecated and will be removed'
' in version 1.0; use preprocessors instead.')
warnings.warn(msg, DeprecationWarning)
self.preprocessors['POST'].append(post_form_preprocessor)
# postprocessors for PUT are applied to PATCH because PUT is just a
# redirect to PATCH
for postprocessor in self.postprocessors['PUT_SINGLE']:
self.postprocessors['PATCH_SINGLE'].append(postprocessor)
for preprocessor in self.preprocessors['PUT_SINGLE']:
self.preprocessors['PATCH_SINGLE'].append(preprocessor)
for postprocessor in self.postprocessors['PUT_MANY']:
self.postprocessors['PATCH_MANY'].append(postprocessor)
for preprocessor in self.preprocessors['PUT_MANY']:
self.preprocessors['PATCH_MANY'].append(preprocessor)
# HACK: We would like to use the :attr:`API.decorators` class attribute
# in order to decorate each view method with a decorator that catches
# database integrity errors. However, in order to rollback the session,
# we need to have a session object available to roll back. Therefore we
# need to manually decorate each of the view functions here.
decorate = lambda name, f: setattr(self, name, f(getattr(self, name)))
for method in ['get', 'post', 'patch', 'put', 'delete']:
decorate(method, catch_integrity_errors(self.session))
def _get_column_name(self, column):
"""Retrieve a column name from a column attribute of SQLAlchemy
model class, or a string.
Raises `TypeError` when argument does not fall into either of those
options.
Raises `ValueError` if argument is a column attribute that belongs
to an incorrect model class.
"""
if hasattr(column, '__clause_element__'):
clause_element = column.__clause_element__()
if not isinstance(clause_element, Column):
msg = ('Column must be a string or a column attribute'
' of SQLAlchemy ORM class')
raise TypeError(msg)
model = column.class_
if model is not self.model:
msg = ('Cannot specify column of model {0} while creating API'
' for model {1}').format(model.__name__,
self.model.__name__)
raise ValueError(msg)
return clause_element.key
return column
def _add_to_relation(self, query, relationname, toadd=None):
"""Adds a new or existing related model to each model specified by
`query`.
This function does not commit the changes made to the database. The
calling function has that responsibility.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`relationname` is the name of a one-to-many relationship which exists
on each model specified in `query`.
`toadd` is a list of dictionaries, each representing the attributes of
an existing or new related model to add. If a dictionary contains the
key ``'id'``, that instance of the related model will be
added. Otherwise, the :func:`helpers.get_or_create` class method will
be used to get or create a model to add.
"""
submodel = get_related_model(self.model, relationname)
if isinstance(toadd, dict):
toadd = [toadd]
for dictionary in toadd or []:
subinst = get_or_create(self.session, submodel, dictionary)
try:
for instance in query:
getattr(instance, relationname).append(subinst)
except AttributeError as exception:
current_app.logger.exception(str(exception))
setattr(instance, relationname, subinst)
def _remove_from_relation(self, query, relationname, toremove=None):
"""Removes a related model from each model specified by `query`.
This function does not commit the changes made to the database. The
calling function has that responsibility.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`relationname` is the name of a one-to-many relationship which exists
on each model specified in `query`.
`toremove` is a list of dictionaries, each representing the attributes
of an existing model to remove. If a dictionary contains the key
``'id'``, that instance of the related model will be
removed. Otherwise, the instance to remove will be retrieved using the
other attributes specified in the dictionary. If multiple instances
match the specified attributes, only the first instance will be
removed.
If one of the dictionaries contains a mapping from ``'__delete__'`` to
``True``, then the removed object will be deleted after being removed
from each instance of the model in the specified query.
"""
submodel = get_related_model(self.model, relationname)
for dictionary in toremove or []:
remove = dictionary.pop('__delete__', False)
if 'id' in dictionary:
subinst = get_by(self.session, submodel, dictionary['id'])
else:
subinst = self.query(submodel).filter_by(**dictionary).first()
for instance in query:
getattr(instance, relationname).remove(subinst)
if remove:
self.session.delete(subinst)
def _set_on_relation(self, query, relationname, toset=None):
"""Sets the value of the relation specified by `relationname` on each
instance specified by `query` to have the new or existing related
models specified by `toset`.
This function does not commit the changes made to the database. The
calling function has that responsibility.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`relationname` is the name of a one-to-many relationship which exists
on each model specified in `query`.
`toset` is either a dictionary or a list of dictionaries, each
representing the attributes of an existing or new related model to
set. If a dictionary contains the key ``'id'``, that instance of the
related model will be added. Otherwise, the
:func:`helpers.get_or_create` method will be used to get or create a
model to set.
"""
submodel = get_related_model(self.model, relationname)
if isinstance(toset, list):
value = [get_or_create(self.session, submodel, d) for d in toset]
else:
value = get_or_create(self.session, submodel, toset)
for instance in query:
setattr(instance, relationname, value)
# TODO change this to have more sensible arguments
def _update_relations(self, query, params):
"""Adds, removes, or sets models which are related to the model
specified in the constructor of this class.
This function does not commit the changes made to the database. The
calling function has that responsibility.
This method returns a :class:`frozenset` of strings representing the
names of relations which were modified.
`query` is a SQLAlchemy query instance that evaluates to all instances
of the model specified in the constructor of this class that should be
updated.
`params` is a dictionary containing a mapping from name of the relation
to modify (as a string) to either a list or another dictionary. In the
former case, the relation will be assigned the instances specified by
the elements of the list, which are dictionaries as described below.
In the latter case, the inner dictionary contains at most two mappings,
one with the key ``'add'`` and one with the key ``'remove'``. Each of
these is a mapping to a list of dictionaries which represent the
attributes of the object to add to or remove from the relation.
If one of the dictionaries specified in ``add`` or ``remove`` (or the
list to be assigned) includes an ``id`` key, the object with that
        ``id`` will be added or removed. Otherwise, an attempt is made to add
        or remove an existing object with the specified attribute values. If
        adding, a new object will be created if a matching
object could not be found in the database.
If a dictionary in one of the ``'remove'`` lists contains a mapping
from ``'__delete__'`` to ``True``, then the removed object will be
deleted after being removed from each instance of the model in the
specified query.
"""
relations = get_relations(self.model)
tochange = frozenset(relations) & frozenset(params)
for columnname in tochange:
# Check if 'add' or 'remove' is being used
if (isinstance(params[columnname], dict)
and any(k in params[columnname] for k in ['add', 'remove'])):
toadd = params[columnname].get('add', [])
toremove = params[columnname].get('remove', [])
self._add_to_relation(query, columnname, toadd=toadd)
self._remove_from_relation(query, columnname,
toremove=toremove)
else:
toset = params[columnname]
self._set_on_relation(query, columnname, toset=toset)
return tochange
def _handle_validation_exception(self, exception):
"""Rolls back the session, extracts validation error messages, and
returns a :func:`flask.jsonify` response with :http:statuscode:`400`
containing the extracted validation error messages.
Again, *this method calls
:meth:`sqlalchemy.orm.session.Session.rollback`*.
"""
self.session.rollback()
errors = extract_error_messages(exception) or \
'Could not determine specific validation errors'
return dict(validation_errors=errors), 400
def _compute_results_per_page(self):
"""Helper function which returns the number of results per page based
on the request argument ``results_per_page`` and the server
configuration parameters :attr:`results_per_page` and
:attr:`max_results_per_page`.
"""
try:
results_per_page = int(request.args.get('results_per_page'))
        except (TypeError, ValueError):  # parameter missing or not a number
results_per_page = self.results_per_page
if results_per_page <= 0:
results_per_page = self.results_per_page
return min(results_per_page, self.max_results_per_page)
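    # --- Illustrative example (editor's addition): with results_per_page=10
    # and max_results_per_page=100 configured, a request carrying
    # ?results_per_page=25 yields 25, ?results_per_page=500 is capped to 100,
    # and a missing, zero, or non-numeric value falls back to the default 10.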
# TODO it is ugly to have `deep` as an arg here; can we remove it?
def _paginated(self, instances, deep):
"""Returns a paginated JSONified response from the specified list of
model instances.
`instances` is either a Python list of model instances or a
:class:`~sqlalchemy.orm.Query`.
`deep` is the dictionary which defines the depth of submodels to output
in the JSON format of the model instances in `instances`; it is passed
directly to :func:`helpers.to_dict`.
The response data is JSON of the form:
.. sourcecode:: javascript
{
"page": 2,
"total_pages": 3,
"num_results": 8,
"objects": [{"id": 1, "name": "Jeffrey", "age": 24}, ...]
}
"""
if isinstance(instances, list):
num_results = len(instances)
else:
num_results = count(self.session, instances)
results_per_page = self._compute_results_per_page()
if results_per_page > 0:
# get the page number (first page is page 1)
page_num = int(request.args.get('page', 1))
start = (page_num - 1) * results_per_page
end = min(num_results, start + results_per_page)
total_pages = int(math.ceil(num_results / results_per_page))
else:
page_num = 1
start = 0
end = num_results
total_pages = 1
objects = [to_dict(x, deep, exclude=self.exclude_columns,
exclude_relations=self.exclude_relations,
include=self.include_columns,
include_relations=self.include_relations,
include_methods=self.include_methods)
for x in instances[start:end]]
return dict(page=page_num, objects=objects, total_pages=total_pages,
num_results=num_results)
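    # --- Illustrative example (editor's addition): with 8 results and 3 per
    # page, ?page=2 gives start=3, end=6 and total_pages=3, so the response
    # carries objects 4-6 of 8 along with page=2 and num_results=8.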
def _inst_to_dict(self, inst):
"""Returns the dictionary representation of the specified instance.
This method respects the include and exclude columns specified in the
constructor of this class.
"""
# create a placeholder for the relations of the returned models
relations = frozenset(get_relations(self.model))
# do not follow relations that will not be included in the response
if self.include_columns is not None:
cols = frozenset(self.include_columns)
rels = frozenset(self.include_relations)
relations &= (cols | rels)
elif self.exclude_columns is not None:
relations -= frozenset(self.exclude_columns)
deep = dict((r, {}) for r in relations)
return to_dict(inst, deep, exclude=self.exclude_columns,
exclude_relations=self.exclude_relations,
include=self.include_columns,
include_relations=self.include_relations,
include_methods=self.include_methods)
def _dict_to_inst(self, data):
"""Returns an instance of the model with the specified attributes."""
# Check for any request parameter naming a column which does not exist
# on the current model.
for field in data:
if not has_field(self.model, field):
msg = "Model does not have field '{0}'".format(field)
raise ValidationError(msg)
# Getting the list of relations that will be added later
cols = get_columns(self.model)
relations = get_relations(self.model)
# Looking for what we're going to set on the model right now
colkeys = cols.keys()
paramkeys = data.keys()
props = set(colkeys).intersection(paramkeys).difference(relations)
# Special case: if there are any dates, convert the string form of the
# date into an instance of the Python ``datetime`` object.
data = strings_to_dates(self.model, data)
# Instantiate the model with the parameters.
modelargs = dict([(i, data[i]) for i in props])
instance = self.model(**modelargs)
# Handling relations, a single level is allowed
for col in set(relations).intersection(paramkeys):
submodel = get_related_model(self.model, col)
if type(data[col]) == list:
# model has several related objects
for subparams in data[col]:
subinst = get_or_create(self.session, submodel,
subparams)
try:
getattr(instance, col).append(subinst)
except AttributeError:
attribute = getattr(instance, col)
attribute[subinst.key] = subinst.value
else:
# model has single related object
subinst = get_or_create(self.session, submodel,
data[col])
setattr(instance, col, subinst)
return instance
def _instid_to_dict(self, instid):
"""Returns the dictionary representation of the instance specified by
`instid`.
If no such instance of the model exists, this method aborts with a
:http:statuscode:`404`.
"""
inst = get_by(self.session, self.model, instid, self.primary_key)
if inst is None:
return {_STATUS: 404}, 404
return self._inst_to_dict(inst)
def _search(self):
"""Defines a generic search function for the database model.
        If the query string is empty, or if the specified query is invalid for
        some reason (for example, a filter that references a nonexistent
        field), the response will be the JSON string ``{"objects": []}``.
To search for entities meeting some criteria, the client makes a
request to :http:get:`/api/<modelname>` with a query string containing
the parameters of the search. The parameters of the search can involve
filters. In a filter, the client specifies the name of the field by
which to filter, the operation to perform on the field, and the value
which is the argument to that operation. In a function, the client
specifies the name of a SQL function which is executed on the search
results; the result of executing the function is returned to the
client.
The parameters of the search must be provided in JSON form as the value
of the ``q`` request query parameter. For example, in a database of
people, to search for all people with a name containing a "y", the
client would make a :http:method:`get` request to ``/api/person`` with
query parameter as follows::
q={"filters": [{"name": "name", "op": "like", "val": "%y%"}]}
If multiple objects meet the criteria of the search, the response has
:http:status:`200` and content of the form::
.. sourcecode:: javascript
{"objects": [{"name": "Mary"}, {"name": "Byron"}, ...]}
If the result of the search is a single instance of the model, the JSON
representation of that instance would be the top-level object in the
content of the response::
.. sourcecode:: javascript
{"name": "Mary", ...}
        For more information on SQLAlchemy operators for use in filters, see
        the `SQLAlchemy SQL expression tutorial
        <http://docs.sqlalchemy.org/en/latest/core/tutorial.html>`_.
The general structure of request data as a JSON string is as follows::
.. sourcecode:: javascript
{
"single": true,
"order_by": [{"field": "age", "direction": "asc"}],
"limit": 2,
"offset": 1,
"disjunction": true,
"filters":
[
{"name": "name", "val": "%y%", "op": "like"},
{"name": "age", "val": [18, 19, 20, 21], "op": "in"},
{"name": "age", "op": "gt", "field": "height"},
...
]
}
For a complete description of all possible search parameters and
responses, see :ref:`searchformat`.
"""
# try to get search query from the request query parameters
try:
search_params = json.loads(request.args.get('q', '{}'))
except (TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
for preprocessor in self.preprocessors['GET_MANY']:
returned_values = preprocessor(search_params=search_params)
if returned_values:
search_params = returned_values
# resolve date-strings as required by the model
for param in search_params.get('filters', list()):
if 'name' in param and 'val' in param:
query_model = self.model
query_field = param['name']
if '__' in param['name']:
fieldname, relation = param['name'].split('__')
submodel = getattr(self.model, fieldname)
if isinstance(submodel, InstrumentedAttribute):
query_model = submodel.property.mapper.class_
query_field = relation
elif isinstance(submodel, AssociationProxy):
# For the sake of brevity, rename this function.
get_assoc = get_related_association_proxy_model
query_model = get_assoc(submodel)
query_field = relation
to_convert = {query_field: param['val']}
try:
result = strings_to_dates(query_model, to_convert)
except ValueError as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
param['val'] = result.get(query_field)
# perform a filtered search
try:
result = search(self.session, self.model, search_params)
except NoResultFound:
return dict(message='No result found'), 404
except MultipleResultsFound:
return dict(message='Multiple results found'), 400
except NotAuthorizedException:
return dict(message='Not Authorized'), 403
except Exception as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
# create a placeholder for the relations of the returned models
relations = frozenset(get_relations(self.model))
# do not follow relations that will not be included in the response
if self.include_columns is not None:
cols = frozenset(self.include_columns)
rels = frozenset(self.include_relations)
relations &= (cols | rels)
elif self.exclude_columns is not None:
relations -= frozenset(self.exclude_columns)
deep = dict((r, {}) for r in relations)
# for security purposes, don't transmit list as top-level JSON
if isinstance(result, Query):
result = self._paginated(result, deep)
# Create the Link header.
#
# TODO We are already calling self._compute_results_per_page() once
# in _paginated(); don't compute it again here.
page, last_page = result['page'], result['total_pages']
linkstring = create_link_string(page, last_page,
self._compute_results_per_page())
headers = dict(Link=linkstring)
else:
primary_key = self.primary_key or primary_key_name(result)
result = to_dict(result, deep, exclude=self.exclude_columns,
exclude_relations=self.exclude_relations,
include=self.include_columns,
include_relations=self.include_relations,
include_methods=self.include_methods)
# The URL at which a client can access the instance matching this
# search query.
url = '{0}/{1}'.format(request.base_url, result[primary_key])
headers = dict(Location=url)
for postprocessor in self.postprocessors['GET_MANY']:
returned_value = postprocessor(result=result, search_params=search_params)
if returned_value:
result = returned_value
# HACK Provide the headers directly in the result dictionary, so that
# the :func:`jsonpify` function has access to them. See the note there
# for more information.
result[_HEADERS] = headers
return result, 200, headers
def get(self, instid, relationname, relationinstid):
"""Returns a JSON representation of an instance of model with the
specified name.
If ``instid`` is ``None``, this method returns the result of a search
with parameters specified in the query string of the request. If no
search parameters are specified, this method returns all instances of
the specified model.
If ``instid`` is an integer, this method returns the instance of the
model with that identifying integer. If no such instance exists, this
method responds with :http:status:`404`.
"""
if instid is None:
return self._search()
for preprocessor in self.preprocessors['GET_SINGLE']:
returned_values = preprocessor(instance_id=instid)
if returned_values:
instid = returned_values
# get the instance of the "main" model whose ID is instid
instance = get_by(self.session, self.model, instid, self.primary_key)
if instance is None:
return {_STATUS: 404}, 404
# If no relation is requested, just return the instance. Otherwise,
# get the value of the relation specified by `relationname`.
if relationname is None:
result = self.serialize(instance)
else:
related_value = getattr(instance, relationname)
# create a placeholder for the relations of the returned models
related_model = get_related_model(self.model, relationname)
relations = frozenset(get_relations(related_model))
deep = dict((r, {}) for r in relations)
if relationinstid is not None:
related_value_instance = get_by(self.session, related_model,
relationinstid)
if related_value_instance is None:
return {_STATUS: 404}, 404
result = to_dict(related_value_instance, deep)
else:
# for security purposes, don't transmit list as top-level JSON
if is_like_list(instance, relationname):
result = self._paginated(list(related_value), deep)
else:
result = to_dict(related_value, deep)
if result is None:
return {_STATUS: 404}, 404
for postprocessor in self.postprocessors['GET_SINGLE']:
returned_value = postprocessor(result=result)
if returned_value:
result = returned_value
return result
def _delete_many(self):
"""Deletes multiple instances of the model.
If search parameters are provided via the ``q`` query parameter, only
those instances matching the search parameters will be deleted.
If no instances were deleted, this returns a
:http:status:`404`. Otherwise, it returns a :http:status:`200` with the
number of deleted instances in the body of the response.
"""
# try to get search query from the request query parameters
try:
search_params = json.loads(request.args.get('q', '{}'))
except (TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode search query'), 400
for preprocessor in self.preprocessors['DELETE_MANY']:
preprocessor(search_params=search_params)
# perform a filtered search
try:
# HACK We need to ignore any ``order_by`` request from the client,
# because for some reason, SQLAlchemy does not allow calling
# delete() on a query that has an ``order_by()`` on it. If you
# attempt to call delete(), you get this error:
#
# sqlalchemy.exc.InvalidRequestError: Can't call Query.delete()
# when order_by() has been called
#
result = search(self.session, self.model, search_params,
_ignore_order_by=True)
except NoResultFound:
return dict(message='No result found'), 404
except MultipleResultsFound:
return dict(message='Multiple results found'), 400
except Exception as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
# for security purposes, don't transmit list as top-level JSON
if isinstance(result, Query):
# Implementation note: `synchronize_session=False`, described in
# the SQLAlchemy documentation for
# :meth:`sqlalchemy.orm.query.Query.delete`, states that this is
# the most efficient option for bulk deletion, and is reliable once
# the session has expired, which occurs after the session commit
# below.
num_deleted = result.delete(synchronize_session=False)
else:
self.session.delete(result)
num_deleted = 1
self.session.commit()
result = dict(num_deleted=num_deleted)
for postprocessor in self.postprocessors['DELETE_MANY']:
postprocessor(result=result, search_params=search_params)
        return (result, 200) if num_deleted > 0 else ({_STATUS: 404}, 404)
def delete(self, instid, relationname, relationinstid):
"""Removes the specified instance of the model with the specified name
from the database.
Although :http:method:`delete` is an idempotent method according to
:rfc:`2616`, idempotency only means that subsequent identical requests
cannot have additional side-effects. Since the response code is not a
side effect, this method responds with :http:status:`204` only if an
object is deleted, and with :http:status:`404` when nothing is deleted.
        If `relationname` is specified, the instance of the related model
        identified by `relationinstid` is removed from the relation instead
        of deleting the model instance itself.
.. versionadded:: 0.12.0
Added the `relationinstid` keyword argument.
.. versionadded:: 0.10.0
Added the `relationname` keyword argument.
"""
if instid is None:
# If no instance ID is provided, this request is an attempt to
# delete many instances of the model via a search with possible
# filters.
return self._delete_many()
was_deleted = False
for preprocessor in self.preprocessors['DELETE']:
            returned_values = preprocessor(instance_id=instid,
                                           relation_name=relationname,
                                           relation_instance_id=relationinstid)
if returned_values:
instid, relationname, relationinstid = returned_values
inst = get_by(self.session, self.model, instid, self.primary_key)
if relationname:
# If the request is ``DELETE /api/person/1/computers``, error 400.
if not relationinstid:
msg = ('Cannot DELETE entire "{0}"'
' relation').format(relationname)
return dict(message=msg), 400
# Otherwise, get the related instance to delete.
relation = getattr(inst, relationname)
related_model = get_related_model(self.model, relationname)
relation_instance = get_by(self.session, related_model,
relationinstid)
# Removes an object from the relation list.
relation.remove(relation_instance)
was_deleted = len(self.session.dirty) > 0
elif inst is not None:
self.session.delete(inst)
was_deleted = len(self.session.deleted) > 0
self.session.commit()
for postprocessor in self.postprocessors['DELETE_SINGLE']:
postprocessor(was_deleted=was_deleted)
return {}, 204 if was_deleted else 404
def post(self):
"""Creates a new instance of a given model based on request data.
This function parses the string contained in
        :attr:`flask.request.data` as a JSON object and then validates it with
a validator specified in the constructor of this class.
The :attr:`flask.request.data` attribute will be parsed as a JSON
object containing the mapping from field name to value to which to
initialize the created instance of the model.
        After that, it separates all columns that define relationships with
other entities, creates a model with the simple columns and then
creates instances of these submodels and associates them with the
related fields. This happens only at the first level of nesting.
Currently, this method can only handle instantiating a model with a
single level of relationship data.
"""
        # Default to an empty string so a missing Content-Type header does
        # not raise an AttributeError below.
        content_type = request.headers.get('Content-Type', '')
        content_is_json = content_type.startswith('application/json')
is_msie = _is_msie8or9()
# Request must have the Content-Type: application/json header, unless
# the User-Agent string indicates that the client is Microsoft Internet
# Explorer 8 or 9 (which has a fixed Content-Type of 'text/html'; see
# issue #267).
if not is_msie and not content_is_json:
msg = 'Request must have "Content-Type: application/json" header'
return dict(message=msg), 415
# try to read the parameters for the model from the body of the request
try:
# HACK Requests made from Internet Explorer 8 or 9 don't have the
# correct content type, so request.get_json() doesn't work.
if is_msie:
data = json.loads(request.get_data()) or {}
else:
data = request.get_json() or {}
except (BadRequest, TypeError, ValueError, OverflowError) as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
# apply any preprocessors to the POST arguments
for preprocessor in self.preprocessors['POST']:
returned_values = preprocessor(data=data)
if returned_values:
data = returned_values
try:
# Convert the dictionary representation into an instance of the
# model.
instance = self.deserialize(data)
# Add the created model to the session.
self.session.add(instance)
self.session.commit()
# Get the dictionary representation of the new instance.
result = self.serialize(instance)
            # Determine the value of the primary key for this instance and
            # URL-encode it (in case it is a Unicode string).
pk_name = self.primary_key or primary_key_name(instance)
primary_key = result[pk_name]
try:
primary_key = str(primary_key)
except UnicodeEncodeError:
primary_key = url_quote_plus(primary_key.encode('utf-8'))
# The URL at which a client can access the newly created instance
# of the model.
url = '{0}/{1}'.format(request.base_url, primary_key)
# Provide that URL in the Location header in the response.
headers = dict(Location=url)
for postprocessor in self.postprocessors['POST']:
returned_value = postprocessor(result=result)
if returned_value:
result = returned_value
return result, 201, headers
except self.validation_exceptions as exception:
return self._handle_validation_exception(exception)
def patch(self, instid, relationname, relationinstid):
"""Updates the instance specified by ``instid`` of the named model, or
updates multiple instances if ``instid`` is ``None``.
The :attr:`flask.request.data` attribute will be parsed as a JSON
object containing the mapping from field name to value to which to
update the specified instance or instances.
If ``instid`` is ``None``, the query string will be used to search for
instances (using the :func:`_search` method), and all matching
instances will be updated according to the content of the request data.
See the :func:`_search` documentation on more information about search
parameters for restricting the set of instances on which updates will
be made in this case.
This function ignores the `relationname` and `relationinstid` keyword
arguments.
.. versionadded:: 0.12.0
Added the `relationinstid` keyword argument.
.. versionadded:: 0.10.0
Added the `relationname` keyword argument.
"""
        # Default to an empty string so a missing Content-Type header does
        # not raise an AttributeError below.
        content_type = request.headers.get('Content-Type', '')
        content_is_json = content_type.startswith('application/json')
is_msie = _is_msie8or9()
# Request must have the Content-Type: application/json header, unless
# the User-Agent string indicates that the client is Microsoft Internet
# Explorer 8 or 9 (which has a fixed Content-Type of 'text/html'; see
# issue #267).
if not is_msie and not content_is_json:
msg = 'Request must have "Content-Type: application/json" header'
return dict(message=msg), 415
# try to load the fields/values to update from the body of the request
try:
# HACK Requests made from Internet Explorer 8 or 9 don't have the
# correct content type, so request.get_json() doesn't work.
if is_msie:
data = json.loads(request.get_data()) or {}
else:
data = request.get_json() or {}
except (BadRequest, TypeError, ValueError, OverflowError) as exception:
# this also happens when request.data is empty
current_app.logger.exception(str(exception))
return dict(message='Unable to decode data'), 400
# Check if the request is to patch many instances of the current model.
patchmany = instid is None
# Perform any necessary preprocessing.
if patchmany:
# Get the search parameters; all other keys in the `data`
# dictionary indicate a change in the model's field.
search_params = data.pop('q', {})
for preprocessor in self.preprocessors['PATCH_MANY']:
returned_values = preprocessor(search_params=search_params, data=data)
if returned_values:
search_params, data = returned_values
else:
for preprocessor in self.preprocessors['PATCH_SINGLE']:
returned_values = preprocessor(instance_id=instid, data=data)
if returned_values:
instid, data = returned_values
# Check for any request parameter naming a column which does not exist
# on the current model.
for field in data:
if not has_field(self.model, field):
msg = "Model does not have field '{0}'".format(field)
return dict(message=msg), 400
if patchmany:
try:
# create a SQLALchemy Query from the query parameter `q`
query = create_query(self.session, self.model, search_params)
except NotAuthorizedException:
return dict(message='Not Authorized'), 403
except Exception as exception:
current_app.logger.exception(str(exception))
return dict(message='Unable to construct query'), 400
else:
# create a SQLAlchemy Query which has exactly the specified row
query = query_by_primary_key(self.session, self.model, instid,
self.primary_key)
if query.count() == 0:
return {_STATUS: 404}, 404
assert query.count() == 1, 'Multiple rows with same ID'
try:
relations = self._update_relations(query, data)
except self.validation_exceptions as exception:
current_app.logger.exception(str(exception))
return self._handle_validation_exception(exception)
field_list = frozenset(data) ^ relations
data = dict((field, data[field]) for field in field_list)
# Special case: if there are any dates, convert the string form of the
# date into an instance of the Python ``datetime`` object.
data = strings_to_dates(self.model, data)
try:
# Let's update all instances present in the query
num_modified = 0
if data:
for item in query.all():
for field, value in data.items():
setattr(item, field, value)
num_modified += 1
self.session.commit()
except self.validation_exceptions as exception:
current_app.logger.exception(str(exception))
return self._handle_validation_exception(exception)
# Perform any necessary postprocessing.
if patchmany:
result = dict(num_modified=num_modified)
for postprocessor in self.postprocessors['PATCH_MANY']:
returned_value = postprocessor(query=query, result=result,
search_params=search_params)
if returned_value:
result = returned_value
else:
result = self._instid_to_dict(instid)
for postprocessor in self.postprocessors['PATCH_SINGLE']:
returned_value = postprocessor(result=result)
if returned_value:
result = returned_value
return result
def put(self, *args, **kw):
"""Alias for :meth:`patch`."""
return self.patch(*args, **kw)
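# A minimal, hypothetical client-side sketch (not part of this module)
# illustrating the search and creation formats documented in _search() and
# post() above. It assumes a running server exposing a Person model at
# /api/person and that the `requests` package is installed; the base URL,
# model, and field names are illustrative only.
import json

import requests

BASE = 'http://localhost:5000/api/person'

# Search: filters travel JSON-encoded in the `q` query parameter.
query = {
    'filters': [{'name': 'name', 'op': 'like', 'val': '%y%'}],
    'order_by': [{'field': 'age', 'direction': 'asc'}],
}
response = requests.get(BASE, params={'q': json.dumps(query)})
# The body has the paginated shape produced by _paginated():
# {"page": 1, "total_pages": ..., "num_results": ..., "objects": [...]}
for obj in response.json()['objects']:
    print(obj)

# Create: POST a JSON document; one level of nested relation data is
# allowed, matching the single-level handling in _dict_to_inst().
person = {'name': 'Mary', 'age': 19,
          'computers': [{'hostname': 'turing'}]}
response = requests.post(BASE, data=json.dumps(person),
                         headers={'Content-Type': 'application/json'})
print(response.status_code, response.headers.get('Location'))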
|
jkatzer/flask-restless
|
flask_restless/views.py
|
Python
|
agpl-3.0
| 72,094
|
# Copyright 2020 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Kpi Dashboard",
"summary": """
Create Dashboards using kpis""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "Creu Blanca,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/reporting-engine",
"depends": ["bus", "board", "base_sparse_field"],
"qweb": ["static/src/xml/dashboard.xml"],
"data": [
"wizards/kpi_dashboard_menu.xml",
"security/security.xml",
"security/ir.model.access.csv",
"templates/assets.xml",
"views/kpi_menu.xml",
"views/kpi_kpi.xml",
"views/kpi_dashboard.xml",
],
"demo": ["demo/demo_dashboard.xml"],
"maintainers": ["etobella"],
}
|
OCA/reporting-engine
|
kpi_dashboard/__manifest__.py
|
Python
|
agpl-3.0
| 797
|
import pytest
from postix.core.models import Ping
from ..factories import cashdesk_factory, ping_factory
@pytest.mark.django_db
def test_troubleshooter_ping_view(troubleshooter_client):
[ping_factory(ponged=(index % 3 != 0)) for index in range(10)]
desk = cashdesk_factory()
assert Ping.objects.count() == 10
response = troubleshooter_client.get('/troubleshooter/ping/')
assert response.status_code == 200
response = troubleshooter_client.post(
'/troubleshooter/ping/', {'cashdesk': desk.pk}, follow=True
)
assert response.status_code == 200
assert Ping.objects.count() == 11
|
c3cashdesk/c6sh
|
src/tests/troubleshooter/test_troubleshooter_views_ping.py
|
Python
|
agpl-3.0
| 624
|
{
    'name': 'Product Pack POS for IngAdhoc',
    'summary': 'Product packs on POS',
    'description': """
    This module is an extension for INGADHOC's module product_pack that
    will process product_pack pickings from POS sales.
    Note: this module works with Fixed price packs only.
    """,
    'version': '10.0.0.2',
    'category': 'Point of Sale',
    'author': 'DVIT.me',
    'website': 'http://dvit.me',
    'license': 'AGPL-3',
    'depends': ['product_pack', 'point_of_sale'],
'data': [],
'demo': [],
"images": [
'static/description/banner.png'
],
'installable': True,
'auto_install': True,
'application': False,
}
|
mohamedhagag/dvit-odoo
|
product_pack_pos/__manifest__.py
|
Python
|
agpl-3.0
| 665
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Poll.detailed_chart'
db.add_column('polls_poll', 'detailed_chart', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Poll.detailed_chart'
db.delete_column('polls_poll', 'detailed_chart')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'polls.poll': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Poll'},
'always_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category_set': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'category_set'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'demographic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'detailed_chart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'secondary_category_set': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'secondary_category_set'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'secondary_template': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'secondary_template'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template'", 'null': 'True', 'to': "orm['polls.PollCategorySet']"}),
'unknown_message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'polls.pollcategory': {
'Meta': {'unique_together': "(('name', 'category_set'),)", 'object_name': 'PollCategory'},
'category_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': "orm['polls.PollCategorySet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'polls.pollcategoryset': {
'Meta': {'object_name': 'PollCategorySet'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['polls.Poll']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'polls.pollkeyword': {
'Meta': {'object_name': 'PollKeyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keywords'", 'to': "orm['polls.Poll']"})
},
'polls.pollresponse': {
'Meta': {'ordering': "('-id',)", 'object_name': 'PollResponse'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_responses'", 'null': 'True', 'to': "orm['polls.PollCategory']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms_httprouter.Message']"}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['polls.Poll']"}),
'respondent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['polls.Respondent']"}),
'secondary_category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'secondary_responses'", 'null': 'True', 'to': "orm['polls.PollCategory']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '160'})
},
'polls.pollrule': {
'Meta': {'ordering': "('order', '-category')", 'object_name': 'PollRule'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['polls.PollCategory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lower_bound': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'match': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'numeric': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'upper_bound': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'polls.respondent': {
'Meta': {'object_name': 'Respondent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'active_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'db_index': 'True'}),
'last_response': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_respondent'", 'null': 'True', 'to': "orm['polls.PollResponse']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'polls.tracsettings': {
'Meta': {'object_name': 'TracSettings'},
'duplicate_message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recruitment_message': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'trac_off_response': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'trac_on_response': ('django.db.models.fields.CharField', [], {'max_length': '160'})
},
'rapidsms.backend': {
'Meta': {'object_name': 'Backend'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'rapidsms.connection': {
'Meta': {'object_name': 'Connection'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Backend']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'rapidsms_httprouter.message': {
'Meta': {'object_name': 'Message'},
'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['rapidsms.Connection']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_response_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['polls']
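# A hypothetical usage sketch (not part of the migration itself): applying
# this migration programmatically from a configured Django 1.x project with
# South installed. It assumes DJANGO_SETTINGS_MODULE is set; the target name
# matches this file's migration number.
if __name__ == '__main__':
    from django.core.management import call_command
    call_command('migrate', 'polls',
                 '0010_auto__add_field_poll_detailed_chart')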
|
tracfm/tracfm
|
tracfm/polls/migrations/0010_auto__add_field_poll_detailed_chart.py
|
Python
|
agpl-3.0
| 13,004
|
# -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
from kotori.version import __VERSION__
from pyramid.config import Configurator
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
settings['SOFTWARE_VERSION'] = __VERSION__
config = Configurator(settings=settings)
# Addons
config.include('pyramid_jinja2')
# http://docs.pylonsproject.org/projects/pyramid-jinja2/en/latest/#adding-or-overriding-a-renderer
config.add_jinja2_renderer('.html')
config.include('cornice')
# Views and routes
config.add_static_view('static/app', 'static/app', cache_max_age=0)
config.add_static_view('static/lib', 'static/lib', cache_max_age=60 * 24)
config.add_route('index', '/')
config.scan()
return config.make_wsgi_app()
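# A minimal standalone runner sketch, assuming waitress and the add-ons
# included above (pyramid_jinja2, cornice) are installed. In practice this
# factory is normally served from an INI file via pserve, so this runner is
# illustrative only.
if __name__ == '__main__':
    from waitress import serve
    application = main({}, **{'pyramid.reload_templates': 'false'})
    serve(application, host='127.0.0.1', port=6543)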
|
daq-tools/kotori
|
kotori/frontend/app.py
|
Python
|
agpl-3.0
| 843
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of medical_medicament,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# medical_medicament is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# medical_medicament is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with medical_medicament.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Medical Medicament',
'version': '8.0.1.0.0',
'author': 'ACSONE SA/NV, Odoo Community Association (OCA)',
'maintainer': 'ACSONE SA/NV, Odoo Community Association (OCA)',
'website': 'http://www.acsone.eu',
'license': 'AGPL-3',
'category': 'Medical',
'depends': [
'medical',
'product',
],
'summary': 'Introduce Medicament notion into the medical product',
'data': [
'security/ir.model.access.csv',
'data/medical_drug_form.xml',
'data/medical_drug_route.xml',
'views/product_product_view.xml',
'views/medical_medicament_view.xml',
'views/medical_drug_form_view.xml',
'views/medical_drug_route_view.xml',
],
'installable': True,
'application': True,
}
|
ShaheenHossain/eagle-medical
|
medical_medicament/__manifest__.py
|
Python
|
agpl-3.0
| 1,827
|
'''
Created on Mar 25, 2013
@author: dmitchell
'''
import datetime
import subprocess
import unittest
import uuid
from importlib import import_module
from xblock.fields import Scope
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import InsufficientSpecificationError, ItemNotFoundError, VersionConflictError, \
DuplicateItemError
from xmodule.modulestore.locator import CourseLocator, BlockUsageLocator, VersionTree, DefinitionLocator
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin
from pytz import UTC
from path import path
import re
import random
class SplitModuleTest(unittest.TestCase):
'''
The base set of tests manually populates a db w/ courses which have
versions. It creates unique collection names and removes them after all
tests finish.
'''
# Snippets of what would be in the django settings envs file
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'test_xmodule',
'collection': 'modulestore{0}'.format(uuid.uuid4().hex),
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': '',
'xblock_mixins': (InheritanceMixin, XModuleMixin)
}
MODULESTORE = {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
# don't create django dependency; so, duplicates common.py in envs
match = re.search(r'(.*?/common)(?:$|/)', path(__file__))
COMMON_ROOT = match.group(1)
modulestore = None
# These version_guids correspond to values hard-coded in fixture files
# used for these tests. The files live in mitx/fixtures/splitmongo_json/*
GUID_D0 = "1d00000000000000dddd0000" # v12345d
GUID_D1 = "1d00000000000000dddd1111" # v12345d1
GUID_D2 = "1d00000000000000dddd2222" # v23456d
GUID_D3 = "1d00000000000000dddd3333" # v12345d0
GUID_D4 = "1d00000000000000dddd4444" # v23456d0
GUID_D5 = "1d00000000000000dddd5555" # v345679d
GUID_P = "1d00000000000000eeee0000" # v23456p
@staticmethod
def bootstrapDB():
'''
Loads the initial data into the db ensuring the collection name is
unique.
'''
collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'
dbname = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['db']
processes = [
subprocess.Popen([
'mongoimport', '-d', dbname, '-c',
collection_prefix + collection, '--jsonArray',
'--file',
SplitModuleTest.COMMON_ROOT + '/test/data/splitmongo_json/' + collection + '.json'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
for collection in ('active_versions', 'structures', 'definitions')]
for p in processes:
stdout, stderr = p.communicate()
if p.returncode != 0:
print "Couldn't run mongoimport:"
print stdout
print stderr
raise Exception("DB did not init correctly")
@classmethod
def tearDownClass(cls):
collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'
if SplitModuleTest.modulestore:
for collection in ('active_versions', 'structures', 'definitions'):
modulestore().db.drop_collection(collection_prefix + collection)
# drop the modulestore to force re init
SplitModuleTest.modulestore = None
def findByIdInResult(self, collection, _id):
"""
Result is a collection of descriptors. Find the one whose block id
matches the _id.
"""
for element in collection:
if element.location.usage_id == _id:
return element
class SplitModuleCourseTests(SplitModuleTest):
'''
Course CRUD operation tests
'''
def test_get_courses(self):
courses = modulestore().get_courses(branch='draft')
# should have gotten 3 draft courses
self.assertEqual(len(courses), 3, "Wrong number of courses")
# check metadata -- NOTE no promised order
course = self.findByIdInResult(courses, "head12345")
self.assertEqual(course.location.course_id, "GreekHero")
self.assertEqual(
str(course.location.version_guid), self.GUID_D0,
"course version mismatch"
)
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 6, "wrong number of tabs")
self.assertEqual(
course.display_name, "The Ancient Greek Hero",
"wrong display name"
)
self.assertEqual(
course.advertised_start, "Fall 2013",
"advertised_start"
)
self.assertEqual(
len(course.children), 3,
"children")
self.assertEqual(str(course.definition_locator.definition_id), "ad00000000000000dddd0000")
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertEqual(str(course.previous_version), self.GUID_D1)
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
def test_branch_requests(self):
# query w/ branch qualifier (both draft and published)
def _verify_published_course(courses_published):
""" Helper function for verifying published course. """
self.assertEqual(len(courses_published), 1, len(courses_published))
course = self.findByIdInResult(courses_published, "head23456")
self.assertIsNotNone(course, "published courses")
self.assertEqual(course.location.course_id, "wonderful")
self.assertEqual(str(course.location.version_guid), self.GUID_P,
course.location.version_guid)
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 4, "wrong number of tabs")
self.assertEqual(course.display_name, "The most wonderful course",
course.display_name)
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0,
"children")
_verify_published_course(modulestore().get_courses(branch='published'))
# default for branch is 'published'.
_verify_published_course(modulestore().get_courses())
def test_search_qualifiers(self):
# query w/ search criteria
courses = modulestore().get_courses(branch='draft', qualifiers={'org': 'testx'})
self.assertEqual(len(courses), 2)
self.assertIsNotNone(self.findByIdInResult(courses, "head12345"))
self.assertIsNotNone(self.findByIdInResult(courses, "head23456"))
courses = modulestore().get_courses(
branch='draft',
qualifiers={'edited_on': {"$lt": datetime.datetime(2013, 3, 28, 15)}})
self.assertEqual(len(courses), 2)
courses = modulestore().get_courses(
branch='draft',
qualifiers={'org': 'testx', "prettyid": "test_course"})
self.assertEqual(len(courses), 1)
self.assertIsNotNone(self.findByIdInResult(courses, "head12345"))
def test_get_course(self):
'''
Test the various calling forms for get_course
'''
locator = CourseLocator(version_guid=self.GUID_D1)
course = modulestore().get_course(locator)
self.assertIsNone(course.location.course_id)
self.assertEqual(str(course.location.version_guid), self.GUID_D1)
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.graceperiod, datetime.timedelta(hours=2))
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0)
self.assertEqual(str(course.definition_locator.definition_id), "ad00000000000000dddd0001")
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.55})
locator = CourseLocator(course_id='GreekHero', branch='draft')
course = modulestore().get_course(locator)
self.assertEqual(course.location.course_id, "GreekHero")
self.assertEqual(str(course.location.version_guid), self.GUID_D0)
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.advertised_start, "Fall 2013")
self.assertEqual(len(course.children), 3)
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
locator = CourseLocator(course_id='wonderful', branch='published')
course = modulestore().get_course(locator)
self.assertEqual(course.location.course_id, "wonderful")
self.assertEqual(str(course.location.version_guid), self.GUID_P)
locator = CourseLocator(course_id='wonderful', branch='draft')
course = modulestore().get_course(locator)
self.assertEqual(str(course.location.version_guid), self.GUID_D2)
def test_get_course_negative(self):
# Now negative testing
self.assertRaises(InsufficientSpecificationError,
modulestore().get_course, CourseLocator(course_id='edu.meh.blah'))
self.assertRaises(ItemNotFoundError,
modulestore().get_course, CourseLocator(course_id='nosuchthing', branch='draft'))
self.assertRaises(ItemNotFoundError,
modulestore().get_course,
CourseLocator(course_id='GreekHero', branch='published'))
def test_course_successors(self):
"""
get_course_successors(course_locator, version_history_depth=1)
"""
locator = CourseLocator(version_guid=self.GUID_D3)
result = modulestore().get_course_successors(locator)
self.assertIsInstance(result, VersionTree)
self.assertIsNone(result.locator.course_id)
self.assertEqual(str(result.locator.version_guid), self.GUID_D3)
self.assertEqual(len(result.children), 1)
self.assertEqual(str(result.children[0].locator.version_guid), self.GUID_D1)
self.assertEqual(len(result.children[0].children), 0, "descended more than one level")
result = modulestore().get_course_successors(locator, version_history_depth=2)
self.assertEqual(len(result.children), 1)
self.assertEqual(str(result.children[0].locator.version_guid), self.GUID_D1)
self.assertEqual(len(result.children[0].children), 1)
result = modulestore().get_course_successors(locator, version_history_depth=99)
self.assertEqual(len(result.children), 1)
self.assertEqual(str(result.children[0].locator.version_guid), self.GUID_D1)
self.assertEqual(len(result.children[0].children), 1)
class SplitModuleItemTests(SplitModuleTest):
'''
Item read tests including inheritance
'''
def test_has_item(self):
'''
has_item(BlockUsageLocator)
'''
course_id = 'GreekHero'
# positive tests of various forms
locator = BlockUsageLocator(version_guid=self.GUID_D1, usage_id='head12345')
self.assertTrue(modulestore().has_item(course_id, locator),
"couldn't find in %s" % self.GUID_D1)
locator = BlockUsageLocator(course_id='GreekHero', usage_id='head12345', branch='draft')
self.assertTrue(
modulestore().has_item(locator.course_id, locator),
"couldn't find in 12345"
)
self.assertTrue(
modulestore().has_item(locator.course_id, BlockUsageLocator(
course_id=locator.course_id,
branch='draft',
usage_id=locator.usage_id
)),
"couldn't find in draft 12345"
)
self.assertFalse(
modulestore().has_item(locator.course_id, BlockUsageLocator(
course_id=locator.course_id,
branch='published',
usage_id=locator.usage_id)),
"found in published 12345"
)
locator.branch = 'draft'
self.assertTrue(
modulestore().has_item(locator.course_id, locator),
"not found in draft 12345"
)
# not a course obj
locator = BlockUsageLocator(course_id='GreekHero', usage_id='chapter1', branch='draft')
self.assertTrue(
modulestore().has_item(locator.course_id, locator),
"couldn't find chapter1"
)
# in published course
locator = BlockUsageLocator(course_id="wonderful", usage_id="head23456", branch='draft')
self.assertTrue(
modulestore().has_item(
locator.course_id,
BlockUsageLocator(course_id=locator.course_id, usage_id=locator.usage_id, branch='published')
), "couldn't find in 23456"
)
locator.branch = 'published'
self.assertTrue(modulestore().has_item(course_id, locator), "couldn't find in 23456")
def test_negative_has_item(self):
# negative tests--not found
# no such course or block
course_id = 'GreekHero'
locator = BlockUsageLocator(course_id="doesnotexist", usage_id="head23456", branch='draft')
self.assertFalse(modulestore().has_item(course_id, locator))
locator = BlockUsageLocator(course_id="wonderful", usage_id="doesnotexist", branch='draft')
self.assertFalse(modulestore().has_item(course_id, locator))
# negative tests--insufficient specification
self.assertRaises(InsufficientSpecificationError, BlockUsageLocator)
self.assertRaises(InsufficientSpecificationError,
modulestore().has_item, None, BlockUsageLocator(version_guid=self.GUID_D1))
self.assertRaises(InsufficientSpecificationError,
modulestore().has_item, None, BlockUsageLocator(course_id='GreekHero'))
def test_get_item(self):
'''
get_item(blocklocator)
'''
# positive tests of various forms
locator = BlockUsageLocator(version_guid=self.GUID_D1, usage_id='head12345')
block = modulestore().get_item(locator)
self.assertIsInstance(block, CourseDescriptor)
# get_instance just redirects to get_item, ignores course_id
self.assertIsInstance(modulestore().get_instance("course_id", locator), CourseDescriptor)
def verify_greek_hero(block):
self.assertEqual(block.location.course_id, "GreekHero")
self.assertEqual(len(block.tabs), 6, "wrong number of tabs")
self.assertEqual(block.display_name, "The Ancient Greek Hero")
self.assertEqual(block.advertised_start, "Fall 2013")
self.assertEqual(len(block.children), 3)
self.assertEqual(str(block.definition_locator.definition_id), "ad00000000000000dddd0000")
# check dates and graders--forces loading of descriptor
self.assertEqual(block.edited_by, "testassist@edx.org")
self.assertDictEqual(
block.grade_cutoffs, {"Pass": 0.45},
)
locator = BlockUsageLocator(course_id='GreekHero', usage_id='head12345', branch='draft')
verify_greek_hero(modulestore().get_item(locator))
# get_instance just redirects to get_item, ignores course_id
verify_greek_hero(modulestore().get_instance("course_id", locator))
# try to look up other branches
self.assertRaises(ItemNotFoundError,
modulestore().get_item,
BlockUsageLocator(course_id=locator.as_course_locator(),
usage_id=locator.usage_id,
branch='published'))
locator.branch = 'draft'
self.assertIsInstance(
modulestore().get_item(locator),
CourseDescriptor
)
def test_get_non_root(self):
# not a course obj
locator = BlockUsageLocator(course_id='GreekHero', usage_id='chapter1', branch='draft')
block = modulestore().get_item(locator)
self.assertEqual(block.location.course_id, "GreekHero")
self.assertEqual(block.category, 'chapter')
self.assertEqual(str(block.definition_locator.definition_id), "cd00000000000000dddd0020")
self.assertEqual(block.display_name, "Hercules")
self.assertEqual(block.edited_by, "testassist@edx.org")
# in published course
locator = BlockUsageLocator(course_id="wonderful", usage_id="head23456", branch='published')
self.assertIsInstance(
modulestore().get_item(locator),
CourseDescriptor
)
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(course_id="doesnotexist", usage_id="head23456", branch='draft')
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
locator = BlockUsageLocator(course_id="wonderful", usage_id="doesnotexist", branch='draft')
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
# negative tests--insufficient specification
with self.assertRaises(InsufficientSpecificationError):
modulestore().get_item(BlockUsageLocator(version_guid=self.GUID_D1))
with self.assertRaises(InsufficientSpecificationError):
modulestore().get_item(BlockUsageLocator(course_id='GreekHero', branch='draft'))
# pylint: disable=W0212
def test_matching(self):
'''
test the block and value matches help functions
'''
self.assertTrue(modulestore()._value_matches('help', 'help'))
self.assertFalse(modulestore()._value_matches('help', 'Help'))
self.assertTrue(modulestore()._value_matches(['distract', 'help', 'notme'], 'help'))
self.assertFalse(modulestore()._value_matches(['distract', 'Help', 'notme'], 'help'))
self.assertFalse(modulestore()._value_matches({'field': ['distract', 'Help', 'notme']}, {'field': 'help'}))
self.assertFalse(modulestore()._value_matches(['distract', 'Help', 'notme'], {'field': 'help'}))
self.assertTrue(modulestore()._value_matches(
{'field': ['distract', 'help', 'notme'],
'irrelevant': 2},
{'field': 'help'}))
self.assertTrue(modulestore()._value_matches('I need some help', {'$regex': 'help'}))
self.assertTrue(modulestore()._value_matches(['I need some help', 'today'], {'$regex': 'help'}))
self.assertFalse(modulestore()._value_matches('I need some help', {'$regex': 'Help'}))
self.assertFalse(modulestore()._value_matches(['I need some help', 'today'], {'$regex': 'Help'}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'c': None}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'c': None}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 2}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'c': 1}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'c': 1}))
def test_get_items(self):
'''
get_items(locator, qualifiers, [branch])
'''
locator = CourseLocator(version_guid=self.GUID_D0)
# get all modules
matches = modulestore().get_items(locator)
self.assertEqual(len(matches), 6)
matches = modulestore().get_items(locator, qualifiers={})
self.assertEqual(len(matches), 6)
matches = modulestore().get_items(locator, qualifiers={'category': 'chapter'})
self.assertEqual(len(matches), 3)
matches = modulestore().get_items(locator, qualifiers={'category': 'garbage'})
self.assertEqual(len(matches), 0)
matches = modulestore().get_items(
locator,
qualifiers=
{
'category': 'chapter',
'fields': {'display_name': {'$regex': 'Hera'}}
}
)
self.assertEqual(len(matches), 2)
matches = modulestore().get_items(locator, qualifiers={'fields': {'children': 'chapter2'}})
self.assertEqual(len(matches), 1)
self.assertEqual(matches[0].location.usage_id, 'head12345')
def test_get_parents(self):
'''
get_parent_locations(locator, [usage_id], [branch]): [BlockUsageLocator]
'''
locator = BlockUsageLocator(course_id="GreekHero", branch='draft', usage_id='chapter1')
parents = modulestore().get_parent_locations(locator)
self.assertEqual(len(parents), 1)
self.assertEqual(parents[0].usage_id, 'head12345')
self.assertEqual(parents[0].course_id, "GreekHero")
locator.usage_id = 'chapter2'
parents = modulestore().get_parent_locations(locator)
self.assertEqual(len(parents), 1)
self.assertEqual(parents[0].usage_id, 'head12345')
locator.usage_id = 'nosuchblock'
parents = modulestore().get_parent_locations(locator)
self.assertEqual(len(parents), 0)
def test_get_children(self):
"""
Test the existing get_children method on xdescriptors
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="head12345", branch='draft')
block = modulestore().get_item(locator)
children = block.get_children()
expected_ids = [
"chapter1", "chapter2", "chapter3"
]
for child in children:
self.assertEqual(child.category, "chapter")
self.assertIn(child.location.usage_id, expected_ids)
expected_ids.remove(child.location.usage_id)
self.assertEqual(len(expected_ids), 0)
class TestItemCrud(SplitModuleTest):
"""
Test create update and delete of items
"""
# DHM do I need to test this case which I believe won't work:
# 1) fetch a course and some of its blocks
# 2) do a series of CRUD operations on those previously fetched elements
# The problem here will be that the version_guid of the items will be the version at time of fetch.
# Each separate save will change the head version; so, the 2nd piecemeal change will flag the version
# conflict. That is, if versions are v0..vn and start as v0 in initial fetch, the first CRUD op will
# say it's changing an object from v0, splitMongo will process it and make the current head v1, the next
# crud op will pass in its v0 element and splitMongo will flag the version conflict.
# What I don't know is how realistic this test is and whether to wrap the modulestore with a higher level
# transactional operation which manages the version change or make the threading cache reason out whether or
# not the changes are independent and additive and thus non-conflicting.
# A use case I expect is
# (client) change this metadata
# (server) done, here's the new info which, btw, updates the course version to v1
# (client) add these children to this other node (which says it came from v0 or
# will the client have refreshed the version before doing the op?)
    # In this case, having a server side transactional model won't help b/c the bug is a long-transaction
    # on the client where it would be a mistake for the server to assume anything about client consistency. The best
# the server could do would be to see if the parent's children changed at all since v0.
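    # Hedged illustration of the optimistic-versioning conflict sketched in the
    # comment above, using a toy in-memory head pointer; the names and flow are
    # assumptions for exposition, not the modulestore API.
    @staticmethod
    def _version_conflict_sketch():
        head = {'version': 'v0'}
        def update(expected, new):
            # reject any change that claims a version the head has moved past
            if head['version'] != expected:
                raise RuntimeError('conflict: head moved past %s' % expected)
            head['version'] = new
        update('v0', 'v1')      # first piecemeal change succeeds; head is now v1
        try:
            update('v0', 'v2')  # second change still claims v0 and gets flagged
        except RuntimeError:
            pass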
def test_create_minimal_item(self):
"""
        create_item(course_or_parent_locator, category, user, definition_locator=None, fields): new_descriptor
"""
# grab link to course to ensure new versioning works
locator = CourseLocator(course_id="GreekHero", branch='draft')
premod_course = modulestore().get_course(locator)
premod_time = datetime.datetime.now(UTC) - datetime.timedelta(seconds=1)
# add minimal one w/o a parent
category = 'sequential'
new_module = modulestore().create_item(
locator, category, 'user123',
fields={'display_name': 'new sequential'}
)
# check that course version changed and course's previous is the other one
self.assertEqual(new_module.location.course_id, "GreekHero")
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
self.assertIsNone(locator.version_guid, "Version inadvertently filled in")
current_course = modulestore().get_course(locator)
self.assertEqual(new_module.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location)
self.assertEqual(history_info['previous_version'], premod_course.location.version_guid)
self.assertEqual(str(history_info['original_version']), self.GUID_D3)
self.assertEqual(history_info['edited_by'], "user123")
self.assertGreaterEqual(history_info['edited_on'], premod_time)
self.assertLessEqual(history_info['edited_on'], datetime.datetime.now(UTC))
# check block's info: category, definition_locator, and display_name
self.assertEqual(new_module.category, 'sequential')
self.assertIsNotNone(new_module.definition_locator)
self.assertEqual(new_module.display_name, 'new sequential')
# check that block does not exist in previous version
locator = BlockUsageLocator(
version_guid=premod_course.location.version_guid,
usage_id=new_module.location.usage_id
)
self.assertRaises(ItemNotFoundError, modulestore().get_item, locator)
def test_create_parented_item(self):
"""
Test create_item w/ specifying the parent of the new item
"""
locator = BlockUsageLocator(course_id="wonderful", usage_id="head23456", branch='draft')
premod_course = modulestore().get_course(locator)
category = 'chapter'
new_module = modulestore().create_item(
locator, category, 'user123',
fields={'display_name': 'new chapter'},
definition_locator=DefinitionLocator("cd00000000000000dddd0022")
)
# check that course version changed and course's previous is the other one
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
parent = modulestore().get_item(locator)
self.assertIn(new_module.location.usage_id, parent.children)
self.assertEqual(str(new_module.definition_locator.definition_id), "cd00000000000000dddd0022")
def test_unique_naming(self):
"""
        Check that 2 modules of the same type get unique usage_ids. Also check that if creation provides
a definition id and new def data that it branches the definition in the db.
Actually, this tries to test all create_item features not tested above.
"""
locator = BlockUsageLocator(course_id="contender", usage_id="head345679", branch='draft')
category = 'problem'
premod_time = datetime.datetime.now(UTC) - datetime.timedelta(seconds=1)
new_payload = "<problem>empty</problem>"
new_module = modulestore().create_item(
locator, category, 'anotheruser',
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
another_module = modulestore().create_item(
locator, category, 'anotheruser',
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=DefinitionLocator("0d00000040000000dddd0031"),
)
# check that course version changed and course's previous is the other one
parent = modulestore().get_item(locator)
self.assertNotEqual(new_module.location.usage_id, another_module.location.usage_id)
self.assertIn(new_module.location.usage_id, parent.children)
self.assertIn(another_module.location.usage_id, parent.children)
self.assertEqual(new_module.data, new_payload)
self.assertEqual(another_module.data, another_payload)
# check definition histories
new_history = modulestore().get_definition_history_info(new_module.definition_locator)
self.assertIsNone(new_history['previous_version'])
self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)
self.assertEqual(new_history['edited_by'], "anotheruser")
self.assertLessEqual(new_history['edited_on'], datetime.datetime.now(UTC))
self.assertGreaterEqual(new_history['edited_on'], premod_time)
another_history = modulestore().get_definition_history_info(another_module.definition_locator)
self.assertEqual(str(another_history['previous_version']), '0d00000040000000dddd0031')
def test_create_continue_version(self):
"""
Test create_item using the continue_version flag
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
new_course = modulestore().create_course('test_org', 'test_transaction', user)
new_course_locator = new_course.location.as_course_locator()
index_history_info = modulestore().get_course_history_info(new_course.location)
course_block_prev_version = new_course.previous_version
course_block_update_version = new_course.update_version
self.assertIsNotNone(new_course_locator.version_guid, "Want to test a definite version")
versionless_course_locator = CourseLocator(
course_id=new_course_locator.course_id, branch=new_course_locator.branch
)
# positive simple case: no force, add chapter
new_ele = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 1'},
continue_version=True
)
# version info shouldn't change
self.assertEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.update_version, new_ele.location.version_guid)
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertEqual(refetch_course.location.version_guid, new_course.location.version_guid)
self.assertEqual(refetch_course.previous_version, course_block_prev_version)
self.assertEqual(refetch_course.update_version, course_block_update_version)
refetch_index_history_info = modulestore().get_course_history_info(refetch_course.location)
self.assertEqual(refetch_index_history_info, index_history_info)
self.assertIn(new_ele.location.usage_id, refetch_course.children)
# try to create existing item
with self.assertRaises(DuplicateItemError):
_fail = modulestore().create_item(
new_course.location, 'chapter', user,
usage_id=new_ele.location.usage_id,
fields={'display_name': 'chapter 2'},
continue_version=True
)
# start a new transaction
new_ele = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 2'},
continue_version=False
)
transaction_guid = new_ele.location.version_guid
# ensure force w/ continue gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 2'},
force=True, continue_version=True
)
# ensure trying to continue the old one gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_item(
new_course.location, 'chapter', user,
fields={'display_name': 'chapter 3'},
continue_version=True
)
# add new child to old parent in continued (leave off version_guid)
course_module_locator = BlockUsageLocator(
course_id=new_course.location.course_id,
usage_id=new_course.location.usage_id,
branch=new_course.location.branch
)
new_ele = modulestore().create_item(
course_module_locator, 'chapter', user,
fields={'display_name': 'chapter 4'},
continue_version=True
)
self.assertNotEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.location.version_guid, transaction_guid)
# check children, previous_version
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertIn(new_ele.location.usage_id, refetch_course.children)
self.assertEqual(refetch_course.previous_version, course_block_update_version)
self.assertEqual(refetch_course.update_version, transaction_guid)
def test_update_metadata(self):
"""
        test updating an item's metadata, ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="problem3_2", branch='draft')
problem = modulestore().get_item(locator)
pre_def_id = problem.definition_locator.definition_id
pre_version_guid = problem.location.version_guid
self.assertIsNotNone(pre_def_id)
self.assertIsNotNone(pre_version_guid)
premod_time = datetime.datetime.now(UTC) - datetime.timedelta(seconds=1)
self.assertNotEqual(problem.max_attempts, 4, "Invalidates rest of test")
problem.max_attempts = 4
problem.save() # decache above setting into the kvs
updated_problem = modulestore().update_item(problem, 'changeMaven')
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(updated_problem.max_attempts, 4)
# refetch to ensure original didn't change
original_location = BlockUsageLocator(
version_guid=pre_version_guid,
usage_id=problem.location.usage_id
)
problem = modulestore().get_item(original_location)
self.assertNotEqual(problem.max_attempts, 4, "original changed")
current_course = modulestore().get_course(locator)
self.assertEqual(updated_problem.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location)
self.assertEqual(history_info['previous_version'], pre_version_guid)
self.assertEqual(str(history_info['original_version']), self.GUID_D3)
self.assertEqual(history_info['edited_by'], "changeMaven")
self.assertGreaterEqual(history_info['edited_on'], premod_time)
self.assertLessEqual(history_info['edited_on'], datetime.datetime.now(UTC))
def test_update_children(self):
"""
test updating an item's children ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="chapter3", branch='draft')
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
# reorder children
self.assertGreater(len(block.children), 0, "meaningless test")
moved_child = block.children.pop()
block.save() # decache model changes
updated_problem = modulestore().update_item(block, 'childchanger')
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(updated_problem.children, block.children)
self.assertNotIn(moved_child, updated_problem.children)
locator.usage_id = "chapter1"
other_block = modulestore().get_item(locator)
other_block.children.append(moved_child)
other_block.save() # decache model changes
other_updated = modulestore().update_item(other_block, 'childchanger')
self.assertIn(moved_child, other_updated.children)
def test_update_definition(self):
"""
        test updating an item's definition: ensure both the definition and the course get versioned
"""
locator = BlockUsageLocator(course_id="GreekHero", usage_id="head12345", branch='draft')
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
block.grading_policy['GRADER'][0]['min_count'] = 13
block.save() # decache model changes
updated_block = modulestore().update_item(block, 'definition_changer')
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
def test_update_manifold(self):
"""
Test updating metadata, children, and definition in a single call ensuring all the versioning occurs
"""
# first add 2 children to the course for the update to manipulate
locator = BlockUsageLocator(course_id="contender", usage_id="head345679", branch='draft')
category = 'problem'
new_payload = "<problem>empty</problem>"
modulestore().create_item(
locator, category, 'test_update_manifold',
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
modulestore().create_item(
locator, category, 'test_update_manifold',
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=DefinitionLocator("0d00000040000000dddd0031"),
)
# pylint: disable=W0212
modulestore()._clear_cache()
# now begin the test
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
self.assertNotEqual(block.grading_policy['GRADER'][0]['min_count'], 13)
block.grading_policy['GRADER'][0]['min_count'] = 13
block.children = block.children[1:] + [block.children[0]]
block.advertised_start = "Soon"
block.save() # decache model changes
updated_block = modulestore().update_item(block, "test_update_manifold")
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
self.assertEqual(updated_block.children[0], block.children[0])
self.assertEqual(updated_block.advertised_start, "Soon")
def test_delete_item(self):
course = self.create_course_for_deletion()
self.assertRaises(ValueError,
modulestore().delete_item,
course.location,
'deleting_user')
reusable_location = BlockUsageLocator(
course_id=course.location.course_id,
usage_id=course.location.usage_id,
branch='draft')
# delete a leaf
problems = modulestore().get_items(reusable_location, {'category': 'problem'})
locn_to_del = problems[0].location
new_course_loc = modulestore().delete_item(locn_to_del, 'deleting_user', delete_children=True)
deleted = BlockUsageLocator(course_id=reusable_location.course_id,
branch=reusable_location.branch,
usage_id=locn_to_del.usage_id)
self.assertFalse(modulestore().has_item(reusable_location.course_id, deleted))
self.assertRaises(VersionConflictError, modulestore().has_item, reusable_location.course_id, locn_to_del)
locator = BlockUsageLocator(
version_guid=locn_to_del.version_guid,
usage_id=locn_to_del.usage_id
)
self.assertTrue(modulestore().has_item(reusable_location.course_id, locator))
self.assertNotEqual(new_course_loc.version_guid, course.location.version_guid)
# delete a subtree
nodes = modulestore().get_items(reusable_location, {'category': 'chapter'})
new_course_loc = modulestore().delete_item(nodes[0].location, 'deleting_user', delete_children=True)
# check subtree
def check_subtree(node):
if node:
node_loc = node.location
self.assertFalse(modulestore().has_item(reusable_location.course_id,
BlockUsageLocator(
course_id=node_loc.course_id,
branch=node_loc.branch,
usage_id=node.location.usage_id)))
locator = BlockUsageLocator(
version_guid=node.location.version_guid,
usage_id=node.location.usage_id)
self.assertTrue(modulestore().has_item(reusable_location.course_id, locator))
if node.has_children:
for sub in node.get_children():
check_subtree(sub)
check_subtree(nodes[0])
def create_course_for_deletion(self):
course = modulestore().create_course('nihilx', 'deletion', 'deleting_user')
root = BlockUsageLocator(
course_id=course.location.course_id,
usage_id=course.location.usage_id,
branch='draft')
for _ in range(4):
self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])
return modulestore().get_item(root)
def create_subtree_for_deletion(self, parent, category_queue):
if not category_queue:
return
node = modulestore().create_item(parent, category_queue[0], 'deleting_user')
node_loc = BlockUsageLocator(parent.as_course_locator(), usage_id=node.location.usage_id)
for _ in range(4):
self.create_subtree_for_deletion(node_loc, category_queue[1:])
class TestCourseCreation(SplitModuleTest):
"""
Test create_course, duh :-)
"""
def test_simple_creation(self):
"""
The simplest case but probing all expected results from it.
"""
# Oddly getting differences of 200nsec
pre_time = datetime.datetime.now(UTC) - datetime.timedelta(milliseconds=1)
new_course = modulestore().create_course('test_org', 'test_course', 'create_user')
new_locator = new_course.location
# check index entry
index_info = modulestore().get_course_index_info(new_locator)
self.assertEqual(index_info['org'], 'test_org')
self.assertEqual(index_info['prettyid'], 'test_course')
self.assertGreaterEqual(index_info["edited_on"], pre_time)
self.assertLessEqual(index_info["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(index_info['edited_by'], 'create_user')
# check structure info
structure_info = modulestore().get_course_history_info(new_locator)
self.assertEqual(structure_info['original_version'], index_info['versions']['draft'])
self.assertIsNone(structure_info['previous_version'])
self.assertGreaterEqual(structure_info["edited_on"], pre_time)
self.assertLessEqual(structure_info["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(structure_info['edited_by'], 'create_user')
# check the returned course object
self.assertIsInstance(new_course, CourseDescriptor)
self.assertEqual(new_course.category, 'course')
self.assertFalse(new_course.show_calculator)
self.assertTrue(new_course.allow_anonymous)
self.assertEqual(len(new_course.children), 0)
self.assertEqual(new_course.edited_by, "create_user")
self.assertEqual(len(new_course.grading_policy['GRADER']), 4)
self.assertDictEqual(new_course.grade_cutoffs, {"Pass": 0.5})
def test_cloned_course(self):
"""
Test making a course which points to an existing draft and published but not making any changes to either.
"""
pre_time = datetime.datetime.now(UTC)
original_locator = CourseLocator(course_id="wonderful", branch='draft')
original_index = modulestore().get_course_index_info(original_locator)
new_draft = modulestore().create_course(
'leech', 'best_course', 'leech_master', id_root='best',
versions_dict=original_index['versions'])
new_draft_locator = new_draft.location
self.assertRegexpMatches(new_draft_locator.course_id, r'best.*')
# the edited_by and other meta fields on the new course will be the original author not this one
self.assertEqual(new_draft.edited_by, 'test@edx.org')
self.assertLess(new_draft.edited_on, pre_time)
self.assertEqual(new_draft.location.version_guid, original_index['versions']['draft'])
# however the edited_by and other meta fields on course_index will be this one
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertGreaterEqual(new_index["edited_on"], pre_time)
self.assertLessEqual(new_index["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(new_index['edited_by'], 'leech_master')
new_published_locator = CourseLocator(course_id=new_draft_locator.course_id, branch='published')
new_published = modulestore().get_course(new_published_locator)
self.assertEqual(new_published.edited_by, 'test@edx.org')
self.assertLess(new_published.edited_on, pre_time)
self.assertEqual(new_published.location.version_guid, original_index['versions']['published'])
# changing this course will not change the original course
# using new_draft.location will insert the chapter under the course root
new_item = modulestore().create_item(
new_draft.location, 'chapter', 'leech_master',
fields={'display_name': 'new chapter'}
)
new_draft_locator.version_guid = None
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertNotEqual(new_index['versions']['draft'], original_index['versions']['draft'])
new_draft = modulestore().get_course(new_draft_locator)
self.assertEqual(new_item.edited_by, 'leech_master')
self.assertGreaterEqual(new_item.edited_on, pre_time)
self.assertNotEqual(new_item.location.version_guid, original_index['versions']['draft'])
self.assertNotEqual(new_draft.location.version_guid, original_index['versions']['draft'])
structure_info = modulestore().get_course_history_info(new_draft_locator)
self.assertGreaterEqual(structure_info["edited_on"], pre_time)
self.assertLessEqual(structure_info["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(structure_info['edited_by'], 'leech_master')
original_course = modulestore().get_course(original_locator)
self.assertEqual(original_course.location.version_guid, original_index['versions']['draft'])
self.assertFalse(
modulestore().has_item(new_draft_locator.course_id, BlockUsageLocator(
original_locator,
usage_id=new_item.location.usage_id
))
)
def test_derived_course(self):
"""
Create a new course which overrides metadata and course_data
"""
pre_time = datetime.datetime.now(UTC)
original_locator = CourseLocator(course_id="contender", branch='draft')
original = modulestore().get_course(original_locator)
original_index = modulestore().get_course_index_info(original_locator)
fields = {}
for field in original.fields.values():
if field.scope == Scope.content and field.name != 'location':
fields[field.name] = getattr(original, field.name)
elif field.scope == Scope.settings:
fields[field.name] = getattr(original, field.name)
fields['grading_policy']['GRADE_CUTOFFS'] = {'A': .9, 'B': .8, 'C': .65}
fields['display_name'] = 'Derivative'
new_draft = modulestore().create_course(
'leech', 'derivative', 'leech_master', id_root='counter',
versions_dict={'draft': original_index['versions']['draft']},
fields=fields
)
new_draft_locator = new_draft.location
self.assertRegexpMatches(new_draft_locator.course_id, r'counter.*')
# the edited_by and other meta fields on the new course will be the original author not this one
self.assertEqual(new_draft.edited_by, 'leech_master')
self.assertGreaterEqual(new_draft.edited_on, pre_time)
self.assertNotEqual(new_draft.location.version_guid, original_index['versions']['draft'])
# however the edited_by and other meta fields on course_index will be this one
new_index = modulestore().get_course_index_info(new_draft_locator)
self.assertGreaterEqual(new_index["edited_on"], pre_time)
self.assertLessEqual(new_index["edited_on"], datetime.datetime.now(UTC))
self.assertEqual(new_index['edited_by'], 'leech_master')
self.assertEqual(new_draft.display_name, fields['display_name'])
self.assertDictEqual(
new_draft.grading_policy['GRADE_CUTOFFS'],
fields['grading_policy']['GRADE_CUTOFFS']
)
def test_update_course_index(self):
"""
        Test changing the org, pretty id, etc. of a course. Test that it doesn't allow changing the id, etc.
"""
locator = CourseLocator(course_id="GreekHero", branch='draft')
modulestore().update_course_index(locator, {'org': 'funkyU'})
course_info = modulestore().get_course_index_info(locator)
self.assertEqual(course_info['org'], 'funkyU')
modulestore().update_course_index(locator, {'org': 'moreFunky', 'prettyid': 'Ancient Greek Demagods'})
course_info = modulestore().get_course_index_info(locator)
self.assertEqual(course_info['org'], 'moreFunky')
self.assertEqual(course_info['prettyid'], 'Ancient Greek Demagods')
self.assertRaises(ValueError, modulestore().update_course_index, locator, {'_id': 'funkygreeks'})
with self.assertRaises(ValueError):
modulestore().update_course_index(
locator,
{'edited_on': datetime.datetime.now(UTC)}
)
with self.assertRaises(ValueError):
modulestore().update_course_index(
locator,
{'edited_by': 'sneak'}
)
self.assertRaises(ValueError, modulestore().update_course_index, locator,
{'versions': {'draft': self.GUID_D1}})
# an allowed but not necessarily recommended way to revert the draft version
versions = course_info['versions']
versions['draft'] = self.GUID_D1
modulestore().update_course_index(locator, {'versions': versions}, update_versions=True)
course = modulestore().get_course(locator)
self.assertEqual(str(course.location.version_guid), self.GUID_D1)
# an allowed but not recommended way to publish a course
versions['published'] = self.GUID_D1
modulestore().update_course_index(locator, {'versions': versions}, update_versions=True)
course = modulestore().get_course(CourseLocator(course_id=locator.course_id, branch="published"))
self.assertEqual(str(course.location.version_guid), self.GUID_D1)
def test_create_with_root(self):
"""
Test create_course with a specified root id and category
"""
user = random.getrandbits(32)
new_course = modulestore().create_course(
'test_org', 'test_transaction', user,
root_usage_id='top', root_category='chapter'
)
self.assertEqual(new_course.location.usage_id, 'top')
self.assertEqual(new_course.category, 'chapter')
# look at db to verify
db_structure = modulestore().structures.find_one({
'_id': new_course.location.as_object_id(new_course.location.version_guid)
})
self.assertIsNotNone(db_structure, "Didn't find course")
self.assertNotIn('course', db_structure['blocks'])
self.assertIn('top', db_structure['blocks'])
self.assertEqual(db_structure['blocks']['top']['category'], 'chapter')
class TestInheritance(SplitModuleTest):
"""
Test the metadata inheritance mechanism.
"""
def test_inheritance(self):
"""
The actual test
"""
# Note, not testing value where defined (course) b/c there's no
# defined accessor for it on CourseDescriptor.
locator = BlockUsageLocator(course_id="GreekHero", usage_id="problem3_2", branch='draft')
node = modulestore().get_item(locator)
# inherited
self.assertEqual(node.graceperiod, datetime.timedelta(hours=2))
locator = BlockUsageLocator(course_id="GreekHero", usage_id="problem1", branch='draft')
node = modulestore().get_item(locator)
# overridden
self.assertEqual(node.graceperiod, datetime.timedelta(hours=4))
# TODO test inheritance after set and delete of attrs
#===========================================
# This mocks the django.modulestore() function and is intended purely to disentangle
# the tests from django
def modulestore():
def load_function(engine_path):
module_path, _, name = engine_path.rpartition('.')
return getattr(import_module(module_path), name)
if SplitModuleTest.modulestore is None:
SplitModuleTest.bootstrapDB()
class_ = load_function(SplitModuleTest.MODULESTORE['ENGINE'])
options = {}
options.update(SplitModuleTest.MODULESTORE['OPTIONS'])
options['render_template'] = render_to_template_mock
# pylint: disable=W0142
SplitModuleTest.modulestore = class_(
SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG'],
**options
)
return SplitModuleTest.modulestore
# pylint: disable=W0613
def render_to_template_mock(*args):
pass
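# Hedged illustration (not part of the original file): the dynamic engine
# loading performed inside modulestore() above, in isolation. The stdlib path
# in the usage note below is only an example target.
def _load_class_sketch(engine_path):
    from importlib import import_module
    module_path, _, name = engine_path.rpartition('.')
    return getattr(import_module(module_path), name)
# e.g. _load_class_sketch('collections.OrderedDict') returns the OrderedDict class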
|
abo-abo/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore.py
|
Python
|
agpl-3.0
| 55,767
|
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from oauth_access.access import OAuthAccess
from oauth_access.exceptions import MissingToken
def oauth_login(request, service,
redirect_field_name="next", redirect_to_session_key="redirect_to"):
access = OAuthAccess(service)
    if service != "facebook":
token = access.unauthorized_token()
request.session["%s_unauth_token" % service] = token.to_string()
else:
token = None
if hasattr(request, "session"):
request.session[redirect_to_session_key] = request.GET.get(redirect_field_name)
return HttpResponseRedirect(access.authorization_url(token))
def oauth_callback(request, service):
ctx = RequestContext(request)
access = OAuthAccess(service)
unauth_token = request.session.get("%s_unauth_token" % service, None)
try:
#print "oauth_callback unauth_token = %s" % unauth_token
#print "oauth_callback request.GET = %s" % request.GET
auth_token = access.check_token(unauth_token, request.GET)
#print "oauth_login auth_token = %s" % auth_token
except MissingToken:
ctx.update({"error": "token_missing"})
else:
if auth_token:
cback = access.callback(request, access, auth_token)
return cback.redirect()
else:
# @@@ not nice for OAuth 2
ctx.update({"error": "token_mismatch"})
return render_to_response("oauth_access/oauth_error.html", ctx)
def finish_signup(request, service):
access = OAuthAccess(service)
return access.callback.finish_signup(request, service)
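# Hedged sketch (not part of the original file): one way these views are
# commonly wired into a urlconf for this Django era; the patterns and names
# below are illustrative assumptions.
def _example_urlpatterns():
    from django.conf.urls import url
    return [
        url(r"^login/(?P<service>\w+)/$", oauth_login, name="oauth_login"),
        url(r"^callback/(?P<service>\w+)/$", oauth_callback, name="oauth_callback"),
    ]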
|
DraXus/andaluciapeople
|
oauth_access/views.py
|
Python
|
agpl-3.0
| 1,715
|
# -*- coding: utf-8 -*-
# Copyright 2019 Joan Marín <Github@JoanMarin>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Tax Group Types",
"category": "Financial",
"version": "10.0.1.0.0",
"author": "EXA Auto Parts Github@exaap, "
"Joan Marín Github@JoanMarin",
"website": "https://github.com/odooloco/l10n-colombia",
"license": "AGPL-3",
"summary": "Types for Tax Groups",
"depends": [
"account_tax_group_menu",
],
"data": [
'security/ir.model.access.csv',
"views/account_tax_group_views.xml",
],
"installable": True,
}
|
odoo-colombia/l10n-colombia
|
account_tax_group_type/__manifest__.py
|
Python
|
agpl-3.0
| 636
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from flask import request
from superdesk import get_resource_service, Service
from superdesk.metadata.item import EMBARGO
from superdesk.resource import Resource, build_custom_hateoas
from apps.packages import TakesPackageService
from apps.archive.common import CUSTOM_HATEOAS
from superdesk.metadata.utils import item_url
from apps.archive.archive import SOURCE as ARCHIVE
from superdesk.errors import SuperdeskApiError
import logging
logger = logging.getLogger(__name__)
class ArchiveLinkResource(Resource):
endpoint_name = 'archive_link'
resource_title = endpoint_name
schema = {
'link_id': Resource.rel('archive', embeddable=False, type='string'),
'desk': Resource.rel('desks', embeddable=False)
}
url = 'archive/<{0}:target_id>/link'.format(item_url)
resource_methods = ['POST']
item_methods = []
class ArchiveLinkService(Service):
packageService = TakesPackageService()
def create(self, docs, **kwargs):
target_id = request.view_args['target_id']
doc = docs[0]
link_id = doc.get('link_id')
desk_id = doc.get('desk')
service = get_resource_service(ARCHIVE)
target = service.find_one(req=None, _id=target_id)
self._validate_link(target, target_id)
link = {'task': {'desk': desk_id}} if desk_id else {}
if link_id:
link = service.find_one(req=None, _id=link_id)
linked_item = self.packageService.link_as_next_take(target, link)
doc.update(linked_item)
build_custom_hateoas(CUSTOM_HATEOAS, doc)
return [linked_item['_id']]
def _validate_link(self, target, target_id):
"""
Validates the article to be linked
:param target: article to be linked
:param target_id: id of the article to be linked
:raises: SuperdeskApiError
"""
if not target:
raise SuperdeskApiError.notFoundError(message='Cannot find the target item with id {}.'.format(target_id))
if target.get(EMBARGO):
raise SuperdeskApiError.badRequestError("Takes can't be created for an Item having Embargo")
if get_resource_service('published').is_rewritten_before(target['_id']):
            raise SuperdeskApiError.badRequestError(message='Article has been rewritten before!')
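# Hedged usage sketch (not part of this module): a client-side POST against the
# archive_link resource defined above. The host, item id, and payload values
# are illustrative assumptions; authentication is omitted.
def _link_item_sketch(base_url, target_id, desk_id=None):
    import requests
    payload = {'desk': desk_id} if desk_id else {}
    return requests.post(
        '{}/archive/{}/link'.format(base_url, target_id), json=payload)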
|
amagdas/superdesk
|
server/apps/archive/archive_link.py
|
Python
|
agpl-3.0
| 2,628
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.utils.translation import ugettext_lazy as _
from base.forms.search.search_form import BaseSearchForm
from base.models import tutor
class TutorSearchForm(BaseSearchForm):
name = forms.CharField(max_length=40,
label=_("name"))
def search(self):
return tutor.search(**self.cleaned_data).order_by("person__last_name", "person__first_name")
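# Hedged usage sketch (not part of this module): binding and running the form
# the way a search view would. The sample data is an assumption.
def _search_tutors_sketch(name):
    form = TutorSearchForm(data={'name': name})
    if form.is_valid():
        return form.search()
    return []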
|
uclouvain/osis_louvain
|
base/forms/search/search_tutor.py
|
Python
|
agpl-3.0
| 1,699
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from copy import deepcopy
from superdesk.publish.formatters import Formatter
from .aap_formatter_common import map_priority, get_service_level
import superdesk
from superdesk.errors import FormatterError
import datetime
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, BYLINE, FORMAT, FORMATS
from .field_mappers.locator_mapper import LocatorMapper
from .field_mappers.slugline_mapper import SluglineMapper
from eve.utils import config
from .unicodetoascii import to_ascii
from .category_list_map import get_aap_category_list
import re
from superdesk.etree import parse_html, to_string, etree
from superdesk.text_utils import get_text
from superdesk.utc import utc_to_local
class AAPAnpaFormatter(Formatter):
def format(self, article, subscriber, codes=None):
try:
docs = []
formatted_article = deepcopy(article)
for category in self._get_category_list(formatted_article.get('anpa_category')):
mapped_source = self._get_mapped_source(formatted_article)
formatted_article[config.ID_FIELD] = formatted_article.get('item_id',
formatted_article.get(config.ID_FIELD))
pub_seq_num = superdesk.get_resource_service('subscribers').generate_sequence_number(subscriber)
anpa = []
if codes:
anpa.append(b'\x05')
anpa.append(' '.join(codes).encode('ascii'))
anpa.append(b'\x0D\x0A')
# start of message header (syn syn soh)
anpa.append(b'\x16\x16\x01')
anpa.append(get_service_level(category, formatted_article).encode('ascii'))
# story number
anpa.append(str(pub_seq_num).zfill(4).encode('ascii'))
                # field separator
anpa.append(b'\x0A') # -LF
anpa.append(map_priority(formatted_article.get('priority')).encode('ascii'))
anpa.append(b'\x20')
anpa.append(category['qcode'].lower().encode('ascii'))
anpa.append(b'\x13')
# format identifier
if formatted_article.get(FORMAT, FORMATS.HTML) == FORMATS.PRESERVED:
anpa.append(b'\x12')
else:
anpa.append(b'\x11')
anpa.append(b'\x20')
# keyword
keyword = 'bc-{}'.format(self.append_legal(article=formatted_article, truncate=True)).replace(' ', '-')
keyword = keyword[:24] if len(keyword) > 24 else keyword
anpa.append(keyword.encode('ascii'))
anpa.append(b'\x20')
# version field
anpa.append(b'\x20')
# reference field
anpa.append(b'\x20')
# filing date
local_time = utc_to_local(config.DEFAULT_TIMEZONE or 'UTC', formatted_article['_updated'])
anpa.append('{}-{}'.format(local_time.strftime('%m'), local_time.strftime('%d')).encode('ascii'))
anpa.append(b'\x20')
# add the word count
anpa.append(str(formatted_article.get('word_count', '0000')).zfill(4).encode('ascii'))
anpa.append(b'\x0D\x0A')
anpa.append(b'\x02') # STX
self._process_headline(anpa, formatted_article, category['qcode'].encode('ascii'))
keyword = SluglineMapper().map(article=formatted_article, category=category['qcode'].upper(),
truncate=True).encode('ascii', 'ignore')
anpa.append(keyword)
take_key = (formatted_article.get('anpa_take_key', '') or '').encode('ascii', 'ignore')
anpa.append((b'\x20' + take_key) if len(take_key) > 0 else b'')
anpa.append(b'\x0D\x0A')
if formatted_article.get('ednote', '') != '':
ednote = '{}\r\n'.format(to_ascii(formatted_article.get('ednote')))
anpa.append(ednote.encode('ascii', 'replace'))
if formatted_article.get(BYLINE):
anpa.append(get_text(formatted_article.get(BYLINE)).encode('ascii', 'replace'))
anpa.append(b'\x0D\x0A')
if formatted_article.get(FORMAT) == FORMATS.PRESERVED:
anpa.append(get_text(self.append_body_footer(formatted_article),
content='html').encode('ascii', 'replace'))
else:
body = to_ascii(formatted_article.get('body_html', ''))
# we need to inject the dateline
if formatted_article.get('dateline', {}).get('text') and not article.get('auto_publish', False):
body_html_elem = parse_html(formatted_article.get('body_html'))
ptag = body_html_elem.find('.//p')
if ptag is not None:
ptag.text = formatted_article['dateline']['text'] + ' ' + (ptag.text or '')
body = to_string(body_html_elem)
anpa.append(self.get_text_content(body))
if formatted_article.get('body_footer'):
anpa.append(self.get_text_content(to_ascii(formatted_article.get('body_footer', ''))))
anpa.append(b'\x0D\x0A')
anpa.append(mapped_source.encode('ascii'))
sign_off = (formatted_article.get('sign_off', '') or '').encode('ascii')
anpa.append((b'\x20' + sign_off) if len(sign_off) > 0 else b'')
anpa.append(b'\x0D\x0A')
anpa.append(b'\x03') # ETX
# time and date
anpa.append(datetime.datetime.now().strftime('%d-%m-%y %H-%M-%S').encode('ascii'))
anpa.append(b'\x04') # EOT
anpa.append(b'\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A\x0D\x0A')
docs.append({'published_seq_num': pub_seq_num, 'encoded_item': b''.join(anpa),
'formatted_item': b''.join(anpa).decode('ascii')})
return docs
except Exception as ex:
raise FormatterError.AnpaFormatterError(ex, subscriber)
def get_text_content(self, content):
content = content.replace('<br>', '<br/>').replace('</br>', '')
content = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', content)
content = content.replace('\xA0', ' ')
parsed = parse_html(content, content='html')
for br in parsed.xpath('//br'):
br.tail = '\r\n' + br.tail if br.tail else '\r\n'
etree.strip_elements(parsed, 'br', with_tail=False)
for tag in parsed.xpath('/html/div/child::*'):
            if tag.tag not in ('br',) and tag.text is not None and tag.text.strip() != '':
tag.text = ' ' + re.sub(' +', ' ', re.sub('(?<!\r)\n+', ' ', tag.text)) if tag.text else ''
tag.tail = '\r\n' + tag.tail if tag.tail else '\r\n'
para_text = "".join(x for x in parsed.itertext())
para_text = para_text.replace('\xA0', ' ')
return para_text.encode('ascii', 'replace')
def _process_headline(self, anpa, article, category):
# prepend the locator to the headline if required
article['headline'] = get_text(article.get('headline', ''))
headline = to_ascii(LocatorMapper().get_formatted_headline(article, category.decode('UTF-8').upper()))
# Set the maximum size to 64 including the sequence number if any
if len(headline) > 64:
if article.get('sequence'):
digits = len(str(article['sequence'])) + 1
shortened_headline = '{}={}'.format(headline[:-digits][:(64 - digits)], article['sequence'])
anpa.append(shortened_headline.encode('ascii', 'replace'))
else:
anpa.append(headline[:64].encode('ascii', 'replace'))
else:
anpa.append(headline.encode('ascii', 'replace'))
anpa.append(b'\x0D\x0A')
def _get_category_list(self, category_list):
return get_aap_category_list(category_list)
def _get_mapped_source(self, article):
return article.get('source', '') if article.get('source', '') != 'NZN' else 'AAP'
def can_format(self, format_type, article):
return format_type == 'AAP ANPA' and article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]
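# Hedged illustration (not part of the formatter): the ANPA control-byte
# envelope assembled in format() above, reduced to its skeleton. The field
# values are dummies, not real AAP output.
def _anpa_envelope_sketch(body=b'sample body text'):
    frame = [
        b'\x16\x16\x01',  # SYN SYN SOH - start of message header
        b'0001',          # zero-padded publish sequence number
        b'\x02',          # STX - start of text
        body,
        b'\x03',          # ETX - end of text
        b'\x04',          # EOT - end of transmission
    ]
    return b''.join(frame)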
|
superdesk/superdesk-aap
|
server/aap/publish/formatters/anpa_formatter.py
|
Python
|
agpl-3.0
| 8,923
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
# Copyright 2013 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime, timedelta
from openerp.osv import fields, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.connector.connector import ConnectorUnit
from openerp.addons.connector.unit.mapper import (mapping,
only_create,
ImportMapper
)
from .unit.backend_adapter import GenericAdapter
from .unit.import_synchronizer import (import_batch,
DirectBatchImport,
MagentoImportSynchronizer,
AddCheckpoint,
)
from .partner import partner_import_batch
from .sale import sale_order_import_batch
from .backend import magento
from .connector import add_checkpoint
_logger = logging.getLogger(__name__)
IMPORT_DELTA_BUFFER = 30 # seconds
class magento_backend(orm.Model):
_name = 'magento.backend'
_description = 'Magento Backend'
_inherit = 'connector.backend'
_backend_type = 'magento'
def select_versions(self, cr, uid, context=None):
""" Available versions in the backend.
Can be inherited to add custom versions. Using this method
to add a version from an ``_inherit`` does not constrain
to redefine the ``version`` field in the ``_inherit`` model.
"""
return [('1.7', '1.7')]
def _select_versions(self, cr, uid, context=None):
""" Available versions in the backend.
        If you want to add a version, do not override this
        method, but ``select_versions``.
"""
return self.select_versions(cr, uid, context=context)
def _get_stock_field_id(self, cr, uid, context=None):
field_ids = self.pool.get('ir.model.fields').search(
cr, uid,
[('model', '=', 'product.product'),
('name', '=', 'virtual_available')],
context=context)
return field_ids[0]
_columns = {
'version': fields.selection(
_select_versions,
string='Version',
required=True),
'location': fields.char(
'Location',
required=True,
help="Url to magento application"),
'admin_location': fields.char('Admin Location'),
'use_custom_api_path': fields.boolean(
'Custom Api Path',
help="The default API path is '/index.php/api/xmlrpc'. "
"Check this box if you use a custom API path, in that case, "
"the location has to be completed with the custom API path "),
'username': fields.char(
'Username',
help="Webservice user"),
'password': fields.char(
'Password',
help="Webservice password"),
'use_auth_basic': fields.boolean(
'Use HTTP Auth Basic',
help="Use a Basic Access Authentication for the API. "
"The Magento server could be configured to restrict access "
"using a HTTP authentication based on a username and "
"a password."),
'auth_basic_username': fields.char(
'Basic Auth. Username',
help="Basic access authentication web server side username"),
'auth_basic_password': fields.char(
'Basic Auth. Password',
help="Basic access authentication web server side password"),
'sale_prefix': fields.char(
'Sale Prefix',
help="A prefix put before the name of imported sales orders.\n"
"For instance, if the prefix is 'mag-', the sales "
"order 100000692 in Magento, will be named 'mag-100000692' "
"in OpenERP."),
'warehouse_id': fields.many2one('stock.warehouse',
'Warehouse',
required=True,
help='Warehouse used to compute the '
'stock quantities.'),
'website_ids': fields.one2many(
'magento.website', 'backend_id',
string='Website', readonly=True),
'default_lang_id': fields.many2one(
'res.lang',
'Default Language',
help="If a default language is selected, the records "
"will be imported in the translation of this language.\n"
"Note that a similar configuration exists "
"for each storeview."),
'default_category_id': fields.many2one(
'product.category',
string='Default Product Category',
help='If a default category is selected, products imported '
'without a category will be linked to it.'),
# add a field `auto_activate` -> activate a cron
'import_products_from_date': fields.datetime(
'Import products from date'),
'import_categories_from_date': fields.datetime(
'Import categories from date'),
'catalog_price_tax_included': fields.boolean('Prices include tax'),
'product_stock_field_id': fields.many2one(
'ir.model.fields',
string='Stock Field',
domain="[('model', 'in', ['product.product', 'product.template']),"
" ('ttype', '=', 'float')]",
help="Choose the field of the product which will be used for "
"stock inventory updates.\nIf empty, Quantity Available "
"is used."),
'product_binding_ids': fields.one2many('magento.product.product',
'backend_id',
string='Magento Products',
readonly=True),
}
_defaults = {
'product_stock_field_id': _get_stock_field_id,
'use_custom_api_path': False,
'use_auth_basic': False,
}
_sql_constraints = [
('sale_prefix_uniq', 'unique(sale_prefix)',
"A backend with the same sale prefix already exists")
]
def check_magento_structure(self, cr, uid, ids, context=None):
""" Used in each data import.
Verify if a website exists for each backend before starting the import.
"""
for backend_id in ids:
website_ids = self.pool['magento.website'].search(
cr, uid, [('backend_id', '=', backend_id)], context=context)
if not website_ids:
self.synchronize_metadata(cr, uid, backend_id, context=context)
return True
def synchronize_metadata(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
session = ConnectorSession(cr, uid, context=context)
for backend_id in ids:
for model in ('magento.website',
'magento.store',
'magento.storeview'):
# import directly, do not delay because this
# is a fast operation, a direct return is fine
# and it is simpler to import them sequentially
import_batch(session, model, backend_id)
return True
def import_partners(self, cr, uid, ids, context=None):
""" Import partners from all websites """
if not hasattr(ids, '__iter__'):
ids = [ids]
self.check_magento_structure(cr, uid, ids, context=context)
for backend in self.browse(cr, uid, ids, context=context):
for website in backend.website_ids:
website.import_partners()
return True
def import_sale_orders(self, cr, uid, ids, context=None):
""" Import sale orders from all store views """
if not hasattr(ids, '__iter__'):
ids = [ids]
storeview_obj = self.pool.get('magento.storeview')
storeview_ids = storeview_obj.search(cr, uid,
[('backend_id', 'in', ids)],
context=context)
storeviews = storeview_obj.browse(cr, uid, storeview_ids,
context=context)
for storeview in storeviews:
storeview.import_sale_orders()
return True
def import_customer_groups(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
self.check_magento_structure(cr, uid, ids, context=context)
session = ConnectorSession(cr, uid, context=context)
for backend_id in ids:
import_batch.delay(session, 'magento.res.partner.category',
backend_id)
return True
def _import_from_date(self, cr, uid, ids, model, from_date_field,
context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
self.check_magento_structure(cr, uid, ids, context=context)
session = ConnectorSession(cr, uid, context=context)
import_start_time = datetime.now()
for backend in self.browse(cr, uid, ids, context=context):
from_date = getattr(backend, from_date_field)
if from_date:
from_date = datetime.strptime(from_date,
DEFAULT_SERVER_DATETIME_FORMAT)
else:
from_date = None
import_batch.delay(session, model,
backend.id, filters={'from_date': from_date})
# Records from Magento are imported based on their `created_at`
# date. This date is set on Magento at the beginning of a
# transaction, so if the import is run between the beginning and
# the end of a transaction, the import of a record may be
# missed. That's why we add a small buffer back in time where
# the eventually missed records will be retrieved. This also
# means that we'll have jobs that import twice the same records,
# but this is not a big deal because they will be skipped when
# the last `sync_date` is the same.
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
next_time = next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.write(cr, uid, ids, {from_date_field: next_time}, context=context)
def import_product_categories(self, cr, uid, ids, context=None):
self._import_from_date(cr, uid, ids, 'magento.product.category',
'import_categories_from_date', context=context)
return True
def import_product_product(self, cr, uid, ids, context=None):
self._import_from_date(cr, uid, ids, 'magento.product.product',
'import_products_from_date', context=context)
return True
def update_product_stock_qty(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
mag_product_obj = self.pool.get('magento.product.product')
product_ids = mag_product_obj.search(cr, uid,
[('backend_id', 'in', ids),
('no_stock_sync', '=', False)],
context=context)
mag_product_obj.recompute_magento_qty(cr, uid, product_ids,
context=context)
return True
def _magento_backend(self, cr, uid, callback, domain=None, context=None):
if domain is None:
domain = []
ids = self.search(cr, uid, domain, context=context)
if ids:
callback(cr, uid, ids, context=context)
def _scheduler_import_sale_orders(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_sale_orders,
domain=domain, context=context)
def _scheduler_import_customer_groups(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_customer_groups,
domain=domain, context=context)
def _scheduler_import_partners(self, cr, uid, domain=None, context=None):
self._magento_backend(cr, uid, self.import_partners,
domain=domain, context=context)
def _scheduler_import_product_categories(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_product_categories,
domain=domain, context=context)
def _scheduler_import_product_product(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_product_product,
domain=domain, context=context)
def _scheduler_update_product_stock_qty(self, cr, uid,
domain=None, context=None):
self._magento_backend(cr, uid, self.update_product_stock_qty,
domain=domain, context=context)
def output_recorder(self, cr, uid, ids, context=None):
""" Utility method to output a file containing all the recorded
requests / responses with Magento. Used to generate test data.
Should be called with ``erppeek`` for instance.
"""
from .unit.backend_adapter import output_recorder
import os
import tempfile
fmt = '%Y-%m-%d-%H-%M-%S'
timestamp = datetime.now().strftime(fmt)
filename = 'output_%s_%s' % (cr.dbname, timestamp)
path = os.path.join(tempfile.gettempdir(), filename)
output_recorder(path)
return path
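# Hedged illustration (not part of the original module) of the
# IMPORT_DELTA_BUFFER bookkeeping used by the import methods above: the next
# 'from date' is the start of the current run minus a small buffer, so records
# created inside a long Magento transaction are picked up again on the next
# run. It reuses datetime/timedelta and the constants imported at the top of
# this file.
def _next_from_date_sketch(import_start_time):
    next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
    return next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)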
# TODO migrate from external.shop.group
class magento_website(orm.Model):
_name = 'magento.website'
_inherit = 'magento.binding'
_description = 'Magento Website'
_order = 'sort_order ASC, id ASC'
_columns = {
'name': fields.char('Name', required=True, readonly=True),
'code': fields.char('Code', readonly=True),
'sort_order': fields.integer('Sort Order', readonly=True),
'store_ids': fields.one2many(
'magento.store',
'website_id',
string="Stores",
readonly=True),
'import_partners_from_date': fields.datetime(
'Import partners from date'),
'product_binding_ids': fields.many2many('magento.product.product',
string='Magento Products',
readonly=True),
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'A website with the same ID on Magento already exists.'),
]
def import_partners(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
session = ConnectorSession(cr, uid, context=context)
import_start_time = datetime.now()
for website in self.browse(cr, uid, ids, context=context):
backend_id = website.backend_id.id
if website.import_partners_from_date:
from_date = datetime.strptime(
website.import_partners_from_date,
DEFAULT_SERVER_DATETIME_FORMAT)
else:
from_date = None
partner_import_batch.delay(
session, 'magento.res.partner', backend_id,
{'magento_website_id': website.magento_id,
'from_date': from_date})
# Records from Magento are imported based on their `created_at`
# date. This date is set on Magento at the beginning of a
# transaction, so if the import is run between the beginning and
# the end of a transaction, the import of a record may be
# missed. That's why we add a small buffer back in time where
# the eventually missed records will be retrieved. This also
# means that we'll have jobs that import twice the same records,
# but this is not a big deal because they will be skipped when
# the last `sync_date` is the same.
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
next_time = next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.write(cr, uid, ids, {'import_partners_from_date': next_time},
context=context)
return True
# TODO migrate from sale.shop (create a magento.store + associated
# sale.shop)
class magento_store(orm.Model):
_name = 'magento.store'
_inherit = 'magento.binding'
_description = 'Magento Store'
_inherits = {'sale.shop': 'openerp_id'}
def _get_store_from_website(self, cr, uid, ids, context=None):
store_obj = self.pool.get('magento.store')
return store_obj.search(cr, uid,
[('website_id', 'in', ids)],
context=context)
_columns = {
'website_id': fields.many2one(
'magento.website',
'Magento Website',
required=True,
readonly=True,
ondelete='cascade'),
'openerp_id': fields.many2one(
'sale.shop',
string='Sale Shop',
required=True,
readonly=True,
ondelete='cascade'),
'backend_id': fields.related(
'website_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store={
'magento.store': (lambda self, cr, uid, ids, c=None: ids,
['website_id'], 10),
'magento.website': (_get_store_from_website,
['backend_id'], 20),
},
readonly=True),
'storeview_ids': fields.one2many(
'magento.storeview',
'store_id',
string="Storeviews",
readonly=True),
'send_picking_done_mail': fields.boolean(
'Send email notification on picking done',
help="Does the picking export/creation should send "
"an email notification on Magento side?"),
'send_invoice_paid_mail': fields.boolean(
'Send email notification on invoice validated/paid',
help="Does the invoice export/creation should send "
"an email notification on Magento side?"),
'create_invoice_on': fields.selection(
[('open', 'Validate'),
('paid', 'Paid')],
'Create invoice on action',
required=True,
help="Should the invoice be created in Magento "
"when it is validated or when it is paid in OpenERP?\n"
"This only takes effect if the sales order's related "
"payment method is not giving an option for this by "
"itself. (See Payment Methods)"),
}
_defaults = {
'create_invoice_on': 'paid',
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'A store with the same ID on Magento already exists.'),
]
class sale_shop(orm.Model):
_inherit = 'sale.shop'
_columns = {
'magento_bind_ids': fields.one2many(
'magento.store', 'openerp_id',
string='Magento Bindings',
readonly=True),
}
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['magento_bind_ids'] = False
return super(sale_shop, self).copy_data(cr, uid, id,
default=default,
context=context)
# TODO: migrate from magerp.storeviews
class magento_storeview(orm.Model):
_name = 'magento.storeview'
_inherit = 'magento.binding'
_description = "Magento Storeview"
_order = 'sort_order ASC, id ASC'
_columns = {
'name': fields.char('Name', required=True, readonly=True),
'code': fields.char('Code', readonly=True),
'enabled': fields.boolean('Enabled', readonly=True),
'sort_order': fields.integer('Sort Order', readonly=True),
'store_id': fields.many2one('magento.store', 'Store',
ondelete='cascade', readonly=True),
'lang_id': fields.many2one('res.lang', 'Language'),
'backend_id': fields.related(
'store_id', 'website_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store=True,
readonly=True),
'import_orders_from_date': fields.datetime(
'Import sale orders from date',
            help='Do not consider non-imported sale orders before this date. '
                 'Leave empty to import all sale orders.'),
'no_sales_order_sync': fields.boolean(
'No Sales Order Synchronization',
            help='Check this box if the storeview is active in Magento '
                 'but its sales orders should not be imported.'),
}
_defaults = {
'no_sales_order_sync': False,
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
         'A storeview with the same ID on Magento already exists.'),
]
def import_sale_orders(self, cr, uid, ids, context=None):
session = ConnectorSession(cr, uid, context=context)
import_start_time = datetime.now()
for storeview in self.browse(cr, uid, ids, context=context):
if storeview.no_sales_order_sync:
_logger.debug("The storeview '%s' is active in Magento "
"but its sales orders should not be imported." %
storeview.name)
continue
backend_id = storeview.backend_id.id
if storeview.import_orders_from_date:
from_date = datetime.strptime(
storeview.import_orders_from_date,
DEFAULT_SERVER_DATETIME_FORMAT)
else:
from_date = None
sale_order_import_batch.delay(
session,
'magento.sale.order',
backend_id,
{'magento_storeview_id': storeview.magento_id,
'from_date': from_date},
priority=1) # executed as soon as possible
        # Records from Magento are imported based on their `created_at`
        # date. This date is set on Magento at the beginning of a
        # transaction, so if the import is run between the beginning and
        # the end of a transaction, the import of a record may be
        # missed. That's why we subtract a small buffer from the start
        # time, so that any records missed this way are retrieved on the
        # next run. This also means that some jobs will import the same
        # records twice, but that is not a big deal: the sales orders are
        # imported the first time and the duplicate jobs are skipped on
        # subsequent imports.
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
next_time = next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.write(cr, uid, ids, {'import_orders_from_date': next_time},
context=context)
return True
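# Illustration of the overlap window used above (a minimal sketch; the real
# IMPORT_DELTA_BUFFER is defined elsewhere in the module, and the 30 seconds
# below is only an assumed value):
#
#     from datetime import datetime, timedelta
#
#     IMPORT_DELTA_BUFFER = 30  # assumed, in seconds
#     import_start_time = datetime(2014, 1, 5, 12, 0, 0)
#     next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
#     # -> 2014-01-05 11:59:30: records created on Magento between 11:59:30
#     # and 12:00:00 are fetched again on the next run instead of being lost;
#     # the duplicate jobs are harmless because they are skipped afterwards.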
@magento
class WebsiteAdapter(GenericAdapter):
_model_name = 'magento.website'
_magento_model = 'ol_websites'
_admin_path = 'system_store/editWebsite/website_id/{id}'
@magento
class StoreAdapter(GenericAdapter):
_model_name = 'magento.store'
_magento_model = 'ol_groups'
_admin_path = 'system_store/editGroup/group_id/{id}'
@magento
class StoreviewAdapter(GenericAdapter):
_model_name = 'magento.storeview'
_magento_model = 'ol_storeviews'
_admin_path = 'system_store/editStore/store_id/{id}'
@magento
class MetadataBatchImport(DirectBatchImport):
""" Import the records directly, without delaying the jobs.
Import the Magento Websites, Stores, Storeviews
They are imported directly because this is a rare and fast operation,
and we don't really bother if it blocks the UI during this time.
(that's also a mean to rapidly check the connectivity with Magento).
"""
_model_name = [
'magento.website',
'magento.store',
'magento.storeview',
]
@magento
class WebsiteImportMapper(ImportMapper):
_model_name = 'magento.website'
direct = [('code', 'code'),
('sort_order', 'sort_order')]
@mapping
def name(self, record):
name = record['name']
if name is None:
name = _('Undefined')
return {'name': name}
@mapping
def backend_id(self, record):
return {'backend_id': self.backend_record.id}
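# How an ImportMapper combines its pieces (a framework-free sketch, not the
# connector's actual engine): the `direct` pairs copy fields verbatim, then
# each @mapping method contributes its own dict. For the mapper above:
#
#     record = {'code': 'base', 'sort_order': '0', 'name': None}
#     values = {'code': 'base', 'sort_order': '0'}    # from `direct`
#     values.update({'name': 'Undefined'})            # from name()
#     values.update({'backend_id': 42})               # from backend_id(),
#                                                     # 42 being a made-up id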
@magento
class StoreImportMapper(ImportMapper):
_model_name = 'magento.store'
direct = [('name', 'name')]
@mapping
def website_id(self, record):
binder = self.get_binder_for_model('magento.website')
binding_id = binder.to_openerp(record['website_id'])
return {'website_id': binding_id}
@mapping
@only_create
def warehouse_id(self, record):
return {'warehouse_id': self.backend_record.warehouse_id.id}
@magento
class StoreviewImportMapper(ImportMapper):
_model_name = 'magento.storeview'
direct = [
('name', 'name'),
('code', 'code'),
('is_active', 'enabled'),
('sort_order', 'sort_order'),
]
@mapping
def store_id(self, record):
binder = self.get_binder_for_model('magento.store')
binding_id = binder.to_openerp(record['group_id'])
return {'store_id': binding_id}
@magento
class StoreImport(MagentoImportSynchronizer):
""" Import one Magento Store (create a sale.shop via _inherits) """
_model_name = ['magento.store',
]
def _create(self, data):
openerp_binding_id = super(StoreImport, self)._create(data)
checkpoint = self.get_connector_unit_for_model(AddCheckpoint)
checkpoint.run(openerp_binding_id)
return openerp_binding_id
@magento
class StoreviewImport(MagentoImportSynchronizer):
""" Import one Magento Storeview """
_model_name = ['magento.storeview',
]
def _create(self, data):
openerp_binding_id = super(StoreviewImport, self)._create(data)
checkpoint = self.get_connector_unit_for_model(StoreViewAddCheckpoint)
checkpoint.run(openerp_binding_id)
return openerp_binding_id
@magento
class StoreViewAddCheckpoint(ConnectorUnit):
""" Add a connector.checkpoint on the magento.storeview
record """
_model_name = ['magento.storeview',
]
def run(self, openerp_binding_id):
add_checkpoint(self.session,
self.model._name,
openerp_binding_id,
self.backend_record.id)
|
credativUK/connector-magento
|
__unported__/magentoerpconnect/magento_model.py
|
Python
|
agpl-3.0
| 28,454
|
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Course page """
import web
from inginious.frontend.pages.utils import INGIniousPage
class CoursePage(INGIniousPage):
""" Course page """
def get_course(self, courseid):
""" Return the course """
try:
course = self.course_factory.get_course(courseid)
except:
raise web.notfound()
return course
def POST(self, courseid): # pylint: disable=arguments-differ
""" POST request """
course = self.get_course(courseid)
user_input = web.input()
if "unregister" in user_input and course.allow_unregister():
self.user_manager.course_unregister_user(course, self.user_manager.session_username())
raise web.seeother(self.app.get_homepath() + '/mycourses')
return self.show_page(course)
def GET(self, courseid): # pylint: disable=arguments-differ
""" GET request """
course = self.get_course(courseid)
user_input = web.input()
page = int(user_input.get("page", 1)) - 1
tag = user_input.get("tag", "")
return self.show_page(course, page, tag)
def show_page(self, course, current_page=0, current_tag=""):
""" Prepares and shows the course page """
username = self.user_manager.session_username()
if not self.user_manager.course_is_open_to_user(course, lti=False):
return self.template_helper.get_renderer().course_unavailable()
tasks = course.get_tasks()
last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(),
"taskid": {"$in": list(tasks.keys())}})
for submission in last_submissions:
submission["taskname"] = tasks[submission['taskid']].get_name_or_id(self.user_manager.session_language())
tasks_data = {}
user_tasks = self.database.user_tasks.find(
{"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
is_admin = self.user_manager.has_staff_rights_on_course(course, username)
tasks_score = [0.0, 0.0]
for taskid, task in tasks.items():
tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False,
"grade": 0.0}
tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0
for user_task in user_tasks:
tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"]
tasks_data[user_task["taskid"]]["grade"] = user_task["grade"]
weighted_score = user_task["grade"] * tasks[user_task["taskid"]].get_grading_weight()
tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0
course_grade = round(tasks_score[0] / tasks_score[1]) if tasks_score[1] > 0 else 0
tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language())
user_info = self.database.users.find_one({"username": username})
        # Filter the tasks by the current tag, if one is set
if not current_tag:
filtered_tasks = tasks
else:
filtered_tasks = {task_id: task for task_id, task in tasks.items() if
current_tag in map(lambda x: x.get_name(), task.get_tags()[2] + task.get_tags()[0])}
# Manage tasks pagination
page_limit = 20
total_tasks = len(filtered_tasks)
pages = total_tasks // page_limit
if (total_tasks % page_limit) != 0 or pages == 0:
pages += 1
if (page_limit * current_page + page_limit) < total_tasks:
page_tasks_ids = list(filtered_tasks.keys())[page_limit * current_page:
page_limit * current_page + page_limit]
else:
page_tasks_ids = list(filtered_tasks.keys())[page_limit * current_page:]
filtered_tasks = {task_id: tasks_data[task_id] for task_id, __ in filtered_tasks.items() if
task_id in page_tasks_ids}
return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks,
filtered_tasks, course_grade, tag_list, pages,
current_page + 1, current_tag)
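# A standalone sketch of the page-count rule used in show_page() above:
# ceil(total / limit) with a minimum of one page, so an empty course still
# renders one page (hypothetical helper, not part of INGInious):
def _page_count(total_tasks, page_limit=20):
    pages = total_tasks // page_limit
    if total_tasks % page_limit != 0 or pages == 0:
        pages += 1
    return pages

assert _page_count(0) == 1   # empty course still has one page
assert _page_count(20) == 1  # exact multiple fits in one page
assert _page_count(21) == 2  # one task over the limit spills onto page 2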
|
JuezUN/INGInious
|
inginious/frontend/pages/course.py
|
Python
|
agpl-3.0
| 4,665
|
from edx_ace import MessageType
# This code also exists in the Credentials app `messages.py` file. Any changes here should be duplicated there as well
# until we can come back around and create a common base Messaging class that the Credentials and Records app will
# utilize.
class ProgramCreditRequest(MessageType):
def __init__(self, site, user_email=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
super().__init__(*args, **kwargs)
if site.siteconfiguration.partner_from_address:
from_address = site.siteconfiguration.partner_from_address
else:
from_address = "no-reply@" + site.domain
if user_email:
self.options.update(
{
"reply_to": [user_email],
}
)
self.options.update(
{
"from_address": from_address,
}
)
self.context.update(
{
"platform_name": site.siteconfiguration.platform_name,
}
)
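# A minimal usage sketch (hypothetical values; assumes edx_ace is installed
# and that `site` behaves like a Django Site with a SiteConfiguration):
#
#     from types import SimpleNamespace
#
#     site = SimpleNamespace(
#         domain="example.edu",
#         siteconfiguration=SimpleNamespace(
#             partner_from_address=None,  # falsy -> falls back to no-reply@<domain>
#             platform_name="Example University",
#         ),
#     )
#     msg = ProgramCreditRequest(site, user_email="learner@example.edu")
#     # msg.options["from_address"] -> "no-reply@example.edu"
#     # msg.options["reply_to"]     -> ["learner@example.edu"]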
|
edx/credentials
|
credentials/apps/records/messages.py
|
Python
|
agpl-3.0
| 1,071
|
import datetime
from judge.utils.timedelta import nice_repr
from . import registry
@registry.filter
def timedelta(value, display='long'):
if value is None:
return value
return nice_repr(value, display)
@registry.filter
def timestampdelta(value, display='long'):
value = datetime.timedelta(seconds=value)
return timedelta(value, display)
@registry.filter
def seconds(timedelta):
return timedelta.total_seconds()
@registry.filter
@registry.render_with('time-remaining-fragment.html')
def as_countdown(timedelta):
return {'countdown': timedelta}
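# Quick illustration of how the value filters above relate (assumes the
# DMOJ code base is importable so that nice_repr resolves):
#
#     import datetime
#
#     d = datetime.timedelta(hours=1, minutes=30)
#     seconds(d)            # -> 5400.0, the plain total
#     timestampdelta(5400)  # wraps 5400 s into a timedelta, then renders it
#                           # through timedelta() / nice_repr(d, 'long')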
|
DMOJ/site
|
judge/jinja2/timedelta.py
|
Python
|
agpl-3.0
| 584
|
# encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import urllib
from .check_utils import journey_basic_query
from .tests_mechanism import dataset, AbstractTestFixture
from .check_utils import *
@dataset({"main_ptref_test": {}})
class TestPtRef(AbstractTestFixture):
"""
Test the structure of the ptref response
"""
@staticmethod
def _test_links(response, pt_obj_name):
        # Test the validity of the 'previous', 'next', 'last' and 'first' links
wanted_links_type = ['previous', 'next', 'last', 'first']
for l in response['links']:
if l['type'] in wanted_links_type:
assert pt_obj_name in l['href']
# Test the consistency between links
wanted_links = [l['href'] for l in response['links'] if l['type'] in wanted_links_type]
if len(wanted_links) <= 1:
return
def _get_dict_to_compare(link):
url_dict = query_from_str(link)
url_dict.pop('start_page', None)
url_dict['url'] = link.split('?')[0]
return url_dict
url_dict = _get_dict_to_compare(wanted_links[0])
for l in wanted_links[1:]:
assert url_dict == _get_dict_to_compare(l)
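    # The check above boils down to: pagination links must differ only by
    # their start_page. A stdlib-only sketch of the same idea (Python 2,
    # like this test suite):
    #
    #     from urlparse import urlparse, parse_qs
    #
    #     def comparable(link):
    #         parsed = urlparse(link)
    #         params = parse_qs(parsed.query)
    #         params.pop('start_page', None)
    #         params['url'] = parsed.path
    #         return params
    #
    #     assert comparable('http://nav/v1/lines?start_page=0&count=25') == \
    #            comparable('http://nav/v1/lines?start_page=2&count=25')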
def test_vj_default_depth(self):
"""default depth is 1"""
response = self.query_region("v1/vehicle_journeys")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
assert len(vjs) == 3
vj = vjs[0]
assert vj['id'] == 'vj1'
assert len(vj['stop_times']) == 2
assert vj['stop_times'][0]['arrival_time'] == '101500'
assert vj['stop_times'][0]['departure_time'] == '101500'
assert vj['stop_times'][1]['arrival_time'] == '111000'
assert vj['stop_times'][1]['departure_time'] == '111000'
        #we added some comments on the vj; we should find them here
com = get_not_null(vj, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == 'hello'
assert "feed_publishers" in response
feed_publishers = response["feed_publishers"]
for feed_publisher in feed_publishers:
is_valid_feed_publisher(feed_publisher)
feed_publisher = feed_publishers[0]
assert (feed_publisher["id"] == "c1")
assert (feed_publisher["name"] == "name-c1")
assert (feed_publisher["license"] == "ls-c1")
assert (feed_publisher["url"] == "ws-c1")
feed_publisher = feed_publishers[1]
assert (feed_publisher["id"] == "builder")
assert (feed_publisher["name"] == "canal tp")
assert (feed_publisher["license"] == "ODBL")
assert (feed_publisher["url"] == "www.canaltp.fr")
def test_vj_depth_0(self):
"""default depth is 1"""
response = self.query_region("v1/vehicle_journeys?depth=0")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=0)
def test_vj_depth_2(self):
"""default depth is 1"""
response = self.query_region("v1/vehicle_journeys?depth=2")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=2)
def test_vj_depth_3(self):
"""default depth is 1"""
response = self.query_region("v1/vehicle_journeys?depth=3")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=3)
def test_vj_show_codes_propagation(self):
"""stop_area:stop1 has a code, we should be able to find it when accessing it by the vj"""
response = self.query_region("stop_areas/stop_area:stop1/vehicle_journeys")
vjs = get_not_null(response, 'vehicle_journeys')
assert vjs
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
stop_points = [get_not_null(st, 'stop_point') for vj in vjs for st in vj['stop_times']]
stops1 = [s for s in stop_points if s['id'] == 'stop_area:stop1']
assert stops1
for stop1 in stops1:
            # all references to stop1 must carry its codes
codes = get_not_null(stop1, 'codes')
code_uic = [c for c in codes if c['type'] == 'code_uic']
assert len(code_uic) == 1 and code_uic[0]['value'] == 'bobette'
def test_ptref_without_current_datetime(self):
"""
stop_area:stop1 without message because _current_datetime is NOW()
"""
response = self.query_region("stop_areas/stop_area:stop1")
assert len(response['disruptions']) == 0
def test_ptref_with_current_datetime(self):
"""
stop_area:stop1 with _current_datetime
"""
response = self.query_region("stop_areas/stop_area:stop1?_current_datetime=20140115T235959")
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 1
messages = get_not_null(disruptions[0], 'messages')
assert(messages[0]['text']) == 'Disruption on StopArea stop_area:stop1'
def test_contributors(self):
"""test contributor formating"""
response = self.query_region("v1/contributors")
contributors = get_not_null(response, 'contributors')
assert len(contributors) == 1
ctr = contributors[0]
assert(ctr["id"] == 'c1')
assert(ctr["website"] == 'ws-c1')
assert(ctr["license"] == 'ls-c1')
def test_datasets(self):
"""test dataset formating"""
response = self.query_region("v1/datasets")
datasets = get_not_null(response, 'datasets')
assert len(datasets) == 1
ds = datasets[0]
assert(ds["id"] == 'd1')
assert(ds["description"] == 'desc-d1')
assert(ds["system"] == 'sys-d1')
def test_contributor_by_dataset(self):
"""test contributor by dataset formating"""
response = self.query_region("datasets/d1/contributors")
ctrs = get_not_null(response, 'contributors')
assert len(ctrs) == 1
ctr = ctrs[0]
assert(ctr["id"] == 'c1')
assert(ctr["website"] == 'ws-c1')
assert(ctr["license"] == 'ls-c1')
def test_dataset_by_contributor(self):
"""test dataset by contributor formating"""
response = self.query_region("contributors/c1/datasets")
frs = get_not_null(response, 'datasets')
assert len(frs) == 1
fr = frs[0]
assert(fr["id"] == 'd1')
def test_line(self):
"""test line formating"""
response = self.query_region("v1/lines")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
assert l["text_color"] == 'FFD700'
#we know we have a geojson for this test so we can check it
geo = get_not_null(l, 'geojson')
shape(geo)
com = get_not_null(l, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "I'm a happy comment"
physical_modes = get_not_null(l, 'physical_modes')
assert len(physical_modes) == 1
is_valid_physical_mode(physical_modes[0], depth_check=1)
assert physical_modes[0]['id'] == 'physical_mode:Car'
assert physical_modes[0]['name'] == 'name physical_mode:Car'
line_group = get_not_null(l, 'line_groups')
assert len(line_group) == 1
is_valid_line_group(line_group[0], depth_check=0)
assert line_group[0]['name'] == 'A group'
assert line_group[0]['id'] == 'group:A'
self._test_links(response, 'lines')
def test_line_without_shape(self):
"""test line formating with shape disabled"""
response = self.query_region("v1/lines?disable_geojson=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
        #we don't want a geojson since we have deactivated them
assert 'geojson' not in l
response = self.query_region("v1/lines")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
l = lines[0]
is_valid_line(l, depth_check=1)
#we check our geojson, just to be safe :)
assert 'geojson' in l
geo = get_not_null(l, 'geojson')
shape(geo)
def test_line_groups(self):
"""test line group formating"""
        # Test each possible depth to ensure main_line is always at a depth of 0
for depth in range(0,3):
response = self.query_region("line_groups?depth={0}".format(depth))
line_groups = get_not_null(response, 'line_groups')
assert len(line_groups) == 1
lg = line_groups[0]
is_valid_line_group(lg, depth_check=depth)
if depth > 0:
com = get_not_null(lg, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "I'm a happy comment"
# test if line_groups are accessible through the ptref graph
response = self.query_region("routes/line:A:0/line_groups")
line_groups = get_not_null(response, 'line_groups')
assert len(line_groups) == 1
lg = line_groups[0]
is_valid_line_group(lg)
def test_line_codes(self):
"""test line formating"""
response = self.query_region("v1/lines/line:A?show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
l = lines[0]
codes = get_not_null(l, 'codes')
assert len(codes) == 4
is_valid_codes(codes)
def test_route(self):
"""test line formating"""
response = self.query_region("v1/routes")
routes = get_not_null(response, 'routes')
assert len(routes) == 3
r = [r for r in routes if r['id'] == 'line:A:0']
assert len(r) == 1
r = r[0]
is_valid_route(r, depth_check=1)
#we know we have a geojson for this test so we can check it
geo = get_not_null(r, 'geojson')
shape(geo)
com = get_not_null(r, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "I'm a happy comment"
self._test_links(response, 'routes')
def test_stop_areas(self):
"""test stop_areas formating"""
response = self.query_region("v1/stop_areas")
stops = get_not_null(response, 'stop_areas')
assert len(stops) == 3
s = next((s for s in stops if s['name'] == 'stop_area:stop1'))
is_valid_stop_area(s, depth_check=1)
com = get_not_null(s, 'comments')
assert len(com) == 2
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "comment on stop A"
assert com[1]['type'] == 'standard'
assert com[1]['value'] == "the stop is sad"
self._test_links(response, 'stop_areas')
def test_stop_area(self):
"""test stop_areas formating"""
response = self.query_region("v1/stop_areas/stop_area:stop1?depth=2")
stops = get_not_null(response, 'stop_areas')
assert len(stops) == 1
is_valid_stop_area(stops[0], depth_check=2)
modes = get_not_null(stops[0], 'physical_modes')
assert len(modes) == 1
modes = get_not_null(stops[0], 'commercial_modes')
assert len(modes) == 1
def test_stop_points(self):
"""test stop_points formating"""
response = self.query_region("v1/stop_points?depth=2")
stops = get_not_null(response, 'stop_points')
assert len(stops) == 3
s = next((s for s in stops if s['name'] == 'stop_area:stop2'))# yes, that's a stop_point
is_valid_stop_point(s, depth_check=2)
com = get_not_null(s, 'comments')
assert len(com) == 1
assert com[0]['type'] == 'standard'
assert com[0]['value'] == "hello bob"
modes = get_not_null(s, 'physical_modes')
assert len(modes) == 1
modes = get_not_null(s, 'commercial_modes')
assert len(modes) == 1
self._test_links(response, 'stop_points')
def test_company_default_depth(self):
"""default depth is 1"""
response = self.query_region("v1/companies")
companies = get_not_null(response, 'companies')
for company in companies:
is_valid_company(company, depth_check=1)
#we check afterward that we have the right data
        #we know there is only one company in the dataset
assert len(companies) == 1
company = companies[0]
assert company['id'] == 'CMP1'
self._test_links(response, 'companies')
def test_simple_crow_fly(self):
journey_basic_query = "journeys?from=9;9.001&to=stop_area%3Astop2&datetime=20140105T000000"
response = self.query_region(journey_basic_query)
        #the response must still be valid (this tests the kraken data reloading)
self.is_valid_journey_response(response, journey_basic_query)
def test_forbidden_uris_on_line(self):
"""test forbidden uri for lines"""
response = self.query_region("v1/lines")
lines = get_not_null(response, 'lines')
assert len(lines) == 3
assert len(lines[0]['physical_modes']) == 1
assert lines[0]['physical_modes'][0]['id'] == 'physical_mode:Car'
        #all lines use this physical mode, so when we forbid it, we find nothing
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines"
"?forbidden_uris[]=physical_mode:Car")
assert code == 404
        # for retrocompatibility purposes, forbidden_id[] behaves the same
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines"
"?forbidden_id[]=physical_mode:Car")
assert code == 404
# when we forbid another physical_mode, we find again our line
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines"
"?forbidden_uris[]=physical_mode:Bus")
assert code == 200
def test_simple_pt_objects(self):
response = self.query_region('pt_objects?q=stop2')
is_valid_pt_objects_response(response)
pt_objs = get_not_null(response, 'pt_objects')
assert len(pt_objs) == 1
assert get_not_null(pt_objs[0], 'id') == 'stop_area:stop2'
def test_query_with_strange_char(self):
q = b'stop_points/stop_point:stop_with name bob \" , é'
encoded_q = urllib.quote(q)
response = self.query_region(encoded_q)
stops = get_not_null(response, 'stop_points')
assert len(stops) == 1
is_valid_stop_point(stops[0], depth_check=1)
assert stops[0]["id"] == u'stop_point:stop_with name bob \" , é'
def test_filter_query_with_strange_char(self):
"""test that the ptref mechanism works an object with a weird id"""
response = self.query_region('stop_points/stop_point:stop_with name bob \" , é/lines')
lines = get_not_null(response, 'lines')
assert len(lines) == 1
for l in lines:
is_valid_line(l)
def test_filter_query_with_strange_char_in_filter(self):
"""test that the ptref mechanism works an object with a weird id passed in filter args"""
response = self.query_region('lines?filter=stop_point.uri="stop_point:stop_with name bob \\\" , é"')
lines = get_not_null(response, 'lines')
assert len(lines) == 1
for l in lines:
is_valid_line(l)
def test_journey_with_strange_char(self):
#we use an encoded url to be able to check the links
query = 'journeys?from={}&to={}&datetime=20140105T070000'.format(urllib.quote_plus(b'stop_with name bob \" , é'), urllib.quote_plus(b'stop_area:stop1'))
response = self.query_region(query, display=True)
self.is_valid_journey_response(response, query)
def test_vj_period_filter(self):
"""with just a since in the middle of the period, we find vj1"""
response = self.query_region("vehicle_journeys?since=20140105T070000")
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
assert 'vj1' in (vj['id'] for vj in vjs)
# same with an until at the end of the day
response = self.query_region("vehicle_journeys?since=20140105T000000&until=20140106T0000")
vjs = get_not_null(response, 'vehicle_journeys')
assert 'vj1' in (vj['id'] for vj in vjs)
        # there is no vj after the 8th
response, code = self.query_no_assert("v1/coverage/main_ptref_test/vehicle_journeys?since=20140109T070000")
assert code == 404
assert get_not_null(response, 'error')['message'] == 'ptref : Filters: Unable to find object'
def test_line_by_code(self):
"""test the filter=type.has_code(key, value)"""
response = self.query_region("lines?filter=line.has_code(codeB, B)&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'B' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'codeB']
response = self.query_region("lines?filter=line.has_code(codeB, Bise)&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'B' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'codeB']
response = self.query_region("lines?filter=line.has_code(codeC, C)&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'B' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'codeB']
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines?filter=line.has_code(codeB, rien)&show_codes=true")
assert code == 400
assert get_not_null(response, 'error')['message'] == 'ptref : Filters: Unable to find object'
response, code = self.query_no_assert("v1/coverage/main_ptref_test/lines?filter=line.has_code(codeC, rien)&show_codes=true")
assert code == 400
assert get_not_null(response, 'error')['message'] == 'ptref : Filters: Unable to find object'
def test_pt_ref_internal_method(self):
from jormungandr import i_manager
from navitiacommon import type_pb2
i = i_manager.instances['main_ptref_test']
assert len([r for r in i.ptref.get_objs(type_pb2.ROUTE)]) == 3
@dataset({"main_ptref_test": {}, "main_routing_test": {}})
class TestPtRefRoutingAndPtrefCov(AbstractTestFixture):
def test_external_code(self):
"""test the strange and ugly external code api"""
response = self.query("v1/lines?external_code=A&show_codes=true")
lines = get_not_null(response, 'lines')
assert len(lines) == 1
assert 'A' in [code['value'] for code in lines[0]['codes'] if code['type'] == 'external_code']
def test_invalid_url(self):
"""the following bad url was causing internal errors, it should only be a 404"""
_, status = self.query_no_assert("v1/coverage/lines/bob")
eq_(status, 404)
@dataset({"main_routing_test": {}})
class TestPtRefRoutingCov(AbstractTestFixture):
def test_with_coords(self):
"""test with a coord in the pt call, so a place nearby is actually called"""
response = self.query_region("coords/{coord}/stop_areas".format(coord=r_coord))
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
        #the default is to search for all stops within 200m, so we should have A and C
eq_(len(stops), 2)
assert set(["stopA", "stopC"]) == set([s['name'] for s in stops])
def test_with_coord(self):
"""some but with coord and not coords"""
response = self.query_region("coord/{coord}/stop_areas".format(coord=r_coord))
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
        #the default is to search for all stops within 200m, so we should have A and C
eq_(len(stops), 2)
assert set(["stopA", "stopC"]) == set([s['name'] for s in stops])
def test_with_coord_distance_different(self):
"""same as test_with_coord, but with 300m radius. so we find all stops"""
response = self.query_region("coords/{coord}/stop_areas?distance=300".format(coord=r_coord))
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
eq_(len(stops), 3)
assert set(["stopA", "stopB", "stopC"]) == set([s['name'] for s in stops])
def test_with_coord_and_filter(self):
"""
we now test with a more complex query, we want all stops with a metro within 300m of r
only A and C have a metro line
Note: the metro is physical_mode:0x1
"""
response = self.query_region("physical_modes/physical_mode:0x1/coords/{coord}/stop_areas"
"?distance=300".format(coord=r_coord), display=True)
stops = get_not_null(response, 'stop_areas')
for s in stops:
is_valid_stop_area(s)
        #within 300m we would have all 3 stops, but the metro filter
        #keeps only the 2 stops served by a metro line (A and C)
eq_(len(stops), 2)
assert set(["stopA", "stopC"]) == set([s['name'] for s in stops])
def test_all_lines(self):
"""test with all lines in the pt call"""
response = self.query_region('lines')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 4)
assert {"1A", "1B", "1C", "1D"} == {l['code'] for l in lines}
def test_line_filter_line_code(self):
"""test filtering lines from line code 1A in the pt call"""
response = self.query_region('lines?filter=line.code=1A')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 1)
assert "1A" == lines[0]['code']
def test_line_filter_line_code_with_resource_uri(self):
"""test filtering lines from line code 1A in the pt call with a resource uri"""
response = self.query_region('physical_modes/physical_mode:0x1/lines?filter=line.code=1D')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 1)
assert "1D" == lines[0]['code']
def test_line_filter_line_code_empty_response(self):
"""test filtering lines from line code bob in the pt call
        as no line has the code "bob", the response returns no object"""
url = 'v1/coverage/main_routing_test/lines?filter=line.code=bob'
response, status = self.query_no_assert(url)
assert status == 400
assert 'error' in response
assert 'bad_filter' in response['error']['id']
def test_line_filter_route_code_ignored(self):
"""test filtering lines from route code bob in the pt call
as there is no attribute "code" for route, filter is invalid and ignored"""
response_all_lines = self.query_region('lines')
all_lines = get_not_null(response_all_lines, 'lines')
response = self.query_region('lines?filter=route.code=bob')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 4)
assert {l['code'] for l in all_lines} == {l['code'] for l in lines}
def test_route_filter_line_code(self):
"""test filtering routes from line code 1B in the pt call"""
response = self.query_region('routes?filter=line.code=1B')
assert 'error' not in response
routes = get_not_null(response, 'routes')
eq_(len(routes), 1)
assert "1B" == routes[0]['line']['code']
def test_headsign(self):
"""test basic usage of headsign"""
response = self.query_region('vehicle_journeys?headsign=vjA')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
eq_(len(vjs), 1)
def test_headsign_with_resource_uri(self):
"""test usage of headsign with resource uri"""
response = self.query_region('physical_modes/physical_mode:0x0/vehicle_journeys'
'?headsign=vjA')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
eq_(len(vjs), 1)
def test_headsign_with_code_filter_and_resource_uri(self):
"""test usage of headsign with code filter and resource uri"""
response = self.query_region('physical_modes/physical_mode:0x0/vehicle_journeys'
'?headsign=vjA&filter=line.code=1A')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
eq_(len(vjs), 1)
def test_multiple_resource_uri_no_final_collection_uri(self):
"""test usage of multiple resource uris with line and physical mode giving result,
then with multiple resource uris giving no result as nothing matches"""
response = self.query_region('physical_modes/physical_mode:0x0/lines/A')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 1)
response = self.query_region('lines/D')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 1)
response = self.query_region('physical_modes/physical_mode:0x1/lines/D')
assert 'error' not in response
lines = get_not_null(response, 'lines')
eq_(len(lines), 1)
response, status = self.query_region('physical_modes/physical_mode:0x0/lines/D', False)
assert status == 404
assert 'error' in response
assert 'unknown_object' in response['error']['id']
def test_multiple_resource_uri_with_final_collection_uri(self):
"""test usage of multiple resource uris with line and physical mode giving result,
as we match it with a final collection, so the intersection is what we want"""
response = self.query_region('physical_modes/physical_mode:0x1/lines/D/stop_areas')
assert 'error' not in response
stop_areas = get_not_null(response, 'stop_areas')
eq_(len(stop_areas), 2)
response = self.query_region('physical_modes/physical_mode:0x0/lines/D/stop_areas')
assert 'error' not in response
stop_areas = get_not_null(response, 'stop_areas')
eq_(len(stop_areas), 1)
def test_headsign_stop_time_vj(self):
"""test basic print of headsign in stop_times for vj"""
response = self.query_region('vehicle_journeys?filter=vehicle_journey.name="vjA"')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
eq_(len(vjs), 1)
eq_(len(vjs[0]['stop_times']), 2)
eq_(vjs[0]['stop_times'][0]['headsign'], "A00")
eq_(vjs[0]['stop_times'][1]['headsign'], "vjA")
def test_headsign_display_info_journeys(self):
"""test basic print of headsign in section for journeys"""
response = self.query_region('journeys?from=stop_point:stopB&to=stop_point:stopA&datetime=20120615T000000')
assert 'error' not in response
journeys = get_not_null(response, 'journeys')
eq_(len(journeys), 1)
eq_(len(journeys[0]['sections']), 1)
eq_(journeys[0]['sections'][0]['display_informations']['headsign'], "A00")
def test_headsign_display_info_departures(self):
"""test basic print of headsign in display informations for departures"""
response = self.query_region('stop_points/stop_point:stopB/departures?from_datetime=20120615T000000')
assert 'error' not in response
departures = get_not_null(response, 'departures')
eq_(len(departures), 2)
assert {"A00", "vjB"} == {d['display_informations']['headsign'] for d in departures}
def test_headsign_display_info_arrivals(self):
"""test basic print of headsign in display informations for arrivals"""
response = self.query_region('stop_points/stop_point:stopB/arrivals?from_datetime=20120615T000000')
assert 'error' not in response
arrivals = get_not_null(response, 'arrivals')
eq_(len(arrivals), 1)
eq_(arrivals[0]['display_informations']['headsign'], "vehicle_journey 2")
def test_headsign_display_info_route_schedules(self):
"""test basic print of headsign in display informations for route schedules"""
response = self.query_region('routes/A:0/route_schedules?from_datetime=20120615T000000')
assert 'error' not in response
route_schedules = get_not_null(response, 'route_schedules')
eq_(len(route_schedules), 1)
eq_(len(route_schedules[0]['table']['headers']), 1)
display_info = route_schedules[0]['table']['headers'][0]['display_informations']
eq_(display_info['headsign'], "vjA")
assert {"A00", "vjA"} == set(display_info['headsigns'])
def test_trip_id_vj(self):
"""test basic print of trip and its id in vehicle_journeys"""
response = self.query_region('vehicle_journeys')
assert 'error' not in response
vjs = get_not_null(response, 'vehicle_journeys')
for vj in vjs:
is_valid_vehicle_journey(vj, depth_check=1)
assert any(vj['name'] == "vjB" and vj['trip']['id'] == "vjB" for vj in vjs)
def test_disruptions(self):
"""test the /disruptions api"""
response = self.query_region('disruptions')
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 9
for d in disruptions:
is_valid_disruption(d)
# in pt_ref, the status is always active as the checked
# period is the validity period
assert d["status"] == "active"
# we test that we can access a specific disruption
response = self.query_region('disruptions/too_bad_line_C')
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 1
# we can also display all disruptions of an object
response = self.query_region('lines/C/disruptions')
disruptions = get_not_null(response, 'disruptions')
assert len(disruptions) == 2
disruptions_uris = set([d['uri'] for d in disruptions])
eq_({"too_bad_line_C", "too_bad_all_lines"}, disruptions_uris)
        # we can't access objects from the disruption though (we don't think it useful for the moment)
response, status = self.query_region('disruptions/too_bad_line_C/lines', check=False)
eq_(status, 404)
e = get_not_null(response, 'error')
assert e['id'] == 'unknown_object'
assert e['message'] == 'ptref : Filters: Unable to find object'
def test_trips(self):
"""test the /trips api"""
response = self.query_region('trips')
trips = get_not_null(response, 'trips')
assert len(trips) == 4
for t in trips:
is_valid_trip(t)
# we test that we can access a specific trip
response = self.query_region('trips/vjA')
trips = get_not_null(response, 'trips')
assert len(trips) == 1
assert get_not_null(trips[0], 'id') == "vjA"
# we can also display trip of a vj
response = self.query_region('vehicle_journeys/vjB/trips')
trips = get_not_null(response, 'trips')
assert len(trips) == 1
assert get_not_null(trips[0], 'id') == "vjB"
def test_attributs_in_display_info_journeys(self):
"""test some attributs in display_information of a section for journeys"""
response = self.query_region('journeys?from=stop_point:stopB&to=stop_point:stopA&datetime=20120615T000000')
assert 'error' not in response
journeys = get_not_null(response, 'journeys')
eq_(len(journeys), 1)
eq_(len(journeys[0]['sections']), 1)
eq_(journeys[0]['sections'][0]['display_informations']['headsign'], "A00")
eq_(journeys[0]['sections'][0]['display_informations']['color'], "289728")
eq_(journeys[0]['sections'][0]['display_informations']['text_color'], "FFD700")
|
ballouche/navitia
|
source/jormungandr/tests/ptref_tests.py
|
Python
|
agpl-3.0
| 34,004
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import addons
import base64
import ir
import locale
import logging
import netsvc
import os
import platform
import pooler
import release
import security
import sql_db
import sys
import threading
import time
import tools
from tools.translate import _
from cStringIO import StringIO
#.apidoc title: Exported Service methods
#.apidoc module-mods: member-order: bysource
""" This python module defines the RPC methods available to remote clients.
Each 'Export Service' is a group of 'methods', which in turn are RPC
procedures to be called. Each method has its own arguments footprint.
"""
logging.basicConfig()
class baseExportService(netsvc.ExportService):
""" base class for the objects that implement the standardized
xmlrpc2 dispatch
"""
_auth_commands = { 'pub': [] , 'root': [], 'db': [] }
def new_dispatch(self, method, auth, params, auth_domain=None):
        # Double-check that we have the correct authentication:
if not auth:
domain='pub'
else:
domain=auth.provider.domain
if method not in self._auth_commands[domain]:
raise Exception("Method not found: %s" % method)
fn = getattr(self, 'exp_'+method)
if domain == 'db':
u, p, db, uid = auth.auth_creds[auth.last_auth]
cr = pooler.get_db(db).cursor()
try:
res = fn(cr, uid, *params)
cr.commit()
return res
finally:
cr.close()
else:
return fn(*params)
class db(baseExportService):
_auth_commands = { 'root': [ 'create', 'get_progress', 'drop', 'dump',
'restore', 'rename',
'change_admin_password', 'migrate_databases' ],
'pub': [ 'db_exist', 'list', 'list_lang', 'server_version' ],
}
def __init__(self, name="db"):
netsvc.ExportService.__init__(self, name)
self.joinGroup("web-services")
self.actions = {}
self.id = 0
self.id_protect = threading.Semaphore()
        self._pg_psw_env_var_is_set = False  # on win32, pg_dump needs the PGPASSWORD env var
def dispatch(self, method, auth, params):
if method in [ 'create', 'get_progress', 'drop', 'dump',
'restore', 'rename',
'change_admin_password', 'migrate_databases' ]:
passwd = params[0]
params = params[1:]
security.check_super(passwd)
elif method in [ 'db_exist', 'list', 'list_lang', 'server_version' ]:
# params = params
# No security check for these methods
pass
else:
raise KeyError("Method not found: %s" % method)
fn = getattr(self, 'exp_'+method)
return fn(*params)
def _create_empty_database(self, name):
db = sql_db.db_connect('template1')
cr = db.cursor()
try:
cr.autocommit(True) # avoid transaction block
cr.execute("""CREATE DATABASE "%s" ENCODING 'unicode' TEMPLATE "template0" """ % name)
finally:
cr.close()
def exp_create(self, db_name, demo, lang, user_password='admin'):
self.id_protect.acquire()
self.id += 1
id = self.id
self.id_protect.release()
self.actions[id] = {'clean': False}
self._create_empty_database(db_name)
class DBInitialize(object):
def __call__(self, serv, id, db_name, demo, lang, user_password='admin'):
cr = None
try:
serv.actions[id]['progress'] = 0
cr = sql_db.db_connect(db_name).cursor()
tools.init_db(cr)
cr.commit()
cr.close()
cr = None
_langs = []
if lang:
_langs.append(lang)
pool = pooler.restart_pool(db_name, demo, serv.actions[id],
update_module=True, languages=_langs)[1]
cr = sql_db.db_connect(db_name).cursor()
if lang:
modobj = pool.get('ir.module.module')
mids = modobj.search(cr, 1, [('state', '=', 'installed')])
modobj.update_translations(cr, 1, mids, lang)
cr.execute('UPDATE res_users SET password=%s, context_lang=%s, active=True WHERE login=%s', (
user_password, lang, 'admin'))
cr.execute('SELECT login, password, name ' \
' FROM res_users ' \
' ORDER BY login')
serv.actions[id]['users'] = cr.dictfetchall()
serv.actions[id]['clean'] = True
cr.commit()
cr.close()
except Exception, e:
serv.actions[id]['clean'] = False
serv.actions[id]['exception'] = e
import traceback
e_str = StringIO()
traceback.print_exc(file=e_str)
traceback_str = e_str.getvalue()
e_str.close()
logging.getLogger('web-services').error('CREATE DATABASE\n%s' % (traceback_str))
serv.actions[id]['traceback'] = traceback_str
if cr:
cr.close()
logger = logging.getLogger('web-services')
logger.info('CREATE DATABASE: %s' % (db_name.lower()))
dbi = DBInitialize()
create_thread = threading.Thread(target=dbi,
args=(self, id, db_name, demo, lang, user_password))
create_thread.start()
self.actions[id]['thread'] = create_thread
return id
def exp_get_progress(self, id):
if self.actions[id]['thread'].isAlive():
# return addons.init_progress[db_name]
return (min(self.actions[id].get('progress', 0),0.95), [])
else:
clean = self.actions[id]['clean']
if clean:
users = self.actions[id]['users']
self.actions.pop(id)
return (1.0, users)
else:
e = self.actions[id]['exception']
self.actions.pop(id)
raise Exception, e
def exp_drop(self, db_name):
sql_db.close_db(db_name)
logger = logging.getLogger()
db = sql_db.db_connect('template1')
cr = db.cursor()
cr.autocommit(True) # avoid transaction block
if tools.config.get_misc('debug', 'drop_guard', False):
raise Exception("Not dropping database %s because guard is set!" % db_name)
try:
cr.execute('DROP DATABASE "%s"' % db_name)
logger.info('DROP DB: %s' % (db_name))
except Exception, e:
logger.exception('DROP DB: %s failed:' % (db_name,))
raise Exception("Couldn't drop database %s: %s" % (db_name, e))
finally:
cr.close()
return True
def _set_pg_psw_env_var(self):
if os.name == 'nt' and not os.environ.get('PGPASSWORD', ''):
os.environ['PGPASSWORD'] = tools.config['db_password']
self._pg_psw_env_var_is_set = True
def _unset_pg_psw_env_var(self):
if os.name == 'nt' and self._pg_psw_env_var_is_set:
os.environ['PGPASSWORD'] = ''
def exp_dump(self, db_name):
logger = logging.getLogger('web-services')
if tools.config.get_misc('databases', 'dump_guard', False):
logger.error("Prevented dump of database %s, because guard is set!", db_name)
raise Exception("Not dropping database %s because guard is set!" % db_name)
allowed_res = tools.config.get_misc('databases', 'allowed')
if allowed_res:
dbs_allowed = [ x.strip() for x in allowed_res.split(' ')]
if not db_name in dbs_allowed:
logger.critical("Asked to dump illegal database: %s", db_name)
raise Exception("Database %s is not allowed to be dumped!" % db_name)
self._set_pg_psw_env_var()
cmd = ['pg_dump', '--format=c', '--no-owner' , '-w']
if tools.config['db_user']:
cmd.append('--username=' + tools.config['db_user'])
if tools.config['db_host']:
cmd.append('--host=' + tools.config['db_host'])
if tools.config['db_port']:
cmd.append('--port=' + str(tools.config['db_port']))
cmd.append(db_name)
stdin, stdout = tools.exec_pg_command_pipe(*tuple(cmd))
stdin.close()
data = stdout.read()
res = stdout.close()
if res:
logger.error('DUMP DB: %s failed\n%s' % (db_name, data))
raise Exception("Couldn't dump database")
logger.info('DUMP DB: %s' % (db_name))
self._unset_pg_psw_env_var()
return base64.encodestring(data)
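    # For illustration: with a hypothetical config of db_user='openerp',
    # db_host='localhost' and db_port=5432, the command assembled above is
    #
    #     pg_dump --format=c --no-owner -w --username=openerp \
    #             --host=localhost --port=5432 <db_name>
    #
    # where '-w' forbids a password prompt, which is why PGPASSWORD is
    # exported into the environment beforehand on win32.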
def exp_restore(self, db_name, data):
logger = logging.getLogger('web-services')
self._set_pg_psw_env_var()
if self.exp_db_exist(db_name):
logger.warning('RESTORE DB: %s already exists' % (db_name,))
raise Exception("Database already exists")
self._create_empty_database(db_name)
cmd = ['pg_restore', '--no-owner', '-w']
if tools.config['db_user']:
cmd.append('--username=' + tools.config['db_user'])
if tools.config['db_host']:
cmd.append('--host=' + tools.config['db_host'])
if tools.config['db_port']:
cmd.append('--port=' + str(tools.config['db_port']))
cmd.append('--dbname=' + db_name)
args2 = tuple(cmd)
buf=base64.decodestring(data)
if os.name == "nt":
            tmpfile = (os.environ.get('TMP') or 'C:\\') + os.tmpnam()
file(tmpfile, 'wb').write(buf)
args2=list(args2)
args2.append(' ' + tmpfile)
args2=tuple(args2)
stdin, stdout = tools.exec_pg_command_pipe(*args2)
if not os.name == "nt":
stdin.write(base64.decodestring(data))
stdin.close()
res = stdout.close()
if res:
raise Exception, "Couldn't restore database"
logger.info('RESTORE DB: %s' % (db_name))
self._unset_pg_psw_env_var()
return True
def exp_rename(self, old_name, new_name):
sql_db.close_db(old_name)
logger = logging.getLogger('web-services')
allowed_res = tools.config.get_misc('databases', 'allowed')
if allowed_res:
            # When we have a restricted set of database names, renaming must
            # be forbidden entirely: we neither want a known db to be renamed
            # to an arbitrary name, nor an arbitrary db to be renamed to a
            # known name. Neither the old nor the new database name is
            # expected to appear in the config file, so just tell the admin
            # that he has to temporarily change the conf file.
            logger.error("Renaming databases is not allowed. "\
                "Please turn off the databases.allowed setting in the conf file.")
            raise Exception("Database renaming is forbidden because the names are restricted")
db = sql_db.db_connect('template1')
cr = db.cursor()
cr.autocommit(True) # avoid transaction block
try:
try:
cr.execute('ALTER DATABASE "%s" RENAME TO "%s"' % (old_name, new_name))
except Exception, e:
logger.error('RENAME DB: %s -> %s failed:\n%s' % (old_name, new_name, e))
raise Exception("Couldn't rename database %s to %s: %s" % (old_name, new_name, e))
else:
fs = os.path.join(tools.config['root_path'], 'filestore')
if os.path.exists(os.path.join(fs, old_name)):
os.rename(os.path.join(fs, old_name), os.path.join(fs, new_name))
logger.info('RENAME DB: %s -> %s' % (old_name, new_name))
finally:
cr.close()
return True
def exp_db_exist(self, db_name):
        ## Not quite true: in fact, this checks whether a connection to the database is possible. The database may exist regardless.
return bool(sql_db.db_connect(db_name))
def exp_list(self, document=False):
if not tools.config['list_db'] and not document:
raise Exception('AccessDenied')
db = sql_db.db_connect('template1')
cr = db.cursor()
try:
try:
db_user = tools.config["db_user"]
if not db_user and os.name == 'posix':
import pwd
db_user = pwd.getpwuid(os.getuid())[0]
if not db_user:
cr.execute("select decode(usename, 'escape') from pg_user where usesysid=(select datdba from pg_database where datname=%s)", (tools.config["db_name"],))
res = cr.fetchone()
db_user = res and str(res[0])
if db_user:
cr.execute("select decode(datname, 'escape') from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in ('template0', 'template1', 'postgres') order by datname", (db_user,))
else:
cr.execute("select decode(datname, 'escape') from pg_database where datname not in('template0', 'template1','postgres') order by datname")
res = [str(name) for (name,) in cr.fetchall()]
except Exception:
res = []
finally:
cr.close()
allowed_res = tools.config.get_misc('databases', 'allowed')
if allowed_res:
dbs_allowed = [ x.strip() for x in allowed_res.split(' ')]
res_o = res
res = []
for s in res_o:
if s in dbs_allowed:
res.append(s)
res.sort()
return res
def exp_change_admin_password(self, new_password):
tools.config['admin_passwd'] = new_password
tools.config.save()
return True
def exp_list_lang(self):
return tools.scan_languages()
def exp_server_version(self):
""" Return the version of the server
Used by the client to verify the compatibility with its own version
"""
return release.version
def exp_migrate_databases(self,databases):
from osv.orm import except_orm
from osv.osv import except_osv
l = logging.getLogger('migration')
for db in databases:
try:
l.info('migrate database %s' % (db,))
tools.config['update']['base'] = True
pooler.restart_pool(db, force_demo=False, update_module=True)
except except_orm, inst:
self.abortResponse(1, inst.name, 'warning', inst.value)
except except_osv, inst:
self.abortResponse(1, inst.name, inst.exc_type, inst.value)
except Exception:
l.exception("Migrate database %s failed" % db)
raise
return True
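# Instantiating the service registers it: ExportService.__init__ records the
# instance under its name and joinGroup('web-services') exposes it to the
# RPC dispatchers (see db.__init__ above).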
db()
class _ObjectService(baseExportService):
"A common base class for those who have fn(db, uid, password,...) "
def common_dispatch(self, method, auth, params):
(db, uid, passwd ) = params[0:3]
params = params[3:]
security.check(db,uid,passwd)
cr = pooler.get_db(db).cursor()
fn = getattr(self, 'exp_'+method)
res = fn(cr, uid, *params)
cr.commit()
cr.close()
return res
class common(_ObjectService):
_auth_commands = { 'db-broken': [ 'ir_set','ir_del', 'ir_get' ],
'pub': ['about', 'timezone_get', 'get_server_environment',
'login_message','get_stats', 'check_connectivity',
'list_http_services', 'get_options'],
'root': ['get_available_updates', 'get_migration_scripts',
'set_loglevel', 'set_obj_debug', 'set_pool_debug',
'set_logger_level', 'get_pgmode', 'set_pgmode',
'get_loglevel', 'get_sqlcount', 'get_sql_stats',
'reset_sql_stats',
'get_garbage_stats',
'get_os_time']
}
def __init__(self,name="common"):
_ObjectService.__init__(self,name)
self.joinGroup("web-services")
def dispatch(self, method, auth, params):
logger = logging.getLogger('web-services')
if method in [ 'ir_set','ir_del', 'ir_get' ]:
return self.common_dispatch(method,auth,params)
if method == 'login':
            # In this old dispatcher, we do NOT update the auth proxy
res = security.login(params[0], params[1], params[2])
msg = res and 'successful login' or 'bad login or password'
# TODO log the client ip address..
logger.info("%s from '%s' using database '%s'" % (msg, params[1], params[0].lower()))
return res or False
elif method == 'logout':
if auth:
auth.logout(params[1])
            logger.info('Logout %s from database %s' % (params[1], params[0]))  # assumes the same (db, login, ...) param layout as 'login'
return True
elif method in self._auth_commands['pub']:
pass
elif method in self._auth_commands['root']:
passwd = params[0]
params = params[1:]
security.check_super(passwd)
else:
raise Exception("Method not found: %s" % method)
fn = getattr(self, 'exp_'+method)
return fn(*params)
def new_dispatch(self, method, auth, params, auth_domain=None):
        # Double-check that we have the correct authentication:
if method == 'login':
if not (auth and auth.provider.domain == 'db'):
raise Exception("Method not found: %s" % method)
# By this time, an authentication should already be done at the
# http level
if not auth.last_auth:
return False
acds = auth.auth_creds[auth.last_auth]
assert(acds[0] == params[1])
assert(acds[1] == params[2])
assert(acds[2] == params[0])
assert acds[3] != False and acds[3] != None
log = logging.getLogger('web-service')
log.info("login from '%s' using database '%s'" % (params[1], params[0].lower()))
return acds[3]
else:
return super(common, self).new_dispatch(method, auth, params, auth_domain)
def exp_ir_set(self, cr, uid, keys, args, name, value, replace=True, isobject=False):
res = ir.ir_set(cr,uid, keys, args, name, value, replace, isobject)
return res
def exp_ir_del(self, cr, uid, id):
res = ir.ir_del(cr,uid, id)
return res
def exp_ir_get(self, cr, uid, keys, args=None, meta=None, context=None):
if not args:
args=[]
if not context:
context={}
res = ir.ir_get(cr,uid, keys, args, meta, context)
return res
def exp_about(self, extended=False):
"""Return information about the OpenERP Server.
@param extended: if True then return version info
@return string if extended is False else tuple
"""
info = _('''
OpenERP is an ERP+CRM program for small and medium businesses.
The whole source code is distributed under the terms of the
GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - Tiny sprl''')
if extended:
return info, release.version
return info
def exp_timezone_get(self, *args):
return tools.misc.get_server_timezone()
def exp_get_available_updates(self, contract_id, contract_password):
import tools.maintenance as tm
try:
rc = tm.remote_contract(contract_id, contract_password)
if not rc.id:
raise tm.RemoteContractException('This contract does not exist or is not active')
return rc.get_available_updates(rc.id, addons.get_modules_with_version())
except tm.RemoteContractException, e:
self.abortResponse(1, 'Migration Error', 'warning', str(e))
def exp_get_migration_scripts(self, contract_id, contract_password):
l = logging.getLogger('migration')
import tools.maintenance as tm
try:
rc = tm.remote_contract(contract_id, contract_password)
if not rc.id:
raise tm.RemoteContractException('This contract does not exist or is not active')
if rc.status != 'full':
raise tm.RemoteContractException('Can not get updates for a partial contract')
l.info('starting migration with contract %s' % (rc.name,))
zips = rc.retrieve_updates(rc.id, addons.get_modules_with_version())
from shutil import rmtree, copytree, copy
backup_directory = os.path.join(tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
if zips and not os.path.isdir(backup_directory):
                l.info('Create a new backup directory to store the old modules: %s' % (backup_directory,))
os.makedirs(backup_directory)
for module in zips:
l.info('upgrade module %s' % (module,))
mp = addons.get_module_path(module)
if mp:
if os.path.isdir(mp):
copytree(mp, os.path.join(backup_directory, module))
if os.path.islink(mp):
os.unlink(mp)
else:
rmtree(mp)
else:
                        copy(mp + '.zip', backup_directory)
os.unlink(mp + '.zip')
try:
try:
base64_decoded = base64.decodestring(zips[module])
except Exception:
l.exception('unable to read the module %s' % (module,))
raise
zip_contents = StringIO(base64_decoded)
zip_contents.seek(0)
try:
try:
tools.extract_zip_file(zip_contents, tools.config['addons_path'] )
except Exception:
l.exception('unable to extract the module %s' % (module, ))
                                    rmtree(os.path.join(tools.config['addons_path'], module))
raise
finally:
zip_contents.close()
except Exception:
l.exception('restore the previous version of the module %s' % (module, ))
nmp = os.path.join(backup_directory, module)
                        if os.path.isdir(nmp):
                            # copytree requires a non-existing destination, so
                            # restore into the module's own subdirectory
                            copytree(nmp, os.path.join(tools.config['addons_path'], module))
                        else:
                            copy(nmp + '.zip', tools.config['addons_path'])
raise
return True
except tm.RemoteContractException, e:
self.abortResponse(1, 'Migration Error', 'warning', str(e))
except Exception, e:
l.exception("%s" % e)
raise
def exp_get_server_environment(self):
os_lang = '.'.join( [x for x in locale.getdefaultlocale() if x] )
if not os_lang:
os_lang = 'NOT SET'
environment = '\nEnvironment Information : \n' \
'System : %s\n' \
'OS Name : %s\n' \
%(platform.platform(), platform.os.name)
if os.name == 'posix':
if platform.system() == 'Linux':
lsbinfo = os.popen('lsb_release -a').read()
environment += '%s'%(lsbinfo)
else:
environment += 'Your System is not lsb compliant\n'
environment += 'Operating System Release : %s\n' \
'Operating System Version : %s\n' \
'Operating System Architecture : %s\n' \
'Operating System Locale : %s\n'\
'Python Version : %s\n'\
'OpenERP-Server Version : %s'\
%(platform.release(), platform.version(), platform.architecture()[0],
os_lang, platform.python_version(),release.version)
return environment
def exp_login_message(self):
return tools.config.get('login_message', False)
def exp_set_loglevel(self, loglevel, logger=None):
l = netsvc.Logger()
l.set_loglevel(loglevel, logger)
return True
def exp_set_logger_level(self, logger, loglevel):
l = netsvc.Logger()
l.set_logger_level(logger, loglevel)
return True
def exp_get_loglevel(self, logger=None):
l = netsvc.Logger()
return l.get_loglevel(logger)
def exp_get_pgmode(self):
return sql_db.Cursor.get_pgmode()
def exp_set_pgmode(self, pgmode):
assert pgmode in ['old', 'sql', 'pgsql', 'pg84', 'pg90', 'pg91', 'pg92']
sql_db.Cursor.set_pgmode(pgmode)
return True
def exp_set_obj_debug(self,db, obj, do_debug):
log = logging.getLogger('web-services')
log.info("setting debug for %s@%s to %s" %(obj, db, do_debug))
ls = netsvc.LocalService('object_proxy')
res = ls.set_debug(db, obj, do_debug)
return res
def exp_set_pool_debug(self,db, do_debug):
sql_db._Pool.set_pool_debug(do_debug)
return None
def exp_get_stats(self):
import threading
res = "OpenERP server: %d threads\n" % threading.active_count()
res += netsvc.Server.allStats()
res += "\n"
res += netsvc.ExportService.allStats()
try:
import gc
if gc.isenabled():
res += "\nPython GC enabled: %d:%d:%d objs." % \
gc.get_count()
except ImportError: pass
try:
from tools import lru
res += "\nLRU counts: LRU: %d, nodes: %d" % \
(sys.getrefcount(lru.LRU), sys.getrefcount(lru.LRUNode))
except Exception: pass
return res
def exp_list_http_services(self, *args):
from service import http_server
return http_server.list_http_services(*args)
def exp_check_connectivity(self):
return bool(sql_db.db_connect('template1'))
def exp_get_os_time(self):
return os.times()
def exp_get_sqlcount(self):
logger = logging.getLogger('db.cursor')
if not logger.isEnabledFor(logging.DEBUG_SQL):
logger.warning("Counters of SQL will not be reliable unless DEBUG_SQL is set at the server's config.")
return sql_db.sql_counter
def exp_get_sql_stats(self):
"""Retrieve the sql statistics from the pool.
Unfortunately, XML-RPC won't allow tuple indexes, so we have to
rearrange the dict.
"""
ret = {}
for skey, val in sql_db._Pool.sql_stats.items():
sk0 = skey[0]
if not isinstance(skey[0], str):
sk0 = str(skey[0])
ret.setdefault(sk0,{})
ret[sk0][skey[1]] = val
return ret
def exp_reset_sql_stats(self):
sql_db._Pool.sql_stats = {}
return True
def exp_get_garbage_stats(self):
import gc
garbage_count = {}
for garb in gc.garbage:
try:
name = '%s.%s' % (garb.__class__.__module__, garb.__class__.__name__)
garbage_count.setdefault(name, 0)
garbage_count[name] += 1
except Exception, e:
print "Exception:", e
continue
# Perhaps list the attributes of garb that are instances of object
return garbage_count
def exp_get_options(self, module=None):
"""Return a list of options, keywords, that the server supports.
Apart from the server version, which should be a linear number,
some server branches may support extra API functionality. By this
call, the server can advertise these extensions to compatible
clients.
"""
if module:
raise NotImplementedError('No module-specific options yet')
return release.server_options
common()
class objects_proxy(baseExportService):
_auth_commands = { 'db': ['execute','exec_workflow', 'exec_dict'], 'root': ['obj_list',] }
def __init__(self, name="object"):
netsvc.ExportService.__init__(self,name)
self.joinGroup('web-services')
self._ls = netsvc.LocalService('object_proxy')
def dispatch(self, method, auth, params):
if method in self._auth_commands['root']:
passwd = params[0]
params = params[1:]
security.check_super(passwd)
fn = getattr(self._ls, method)
res = fn(*params, auth_proxy=auth)
return res
(db, uid, passwd ) = params[0:3]
params = params[3:]
if method not in ['execute','exec_workflow', 'exec_dict', 'obj_list']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
fn = getattr(self._ls, method)
res = fn(db, uid, *params, auth_proxy=auth)
return res
def new_dispatch(self, method, auth, params, auth_domain=None):
# Double check, that we have the correct authentication:
if not auth:
raise Exception("Not auth domain for object service")
if auth.provider.domain not in self._auth_commands:
raise Exception("Invalid domain for object service")
if method not in self._auth_commands[auth.provider.domain]:
raise Exception("Method not found: %s" % method)
fn = getattr(self._ls, method)
if auth.provider.domain == 'root':
res = fn(*params, auth_proxy=auth)
return res
acds = auth.auth_creds[auth.last_auth]
db, uid = (acds[2], acds[3])
res = fn(db, uid, *params, auth_proxy=auth)
return res
def stats(self, _pre_msg='No statistics'):
try:
from osv import orm
msg = ''
for klass in ('browse_record', 'browse_record_list', 'browse_null',
'orm_memory', 'orm'):
msg += "%s[%d] " % (klass, sys.getrefcount(getattr(orm,klass)))
except Exception, e:
msg = str(e)
return "%s (%s.%s): %s" % ('object',
self.__class__.__module__, self.__class__.__name__,
msg)
objects_proxy()
class dbExportDispatch:
""" Intermediate class for those ExportServices that call fn(db, uid, ...)
These classes don't need the cursor, but just the name of the db
"""
def new_dispatch(self, method, auth, params, auth_domain=None):
# Double check, that we have the correct authentication:
if not auth:
domain='pub'
else:
domain=auth.provider.domain
if method not in self._auth_commands[domain]:
raise Exception("Method not found: %s" % method)
fn = getattr(self, 'exp_'+method)
if domain == 'db':
u, p, db, uid = auth.auth_creds[auth.last_auth]
res = fn(db, uid, *params)
return res
else:
return fn(*params)
#
# Wizard ID: 1
# - None = end of wizard
#
# Wizard Type: 'form'
# - form
# - print
#
# Wizard datas: {}
# TODO: change local request to OSE request/reply pattern
#
class wizard(dbExportDispatch,baseExportService):
_auth_commands = { 'db': ['execute','create'] }
def __init__(self, name='wizard'):
netsvc.ExportService.__init__(self,name)
self.joinGroup('web-services')
self.id = 0
self.wiz_datas = {}
self.wiz_name = {}
self.wiz_uid = {}
def dispatch(self, method, auth, params):
(db, uid, passwd ) = params[0:3]
params = params[3:]
if method not in ['execute','create']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
fn = getattr(self, 'exp_'+method)
res = fn(db, uid, *params)
return res
def _execute(self, db, uid, wiz_id, datas, action, context):
self.wiz_datas[wiz_id].update(datas)
wiz = netsvc.LocalService('wizard.'+self.wiz_name[wiz_id])
return wiz.execute(db, uid, self.wiz_datas[wiz_id], action, context)
def exp_create(self, db, uid, wiz_name, datas=None):
if not datas:
datas={}
#FIXME: this is not thread-safe
self.id += 1
self.wiz_datas[self.id] = {}
self.wiz_name[self.id] = wiz_name
self.wiz_uid[self.id] = uid
return self.id
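    # A thread-safe variant of the counter in exp_create() would guard it with
    # a lock, mirroring report_spool's id_protect semaphore (sketch only; this
    # class has no such attribute yet):
    #     self.id_protect.acquire()
    #     try:
    #         self.id += 1
    #         wiz_id = self.id
    #     finally:
    #         self.id_protect.release()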
def exp_execute(self, db, uid, wiz_id, datas, action='init', context=None):
if not context:
context={}
if wiz_id in self.wiz_uid:
if self.wiz_uid[wiz_id] == uid:
return self._execute(db, uid, wiz_id, datas, action, context)
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'WizardNotFound'
wizard()
#
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
# False -> True
#
class ExceptionWithTraceback(Exception):
def __init__(self, msg, tb):
self.message = msg
self.traceback = tb
self.args = (msg, tb)
class _report_spool_job(threading.Thread):
def __init__(self, id, db, uid, obj, ids, datas=None, context=None):
"""A report job, that should be spooled in the background
@param id the index at the parent spool list, shall not be trusted,
only useful for the repr()
@param db the database name
@param uid the calling user
@param obj the report orm object (string w/o the 'report.' prefix)
@param ids of the obj model
@param datas dictionary of input to report
"""
threading.Thread.__init__(self)
self.id = id
self.uid = uid
self.db = db
self.report_obj = obj
self.ids = ids
self.datas = datas
self.context = context
if self.context is None:
self.context = {}
        self.result = False
        self.cr = None  # assigned in run(); initialised here so its finally clause is safe
self.format = None
self.state = False
self.exception = None
self.name = "report-%s-%s" % (self.report_obj, self.id)
def run(self):
try:
self.cr = pooler.get_db(self.db).cursor()
self.go()
self.cr.commit()
except Exception, e:
logger = logging.getLogger('web-services')
logger.exception('Exception: %s' % (e))
if hasattr(e, 'name') and hasattr(e, 'value'):
self.exception = ExceptionWithTraceback(tools.ustr(e.name), tools.ustr(e.value))
else:
self.exception = e
self.state = True
return
except KeyboardInterrupt, e:
tb = sys.exc_info()
logger = logging.getLogger('web-services')
logger.exception('Interrupt of report: %r' % self)
self.exception = ExceptionWithTraceback('KeyboardInterrupt of report: %r' % self, tb)
self.state = True
# we don't need to raise higher, because we already printed the tb
# and are exiting the thread loop.
return
finally:
if self.cr:
self.cr.close()
self.cr = None
return True
def stop(self):
"""Try to kill the job.
So far there is no genuinely good way to stop the thread (is there?),
so we can at least kill the cursor, so that the rest of the job borks.
"""
self.must_stop = True
if self.cr:
self.cr.rollback()
self.cr.close()
self.cr = None
def __repr__(self):
"""Readable name of report job
"""
return "<Report job #%s: %s.%s>" % (self.id, self.db, self.report_obj)
def go(self,):
cr = self.cr
obj = netsvc.LocalService('report.' + self.report_obj)
(result, format) = obj.create(cr, self.uid, self.ids, self.datas, self.context)
if not result:
tb = sys.exc_info()
self.exception = ExceptionWithTraceback('RML is not available at specified location or not enough data to print!', tb)
self.result = result
self.format = format
self.state = True
return True
class report_spool(dbExportDispatch, baseExportService):
_auth_commands = { 'db': ['report','report_get', 'report_stop'] }
def __init__(self, name='report'):
netsvc.ExportService.__init__(self, name)
self.joinGroup('web-services')
self._reports = {}
self.id = 0
self.id_protect = threading.Semaphore()
def dispatch(self, method, auth, params):
(db, uid, passwd ) = params[0:3]
params = params[3:]
if method not in ['report','report_get', 'report_stop']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
fn = getattr(self, 'exp_' + method)
res = fn(db, uid, *params)
return res
def stats(self, _pre_msg=None):
ret = baseExportService.stats(self, _pre_msg='%d reports' % len(self._reports))
for id, r in self._reports.items():
if not r:
continue
ret += '\n [%d] ' % id
if r.is_alive() or not r.state:
ret += 'running '
else:
ret += 'finished '
ret += repr(r)
return ret
def exp_report(self, db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self.id_protect.acquire()
self.id += 1
id = self.id
self.id_protect.release()
self._reports[id] = _report_spool_job(id, db, uid, object, ids, datas=datas, context=context)
self._reports[id].start()
return id
def _check_report(self, report_id):
report = self._reports[report_id]
exc = report.exception
if exc:
self.abortResponse(1, exc.__class__.__name__, 'warning', exc.message)
res = {'state': report.state }
if res['state']:
if tools.config['reportgz']:
import zlib
res2 = zlib.compress(report.result)
res['code'] = 'zlib'
else:
#CHECKME: why is this needed???
if isinstance(report.result, unicode):
res2 = report.result.encode('latin1', 'replace')
else:
res2 = report.result
if res2:
res['result'] = base64.encodestring(res2)
res['format'] = report.format
self.id_protect.acquire()
del self._reports[report_id]
self.id_protect.release()
return res
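    # When res['code'] == 'zlib' above, a client recovers the raw document with
    # (sketch): zlib.decompress(base64.decodestring(res['result']))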
def exp_report_get(self, db, uid, report_id):
if report_id in self._reports:
if self._reports[report_id].uid == uid:
return self._check_report(report_id)
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'ReportNotFound'
def exp_report_stop(self, db, uid, report_id, timeout=5.0):
""" Stop a running report, wait for it to finish
        @return True if stopped, False if already finished,
Exception('Timeout') if cannot stop
Note that after a "report_stop" request, the caller shall
do one more "report_get" to fetch the exception and free
the job object.
"""
if report_id in self._reports:
report = self._reports[report_id]
if report.uid == uid or uid == 1:
if report.is_alive() and not report.state:
report.stop()
report.join(timeout=timeout)
if report.is_alive():
raise Exception('Timeout')
return True
else:
return False
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'ReportNotFound'
report_spool()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
xrg/openerp-server
|
bin/service/web_services.py
|
Python
|
agpl-3.0
| 41,970
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from users.models import Hijo
class Post(models.Model):
    title = models.TextField(null=True, blank=True) # Title
    content = models.TextField(null=True, blank=True) # Post content
url = models.URLField(max_length=200, null=True, blank=True)
hijo = models.ForeignKey(Hijo, blank=True, null=True)
date_creation = models.DateTimeField(auto_now_add=True)
published_by = models.ForeignKey(User, blank=True, null=True)
|
pupils/pupils
|
pupils/tablon/models.py
|
Python
|
agpl-3.0
| 566
|
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614, C0103
import sys
import os
import imp
from path import path
from warnings import simplefilter
from django.utils.translation import ugettext_lazy as _
from .discussionsettings import *
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from lms.lib.xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "Your Platform Name Here"
CC_MERCHANT_NAME = PLATFORM_NAME
PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/YourPlatformFacebookAccount"
PLATFORM_TWITTER_ACCOUNT = "@YourPlatformTwitterAccount"
PLATFORM_TWITTER_URL = "https://twitter.com/YourPlatformTwitterAccount"
PLATFORM_MEETUP_URL = "http://www.meetup.com/YourMeetup"
PLATFORM_LINKEDIN_URL = "http://www.linkedin.com/company/YourPlatform"
PLATFORM_GOOGLE_PLUS_URL = "https://plus.google.com/YourGooglePlusAccount/"
COURSEWARE_ENABLED = True
SCHOOL_ENABLED = True
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
# Features
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
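    # (hypothetical shape: COURSE_LISTINGS = {'univx': ['UnivX/C101/2013_Fall'],
    #  'default': [...]}; see dev_int.py for the real example)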
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
    # external access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Toggles OAuth2 authentication provider
'ENABLE_OAUTH2_PROVIDER': False,
# Can be turned off if course lists need to be hidden. Effects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# Analytics experiments - shows instructor analytics tab in LMS instructor dashboard.
# Enabling this feature depends on installation of a separate analytics server.
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# Segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
    # Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': False,
# Enable legacy instructor dashboard
'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True,
# Is this an edX-owned domain? (used on instructor dashboard)
'IS_EDX_DOMAIN': False,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
    # Allow use of the hint management instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT': False,
# Allow users to enroll with methods other than just honor code certificates
'MULTIPLE_ENROLLMENT_ROLES': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': False,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': False,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the new instructor dashboard will write
# grades CSV files to S3 and give links for downloads.
'ENABLE_S3_GRADE_DOWNLOADS': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# Give course staff unrestricted access to grade downloads (if set to False,
# only edX superusers can perform the downloads)
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"],
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which enable embargoing for particular courses
'EMBARGO': False,
# Toggles the embargo site functionality, which enable embargoing for the whole site
'SITE_EMBARGOED': False,
# Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
# that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
# defaults, so that we maintain current behavior
'ALLOW_WIKI_ROOT_ACCESS': True,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
# if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False,
# Toggle to enable alternate urls for marketing links
'ENABLE_MKTG_SITE': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Show a "Download your certificate" on the Progress page if the lowest
# nonzero grade cutoff is met
'SHOW_PROGRESS_SUCCESS_BUTTON': False,
# When a logged in user goes to the homepage ('/') should the user be
# redirected to the dashboard - this is default Open edX behavior. Set to
# False to not redirect the user
'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER': True,
# Expose Mobile REST API. Note that if you use this, you must also set
# ENABLE_OAUTH2_PROVIDER to True
'ENABLE_MOBILE_REST_API': False,
# Enable the new dashboard, account, and profile pages
'ENABLE_NEW_DASHBOARD': False,
# Enable the combined login/registration form
'ENABLE_COMBINED_LOGIN_REGISTRATION': False,
# Show a section in the membership tab of the instructor dashboard
# to allow an upload of a CSV file that contains a list of new accounts to create
# and register for course.
'ALLOW_AUTOMATED_SIGNUPS': False,
# Display demographic data on the analytics tab in the instructor dashboard.
'DISPLAY_ANALYTICS_DEMOGRAPHICS': True,
# Enable display of enrollment counts in instructor and legacy analytics dashboard
'DISPLAY_ANALYTICS_ENROLLMENTS': True,
}
# Ignore static asset files on import which match this pattern
ASSET_IGNORE_REGEX = r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)"
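# e.g. '._thumb.png', '.DS_Store' and 'notes.txt~' all match and are skipped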
# Used for A/B testing
DEFAULT_GROUPS = []
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
# Used with XQueue
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
DATA_DIR = COURSES_ROOT
# TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For Node.js
system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules')
node_paths = [
COMMON_ROOT / "static/js/vendor",
COMMON_ROOT / "static/coffee/src",
system_node_path,
]
NODE_PATH = ':'.join(node_paths)
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
# Where to look for a status message
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
############################ OpenID Provider ##################################
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://example.com/oauth2'
# OpenID Connect claim handlers
OAUTH_OIDC_ID_TOKEN_HANDLERS = (
'oauth2_provider.oidc.handlers.BasicIDTokenHandler',
'oauth2_provider.oidc.handlers.ProfileHandler',
'oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.IDTokenHandler'
)
OAUTH_OIDC_USERINFO_HANDLERS = (
'oauth2_provider.oidc.handlers.BasicUserInfoHandler',
'oauth2_provider.oidc.handlers.ProfileHandler',
'oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.UserInfoHandler'
)
################################## EDX WEB #####################################
# This is where we stick our compiled template files. Most of the app uses Mako
# templates
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_lms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates']
# This is where Django Template lookup is defined. There are a few of these
# still left lying around.
TEMPLATE_DIRS = [
PROJECT_ROOT / "templates",
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
# Added for django-wiki
'django.core.context_processors.media',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
# Hack to get required link URLs to password reset templates
'edxmako.shortcuts.marketing_link_context_processor',
# Allows the open edX footer to be leveraged in Django Templates.
'edxmako.shortcuts.open_source_footer_context_processor',
# Shoppingcart processor (detects if request.user has a cart)
'shoppingcart.context_processor.user_has_cart_context_processor',
# Allows the open edX footer to be leveraged in Django Templates.
'edxmako.shortcuts.microsite_footer_context_processor',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
# FIXME:
# We should have separate S3 staged URLs in case we need to make changes to
# these assets and test them.
LIB_URL = '/static/js/'
# Dev machines shouldn't need the book
# BOOK_URL = '/static/book/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys
RSS_TIMEOUT = 600
# Configuration option for when we want to grab server error pages
STATIC_GRAB = False
DEV_CONTENT = True
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login'
LOGIN_URL = EDX_ROOT_URL + '/accounts/login'
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
### Dark code. Should be enabled in local settings for devel.
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome)
WIKI_ENABLED = False
###
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
'6.002x_Fall_2012': {
'number': '6.002x',
'title': 'Circuits and Electronics',
'xmlpath': '6002x/',
'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
}
}
# IP addresses that are allowed to reload the course, etc.
# TODO (vshnayder): Will probably need to change as we get real access control in.
LMS_MIGRATION_ALLOWED_IPS = []
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
# Note: these intentionally greedily grab all chars up to the next slash including any pluses
# DHM: I really wanted to ensure the separators were the same (+ or /) but all patterns I tried had
# too many inadvertent side effects :-(
COURSE_KEY_PATTERN = r'(?P<course_key_string>[^/+]+(/|\+)[^/+]+(/|\+)[^/]+)'
COURSE_ID_PATTERN = COURSE_KEY_PATTERN.replace('course_key_string', 'course_id')
COURSE_KEY_REGEX = COURSE_KEY_PATTERN.replace('P<course_key_string>', ':')
USAGE_KEY_PATTERN = r'(?P<usage_key_string>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
ASSET_KEY_PATTERN = r'(?P<asset_key_string>(?:/?c4x(:/)?/[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
USAGE_ID_PATTERN = r'(?P<usage_id>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
COURSE_KIND_PATTERN = r'(?P<course_kind>(?:[^/]+)|(?:[^/]+))'
SCHOOL_NAME_PATTERN = r'(?P<school_key_string>(?:[^/]+)|(?:[^/]+))'
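# e.g. a deprecated-style id such as 'MITx/6.002x/2012_Fall' (or its '+'-separated
# form 'MITx+6.002x+2012_Fall') matches COURSE_KEY_PATTERN above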
############################## EVENT TRACKING #################################
# FIXME: Should we be doing this truncation?
TRACK_MAX_EVENT = 50000
DEBUG_TRACK_LOG = False
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat', r'^/segmentio/event']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
},
{
'ENGINE': 'track.shim.VideoEventProcessor'
}
]
# Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag.
# In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
EVENT_TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
TRACKING_SEGMENTIO_WEBHOOK_SECRET = None
TRACKING_SEGMENTIO_ALLOWED_TYPES = ['track']
TRACKING_SEGMENTIO_SOURCE_MAP = {
'analytics-android': 'mobile',
'analytics-ios': 'mobile',
}
######################## GOOGLE ANALYTICS ###########################
GOOGLE_ANALYTICS_ACCOUNT = None
GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY'
######################## OPTIMIZELY ###########################
OPTIMIZELY_PROJECT_ID = None
######################## subdomain specific settings ###########################
COURSE_LISTINGS = {}
SUBDOMAIN_BRANDING = {}
VIRTUAL_UNIVERSITIES = []
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in the LMS
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############# ModuleStore Configuration ##########
MODULESTORE_BRANCH = 'published-only'
CONTENTSTORE = None
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
# If 'asset_collection' defined, it'll be used
# as the collection name for asset metadata.
# Otherwise, a default collection name will be used.
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
},
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
]
}
}
}
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
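# Example override in an environment settings file (hypothetical path):
# CODE_JAIL = {'python_bin': '/opt/sandbox/bin/python', 'user': 'sandbox', 'limits': {'CPU': 1}}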
# Some courses are allowed to run unsafe code. This is a list of regexes, one
# of them must match the course id for that course to run unsafe code.
#
# For example:
#
# COURSES_WITH_UNSAFE_CODE = [
# r"Harvard/XY123.1/.*"
# ]
COURSES_WITH_UNSAFE_CODE = []
############################### DJANGO BUILT-INS ###############################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
USE_TZ = True
SESSION_COOKIE_SECURE = False
# CMS base
CMS_BASE = 'localhost:8001'
# Site info
SITE_ID = 1
SITE_NAME = "example.com"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*'
# Platform Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'registration@example.com'
DEFAULT_FEEDBACK_EMAIL = 'feedback@example.com'
SERVER_EMAIL = 'devops@example.com'
TECH_SUPPORT_EMAIL = 'technical@example.com'
CONTACT_EMAIL = 'info@example.com'
BUGS_EMAIL = 'bugs@example.com'
UNIVERSITY_EMAIL = 'university@example.com'
PRESS_EMAIL = 'press@example.com'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
# these languages display right to left
LANGUAGES_BIDI = ("en@rtl", "he", "ar", "fa", "ur", "fa-ir")
# Sourced from http://www.localeplanet.com/icu/ and wikipedia
LANGUAGES = (
('en', u'English'),
('en@rtl', u'English (right-to-left)'),
    ('eo', u'Dummy Language (Esperanto)'), # Dummy language used for testing
('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod)
('am', u'አማርኛ'), # Amharic
('ar', u'العربية'), # Arabic
('az', u'azərbaycanca'), # Azerbaijani
('bg-bg', u'български (България)'), # Bulgarian (Bulgaria)
('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh)
('bn-in', u'বাংলা (ভারত)'), # Bengali (India)
('bs', u'bosanski'), # Bosnian
('ca', u'Català'), # Catalan
('ca@valencia', u'Català (València)'), # Catalan (Valencia)
('cs', u'Čeština'), # Czech
('cy', u'Cymraeg'), # Welsh
('da', u'dansk'), # Danish
('de-de', u'Deutsch (Deutschland)'), # German (Germany)
('el', u'Ελληνικά'), # Greek
('en-uk', u'English (United Kingdom)'), # English (United Kingdom)
('en@lolcat', u'LOLCAT English'), # LOLCAT English
('en@pirate', u'Pirate English'), # Pirate English
('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America)
('es-ar', u'Español (Argentina)'), # Spanish (Argentina)
('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador)
('es-es', u'Español (España)'), # Spanish (Spain)
('es-mx', u'Español (México)'), # Spanish (Mexico)
('es-pe', u'Español (Perú)'), # Spanish (Peru)
('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia)
('eu-es', u'euskara (Espainia)'), # Basque (Spain)
('fa', u'فارسی'), # Persian
('fa-ir', u'فارسی (ایران)'), # Persian (Iran)
('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland)
('fil', u'Filipino'), # Filipino
('fr', u'Français'), # French
('gl', u'Galego'), # Galician
('gu', u'ગુજરાતી'), # Gujarati
('he', u'עברית'), # Hebrew
('hi', u'हिन्दी'), # Hindi
('hr', u'hrvatski'), # Croatian
('hu', u'magyar'), # Hungarian
('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia)
('id', u'Bahasa Indonesia'), # Indonesian
('it-it', u'Italiano (Italia)'), # Italian (Italy)
('ja-jp', u'日本語 (日本)'), # Japanese (Japan)
('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan)
('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), # Khmer (Cambodia)
('kn', u'ಕನ್ನಡ'), # Kannada
('ko-kr', u'한국어 (대한민국)'), # Korean (Korea)
('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania)
('ml', u'മലയാളം'), # Malayalam
('mn', u'Монгол хэл'), # Mongolian
('mr', u'मराठी'), # Marathi
('ms', u'Bahasa Melayu'), # Malay
('nb', u'Norsk bokmål'), # Norwegian Bokmål
('ne', u'नेपाली'), # Nepali
('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands)
('or', u'ଓଡ଼ିଆ'), # Oriya
('pl', u'Polski'), # Polish
('pt-br', u'Português (Brasil)'), # Portuguese (Brazil)
('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal)
('ro', u'română'), # Romanian
('ru', u'Русский'), # Russian
('si', u'සිංහල'), # Sinhala
('sk', u'Slovenčina'), # Slovak
('sl', u'Slovenščina'), # Slovenian
('sq', u'shqip'), # Albanian
('sr', u'Српски'), # Serbian
('sv', u'svenska'), # Swedish
('sw', u'Kiswahili'), # Swahili
('ta', u'தமிழ்'), # Tamil
('te', u'తెలుగు'), # Telugu
('th', u'ไทย'), # Thai
('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey)
    ('uk', u'Українська'), # Ukrainian
('ur', u'اردو'), # Urdu
('vi', u'Tiếng Việt'), # Vietnamese
('uz', u'Ўзбек'), # Uzbek
('zh-cn', u'中文 (简体)'), # Chinese (China)
('zh-hk', u'中文 (香港)'), # Chinese (Hong Kong)
('zh-tw', u'中文 (台灣)'), # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Guidelines for translators
TRANSLATORS_GUIDE = 'https://github.com/edx/edx-platform/blob/master/docs/en_us/developers/source/i18n_translators_guide.rst'
#################################### GITHUB #######################################
# gitreload is used in LMS-workflow to pull content from github
# gitreload requests are only allowed from these IP addresses, which are
# the advertised public IPs of the github WebHook servers.
# These are listed, eg at https://github.com/edx/edx-platform/admin/hooks
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
#################################### AWS #######################################
# S3BotoStorage insists on a timeout for uploaded assets. We should make it
# permanent instead, but rather than trying to figure out exactly where that
# setting is, I'm just bumping the expiration time to something absurd (10
# years). This is only used if DEFAULT_FILE_STORAGE is overridden to use S3
# in the global settings.py
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years
################################# SIMPLEWIKI ###################################
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
################################# WIKI ###################################
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
##### Feedback submission mechanism #####
FEEDBACK_SUBMISSION_EMAIL = None
##### Zendesk #####
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
##### shoppingcart Payment #####
PAYMENT_SUPPORT_EMAIL = 'payment@example.com'
##### Using cybersource by default #####
CC_PROCESSOR_NAME = 'CyberSource'
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
},
'CyberSource2': {
"PURCHASE_ENDPOINT": '',
"SECRET_KEY": '',
"ACCESS_KEY": '',
"PROFILE_ID": '',
}
}
# Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
# Members of this group are allowed to generate payment reports
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
################################# open ended grading config #####################
# By setting up the default settings with an incorrect user name and password,
# we will get an error when attempting to connect
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://example.com/peer_grading',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
# Used for testing, debugging peer grading
MOCK_PEER_GRADING = False
# Used for testing, debugging staff grading
MOCK_STAFF_GRADING = False
################################# Jasmine ##################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'edxmako.makoloader.MakoFilesystemLoader',
'edxmako.makoloader.MakoAppDirectoriesLoader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'microsite_configuration.middleware.MicrositeMiddleware',
'django_comment_client.middleware.AjaxExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# Instead of AuthenticationMiddleware, we use a cached backed version
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
# Adds user tags to tracking events
# Must go before TrackMiddleware, to get the context set up
'user_api.middleware.UserTagsEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'splash.middleware.SplashMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'geoinfo.middleware.CountryMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Allows us to set user preferences
# should be after DarkLangMiddleware
'lang_pref.middleware.LanguagePreferenceMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# to redirected unenrolled students to the course info page
'courseware.middleware.RedirectUnenrolledMiddleware',
'course_wiki.middleware.WikiAccessMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'. Note that
# 'ALLOW' is not a standard X-Frame-Options value, so browsers ignore the
# header and framing remains permitted.
X_FRAME_OPTIONS = 'ALLOW'
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
courseware_js = (
[
'coffee/src/' + pth + '.js'
for pth in ['courseware', 'histogram', 'navigation', 'time']
] +
['js/' + pth + '.js' for pth in ['ajax-error']] +
sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
# Before a student accesses courseware, we do not
# need many of the JS dependencies. This includes
# only the dependencies used everywhere in the LMS
# (including the dashboard/account/profile pages)
# Currently, this partially duplicates the "main vendor"
# JavaScript file, so only one of the two should be included
# on a page at any time.
# In the future, we will likely refactor this to use
# RequireJS and an optimizer.
base_vendor_js = [
'js/vendor/jquery.min.js',
'js/vendor/jquery.cookie.js',
'js/vendor/underscore-min.js'
]
main_vendor_js = base_vendor_js + [
'js/vendor/require.js',
'js/RequireJS-namespace-undefine.js',
'js/vendor/json2.js',
'js/vendor/jquery-ui.min.js',
'js/vendor/jquery.qtip.min.js',
'js/vendor/swfobject/swfobject.js',
'js/vendor/jquery.ba-bbq.min.js',
'js/vendor/ova/annotator-full.js',
'js/vendor/ova/annotator-full-firebase-auth.js',
'js/vendor/ova/video.dev.js',
'js/vendor/ova/vjs.youtube.js',
'js/vendor/ova/rangeslider.js',
'js/vendor/ova/share-annotator.js',
'js/vendor/ova/richText-annotator.js',
'js/vendor/ova/reply-annotator.js',
'js/vendor/ova/tags-annotator.js',
'js/vendor/ova/flagging-annotator.js',
'js/vendor/ova/diacritic-annotator.js',
'js/vendor/ova/grouping-annotator.js',
'js/vendor/ova/jquery-Watch.js',
'js/vendor/ova/openseadragon.js',
'js/vendor/ova/OpenSeaDragonAnnotation.js',
'js/vendor/ova/ova.js',
'js/vendor/ova/catch/js/catch.js',
'js/vendor/ova/catch/js/handlebars-1.1.2.js',
'js/vendor/URI.min.js',
]
dashboard_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/dashboard/**/*.js'))
discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js'))
rwd_header_footer_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/common_helpers/rwd_header_footer.js'))
staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js'))
open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js'))
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js'))
# JavaScript used by the student account and profile pages
# These are not courseware, so they do not need many of the courseware-specific
# JavaScript modules.
student_account_js = [
'js/utils/rwd_header_footer.js',
'js/utils/edx.utils.validate.js',
'js/src/utility.js',
'js/student_account/enrollment.js',
'js/student_account/shoppingcart.js',
'js/student_account/models/LoginModel.js',
'js/student_account/models/RegisterModel.js',
'js/student_account/models/PasswordResetModel.js',
'js/student_account/views/FormView.js',
'js/student_account/views/LoginView.js',
'js/student_account/views/RegisterView.js',
'js/student_account/views/PasswordResetView.js',
'js/student_account/views/AccessView.js',
'js/student_account/accessApp.js',
]
student_profile_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/student_profile/**/*.js'))
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
],
'output_filename': 'css/lms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/application.css',
'sass/ie.css'
],
'output_filename': 'css/lms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/application-extend1.css',
],
'output_filename': 'css/lms-style-app-extend1.css',
},
'style-app-extend2': {
'source_filenames': [
'sass/application-extend2.css',
],
'output_filename': 'css/lms-style-app-extend2.css',
},
'style-app-rtl': {
'source_filenames': [
'sass/application-rtl.css',
'sass/ie-rtl.css'
],
'output_filename': 'css/lms-style-app-rtl.css',
},
'style-app-extend1-rtl': {
'source_filenames': [
'sass/application-extend1-rtl.css',
],
'output_filename': 'css/lms-style-app-extend1-rtl.css',
},
'style-app-extend2-rtl': {
'source_filenames': [
'sass/application-extend2-rtl.css',
],
'output_filename': 'css/lms-style-app-extend2-rtl.css',
},
'style-course-vendor': {
'source_filenames': [
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/jquery.treeview.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
],
'output_filename': 'css/lms-style-course-vendor.css',
},
'style-course': {
'source_filenames': [
'sass/course.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course.css',
},
'style-course-rtl': {
'source_filenames': [
'sass/course-rtl.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/lms-style-xmodule-annotations.css',
},
}
common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
PIPELINE_JS = {
'application': {
# Application will contain all paths not in courseware_only_js
'source_filenames': sorted(common_js) + sorted(project_js) + [
'js/form.ext.js',
'js/my_courses_dropdown.js',
'js/toggle_login_modal.js',
'js/sticky_filter.js',
'js/query-params.js',
'js/src/utility.js',
'js/src/accessibility_tools.js',
'js/src/ie_shim.js',
'js/src/string_utils.js',
],
'output_filename': 'js/lms-application.js',
},
'courseware': {
'source_filenames': courseware_js,
'output_filename': 'js/lms-courseware.js',
},
'base_vendor': {
'source_filenames': base_vendor_js,
'output_filename': 'js/lms-base-vendor.js',
},
'main_vendor': {
'source_filenames': main_vendor_js,
'output_filename': 'js/lms-main_vendor.js',
},
'module-descriptor-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
'output_filename': 'js/lms-module-descriptors.js',
},
'module-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
'output_filename': 'js/lms-modules.js',
},
'discussion': {
'source_filenames': discussion_js,
'output_filename': 'js/discussion.js',
},
'staff_grading': {
'source_filenames': staff_grading_js,
'output_filename': 'js/staff_grading.js',
},
'open_ended': {
'source_filenames': open_ended_js,
'output_filename': 'js/open_ended.js',
},
'notes': {
'source_filenames': notes_js,
'output_filename': 'js/notes.js',
},
'instructor_dash': {
'source_filenames': instructor_dash_js,
'output_filename': 'js/instructor_dash.js',
},
'dashboard': {
'source_filenames': dashboard_js,
'output_filename': 'js/dashboard.js'
},
'rwd_header_footer': {
'source_filenames': rwd_header_footer_js,
'output_filename': 'js/rwd_header_footer.js'
},
'student_account': {
'source_filenames': student_account_js,
'output_filename': 'js/student_account.js'
},
'student_profile': {
'source_filenames': student_profile_js,
'output_filename': 'js/student_profile.js'
},
}
PIPELINE_DISABLE_WRAPPER = True
# Compile all coffee files in course data directories if they are out of date.
# TODO: Remove this once we move data into Mongo. This is only temporary while
# course data directories are still in use.
if os.path.isdir(DATA_DIR):
for course_dir in os.listdir(DATA_DIR):
js_dir = DATA_DIR / course_dir / "js"
if not os.path.isdir(js_dir):
continue
for filename in os.listdir(js_dir):
if filename.endswith('coffee'):
new_filename = os.path.splitext(filename)[0] + ".js"
if os.path.exists(js_dir / new_filename):
coffee_timestamp = os.stat(js_dir / filename).st_mtime
js_timestamp = os.stat(js_dir / new_filename).st_mtime
if coffee_timestamp <= js_timestamp:
continue
os.system("rm %s" % (js_dir / new_filename))
os.system("coffee -c %s" % (js_dir / filename))
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = "pipeline.compressors.uglifyjs.UglifyJSCompressor"
STATICFILES_IGNORE_PATTERNS = (
"sass/*",
"coffee/*",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs'
# Setting that will only affect the edX version of django-pipeline until our changes are merged upstream
PIPELINE_COMPILE_INPLACE = True
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
# let logging work as configured:
CELERYD_HIJACK_ROOT_LOGGER = False
################################ Bulk Email ###################################
# Suffix used to construct 'from' email address for bulk emails.
# A course-specific identifier is prepended.
BULK_EMAIL_DEFAULT_FROM_EMAIL = 'no-reply@example.com'
# Parameters for breaking down course enrollment into subtasks.
BULK_EMAIL_EMAILS_PER_TASK = 100
# Initial delay used for retrying tasks. Additional retries use
# longer delays. Value is in seconds.
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
BULK_EMAIL_MAX_RETRIES = 5
# Maximum number of retries per task for errors that are related to
# throttling. If this is not set, then there is no cap on such retries.
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# Flag to indicate if individual email addresses should be logged as they are sent
# a bulk email message.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
################################### APPS ######################################
INSTALLED_APPS = (
# Standard ones that are always installed...
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'south',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# Our courseware
'circuit',
'courseware',
'student',
'static_template_view',
'staticbook',
'track',
'eventtracking.django',
'util',
'certificates',
'dashboard',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
'course_groups',
'bulk_email',
# External auth (OpenID, shib)
'external_auth',
'django_openid_auth',
# OAuth2 Provider
'provider',
'provider.oauth2',
'oauth2_provider',
# For the wiki
'wiki', # The new django-wiki from benjaoming
'django_notify',
'course_wiki', # Our customizations
'mptt',
'sekizai',
#'wiki.plugins.attachments',
'wiki.plugins.links',
'wiki.plugins.notifications',
'course_wiki.plugins.markdownedx',
# Foldit integration
'foldit',
# For testing
'django.contrib.admin', # only used in DEBUG mode
'django_nose',
'debug',
# Discussion forums
'django_comment_client',
'django_comment_common',
'notes',
# Splash screen
'splash',
# Monitoring
'datadog',
# User API
'rest_framework',
'user_api',
# Shopping cart
'shoppingcart',
# Notification preferences setting
'notification_prefs',
'notifier_api',
# Different Course Modes
'course_modes',
# Student Identity Verification
'verify_student',
# Dark-launching languages
'dark_lang',
# Microsite configuration
'microsite_configuration',
# Student Identity Reverification
'reverification',
'embargo',
# Monitoring functionality
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
# Country list
'django_countries',
# edX Mobile API
'mobile_api',
# Surveys
'survey',
)
######################### MARKETING SITE ###############################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
'ABOUT': 'about_edx',
'CONTACT': 'contact',
'FAQ': 'help_edx',
'COURSES': 'courses',
    'SCHOOLS': 'schools',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor',
'PRIVACY': 'privacy_edx',
'JOBS': 'jobs',
'NEWS': 'news',
'PRESS': 'press',
'BLOG': 'edx-blog',
'DONATE': 'donate',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
################# Student Verification #################
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
}
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
######################## CAS authentication ###########################
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
###################### Registration ##################################
# For each of the fields, give one of the following values:
# - 'required': to display the field, and make it mandatory
# - 'optional': to display the field, and make it non-mandatory
# - 'hidden': to not display the field
REGISTRATION_EXTRA_FIELDS = {
'level_of_education': 'optional',
'gender': 'optional',
'year_of_birth': 'optional',
'mailing_address': 'optional',
'goals': 'optional',
'honor_code': 'required',
'terms_of_service': 'hidden',
'city': 'hidden',
'country': 'hidden',
}
########################## CERTIFICATE NAME ########################
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
###################### Grade Downloads ######################
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': '/tmp/edx-s3/grades',
}
######################## PROGRESS SUCCESS BUTTON ##############################
# The following fields are available in the URL: {course_id} {student_id}
PROGRESS_SUCCESS_BUTTON_URL = 'http://<domain>/<path>/{course_id}'
PROGRESS_SUCCESS_BUTTON_TEXT_OVERRIDE = None
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##################### LinkedIn #####################
INSTALLED_APPS += ('django_openid_auth',)
############################ LinkedIn Integration #############################
INSTALLED_APPS += ('linkedin',)
LINKEDIN_API = {
'EMAIL_WHITELIST': [],
'COMPANY_ID': '2746406',
}
############################ ORA 2 ############################################
# By default, don't use a file prefix
ORA2_FILE_PREFIX = None
# Default File Upload Storage bucket and prefix. Used by the FileUpload Service.
FILE_UPLOAD_STORAGE_BUCKET_NAME = 'edxuploads'
FILE_UPLOAD_STORAGE_PREFIX = 'submissions_attachments'
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
# Source:
# http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
# Stub for third_party_auth options.
# See common/djangoapps/third_party_auth/settings.py for configuration details.
THIRD_PARTY_AUTH = {}
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Analytics Data API + Dashboard (Insights) settings
ANALYTICS_DATA_URL = ""
ANALYTICS_DATA_TOKEN = ""
ANALYTICS_DASHBOARD_URL = ""
ANALYTICS_DASHBOARD_NAME = PLATFORM_NAME + " Insights"
# REGISTRATION CODES DISPLAY INFORMATION SUBSTITUTIONS IN THE INVOICE ATTACHMENT
INVOICE_CORP_ADDRESS = "Please place your corporate address\nin this configuration"
INVOICE_PAYMENT_INSTRUCTIONS = "This is where you can\nput payment directions for people\nbuying registration codes"
# Country code overrides
# Used by django-countries
COUNTRIES_OVERRIDE = {
"TW": _("Taiwan"),
}
# which access.py permission name to check in order to determine if a course is visible in
# the course catalog. We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = 'see_exists'
# which access.py permission name to check in order to determine if a course about page is
# visible. We default this to the legacy permission 'see_exists'.
COURSE_ABOUT_VISIBILITY_PERMISSION = 'see_exists'
|
kuiwei/edx-platform
|
lms/envs/common.py
|
Python
|
agpl-3.0
| 63,921
|
# -*- coding: utf-8 -*-
import base64
import time
import simplejson as json
from eventlet.green import urllib2
import urllib
from kral import config
def stream(queries, queue, kral_start_time):
url = 'https://stream.twitter.com/1/statuses/filter.json'
queries = [q.lower() for q in queries]
quoted_queries = [urllib.quote(q) for q in queries]
query_post = 'track=' + ",".join(quoted_queries)
request = urllib2.Request(url, query_post)
auth = base64.b64encode('%s:%s' % (config.TWITTER['user'], config.TWITTER['password']))
request.add_header('Authorization', "basic %s" % auth)
request.add_header('User-agent', config.USER_AGENT)
for item in urllib2.urlopen(request):
try:
item = json.loads(item)
        except json.JSONDecodeError:  # Twitter's stream occasionally emits lines that are not valid JSON
continue
if 'text' in item and 'user' in item:
            # determine which query this post matches, if any, by searching the text
text = item['text'].lower()
query = ''
for q in queries:
q_uni = unicode(q, 'utf-8')
if q_uni in text:
query = q_uni
lang = False
if config.LANG:
if item['user']['lang'] == config.LANG:
lang = True
else:
lang = True
if query and lang:
post = {
'service' : 'twitter',
'user' : {
'id' : item['user']['id_str'],
'utc' : item['user']['utc_offset'],
'name' : item['user']['screen_name'],
'description' : item['user']['description'],
'location' : item['user']['location'],
'avatar' : item['user']['profile_image_url'],
'subscribers': item['user']['followers_count'],
'subscriptions': item['user']['friends_count'],
'website': item['user']['url'],
'language' : item['user']['lang'],
},
'links' : [],
'id' : item['id'],
'application': item['source'],
'date' : int(time.mktime(time.strptime(item['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))),
'text' : text,
'query' : query,
'geo' : item['coordinates'],
}
for url in item['entities']['urls']:
post['links'].append({ 'href' : url.get('url') })
queue.put(post)
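# Usage sketch (hypothetical names, not part of kral itself): stream() blocks
# on Twitter's streaming API and pushes each normalized post onto the queue,
# so it is typically run in its own green thread:
#
#     from eventlet import spawn
#     from eventlet.queue import Queue
#
#     q = Queue()
#     spawn(stream, ['python', 'django'], q, time.time())
#     post = q.get()  # dict in the format built above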
|
lrvick/kral
|
kral/services/twitter.py
|
Python
|
agpl-3.0
| 2,819
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import glob
import sys
#VERSION="2.1dev4"
VERSION="2.6dev5"
# Taken from kennethreitz/requests/setup.py
package_directory = os.path.realpath(os.path.dirname(__file__))
def get_file_contents(file_path):
"""Get the context of the file using full path name."""
content = ""
try:
full_path = os.path.join(package_directory, file_path)
content = open(full_path, 'r').read()
    except IOError:
print >> sys.stderr, "### could not open file %r" % file_path
return content
setup(
name='privacyIDEA',
version=VERSION,
description='privacyIDEA: identity, multifactor authentication (OTP), '
'authorization, audit',
author='privacyidea.org',
license='AGPLv3',
author_email='cornelius@privacyidea.org',
url='http://www.privacyidea.org',
keywords='OTP, two factor authentication, management, security',
packages=find_packages(),
scripts=['pi-manage.py',
'tools/privacyidea-convert-token',
'tools/privacyidea-create-pwidresolver-user',
'tools/privacyidea-create-sqlidresolver-user',
'tools/privacyidea-pip-update',
'tools/privacyidea-create-certificate',
'tools/privacyidea-fix-access-rights',
'tools/privacyidea-create-ad-users',
'tools/privacyidea-fetchssh.sh',
'tools/privacyidea-create-userdb.sh'
],
extras_require={
'dev': ["Sphinx>=1.3.1",
"sphinxcontrib-httpdomain>=1.3.0"],
'test': ["coverage>=3.7.1",
"mock>=1.0.1",
"nose>=1.3.4",
"responses>=0.4.0",
"six>=1.8.0"],
},
install_requires=["Flask>=0.10.1",
"Flask-Cache>=0.13.1",
"Flask-Migrate>=1.2.0",
"Flask-SQLAlchemy>=2.0",
"Flask-Script>=2.0.5",
"Jinja2>=2.7.3",
"Mako>=0.9.1",
"MarkupSafe>=0.23",
"MySQL-python>=1.2.5",
"Pillow>=2.6.1",
"PyJWT>=1.3.0",
"PyYAML>=3.11",
"Pygments>=2.0.2",
"SQLAlchemy>=1.0.5",
"Werkzeug>=0.10.4",
"alembic>=0.6.7",
"argparse>=1.2.1",
"bcrypt>=1.1.0",
"beautifulsoup4>=4.3.2",
"cffi>=0.8.6",
"configobj>=5.0.6",
"docutils>=0.12",
"funcparserlib>=0.3.6",
"itsdangerous>=0.24",
"ldap3>=0.9.8.4",
"netaddr>=0.7.12",
"passlib>=1.6.2",
"pyasn1>=0.1.7",
"pyOpenSSL>=0.15.1",
"pycparser>=2.10",
"pycrypto>=2.6.1",
"pyrad>=2.0",
"pyusb>=1.0.0b2",
"qrcode>=5.1",
"requests>=2.7.0",
"sqlsoup>=0.9.0",
"wsgiref>=0.1.2"
],
include_package_data=True,
data_files=[('etc/privacyidea/',
['deploy/apache/privacyideaapp.wsgi',
'deploy/privacyidea/dictionary',
'deploy/privacyidea/enckey',
'deploy/privacyidea/private.pem',
'deploy/privacyidea/public.pem']),
('share/man/man1',
["tools/privacyidea-convert-token.1",
"tools/privacyidea-create-pwidresolver-user.1",
"tools/privacyidea-create-sqlidresolver-user.1",
"tools/privacyidea-pip-update.1",
"tools/privacyidea-create-certificate.1",
"tools/privacyidea-fix-access-rights.1"
]),
('lib/privacyidea/authmodules/FreeRADIUS',
["authmodules/FreeRADIUS/LICENSE",
"authmodules/FreeRADIUS/privacyidea_radius.pm"]),
('lib/privacyidea/authmodules/OTRS',
["authmodules/OTRS/privacyIDEA.pm"]),
('lib/privacyidea/migrations',
["migrations/alembic.ini",
"migrations/env.py",
"migrations/README",
"migrations/script.py.mako"]),
('lib/privacyidea/migrations/versions',
["migrations/versions/2551ee982544_.py",
"migrations/versions/4f32a4e1bf33_.py",
"migrations/versions/2181294eed0b_.py",
"migrations/versions/e5cbeb7c177_.py",
"migrations/versions/4d9178fa8336_.py",
"migrations/versions/20969b4cbf06_.py"])
],
classifiers=["Framework :: Flask",
"License :: OSI Approved :: "
"GNU Affero General Public License v3",
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Topic :: Internet",
"Topic :: Security",
"Topic :: System ::"
" Systems Administration :: Authentication/Directory"
],
#message_extractors={'privacyidea': [
# ('**.py', 'python', None),
# ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
zip_safe=False,
long_description=get_file_contents('README.md')
)
|
woddx/privacyidea
|
setup.py
|
Python
|
agpl-3.0
| 5,648
|
from __future__ import absolute_import
from django.conf.urls import include, url
from tastypie.api import Api
from locations.api import v1, v2
from locations.api.sword import views
v1_api = Api(api_name="v1")
v1_api.register(v1.SpaceResource())
v1_api.register(v1.LocationResource())
v1_api.register(v1.PackageResource())
v1_api.register(v1.PipelineResource())
v1_api.register(v1.AsyncResource())
v2_api = Api(api_name="v2")
v2_api.register(v2.SpaceResource())
v2_api.register(v2.LocationResource())
v2_api.register(v2.PackageResource())
v2_api.register(v2.PipelineResource())
v2_api.register(v2.AsyncResource())
urlpatterns = [
url(r"", include(v1_api.urls)),
url(r"v1/sword/$", views.service_document, name="sword_service_document"),
url(r"", include(v2_api.urls)),
url(r"v2/sword/$", views.service_document, name="sword_service_document"),
]
|
artefactual/archivematica-storage-service
|
storage_service/locations/api/urls.py
|
Python
|
agpl-3.0
| 865
|
# This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <aleksandar.erkalovic@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction
from booki.editor import models
from booki.utils import security
def remote_get_status_messages(request, message, groupid):
from booki.statusnet.models import searchMessages
group = models.BookiGroup.objects.get(url_name=groupid)
mess = searchMessages('%%23%s' % group.url_name)
    # TODO: remove this hard-coded status.net URL
messages = ['<a href="http://status.flossmanuals.net/notice/%s">%s: %s</a>' % (m['id'], m['from_user'], m['text']) for m in mess['results']]
return {"list": messages}
def remote_init_group(request, message, groupid):
import sputnik
## get online users
try:
_onlineUsers = sputnik.smembers("sputnik:channel:%s:users" % message["channel"])
except:
_onlineUsers = []
if request.user.username not in _onlineUsers:
try:
sputnik.sadd("sputnik:channel:%s:users" % message["channel"], request.user.username)
except:
pass
return {}
def remote_leave_group(request, message, groupid):
group = models.BookiGroup.objects.get(url_name=groupid)
group.members.remove(request.user)
transaction.commit()
return {"result": True}
def remote_join_group(request, message, groupid):
group = models.BookiGroup.objects.get(url_name=groupid)
group.members.add(request.user)
transaction.commit()
return {"result": True}
|
aerkalov/Booktype
|
lib/booki/channels/group.py
|
Python
|
agpl-3.0
| 2,141
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Rodrigo Alves Lima
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import resolve
class URLNameMiddleware:
def process_view(self, request, *args):
request.url_name = resolve(request.path).url_name
return None
|
rodrigoalveslima/runn
|
src/src/middleware.py
|
Python
|
agpl-3.0
| 917
|
# decodex - simple enigma decoder.
#
# Copyright (c) 2013 Paul R. Tagliamonte <tag@pault.ag>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
def cleanup(what):
return what.strip().lower().replace("'", "")
def issubset(superstr, substr):
superstr = list(superstr)
for ch in substr:
if ch not in superstr:
return False
superstr.remove(ch)
return True
def strsub(superstr, substr):
superstr = list(superstr)
substr = list(substr)
for k in substr:
superstr.remove(k)
return "".join(superstr)
class Words(object):
def __init__(self, dictionary):
self.path = "/usr/share/dict/%s" % (dictionary)
self.mapping = defaultdict(set)
self.word_hash = {}
self._build_map()
def _build_map(self):
for line in (cleanup(x) for x in open(self.path, 'r')):
self.word_hash[line] = line
self.mapping["".join(sorted(line))].add(line)
def anagram(self, word, depth=2):
if depth == 0:
return
l_hash = "".join(sorted(word))
# OK. Let's start simple.
if l_hash in self.mapping:
for entry in self.mapping[l_hash]:
yield [entry]
# Meh, Let's do our best and find l_hash in r_hash.
for r_hash, entries in self.mapping.items():
if issubset(l_hash, r_hash):
leftover = strsub(l_hash, r_hash)
# OK. So, this is a word if we can match the rest.
for anagram in self.anagram(leftover, depth=(depth - 1)):
for entry in entries:
yield [entry] + anagram
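# Usage sketch (assumes a word list is installed at /usr/share/dict/words):
#
#     words = Words('words')
#     for candidate in words.anagram('dormitory'):
#         print candidate  # e.g. ['dirty', 'room']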
|
paultag/decodex
|
decodex/utils/words.py
|
Python
|
agpl-3.0
| 2,313
|
from erukar.system.engine import Enemy, BasicAI
from ..templates.Undead import Undead
from erukar.content.inventory import Shortsword, Buckler
from erukar.content.modifiers import Steel, Oak
class Skeleton(Undead):
ClassName = 'Skeleton'
ClassLevel = 1
BaseMitigations = {
'bludgeoning': (-0.25, 0),
'piercing': (0.2, 0),
'slashing': (0.15, 0)
}
BriefDescription = "a skeleton holding a {describe|right} and a {describe|left}."
def init_stats(self):
self.strength = 5
self.dexterity = 4
self.vitality = -1
self.acuity = -2
self.sense = -2
def init_personality(self):
self.ai_module = BasicAI(self)
self.str_ratio = 0.4
self.dex_ratio = 0.3
self.vit_ratio = 0.2
self.acu_ratio = 0.0
self.sen_ratio = 0.0
self.res_ratio = 0.1
self.stat_points = 8
def init_inventory(self):
self.left = Buckler(modifiers=[Oak])
self.right = Shortsword(modifiers=[Steel])
self.inventory = [self.left, self.right]
|
etkirsch/legends-of-erukar
|
erukar/content/enemies/undead/Skeleton.py
|
Python
|
agpl-3.0
| 1,103
|
###################################################################
#
# Copyright (c) 2011 Canonical Ltd.
# Copyright (c) 2013 Miing.org <samuel.miing@gmail.com>
#
# This software is licensed under the GNU Affero General Public
# License version 3 (AGPLv3), as published by the Free Software
# Foundation, and may be copied, distributed, and modified under
# those terms.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# file LICENSE for more details.
#
###################################################################
from .environment import *
from .django import *
from .postgresql import *
from .dbengine import *
from .development import *
from .docs import *
# make sure the virtualenv is automatically activated
setup_virtualenv()
|
miing/mci_migo
|
fabfile/__init__.py
|
Python
|
agpl-3.0
| 899
|
# Generated by Django 3.0.9 on 2020-09-04 21:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock', '0006_auto_20200904_2343'),
]
operations = [
migrations.RemoveField(
model_name='paymenttransaction',
name='method',
),
migrations.AddField(
model_name='fundzone',
name='method',
field=models.CharField(choices=[('a', 'Card'), ('b', 'Cash')], default='a', max_length=1),
preserve_default=False,
),
]
|
UrLab/incubator
|
stock/migrations/0007_auto_20200904_2351.py
|
Python
|
agpl-3.0
| 587
|
#!/usr/bin/env python
"""
Build an ansible inventory based on autoscaling group instance lifecycle state.
Outputs JSON to stdout with keys for each state and combination of autoscaling
group and state.
{
"InService": [
"10.0.47.127",
"10.0.46.174"
],
"Terminating:Wait": [
"10.0.48.104"
],
"e-d-CommonClusterServerAsGroup": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.48.104"
]
}
"""
import argparse
import boto
import json
from collections import defaultdict
class LifecycleInventory():
profile = None
def __init__(self, profile):
        self.profile = profile
def get_instance_dict(self):
ec2 = boto.connect_ec2(profile_name=self.profile)
reservations = ec2.get_all_instances()
dict = {}
for instance in [i for r in reservations for i in r.instances]:
dict[instance.id] = instance
return dict
def run(self):
autoscale = boto.connect_autoscale(profile_name=self.profile)
groups = autoscale.get_all_groups()
instances = self.get_instance_dict()
inventory = defaultdict(list)
for group in groups:
for instance in group.instances:
private_ip_address = instances[instance.instance_id].private_ip_address
if private_ip_address:
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
LifecycleInventory(args.profile).run()
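# Example invocation (hypothetical AWS profile name); the JSON shown in the
# module docstring is printed to stdout, so this script can back an ansible
# dynamic inventory:
#
#     ./lifecycle_inventory.py --profile edx-east --list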
|
hmcmooc/ec2-edx-configuration
|
playbooks/edx-east/lifecycle_inventory.py
|
Python
|
agpl-3.0
| 2,195
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
class AbstractClassError(Exception):
def __str__(self):
return 'Abstract Class'
def __repr__(self):
return 'Abstract Class'
class AbstractMethodError(Exception):
def __str__(self):
return 'Abstract Method'
def __repr__(self):
return 'Abstract Method'
class UnknowClassError(Exception):
def __str__(self):
return 'Unknown Class'
def __repr__(self):
return 'Unknown Class'
class UnsuportedCurrencyError(Exception):
def __init__(self, value):
self.curr = value
def __str__(self):
return 'Unsupported currency %s' % self.curr
def __repr__(self):
return 'Unsupported currency %s' % self.curr
class Currency_getter_factory():
"""Factory pattern class that will return
    a currency getter instance based on the name passed
to the register method
"""
def register(self, class_name):
allowed = [
'CH_ADMIN_getter',
'PL_NBP_getter',
'ECB_getter',
'GOOGLE_getter',
'YAHOO_getter',
'MX_BdM_getter',
'CA_BOC_getter',
'RO_BNR_getter',
'BG_CUSTOMS_getter',
'BG_SIBANK_getter',
'BG_UNICRDT_getter',
]
if class_name in allowed:
exec "from .update_service_%s import %s" % (class_name.replace('_getter', ''), class_name)
class_def = eval(class_name)
_logger.info("from .update_service_%s import %s: class_def %s:" % (class_name.replace('_getter', ''), class_name, class_def))
return class_def()
else:
raise UnknowClassError
|
rosenvladimirov/addons
|
currency_rate_update_bg/services/currency_getter.py
|
Python
|
agpl-3.0
| 2,676
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 MicroEra s.r.l.
# (<http://www.microera.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Jimo Pricelist Report',
'version': '0.1',
'category': 'Product',
'description': """
Jimo Pricelist Report
""",
'author': 'MicroEra srl',
'website': 'http://www.microera.it',
'depends': ['base', 'product'],
'data': [
'pricelist_report_view.xml',
'security/ir.model.access.csv',
'security/pricelist_report_security.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
appendif/microera
|
jimo_pricelist_report/__openerp__.py
|
Python
|
agpl-3.0
| 1,482
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-03-04 21:13
# Django
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crowdsource', '0025_auto_20191209_1647'),
]
operations = [
migrations.AddField(
model_name='crowdsource',
name='ask_public',
field=models.BooleanField(
default=True,
help_text=
                'Add a field asking users if we can publicly credit them for their response'
),
),
]
|
MuckRock/muckrock
|
muckrock/crowdsource/migrations/0026_crowdsource_ask_public.py
|
Python
|
agpl-3.0
| 587
|
# Django settings for apmanager project.
import os
from django.utils.translation import ugettext_lazy as _
DEBUG = True
USE_DAEMON = True
if os.environ.get("USE_DEV_PATHS", None):
DEV_PATHS = True
else:
DEV_PATHS = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = 'papas.sqlite' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGES = (
('fr', _("French")),
('en', _("English")),
)
LANGUAGE_CODE = 'fr_CA'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
if DEV_PATHS:
UPLOAD_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"uploads",
)
)
else:
UPLOAD_ROOT = '/var/lib/apmanager/uploads/'
# Site prefix to add to relative urls, such as apmanager/ for a site on example.com/apmanager/
# Leave blank if installed on web root
LOGIN_URL = "/accounts/login/"
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
if DEV_PATHS:
MEDIA_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"..", "..", "apmanager", 'templates','site-media'),
)
else:
MEDIA_ROOT = "/usr/share/apmanager/templates/site-media"
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = "/site-media/"
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = "/media/"
# Make this unique, and don't share it with anybody.
SECRET_KEY = ')@1wt()$4x&&e9c#n&viv-g#k20(p!_ga)s$+4i!*hbdcid$)s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'apmanager.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(os.path.join(MEDIA_ROOT, "..")),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'apmanager.accesspoints',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
)
if DEV_PATHS:
WATCH_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__),"..","..","watch"),
)
else:
WATCH_DIR='/var/lib/apmanager/watch'
COMMAND_WATCH_DIR = WATCH_DIR + '/commands'
AP_DIR = WATCH_DIR + '/ap'
AP_REFRESH_WATCH_DIR = AP_DIR + '/refresh'
AP_INIT_WATCH_DIR = AP_DIR + '/init'
LOCALE_PATHS = (
'/usr/share/apmanager/locale',
)
if DEV_PATHS:
LOCALE_PATHS = LOCALE_PATHS + (
os.path.join(os.path.dirname(__file__), "..", "..", "locale"),
)
TEMPLATE_DIRS = TEMPLATE_DIRS + (
os.path.join(os.path.dirname(__file__), "..", "..", "apmanager"),
)
for dpath in (
UPLOAD_ROOT,
WATCH_DIR,
AP_DIR,
AP_REFRESH_WATCH_DIR,
AP_INIT_WATCH_DIR,
COMMAND_WATCH_DIR,
):
if not os.path.isdir(dpath): os.mkdir(dpath)
|
veloutin/papas
|
etc/papas/settings.py
|
Python
|
agpl-3.0
| 4,958
|
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class ResLang(models.Model):
_inherit = 'res.lang'
default_uom_ids = fields.Many2many(
string='Default Units',
comodel_name='product.uom',
)
@api.multi
@api.constrains('default_uom_ids')
def _check_default_uom_ids(self):
for record in self:
categories = set(record.default_uom_ids.mapped('category_id'))
if len(categories) != len(record.default_uom_ids):
raise ValidationError(_(
'Only one default unit of measure per category may '
'be selected.',
))
@api.model
def default_uom_by_category(self, category_name, lang=None):
"""Return the default UoM for language for the input UoM Category.
Args:
category_name (str): Name of the UoM category to get the default
for.
lang (ResLang or str, optional): Recordset or code of the language
to get the default for. Will use the current user language if
omitted.
Returns:
ProductUom: Unit of measure representing the default, if set.
Empty recordset otherwise.
"""
if lang is None:
lang = self.env.user.lang
if isinstance(lang, basestring):
lang = self.env['res.lang'].search([
('code', '=', lang),
],
limit=1,
)
results = lang.default_uom_ids.filtered(
lambda r: r.category_id.name == category_name,
)
return results[:1]
|
ovnicraft/server-tools
|
base_locale_uom_default/models/res_lang.py
|
Python
|
agpl-3.0
| 1,775
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField
from lms.djangoapps.courseware.fields import UnsignedBigIntAutoField
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGrade',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('id', UnsignedBigIntAutoField(serialize=False, primary_key=True)),
('user_id', models.IntegerField()),
('course_id', CourseKeyField(max_length=255)),
('usage_key', UsageKeyField(max_length=255)),
('subtree_edited_date', models.DateTimeField(verbose_name=b'last content edit timestamp')),
('course_version', models.CharField(max_length=255, verbose_name=b'guid of latest course version', blank=True)),
('earned_all', models.FloatField()),
('possible_all', models.FloatField()),
('earned_graded', models.FloatField()),
('possible_graded', models.FloatField()),
],
),
migrations.CreateModel(
name='VisibleBlocks',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('blocks_json', models.TextField()),
('hashed', models.CharField(unique=True, max_length=100)),
],
),
migrations.AddField(
model_name='persistentsubsectiongrade',
name='visible_blocks',
field=models.ForeignKey(to='grades.VisibleBlocks', db_column=b'visible_blocks_hash', to_field=b'hashed', on_delete=models.CASCADE),
),
migrations.AlterUniqueTogether(
name='persistentsubsectiongrade',
unique_together=set([('course_id', 'user_id', 'usage_key')]),
),
]
|
ESOedX/edx-platform
|
lms/djangoapps/grades/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 2,347
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-Today: Odoo Community Association (OCA)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
from openerp import models, fields, api
_logger = logging.getLogger(__name__)
class GithubRepository(models.Model):
_name = 'github.repository'
_inherit = ['github.connector']
_order = 'organization_id, name'
# Column Section
organization_id = fields.Many2one(
comodel_name='github.organization', string='Organization',
required=True, select=True, readonly=True, ondelete='cascade')
name = fields.Char(
string='Name', select=True, required=True, readonly=True)
complete_name = fields.Char(
string='Complete Name', select=True, required=True, readonly=True)
description = fields.Char(string='Description', readonly=True)
website = fields.Char(string='Website', readonly=True)
github_url = fields.Char(string='Github URL', readonly=True)
repository_branch_ids = fields.One2many(
comodel_name='github.repository.branch',
inverse_name='repository_id', string='Branches', readonly=True)
# Constraint Section
_sql_constraints = [
(
'complete_name_uniq', 'unique(complete_name)',
"Two Projects with the same Complete Name ? I don't think so.")
]
# Action Section
@api.multi
def button_analyze_issue(self):
return self._analyze_issue()
# Custom Section
def github_2_odoo(self, data):
return {
'name': data['name'],
'complete_name': data['full_name'],
'github_url': data['url'],
'website': data['homepage'],
'description': data['description'],
}
@api.multi
def _analyze_issue(self):
for repository in self:
#### # Delete all issues versions # TODO
#### module_versions = module_version_obj.search([
#### ('repository_branch_id', '=', repository_branch.id)])
#### module_versions.with_context(
#### dont_change_repository_branch_state=True).unlink()
#### # Delete all pull requests # TODO
#### git_commits = git_commit_obj.search([
#### ('repository_branch_id', '=', repository_branch.id)])
#### git_commits.with_context(
#### dont_change_repository_branch_state=True).unlink()
abstract_issue_obj = self.env['github.abstract.issue']
            # Get issue data from GitHub
issue_ids = []
for data in self.get_datalist_from_github(
'repository_issues', [repository.complete_name]):
abstract_issue =\
abstract_issue_obj.create_or_update_from_github(
data, repository)
# repository_ids.append(repository.id)
# organization.repository_ids = repository_ids
# Custom Section
@api.model
def create_or_update_from_github(self, organization_id, data, full):
"""Create a new repository or update an existing one based on github
datas. Return a repository."""
repository_branch_obj = self.env['github.repository.branch']
repository = self.search([('complete_name', '=', data['full_name'])])
if repository and not full:
return repository
        # Get full data from GitHub
odoo_data = self.github_2_odoo(
self.get_data_from_github('repository', [data['full_name']]))
odoo_data.update({'organization_id': organization_id})
if not repository:
repository = self.create(odoo_data)
else:
repository.write(odoo_data)
# Get Branches Data
branch_datas = self.get_datalist_from_github(
'repository_branches', [data['full_name']])
correct_series =\
repository.organization_id.organization_serie_ids.mapped('name')
for branch_data in branch_datas:
if branch_data['name'] in correct_series:
repository_branch_obj.create_or_update_from_name(
repository.id, branch_data['name'])
else:
_logger.warning(
"the branch '%s'/'%s' has been ignored." % (
repository.complete_name, branch_data['name']))
return repository
|
legalsylvain/oca-custom
|
__unported__/oca_freestore/models/github_repository.py
|
Python
|
agpl-3.0
| 4,465
|
#!/usr/bin/env python
import os
import os.path
import msgpack
import json
from base64 import b64encode, b64decode
from binascii import hexlify
from time import time
import compression
import crypto
from transports.metatransport import MetaTransport
from transports.transport import NotRedundant
from flexceptions import BlockCorruptionError
class Backup:
'''A Backup object represents a backup or restore job, with a local
path and an attached transport. A crypto object can also be attached.
(If none is selected, we default to NullEncryption().'''
def __init__(self, config=None, status=None):
if config is None:
config = {}
self.config = config
if 'chunksize' not in config:
self.config['chunksize'] = 1024 * 1024 * 8 # 8MB
crypto.init(config)
compression.init(config)
self.status = status
self.blockmap = {}
self.inittime = int(time())
self.oldfiles = {}
self.transport = MetaTransport(config, status)
self.root = '/'
self.force = False
def _syspath_to_backup(self, path):
return os.path.relpath(path, self.root)
def _backup_to_syspath(self, path):
return os.path.join(self.root, path)
def _digest(self, data):
return crypto.hmac(self.config, data)
def _enc(self, data):
encrypted = crypto.encrypt(self.config,
compression.compress(self.config, data))
return encrypted
def _dec(self, data):
return compression.decompress(self.config,
crypto.decrypt(self.config, data))
def _save_manifest(self, data):
data = crypto.encrypt_then_mac(self.config,
compression.compress(self.config,
msgpack.dumps(data)))
self.transport.write_manifest(data, self.inittime)
def _load_manifest(self, mid):
data = self.transport.read_manifest(mid)
return msgpack.loads(
compression.decompress(self.config,
crypto.auth_then_decrypt(self.config,
data)))
def _get_chunks(self, f):
        '''Generator that takes a file handle and yields tuples of the
        digest of each plaintext chunk and the encrypted chunk itself
        (or None when the chunk already exists at the destination).'''
data = f.read(self.config['chunksize'])
while data != '':
digest = self._digest(data)
if not self.transport.chunk_exists(digest):
encdata = self._enc(data)
else:
encdata = None
yield (digest, encdata)
data = f.read(self.config['chunksize'])
def load_config_remote(self, passphrase):
config = self.transport.read_config()
config = crypto.decrypt_config(config, passphrase)
config = msgpack.loads(config)
self.__init__(config, self.status)
def save_config_remote(self):
config = msgpack.dumps(self.config)
config = crypto.encrypt_config(self.config, config)
self.transport.write_config(config)
def _local_config_path(self):
if 'local_config' in self.config:
return self.config['local_config']
return os.path.join(self.config['local_paths'][0],
'.forklift_config')
def load_config_local(self, path=None):
if path is None:
path = self._local_config_path()
f = open(path, 'r')
config = json.load(f)
f.close()
self.__init__(config, self.status)
def save_config_local(self, path=None):
if path is None:
path = self._local_config_path()
f = open(path, 'w')
f.write(json.dumps(self.config, indent=2))
f.close()
def set_passphrase(self, passphrase):
crypto.new_passphrase(self.config, passphrase)
def fetch_chunk(self, chunkhash, verifyonly=False):
'''Fetches a chunk of the given hash value. First it looks in
local storage.'''
if chunkhash in self.blockmap:
for pos, path in self.blockmap[chunkhash]:
for pathtotry in [path, path + '.' + str(self.inittime)]:
try:
f = open(self._backup_to_syspath(pathtotry), 'r')
f.seek(pos)
data = f.read(self.config['chunksize'])
if self._digest(data) == chunkhash:
f.close()
return data
except IOError:
pass
if verifyonly:
return None
data = self.transport.read_chunk(chunkhash)
self.status.update()
data = self._dec(data)
if self._digest(data) != chunkhash:
raise BlockCorruptionError('Block %s corrupted!' %
hexlify(chunkhash))
return data
def restore_file(self, file_manifest):
'''Fetches and restores a file from a given manifest dict.
Manifest format:
{'n': 'dir/Filename.txt',
'b': ['123abc...', '234bcd...', 'more base64 encoded digests'],
's': filesize,
'mode': os stat mode,
'mtime': modified time}'''
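        # Illustrative example (made-up values): with the default 8 MB
        # chunksize, a 10 MB file yields two digests in 'b' and
        # 's' == 10485760.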
path = self._backup_to_syspath(file_manifest['n'])
tmppath = path + '.' + str(self.inittime)
        f = None
        try:
            f = open(tmppath, 'wb')
bytes_d = self.status.bytes_d
for chunk in file_manifest['b']:
f.write(self.fetch_chunk(chunk))
self.status.chunks_d += 1
self.status.bytes_d = bytes_d + f.tell()
self.status.update()
f.flush()
self.status.files_d += 1
self.status.update()
f.close()
try:
os.unlink(path)
except OSError:
pass
os.rename(tmppath, path)
except BaseException:
self.status.verbose('Cleaning up temporary file!')
            if f is not None and not f.closed:
f.close()
os.unlink(tmppath)
raise
os.chmod(path, file_manifest['mode'])
os.utime(path, (int(file_manifest['mtime']),
int(file_manifest['mtime'])))
self.status.verbose(file_manifest['n'])
def get_chunklist(self,
manifest,
return_sizes=False,
dupesokay=False):
'''Fetches a full list of chunk digests from a manifest.'''
chunklist = []
chunklist_sizes = []
if 'files' not in manifest:
return chunklist
if dupesokay and not return_sizes:
chunklist = [x for f in manifest['files']
for x in f['b']]
return chunklist
for file_manifest in manifest['files']:
for count, chunk in enumerate(file_manifest['b']):
if chunk not in chunklist:
chunklist.append(chunk)
if (count + 1) * self.config['chunksize'] > \
file_manifest['s']:
chunklist_sizes.append(file_manifest['s'] %
self.config['chunksize'])
else:
chunklist_sizes.append(self.config['chunksize'])
if return_sizes:
return zip(chunklist, chunklist_sizes)
return chunklist
def retention(self, t):
'''Deletes all manifests older than t. Reads remaining manifests
and removes unused chunks.'''
mids = self.transport.list_manifest_ids()
delete_mids = [mid for mid in mids if mid < t]
keep_mids = [mid for mid in mids if mid >= t]
for mid in delete_mids:
self.transport.del_manifest(mid)
keep_chunks = set()
for mid in keep_mids:
manifest = self._load_manifest(mid)
keep_chunks.update(self.get_chunklist(manifest,
dupesokay=True))
existing_chunks = set(self.transport.list_chunks())
for chunk in existing_chunks - keep_chunks:
self.transport.del_chunk(chunk)
def build_block_map(self, manifest):
        '''Builds a dict (as part of the object) which contains each chunkhash
        and where it can potentially be found in the filesystem.'''
for f in manifest['files']:
for pos, chunk in enumerate(f['b']):
if chunk not in self.blockmap:
self.blockmap[chunk] = []
self.blockmap[chunk].append((pos * self.config['chunksize'],
f['n']))
def find_needed_chunks(self, chunklist):
'''Returns a list of chunks not on the local filesystem. Chunklist
should be a list of tuples with the chunkhash and the chunksize.'''
needed_chunks = []
for chunk in chunklist:
if self.fetch_chunk(chunk[0], verifyonly=True) is None:
needed_chunks.append(chunk)
return needed_chunks
def restore_tree(self, mid=None):
'''Restores the entire file tree for a given manifest id.'''
self.status.mode = self.status.RESTORING
self.status.update()
if mid is None:
manifest = self.get_last_manifest()
else:
manifest = self._load_manifest(mid)
self.status.files = len(manifest['files'])
self.status.bytes = reduce(lambda x, y: x+y['s'],
manifest['files'],
0)
self.build_block_map(manifest)
self.transport.prepare_for_restore(
self.find_needed_chunks(self.get_chunklist(manifest,
return_sizes=True)))
for dir_manifest in manifest['dirs']:
dirname = dir_manifest['n']
dirpath = self._backup_to_syspath(dirname)
self.status.dirs += 1
self.status.update()
self.status.verbose(dirname)
try:
os.makedirs(dirpath)
except os.error:
pass
for file_manifest in manifest['files']:
self.restore_file(file_manifest)
for dir_manifest in reversed(manifest['dirs']): # permissions
dirname = dir_manifest['n']
dirpath = self._backup_to_syspath(dirname)
os.chmod(dirpath, dir_manifest['mode'])
os.utime(dirpath, (int(dir_manifest['mtime']),
int(dir_manifest['mtime'])))
self.status.complete_operation()
def snap_file(self, full_path, rel_path):
'''Uploads a file and returns a file manifest. Does not
re-upload chunks when chunks exist at destination.'''
self.status.filename(rel_path)
s = os.stat(full_path)
file_manifest = {'n': rel_path,
'uid': s.st_uid,
'gid': s.st_gid,
'mode': s.st_mode,
'mtime': int(s.st_mtime),
'b': []}
if rel_path in self.oldfiles and \
file_manifest['mtime'] == self.oldfiles[rel_path]['mtime'] and \
'd' not in self.oldfiles[rel_path] and not self.force:
file_manifest['b'] = self.oldfiles[rel_path]['b']
file_manifest['s'] = self.oldfiles[rel_path]['s']
self.status.chunks += len(file_manifest['b'])
self.status.files += 1
self.status.bytes += file_manifest['s']
self.status.update()
self.status.filename(None)
return file_manifest
f = open(full_path, 'rb')
for chunkhash, chunkdata in self._get_chunks(f):
if chunkdata is not None:
try:
self.transport.write_chunk(chunkhash, chunkdata)
except NotRedundant: # chunk written to >= 1 dest
file_manifest['d'] = 1 # mark file dirty
file_manifest['b'].append(chunkhash)
self.status.chunks += 1
self.status.update()
file_manifest['s'] = f.tell()
self.status.bytes += f.tell()
f.close()
self.status.files += 1
self.status.update()
self.status.filename(None)
return file_manifest
def snap_tree(self):
'''Uploads a full backup of tree to destination.'''
self.status.mode = self.status.BACKING_UP
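        # get_last_manifest() primes self.oldfiles from the newest remote
        # manifest so snap_file() can skip files whose mtime is unchanged.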
self.get_last_manifest()
manifest = {'version': 1,
'dirs': [],
'files': []}
self.m = manifest
for path in self.config['local_paths']:
path = self._backup_to_syspath(path)
for root, dirs, files in os.walk(path):
s = os.stat(root)
dir_manifest = {'n': self._syspath_to_backup(root),
'uid': s.st_uid,
'gid': s.st_gid,
'mode': s.st_mode,
'mtime': int(s.st_mtime)}
manifest['dirs'].append(dir_manifest)
self.status.verbose(root)
for filename in files:
full_path = os.path.join(root, filename)
backup_path = self._syspath_to_backup(full_path)
manifest['files'].append(self.snap_file(full_path,
backup_path))
self.status.verbose(full_path)
try:
self._save_manifest(manifest)
except transports.NotRedundant:
pass
self.status.complete_operation()
def get_last_manifest(self):
'''Retrieves last manifest from destination for file comparison.'''
if self.oldfiles != {}:
return
mids = self.transport.list_manifest_ids()
if len(mids) > 0:
manifest = self._load_manifest(mids[-1])
self.oldfiles = dict(map(lambda x: (x['n'], x),
manifest['files']))
return manifest
|
sharph/forklift
|
forklift/backup.py
|
Python
|
agpl-3.0
| 14,454
|
# -*- coding: utf-8 -*-
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import mass_mailing_stats
|
acsone/mozaik
|
mozaik_communication/models/__init__.py
|
Python
|
agpl-3.0
| 178
|
"""
Tests for contentstore.views.preview.py
"""
import re
from unittest import mock
import ddt
from django.test.client import Client, RequestFactory
from web_fragments.fragment import Fragment
from xblock.core import XBlock, XBlockAside
from cms.djangoapps.contentstore.utils import reverse_usage_url
from cms.djangoapps.xblock_config.models import StudioConfig
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.test_asides import AsideTestType # lint-amnesty, pylint: disable=wrong-import-order
from ..preview import _preview_module_system, get_preview_fragment
@ddt.ddt
class GetPreviewHtmlTestCase(ModuleStoreTestCase):
"""
Tests for get_preview_fragment.
Note that there are other existing test cases in test_contentstore that indirectly execute
get_preview_fragment via the xblock RESTful API.
"""
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
def test_preview_fragment(self):
"""
Test for calling get_preview_html. Ensures data-usage-id is correctly set and
asides are correctly included.
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
html = ItemFactory.create(
parent_location=course.location,
category="html",
data={'data': "<html>foobar</html>"}
)
config = StudioConfig.current()
config.enabled = True
config.save()
request = RequestFactory().get('/dummy-url')
request.user = UserFactory()
request.session = {}
# Call get_preview_fragment directly.
context = {
'reorderable_items': set(),
'read_only': True
}
html = get_preview_fragment(request, html, context).content
# Verify student view html is returned, and the usage ID is as expected.
html_pattern = re.escape(
str(course.id.make_usage_key('html', 'replaceme'))
).replace('replaceme', r'html_[0-9]*')
self.assertRegex(
html,
f'data-usage-id="{html_pattern}"'
)
self.assertRegex(html, '<html>foobar</html>')
self.assertRegex(html, r"data-block-type=[\"\']test_aside[\"\']")
self.assertRegex(html, "Aside rendered")
# Now ensure the acid_aside is not in the result
self.assertNotRegex(html, r"data-block-type=[\"\']acid_aside[\"\']")
# Ensure about pages don't have asides
about = modulestore().get_item(course.id.make_usage_key('about', 'overview'))
html = get_preview_fragment(request, about, context).content
self.assertNotRegex(html, r"data-block-type=[\"\']test_aside[\"\']")
self.assertNotRegex(html, "Aside rendered")
@XBlockAside.register_temp_plugin(AsideTestType, 'test_aside')
def test_preview_no_asides(self):
"""
Test for calling get_preview_html. Ensures data-usage-id is correctly set and
asides are correctly excluded because they are not enabled.
"""
course = CourseFactory.create(default_store=ModuleStoreEnum.Type.split)
html = ItemFactory.create(
parent_location=course.location,
category="html",
data={'data': "<html>foobar</html>"}
)
config = StudioConfig.current()
config.enabled = False
config.save()
request = RequestFactory().get('/dummy-url')
request.user = UserFactory()
request.session = {}
# Call get_preview_fragment directly.
context = {
'reorderable_items': set(),
'read_only': True
}
html = get_preview_fragment(request, html, context).content
self.assertNotRegex(html, r"data-block-type=[\"\']test_aside[\"\']")
self.assertNotRegex(html, "Aside rendered")
@mock.patch('xmodule.conditional_module.ConditionalBlock.is_condition_satisfied')
def test_preview_conditional_module_children_context(self, mock_is_condition_satisfied):
"""
        Tests that passing an empty context to the children of ConditionalBlock does not raise a KeyError.
"""
mock_is_condition_satisfied.return_value = True
client = Client()
client.login(username=self.user.username, password=self.user_password)
with self.store.default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
conditional_block = ItemFactory.create(
parent_location=course.location,
category="conditional"
)
# child conditional_block
ItemFactory.create(
parent_location=conditional_block.location,
category="conditional"
)
url = reverse_usage_url(
'preview_handler',
conditional_block.location,
kwargs={'handler': 'xmodule_handler/conditional_get'}
)
response = client.post(url)
self.assertEqual(response.status_code, 200)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_block_branch_not_changed_by_preview_handler(self, default_store):
"""
        Tests that preview_handler does not update the blocks being previewed
"""
client = Client()
client.login(username=self.user.username, password=self.user_password)
with self.store.default_store(default_store):
course = CourseFactory.create()
block = ItemFactory.create(
parent_location=course.location,
category="problem"
)
url = reverse_usage_url(
'preview_handler',
block.location,
kwargs={'handler': 'xmodule_handler/problem_check'}
)
response = client.post(url)
self.assertEqual(response.status_code, 200)
self.assertFalse(modulestore().has_changes(modulestore().get_item(block.location)))
@XBlock.needs("field-data")
@XBlock.needs("i18n")
@XBlock.needs("mako")
@XBlock.needs("user")
@XBlock.needs("teams_configuration")
class PureXBlock(XBlock):
"""
Pure XBlock to use in tests.
"""
def student_view(self, context):
"""
Renders the output that a student will see.
"""
fragment = Fragment()
fragment.add_content(self.runtime.service(self, 'mako').render_template('edxmako.html', context))
return fragment
@ddt.ddt
class StudioXBlockServiceBindingTest(ModuleStoreTestCase):
"""
Tests that the Studio Module System (XBlock Runtime) provides an expected set of services.
"""
def setUp(self):
"""
Set up the user and request that will be used.
"""
super().setUp()
self.user = UserFactory()
self.course = CourseFactory.create()
self.request = mock.Mock()
self.field_data = mock.Mock()
@XBlock.register_temp_plugin(PureXBlock, identifier='pure')
@ddt.data("user", "i18n", "field-data", "teams_configuration")
def test_expected_services_exist(self, expected_service):
"""
        Tests that the expected services ('user', 'i18n', 'field-data', 'teams_configuration') are provided by the Studio runtime.
"""
descriptor = ItemFactory(category="pure", parent=self.course)
runtime = _preview_module_system(
self.request,
descriptor,
self.field_data,
)
service = runtime.service(descriptor, expected_service)
self.assertIsNotNone(service)
class CmsModuleSystemShimTest(ModuleStoreTestCase):
"""
Tests that the deprecated attributes in the Module System (XBlock Runtime) return the expected values.
"""
def setUp(self):
"""
Set up the user and other fields that will be used to instantiate the runtime.
"""
super().setUp()
self.course = CourseFactory.create()
self.user = UserFactory()
self.request = RequestFactory().get('/dummy-url')
self.request.user = self.user
self.request.session = {}
self.descriptor = ItemFactory(category="video", parent=self.course)
self.field_data = mock.Mock()
self.runtime = _preview_module_system(
self.request,
self.descriptor,
self.field_data,
)
def test_get_user_role(self):
assert self.runtime.get_user_role() == 'staff'
@XBlock.register_temp_plugin(PureXBlock, identifier='pure')
def test_render_template(self):
descriptor = ItemFactory(category="pure", parent=self.course)
html = get_preview_fragment(self.request, descriptor, {'element_id': 142}).content
assert '<div id="142" ns="main">Testing the MakoService</div>' in html
def test_xqueue_is_not_available_in_studio(self):
descriptor = ItemFactory(category="problem", parent=self.course)
runtime = _preview_module_system(
self.request,
descriptor=descriptor,
field_data=mock.Mock(),
)
assert runtime.xqueue is None
assert runtime.service(descriptor, 'xqueue') is None
|
edx/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_preview.py
|
Python
|
agpl-3.0
| 9,656
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 - INECO PARTNERSHIP LIMITED (<http://www.ineco.co.th>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Ineco Purchase Sequence',
'version': '0.1',
'category': 'Extended',
'description': """
""",
'author': 'Mr.Tititab Srisookco',
'website': 'http://www.ineco.co.th',
'depends': ['base','purchase','stock'],
'data': [],
'demo': [],
'test':[],
'update_xml': [
'stock_view.xml',
],
'installable': True,
'images': [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jeffery9/mixprint_addons
|
ineco_purchase_sequence/__openerp__.py
|
Python
|
agpl-3.0
| 1,458
|
# #############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import io
import zipfile
from odoo.tests import common
class TestPrototypeModuleExport(common.TransactionCase):
def setUp(self):
super(TestPrototypeModuleExport, self).setUp()
self.main_model = self.env["module_prototyper.module.export"]
self.prototype_model = self.env["module_prototyper"]
self.module_category_model = self.env["ir.module.category"]
self.prototype = self.prototype_model.create(
{
"name": "t_name",
"category_id": self.module_category_model.browse(1).id,
"human_name": "t_human_name",
"summary": "t_summary",
"description": "t_description",
"author": "t_author",
"maintainer": "t_maintainer",
"website": "t_website",
}
)
self.exporter = self.main_model.create({"name": "t_name"})
def test_action_export_assert_for_wrong_active_model(self):
"""Test if the assertion raises."""
exporter = self.main_model.with_context(active_model="t_active_model").create(
{}
)
self.assertRaises(AssertionError, exporter.action_export)
def test_action_export_update_wizard(self):
"""Test if the wizard is updated during the process."""
exporter = self.main_model.with_context(
active_model=self.prototype_model._name,
active_id=self.prototype.id,
).create({})
exporter.action_export()
self.assertEqual(exporter.state, "get")
self.assertEqual(exporter.name, "{}.zip".format(self.prototype.name))
def test_zip_files_returns_tuple(self):
"""Test the method return of the method that generate the zip file."""
ret = self.main_model.zip_files(self.exporter, [self.prototype])
self.assertIsInstance(ret, tuple)
self.assertIsInstance(ret.zip_file, zipfile.ZipFile)
self.assertIsInstance(ret.BytesIO, io.BytesIO)
|
OCA/server-tools
|
module_prototyper/tests/test_prototype_module_export.py
|
Python
|
agpl-3.0
| 2,979
|
# Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
from ckan.common import c, _
import pylons.config as config
import ckan.lib.base as base
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
from routes.mapper import SubMapper
from ckanext.bcgov.util.util import (get_edc_tags,
edc_type_label,
get_state_values,
get_username,
get_user_orgs,
get_user_role_orgs,
get_user_orgs_id,
get_user_toporgs,
get_organization_branches,
get_all_orgs
)
from ckanext.bcgov.util.helpers import (get_suborg_sector,
get_user_dataset_num,
get_package_data,
is_license_open,
get_record_type_label,
get_suborgs,
record_is_viewable,
get_facets_selected,
get_facets_unselected,
get_sectors_list,
get_dataset_type,
get_organizations,
get_organization_title,
get_espg_id,
get_edc_org,
get_iso_topic_values,
get_eas_login_url,
get_fqdn,
get_environment_name,
get_version,
get_bcgov_commit_id,
resource_prefix,
)
abort = base.abort
class SchemaPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.IRoutes, inherit=True)
plugins.implements(plugins.ITemplateHelpers, inherit=False)
plugins.implements(plugins.IPackageController, inherit=True)
plugins.implements(plugins.IFacets, inherit=True)
plugins.implements(plugins.IActions, inherit=True)
def get_helpers(self):
return {
"dataset_type" : get_dataset_type,
"edc_tags" : get_edc_tags,
"edc_orgs" : get_organizations,
"edc_org_branches" : get_organization_branches,
"edc_org_title" : get_organization_title,
"edc_type_label" : edc_type_label,
"edc_state_values" : get_state_values,
"edc_username": get_username,
"get_sector" : get_suborg_sector,
"get_user_orgs" : get_user_orgs,
"get_user_orgs_id" : get_user_orgs_id,
"get_user_toporgs": get_user_toporgs,
"get_suborg_sector" : get_suborg_sector,
"get_user_dataset_num" : get_user_dataset_num,
"get_edc_package" : get_package_data,
"is_license_open" : is_license_open,
"record_type_label" : get_record_type_label,
"get_suborgs": get_suborgs,
"record_is_viewable": record_is_viewable,
"get_espg_id" : get_espg_id,
"get_user_role_orgs" : get_user_role_orgs,
"get_all_orgs" : get_all_orgs,
"get_facets_selected": get_facets_selected,
"get_facets_unselected" : get_facets_unselected,
"get_sectors_list": get_sectors_list,
"get_edc_org" : get_edc_org,
"get_iso_topic_values" : get_iso_topic_values,
"get_eas_login_url": get_eas_login_url,
"get_fqdn": get_fqdn,
"get_environment_name": get_environment_name,
"get_version": get_version,
"get_bcgov_commit_id": get_bcgov_commit_id,
"googleanalytics_resource_prefix": resource_prefix,
}
def update_config(self, config):
toolkit.add_public_directory(config, 'public')
toolkit.add_template_directory(config, 'templates')
toolkit.add_resource('fanstatic', 'edc_resource')
toolkit.add_resource('public/scripts', 'theme_scripts')
#Customizing action mapping
def before_map(self, map):
package_controller = 'ckanext.bcgov.controllers.package:EDCPackageController'
user_controller = 'ckanext.bcgov.controllers.user:EDCUserController'
org_controller = 'ckanext.bcgov.controllers.organization:EDCOrganizationController'
site_map_controller = 'ckanext.bcgov.controllers.site_map:GsaSitemapController'
api_controller = 'ckanext.bcgov.controllers.api:EDCApiController'
# map.redirect('/', '/dataset')
map.connect('package_index', '/', controller=package_controller, action='index')
map.connect('/dataset/add', controller=package_controller, action='typeSelect')
with SubMapper(map, controller=package_controller) as m:
m.connect('add dataset', '/dataset/new', action='new')
#m.connect('dataset_edit', '/dataset/edit/{id}', action='edc_edit',ckan_icon='edit')
m.connect('search', '/dataset', action='search', highlight_actions='index search')
m.connect('dataset_read', '/dataset/{id}', action='read', ckan_icon='sitemap')
m.connect('duplicate', '/dataset/duplicate/{id}', action='duplicate')
m.connect('/dataset/{id}/resource/{resource_id}', action='resource_read')
m.connect('/dataset/{id}/resource_delete/{resource_id}', action='resource_delete')
m.connect('/authorization-error', action='auth_error')
m.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', action='resource_edit', ckan_icon='edit')
m.connect('new_resource', '/dataset/new_resource/{id}', action='new_resource')
with SubMapper(map, controller=user_controller) as m:
m.connect('user_dashboard_unpublished', '/dashboard/unpublished',
action='dashboard_unpublished', ckan_icon='group')
m.connect('/user/edit', action='edit')
m.connect('/user/activity/{id}/{offset}', action='activity')
m.connect('user_activity_stream', '/user/activity/{id}',
action='activity', ckan_icon='time')
m.connect('user_dashboard', '/dashboard', action='dashboard',
ckan_icon='list')
m.connect('user_dashboard_datasets', '/dashboard/datasets',
action='dashboard_datasets', ckan_icon='sitemap')
m.connect('user_dashboard_organizations', '/dashboard/organizations',
action='dashboard_organizations', ckan_icon='building')
m.connect('/dashboard/{offset}', action='dashboard')
m.connect('user_follow', '/user/follow/{id}', action='follow')
m.connect('/user/unfollow/{id}', action='unfollow')
m.connect('user_followers', '/user/followers/{id:.*}',
action='followers', ckan_icon='group')
m.connect('user_edit', '/user/edit/{id:.*}', action='edit',
ckan_icon='cog')
m.connect('user_delete', '/user/delete/{id}', action='delete')
m.connect('/user/reset/{id:.*}', action='perform_reset')
m.connect('register', '/user/register', action='register')
m.connect('login', '/user/login', action='login')
m.connect('/user/_logout', action='logout')
m.connect('/user/logged_in', action='logged_in')
m.connect('/user/logged_out', action='logged_out')
m.connect('/user/logged_out_redirect', action='logged_out_page')
m.connect('/user/reset', action='request_reset')
m.connect('/user/me', action='me')
m.connect('/user/set_lang/{lang}', action='set_lang')
m.connect('user_datasets', '/user/{id:.*}', action='read',
ckan_icon='sitemap')
m.connect('user_index', '/user', action='index')
with SubMapper(map, controller=org_controller) as m:
m.connect('organizations_index', '/organization', action='index')
m.connect('/organization/list', action='list')
m.connect('/organization/new', action='new')
m.connect('/organization/{action}/{id}',
requirements=dict(action='|'.join([
'delete',
'admins',
'member_new',
'member_delete',
'history'
])))
m.connect('organization_activity', '/organization/activity/{id}',
action='activity', ckan_icon='time')
m.connect('organization_about', '/organization/about/{id}',
action='about', ckan_icon='info-sign')
m.connect('organization_read', '/organization/{id}', action='read',
ckan_icon='sitemap')
m.connect('organization_edit', '/organization/edit/{id}',
action='edit', ckan_icon='edit')
m.connect('organization_members', '/organization/members/{id}',
action='members', ckan_icon='group')
m.connect('organization_bulk_process',
'/organization/bulk_process/{id}',
action='bulk_process', ckan_icon='sitemap')
map.connect('sitemap','/sitemap.html', controller=site_map_controller, action='view')
map.connect('sitemap','/sitemap.xml', controller=site_map_controller, action='read')
with SubMapper(map, controller=api_controller, path_prefix='/api{ver:/1|/2|/3|}',
ver='/1') as m:
m.connect('/i18n/{lang}', action='i18n_js_translations')
m.connect('/')
GET_POST = dict(method=['GET', 'POST'])
m.connect('/action/organization_list_related', action='organization_list_related', conditions=GET_POST)
m.connect('/action/{logic_function}', action='action', conditions=GET_POST)
map.connect('/admin/trash', controller='admin', action='trash')
map.connect('ckanadmin_trash', '/admin/trash', controller='admin',
action='trash', ckan_icon='trash')
return map
def after_map(self, map):
        return map
def before_index(self, pkg_dict):
'''
Makes the sort by name case insensitive.
        Note that the search index must be rebuilt for the changes to take effect.
'''
title = pkg_dict['title']
if title:
#Assign title to title_string with all characters switched to lower case.
pkg_dict['title_string'] = title.lower()
res_format = pkg_dict.get('res_format', [])
if 'other' in res_format:
# custom download (other) supports a number of formats
res_format.remove('other')
res_format.extend(['shp', 'fgdb', 'e00'])
return pkg_dict
def before_search(self, search_params):
'''
Customizes package search and applies filters based on the dataset metadata-visibility
and user roles.
'''
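        # Net effect of the filter query built below:
        #   sysadmin        -> no extra filter
        #   logged-in user  -> PUBLISHED / PENDING ARCHIVE records, plus records
        #                      owned by orgs the user administers or edits
        #   anonymous       -> PUBLISHED / PENDING ARCHIVE records marked Public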
#Change the default sort order when no query passed
if not search_params.get('q') and search_params.get('sort') in (None, 'rank'):
search_params['sort'] = 'record_publish_date desc, metadata_modified desc'
#Change the query filter depending on the user
if 'fq' in search_params:
fq = search_params['fq']
else:
fq = ''
#need to append solr param q.op to force an AND query
if 'q' in search_params:
q = search_params['q']
if q !='':
q = '{!lucene q.op=AND}' + q
search_params['q'] = q
else:
q = ''
try :
user_name = c.user or 'visitor'
# There are no restrictions for sysadmin
if c.userobj and c.userobj.sysadmin == True:
fq += ' '
else:
if user_name != 'visitor':
fq += ' +(edc_state:("PUBLISHED" OR "PENDING ARCHIVE")'
#IDIR users can also see private records of their organizations
user_id = c.userobj.id
#Get the list of orgs that the user is an admin or editor of
user_orgs = ['"' + org.id + '"' for org in get_user_orgs(user_id, 'admin')]
user_orgs += ['"' + org.id + '"' for org in get_user_orgs(user_id, 'editor')]
if user_orgs != []:
fq += ' OR ' + 'owner_org:(' + ' OR '.join(user_orgs) + ')'
fq += ')'
#Public user can only view public and published records
else:
fq += ' +(edc_state:("PUBLISHED" OR "PENDING ARCHIVE") AND metadata_visibility:("Public"))'
except Exception:
if 'fq' in search_params:
fq = search_params['fq']
else:
fq = ''
fq += ' +edc_state:("PUBLISHED" OR "PENDING ARCHIVE") +metadata_visibility:("Public")'
search_params['fq'] = fq
return search_params
def before_view(self, pkg_dict):
# CITZEDC808
if not record_is_viewable(pkg_dict, c.userobj):
abort(401, _('Unauthorized to read package %s') % pkg_dict.get("title"))
return pkg_dict
def dataset_facets(self, facet_dict, package_type):
'''
Customizes search facet list.
'''
from collections import OrderedDict
facet_dict = OrderedDict()
#Add dataset types and organization sectors to the facet list
facet_dict['license_id'] = _('License')
facet_dict['sector'] = _('Sectors')
facet_dict['type'] = _('Dataset types')
facet_dict['res_format'] = _('Format')
facet_dict['organization'] = _('Organizations')
facet_dict['download_audience'] = _('Download permission')
if c.userobj and c.userobj.sysadmin:
facet_dict['edc_state'] = _('States')
return facet_dict
def group_facets(self, facet_dict, group_type, package_type):
'''
Use the same facets for filtering datasets within group pages
'''
return self.dataset_facets(facet_dict, package_type)
def get_actions(self):
import ckanext.bcgov.logic.action as edc_action
return {'edc_package_update' : edc_action.edc_package_update,
'edc_package_update_bcgw' : edc_action.edc_package_update_bcgw,
'package_update' : edc_action.package_update,
'package_autocomplete' : edc_action.package_autocomplete }
class EDCDisqusPlugin(plugins.SingletonPlugin):
# Declare that this class implements IConfigurer.
plugins.implements(plugins.IConfigurer)
# Declare that this plugin will implement ITemplateHelpers.
plugins.implements(plugins.ITemplateHelpers)
plugins.implements(plugins.IRoutes, inherit=True)
def update_config(self, config):
# Add this plugin's templates dir to CKAN's extra_template_paths, so
# that CKAN will use this plugin's custom templates.
# 'templates' is the path to the templates dir, relative to this
# plugin.py file.
toolkit.add_template_directory(config, 'templates')
# Add this plugin's public dir to CKAN's extra_public_paths, so
# that CKAN will use this plugin's custom static files.
toolkit.add_public_directory(config, 'public')
toolkit.add_resource('fanstatic', 'edc_resource')
def before_map(self, map):
disqus_controller = 'ckanext.bcgov.controllers.disqus:DisqusController'
with SubMapper(map, controller=disqus_controller) as m:
m.connect('/disqus/posts/create', action='disqusPostCreate')
with SubMapper(map, controller=disqus_controller) as m:
m.connect('/disqus/threads/get', action='disqusGetThread')
with SubMapper(map, controller=disqus_controller) as m:
m.connect('/disqus/posts/list', action='disqusGetPostsByThread')
return map
def comments_block(self):
''' Adds Disqus Comments to the page.'''
# we need to create an identifier
c = plugins.toolkit.c
identifier = ''
try:
if c.current_package_id:
identifier = c.current_package_id
elif c.id:
identifier = c.id
else:
# cannot make an identifier
identifier = ''
        except Exception:
identifier = ''
data = {'identifier' : identifier, 'site_url': config.get('ckan.site_url') }
return plugins.toolkit.render_snippet('package/comments_block.html', data)
def disqus_get_forum_name(self):
return config.get('edcdisqus.forum_name') or 'bccatalogue'
def get_helpers(self):
return { 'comments_block' : self.comments_block, 'disqus_get_forum_name': self.disqus_get_forum_name }
|
gjlawran/ckanext-bcgov
|
ckanext/bcgov/plugin.py
|
Python
|
agpl-3.0
| 17,900
|
from cornflake.exceptions import ValidationError
from flask import Response
from radar.api.serializers.auth import ForgotUsernameSerializer
from radar.api.views.generics import ApiView, request_json
from radar.auth.exceptions import UserNotFound
from radar.auth.forgot_username import forgot_username
class ForgotUsernameView(ApiView):
@request_json(ForgotUsernameSerializer)
def post(self, data):
email = data['email']
try:
forgot_username(email)
except UserNotFound:
raise ValidationError({'email': 'No users found with that email address.'})
return Response(status=200)
def register_views(app):
app.add_public_endpoint('forgot_username')
app.add_url_rule('/forgot-username', view_func=ForgotUsernameView.as_view('forgot_username'))
|
renalreg/radar
|
radar/api/views/forgot_username.py
|
Python
|
agpl-3.0
| 814
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Name: ProjectCli.py
# Purpose:
# Author: Fabien Marteau <fabien.marteau@armadeus.com>
# Created: 23/05/2008
#-----------------------------------------------------------------------------
# Copyright (2008) Armadeus Systems
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#-----------------------------------------------------------------------------
# Revision list :
#
# Date By Changes
#
#-----------------------------------------------------------------------------
__doc__ = ""
__version__ = "1.0.0"
__author__ = "Fabien Marteau <fabien.marteau@armadeus.com>"
import cmd,os
from periphondemand.bin.define import *
from periphondemand.bin.utils import wrapperxml, settings, error, basecli
from periphondemand.bin.utils import wrappersystem as sy
from periphondemand.bin.utils.display import Display
from periphondemand.bin.commandline import *
from periphondemand.bin.commandline.synthesiscli import SynthesisCli
from periphondemand.bin.commandline.simulationcli import SimulationCli
from periphondemand.bin.commandline.drivercli import DriverCli
from periphondemand.bin.utils.settings import Settings
from periphondemand.bin.utils.basecli import BaseCli
from periphondemand.bin.utils.error import Error
from periphondemand.bin.core.project import Project
from periphondemand.bin.core.component import Component
from periphondemand.bin.core.platform import Platform
from periphondemand.bin.core.library import Library
from periphondemand.bin.code.intercon import Intercon
from periphondemand.bin.code.vhdl.topvhdl import TopVHDL
from periphondemand.bin.toolchain.synthesis import Synthesis
from periphondemand.bin.toolchain.simulation import Simulation
from periphondemand.bin.toolchain.driver import Driver
settings = Settings()
display = Display()
class ProjectCli(BaseCli):
""" Project command line interface
"""
def __init__(self,parent=None):
BaseCli.__init__(self,parent)
if settings.active_project is None:
settings.active_project = Project("",void=1)
if settings.active_library is None:
settings.active_library = Library()
def do_synthesis(self,arg):
"""\
Usage : synthesis
        Synthesis generation environment
"""
try:
self.isProjectOpen()
self.isPlatformSelected()
except Error,e:
print e
return
cli = SynthesisCli(self)
cli.setPrompt("synthesis")
arg = str(arg)
if len(arg) > 0:
line = cli.precmd(arg)
cli.onecmd(line)
cli.postcmd(True, line)
else:
cli.cmdloop()
self.stdout.write("\n")
def do_simulation(self,line):
"""\
Usage : simulation
Simulation generation environment
"""
try:
self.isProjectOpen()
self.isPlatformSelected()
except Error,e:
print e
return
# test if only one toolchain for simulation in library
cli = SimulationCli(self)
cli.setPrompt("simulation")
line = str(line)
if len(line) > 0:
line = cli.precmd(line)
cli.onecmd(line)
cli.postcmd(True, line)
else:
cli.cmdloop()
self.stdout.write("\n")
def do_driver(self,line):
"""\
Usage : driver
Driver generation environment
"""
try:
self.isProjectOpen()
self.isPlatformSelected()
except Error,e:
print e
return
        # test if only one toolchain for driver in library
cli = DriverCli(self)
cli.setPrompt("driver")
line = str(line)
if len(line) > 0:
line = cli.precmd(line)
cli.onecmd(line)
cli.postcmd(True, line)
else:
cli.cmdloop()
self.stdout.write("\n")
def do_create(self,line):
"""\
Usage : create <projectname>
create new project
"""
try:
self.checkargs(line,"<projectname>")
except Error,e:
print e
return
try:
sy.check_name(line)
except Error,e:
print e
return 0
dirname = os.path.abspath(line)
if sy.dirExist(dirname):
print "Project "+line+" already exists"
return 0
else:
try:
settings.active_project = Project(dirname,void=0)
except Error,e:
print e
return
self.setPrompt("project",settings.active_project.getName())
print "Project "+settings.active_project.getName()+" created"
def complete_load(self,text,line,begidx,endidx):
""" complete load command with files under directory """
path = line.split(" ")[1]
if path.find("/") == -1: # sub
path = ""
elif text.split() == "": # sub/sub/
path = "/".join(path)+"/"
else: # sub/sub
path = "/".join(path.split("/")[0:-1]) + "/"
listdir = sy.listDirectory(path)
listfile = sy.listFileType(path,XMLEXT[1:])
listfile.extend(listdir)
return self.completelist(line,text,listfile)
def do_load(self,line):
"""\
        Usage : load <projectfilename>.xml
Load a project
"""
try:
self.checkargs(line,"<projectfilename>.xml")
except Error,e:
print e
return
if sy.dirExist(line):
head,projectname = os.path.split(line)
line = os.path.join(head,projectname,projectname+".xml")
if not sy.fileExist(line):
print Error("File doesn't exists")
return
try:
settings.active_project = Project(line)
except Error,e:
print e
return
except IOError,e:
print e
return
self.setPrompt("project:"+settings.active_project.getName())
print display
def complete_addinstance(self,text,line,begidx,endidx):
componentlist = []
try:
componentlist = self.completeargs(text,line,"<libraryname>.<componentname>.[componentversion] [newinstancename]")
except Exception,e:
print e
return componentlist
def do_addinstance(self,line):
"""\
Usage : addinstance <libraryname>.<componentname>.[componentversion] [newinstancename]
Add component in project
"""
try:
self.isProjectOpen()
self.isPlatformSelected()
self.checkargs(line,"<libraryname>.<componentname>.[componentversion] [newinstancename]")
except Error,e:
print display
print e
return
arg = line.split(' ')
subarg = arg[0].split(".")
try:
instancename= arg[1]
except IndexError:
instancename=None
try:
componentversion=subarg[2]
except IndexError:
componentversion=None
try:
if instancename != None:
sy.check_name(instancename)
if instancename== None and componentversion==None:
settings.active_project.addinstance(componentname=subarg[1],
libraryname=subarg[0])
elif instancename != None and componentversion==None:
settings.active_project.addinstance(componentname=subarg[1],
libraryname=subarg[0],
instancename=instancename)
elif instancename == None and componentversion!=None:
settings.active_project.addinstance(componentname=subarg[1],
libraryname=subarg[0],
componentversion=componentversion)
else:
settings.active_project.addinstance(componentname=subarg[1],
libraryname=subarg[0],
componentversion=componentversion,
instancename=instancename)
except Error,e:
print display
print e
return
print display
def complete_listcomponents(self,text,line,begidx,endidx):
componentlist = []
try:
componentlist = self.completeargs(text,line,"[libraryname]")
except Exception:
pass
return componentlist
def do_listcomponents(self,line):
"""\
Usage : listcomponents [libraryname]
List components available in the library
"""
if line.strip() == "":
return self.columnize(settings.active_library.listLibraries())
else:
return self.columnize(
settings.active_library.listComponents(line))
def listinstances(self):
try:
self.isProjectOpen()
return [comp.getInstanceName()\
for comp in settings.active_project.getInstancesList()]
except Error,e:
print e
return
def do_listinstances(self,line):
"""\
Usage : listinstances
List all project instances
"""
try:
self.isProjectOpen()
except Error,e:
print e
return
return self.columnize(self.listinstances())
def complete_selectplatform(self,text,line,begidx,endidx):
platformlist = []
try:
platformlist = self.completeargs(text,line,"<platformname>")
except Exception,e:
print e
return platformlist
def do_selectplatform(self,line):
"""\
Usage : selectplatform <platformname>
Select the platform to use
"""
try:
self.isProjectOpen()
self.checkargs(line,"<platformname>")
except Error,e:
print e
return
try:
settings.active_project.selectPlatform(line)
settings.active_project.saveProject()
except Error,e:
print display
print e
return
print display
def do_listplatforms(self,line):
"""\
Usage : listplatforms
List platform available
"""
try:
self.isProjectOpen()
except Error,e:
print e
return
try:
return self.columnize(settings.active_project.listAvailablePlatforms())
except AttributeError,e:
print e
def complete_listinterfaces(self,text,line,begidx,endidx):
pinlist = []
try:
pinlist = self.completeargs(text,line,"<instancename>")
except Exception,e:
print e
return pinlist
def do_listinterfaces(self,line=None):
"""\
        Usage : listinterfaces <instancename>
        List the interfaces of an instance
"""
try:
self.checkargs(line,"<instancename>")
self.isProjectOpen()
interfacelist= [interface.getName() for interface in settings.active_project.getInstance(line).getInterfacesList()]
except Error,e:
print display
print e
return
print display
return self.columnize(interfacelist)
def do_saveproject(self,line):
"""\
Usage : saveproject
        Save the project in the current directory
"""
try:
self.isProjectOpen()
except Error,e:
print display
print e
return
print display
settings.active_project.saveProject()
def complete_connectpin(self,text,line,begidx,endidx):
pinlist = []
try:
pinlist = self.completeargs(text,line,"<instancename>.<interfacename>.<portname>.<pinnum> <instancename>.<interfacename>.<portname>.<pinnum>")
except Exception,e:
print e
return pinlist
def do_connectpin(self,line):
"""\
Usage : connectpin <instancename>.<interfacename>.<portname>.[pinnum] <instancename>.<interfacename>.<portname>.[pinnum]
Connect pin between instances
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>.<interfacename>.<portname>.[pinnum] <instancename>.<interfacename>.<portname>.[pinnum]")
except Error,e:
print display
print e
return
arg = line.split(' ')
source = arg[0].split('.')
dest = arg[-1].split('.')
if len(source) == 3:
source.append(0)
if len(dest) == 3:
dest.append(0)
try:
settings.active_project.connectPin_cmd(\
settings.active_project.getInstance(
source[0]).getInterface(
source[1]).getPort(
source[2]).getPin(source[3]),\
settings.active_project.getInstance(
dest[0] ).getInterface(
dest [1]).getPort(dest[2]).getPin(dest[3]))
except Error, e:
print display
print e
return
print display
def complete_connectport(self,text,line,begidx,endidx):
portlist = []
try:
portlist = self.completeargs(text,line,"<instancename>.<interfacename>.<portname> <instancename>.<interfacename>.<portname>")
except Exception,e:
print e
return portlist
def do_connectport(self,line):
"""
Usage : connectport <instancename>.<interfacename>.<portname> <instancename>.<interfacename>.<portname>
Connect all pins of two same size ports.
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>.<interfacename>.<portname> <instancename>.<interfacename>.<portname>")
except Exception,e:
print display
print e
return
arg=line.split(' ')
source = arg[0].split('.')
dest = arg[-1].split('.')
if len(source) != 3:
print "source arguments error"
return
if len(dest) != 3:
print "Argument error"
return
try:
settings.active_project.connectPort(source[0],source[1],source[2],
dest[0],dest[1],dest[2])
except Error, e:
print display
print e
return
print display
def complete_connectbus(self,text,line,begidx,endidx):
buslist = []
try:
buslist = self.completeargs(text,line,"<masterinstancename>.<masterinterfacename> <slaveinstancename>.<slaveinterfacename>")
except Exception,e:
print e
return buslist
def do_connectbus(self,line):
"""\
Usage : connectbus <masterinstancename>.<masterinterfacename> <slaveinstancename>.<slaveinterfacename>
Connect slave to master bus
"""
try:
self.isProjectOpen()
self.checkargs(line,"<masterinstancename>.<masterinterfacename> <slaveinstancename>.<slaveinterfacename>")
except Exception,e:
print display
print e
return
arg=line.split(' ')
source = arg[0].split('.')
dest = arg[-1].split('.')
if len(source) != 2 or len(dest) != 2:
print "Argument error"
return
try:
settings.active_project.connectBus(source[0],source[1],dest[0],dest[1])
except Error, e:
print display
print e
return
print display
def do_autoconnectbus(self,line):
"""\
Usage : autoconnectbus
Autoconnect bus if only one master in project
"""
try:
self.isProjectOpen()
settings.active_project.autoConnectBus()
except Error,e:
print display
print e
return
print display
def complete_addbusclock(self,text,line,begidx,endidx):
clocklist = []
try:
clocklist = self.completeargs(text,line,"<instancesysconname>.<interfacename> <masterinstancename>.<masterinterfacename>")
except Exception,e:
print e
return clocklist
def do_addbusclock(self,line):
"""\
        Usage : addbusclock <instancesysconname>.<interfacename> <masterinstancename>.<masterinterfacename>
Specify the bus clock
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancesysconname>.<interfacename> <masterinstancename>.<masterinterfacename>")
except Error,e:
print display
print e
return
arg=line.split(' ')
source = arg[0].split('.')
dest = arg[-1].split('.')
if len(source) != 2 or len(dest) != 2:
print "Argument error"
return
try:
settings.active_project.connectClkDomain(source[0],dest[0],source[1],dest[1])
except Error, e:
print display
print e
return
print display
def complete_delpinconnection(self,text,line,begidx,endidx):
connectlist = []
try:
connectlist = self.completeargs(text,line,"<instancename>.<interfacename>.<portname>.<pinnum> <instancename>.<interfacename>.<portname>.<pinnum>")
except Exception,e:
print e
return connectlist
def do_delpinconnection(self,line):
"""\
Usage : delpinconnection <instancename>.<interfacename>.<portname>.[pinnum] [instancename].[interfacename].[portname].[pinnum]
        Delete a pin connection
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>.<interfacename>.<portname>.[pinnum] [instancename].[interfacename].[portname].[pinnum]")
except Error,e:
print display
print e
return
# get arguments
arg = line.split(' ')
# make source and destination tabular
source = arg[0].split('.')
dest = arg[-1].split('.')
# check if dest "instance.interface.port.pin" present,
# if not set it to [None] tabular
try:
dest = arg[1].split('.')
except IndexError:
dest = [None,None,None,None]
# check if pin num present, if not set it None
if len(source) == 3: # instead of 4
source.append(None)
if len(dest) == 3 :
dest.append(None)
try:
settings.active_project.deletePinConnection_cmd(source[0],source[1],source[2],source[3],
dest[0],dest[1],dest[2],dest[3])
except Error, e:
print display
print e
return
print display
print "Connection deleted"
# TODO
def complete_delbusconnection(self,text,line,begidx,endidx):
connectlist = []
try:
connectlist = self.completeargs(text,line,"<instancename>.<interfacename>.<portname>")
except Exception,e:
print e
return connectlist
# TODO
def do_delbusconnection(self,line):
"""\
Usage : delbusconnection <instancename>.<interfacebusname>
        Delete a bus connection
"""
print "TODO"
def complete_delinstance(self,text,line,begidx,endidx):
componentlist = []
try:
componentlist = self.completeargs(text,line,"<instancename>")
except Exception,e:
print e
return componentlist
def do_delinstance(self,line):
"""\
Usage : delinstance <instancename>
        Delete an instance from the project
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>")
except Error,e:
print display
print e
return
try:
settings.active_project.delProjectInstance(line)
except Error,e:
print display
print e
return
print display
def do_check(self,line):
"""\
Usage : check
Check the project before code generation
"""
try:
self.isProjectOpen()
settings.active_project.check()
except Error,e:
print display
print e
print display
def complete_setaddr(self,text,line,begidx,endidx):
addrlist = []
try:
addrlist = self.completeargs(text,line,"<slaveinstancename>.<slaveinterfacename> <addressinhexa>")
except Exception,e:
print e
return addrlist
def do_setaddr(self,line):
"""\
Usage : setaddr <slaveinstancename>.<slaveinterfacename> <addressinhexa>
Set the base address of slave interface
"""
try:
self.isProjectOpen()
self.checkargs(line,"<slaveinstancename>.<slaveinterfacename> <addressinhexa>")
except Error,e:
print display
print e
return
arg = line.split(' ')
names = arg[0].split('.')
if len(names) < 2:
masterinterface = settings.active_project.getInstance(names[0]).getSlaveInterfaceList()
if len(masterinterface) != 1:
print display
print "Error, need a slave interface name"
return
names.append(masterinterface[0].getName())
try:
interfaceslave = settings.active_project.getInstance(names[0]).getInterface(names[1])
interfacemaster = interfaceslave.getMaster()
interfacemaster.allocMem.setAddressSlave(interfaceslave,arg[1])
except Error,e:
print display
print e
return
print display
print "Base address "+arg[1]+" set"
def do_listmasters(self,line):
"""\
        Usage : listmasters
        List master interfaces
"""
try:
self.isProjectOpen()
except Error,e:
print display
print e
return
for master in settings.active_project.getInterfaceMaster():
print master.parent.getInstanceName()+"."+master.getName()
print display
def complete_getmapping(self,text,line,begidx,endidx):
mappinglist = []
try:
mappinglist = self.completeargs(text,line,"<masterinstancename>.<masterinterfacename>")
except Exception,e:
print e
return mappinglist
def do_getmapping(self,line=None):
"""\
Usage : getmapping <masterinstancename>.<masterinterfacename>
Return mapping for a master interface
"""
try:
self.isProjectOpen()
self.checkargs(line,"<masterinstancename>.<masterinterfacename>")
except Error,e:
print display
print e
return
arg = line.split(' ')
names = arg[0].split('.')
try:
masterinterface = settings.active_project.getInstance(names[0]).getInterface(names[1])
print masterinterface.allocMem
except Error,e:
print display
print e
print display
def complete_printxml(self,text,line,begidx,endidx):
printlist = []
try:
printlist = self.completeargs(text,line,"<instancename>")
except Exception,e:
print e
return printlist
def do_printxml(self,line=None):
"""\
Usage : printxml <instancename>
Print instance in XML format
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>")
except Error,e:
print display
print e
return
print settings.active_project.getInstance(line)
print display
def complete_info(self,text,line,begidx,endidx):
infolist = []
try:
infolist = self.completeargs(text,line,"<instancename>")
except Exception,e:
print e
return infolist
def do_info(self,line=None):
"""\
Usage : info <instancename>
Print instance information
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>")
instance = settings.active_project.getInstance(line)
except Error,e:
print display
print e
return
print "Instance name :"+instance.getInstanceName()
print "Component name :"+instance.getName()
print "description : "+instance.getDescription().strip()
print "->Generics"
for generic in instance.getGenericsList():
print "%15s : "%generic.getName() + generic.getValue()
print "->Interfaces"
for interface in instance.getInterfacesList():
if interface.getBusName() != None:
if interface.getClass() == "slave":
print "%-15s "%interface.getName()+" Base address:"+hex(interface.getBaseInt())
elif interface.getClass() == "master":
print "%-15s :"%interface.getName()
for slave in interface.getSlavesList():
print " "*10 + "slave -> "+slave.getInstanceName()+"."+slave.getInterfaceName()
else:
print "%-15s :"%interface.getName()
for port in interface.getPortsList():
print " "*5+"%-15s"%port.getName()+" s"+port.getSize()
for pin in port.getPinsList():
print " "*8+"pin",
if pin.getNum()!= None:
print pin.getNum()+":",
elif pin.isAll():
print "all",
first = True
for connection in pin.getConnections():
if first is not True:
print " "*8+"|"+" "*5,
first = False
print "-> "+connection["instance_dest"]+"."+connection["interface_dest"]+"."+connection["port_dest"]+"."+connection["pin_dest"]
def complete_setgeneric(self,text,line,begidx,endidx):
genericlist = []
try:
genericlist = self.completeargs(text,line,"<instancename>.<genericname> <genericvalue>")
except Exception,e:
print e
return genericlist
def do_setgeneric(self,line=None):
"""\
Usage : setgeneric <instancename>.<genericname> <genericvalue>
Set generic parameter
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>.<genericname> <genericvalue>")
except Error,e:
print display
print e
return
args = line.split(" ")
names = args[0].split(".")
try:
instance = settings.active_project.getInstance(names[0])
generic = instance.getGeneric(names[1])
if generic.isPublic()=="true":
generic.setValue(args[1])
else:
raise Error("this generic can't be modified by user",0)
except Error,e:
print display
print e
return
print display
print "Done"
def do_description(self,line):
"""\
Usage : description <some word for description>
set the project description
"""
settings.active_project.setDescription(line)
print display
print "Description set : "+line
return
def do_closeproject(self,line):
"""\
Usage : closeproject
Close the project
"""
try:
self.isProjectOpen()
except Error,e:
print display
print e
return
settings.active_project = None
print display
print "Project closed"
# Generate CODE
def complete_intercon(self,text,line,begidx,endidx):
interconlist = []
try:
interconlist = self.completeargs(text,line,"<masterinstancename>.<masterinterfacename>")
except Exception,e:
print e
return interconlist
############# FIXME: Move it in synthesiscli ? ######################-
#TODO: change name to generateintercon
def do_intercon(self,line=None):
"""\
Usage : intercon <masterinstancename>.<masterinterfacename>
Generate intercon for master given in argument
"""
try:
self.isProjectOpen()
self.checkargs(line,"<instancename>.<masterinterfacename>")
except Error,e:
print e
return
arg = line.split(' ')
names = arg[0].split('.')
if len(names) != 2:
print "Arguments error"
return
try:
settings.active_project.generateIntercon(names[0],names[1])
except Error,e:
print e
return
print display
#TODO : change name to generatetop
def do_top(self,line):
"""\
Usage : top
Generate top component
"""
try:
self.isProjectOpen()
settings.active_project.check()
top = TopVHDL(settings.active_project)
top.generate()
except Error,e:
print e
return
print display
print "Top generated with name : top_"+settings.active_project.getName()+".vhd"
#####################################################################
def do_report(self,line):
"""\
Usage : report
Generate a report of the project
"""
try:
self.isProjectOpen()
text = settings.active_project.generateReport()
except Error,e:
print display
print e
return
print display
print "report : "
print text
def isProjectOpen(self):
""" check if project is open, raise error if not
"""
if settings.active_project.isVoid() :
raise Error("No project open",0)
def do_listforce(self,line):
"""\
Usage : listforce
        List all forces configured for this project
"""
try:
for port in settings.active_project.getForcesList():
print "port "+str(port.getName())+" is forced to "+str(port.getForce())
except Error, e:
print display
print e
return
# TODO
def complete_setforce(self,text,line,begidx,endidx):
pinlist = []
try:
pinlist = self.completeargs(text,line,"<forcename> <forcestate>")
except Exception,e:
print e
return pinlist
def do_setforce(self, line):
"""\
        Usage : setforce <forcename> <forcestate>
        Set an FPGA pin state to 'gnd' or 'vcc'. To unset, use the 'undef' value
"""
try:
self.isProjectOpen()
self.checkargs(line,"<forcename> <forcestate>")
except Error,e:
print display
print e
return
arg = line.split(' ')
portname = arg[-2]
state = arg[-1]
try:
settings.active_project.setForce(portname, state)
except Error, e:
print display
print e
return
|
xcthulhu/periphondemand
|
src/bin/commandline/projectcli.py
|
Python
|
lgpl-2.1
| 32,354
|
from abc import ABCMeta
from recommenders.similarity.weights_similarity_matrix_builder import \
WeightsSimilarityMatrixBuilder
from tripadvisor.fourcity import extractor
from recommenders.base_recommender import BaseRecommender
from utils import dictionary_utils
__author__ = 'fpena'
class MultiCriteriaBaseRecommender(BaseRecommender):
__metaclass__ = ABCMeta
def __init__(
self, name, similarity_metric=None,
significant_criteria_ranges=None):
super(MultiCriteriaBaseRecommender, self).__init__(name, None)
self._significant_criteria_ranges = significant_criteria_ranges
self._similarity_matrix_builder = WeightsSimilarityMatrixBuilder(similarity_metric)
self.user_cluster_dictionary = None
def load(self, reviews):
self.reviews = reviews
self.user_ids = extractor.get_groupby_list(self.reviews, 'user_id')
self.user_dictionary =\
extractor.initialize_cluster_users(self.reviews, self._significant_criteria_ranges)
self.user_cluster_dictionary = self.build_user_clusters(
self.reviews, self._significant_criteria_ranges)
if self._similarity_matrix_builder._similarity_metric is not None:
self.user_similarity_matrix =\
self._similarity_matrix_builder.build_similarity_matrix(
self.user_dictionary, self.user_ids)
def clear(self):
super(MultiCriteriaBaseRecommender, self).clear()
self.user_cluster_dictionary = None
# TODO: Add the item_id as a parameter in order to optimize the method
def get_neighbourhood(self, user_id):
cluster_name = self.user_dictionary[user_id].cluster
cluster_users = list(self.user_cluster_dictionary[cluster_name])
        # Remove the given user from the cluster to avoid biasing the neighbourhood
        cluster_users.remove(user_id)
if self._num_neighbors is None:
return cluster_users
similarity_matrix = self.user_similarity_matrix[user_id].copy()
similarity_matrix.pop(user_id, None)
ordered_similar_users = dictionary_utils.sort_dictionary_keys(
similarity_matrix)
intersection_set = set.intersection(set(ordered_similar_users), set(cluster_users))
intersection_lst = [t for t in ordered_similar_users if t in intersection_set]
return intersection_lst # [:self._num_neighbors]
@staticmethod
def build_user_clusters(reviews, significant_criteria_ranges=None):
"""
Builds a series of clusters for users according to their significant
criteria. Users that have exactly the same significant criteria will belong
to the same cluster.
        :param reviews: the list of reviews
        :param significant_criteria_ranges: the ranges used to decide which
        criteria weights count as significant
:return: a dictionary where all the keys are the cluster names and the
values for those keys are list of users that belong to that cluster
"""
user_list = extractor.get_groupby_list(reviews, 'user_id')
user_cluster_dictionary = {}
for user in user_list:
weights = extractor.get_criteria_weights(reviews, user)
significant_criteria, cluster_name =\
extractor.get_significant_criteria(weights, significant_criteria_ranges)
if cluster_name in user_cluster_dictionary:
user_cluster_dictionary[cluster_name].append(user)
else:
user_cluster_dictionary[cluster_name] = [user]
return user_cluster_dictionary
|
melqkiades/yelp
|
source/python/recommenders/multicriteria/multicriteria_base_recommender.py
|
Python
|
lgpl-2.1
| 3,524
|
#!/usr/bin/env python3
#
# Copyright © 2019 Endless Mobile, Inc.
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Original author: Philip Withnall
"""
Checks that a merge request doesn’t add any instances of the string ‘todo’
(in uppercase), or similar keywords. It may remove instances of that keyword,
or move them around, according to the logic of `git log -S`.
"""
import argparse
import re
import subprocess
import sys
# We have to specify these keywords obscurely to avoid the script matching
# itself. The keyword ‘fixme’ (in upper case) is explicitly allowed because
# that’s conventionally used as a way of marking a workaround which needs to
# be merged for now, but is to be grepped for and reverted or reworked later.
BANNED_KEYWORDS = [
'TO' + 'DO',
'X' + 'XX',
'W' + 'IP',
]
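# The match below requires a keyword to stand alone: it must be preceded and
# followed by a non-word character (or the start/end of the line), so e.g.
# 'MASTODON' is not flagged even though it embeds one of the keywords.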
def main():
parser = argparse.ArgumentParser(
description='Check a range of commits to ensure they don’t contain '
'banned keywords.')
parser.add_argument('commits',
help='SHA to diff from, or range of commits to diff')
args = parser.parse_args()
banned_words_seen = set()
seen_in_log = False
seen_in_diff = False
# Check the log messages for banned words.
log_process = subprocess.run(
['git', 'log', '--no-color', args.commits + '..HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8',
check=True)
log_lines = log_process.stdout.strip().split('\n')
for line in log_lines:
for keyword in BANNED_KEYWORDS:
            if re.search(r'(^|\W+){}(\W+|$)'.format(keyword), line):
banned_words_seen.add(keyword)
seen_in_log = True
# Check the diff for banned words.
diff_process = subprocess.run(
['git', 'diff', '-U0', '--no-color', args.commits],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8',
check=True)
diff_lines = diff_process.stdout.strip().split('\n')
for line in diff_lines:
if not line.startswith('+ '):
continue
for keyword in BANNED_KEYWORDS:
            if re.search(r'(^|\W+){}(\W+|$)'.format(keyword), line):
banned_words_seen.add(keyword)
seen_in_diff = True
if banned_words_seen:
if seen_in_log and seen_in_diff:
where = 'commit message and diff'
elif seen_in_log:
where = 'commit message'
elif seen_in_diff:
where = 'commit diff'
print('Saw banned keywords in a {}: {}. '
'This indicates the branch is a work in progress and should not '
'be merged in its current '
'form.'.format(where, ', '.join(banned_words_seen)))
sys.exit(1)
if __name__ == '__main__':
main()
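# Example invocation (hedged; the ref is a placeholder). Run from a git
# checkout; the script scans every commit in <commits>..HEAD:
#
#   .gitlab-ci/check-todos.py origin/master
#
# Exit status is 1 if any banned keyword is found, 0 otherwise.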
|
endlessm/glib
|
.gitlab-ci/check-todos.py
|
Python
|
lgpl-2.1
| 2,831
|
import numpy as np
from .base import Env
from rllab.spaces import Discrete
from rllab.envs.base import Step
from rllab.core.serializable import Serializable
MAPS = {
"chain": [
"GFFFFFFFFFFFFFSFFFFFFFFFFFFFG"
],
"4x4_safe": [
"SFFF",
"FWFW",
"FFFW",
"WFFG"
],
"4x4": [
[
"SFFF",
"FHFH",
"FFFH",
"HFFF"
],
[
"FFFF",
"FHFH",
"FFFH",
"HFFG"
]
],
"8x8": [
"FFFFSFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG"
],
}
class GridWorld3DEnv(Env, Serializable):
"""
'S' : starting point
'F' or '.': free space
'W' or 'x': wall
'H' or 'o': hole (terminates episode)
'G' : goal
"""
def __init__(self, desc='4x4'):
Serializable.quick_init(self, locals())
#print("desc before isinstance",desc)
if isinstance(desc, str):
desc = MAPS[desc]
#print("desc before nparray \n",desc)
desc[0] = list(map(list, desc[0]))
#print(desc[0])
desc[1] = list(map(list, desc[1]))
#print(desc[1])
desc= np.array(list(desc))
#print("desc after nparray \n",desc)
desc[desc == '.'] = 'F'
desc[desc == 'o'] = 'H'
desc[desc == 'x'] = 'W'
self.desc = desc
        self.levels, self.n_row, self.n_col = desc.shape
        (start_z,), (start_x,), (start_y,) = np.nonzero(desc == 'S')
        # Flatten (z, x, y) into a single integer state index (row-major),
        # consistent with the decoding used in step().
        self.start_state = start_z * (self.n_row * self.n_col) + start_x * self.n_col + start_y
self.state = None
self.domain_fig = None
def reset(self):
self.state = self.start_state
return self.state
@staticmethod
def action_from_direction(d):
"""
Return the action corresponding to the given direction. This is a helper method for debugging and testing
purposes.
:return: the action index corresponding to the given direction
"""
return dict(
left=0,
down=1,
right=2,
up=3,
climb_up=4,
climb_down=5
)[d]
def step(self, action):
"""
action map:
0: left
1: down
2: right
3: up
4: climb_up
5: climb_down
        :param action: an integer index into the action map above
:return:
"""
possible_next_states = self.get_possible_next_states(self.state, action)
        probs = [x[1] for x in possible_next_states]
        next_state_idx = np.random.choice(len(probs), p=probs)
        next_state = possible_next_states[next_state_idx][0]
        # Recover (z, x, y) from the flat state index.
        next_z = next_state // (self.n_col * self.n_row)
        next_x = (next_state - next_z * (self.n_col * self.n_row)) // self.n_col
        next_y = (next_state - next_z * (self.n_col * self.n_row)) % self.n_col
        next_state_type = self.desc[next_z, next_x, next_y]
        # Reward and termination depend on the type of the cell we land on.
if next_state_type == 'H':
done = True
reward = 0
elif next_state_type in ['F', 'S']:
done = False
reward = 0
elif next_state_type == 'G':
done = True
reward = 1
else:
raise NotImplementedError
self.state = next_state
return Step(observation=self.state, reward=reward, done=done)
def get_possible_next_states(self, state, action):
"""
Given the state and action, return a list of possible next states and their probabilities. Only next states
with nonzero probabilities will be returned
:param state: start state
:param action: action
:return: a list of pairs (s', p(s'|s,a))
"""
# assert self.observation_space.contains(state)
# assert self.action_space.contains(action)
        # Use the state passed in (not self.state) so the method has no side
        # effects, and recover (z, x, y) from the flat index.
        z = state // (self.n_col * self.n_row)
        x = (state - z * (self.n_col * self.n_row)) // self.n_col
        y = (state - z * (self.n_col * self.n_row)) % self.n_col
        coords = np.array([z, x, y])
        # Possible increments produced by the actions
        # (left, down, right, up, climb_up, climb_down).
        increments = np.array([[0, 0, -1], [0, 1, 0], [0, 0, 1], [0, -1, 0], [1, 0, 0], [-1, 0, 0]])
        next_coords = np.clip(
            coords + increments[action],
            [0, 0, 0],
            [self.levels - 1, self.n_row - 1, self.n_col - 1]
        )
        # Flatten the clipped coordinates back into a flat state index.
        next_state = next_coords[0] * (self.n_row * self.n_col) + next_coords[1] * self.n_col + next_coords[2]
state_type = self.desc[z, x, y]
next_state_type = self.desc[next_coords[0], next_coords[1], next_coords[2]]
#print(next_state_type)
if next_state_type == 'W' or state_type == 'H' or state_type == 'G':
return [(state, 1.)]
else:
return [(next_state, 1.)]
@property
def action_space(self):
return Discrete(6)
@property
def observation_space(self):
return Discrete(self.n_row * self.n_col * self.levels)
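# A minimal rollout sketch (hedged; assumes the rllab imports above resolve):
#
#   env = GridWorld3DEnv('4x4')
#   obs = env.reset()
#   step = env.step(GridWorld3DEnv.action_from_direction('right'))
#   print(step.observation, step.reward, step.done)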
|
roboticslab-uc3m/xgnitive
|
programs/drl/envs/grid_world_3D_env.py
|
Python
|
lgpl-2.1
| 6,357
|
# referenciacatastral.py - functions for handling Spanish real estate ids
# coding: utf-8
#
# Copyright (C) 2016 David García Garzón
# Copyright (C) 2016-2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Referencia Catastral (Spanish real estate property id)
The cadastral reference code is an identifier for real estate in Spain. It is
issued by Dirección General del Catastro (General Directorate of Land
Registry) of the Ministerio de Hacienda (Treasury Ministry).
It is 20 characters long and may contain digits and letters, including the
Spanish Ñ. The number consists of 14 characters for the parcel, 4 for
identifying properties within the parcel and 2 check digits. The parcel
characters are structured differently for urban, non-urban or special
(infrastructure) cases.
More information:
* http://www.catastro.meh.es/esp/referencia_catastral_1.asp (Spanish)
* http://www.catastro.meh.es/documentos/05042010_P.pdf (Spanish)
* https://es.wikipedia.org/wiki/Catastro#Referencia_catastral
>>> validate('7837301-VG8173B-0001 TT') # Lanteira town hall
'7837301VG8173B0001TT'
>>> validate('783301 VG8173B 0001 TT') # missing digit
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('7837301/VG8173B 0001 TT') # not alphanumeric
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('7837301 VG8173B 0001 NN') # bad check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('4A08169P03PRAT0001LR') # BCN Airport
'4A08169 P03PRAT 0001 LR'
"""
from stdnum.exceptions import *
from stdnum.util import clean
alphabet = u'ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789'
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip().upper()
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return ' '.join([
number[:7],
number[7:14],
number[14:18],
number[18:]
])
# The check digit implementation is based on the Javascript
# implementation by Vicente Sancho that can be found at
# http://trellat.es/validar-la-referencia-catastral-en-javascript/
def _check_digit(number):
"""Calculate a single check digit on the provided part of the number."""
weights = (13, 15, 12, 5, 4, 17, 9, 21, 3, 7, 1)
s = sum(w * (int(n) if n.isdigit() else alphabet.find(n) + 1)
for w, n in zip(weights, number))
return 'MQWERTYUIOPASDFGHJKLBZX'[s % 23]
def _force_unicode(number):
"""Convert the number to unicode."""
if not hasattr(number, 'isnumeric'): # pragma: no cover (Python 2 code)
number = number.decode('utf-8')
return number
def calc_check_digits(number):
"""Calculate the check digits for the number."""
number = _force_unicode(compact(number))
return (
_check_digit(number[0:7] + number[14:18]) +
_check_digit(number[7:14] + number[14:18]))
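# Worked example (consistent with the module doctest for the Lanteira town
# hall reference): calc_check_digits('7837301VG8173B0001') returns 'TT'.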
def validate(number):
"""Checks to see if the number provided is a valid Cadastral Reference.
This checks the length, formatting and check digits."""
number = compact(number)
n = _force_unicode(number)
if not all(c in alphabet for c in n):
raise InvalidFormat()
if len(n) != 20:
raise InvalidLength()
if calc_check_digits(n) != n[18:]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid Cadastral Reference."""
try:
return bool(validate(number))
except ValidationError:
return False
|
holvi/python-stdnum
|
stdnum/es/referenciacatastral.py
|
Python
|
lgpl-2.1
| 4,360
|
import itchat
itchat.login()
friends = itchat.get_friends(update = True)[0:]
info = {}
for i in friends:
    info[i['NickName']] = i['Signature']
print(info)
|
XiangYz/webscraper
|
itchat_test.py
|
Python
|
lgpl-2.1
| 157
|
# Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org>
#
# This file is part of ssh.
#
# 'ssh' is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# 'ssh' is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with 'ssh'; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA.
"""
L{Transport} handles the core SSH2 protocol.
"""
import os
import socket
import string
import struct
import sys
import threading
import time
import weakref
import ssh
from ssh import util
from ssh.auth_handler import AuthHandler
from ssh.channel import Channel
from ssh.common import *
from ssh.compress import ZlibCompressor, ZlibDecompressor
from ssh.dsskey import DSSKey
from ssh.kex_gex import KexGex
from ssh.kex_group1 import KexGroup1
from ssh.message import Message
from ssh.packet import Packetizer, NeedRekeyException
from ssh.primes import ModulusPack
from ssh.rsakey import RSAKey
from ssh.server import ServerInterface
from ssh.sftp_client import SFTPClient
from ssh.ssh_exception import SSHException, BadAuthenticationType, ChannelException
from ssh.util import retry_on_signal
from Crypto import Random
from Crypto.Cipher import Blowfish, AES, DES3, ARC4
from Crypto.Hash import SHA, MD5
try:
from Crypto.Util import Counter
except ImportError:
from ssh.util import Counter
# for thread cleanup
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class SecurityOptions (object):
"""
Simple object containing the security preferences of an ssh transport.
These are tuples of acceptable ciphers, digests, key types, and key
exchange algorithms, listed in order of preference.
Changing the contents and/or order of these fields affects the underlying
L{Transport} (but only if you change them before starting the session).
If you try to add an algorithm that ssh doesn't recognize,
C{ValueError} will be raised. If you try to assign something besides a
tuple to one of the fields, C{TypeError} will be raised.
"""
__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
def __init__(self, transport):
self._transport = transport
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: str
"""
return '<ssh.SecurityOptions for %s>' % repr(self._transport)
def _get_ciphers(self):
return self._transport._preferred_ciphers
def _get_digests(self):
return self._transport._preferred_macs
def _get_key_types(self):
return self._transport._preferred_keys
def _get_kex(self):
return self._transport._preferred_kex
def _get_compression(self):
return self._transport._preferred_compression
def _set(self, name, orig, x):
if type(x) is list:
x = tuple(x)
if type(x) is not tuple:
raise TypeError('expected tuple or list')
possible = getattr(self._transport, orig).keys()
forbidden = filter(lambda n: n not in possible, x)
if len(forbidden) > 0:
raise ValueError('unknown cipher')
setattr(self._transport, name, x)
def _set_ciphers(self, x):
self._set('_preferred_ciphers', '_cipher_info', x)
def _set_digests(self, x):
self._set('_preferred_macs', '_mac_info', x)
def _set_key_types(self, x):
self._set('_preferred_keys', '_key_info', x)
def _set_kex(self, x):
self._set('_preferred_kex', '_kex_info', x)
def _set_compression(self, x):
self._set('_preferred_compression', '_compression_info', x)
ciphers = property(_get_ciphers, _set_ciphers, None,
"Symmetric encryption ciphers")
digests = property(_get_digests, _set_digests, None,
"Digest (one-way hash) algorithms")
key_types = property(_get_key_types, _set_key_types, None,
"Public-key algorithms")
kex = property(_get_kex, _set_kex, None, "Key exchange algorithms")
compression = property(_get_compression, _set_compression, None,
"Compression algorithms")
class ChannelMap (object):
def __init__(self):
# (id -> Channel)
self._map = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def put(self, chanid, chan):
self._lock.acquire()
try:
self._map[chanid] = chan
finally:
self._lock.release()
def get(self, chanid):
self._lock.acquire()
try:
return self._map.get(chanid, None)
finally:
self._lock.release()
def delete(self, chanid):
self._lock.acquire()
try:
try:
del self._map[chanid]
except KeyError:
pass
finally:
self._lock.release()
def values(self):
self._lock.acquire()
try:
return self._map.values()
finally:
self._lock.release()
def __len__(self):
self._lock.acquire()
try:
return len(self._map)
finally:
self._lock.release()
class Transport (threading.Thread):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
L{Channel}s, across the session. Multiple channels can be multiplexed
across a single session (and often are, in the case of port forwardings).
"""
_PROTO_ID = '2.0'
_CLIENT_ID = 'ssh_%s' % (ssh.__version__)
_preferred_ciphers = ( 'aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc', 'aes256-cbc', '3des-cbc',
'arcfour128', 'arcfour256' )
_preferred_macs = ( 'hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96' )
_preferred_keys = ( 'ssh-rsa', 'ssh-dss' )
_preferred_kex = ( 'diffie-hellman-group1-sha1', 'diffie-hellman-group-exchange-sha1' )
_preferred_compression = ( 'none', )
_cipher_info = {
'aes128-ctr': { 'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16 },
'aes256-ctr': { 'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32 },
'blowfish-cbc': { 'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16 },
'aes128-cbc': { 'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16 },
'aes256-cbc': { 'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32 },
'3des-cbc': { 'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24 },
'arcfour128': { 'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16 },
'arcfour256': { 'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32 },
}
_mac_info = {
'hmac-sha1': { 'class': SHA, 'size': 20 },
'hmac-sha1-96': { 'class': SHA, 'size': 12 },
'hmac-md5': { 'class': MD5, 'size': 16 },
'hmac-md5-96': { 'class': MD5, 'size': 12 },
}
_key_info = {
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
}
_kex_info = {
'diffie-hellman-group1-sha1': KexGroup1,
'diffie-hellman-group-exchange-sha1': KexGex,
}
_compression_info = {
# zlib@openssh.com is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
'zlib@openssh.com': ( ZlibCompressor, ZlibDecompressor ),
'zlib': ( ZlibCompressor, ZlibDecompressor ),
'none': ( None, None ),
}
_modulus_pack = None
def __init__(self, sock):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the Transport object; it doesn't begin the
SSH session yet. Use L{connect} or L{start_client} to begin a client
session, or L{start_server} to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- C{send(str)}: Writes from 1 to C{len(str)} bytes, and
returns an int representing the number of bytes written. Returns
0 or raises C{EOFError} if the stream has been closed.
- C{recv(int)}: Reads from 1 to C{int} bytes and returns them as a
string. Returns 0 or raises C{EOFError} if the stream has been
closed.
- C{close()}: Closes the socket.
- C{settimeout(n)}: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the C{sock} argument. (A host string is a hostname with an
optional port (separated by C{":"}) which will be converted into a
tuple of C{(hostname, port)}.) A socket will be connected to this
address and used for communication. Exceptions from the C{socket} call
may be thrown in this case.
@param sock: a socket or socket-like object to create the session over.
@type sock: socket
"""
if isinstance(sock, (str, unicode)):
# convert "host:port" into (host, port)
hl = sock.split(':', 1)
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error, e:
reason = str(e)
else:
break
else:
raise SSHException(
'Unable to connect to %s: %s' % (hostname, reason))
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.rng = rng
self.sock = sock
# Python < 2.3 doesn't have the settimeout method - RogerB
try:
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
self.sock.settimeout(0.1)
except AttributeError:
pass
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
self.remote_version = ''
self.local_cipher = self.remote_cipher = ''
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.active = False
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
self.lock = threading.Lock() # synchronization (always higher level than write_lock)
# tracking open channels
self._channels = ChannelMap()
self.channel_events = { } # (id -> Event)
self.channels_seen = { } # (id -> True)
self._channel_counter = 1
self.window_size = 65536
self.max_packet_size = 34816
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = 'ssh.transport'
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
self.global_response = None # response Message from an arbitrary global request
self.completion_event = None # user-defined event callbacks
self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = { }
self.server_accepts = [ ]
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = { }
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: str
"""
out = '<ssh.Transport at %s' % hex(long(id(self)) & 0xffffffffL)
if not self.active:
out += ' (unconnected)'
else:
if self.local_cipher != '':
out += ' (cipher %s, %d bits)' % (self.local_cipher,
self._cipher_info[self.local_cipher]['key-size'] * 8)
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
out += ' (connected; awaiting auth)'
else:
out += ' (connecting)'
out += '>'
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
@since: 1.5.3
"""
self.sock.close()
self.close()
def get_security_options(self):
"""
Return a L{SecurityOptions} object which can be used to tweak the
encryption algorithms this transport will permit, and the order of
preference for them.
@return: an object that can be used to change the preferred algorithms
for encryption, digest (hash), public key, and key exchange.
@rtype: L{SecurityOptions}
"""
return SecurityOptions(self)
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new L{Transport}. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling L{auth_password <Transport.auth_password>} or
L{auth_publickey <Transport.auth_publickey>}.
@note: L{connect} is a simpler method for connecting as a client.
@note: After calling this method (or L{start_server} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete
(optional)
@type event: threading.Event
@raise SSHException: if negotiation fails (and no C{event} was passed
in)
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
Random.atfork()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new L{Transport} and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods
L{get_allowed_auths <ServerInterface.get_allowed_auths>},
L{check_auth_none <ServerInterface.check_auth_none>},
L{check_auth_password <ServerInterface.check_auth_password>}, and
L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the
given C{server} object to control the authentication process.
After a successful authentication, the client should request to open
a channel. Override
L{check_channel_request <ServerInterface.check_channel_request>} in the
given C{server} object to allow channels to be opened.
@note: After calling this method (or L{start_client} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete.
@type event: threading.Event
@param server: an object used to perform authentication and create
L{Channel}s.
@type server: L{server.ServerInterface}
@raise SSHException: if negotiation fails (and no C{event} was passed
in)
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
@param key: the host key to add, usually an L{RSAKey <rsakey.RSAKey>} or
L{DSSKey <dsskey.DSSKey>}.
@type key: L{PKey <pkey.PKey>}
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with L{add_server_key}, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, C{None} is returned. In client mode, the behavior is undefined.
@return: host key of the type negotiated by the client, or C{None}.
@rtype: L{PKey <pkey.PKey>}
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
def load_server_moduli(filename=None):
"""
I{(optional)}
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like C{/etc/ssh/moduli}).
If you call C{load_server_moduli} and it returns C{True}, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
@param filename: optional path to the moduli file, if you happen to
know that it's not in a standard location.
@type filename: str
@return: True if a moduli file was successfully loaded; False
otherwise.
@rtype: bool
@note: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack(rng)
# places to look for the openssh "moduli" file
file_list = [ '/etc/ssh/moduli', '/usr/local/etc/moduli' ]
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
load_server_moduli = staticmethod(load_server_moduli)
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.active = False
self.packetizer.close()
self.join()
for chan in self._channels.values():
chan._unlink()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
@note: Previously this call returned a tuple of (key type, key string).
You can get the same effect by calling
L{PKey.get_name <pkey.PKey.get_name>} for the key type, and
C{str(key)} for the key string.
@raise SSHException: if no session is currently active.
@return: public key of the remote server
@rtype: L{PKey <pkey.PKey>}
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
@return: True if the session is still active (open); False if the
session is closed
@rtype: bool
"""
return self.active
def open_session(self):
"""
Request a new channel to the server, of type C{"session"}. This
is just an alias for C{open_channel('session')}.
@return: a new L{Channel}
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('session')
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type C{"x11"}. This
is just an alias for C{open_channel('x11', src_addr=src_addr)}.
@param src_addr: the source address of the x11 server (port is the
x11 port, ie. 6010)
@type src_addr: (str, int)
@return: a new L{Channel}
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
C{"auth-agent@openssh.com"}.
This is just an alias for C{open_channel('auth-agent@openssh.com')}.
@return: a new L{Channel}
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('auth-agent@openssh.com')
def open_forwarded_tcpip_channel(self, (src_addr, src_port), (dest_addr, dest_port)):
"""
Request a new channel back to the client, of type C{"forwarded-tcpip"}.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
@param src_addr: originator's address
@param src_port: originator's port
@param dest_addr: local (server) connected address
@param dest_port: local (server) connected port
"""
return self.open_channel('forwarded-tcpip', (dest_addr, dest_port), (src_addr, src_port))
def open_channel(self, kind, dest_addr=None, src_addr=None):
"""
Request a new channel to the server. L{Channel}s are socket-like
objects used for the actual transfer of data across the session.
You may only request a channel after negotiating encryption (using
L{connect} or L{start_client}) and authenticating.
@param kind: the kind of channel requested (usually C{"session"},
C{"forwarded-tcpip"}, C{"direct-tcpip"}, or C{"x11"})
@type kind: str
@param dest_addr: the destination address of this port forwarding,
if C{kind} is C{"forwarded-tcpip"} or C{"direct-tcpip"} (ignored
for other channel types)
@type dest_addr: (str, int)
@param src_addr: the source address of this port forwarding, if
C{kind} is C{"forwarded-tcpip"}, C{"direct-tcpip"}, or C{"x11"}
@type src_addr: (str, int)
@return: a new L{Channel} on success
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
if not self.active:
raise SSHException('SSH session not active')
self.lock.acquire()
try:
chanid = self._next_channel()
m = Message()
m.add_byte(chr(MSG_CHANNEL_OPEN))
m.add_string(kind)
m.add_int(chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
while True:
            event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.isSet():
break
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
address = str(address)
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
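    # Hedged sketch: ask the server to listen on port 8080 and pick up the
    # forwarded connections with the default accept() queue:
    #
    #   port = t.request_port_forward('', 8080)
    #   chan = t.accept(timeout=10)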
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
@param address: the address to stop forwarding
@type address: str
@param port: the port to stop forwarding
@type port: int
"""
if not self.active:
return
self._tcp_handler = None
self.global_request('cancel-tcpip-forward', (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success,
an SFTP session will be opened with the remote host, and a new
SFTPClient object will be returned.
@return: a new L{SFTPClient} object, referring to an sftp session
(channel) across this transport
@rtype: L{SFTPClient}
"""
return SFTPClient.from_transport(self)
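    # Typical use once authenticated (paths are placeholders):
    #
    #   sftp = t.open_sftp_client()
    #   sftp.get('/remote/file', '/local/file')
    #   sftp.close()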
def send_ignore(self, bytes=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
@param bytes: the number of random bytes to send in the payload of the
ignored packet -- defaults to a random number from 10 to 41.
@type bytes: int
"""
m = Message()
m.add_byte(chr(MSG_IGNORE))
if bytes is None:
bytes = (ord(rng.read(1)) % 32) + 10
m.add_bytes(rng.read(bytes))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
C{interval} seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
@param interval: seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
@type interval: int
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
@param kind: name of the request.
@type kind: str
@param data: an optional tuple containing additional data to attach
to the request.
@type data: tuple
@param wait: C{True} if this method should not return until a response
is received; C{False} otherwise.
@type wait: bool
@return: a L{Message} containing possible additional data if the
request was successful (or an empty L{Message} if C{wait} was
C{False}); C{None} if the request was denied.
@rtype: L{Message}
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(chr(MSG_GLOBAL_REQUEST))
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.isSet():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout, C{None}
is returned.
@param timeout: seconds to wait for a channel, or C{None} to wait
forever
@type timeout: int
@return: a new Channel opened by the client
@rtype: L{Channel}
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
def connect(self, hostkey=None, username='', password=None, pkey=None):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for L{start_client}, L{get_remote_server_key}, and
L{Transport.auth_password} or L{Transport.auth_publickey}. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call L{open_channel} or
L{open_session} to get a L{Channel} object, which is used for data
transfer.
@note: If you fail to supply a password or private key, this method may
succeed, but a subsequent L{open_channel} or L{open_session} call may
fail because you haven't authenticated yet.
@param hostkey: the host key expected from the server, or C{None} if
you don't want to do host key verification.
@type hostkey: L{PKey<pkey.PKey>}
@param username: the username to authenticate as.
@type username: str
@param password: a password to use for authentication, if you want to
use password authentication; otherwise C{None}.
@type password: str
@param pkey: a private key to use for authentication, if you want to
use private key authentication; otherwise C{None}.
@type pkey: L{PKey<pkey.PKey>}
@raise SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
"""
if hostkey is not None:
self._preferred_keys = [ hostkey.get_name() ]
self.start_client()
# check host key if we were given one
if (hostkey is not None):
key = self.get_remote_server_key()
if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)):
self._log(DEBUG, 'Bad host key from server')
self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey))))
self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key))))
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None):
if password is not None:
self._log(DEBUG, 'Attempting password auth...')
self.auth_password(username, password)
else:
self._log(DEBUG, 'Attempting public-key auth...')
self.auth_publickey(username, pkey)
return
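    # A hedged end-to-end client sketch (host and credentials are
    # placeholders):
    #
    #   t = Transport(('example.com', 22))
    #   t.connect(username='user', password='secret')
    #   chan = t.open_session()
    #   chan.exec_command('uptime')
    #   t.close()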
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like L{start_client}. The exception (if any) is cleared after
this call.
@return: an exception, or C{None} if there is no stored exception.
@rtype: Exception
@since: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see L{SubsystemHandler} for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the L{SubsystemHandler} constructor later.
@param name: name of the subsystem.
@type name: str
@param handler: subclass of L{SubsystemHandler} that handles this
subsystem.
@type handler: class
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
@return: True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
@rtype: bool
"""
return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns C{None}.
@return: username that was authenticated, or C{None}.
@rtype: string
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
L{BadAuthenticationType} exception raised.
@param username: the username to authenticate as
@type username: string
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
@raise SSHException: if the authentication failed due to a network
error
@since: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def auth_password(self, username, password, event=None, fallback=True):
"""
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an C{event} is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, L{is_authenticated} will return C{True}. On failure, you may
use L{get_exception} to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and C{fallback} is C{True} (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: str
@param password: the password to authenticate with
@type password: str or unicode
@param event: an event to trigger when the authentication attempt is
complete (whether it was successful or not)
@type event: threading.Event
@param fallback: C{True} if an attempt at an automated "interactive"
password auth should be made if the server doesn't support normal
password auth
@type fallback: bool
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
@raise AuthenticationException: if the authentication failed (and no
event was passed in)
@raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to send the password unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_password(username, password, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType, x:
# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
if not fallback or ('keyboard-interactive' not in x.allowed_types):
raise
try:
def handler(title, instructions, fields):
if len(fields) > 1:
raise SSHException('Fallback authentication failed.')
if len(fields) == 0:
# for some reason, at least on os x, a 2nd request will
# be made with zero fields requested. maybe it's just
# to try to fake out automated scripting of the exact
# type we're doing here. *shrug* :)
return []
return [ password ]
return self.auth_interactive(username, handler)
except SSHException, ignored:
# attempt failed; just raise the original exception
raise x
return None
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an C{event} is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, L{is_authenticated} will return C{True}. On failure, you may
use L{get_exception} to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: string
@param key: the private key to authenticate with
@type key: L{PKey <pkey.PKey>}
@param event: an event to trigger when the authentication attempt is
complete (whether it was successful or not)
@type event: threading.Event
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
@raise AuthenticationException: if the authentication failed (and no
event was passed in)
@raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=''):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
        periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: C{handler(title, instructions, prompt_list)}. The C{title} is
meant to be a dialog-window title, and the C{instructions} are user
instructions (both are strings). C{prompt_list} will be a list of
prompts, each prompt being a tuple of C{(str, bool)}. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
C{handler('title', 'instructions', [('Password:', False)])}.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: string
@param handler: a handler for responding to server questions
@type handler: callable
@param submethods: a string list of desired submethods (optional)
@type submethods: str
@return: list of auth types permissible for the next stage of
authentication (normally empty).
@rtype: list
@raise BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user
@raise AuthenticationException: if the authentication failed
@raise SSHException: if there was a network error
@since: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(username, handler, my_event, submethods)
return self.auth_handler.wait_for_response(my_event)
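    # Hedged handler sketch for auth_interactive: answer every prompt with a
    # fixed password (purely illustrative):
    #
    #   def handler(title, instructions, prompt_list):
    #       return ['secret'] * len(prompt_list)
    #   t.auth_interactive('user', handler)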
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
C{"ssh.transport"} but it can be set to anything you want.
(See the C{logging} module for more info.) SSH Channels will log
to a sub-channel of the one specified.
@param name: new channel name for logging
@type name: str
@since: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
"""
Return the channel name used for this transport's logging.
@return: channel name.
@rtype: str
@since: 1.2
"""
return self.log_name
def set_hexdump(self, hexdump):
"""
Turn on/off logging a hex dump of protocol traffic at DEBUG level in
the logs. Normally you would want this off (which is the default),
but if you are debugging something, it may be useful.
        @param hexdump: C{True} to log protocol traffic (in hex) to the log;
C{False} otherwise.
@type hexdump: bool
"""
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
"""
Return C{True} if the transport is currently logging hex dumps of
protocol traffic.
@return: C{True} if hex dumps are being logged
@rtype: bool
@since: 1.4
"""
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
        Turn on/off compression. This will only have an effect before starting
        the transport (i.e. before calling L{connect}, etc). By default,
compression is off since it negatively affects interactive sessions.
@param compress: C{True} to ask the remote client/server to compress
traffic; C{False} to refuse compression
@type compress: bool
@since: 1.5.2
"""
if compress:
self._preferred_compression = ( 'zlib@openssh.com', 'zlib', 'none' )
else:
self._preferred_compression = ( 'none', )
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around C{'getpeername'} on the underlying
socket. If the socket-like object has no C{'getpeername'} method,
then C{("unknown", 0)} is returned.
        @return: the address of the remote host, if known
@rtype: tuple(str, int)
"""
gp = getattr(self.sock, 'getpeername', None)
if gp is None:
return ('unknown', 0)
return gp()
def stop_thread(self):
self.active = False
self.packetizer.close()
### internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
"used by KexGex to find primes for group exchange"
return self._modulus_pack
def _next_channel(self):
"you are holding the lock"
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
"used by a Channel to remove itself from the active channel list"
self._channels.delete(chanid)
def _send_message(self, data):
self.packetizer.send_message(data)
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
"""
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
"used by a kex object to set the K (root key) and H (exchange hash)"
self.K = k
self.H = h
        if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
"used by a kex object to register the next packet type it expects to see"
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
self.host_key = key
def _compute_key(self, id, nbytes):
"id is 'A' - 'F' for the various keys used by ssh"
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(id)
m.add_bytes(self.session_id)
out = sofar = SHA.new(str(m)).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = SHA.new(str(m)).digest()
out += digest
sofar += digest
return out[:nbytes]
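    # (This follows the key derivation of RFC 4253 section 7.2: the first
    # block is HASH(K || H || id || session_id) and each further block is
    # HASH(K || H || output-so-far), truncated to nbytes.)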
def _get_cipher(self, name, key, iv):
if name not in self._cipher_info:
raise SSHException('Unknown client cipher ' + name)
if name in ('arcfour128', 'arcfour256'):
# arcfour cipher
cipher = self._cipher_info[name]['class'].new(key)
# as per RFC 4345, the first 1536 bytes of keystream
# generated by the cipher MUST be discarded
cipher.encrypt(" " * 1536)
return cipher
elif name.endswith("-ctr"):
# CTR modes, we need a counter
counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
else:
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, (src_addr, src_port)):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def run(self):
# (use the exposed "run" method, because if we specify a thread target
# of a private method, threading.Thread will keep a reference to it
# indefinitely, creating a GC cycle and not letting Transport ever be
# GC'd. it's a bug in Thread.)
# Hold reference to 'sys' so we can test sys.modules to detect
# interpreter shutdown.
self.sys = sys
# Required to prevent RNG errors when running inside many subprocess
# containers.
Random.atfork()
# active=True occurs before the thread is launched, to avoid a race
_active_threads.append(self)
if self.server_mode:
self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & 0xffffffffL))
else:
self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & 0xffffffffL))
try:
try:
self.packetizer.write_all(self.local_version + '\r\n')
self._check_banner()
self._send_kex_init()
self._expect_packet(MSG_KEXINIT)
while self.active:
if self.packetizer.need_rekey() and not self.in_kex:
self._send_kex_init()
try:
ptype, m = self.packetizer.read_message()
except NeedRekeyException:
continue
if ptype == MSG_IGNORE:
continue
elif ptype == MSG_DISCONNECT:
self._parse_disconnect(m)
self.active = False
self.packetizer.close()
break
elif ptype == MSG_DEBUG:
self._parse_debug(m)
continue
if len(self._expected_packet) > 0:
if ptype not in self._expected_packet:
raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
self._expected_packet = tuple()
if (ptype >= 30) and (ptype <= 39):
self.kex_engine.parse_next(ptype, m)
continue
if ptype in self._handler_table:
self._handler_table[ptype](self, m)
elif ptype in self._channel_handler_table:
chanid = m.get_int()
chan = self._channels.get(chanid)
if chan is not None:
self._channel_handler_table[ptype](chan, m)
elif chanid in self.channels_seen:
self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
else:
self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
self.active = False
self.packetizer.close()
elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
self.auth_handler._handler_table[ptype](self.auth_handler, m)
else:
self._log(WARNING, 'Oops, unhandled type %d' % ptype)
msg = Message()
msg.add_byte(chr(MSG_UNIMPLEMENTED))
msg.add_int(m.seqno)
self._send_message(msg)
except SSHException, e:
self._log(ERROR, 'Exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
except EOFError, e:
self._log(DEBUG, 'EOF in transport thread')
#self._log(DEBUG, util.tb_strings())
self.saved_exception = e
except socket.error, e:
if type(e.args) is tuple:
emsg = '%s (%d)' % (e.args[1], e.args[0])
else:
emsg = e.args
self._log(ERROR, 'Socket exception: ' + emsg)
self.saved_exception = e
except Exception, e:
self._log(ERROR, 'Unknown exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
_active_threads.remove(self)
for chan in self._channels.values():
chan._unlink()
if self.active:
self.active = False
self.packetizer.close()
if self.completion_event != None:
self.completion_event.set()
if self.auth_handler is not None:
self.auth_handler.abort()
for event in self.channel_events.values():
event.set()
try:
self.lock.acquire()
self.server_accept_cv.notify()
finally:
self.lock.release()
self.sock.close()
except:
# Don't raise spurious 'NoneType has no attribute X' errors when we
# wake up during interpreter shutdown. Or rather -- raise
# everything *if* sys.modules (used as a convenient sentinel)
# appears to still exist.
if self.sys.modules is not None:
raise
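# Dispatch recap for the loop above: kex-range packets (types 30-39) go to
# the active kex engine; everything else is tried against _handler_table,
# then against the per-channel table (keyed by the recipient channel id read
# from the message), then against the auth handler, and any leftover type is
# answered with MSG_UNIMPLEMENTED.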
### protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
if self.local_kex_init == None:
# remote side wants to renegotiate
self._send_kex_init()
self._parse_kex_init(m)
self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them the configured banner timeout for the first line, then just
# 2 seconds for each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except Exception, x:
raise SSHException('Error reading SSH protocol banner: ' + str(x))
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
if buf[:4] != 'SSH-':
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
# pull off any attached comment
comment = ''
i = string.find(buf, ' ')
if i >= 0:
comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
if len(segs) < 3:
raise SSHException('Invalid SSH banner')
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
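# Banner recap (RFC 4253, section 4.2): the identification string has the
# form 'SSH-protoversion-softwareversion' optionally followed by a space and
# a comment.  E.g. 'SSH-2.0-OpenSSH_5.9 FreeBSD' yields version '2.0' and
# client 'OpenSSH_5.9' once the comment is split off above.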
def _send_kex_init(self):
"""
announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support.
"""
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
self.in_kex = True
if self.server_mode:
if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
# can't do group-exchange if we don't have a pack of potential primes
pkex = list(self.get_security_options().kex)
pkex.remove('diffie-hellman-group-exchange-sha1')
self.get_security_options().kex = pkex
available_server_keys = filter(self.server_key_dict.keys().__contains__,
self._preferred_keys)
else:
available_server_keys = self._preferred_keys
m = Message()
m.add_byte(chr(MSG_KEXINIT))
m.add_bytes(rng.read(16))
m.add_list(self._preferred_kex)
m.add_list(available_server_keys)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_compression)
m.add_list(self._preferred_compression)
m.add_string('')
m.add_string('')
m.add_boolean(False)
m.add_int(0)
# save a copy for later (needed to compute a hash)
self.local_kex_init = str(m)
self._send_message(m)
def _parse_kex_init(self, m):
cookie = m.get_bytes(16)
kex_algo_list = m.get_list()
server_key_algo_list = m.get_list()
client_encrypt_algo_list = m.get_list()
server_encrypt_algo_list = m.get_list()
client_mac_algo_list = m.get_list()
server_mac_algo_list = m.get_list()
client_compress_algo_list = m.get_list()
server_compress_algo_list = m.get_list()
client_lang_list = m.get_list()
server_lang_list = m.get_list()
kex_follows = m.get_boolean()
unused = m.get_int()
self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) + \
' client encrypt:' + str(client_encrypt_algo_list) + \
' server encrypt:' + str(server_encrypt_algo_list) + \
' client mac:' + str(client_mac_algo_list) + \
' server mac:' + str(server_mac_algo_list) + \
' client compress:' + str(client_compress_algo_list) + \
' server compress:' + str(server_compress_algo_list) + \
' client lang:' + str(client_lang_list) + \
' server lang:' + str(server_lang_list) + \
' kex follows?' + str(kex_follows))
# as a server, we pick the first item in the client's list that we support.
# as a client, we pick the first item in our list that the server supports.
if self.server_mode:
agreed_kex = filter(self._preferred_kex.__contains__, kex_algo_list)
else:
agreed_kex = filter(kex_algo_list.__contains__, self._preferred_kex)
if len(agreed_kex) == 0:
raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
self.kex_engine = self._kex_info[agreed_kex[0]](self)
if self.server_mode:
available_server_keys = filter(self.server_key_dict.keys().__contains__,
self._preferred_keys)
agreed_keys = filter(available_server_keys.__contains__, server_key_algo_list)
else:
agreed_keys = filter(server_key_algo_list.__contains__, self._preferred_keys)
if len(agreed_keys) == 0:
raise SSHException('Incompatible ssh peer (no acceptable host key)')
self.host_key_type = agreed_keys[0]
if self.server_mode and (self.get_server_key() is None):
raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
if self.server_mode:
agreed_local_ciphers = filter(self._preferred_ciphers.__contains__,
server_encrypt_algo_list)
agreed_remote_ciphers = filter(self._preferred_ciphers.__contains__,
client_encrypt_algo_list)
else:
agreed_local_ciphers = filter(client_encrypt_algo_list.__contains__,
self._preferred_ciphers)
agreed_remote_ciphers = filter(server_encrypt_algo_list.__contains__,
self._preferred_ciphers)
if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
raise SSHException('Incompatible ssh server (no acceptable ciphers)')
self.local_cipher = agreed_local_ciphers[0]
self.remote_cipher = agreed_remote_ciphers[0]
self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
if self.server_mode:
agreed_remote_macs = filter(self._preferred_macs.__contains__, client_mac_algo_list)
agreed_local_macs = filter(self._preferred_macs.__contains__, server_mac_algo_list)
else:
agreed_local_macs = filter(client_mac_algo_list.__contains__, self._preferred_macs)
agreed_remote_macs = filter(server_mac_algo_list.__contains__, self._preferred_macs)
if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
raise SSHException('Incompatible ssh server (no acceptable macs)')
self.local_mac = agreed_local_macs[0]
self.remote_mac = agreed_remote_macs[0]
if self.server_mode:
agreed_remote_compression = filter(self._preferred_compression.__contains__, client_compress_algo_list)
agreed_local_compression = filter(self._preferred_compression.__contains__, server_compress_algo_list)
else:
agreed_local_compression = filter(client_compress_algo_list.__contains__, self._preferred_compression)
agreed_remote_compression = filter(server_compress_algo_list.__contains__, self._preferred_compression)
if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
self.local_compression = agreed_local_compression[0]
self.remote_compression = agreed_remote_compression[0]
self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
(agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
self.remote_mac, self.local_compression, self.remote_compression))
# save for computing hash later...
# now wait! openssh has a bug (and others might too) where there are
# actually some extra bytes (one NUL byte in openssh's case) added to
# the end of the packet but not parsed. turns out we need to throw
# away those bytes because they aren't part of the hash.
self.remote_kex_init = chr(MSG_KEXINIT) + m.get_so_far()
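# Negotiation illustration: in every category the winner is the first entry
# of the client's list that the server also supports.  E.g. a client
# preferring ['aes128-ctr', 'aes256-cbc'] against a server offering
# ['aes256-cbc', 'aes128-ctr'] agrees on 'aes128-ctr'.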
def _activate_inbound(self):
"switch on newly negotiated encryption parameters for inbound traffic"
block_size = self._cipher_info[self.remote_cipher]['block-size']
if self.server_mode:
IV_in = self._compute_key('A', block_size)
key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
else:
IV_in = self._compute_key('B', block_size)
key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
mac_size = self._mac_info[self.remote_mac]['size']
mac_engine = self._mac_info[self.remote_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('E', mac_engine.digest_size)
else:
mac_key = self._compute_key('F', mac_engine.digest_size)
self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_in = self._compression_info[self.remote_compression][1]
if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
"switch on newly negotiated encryption parameters for outbound traffic"
m = Message()
m.add_byte(chr(MSG_NEWKEYS))
self._send_message(m)
block_size = self._cipher_info[self.local_cipher]['block-size']
if self.server_mode:
IV_out = self._compute_key('B', block_size)
key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
else:
IV_out = self._compute_key('A', block_size)
key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
engine = self._get_cipher(self.local_cipher, key_out, IV_out)
mac_size = self._mac_info[self.local_mac]['size']
mac_engine = self._mac_info[self.local_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('F', mac_engine.digest_size)
else:
mac_key = self._compute_key('E', mac_engine.digest_size)
self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_out = self._compression_info[self.local_compression][0]
if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if not self.packetizer.need_rekey():
self.in_kex = False
# we always expect to receive NEWKEYS now
self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
if self.local_compression == 'zlib@openssh.com':
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if self.remote_compression == 'zlib@openssh.com':
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _parse_newkeys(self, m):
self._log(DEBUG, 'Switch to new keys ...')
self._activate_inbound()
# can also free a bunch of stuff here
self.local_kex_init = self.remote_kex_init = None
self.K = None
self.kex_engine = None
if self.server_mode and (self.auth_handler is None):
# create auth handler for server mode
self.auth_handler = AuthHandler(self)
if not self.initial_kex_done:
# this was the first key exchange
self.initial_kex_done = True
# send an event?
if self.completion_event != None:
self.completion_event.set()
# it's now okay to send data again (if this was a re-key)
if not self.packetizer.need_rekey():
self.in_kex = False
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.set()
finally:
self.clear_to_send_lock.release()
return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_string()
self._log(INFO, 'Disconnect (code %d): %s' % (code, desc))
def _parse_global_request(self, m):
kind = m.get_string()
self._log(DEBUG, 'Received global request "%s"' % kind)
want_reply = m.get_boolean()
if not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
ok = False
elif kind == 'tcpip-forward':
address = m.get_string()
port = m.get_int()
ok = self.server_object.check_port_forward_request(address, port)
if ok != False:
ok = (ok,)
elif kind == 'cancel-tcpip-forward':
address = m.get_string()
port = m.get_int()
self.server_object.cancel_port_forward_request(address, port)
ok = True
else:
ok = self.server_object.check_global_request(kind, m)
extra = ()
if type(ok) is tuple:
extra = ok
ok = True
if want_reply:
msg = Message()
if ok:
msg.add_byte(chr(MSG_REQUEST_SUCCESS))
msg.add(*extra)
else:
msg.add_byte(chr(MSG_REQUEST_FAILURE))
self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, 'Global request successful.')
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, 'Global request denied.')
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
self._log(WARNING, 'Success for unrequested channel! [??]')
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
self._log(INFO, 'Secsh channel %d opened.' % chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open_failure(self, m):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_string()
lang = m.get_string()
reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
self._log(INFO, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
if chanid in self.channel_events:
self._channels.delete(chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open(self, m):
kind = m.get_string()
chanid = m.get_int()
initial_window_size = m.get_int()
max_packet_size = m.get_int()
reject = False
if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None):
self._log(DEBUG, 'Incoming forward agent connection')
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'x11') and (self._x11_handler is not None):
origin_addr = m.get_string()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'forwarded-tcpip') and (self._tcp_handler is not None):
server_addr = m.get_string()
server_port = m.get_int()
origin_addr = m.get_string()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
reject = True
reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
else:
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
if kind == 'direct-tcpip':
# handle direct-tcpip requests coming from the client
dest_addr = m.get_string()
dest_port = m.get_int()
origin_addr = m.get_string()
origin_port = m.get_int()
reason = self.server_object.check_channel_direct_tcpip_request(
my_chanid, (origin_addr, origin_port),
(dest_addr, dest_port))
else:
reason = self.server_object.check_channel_request(kind, my_chanid)
if reason != OPEN_SUCCEEDED:
self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
reject = True
if reject:
msg = Message()
msg.add_byte(chr(MSG_CHANNEL_OPEN_FAILURE))
msg.add_int(chanid)
msg.add_int(reason)
msg.add_string('')
msg.add_string('en')
self._send_message(msg)
return
chan = Channel(my_chanid)
self.lock.acquire()
try:
self._channels.put(my_chanid, chan)
self.channels_seen[my_chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
finally:
self.lock.release()
m = Message()
m.add_byte(chr(MSG_CHANNEL_OPEN_SUCCESS))
m.add_int(chanid)
m.add_int(my_chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
self._send_message(m)
self._log(INFO, 'Secsh channel %d (%s) opened.', my_chanid, kind)
if kind == 'auth-agent@openssh.com':
self._forward_agent_handler(chan)
elif kind == 'x11':
self._x11_handler(chan, (origin_addr, origin_port))
elif kind == 'forwarded-tcpip':
chan.origin_addr = (origin_addr, origin_port)
self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
else:
self._queue_incoming_channel(chan)
def _parse_debug(self, m):
always_display = m.get_boolean()
msg = m.get_string()
lang = m.get_string()
self._log(DEBUG, 'Debug msg: ' + util.safe_string(msg))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return (None, [], {})
return self.subsystem_table[name]
finally:
self.lock.release()
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
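# Unlike the handlers above, entries in _channel_handler_table are bound
# methods of Channel: the run() loop reads the recipient channel id from the
# message and resolves it to a Channel instance before dispatching.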
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
|
bitprophet/ssh
|
ssh/transport.py
|
Python
|
lgpl-2.1
| 88,838
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( jurko.gospodnetic@pke.hr )
"""
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
import __init__
__init__.runUsingPyTest(globals())
from suds.wsse import UsernameToken
class TestUsernameToken:
username_token = None
def setup(self):
self.username_token = UsernameToken(
username=b"foouser",
password=b"barpasswd",
)
def test_setnonce_null(self):
self.setup()
self.username_token.setnonce()
assert self.username_token.nonce != None
def test_setnonce_text(self):
self.setup()
self.username_token.setnonce(b"affirm")
assert self.username_token.nonce == b"affirm"
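# Background note: in WS-Security the nonce is a random token embedded in the
# UsernameToken header to defeat replay attacks; setnonce() without an
# argument generates one, which is why the first test only asserts that the
# nonce is non-None instead of comparing it against a fixed value.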
|
Affirm/suds-jurko
|
tests/test_wsse.py
|
Python
|
lgpl-3.0
| 1,605
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015-2017 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as _np
from pyemma._base.estimator import Estimator as _Estimator
from pyemma._base.progress import ProgressReporter as _ProgressReporter
from pyemma.thermo import MEMM as _MEMM
from pyemma.thermo.estimators._base import ThermoBase
from pyemma.thermo.models.memm import ThermoMSM as _ThermoMSM
from pyemma.util import types as _types
from pyemma.thermo.estimators._callback import _ConvergenceProgressIndicatorCallBack
from msmtools.estimation import largest_connected_set as _largest_connected_set
from pyemma.thermo.extensions import (
dtram as _dtram,
wham as _wham,
util as _util,
cset as _cset
)
__author__ = 'noe, wehmeyer'
class DTRAM(_Estimator, _MEMM, ThermoBase):
r""" Discrete Transition(-based) Reweighting Analysis Method."""
__serialize_version = 0
__serialize_fields = ('bias_energies',
'conf_energies',
'count_matrices',
'count_matrices_full',
'increments',
'log_lagrangian_mult',
'loglikelihoods',
'nthermo',
'state_counts',
'state_counts_full',
'therm_energies',
)
def __init__(
self, bias_energies_full, lag, count_mode='sliding', connectivity='reversible_pathways',
maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, dt_traj='1 step',
init=None, init_maxiter=10000, init_maxerr=1.0E-8):
r""" Discrete Transition(-based) Reweighting Analysis Method
Parameters
----------
bias_energies_full : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j.
lag : int
Integer lag time at which transitions are counted.
count_mode : str, optional, default='sliding'
Mode to obtain count matrices from discrete trajectories. Should be one of:
* 'sliding' : a trajectory of length T will have :math:`T-\tau` counts at time indexes
.. math::
(0 \rightarrow \tau), (1 \rightarrow \tau+1), ..., (T-\tau-1 \rightarrow T-1)
* 'sample' : a trajectory of length T will have :math:`T/\tau` counts at time indexes
.. math::
(0 \rightarrow \tau), (\tau \rightarrow 2 \tau), ..., ((T/\tau-1) \tau \rightarrow T)
Currently only 'sliding' is supported.
connectivity : str, optional, default='reversible_pathways'
One of 'reversible_pathways', 'summed_count_matrix' or None.
Defines what should be considered a connected set in the joint (product)
space of conformations and thermodynamic ensembles.
* 'reversible_pathways' : requires that every state in the connected set
can be reached by following a pathway of reversible transitions. A
reversible transition between two Markov states (within the same
thermodynamic state k) is a pair of Markov states that belong to the
same strongly connected component of the count matrix (from
thermodynamic state k). A pathway of reversible transitions is a list of
reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)),
(i_(N-1), i_N)]. The thermodynamic state where the reversible
transitions happen, is ignored in constructing the reversible pathways.
This is equivalent to assuming that two ensembles overlap at some Markov
state whenever there exist frames from both ensembles in that Markov
state.
* 'summed_count_matrix' : all thermodynamic states are assumed to overlap.
The connected set is then computed by summing the count matrices over
all thermodynamic states and taking its largest strongly connected set.
Not recommended!
* None : assume that everything is connected. For debugging.
For more details see :func:`pyemma.thermo.extensions.cset.compute_csets_dTRAM`.
maxiter : int, optional, default=10000
The maximum number of self-consistent iterations before the estimator exits unsuccessfully.
maxerr : float, optional, default=1.0E-15
Convergence criterion based on the maximal free energy change in a self-consistent
iteration step.
save_convergence_info : int, optional, default=0
Every save_convergence_info iteration steps, store the actual increment
and the actual log-likelihood; 0 means no storage.
dt_traj : str, optional, default='1 step'
Description of the physical time corresponding to the lag. May be used by analysis
algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e.
there is no physical time unit. Specify by a number, whitespace and unit. Permitted
units are (* is an arbitrary string):
| 'fs', 'femtosecond*'
| 'ps', 'picosecond*'
| 'ns', 'nanosecond*'
| 'us', 'microsecond*'
| 'ms', 'millisecond*'
| 's', 'second*'
init : str, optional, default=None
Use a specific initialization for self-consistent iteration:
| None: use a hard-coded guess for free energies and Lagrangian multipliers
| 'wham': perform a short WHAM estimate to initialize the free energies
init_maxiter : int, optional, default=10000
The maximum number of self-consistent iterations during the initialization.
init_maxerr : float, optional, default=1.0E-8
Convergence criterion for the initialization.
Example
-------
>>> from pyemma.thermo import DTRAM
>>> import numpy as np
>>> B = np.array([[0, 0],[0.5, 1.0]])
>>> dtram = DTRAM(B, 1)
>>> ttrajs = [np.array([0,0,0,0,0,0,0,0,0,0]),np.array([1,1,1,1,1,1,1,1,1,1])]
>>> dtrajs = [np.array([0,0,0,0,1,1,1,0,0,0]),np.array([0,1,0,1,0,1,1,0,0,1])]
>>> dtram = dtram.estimate((ttrajs, dtrajs))
>>> dtram.log_likelihood() # doctest: +ELLIPSIS
-9.805...
>>> dtram.count_matrices # doctest: +SKIP
array([[[5, 1],
[1, 2]],
[[1, 4],
[3, 1]]], dtype=int32)
>>> dtram.stationary_distribution # doctest: +ELLIPSIS
array([ 0.38..., 0.61...])
>>> dtram.meval('stationary_distribution') # doctest: +ELLIPSIS
[array([ 0.38..., 0.61...]), array([ 0.50..., 0.49...])]
References
----------
.. [1] Wu, H. et al 2014
Statistically optimal analysis of state-discretized trajectory data from multiple thermodynamic states
J. Chem. Phys. 141, 214106
"""
# set all parameters
self.bias_energies_full = _types.ensure_ndarray(bias_energies_full, ndim=2, kind='numeric')
self.lag = lag
assert count_mode == 'sliding', 'Currently the only implemented count_mode is \'sliding\''
self.count_mode = count_mode
assert connectivity in [ None, 'reversible_pathways', 'summed_count_matrix' ], \
'Currently the only implemented connectivity checks are \'reversible_pathways\', \'summed_count_matrix\' and None'
self.connectivity = connectivity
self.dt_traj = dt_traj
self.maxiter = maxiter
self.maxerr = maxerr
self.save_convergence_info = save_convergence_info
assert init in (None, 'wham'), 'Currently only None and \'wham\' are supported'
self.init = init
self.init_maxiter = init_maxiter
self.init_maxerr = init_maxerr
# set derived quantities
self.nthermo, self.nstates_full = bias_energies_full.shape
# set iteration variables
self.therm_energies = None
self.conf_energies = None
self.log_lagrangian_mult = None
def estimate(self, trajs):
"""
Parameters
----------
trajs : tuple of (ttrajs, dtrajs)
Simulation trajectories. ttrajs contains the indices of the thermodynamic states and
dtrajs contains the indices of the configurational states.
ttrajs : list of numpy.ndarray(X_i, dtype=int)
Every element is a trajectory (time series). ttrajs[i][t] is the index of the
thermodynamic state visited in trajectory i at time step t.
dtrajs : list of numpy.ndarray(X_i, dtype=int)
dtrajs[i][t] is the index of the configurational state (Markov state) visited in
trajectory i at time step t.
"""
return super(DTRAM, self).estimate(trajs)
def _estimate(self, trajs):
# check input
assert isinstance(trajs, (tuple, list))
assert len(trajs) == 2
ttrajs = trajs[0]
dtrajs = trajs[1]
# validate input
for ttraj, dtraj in zip(ttrajs, dtrajs):
_types.assert_array(ttraj, ndim=1, kind='numeric')
_types.assert_array(dtraj, ndim=1, kind='numeric')
assert _np.shape(ttraj)[0] == _np.shape(dtraj)[0]
# harvest transition counts
self.count_matrices_full = _util.count_matrices(
ttrajs, dtrajs, self.lag,
sliding=self.count_mode, sparse_return=False, nstates=self.nstates_full)
# harvest state counts (for WHAM)
self.state_counts_full = _util.state_counts(
ttrajs, dtrajs, nthermo=self.nthermo, nstates=self.nstates_full)
# restrict to connected set
C_sum = self.count_matrices_full.sum(axis=0)
# TODO: use improved cset
_, cset = _cset.compute_csets_dTRAM(self.connectivity, self.count_matrices_full)
self.active_set = cset
# correct counts
self.count_matrices = self.count_matrices_full[:, cset[:, _np.newaxis], cset]
self.count_matrices = _np.require(
self.count_matrices, dtype=_np.intc, requirements=['C', 'A'])
# correct bias matrix
self.bias_energies = self.bias_energies_full[:, cset]
self.bias_energies = _np.require(
self.bias_energies, dtype=_np.float64, requirements=['C', 'A'])
# correct state counts
self.state_counts = self.state_counts_full[:, cset]
self.state_counts = _np.require(self.state_counts, dtype=_np.intc, requirements=['C', 'A'])
# run initialisation
pg = _ProgressReporter()
if self.init is not None and self.init == 'wham':
stage = 'WHAM init.'
with pg.context(stage=stage):
self.therm_energies, self.conf_energies, _increments, _loglikelihoods = \
_wham.estimate(
self.state_counts, self.bias_energies,
maxiter=self.init_maxiter, maxerr=self.init_maxerr, save_convergence_info=0,
therm_energies=self.therm_energies, conf_energies=self.conf_energies,
callback=_ConvergenceProgressIndicatorCallBack(
pg, stage, self.init_maxiter, self.init_maxerr))
# run estimator
stage = 'DTRAM'
with pg.context(stage=stage):
self.therm_energies, self.conf_energies, self.log_lagrangian_mult, \
self.increments, self.loglikelihoods = _dtram.estimate(
self.count_matrices, self.bias_energies,
maxiter=self.maxiter, maxerr=self.maxerr,
log_lagrangian_mult=self.log_lagrangian_mult,
conf_energies=self.conf_energies,
save_convergence_info=self.save_convergence_info,
callback=_ConvergenceProgressIndicatorCallBack(
pg, stage, self.maxiter, self.maxerr))
# compute models
fmsms = [_dtram.estimate_transition_matrix(
self.log_lagrangian_mult, self.bias_energies, self.conf_energies,
self.count_matrices, _np.zeros(
shape=self.conf_energies.shape, dtype=_np.float64), K) for K in range(self.nthermo)]
active_sets = [_largest_connected_set(msm, directed=False) for msm in fmsms]
fmsms = [_np.ascontiguousarray(
(msm[lcc, :])[:, lcc]) for msm, lcc in zip(fmsms, active_sets)]
models = []
for i, (msm, acs) in enumerate(zip(fmsms, active_sets)):
models.append(_ThermoMSM(
msm, self.active_set[acs], self.nstates_full,
pi=_np.exp(self.therm_energies[i] - self.bias_energies[i, :] - self.conf_energies),
dt_model=self.timestep_traj.get_scaled(self.lag)))
# set model parameters to self
self.set_model_params(
models=models, f_therm=self.therm_energies, f=self.conf_energies)
# done
return self
def log_likelihood(self):
return _dtram.get_loglikelihood(
self.count_matrices,
_dtram.estimate_transition_matrices(
self.log_lagrangian_mult,
self.bias_energies,
self.conf_energies,
self.count_matrices,
_np.zeros(shape=self.conf_energies.shape, dtype=_np.float64)))
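# Background note (see reference [1] in the class docstring): dTRAM solves
# self-consistent equations for the Lagrangian multipliers and free
# energies; log_likelihood() above evaluates the likelihood of the observed
# transition counts under the transition matrices implied by the current
# estimates.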
|
fabian-paul/PyEMMA
|
pyemma/thermo/estimators/DTRAM_estimator.py
|
Python
|
lgpl-3.0
| 14,367
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
# File: pyText2Pdf.py
#
# Derived from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189858
__docformat__ = 'plaintext'
__date__ = '04/10/2013'
'''
pyText2Pdf - Python script to convert plain text files into Adobe
Acrobat PDF files.
Version 1.2
Author: Anand B Pillai <abpillai at lycos dot com>
Keywords: python, tools, converter, pdf, text2pdf, adobe, acrobat,
processing.
Copyright (C) 2003-2004 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.'''
import sys, os
import string
import time
import optparse
import re
LF_EXTRA=0
LINE_END='\015'
# form feed character (^L)
FF=chr(12)
ENCODING_STR = """\
/Encoding <<
/Differences [ 0 /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /space /exclam
/quotedbl /numbersign /dollar /percent /ampersand
/quoteright /parenleft /parenright /asterisk /plus /comma
/hyphen /period /slash /zero /one /two /three /four /five
/six /seven /eight /nine /colon /semicolon /less /equal
/greater /question /at /A /B /C /D /E /F /G /H /I /J /K /L
/M /N /O /P /Q /R /S /T /U /V /W /X /Y /Z /bracketleft
/backslash /bracketright /asciicircum /underscore
/quoteleft /a /b /c /d /e /f /g /h /i /j /k /l /m /n /o /p
/q /r /s /t /u /v /w /x /y /z /braceleft /bar /braceright
/asciitilde /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/dotlessi /grave /acute /circumflex /tilde /macron /breve
/dotaccent /dieresis /.notdef /ring /cedilla /.notdef
/hungarumlaut /ogonek /caron /space /exclamdown /cent
/sterling /currency /yen /brokenbar /section /dieresis
/copyright /ordfeminine /guillemotleft /logicalnot /hyphen
/registered /macron /degree /plusminus /twosuperior
/threesuperior /acute /mu /paragraph /periodcentered
/cedilla /onesuperior /ordmasculine /guillemotright
/onequarter /onehalf /threequarters /questiondown /Agrave
/Aacute /Acircumflex /Atilde /Adieresis /Aring /AE
/Ccedilla /Egrave /Eacute /Ecircumflex /Edieresis /Igrave
/Iacute /Icircumflex /Idieresis /Eth /Ntilde /Ograve
/Oacute /Ocircumflex /Otilde /Odieresis /multiply /Oslash
/Ugrave /Uacute /Ucircumflex /Udieresis /Yacute /Thorn
/germandbls /agrave /aacute /acircumflex /atilde /adieresis
/aring /ae /ccedilla /egrave /eacute /ecircumflex
/edieresis /igrave /iacute /icircumflex /idieresis /eth
/ntilde /ograve /oacute /ocircumflex /otilde /odieresis
/divide /oslash /ugrave /uacute /ucircumflex /udieresis
/yacute /thorn /ydieresis ]
>>
"""
INTRO="""\
%prog [options] filename
PyText2Pdf makes a 7-bit clean PDF file from any input file.
It reads from a named file and writes the PDF to a file specified by the
user, otherwise to the input file name with its extension replaced by '.pdf'.
Author: Anand B Pillai."""
class PyText2Pdf(object):
""" Text2pdf converter in pure Python """
def __init__(self, **kwargs):
# version number
self._version="1.3"
# iso encoding flag
self._IsoEnc=False
# formfeeds flag
self._doFFs=False
self._progname="PyText2Pdf"
self._appname = " ".join((self._progname,str(self._version)))
# default font
self._font="/Courier"
# default font size
self._ptSize=10
# default vert space
self._vertSpace=12
self._lines=0
# number of characters in a row
self._cols=80
self._columns=1
# page ht
self._pageHt=792
# page wd
self._pageWd=612
# input file
self._ifile=""
if 'ifilename' in kwargs:
self._ifile = kwargs['ifilename']
# output file
self._ofile=""
if 'ofile' in kwargs:
self._ofile = kwargs['ofile']
# default tab width
self._tab=4
# input file descriptor
self._ifs=None
# output file descriptor
self._ofs=None
self.buffers = False
if 'buffers' in kwargs and kwargs['buffers']:
self._ifs=kwargs['ifile']
self._ofs=kwargs['ofile']
self.buffers = True
# landscape flag
self._landscape=False
# Subject
self._subject = ''
# Author
self._author = ''
# Keywords
self._keywords = []
# Custom regexp for page breaks
self._pagebreakre = None
# marker objects
self._curobj = 5
self._pageObs = [0]
self._locations = [0,0,0,0,0,0]
self._pageNo=0
# file position marker
self._fpos=0
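# Bookkeeping note: _locations records the byte offset of every emitted PDF
# object (indexed by object number) so that writerest() can build the xref
# table, while _pageObs maps page numbers to page object numbers for the
# /Kids array of the Pages tree.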
def parse_args(self):
""" Callback function called by argument parser.
Helps to remove duplicate code """
if len(sys.argv)<2:
sys.argv.append('-h')
parser = optparse.OptionParser(usage=INTRO)
parser.add_option('-o','--output',dest='outfile',help='Direct output to file OUTFILE',metavar='OUTFILE')
parser.add_option('-f','--font',dest='font',help='Use Postscript font FONT (must be in standard 14, default: Courier)',
default='Courier')
parser.add_option('-I','--isolatin',dest='isolatin',help='Use ISO latin-1 encoding',default=False,action='store_true')
parser.add_option('-s','--size',dest='fontsize',help='Use font at PTSIZE points (default=>10)',metavar='PTSIZE',default=10)
parser.add_option('-v','--linespace',dest='linespace',help='Use line spacing LINESPACE (default 12)',metavar='LINESPACE',default=12)
parser.add_option('-l','--lines',dest='lines',help='Lines per page (default 60, determined automatically if unspecified)',default=60, metavar=None)
parser.add_option('-c','--chars',dest='chars',help='Maximum characters per line (default 80)',default=80,metavar=None)
parser.add_option('-t','--tab',dest='tabspace',help='Spaces per tab character (default 4)',default=4,metavar=None)
parser.add_option('-F','--ignoreff',dest='formfeed',help='Honor formfeed character ^L (i.e., accept formfeed characters as pagebreaks)',default=False,action='store_true')
parser.add_option('-P','--papersize',dest='papersize',help='Set paper size (default is letter, accepted values are "A4" or "A3")')
parser.add_option('-W','--width',dest='width',help='Independent paper width in points',metavar=None,default=612)
parser.add_option('-H','--height',dest='height',help='Independent paper height in points',metavar=None,default=792)
parser.add_option('-2','--twocolumns',dest='twocolumns',help='Format as two columns',metavar=None,default=False,action='store_true')
parser.add_option('-L','--landscape',dest='landscape',help='Format in landscape mode',metavar=None,default=False,action='store_true')
parser.add_option('-R','--regexp',dest='pageregexp',help='Regular expression string to determine page breaks (if supplied, this will be used to split text into pages, instead of using line count)',metavar=None)
parser.add_option('-S','--subject',dest='subject',help='Optional subject for the document',metavar=None)
parser.add_option('-A','--author',dest='author',help='Optional author for the document',metavar=None)
parser.add_option('-K','--keywords',dest='keywords',help='Optional list of keywords for the document (separated by commas)',metavar=None)
optlist, args = parser.parse_args()
# print optlist.__dict__, args
if len(args)==0:
sys.exit('Error: input file argument missing')
elif len(args)>1:
sys.exit('Error: Too many arguments')
self._ifile = args[0]
d = optlist.__dict__
if d.get('isolatin'): self._IsoEnc=True
if d.get('formfeed'): self._doFFs = True
if d.get('twocolumns'): self._columns = 2
if d.get('landscape'): self._landscape = True
self._font = '/' + d.get('font')
psize = d.get('papersize')
if psize=='A4':
self._pageWd=595
self._pageHt=842
elif psize=='A3':
self._pageWd=842
self._pageHt=1190
fsize = int(d.get('fontsize'))
if fsize < 1: fsize = 1
self._ptSize = fsize
lspace = int(d.get('linespace'))
if lspace<1: lspace = 1
self._vertSpace = lspace
lines = int(d.get('lines'))
if lines<1: lines = 1
self._lines = int(lines)
chars = int(d.get('chars'))
if chars<4: chars = 4
self._cols = chars
tab = int(d.get('tabspace'))
if tab<1: tab = 1
self._tab = tab
w = int(d.get('width'))
if w<72: w=72
self._pageWd = w
h = int(d.get('height'))
if h<72: h=72
self._pageHt = h
# Very optional args
author = d.get('author')
if author: self._author = author
subject = d.get('subject')
if subject: self._subject = subject
keywords = d.get('keywords')
if keywords:
self._keywords = keywords.split(',')
pagebreak = d.get('pageregexp')
if pagebreak:
self._pagebreakre = re.compile(pagebreak, re.UNICODE|re.IGNORECASE)
outfile = d.get('outfile')
if outfile: self._ofile = outfile
if self._landscape:
print 'Landscape option on...'
if self._columns==2:
print 'Printing in two columns...'
if self._doFFs:
print 'Treating form feed characters as page breaks...'
if self._IsoEnc:
print 'Using ISO Latin Encoding...'
print 'Using font',self._font[1:],'size =', self._ptSize
def writestr(self, str):
""" Write string to output file descriptor.
All output operations go through this function.
We also keep track of the current file position here."""
# update current file position
self._fpos += len(str)
for x in range(0, len(str)):
if str[x] == '\n':
self._fpos += LF_EXTRA
try:
self._ofs.write(str)
except IOError, e:
print e
return -1
return 0
def convert(self, buff=False):
""" Perform the actual conversion """
if self._landscape:
# swap page width & height
tmp = self._pageHt
self._pageHt = self._pageWd
self._pageWd = tmp
if self._lines==0:
self._lines = (self._pageHt - 72)/self._vertSpace
if self._lines < 1:
self._lines=1
if not self.buffers:
try:
self._ifs=open(self._ifile)
except IOError, (errno, strerror):
print 'Error: Could not open file to read --->', self._ifile
sys.exit(3)
if self._ofile=="":
self._ofile = os.path.splitext(self._ifile)[0] + '.pdf'
try:
self._ofs = open(self._ofile, 'wb')
except IOError, (errno, strerror):
print 'Error: Could not open file to write --->', self._ofile
sys.exit(3)
#print 'Input file=>',self._ifile
#print 'Writing pdf file',self._ofile, '...'
self.writeheader()
self.writepages()
self.writerest()
if not self.buffers:
#print 'Wrote file', self._ofile
self._ifs.close()
self._ofs.close()
return 0
def writeheader(self):
"""Write the PDF header"""
ws = self.writestr
title = self._ifile
t=time.localtime()
timestr=str(time.strftime("D:%Y%m%d%H%M%S", t))
ws("%PDF-1.4\n")
self._locations[1] = self._fpos
ws("1 0 obj\n")
ws("<<\n")
buf = "".join(("/Creator (", self._appname, " By Anand B Pillai )\n"))
ws(buf)
buf = "".join(("/CreationDate (", timestr, ")\n"))
ws(buf)
buf = "".join(("/Producer (", self._appname, "(\\251 Anand B Pillai))\n"))
ws(buf)
if self._subject:
title = self._subject
buf = "".join(("/Subject (",self._subject,")\n"))
ws(buf)
if self._author:
buf = "".join(("/Author (",self._author,")\n"))
ws(buf)
if self._keywords:
buf = "".join(("/Keywords (",' '.join(self._keywords),")\n"))
ws(buf)
if title:
buf = "".join(("/Title (", title, ")\n"))
ws(buf)
ws(">>\n")
ws("endobj\n")
self._locations[2] = self._fpos
ws("2 0 obj\n")
ws("<<\n")
ws("/Type /Catalog\n")
ws("/Pages 3 0 R\n")
ws(">>\n")
ws("endobj\n")
self._locations[4] = self._fpos
ws("4 0 obj\n")
ws("<<\n")
buf = "".join(("/BaseFont ", str(self._font), " /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font >>\n"))
ws(buf)
if self._IsoEnc:
ws(ENCODING_STR)
ws(">>\n")
ws("endobj\n")
self._locations[5] = self._fpos
ws("5 0 obj\n")
ws("<<\n")
ws(" /Font << /F1 4 0 R >>\n")
ws(" /ProcSet [ /PDF /Text ]\n")
ws(">>\n")
ws("endobj\n")
def startpage(self):
""" Start a page of data """
ws = self.writestr
self._pageNo += 1
self._curobj += 1
self._locations.append(self._fpos)
self._locations[self._curobj]=self._fpos
self._pageObs.append(self._curobj)
self._pageObs[self._pageNo] = self._curobj
buf = "".join((str(self._curobj), " 0 obj\n"))
ws(buf)
ws("<<\n")
ws("/Type /Page\n")
ws("/Parent 3 0 R\n")
ws("/Resources 5 0 R\n")
self._curobj += 1
buf = "".join(("/Contents ", str(self._curobj), " 0 R\n"))
ws(buf)
ws(">>\n")
ws("endobj\n")
self._locations.append(self._fpos)
self._locations[self._curobj] = self._fpos
buf = "".join((str(self._curobj), " 0 obj\n"))
ws(buf)
ws("<<\n")
buf = "".join(("/Length ", str(self._curobj + 1), " 0 R\n"))
ws(buf)
ws(">>\n")
ws("stream\n")
strmPos = self._fpos
ws("BT\n");
buf = "".join(("/F1 ", str(self._ptSize), " Tf\n"))
ws(buf)
buf = "".join(("1 0 0 1 50 ", str(self._pageHt - 40), " Tm\n"))
ws(buf)
buf = "".join((str(self._vertSpace), " TL\n"))
ws(buf)
return strmPos
def endpage(self, streamStart):
"""End a page of data """
ws = self.writestr
ws("ET\n")
streamEnd = self._fpos
ws("endstream\n")
ws("endobj\n")
self._curobj += 1
self._locations.append(self._fpos)
self._locations[self._curobj] = self._fpos
buf = "".join((str(self._curobj), " 0 obj\n"))
ws(buf)
buf = "".join((str(streamEnd - streamStart), '\n'))
ws(buf)
ws('endobj\n')
def writepages(self):
"""Write pages as PDF"""
ws = self.writestr
beginstream=0
lineNo, charNo=0,0
ch, column=0,0
padding,i=0,0
atEOF=0
linebuf = ''
while not atEOF:
beginstream = self.startpage()
column=1
while column <= self._columns:
column += 1
atFF=0
atBOP=0
lineNo=0
# Special flag for regexp page break
pagebreak = False
while lineNo < self._lines and not atFF and not atEOF and not pagebreak:
linebuf = ''
lineNo += 1
ws("(")
charNo=0
while charNo < self._cols:
charNo += 1
ch = self._ifs.read(1)
cond = ((ch != '\n') and not(ch==FF and self._doFFs) and (ch != ''))
if not cond:
# See if this dude matches the pagebreak regexp
if self._pagebreakre and self._pagebreakre.search(linebuf.strip()):
pagebreak = True
linebuf = ''
break
else:
linebuf = linebuf + ch
if ord(ch) >= 32 and ord(ch) <= 127:
if ch == '(' or ch == ')' or ch == '\\':
ws("\\")
ws(ch)
else:
if ord(ch) == 9:
padding =self._tab - ((charNo - 1) % self._tab)
for i in range(padding):
ws(" ")
charNo += (padding -1)
else:
if ch != FF:
# write \xxx form for dodgy character
buf = "".join(('\\', ch))
ws(buf)
else:
# dont print anything for a FF
charNo -= 1
ws(")'\n")
if ch == FF:
atFF=1
if lineNo == self._lines:
atBOP=1
if atBOP:
pos=0
ch = self._ifs.read(1)
pos= self._ifs.tell()
if ch == FF:
ch = self._ifs.read(1)
pos=self._ifs.tell()
# python's EOF signature
if ch == '':
atEOF=1
else:
# push position back by one char
self._ifs.seek(pos-1)
elif atFF:
ch = self._ifs.read(1)
pos=self._ifs.tell()
if ch == '':
atEOF=1
else:
self._ifs.seek(pos-1)
if column < self._columns:
buf = "".join(("1 0 0 1 ",
str((self._pageWd/2 + 25)),
" ",
str(self._pageHt - 40),
" Tm\n"))
ws(buf)
self.endpage(beginstream)
def writerest(self):
"""Finish the file"""
ws = self.writestr
self._locations[3] = self._fpos
ws("3 0 obj\n")
ws("<<\n")
ws("/Type /Pages\n")
buf = "".join(("/Count ", str(self._pageNo), "\n"))
ws(buf)
buf = "".join(("/MediaBox [ 0 0 ", str(self._pageWd), " ", str(self._pageHt), " ]\n"))
ws(buf)
ws("/Kids [ ")
for i in range(1, self._pageNo+1):
buf = "".join((str(self._pageObs[i]), " 0 R "))
ws(buf)
ws("]\n")
ws(">>\n")
ws("endobj\n")
xref = self._fpos
ws("xref\n")
buf = "".join(("0 ", str((self._curobj) + 1), "\n"))
ws(buf)
buf = "".join(("0000000000 65535 f ", str(LINE_END)))
ws(buf)
for i in range(1, self._curobj + 1):
val = self._locations[i]
buf = "".join((string.zfill(str(val), 10), " 00000 n ", str(LINE_END)))
ws(buf)
ws("trailer\n")
ws("<<\n")
buf = "".join(("/Size ", str(self._curobj + 1), "\n"))
ws(buf)
ws("/Root 2 0 R\n")
ws("/Info 1 0 R\n")
ws(">>\n")
ws("startxref\n")
buf = "".join((str(xref), "\n"))
ws(buf)
ws("%%EOF\n")
'''def main():
# reads from argv the input and output files
# usual for command line
pdfclass=PyText2Pdf()
pdfclass.parse_args()
pdfclass.convert()
# uses input and output file descriptors
# usual for importing from other Python modules
from pyText2Pdf import PyText2Pdf
from StringIO import StringIO
#input_fp = StringIO() # or open('<file>', 'rb')
input_fp = open('test.txt', 'rb')
output_fp = StringIO() # or open('<file>', 'wb')
t2p = PyText2Pdf(ifile=input_fp, ofile=output_fp, ifilename='test.txt', buffers=True)
t2p.convert()
f = open('somefile.pdf', 'wb')
f.write(output_fp.getvalue())
f.close()
t = PyText2Pdf(ifilename='test.txt', buffers=False)
t.convert()
#if __name__ == "__main__":
# main()'''
|
costastf/blind-Pyrsync
|
pytxt2pdf/pyText2Pdf.py
|
Python
|
lgpl-3.0
| 22,089
|
#!/usr/bin/python
import itertools
class ListCreator():
current_list = None
def fcstDiracGammasInAlldimsAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<4>'])))
def fcstDiracGammasInAlldimsOneG5AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<4>', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>',
'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<4>', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>',
'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<4>', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma5()', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma5()', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gammaR()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gammaR()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gammaL()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gammaL()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma5()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma5()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma5()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma5()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7AllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gammaR()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gammaR()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsOneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>'])) +
            list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>'])) +
            list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>'])) +
            list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<4>', 'dirac_gamma<5>'])))
def fcstDiracGammasInAlldimsOneG5OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma5()', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gammaR()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gammaL()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma5()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma5()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7OneIndexFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gammaR()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsTwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>'])))
def fcstDiracGammasInAlldimsOneG5TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaR()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaL()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7TwoIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>'])))
def fcstDiracGammasInAlldimsOneG5ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()' ])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaR()' ])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaL()' ])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7ThreeIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsFourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>' ])))
def fcstDiracGammasInAlldimsOneG5FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7FourIndicesFree(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<5>', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsOneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)'])) +
            list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)'])) +
            list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_slash(p1 comma dims)'])) +
            list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_gamma<4>', 'dirac_gamma<4>', 'dirac_slash(p1 comma dims)'])))
def fcstDiracGammasInAlldimsOneG5OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_gamma<3>', 'dirac_gamma<3>', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7OneSlashAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsTwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)'])))
def fcstDiracGammasInAlldimsOneG5TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_gamma<2>', 'dirac_gamma<2>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsTwoG6TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsTwoG7TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG5OneG6TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG5OneG7TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsOneG6OneG7TwoSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()'])))
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def fcstDiracGammasInAlldimsThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)'])))
def fcstDiracGammasInAlldimsOneG5ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()'])))
def fcstDiracGammasInAlldimsOneG6ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()'])))
def fcstDiracGammasInAlldimsOneG7ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])) +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()'])))
def fcstDiracGammasInAlldimsTwoG5ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gamma5()']))[0:600])
def fcstDiracGammasInAlldimsTwoG6ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaR()']))[0:600])
def fcstDiracGammasInAlldimsTwoG7ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaL()', 'dirac_gammaL()']))[0:600])
def fcstDiracGammasInAlldimsOneG5OneG6ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaR()']))[0:600])
def fcstDiracGammasInAlldimsOneG5OneG7ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gamma5()', 'dirac_gammaL()']))[0:600])
def fcstDiracGammasInAlldimsOneG6OneG7ThreeSlashesAllIndicesContracted(self):
self.current_list = set(
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p3 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p2 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()']))[0:600] +
list(itertools.permutations(['dirac_gamma<1>', 'dirac_gamma<1>', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_slash(p1 comma dims)', 'dirac_gammaR()', 'dirac_gammaL()']))[0:600])
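    # --- Editor's note (illustrative) ---
    # Every fcst* method above wraps itertools.permutations in set() because
    # the input lists contain repeated symbols, so raw permutations would
    # yield duplicate test cases. A minimal sketch of the same pattern:
    #
    #     >>> import itertools
    #     >>> sorted(set(itertools.permutations(['a', 'a', 'b'])))
    #     [('a', 'a', 'b'), ('a', 'b', 'a'), ('b', 'a', 'a')]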
|
FeynCalc/pyfeyncalctester
|
src/listcreator.py
|
Python
|
lgpl-3.0
| 44,022
|
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Documents Access
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
import os
import time
import logging
import unittest
import threading
import functools
from odoo.tests import tagged
from odoo.tools.profiler import profile
from odoo.tools import config, convert_file
from odoo.modules.module import get_resource_path
from odoo.modules.module import get_module_resource
from odoo.addons.muk_utils.tests.common import multi_users
from odoo.addons.muk_utils.tests.common import track_function
from odoo.addons.muk_dms.tests.test_benchmark import BenchmarkTestCase
_path = os.path.dirname(os.path.dirname(__file__))
_logger = logging.getLogger(__name__)
@tagged('-standard', 'benchmark')
class BenchmarkTestCase(BenchmarkTestCase):
@classmethod
def _setup_benchmark_data(cls):
super(BenchmarkTestCase, cls)._setup_benchmark_data()
cls._load('muk_dms_access', 'tests', 'data', 'muk_dms.directory.csv')
cls._load('muk_dms_access', 'tests', 'data', 'muk_security.access_groups.csv')
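    # Editor's note (assumption): _load is inherited from the muk_dms
    # BenchmarkTestCase. In Odoo test suites such helpers typically resolve a
    # module-relative path and feed it to convert_file; a rough sketch:
    #
    #     @classmethod
    #     def _load(cls, module, *args):
    #         convert_file(cls.cr, module, get_module_resource(module, *args),
    #                      {}, 'init', False, 'test')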
|
muk-it/muk_dms
|
muk_dms_access/tests/test_benchmark.py
|
Python
|
lgpl-3.0
| 1,933
|
"""
Useful methods and classes that do not belong anywhere else and that depend on the opcua library
"""
from dateutil import parser
from datetime import datetime
from enum import Enum, IntEnum
import uuid
from opcua import ua
from opcua.ua.uaerrors import UaError
def val_to_string(val):
"""
    convert a python object or python-opcua object to a string
    which should be easy for a human to understand and modify,
    and not too hard to parse back ... which is not easy.
    Meant for UIs and command lines.
"""
if isinstance(val, (list, tuple)):
res = []
for v in val:
res.append(val_to_string(v))
return "[" + ", ".join(res) + "]"
if hasattr(val, "to_string"):
val = val.to_string()
elif isinstance(val, ua.StatusCode):
val = val.name
elif isinstance(val, (Enum, IntEnum)):
val = val.name
elif isinstance(val, ua.DataValue):
val = variant_to_string(val.Value)
elif isinstance(val, ua.XmlElement):
val = val.Value
elif isinstance(val, str):
pass
elif isinstance(val, bytes):
val = str(val)
elif isinstance(val, datetime):
val = val.isoformat()
elif isinstance(val, (int, float)):
val = str(val)
else:
# FIXME: Some types are probably missing!
val = str(val)
return val
def variant_to_string(var):
"""
    convert a variant to a string which should be easy for a human
    to understand and modify, and not too hard to parse back ... which is not easy.
    Meant for UIs and command lines.
"""
return val_to_string(var.Value)
def string_to_val(string, vtype):
"""
    Convert a string back to a python or python-opcua object
    Note: no error checking is done here; supplying empty strings may raise exceptions (datetime and guid)
"""
string = string.strip()
if string.startswith("["):
string = string[1:-1]
var = []
for s in string.split(","):
s = s.strip()
val = string_to_val(s, vtype)
var.append(val)
return var
if vtype == ua.VariantType.Null:
val = None
elif vtype == ua.VariantType.Boolean:
if string in ("True", "true", "on", "On", "1"):
val = True
else:
val = False
elif vtype in (ua.VariantType.SByte, ua.VariantType.Int16, ua.VariantType.Int32, ua.VariantType.Int64):
val = int(string)
elif vtype in (ua.VariantType.Byte, ua.VariantType.UInt16, ua.VariantType.UInt32, ua.VariantType.UInt64):
val = int(string)
elif vtype in (ua.VariantType.Float, ua.VariantType.Double):
val = float(string)
elif vtype == ua.VariantType.XmlElement:
val = ua.XmlElement(string)
elif vtype == ua.VariantType.String:
val = string
elif vtype == ua.VariantType.ByteString:
val = string.encode("utf-8")
elif vtype in (ua.VariantType.NodeId, ua.VariantType.ExpandedNodeId):
val = ua.NodeId.from_string(string)
elif vtype == ua.VariantType.QualifiedName:
val = ua.QualifiedName.from_string(string)
elif vtype == ua.VariantType.DateTime:
val = parser.parse(string)
elif vtype == ua.VariantType.LocalizedText:
val = ua.LocalizedText(string)
elif vtype == ua.VariantType.StatusCode:
val = ua.StatusCode(string)
elif vtype == ua.VariantType.Guid:
val = uuid.UUID(string)
else:
# FIXME: Some types are probably missing!
raise NotImplementedError
return val
def string_to_variant(string, vtype):
"""
    Convert a string back to a ua.Variant
"""
return ua.Variant(string_to_val(string, vtype), vtype)
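# --- Editor's sketch (illustrative, not part of the original module) ---
def _example_string_roundtrip():
    """Round-trip a list value through the string helpers above."""
    var = string_to_variant("[1, 2, 3]", ua.VariantType.Int32)  # ua.Variant of [1, 2, 3]
    text = variant_to_string(var)                               # "[1, 2, 3]"
    return string_to_val(text, ua.VariantType.Int32)            # back to [1, 2, 3]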
def get_node_children(node, nodes=None):
"""
Get recursively all children of a node
"""
if nodes is None:
nodes = [node]
for child in node.get_children():
nodes.append(child)
get_node_children(child, nodes)
return nodes
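# --- Editor's sketch: traversal order of get_node_children, shown with a
# minimal stand-in node (hypothetical; real nodes come from an OPC UA server).
class _FakeNode:
    def __init__(self, name, children=()):
        self.name = name
        self._children = list(children)
    def get_children(self):
        return self._children

def _example_traversal():
    tree = _FakeNode("root", [_FakeNode("a", [_FakeNode("a1")]), _FakeNode("b")])
    return [n.name for n in get_node_children(tree)]  # ['root', 'a', 'a1', 'b']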
def get_node_subtypes(node, nodes=None):
if nodes is None:
nodes = [node]
for child in node.get_children(refs=ua.ObjectIds.HasSubtype):
nodes.append(child)
get_node_subtypes(child, nodes)
return nodes
def get_node_supertypes(node, includeitself=False, skipbase=True):
"""
    Return all supertype parents of a node, recursively.
    :param node: can be a ua.Node or ua.NodeId
    :param includeitself: also include the node itself in the list
    :param skipbase: don't include the top-level type
    :returns: list of ua.Node, top parent first
"""
parents = []
if includeitself:
parents.append(node)
parents.extend(_get_node_supertypes(node))
if skipbase and len(parents) > 1:
parents = parents[:-1]
return parents
def _get_node_supertypes(node):
"""
    recursive helper for get_node_supertypes
"""
basetypes = []
parent = get_node_supertype(node)
if parent:
basetypes.append(parent)
basetypes.extend(_get_node_supertypes(parent))
return basetypes
def get_node_supertype(node):
"""
return node supertype or None
"""
supertypes = node.get_referenced_nodes(refs=ua.ObjectIds.HasSubtype,
direction=ua.BrowseDirection.Inverse,
includesubtypes=True)
if supertypes:
return supertypes[0]
else:
return None
def is_child_present(node, browsename):
"""
    Return whether a browsename is present as a child of the provided node
    :param node: node in which to look for the browsename
    :param browsename: browsename to search for
    :returns: True if the browsename is present, else False
"""
child_descs = node.get_children_descriptions()
for child_desc in child_descs:
if child_desc.BrowseName == browsename:
return True
return False
def data_type_to_variant_type(dtype_node):
"""
Given a Node datatype, find out the variant type to encode
data. This is not exactly straightforward...
"""
base = get_base_data_type(dtype_node)
if base.nodeid.Identifier != 29:
return ua.VariantType(base.nodeid.Identifier)
else:
# we have an enumeration, value is a Int32
return ua.VariantType.Int32
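# Editor's note (illustrative): for the Boolean DataType node (ns=0;i=1) the
# function above returns ua.VariantType.Boolean, while any enumeration
# (base ns=0;i=29) is encoded on the wire as Int32, hence the special case.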
def get_base_data_type(datatype):
"""
Looks up the base datatype of the provided datatype Node
The base datatype is either:
A primitive type (ns=0, i<=21) or a complex one (ns=0 i>21 and i<=30) like Enum and Struct.
Args:
        datatype: NodeId of a datatype of a variable
    Returns:
        NodeId of the datatype base, or None in case the base datatype cannot be determined
"""
base = datatype
while base:
if base.nodeid.NamespaceIndex == 0 and isinstance(base.nodeid.Identifier, int) and base.nodeid.Identifier <= 30:
return base
base = get_node_supertype(base)
raise ua.UaError("Datatype must be a subtype of builtin types {0!s}".format(datatype))
def get_nodes_of_namespace(server, namespaces=None):
"""
    Get the nodes of one or more namespaces.
Args:
server: opc ua server to use
        namespaces: list of namespace URIs (str) or namespace indexes (int) to export
Returns:
List of nodes that are part of the provided namespaces
"""
if namespaces is None:
namespaces = []
ns_available = server.get_namespace_array()
if not namespaces:
namespaces = ns_available[1:]
elif isinstance(namespaces, (str, int)):
namespaces = [namespaces]
    # make sure all namespaces are indexes (convert strings to indexes if needed)
namespace_indexes = [n if isinstance(n, int) else ns_available.index(n) for n in namespaces]
    # filter nodeids based on the provided namespaces and convert each nodeid to a node
nodes = [server.get_node(nodeid) for nodeid in server.iserver.aspace.keys()
if nodeid.NamespaceIndex != 0 and nodeid.NamespaceIndex in namespace_indexes]
return nodes
def get_default_value(uatype):
if isinstance(uatype, ua.VariantType):
return ua.get_default_values(uatype)
elif hasattr(ua.VariantType, uatype):
return ua.get_default_value(getattr(ua.VariantType, uatype))
else:
return getattr(ua, uatype)()
|
bitkeeper/python-opcua
|
opcua/common/ua_utils.py
|
Python
|
lgpl-3.0
| 8,286
|
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Interpret various computer languages using installed interpreters.
@file code_interpreter.py
@package pybooster.code_interpreter
@version 2019.07.14
@author Devyn Collier Johnson <DevynCJohnson@Gmail.com>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from subprocess import getoutput # nosec
from sys import stdout
__all__: list = [
# CLISP #
r'execclispfile',
# COFFEESCRIPT #
r'execcoffeescript',
# JAVASCRIPT #
r'execjs',
r'execjsfile',
# LUA #
r'execlua',
r'execluafile',
# PERL #
r'execperl',
r'execperlfile',
r'initperl',
# PHP #
r'execphp',
r'execphpfile',
# RUBY #
r'execruby',
r'execrubyfile',
# SCALA #
r'execscala',
    r'execscalafile',
# SHELL #
r'execsh',
r'execshfile',
r'initsh'
]
# CLISP #
def execclispfile(_filename: str) -> str:
"""Execute a CLisp file given as a str and return the output as a str."""
return getoutput(r'clisp ' + _filename)
# COFFEESCRIPT #
def execcoffeescript(_code: str) -> str:
"""Execute Coffeescript code given as a str and return the output as a str."""
return getoutput('coffeescript --eval \'' + _code.replace('\'', '\\\'') + '\'')
# JAVASCRIPT #
def execjs(_code: str) -> str:
"""Execute JavaScript code given as a str and return the output as a str."""
return getoutput('jsc -e \'' + _code.replace('\'', '\\\'') + '\'')
def execjsfile(_filename: str) -> str:
"""Execute a JavaScript file given as a str and return the output as a str."""
return getoutput(r'jsc -e ' + _filename)
# LUA #
def execlua(_code: str) -> str:
"""Execute Lua code given as a str and return the output as a str."""
return getoutput('lua -e \'' + _code.replace('\'', '\\\'') + '\'')
def execluafile(_filename: str) -> str:
"""Execute a Lua script given as a str and return the output as a str."""
return getoutput(r'lua ' + _filename)
# PERL #
def execperl(_code: str) -> str:
"""Execute Perl code given as a str and return the output as a str."""
return getoutput('perl -e \'' + _code.replace('\'', '\\\'') + '\'')
def execperlfile(_filename: str) -> str:
"""Execute a Perl script given as a str and return the output as a str."""
return getoutput(r'perl ' + _filename)
def initperl() -> None:
"""Run a Perl REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Perl > ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('perl -e \'' + _input + '\'') + '\n')
# PHP #
def execphp(_code: str) -> str:
"""Execute PHP code given as a str and return the output as a str."""
return getoutput('php -r \'' + _code.replace('\'', '\\\'') + '\'')
def execphpfile(_filename: str) -> str:
"""Execute a PHP script given as a str and return the output as a str."""
return getoutput(r'php -f ' + _filename)
# RUBY #
def execruby(_code: str) -> str:
"""Execute Ruby code given as a str and return the output as a str."""
return getoutput('ruby -e \'' + _code.replace('\'', '\\\'') + '\'')
def execrubyfile(_filename: str) -> str:
"""Execute a Ruby script given as a str and return the output as a str."""
return getoutput(r'ruby ' + _filename)
# SCALA #
def execscala(_code: str) -> str:
"""Execute Scala code given as a str and return the output as a str."""
return getoutput('scala -e \'' + _code.replace('\'', '\\\'') + '\'')
def execscalafile(_filename: str) -> str:
"""Execute a Scala file given as a str and return the output as a str."""
return getoutput(r'scala ' + _filename)
# SHELL #
def execsh(_code: str) -> str:
"""Execute Shell code given as a str and return the output as a str."""
return getoutput('sh -c \'' + _code.replace('\'', '\\\'') + '\'')
def execshfile(_filename: str) -> str:
"""Execute a Shell script given as a str and return the output as a str."""
return getoutput(r'sh ' + _filename)
def initsh() -> None:
"""Run a shell REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Shell: $ ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('sh -c \'' + _input + '\'') + '\n')
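# --- Editor's sketch (illustrative, not part of the original module) ---
# All helpers above shell out via subprocess.getoutput, so the matching
# interpreter must be on PATH; output always comes back as a plain str.
def _example_shell_roundtrip() -> str:
    """Run a trivial one-liner through execsh (assumes `sh` is installed)."""
    return execsh(r'echo hello')  # -> 'hello'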
|
DevynCJohnson/Pybooster
|
pylib/code_interpreter.py
|
Python
|
lgpl-3.0
| 5,388
|
def isPandigital(strN):
Ln = [c for c in strN]
Ln.sort()
if Ln == ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
return True
return False
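# Editor's note: quick self-checks of isPandigital (illustrative).
assert isPandigital('918273645')      # 9 * (1,2,3,4,5) concatenated
assert not isPandigital('123456780')  # has a 0 and is missing 9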
listPans=[]
listMultiplier=[]
# not sure whether these limits are OK
i=9
while i<9999:
n=2
while n<999:
j=1
strProd=''
while j<n:
prod=j*i
strProd += str(prod)
j+=1
if len(strProd)>9:
break
#print i, n, strProd
if isPandigital(strProd):
            listPans.append(int(strProd))  # store the pandigital itself, not the last partial product
listMultiplier.append(i)
print "Pandigital", i, j, strProd
n+=1
i+=1
#output:
#Pandigital 9 6 918273645
#Pandigital 192 4 192384576
#Pandigital 219 4 219438657
#Pandigital 273 4 273546819
#Pandigital 327 4 327654981
#Pandigital 6729 3 672913458
#Pandigital 6792 3 679213584
#Pandigital 6927 3 692713854
#Pandigital 7269 3 726914538
#Pandigital 7293 3 729314586
#Pandigital 7329 3 732914658
#Pandigital 7692 3 769215384
#Pandigital 7923 3 792315846
#Pandigital 7932 3 793215864
#Pandigital 9267 3 926718534
#Pandigital 9273 3 927318546
#Pandigital 9327 3 932718654
|
haphaeu/yoshimi
|
EulerProject/038.py
|
Python
|
lgpl-3.0
| 1,137
|
# -*- encoding: utf-8 -*-
# from django.shortcuts import render, render_to_response, redirect, get_object_or_404, get_list_or_404, Http404
from django.core.cache import cache
from django.shortcuts import *
from django.views.generic import TemplateView, FormView
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template import RequestContext
from django import template
from models import proyecto
from .forms import *
#from Logica.ConexionBD import adminBD
import funciones
import sys
#~ from administradorConsultas import AdministradorConsultas # commented out by JAPeTo
#~ from manejadorArchivos import obtener_autores # commented out by JAPeTo
#~ from red import Red # commented out by JAPeTo
from Logica import ConsumirServicios, procesamientoScopusXml, procesamientoArxiv
# import igraph
import traceback
import json
import django.utils
from Logica.ConexionBD.adminBD import AdminBD
from principal.parameters import *
from principal.permisos import *
# sys.setdefaultencoding is cancelled by site.py
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
# Create your views here.
# @login_required
#ruta = "/home/administrador/ManejoVigtech/ArchivosProyectos/"
sesion_proyecto = None
proyectos_list = None
model_proyecto = None
id_proyecto = None
##nombre_proyecto = None
class home(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
global proyectos_list
global model_proyecto
try:
existe_proyecto = False
proyectos_list = get_list_or_404(proyecto, idUsuario=self.request.user)
for project in proyectos_list:
if project == model_proyecto:
existe_proyecto = True
if not (existe_proyecto):
model_proyecto = None
except:
# print traceback.format_exc()
proyectos_list = None
model_proyecto = None
return {'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos}
class RegistrarUsuario(FormView):
template_name = "registrarUsuario.html"
form_class = FormularioRegistrarUsuario
success_url = reverse_lazy('RegistrarUsuarios')
def form_valid(self, form):
user = form.save()
messages.success(self.request, "Se ha creado exitosamente el usuario")
return redirect('login')
def cambia_mensaje(crfsession, proyecto, usuario, borrar, mensaje, valor):
# print ">>>> AQUI ESTOY"+str(borrar)+" & "+str(mensaje)
try:
cache_key = "%s_%s_%s" % (crfsession,proyecto.replace(" ",""),usuario)
data = cache.get(cache_key)
if data:
data['estado'] = valor
data['mensaje'] += mensaje
if borrar :
data['mensaje'] = mensaje
cache.set(cache_key, data)
else:
cache.set(cache_key, {
'estado': 0,
'mensaje' : mensaje
})
except:
pass
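# --- Editor's sketch (hypothetical helper, not in the original file) ---
# Read back the progress entry written by cambia_mensaje; the key layout
# (csrf token, project name without spaces, username) mirrors the code above.
def leer_progreso(crfsession, proyecto, usuario):
    cache_key = "%s_%s_%s" % (crfsession, proyecto.replace(" ", ""), usuario)
    return cache.get(cache_key)  # {'estado': int, 'mensaje': str} or None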
@login_required
def nuevo_proyecto(request):
global id_proyecto
global model_proyecto
global proyectos_list
if request.method == 'POST':
form = FormularioCrearProyecto(request.POST)
fraseB = request.POST.get('fraseB')
fraseA = request.POST.get('fraseA')
autor = request.POST.get('autor')
words = request.POST.get('words')
before = request.POST.get('before')
after = request.POST.get('after')
limArxiv = request.POST.get('limArxiv')
limSco = request.POST.get('limSco')
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"",0)
busqueda = fraseB + "," + words + "," + fraseA + "," + autor + "," + before + "," + after
# print "busca "+busqueda+", by japeto"
if form.is_valid():
nombreDirectorio = form.cleaned_data['nombre']
articulos = {}
modelo_proyecto = form.save(commit=False)
modelo_proyecto.idUsuario = request.user
# print "formulario valido, by japeto"
# print "2"
# proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
# proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
#modelo_proyecto.calificacion=5
modelo_proyecto.fraseBusqueda = busqueda
modelo_proyecto.save()
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
model_proyecto = get_object_or_404(proyecto, id_proyecto=modelo_proyecto.id_proyecto)
id_proyecto = model_proyecto.id_proyecto
            # Create the directory where the documents of the newly created project will be stored.
mensajes_pantalla="<p class='text-primary'><span class='fa fa-send fa-fw'></span>Se ha creado el Directorio para el proyecto</p>"
funciones.CrearDirectorioProyecto(modelo_proyecto.id_proyecto, request.user)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,mensajes_pantalla,6)
# print "se crea directorio, by japeto"
if fraseB != "":
try:
"""
                Download of documents from Google Arxiv
"""
# print "descarga de documentos, by japeto"
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Descarga de documentos de Arxiv</p>",12)
articulos_arxiv= ConsumirServicios.consumir_arxiv(fraseB, request.user.username, str(modelo_proyecto.id_proyecto), limArxiv)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Descarga de documentos terminada</p>",18)
except:
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA: </b>Descarga de documentos de Arxiv</p>",12)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"STOP",0)
print traceback.format_exc()
try:
"""
Download of documents from Scopus
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Descarga de documentos de Scopus</p>",24)
articulos_scopus = ConsumirServicios.consumir_scopus(fraseB, request.user.username, str(modelo_proyecto.id_proyecto), limSco)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Descarga de documentos terminada</p>",30)
except:
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA: </b>Descarga de documentos de Scopus</p>",24)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"STOP",0)
print traceback.format_exc()
try:
"""
Insertion of arXiv metadata into the database
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia la inserción de metadatos Arxiv</p>",36)
xml = open(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/salida.xml")
procesamientoArxiv.insertar_metadatos_bd(xml, str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>La inserción de metadatos Arxiv ha terminado</p>",42)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",36)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>La inserción de metadatos Arxiv no se puede completar</p>",36)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",36)
# print traceback.format_exc()
try:
"""
Database connection to insert Scopus paper metadata
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia la inserción de metadatos Scopus</p>",48)
busqueda = open(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/busqueda0.xml")
procesamientoScopusXml.xml_to_bd(busqueda, modelo_proyecto.id_proyecto, articulos_scopus['titulos'])
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>La inserción de metadatos Scopus ha terminado</p>",54)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",48)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>La inserción de metadatos Scopus no se puede completar</p>",48)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",48)
# print traceback.format_exc()
# try:
# """
# NAIVE BAYES
# """
# #ConsumirServicios.consumir_recuperacion_unidades_academicas(str(request.user.username),str(modelo_proyecto.id_proyecto))
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia el procesado Scopus XML</ṕ>",60)
# procesamientoScopusXml.xml_to_bd(busqueda, modelo_proyecto.id_proyecto, articulos_scopus['titulos'])
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>El procesmiento Scopus XML ha terminado</p>",62)
# except:
# # cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",60)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b> El procesando Scopus XML no se puede completar</p>",60)
# # cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",60)
# # print traceback.format_exc()
try:
"""
Generate the XML output
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia convertidor archivo de XML</p>",60)
adminBD = AdminBD()
papers = adminBD.getPapers(modelo_proyecto.id_proyecto)
target = open(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/busqueda1.xml", 'w')
target.write(funciones.papersToXML(papers))
target.close()
# print str(funciones.papersToXML(papers))
# funciones.papersToXML(papers).write(REPOSITORY_DIR+ str(request.user.username)+ "." + str(modelo_proyecto.id_proyecto) + "/busqueda1.xml")
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>termina el convertidor archivo de XML</p>",60)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
print traceback.format_exc()
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>Error al convertir archivo de XML</p>",64)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
# print traceback.format_exc()
try:
"""
Indexing
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia la indexación</p>",64)
ir = ConsumirServicios.IR()
ir.indexar(str(request.user.username),str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Indexacion terminada</p>",68)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>La indexación no se puede completar</p>",64)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",64)
# print traceback.format_exc()
try:
""""
Analisis
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia el Analisis</p>",66)
data = ConsumirServicios.consumir_analisis(str(request.user.username),str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Analisis terminado</p>",68)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",66)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b> El Analisis no se puede completar</p>",66)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",66)
# print traceback.format_exc()
try:
"""
Social network analysis
"""
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia el Analisis de Redes Sociales</p>",70)
network = ConsumirServicios.consumir_red(str(request.user.username),str(modelo_proyecto.id_proyecto))
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Analisis de Redes Sociales terminado</p>",72)
except:
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",70)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b>El Analisis de Redes Sociales no se puede completar</p>",70)
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",70)
#print traceback.format_exc()
try:
"""
Retrieval of academic units
"""
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-primary'><span class='fa fa-send fa-fw'></span>Inicia la recuperacion de unidades academicas</p>",10)
# ConsumirServicios.consumir_recuperacion_unidades_academicas(str(request.user.username),str(modelo_proyecto.id_proyecto))
# cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Finaliza la recuperacion de unidades academicas</p>",10)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,True,"",80)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Se ha creado el proyecto</p>",90)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-success'><span class='fa fa-check fa-fw'></span>Su navegador se reiniciara</p>",97)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"EOF",100)
except:
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"<p class='text-danger'><span class='fa fa-times fa-fw'></span><b>PROBLEMA:</b> la recuperacion de unidades academicas no se puede completar: {}</p>".format(traceback.format_exc()),80)
cambia_mensaje(request.POST.get('csrfmiddlewaretoken'),request.POST.get('fraseB'),request.user.username,False,"EOF",100)
# print traceback.format_exc()
# messages.success(request, "Se ha creado exitosamente el proyecto")
#articulos = funciones.buscadorSimple(fraseB)
#ac = AdministradorConsultas()
#ac.descargar_papers(fraseB)
#lista_scopus = ac.titulos_descargas
#if fraseA != "" or autor != "" or words != "":
# articulos = funciones.buscadorAvanzado(fraseA, words, autor, after, before)
#print articulos
#funciones.moveFiles(modelo_proyecto.id_proyecto, request.user, articulos, lista_scopus)
#funciones.escribir_archivo_documentos(modelo_proyecto.id_proyecto, request.user, articulos, lista_scopus)
# messages.success(request, "Se ha creado exitosamente el proyecto")
#~ return redirect('crear_proyecto')
else:
messages.error(request, "Imposible crear el proyecto")
else:
form = FormularioCrearProyecto()
return render(request, 'GestionProyecto/NuevoProyecto.html', {'form': form,
'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos}, context_instance=RequestContext(request))
#Display of a user's own projects.
@login_required
def ver_mis_proyectos(request):
global model_proyecto
global proyectos_list
try:
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
except:
proyectos_list =None
messages.success(request, "Usted no tiene proyectos")
return render(request, 'GestionProyecto/verMisProyectos.html', {'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos}, context_instance=RequestContext(request))
#Display of publicly available projects that do not belong to the current user.
@login_required
def ver_otros_proyectos(request):
global model_proyecto
global proyectos_list
if (model_proyecto != None and model_proyecto.idUsuario != request.user):
model_proyecto = None
try:
proyectos_list_all = get_list_or_404(proyecto)
idUser = request.user
otros_proyectos = []
for project in proyectos_list_all:
if project.idUsuario != idUser:
otros_proyectos.append(project)
except:
proyectos_list_all =None
otros_proyectos = None
return render(request, 'GestionProyecto/OtrosProyectos.html', {
'proyectos': otros_proyectos, 'proyectos_user':proyectos_list, 'mproyecto': model_proyecto}, context_instance=RequestContext(request))
@login_required
def busqueda_navegacion(request):
global proyectos_list
global model_proyecto
return render(request, 'GestionBusqueda/Busqueda_Navegacion.html', {'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def editar_proyecto(request, id_proyecto):
global proyectos_list
global model_proyecto
model_proyecto = get_object_or_404(proyecto, id_proyecto=id_proyecto)
request.session['proyecto']= str(model_proyecto.id_proyecto)
request.proyecto = model_proyecto
# print "This is my project:",request.session['proyecto']
lista = funciones.crearListaDocumentos(id_proyecto, request.user)
if request.method == 'POST':
proyecto_form = FormularioCrearProyecto(request.POST, instance=model_proyecto)
#proyecto_form.fields['disponibilidad'].widget.attrs['disabled']=True
if proyecto_form.is_valid():
#print proyecto_form.cleaned_data
#nuevoNombre=proyecto_form.cleaned_data['nombre']
model_project = proyecto_form.save()
# funciones.cambiarNombreDirectorio(nombreDirectorioAnterior,nuevoNombre,request.user)
messages.success(request, "Se ha modificado exitosamente el proyecto")
else:
messages.error(request, "Imposible editar el proyecto")
else:
proyecto_form = FormularioCrearProyecto(instance=model_proyecto)
return render(request, 'GestionProyecto/editar_proyecto.html',
{'form': proyecto_form, 'lista': lista, 'user': request.user, 'mproyecto':model_proyecto, 'proyectos_user': proyectos_list, 'proyecto': id_proyecto, 'lista_permisos': permisos},
context_instance=RequestContext(request))
@login_required
def ver_proyecto(request, id_proyecto):
global model_proyecto
global proyectos_list
proyecto_actual = get_object_or_404(proyecto, id_proyecto=id_proyecto)
proyecto_form = FormularioCrearProyecto(instance=proyecto_actual)
if (model_proyecto != None and model_proyecto.idUsuario != request.user):
model_proyecto = None
#model_proyecto = get_object_or_404(proyecto, id_proyecto=id_proyecto)
#proyecto_form = FormularioCrearProyecto(instance=model_proyecto)
#proyecto_form.fields['disponibilidad'].widget.attrs['disabled']=True
#proyecto_form.fields['nombre'].label="Titulo del proyecto"
proyecto_form.fields['nombre'].widget.attrs['disabled'] = True
proyecto_form.fields['resumen'].widget.attrs['disabled'] = True
return render(request, 'GestionProyecto/ver_proyecto.html', {'form': proyecto_form, 'mproyecto':model_proyecto, 'proyectos_user':proyectos_list, 'lista_permisos': permisos},
context_instance=RequestContext(request))
@login_required
def buscador(request):
global proyectos_list
global model_proyecto
if request.method == 'GET':
ir = ConsumirServicios.IR()
fraseBusqueda = request.GET.get("busquedaIR")
data = ir.consultar(fraseBusqueda,str(request.user.username), str(model_proyecto.id_proyecto))
# print model_proyecto
# IR.consultar(fraseBusqueda,"","")
# data = ir.consultar(fraseBusqueda,str(request.user.username),request.session['proyecto'])
#data = funciones.busqueda(fraseBusqueda)
#for d in data:
# d['path'] = d['path'].replace("/home/vigtech/shared/repository/", "/media/").encode("utf8")
# print data
# print fraseBusqueda
return render(request, "GestionBusqueda/Busqueda_Navegacion.html", {'resultados': data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
else:
return render(request, "GestionBusqueda/Busqueda_Navegacion.html", {'resultados': data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto,'lista_permisos': permisos})
@login_required
def analisisView(request):
global proyectos_list
global model_proyecto
#data = ConsumirServicios.consumir_red(request.user.username, request.session['proyecto'])
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR + proyecto + "/coautoria.json") as json_file:
data = json.load(json_file)
#nodos, aristas = r.generar_json()
nodos1 = json.dumps(data['nodes'])
aristas1 = json.dumps(data['links'])
# return render(request, "GestionAnalisis/coautoria.html", {"nodos": nodos1, "aristas": aristas1})
return render(request, "GestionAnalisis/coautoria.html", {"nodos": nodos1, "aristas": aristas1, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto,'lista_permisos': permisos})
#return render(request, "GestionAnalisis/coautoria2.html", {"proyecto":proyecto})
@login_required
def coautoria_old(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR + proyecto + "/coautoria.json") as json_file:
data = json.load(json_file)
#nodos, aristas = r.generar_json()
nodos1 = json.dumps(data['nodes'])
aristas1 = json.dumps(data['links'])
# return render(request, "GestionAnalisis/coautoria.html", {"nodos": nodos1, "aristas": aristas1})
return render(request, "GestionAnalisis/Analisis.html", {"nodos": nodos1, "aristas": aristas1, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def eliminar_proyecto(request, id_proyecto):
global model_proyecto
global proyectos_list
try:
# print "#1"
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
# print "#2"
model_proyecto = get_object_or_404(proyecto, id_proyecto=str(request.session['proyecto']))
# print "#3"
except:
proyectos_list = None
model_proyecto = None
user = request.user
project = get_object_or_404(proyecto, id_proyecto=id_proyecto)
funciones.eliminar_proyecto(id_proyecto, user)
project.delete()
messages.success(request, "El proyecto \""+project.nombre+"\" se elimino.")
return HttpResponseRedirect(reverse('ver_mis_proyectos'))
@login_required
def analisis_paises(request):
global proyectos_list
global model_proyecto
# print model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR+ proyecto + "/data.json") as json_file:
data = json.load(json_file)
# print data
labels=json.dumps(data['paises']['labels'])
values=json.dumps(data['paises']['valores'])
# print proyecto
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/paisesbar.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_autores(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/autoresbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_afiliaciones(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/afiliacionesbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_revistas(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/revistasbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_docsfechas(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/fechasbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_tipodocs(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/tiposbar.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_paisespie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/paisespie.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_autorespie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/autorespie.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_afiliacionespie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/afiliacionespie.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_revistaspie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/revistaspie.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_docsfechaspie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/fechaspie.html",{"proyecto":proyecto,'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_tipodocspie(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
return render(request, "GestionAnalisis/tipospie.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
@login_required
def analisis_clustering(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
return render(request, "GestionAnalisis/grupos.html",{"proyecto":proyecto, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
@login_required
def analisis_indicadores(request):
global proyectos_list
global model_proyecto
try:
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
with open(REPOSITORY_DIR + proyecto + "/data.json") as json_file:
data = json.load(json_file)
return render(request, "GestionAnalisis/indicadores.html",{"data":data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
print traceback.format_exc()
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
# print data
#labels=json.dumps(data['paises']['labels'])
#values=json.dumps(data['paises']['valores'])
#print proyecto
#return render(request, "GestionAnalisis/paisesbar.html",{"labels": labels, "values": values})
#return render(request, "GestionAnalisis/indicadores.html",{"data":data, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto})
@login_required
def clasificacion_eisc(request):
global proyectos_list
global model_proyecto
try:
proyecto = str(request.user.username) + "." + str(model_proyecto.id_proyecto)
#proyecto = str(request.user.username) + "." + str(request.session['proyecto'])
with open(REPOSITORY_DIR + proyecto + "/eisc.json") as json_file:
data = json.load(json_file)
eids = data['clasificacion']
if eids :
adminBD = AdminBD()
papers =adminBD.get_papers_eid(eids)
return render (request, "GestionEISC/clasificacion_eisc.html", {"papers": papers, 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
else:
return render (request, "GestionEISC/clasificacion_eisc.html", {"papers": [], 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
except:
return render(request, "GestionAnalisis/Blank_default.html", { 'proyectos_user': proyectos_list, 'mproyecto': model_proyecto, 'lista_permisos': permisos})
def logmensajes(request):
"""
Permite consultar el estado del proceso de creacion de
un nuevo proyecto
"""
try:
cache_key = "%s_%s_%s" % (request.GET.get('csrfmiddlewaretoken'),request.GET.get('fraseB').replace(" ",""),request.user.username)
data = json.dumps(cache.get(cache_key))
print cache.get(cache_key)['estado']
cache.set(cache_key, {'estado': cache.get(cache_key)['estado'],'mensaje' : ""})
except:
print "logmensajes: problem reading the progress cache"
cambia_mensaje(request.GET.get('csrfmiddlewaretoken'),request.GET.get('fraseB'),request.user.username,False,"",0)
data = json.dumps({'estado': 0, 'mensaje': ""})
return HttpResponse(data,content_type="application/json")
# Permission configuration -- which links to show (alozada)
@login_required
def configurar_permisos(request):
global model_proyecto
global proyectos_list
# print permisos["estadisticas"]
try:
proyectos_list = get_list_or_404(proyecto, idUsuario=request.user)
except:
proyectos_list =None
messages.success(request, "Usted no tiene proyectos")
if request.method == 'POST':
if 'cbIndicadores' in request.POST:
permisos["indicadores"] = 1
else:
permisos["indicadores"] = 0
if 'graficos_barra' in request.POST:
permisos["graficos_barra"] = 1
else:
permisos["graficos_barra"] = 0
if 'graficos_pie' in request.POST:
permisos["graficos_pie"] = 1
else:
permisos["graficos_pie"] = 0
if not ('cbIndicadores' in request.POST and 'graficos_barra' in request.POST and 'graficos_pie' in request.POST):
print "estadisticas disabled"
permisos["estadisticas"] = 0
else:
print "estadisticas enabled"
permisos["estadisticas"] = 1
if 'coautoria' in request.POST:
permisos["coautoria"] = 1
else:
permisos["coautoria"] = 0
if 'coautoria_medidas' in request.POST:
permisos["coautoria_medidas"] = 1
else:
permisos["coautoria_medidas"] = 0
if 'clustering' in request.POST:
permisos["clustering"] = 1
else:
permisos["clustering"] = 0
if 'clasificacion_eisc' in request.POST:
permisos["clasificacion_eisc"] = 1
else:
permisos["clasificacion_eisc"] = 0
return render(request, 'configurar_permisos.html', {'proyectos_user': proyectos_list, 'lista_permisos': permisos, 'mproyecto': model_proyecto}, context_instance=RequestContext(request))
# def registrarusuario(request):
# if request.method == 'GET':
# return render(request, "registrarUsuario.html")
# elif request.method == 'POST':
# data = request.POST.get('nombre')
# print data
# # messages.success(request, "Se ha creado exitosamente el usuario")
# # return redirect('login')
# return render (request, "registrarUsuario.html", {"response": data})
# else:
# return render(request, "registrarUsuario.html")
|
VigTech/Vigtech-Services
|
principal/views.py
|
Python
|
lgpl-3.0
| 45,303
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
try :
# TEST NUMBER ZERO : DEFAULT CONSTRUCTOR AND STRING CONVERTER
print "test number zero : default constructor and string converter"
# Default constructor
symmetricMatrix0 =SymmetricMatrix()
# String converter
print "symmetricMatrix0 = " , repr(symmetricMatrix0)
# TEST NUMBER ONE : CONSTRUCTOR WITH SIZE, OPERATOR() AND STRING CONVERTER
print "test number one : constructor with size, operator() and string converter"
# Constructor with size
symmetricMatrix1 = SymmetricMatrix(2)
# Check operator() methods
symmetricMatrix1[0,0]=1.
symmetricMatrix1[1,0]=2.
symmetricMatrix1[0,1]=3.
symmetricMatrix1[1,1]=4.
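# Note (added): in a SymmetricMatrix the (i,j) and (j,i) entries share
# storage, so the assignment to [0,1] above presumably overwrites the
# earlier [1,0] value, leaving both off-diagonal entries equal to 3.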
# String converter
print "symmetricMatrix1 = " , repr(symmetricMatrix1)
# TEST NUMBER TWO : COPY CONSTRUCTOR AND STRING CONVERTER
print "test number two : copy constructor and string converter"
# Copy constructor
symmetricMatrix2 = SymmetricMatrix(symmetricMatrix1)
# String converter
print "symmetricMatrix2 = " , repr(symmetricMatrix2)
# TEST NUMBER THREE : GET DIMENSIONS METHODS
print "test number three : get dimensions methods"
# Get dimension methods
print "symmetricMatrix1's nbRows = " , symmetricMatrix1.getNbRows()
print "symmetricMatrix1's nbColumns = " , symmetricMatrix1.getNbColumns()
# TEST NUMBER FIVE : ASSIGNMENT METHOD
print "test number five : assignment method"
# Assignment method
# No sense in Python
# TEST NUMBER SIX : TRANSPOSITION METHOD
print "test number six : transposition method"
# Check transpose method
symmetricMatrix4 = symmetricMatrix1.transpose()
print "symmetricMatrix1 transposed = " , repr(symmetricMatrix4)
# TEST NUMBER SEVEN : ADDITION METHOD
print "test number seven : addition method"
# Check addition method : we check the operator and the symmetry of the operator, thus testing the comparison operator
sum1 = symmetricMatrix1 + symmetricMatrix4
sum2 = symmetricMatrix4 + symmetricMatrix1
print "sum1 = " , repr(sum1)
print "sum2 = " , repr(sum2)
print "sum1 equals sum2 = " , sum1 == sum2
# TEST NUMBER EIGHT : SUBTRACTION METHOD
print "test number eight : subtraction method"
# Check subtraction method
diff = symmetricMatrix1-symmetricMatrix4
print "diff = " , repr(diff)
# TEST NUMBER NINE : MATRIX MULTIPLICATION METHOD
print "test number nine : matrix multiplication method"
# Check multiplication method
prod = symmetricMatrix1*symmetricMatrix4
print "prod = " , repr(prod)
# TEST NUMBER TEN : MULTIPLICATION WITH A NUMERICAL POINT METHOD
print "test number ten : multiplication with a numerical point method"
# Create the numerical point
pt = NumericalPoint()
pt.add(1.)
pt.add(2.)
print "pt = " , repr(pt)
# Check the product method
ptResult = symmetricMatrix1* pt
print "ptResult = " , repr(ptResult)
# TEST NUMBER ELEVEN : MULTIPLICATION AND DIVISION BY A NUMERICAL SCALAR METHODS
print "test number eleven : multiplication and division by a numerical scalar methods"
# Check the multiplication method
s=3.
scalprod1 = symmetricMatrix1 * s
# bug PYTHON scalprod2 = s * symmetricMatrix1
scalprod3 = symmetricMatrix1 * s
print "scalprod1 = " , repr(scalprod1)
# print "scalprod2 = " , scalprod2
print "scalprod3 = " , repr(scalprod3)
# print "scalprod1 equals scalprod2 = " , (scalprod1 == scalprod2)
print "scalprod1 equals scalprod3 = " , (scalprod1 == scalprod3)
# print "scalprod2 equals scalprod3 = " , (scalprod2 == scalprod3)
# Check the division method
scaldiv1 = symmetricMatrix1/s
scaldiv2 = symmetricMatrix1/s
print "scaldiv1 = " , repr(scaldiv1)
print "scaldiv2 = " , repr(scaldiv2)
print "scaldiv1 equals scaldiv2 = " , (scaldiv1 == scaldiv2)
# TEST NUMBER TWELVE : ISEMPTY METHOD
print "test number twelve : isEmpty method"
# Check method isEmpty
symmetricMatrix5 = SymmetricMatrix()
symmetricMatrix6 = SymmetricMatrix()
print "symmetricMatrix0 is empty = " , symmetricMatrix0.isEmpty()
print "symmetricMatrix1 is empty = " , symmetricMatrix1.isEmpty()
print "symmetricMatrix5 is empty = " , symmetricMatrix5.isEmpty()
except :
import sys
print "t_SymmetricMatrix_std.py", sys.exc_type, sys.exc_value
|
dbarbier/privot
|
python/test/t_SymmetricMatrix_std.py
|
Python
|
lgpl-3.0
| 4,523
|
from django.db.models.sql import compiler
# Added: SQLEvaluator is referenced in SQLUpdateCompiler.as_sql below but
# was never imported; this path matches the Django versions (<= 1.6) this
# backend appears to target.
from django.db.models.sql.expressions import SQLEvaluator
from datetime import datetime
import re
from django.db.models.base import Model
REV_ODIR = {
'ASC': 'DESC',
'DESC': 'ASC'
}
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
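# Added sketch: how SQL_SERVER_8_LIMIT_QUERY emulates LIMIT/OFFSET on
# SQL Server 2000. The inner query (orig_sql) is assumed to select
# TOP (offset + limit) rows in the desired order; the middle query takes
# TOP limit of them in reverse order, and the outer query restores the
# original order. All names and values below are illustrative only.
#
# print SQL_SERVER_8_LIMIT_QUERY % {
#     'limit': 10,                                      # rows wanted
#     'orig_sql': 'SELECT TOP 30 * FROM [myapp_item]',  # offset(20) + limit(10)
#     'ord': '[id] ASC',
#     'rev_ord': '[id] DESC',                           # via REV_ODIR
#     'table': '[myapp_item]',
# }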
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
index_start = len(self.query.extra_select.keys())
values = [self.query.convert_values(v, None, connection=self.connection) for v in row[:index_start]]
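# Python 2 idiom (added note): map(None, a, b) pairs items from both
# sequences, padding the shorter one with None, like itertools.izip_longest.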
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
return tuple(values)
"""
use django as_sql with editing limit
"""
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark != 0)
if not do_offset:
return super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
select_re = re.compile(r'^SELECT[ ]+(DISTINCT\s)?')
query, params = super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
m = select_re.match(query)
if with_limits and m is not None:
num = None
insert = None
if self.query.high_mark is not None:
num = self.query.high_mark - self.query.low_mark
if num <= 0:
return None, None
insert = 'TOP %d' % num
if insert is not None:
if m.groups()[0] is not None:
query = select_re.sub('SELECT DISTINCT %s ' % insert, query)
else:
query = select_re.sub('SELECT %s ' % insert, query)
return query, params
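# Added sketch (standalone, mirroring the regex logic in as_sql above):
# how a TOP clause is spliced into the SELECT head.
#
# import re
# select_re = re.compile(r'^SELECT[ ]+(DISTINCT\s)?')
# q = 'SELECT DISTINCT [name] FROM [t]'
# m = select_re.match(q)
# if m.groups()[0] is not None:
#     q = select_re.sub('SELECT DISTINCT TOP 5 ', q)
# else:
#     q = select_re.sub('SELECT TOP 5 ', q)
# # q == 'SELECT DISTINCT TOP 5 [name] FROM [t]'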
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
def as_sql_legacy(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
if returns_id:
result = ['SET NOCOUNT ON']
else:
result = []
result.append('INSERT INTO %s' % qn(opts.db_table))
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
if returns_id:
result.append(';\nSELECT SCOPE_IDENTITY()')
params = self.query.params
sql = ' '.join(result)
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
if auto_field_column in self.query.columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
if returns_id:
sql = "SET NOCOUNT ON"
else:
sql = ""
if len(self.query.columns) == 1 and not params:
sql += "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql += "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
if returns_id:
sql += '\n;SELECT SCOPE_IDENTITY()'
return sql, params
def as_sql(self):
"""
Uses Django's as_sql(), but excludes nullable datetime
fields whose value is None from the INSERT column list.
"""
from django.db.models.fields import DateTimeField
from django.db.models.fields import DateField
# return super(SQLInsertCompiler, self).as_sql()
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
preset_fields = self.query.fields if has_fields else [opts.pk]
fields = []
if len(self.query.objs) == 1:
# Field filtering below applies to single-row inserts only;
# multi-row inserts pass through unchanged, so they can still
# fail on nullable datetime fields set to None.
for field in preset_fields:
# if not isinstance(field, (DateField, DateTimeField)):
# fields.append(field)
if field.get_db_prep_save(
getattr(self.query.objs[0], field.attname) if self.query.raw else field.pre_save(self.query.objs[0], True), connection=self.connection) is not None:
fields.append(field)
elif field.blank is not True:
fields.append(field)
else:
fields = preset_fields
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
def as_sql(self):
"""
Copy of django UpdateCommpiler as_sql
need cheack datetime field
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
|
VanyaDNDZ/django-sybase-backend
|
sqlsybase_server/pyodbc/compiler.py
|
Python
|
unlicense
| 10,019
|
class Symbol():
def __init__(self, name, isterm):
self.name = name
self.isterm = isterm
def __str__(self):
if self.isterm:
if self.name == "":
return "&"
return "'{}'".format(self.name)
return "<{}>".format(self.name)
|
ggabriel96/cfg_analyzer
|
Symbol.py
|
Python
|
unlicense
| 299
|
'''
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
'''
nums = range(999, 99, -1)
allProducts = [x * y for x in nums for y in nums]
palindromeProducts = [p for p in allProducts if str(p) == str(p)[::-1]]
answer = max(palindromeProducts)
print(answer)
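# A lazier variant (added sketch; same answer) that avoids materializing all
# ~810,000 products: for a fixed x, products shrink as y decreases, so we can
# stop early once x * y can no longer beat the best palindrome found so far.
best = 0
for x in nums:
    for y in range(x, 99, -1):
        p = x * y
        if p <= best:
            break  # products only shrink as y decreases
        if str(p) == str(p)[::-1]:
            best = p
assert best == answer  # 906609 == 913 * 993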
|
nadrees/PyEuler
|
0004.py
|
Python
|
unlicense
| 416
|
from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import AdobeTVIE
from .adultswim import AdultSwimIE
from .aftonbladet import AftonbladetIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .cinchcast import CinchcastIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firedrive import FiredriveIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKTVIE,
)
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .quickvid import QuickVidIE
from .radiode import RadioDeIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlXlIE
from .rtlnow import RTLnowIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import CinemassacreIE, ScreenwaveMediaIE, TeamFourIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snotr import SnotrIE
from .sockshare import SockshareIE
from .sohu import SohuIE
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import SoundgasmIE
from .southpark import (
SouthParkIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import TMZIE
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .twentyfourvideo import TwentyFourVideoIE
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import VikiIE
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import WebOfStoriesIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yesjapan import YesJapanIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
def list_extractors(age_limit):
"""
Return a list of extractors that are suitable for the given age,
sorted by extractor ID.
"""
return sorted(
filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
key=lambda ie: ie.IE_NAME.lower())
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name + 'IE']
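
# Illustrative usage sketch (not part of the module): resolving which
# extractor will handle a URL. suitable() and IE_NAME are the standard
# InfoExtractor attributes the downloader core relies on.
#
#     from youtube_dl.extractor import gen_extractors
#     url = 'https://vimeo.com/56015672'
#     matches = [ie.IE_NAME for ie in gen_extractors() if ie.suitable(url)]
#     # 'generic' is appended last, so a specific extractor such as 'vimeo'
#     # wins under the first-match rule documented in gen_extractors().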
|
rzhxeo/youtube-dl
|
youtube_dl/extractor/__init__.py
|
Python
|
unlicense
| 17,033
|
#!/usr/bin/env python
"""
books.py
reads lists of books from input files in several formats and prints them filtered and sorted
features
- iterates through records without holding the entire dataset in memory, allowing for large datasets
- uses SQLite for storage and retrieval
"""
import os
import argparse
import sqlite3
from book_list.book_list_file_reader import BookListFileReader
from book_list.book_list import BookList
# Config
curdir = os.path.dirname(os.path.realpath(__file__))
SQLITE3_DB_FILE = curdir + '/db/booklist.sqlite3'
file_import_list = {
'csv': curdir + '/code-test-source-files/csv',
'pipe': curdir + '/code-test-source-files/pipe',
'slash': curdir + '/code-test-source-files/slash',
}
# Command line parsing
parser = argparse.ArgumentParser(
    description='Read multiple formats of book data and display them filtered and sorted.'
)
parser.add_argument('--filter', action='store', default=None,
help='show a subset of books, looks for the argument as a substring of any of the fields')
parser.add_argument('--year', action='store_true', default=False,
help="sort the books by year, ascending instead of default sort")
parser.add_argument('--reverse', action='store_true', default=False,
help='reverse sort')
args = parser.parse_args()
# Read files and populate book list
sqlite3_connection = sqlite3.connect(SQLITE3_DB_FILE)
book_list = BookList(sqlite3_connection)
for parse_type, file_path in file_import_list.items():
reader = BookListFileReader(file_path, parse_type)
while True:
row = reader.get_result()
if row is None:
break
book_list.insert_record(row)
# Make query based on command line arguments
book_list.query_book_list(filter=args.filter, year=args.year, reverse=args.reverse)
# Output
while True:
row = book_list.get_record()
    if row is None:
break
print("{}, {}, {}, {}".format(*row))
|
danieltalsky/gp-code-test
|
books.py
|
Python
|
unlicense
| 1,974
|
import sys

# PyMOL injects `cmd` when a script is run with its `run` command; the
# explicit import below also lets the script run via PyMOL's Python API.
from pymol import cmd
def parse_noe(filename):
f = open(filename, 'r')
noe_pairs = []
    for line in f.readlines():
        # token positions assume an XPLOR/CNS-style "assign" restraint line
        res_a = int(line.split()[2])
        res_b = int(line.split()[7])
        noe_pair = [res_a, res_b]
if noe_pair not in noe_pairs:
noe_pairs.append(noe_pair)
f.close()
print len(noe_pairs), "CA lines"
return noe_pairs
def count_restraints(filename):
f = open(filename, 'r')
noe_pairs = []
for line in f.readlines():
        # print(line)
        res_a = int(line.split()[2])
        res_b = int(line.split()[7])
        name_a = line.split()[5].rstrip(")")[:-1]
        name_b = line.split()[10].rstrip(")")[:-1]
noe_pair = [res_a, res_b, name_a, name_b]
if [res_a, res_b, name_a, name_b] not in noe_pairs and \
[res_b, res_a, name_b, name_a] not in noe_pairs:
noe_pairs.append(noe_pair)
f.close()
print len(noe_pairs), "NOE contacts"
return len(noe_pairs)
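
# Assumed restraint format (inferred from the token positions above):
# XPLOR/CNS-style NOE "assign" statements, roughly
#   assign (resid 12 and name HA1) (resid 47 and name HB2) 4.0 2.2 1.0
# Tokens 2 and 7 carry the residue numbers; tokens 5 and 10 carry the atom
# names, from which a trailing ")" and one final character are stripped.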
native_pdb = sys.argv[1]
noe = False
noe_file = ""
if len(sys.argv) == 3:
noe = True
noe_file = sys.argv[2]
count_restraints(noe_file)
cmd.load(native_pdb, "native")
cmd.hide("all")
cmd.show("cartoon", "native")
if noe:
for pair in parse_noe(noe_file):
cmd.distance("noe", "native and resi %i and name ca" % (pair[0]),
"native and resi %i and name ca" % (pair[1]))
cmd.hide("labels", "noe")
cmd.color("grey", "native")
|
andersx/cs-proteins
|
scripts/visualize_noe.py
|
Python
|
unlicense
| 1,599
|
# -*- coding:utf-8 -*-
from bottle import route, run
@route("/")
def access():
return "OK!"
# host defaults to 127.0.0.1
# OK (reachable)     - localhost / 127.0.0.1
# NG (not reachable) - 192.168.0.10 / hostname
# run(port=8080, debug=True, reloader=True)
# run(host="localhost", port=8080, debug=True, reloader=True)
# OK - 192.168.0.10 / hostname
# NG - localhost / 127.0.0.1
run(host="192.168.0.10", port=8080, debug=True, reloader=True)
# run(host="<your hostname>", port=8080, debug=True, reloader=True)
# OK - ALL
# run(host="0.0.0.0", port=8080, debug=True, reloader=True)
|
thinkAmi-sandbox/Bottle-sample
|
lan_access.py
|
Python
|
unlicense
| 589
|
#!/usr/bin/python3.5
# import time
import math
class DataHeap:
"""Min-Heap with additionnal data field"""
def __init__(self):
self.list = [(0, None)]
self.size = 0
    def insert(self, value, data=None):
self.list.append((value, data))
self.size += 1
self.percolateUp(self.size)
def percolateUp(self, i):
half_i = i // 2
while half_i:
if self.list[i][0] < self.list[half_i][0]:
self.list[i], self.list[half_i] = self.list[half_i], self.list[i]
i = half_i
half_i //= 2
def extractMin(self):
self.list[self.size], self.list[1] = self.list[1], self.list[self.size]
self.size -= 1
retval = self.list.pop()
self.percolateDown(1)
return retval
def percolateDown(self, i):
while 2 * i <= self.size:
minChildIndex = self.minChildIndex(i)
if self.list[i][0] > self.list[minChildIndex][0]:
self.list[i], self.list[minChildIndex] = self.list[minChildIndex], self.list[i]
i = minChildIndex
def minChildIndex(self, i):
if 2 * i + 1 > self.size:
return 2 * i
elif self.list[2 * i][0] < self.list[2 * i + 1][0]:
return 2 * i
else:
return 2 * i + 1
    def extractByData(self, data):
        # Linear scan, so this is O(n); a data->index map would allow O(log n).
        for index, x in enumerate(self.list):
            if x[1] == data:
                # Using 0 as a "smaller than everything" key is only safe
                # because all real keys here are positive; float('-inf')
                # would be the robust sentinel.
                self.list[index] = (0, data)
                self.percolateUp(index)
                self.extractMin()
                return x
        return None
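
# Minimal DataHeap usage sketch (illustrative; results checked by hand):
#   h = DataHeap()
#   h.insert(5, 'a'); h.insert(2, 'b'); h.insert(9, 'c')
#   h.extractMin()        # -> (2, 'b')
#   h.extractByData('c')  # -> (9, 'c'), removed without being the minimum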
def dijkstra_shortest_path():
# input_file_name = 'dijkstra_shortest_path_test_result_is_0_1_2_3_4_4_3_2.txt'
# input_file_name = 'dijkstra_shortest_path_test_result_is_0_3_5_8_5_7_11_4_6_10_10.txt'
input_file_name = 'dijkstra_shortest_path_input.txt'
vertices = []
edges = []
with open(input_file_name) as f:
for line in f:
line_data = line.split('\n')[0].split('\t')
vertex = int(line_data.pop(0))
vertices.append(vertex)
for edge_data in line_data:
other_vertex, edge_length = edge_data.split(',')
edges.append((vertex, int(other_vertex), int(edge_length)))
# print(len(vertices))
# print(len(edges))
n_vertices = len(vertices)
shortest_paths = {}
# 1 is the start vertex
start_vertex = vertices.pop(0)
shortest_paths[start_vertex] = 0
heap = DataHeap()
for v in vertices:
heap.insert(math.inf, v)
    def maintain_second_invariant(removed_vertex):
        """Relax every edge leaving removed_vertex so that each heap key is
        again the smallest known distance through the processed vertices."""
        # print('maintain_second_invariant', removed_vertex)
        # print(heap.size)
        shortest_path = shortest_paths[removed_vertex]
for edge in edges:
if edge[0] == removed_vertex:
extracted_data = heap.extractByData(edge[1]) # O(n)...
# print('extracted_data', extracted_data, shortest_path + edge[2])
# print(heap.list)
if extracted_data:
heap.insert(min(extracted_data[0], shortest_path + edge[2]), extracted_data[1])
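
    # Note: the linear scan inside extractByData makes each relaxation O(n),
    # so the whole run is O(n*m); a heap with a data->index map would give
    # the classic O(m log n) bound.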
maintain_second_invariant(start_vertex)
# print(heap.list)
# print()
# print(heap.extractMin())
while heap.size > 0:
score, vertex = heap.extractMin()
# print(vertex)
shortest_paths[vertex] = score
maintain_second_invariant(vertex)
# print(shortest_paths)
# print('')
print(','.join([str(shortest_paths[x]) for x in [7,37,59,82,99,115,133,165,188,197]]))
if __name__ == '__main__':
    dijkstra_shortest_path()
|
dherault/Coursera_algorithms
|
homework/dijkstra_shortest_path.py
|
Python
|
unlicense
| 4,317
|