| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dsoprea/PySecure
|
pysecure/types.py
|
1
|
4059
|
import platform
from ctypes import *
from datetime import datetime
from pysecure.constants import TIME_DATETIME_FORMAT
from pysecure.constants.sftp import SSH_FILEXFER_TYPE_REGULAR, \
SSH_FILEXFER_TYPE_DIRECTORY, \
SSH_FILEXFER_TYPE_SYMLINK, \
SSH_FILEXFER_TYPE_SPECIAL, \
SSH_FILEXFER_TYPE_UNKNOWN
c_mode_t = c_int
c_uid_t = c_uint32
c_gid_t = c_uint32
# These are very unpredictable. We can only hope that this holds up for
# most systems.
# Returns something like "32bit" or "64bit".
arch_name = platform.architecture()[0]
arch_width = int(arch_name[0:2])
if arch_width == 64:
c_time_t = c_uint64
c_suseconds_t = c_uint64
else:
c_time_t = c_uint32
c_suseconds_t = c_uint32
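# Hedged cross-check (not part of the original module): "from ctypes import *"
# above also provides sizeof() and c_void_p, so the interpreter's own pointer
# width gives an independent 32/64-bit hint; warn if the two heuristics differ.
if sizeof(c_void_p) * 8 != arch_width:
    import warnings
    warnings.warn("platform.architecture() and ctypes disagree on the word "
                  "size; c_time_t/c_suseconds_t may be mis-sized")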
class _CSftpAttributesStruct(Structure):
_fields_ = [('name', c_char_p),
('longname', c_char_p),
('flags', c_uint32),
('type', c_uint8),
('size', c_uint64),
('uid', c_uint32),
('gid', c_uint32),
('owner', c_char_p),
('group', c_char_p),
('permissions', c_uint32),
('atime64', c_uint64),
('atime', c_uint32),
('atime_nseconds', c_uint32),
('createtime', c_uint64),
('createtime_nseconds', c_uint32),
('mtime64', c_uint64),
('mtime', c_uint32),
('mtime_nseconds', c_uint32),
('acl', c_void_p), # NI: ssh_string
('extended_count', c_uint32),
('extended_type', c_void_p), # NI: ssh_string
('extended_data', c_void_p)] # NI: ssh_string
def __repr__(self):
mtime_phrase = datetime.fromtimestamp(self.mtime).\
strftime(TIME_DATETIME_FORMAT)
return ('<ATTR "%s" S=(%d) T=(%d) MT=[%s]>' %
(self.name, self.size, self.type, mtime_phrase))
@property
def is_regular(self):
return self.type == SSH_FILEXFER_TYPE_REGULAR
@property
def is_directory(self):
return self.type == SSH_FILEXFER_TYPE_DIRECTORY
@property
def is_symlink(self):
return self.type == SSH_FILEXFER_TYPE_SYMLINK
@property
def is_special(self):
return self.type == SSH_FILEXFER_TYPE_SPECIAL
@property
def is_unknown_type(self):
return self.type == SSH_FILEXFER_TYPE_UNKNOWN
@property
def modified_time(self):
# TODO: We're not sure if the mtime64 value is available on a 32-bit platform. We do this to be safe.
return self.mtime64 if self.mtime64 else self.mtime
@property
def modified_time_dt(self):
if self.mtime64:
return datetime.fromtimestamp(self.mtime64)
else:
return datetime.fromtimestamp(self.mtime)
_CSftpAttributes = POINTER(_CSftpAttributesStruct)
class CTimeval(Structure):
# it was easier to set these types based on what libssh assigns to them.
# The traditional definition leaves some platform ambiguity.
_fields_ = [('tv_sec', c_uint32),
('tv_usec', c_uint32)]
c_timeval = CTimeval
class _CSshKeyStruct(Structure):
_fields_ = [('type', c_int),
('flags', c_int),
('type_c', c_char_p),
('ecdsa_nid', c_int),
('dsa', c_void_p),
('rsa', c_void_p),
('ecdsa', c_void_p),
('cert', c_void_p)]
# Fortunately, we should probably be able to avoid most/all of the mechanics
# for the vast number of structs.
c_ssh_session = c_void_p #POINTER(CSshSessionStruct)
c_ssh_channel = c_void_p
c_sftp_session = c_void_p
c_sftp_attributes = _CSftpAttributes
c_sftp_dir = c_void_p
c_sftp_file = c_void_p
c_ssh_key = POINTER(_CSshKeyStruct)
# A simple aliasing assignment doesn't work, here.
# c_sftp_statvfs = c_void_p
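# --- hedged usage sketch, not part of PySecure -------------------------------
# The aliases above exist so libssh entry points can be annotated for ctypes.
# The shared-library name below is an assumption; sftp_stat() is the libssh
# call whose return type corresponds to c_sftp_attributes.
if __name__ == '__main__':
    try:
        _libssh = CDLL('libssh.so.4')
    except OSError:
        _libssh = None
    if _libssh is not None:
        _libssh.sftp_stat.argtypes = [c_sftp_session, c_char_p]
        _libssh.sftp_stat.restype = c_sftp_attributes
        # A real call additionally needs an established sftp_session handle:
        # attrs = _libssh.sftp_stat(session, b'/path/to/file').contents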
|
gpl-2.0
| -6,243,054,323,641,303,000
| 30.465116
| 101
| 0.55186
| false
| 3.478149
| false
| false
| false
|
dke-knu/i2am
|
i2am-app/AlgorithmSelectionEngine/PeriodicClassification/DeepLearning(local).py
|
1
|
2820
|
""" Learned classification model """
import tensorflow as tf
from PeriodicClassification import ModelConfig as myConfig
from PeriodicClassification import Preprocess as pre
def _model(X, keep_prob):
# input
W1 = tf.Variable(tf.random_normal([myConfig.INPUT_SIZE, myConfig.HIDDEN_SIZE]), name="weight1")
b1 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L1 = tf.matmul(X, W1) + b1
L1 = tf.nn.dropout(L1, keep_prob[0])
"""hidden Layers
dropout:
"""
W2 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.HIDDEN_SIZE]), name="weight2")
b2 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L2 = tf.nn.softsign(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob[1])
W3 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.HIDDEN_SIZE]), name="weight3")
b3 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L3 = tf.nn.softsign(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob[1])
W4 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.HIDDEN_SIZE]), name="weight4")
b4 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L4 = tf.nn.softsign(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob[1])
W5 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.OUTPUT_SIZE]), name="weight5")
b5 = tf.Variable(tf.random_normal([myConfig.OUTPUT_SIZE]))
L5 = tf.nn.softsign(tf.matmul(L4, W5) + b5)
hypothesis = tf.nn.dropout(L5, keep_prob[2])
# weight parameters and biases
param_list = [W1, W2, W3, W4, W5, b1, b2, b3, b4, b5]
saver = tf.train.Saver(param_list)
return hypothesis, saver
def _classification(hypothesis):
p = tf.nn.softmax(hypothesis)
h_predict = tf.argmax(p, 1)
return h_predict
def _DNN_main(USER_DATA_PATH):
list_time_series = pre._reader(USER_DATA_PATH)
time_series = pre._resize(list_time_series)
print(time_series.shape)
X = tf.placeholder(tf.float32, [None, myConfig.INPUT_SIZE])
keep_prob = tf.placeholder(tf.float32) #0.1, 0.2, 0.3
hypo, model_saver = _model(X=X, keep_prob=keep_prob)
h_predict = _classification(hypothesis=hypo)
"""Initialize"""
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph(myConfig.SAVED_MODEL_PATH)
saver.restore(sess, tf.train.latest_checkpoint(myConfig.CHECKPOINT_PATH))
t_trained = sess.run([h_predict], feed_dict={X: time_series, keep_prob: [1.0, 1.0, 1.0]})
print(t_trained[0])
if t_trained[0] == 1:
print('Non periodic')
return False
else:
print('Periodic')
return True
# Usage Example
# _DNN_main("user's data path")
_DNN_main("D:/DKE/data/period_classification/시연데이터/ECG_데이터_1.csv")
|
apache-2.0
| 5,677,919,536,755,330,000
| 33.207317
| 100
| 0.661912
| false
| 2.781746
| true
| false
| false
|
jgliss/pyplis
|
pyplis/model_functions.py
|
1
|
8950
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss (jonasgliss@gmail.com)
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pyplis module containing mathematical model functions."""
from __future__ import (absolute_import, division)
from numpy import exp, sin, cos
from pyplis import logger
import six
# Polynomial fit functions of different order, including versions that go
# through the origin of the coordinate system
# (e.g. used in doascalib.py), dictionary keys are the polynomial order
polys = {1: lambda x, a0, a1: a0 * x + a1,
2: lambda x, a0, a1, a2: a0 * x**2 + a1 * x + a2,
3: lambda x, a0, a1, a2, a3: a0 * x**3 + a1 * x**2 + a2 * x + a3}
polys_through_origin = {1: lambda x, a0: a0 * x,
2: lambda x, a0, a1: a0 * x**2 + a1 * x,
3: lambda x, a0, a1, a2: a0 * x**3 + a1 * x**2 + a2 * x
}
def cfun_kern2015(x, a0, a1):
return a0 * exp(x * a1) - 1
def cfun_kern2015_offs(x, a0, a1, a2):
return a0 * (exp(x * a1) - 1) + a2
class CalibFuns(object):
"""Class containing functions for fit of calibration curve."""
def __init__(self):
self.polys = {0: polys,
1: polys_through_origin}
self.custom_funs = {"kern2015": cfun_kern2015,
"kern2015_offs": cfun_kern2015_offs}
self._custom_funs_info = {"kern2015": ("see Eq. 6 in Kern et al., 2015"
"https://doi.org/10.1016/j."
"jvolgeores.2014.12.004"),
"kern2015_offs": ("Like previous, including "
"an offset term")}
def available_poly_orders(self, through_origin=False):
"""Return the available polynomial orders.
Parameter
---------
through_origin : bool
polys without offset
Returns
-------
list
list containing available polyorders
"""
return list(self.polys[through_origin].keys())
def print_poly_info(self):
"""Print information about available polynomials."""
logger.info("Available polyorders (with offset): %s"
"Available polyorders (without offset): %s"
% (list(self.polys[0].keys()), list(self.polys[1].keys())))
def print_custom_funs_info(self):
"""Print information about available curtom calib functions."""
logger.info("Available polyorders (with offset): %s"
"Available polyorders (without offset): %s"
% (list(self.polys[0].keys()), list(self.polys[1].keys())))
for k, v in six.iteritems(self._custom_funs_info):
logger.info("%s : %s" % (k, v))
def get_custom_fun(self, key="kern2015"):
"""Return an available custom calibration function.
Parameters
----------
key : str
access key of custom function (call :func:`print_custom_funs_info`
for info about available functions)
Returns
-------
the function object
"""
if key not in self.custom_funs.keys():
raise KeyError("No custom calibration function with key %s "
"available" % key)
return self.custom_funs[key]
def get_poly(self, order=1, through_origin=False):
"""Get a polynomial of certain order.
Parameters
----------
order : int
order of polynomial (choose from 1-3)
through_origin : bool
if True, the polynomial will have no offset term
Returns
-------
function
the polynomial function object (callable)
"""
if order not in self.polys[through_origin].keys():
raise ValueError("Polynomial of order %s is not supported "
"available orders are %s"
% (order,
list(self.polys[through_origin].keys())))
return self.polys[through_origin][order]
def dilutioncorr_model(dist, rad_ambient, i0, ext):
r"""Model function for light dilution correction.
This model is based on the findings of `Campion et al., 2015
<http://www.sciencedirect.com/science/article/pii/S0377027315000189>`_.
:param float dist: distance of dark (black) object in m
:param float rad_ambient: intensity of ambient atmosphere at position of
dark object
:param float i0: initial intensity of dark object before it enters the
scattering medium. It is determined from the illumination intensity
and the albedo of the dark object.
:param float ext: atmospheric scattering extinction coefficient
:math:`\epsilon` (in Campion et al., 2015 denoted with :math:`\sigma`).
"""
return i0 * exp(-ext * dist) + rad_ambient * (1 - exp(-ext * dist))
def gaussian_no_offset(x, ampl, mu, sigma):
"""1D gauss with baseline zero.
:param float x: x position of evaluation
:param float ampl: Amplitude of gaussian
:param float mu: center position
:param float sigma: standard deviation
:returns float: value at position x
"""
# return float(ampl)*exp(-(x - float(mu))**2/(2*float(sigma)**2))
return ampl * exp(-(x - mu)**2 / (2 * sigma**2))
def gaussian(x, ampl, mu, sigma, offset):
"""1D gauss with arbitrary baseline.
:param float x: x position of evaluation
:param float ampl: Amplitude of gaussian
:param float mu: center position
:param float sigma: standard deviation
:param float offset: baseline of gaussian
:returns float: value at position x
"""
return gaussian_no_offset(x, ampl, mu, sigma) + offset
def multi_gaussian_no_offset(x, *params):
"""Superimposed 1D gauss functions with baseline zero.
:param array x: x array used for evaluation
:param list *params: List of length L = 3xN where N corresponds to the
number of gaussians e.g.::
[100,10,3,50,15,6]
would correspond to 2 gaussians with the following characteristics:
1. Peak amplitude: 100, Mu: 10, sigma: 3
2. Peak amplitude: 50, Mu: 15, sigma: 6
"""
res = 0
num = int(len(params) / 3)
for k in range(num):
p = params[k * 3:(k + 1) * 3]
res = res + gaussian_no_offset(x, *p)
return res
def multi_gaussian_same_offset(x, offset, *params):
"""Superimposed 1D gauss functions with baseline (offset).
See :func:`multi_gaussian_no_offset` for instructions
"""
return multi_gaussian_no_offset(x, *params) + offset
def supergauss_2d(position, amplitude, xm, ym, sigma, asym, shape, offset):
"""2D super gaussian without tilt.
:param tuple position: position (x, y) of Gauss
:param float amplitude: amplitude of peak
:param float xm: x position of maximum
:param float ym: y position of maximum
:param float asym: asymmetry in y direction (1 is circle, smaller
means dilated in y direction)
:param float shape: super gaussian shape parameter (1 is gaussian)
:param float offset: base level of gaussian
"""
x, y = position
u = ((x - xm) / sigma) ** 2 + ((y - ym) * asym / sigma)**2
g = offset + amplitude * exp(-u**shape)
return g.ravel()
def supergauss_2d_tilt(position, amplitude, xm, ym, sigma, asym, shape, offset,
theta):
"""2D super gaussian without tilt.
:param tuple position: position (x, y) of Gauss
:param float amplitude: amplitude of peak
:param float xm: x position of maximum
:param float ym: y position of maximum
:param float asym: asymmetry in y direction (1 is circle, smaller
means dilated in y direction)
:param float shape: super gaussian shape parameter (1 is gaussian)
:param float offset: base level of gaussian
:param float theta: tilt angle (rad) of super gaussian
"""
x, y = position
xprime = (x - xm) * cos(theta) - (y - ym) * sin(theta)
yprime = (x - xm) * sin(theta) + (y - ym) * cos(theta)
u = (xprime / sigma)**2 + (yprime * asym / sigma)**2
g = offset + amplitude * exp(-u**shape)
return g.ravel()
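# --- hedged usage sketch, not part of the original module --------------------
# The callables returned by CalibFuns.get_poly have the signature f(x, a0, ...)
# expected by least-squares fitters, so they can be passed straight to
# scipy.optimize.curve_fit. The sample data below is invented for illustration
# and scipy is only assumed to be available inside this guarded block.
if __name__ == "__main__":
    from numpy import array
    from scipy.optimize import curve_fit

    funs = CalibFuns()
    calib_poly = funs.get_poly(order=1, through_origin=True)  # f(x, a0) = a0*x
    tau = array([0.0, 0.1, 0.2, 0.3])            # example apparent absorbances
    cds = array([0.0, 1.1e17, 2.0e17, 3.2e17])   # example SO2 column densities
    coeffs, _ = curve_fit(calib_poly, tau, cds)
    logger.info("Fitted calibration slope: %.3e" % coeffs[0])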
|
gpl-3.0
| 7,274,057,895,621,544,000
| 35.234818
| 79
| 0.603128
| false
| 3.762085
| false
| false
| false
|
UNC-Major-Lab/Fragment-Isotope-Distribution-Paper
|
scripts/theoretical/mergeHistogram.py
|
1
|
1930
|
#!/usr/bin/env python
import sys
import os
import re
from collections import defaultdict
from math import floor
from math import isnan
root_dir = sys.argv[1]
prefix = sys.argv[2]
do_iso = sys.argv[3]
if do_iso == 'F':
comp2bin2count = defaultdict(dict)
for f in os.listdir(root_dir):
fp = root_dir+"/"+f
if os.path.isfile(fp) and ".out" in f and f.startswith(prefix):
infile = open(fp)
for line in infile:
if (len(line.strip().split("\t")) == 2):
pass  # two-column summary/header line; nothing to merge
else:
[comp, bin, count] = line.strip().split("\t")
count = int(count)
if not comp2bin2count[comp].has_key(bin):
comp2bin2count[comp][bin] = 0
comp2bin2count[comp][bin]+=count
infile.close()
for comp in comp2bin2count:
for bin in comp2bin2count[comp]:
print "\t".join([comp, str(bin), str(comp2bin2count[comp][bin])])
else:
comp2iso2bin2count = defaultdict(dict)
for f in os.listdir(root_dir):
fp = root_dir+"/"+f
if os.path.isfile(fp) and ".out" in f and f.startswith(prefix):
infile = open(fp)
for line in infile:
if (len(line.strip().split("\t")) == 2):
pass  # two-column summary/header line; nothing to merge
else:
[comp, bin, iso, count] = line.strip().split("\t")
count = int(count)
if not comp2iso2bin2count[comp].has_key(iso):
comp2iso2bin2count[comp][iso] = defaultdict(int)
comp2iso2bin2count[comp][iso][bin]+=count
infile.close()
for comp in comp2iso2bin2count:
for iso in comp2iso2bin2count[comp]:
for bin in comp2iso2bin2count[comp][iso]:
print "\t".join([comp, str(bin), iso, str(comp2iso2bin2count[comp][iso][bin])])
|
mit
| 324,607,062,226,680,960
| 32.877193
| 95
| 0.526425
| false
| 3.607477
| false
| false
| false
|
NETWAYS/ingraph
|
ingraph/api.py
|
1
|
16388
|
# inGraph (https://www.netways.org/projects/ingraph)
# Copyright (C) 2011-2012 NETWAYS GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import xmlrpclib
import cPickle
import ingraph.model as model
class BackendRPCMethods(object):
def __init__(self, engine, queryqueue, logger):
self.hosts = {}
self.services = {}
self.hostservices = {}
self.plots = {}
self.engine = engine
self.queryqueue = queryqueue
self.logger = logger
self.shutdown_server = False
def setupTimeFrame(self, interval, retention_period=None):
tfs = model.TimeFrame.getAll(self.engine)
for tf in tfs:
if (tf.interval < interval and interval % tf.interval != 0) or \
(tf.interval > interval and tf.interval % interval != 0):
raise xmlrpclib.Fault(
1, 'Timeframe interval is invalid. Must be multiple of '
'existing timeframe or evenly divisible by existing '
'larger intervals.')
if tf.interval == interval:
tf.retention_period = retention_period
tf.save(self.engine)
return tf.id
tf = model.TimeFrame(interval, retention_period)
tf.save(self.engine)
return tf.id
def getTimeFrames(self):
tfs = model.TimeFrame.getAll(self.engine)
items = {}
for tf in tfs:
items[str(tf.interval)] = {'id': tf.id,
'interval': tf.interval,
'retention-period': tf.retention_period}
return items
def disableTimeFrame(self, tf_id):
tf = model.TimeFrame.getByID(self.engine, tf_id)
tf.active = False
tf.save(self.engine)
return True
def _createHost(self, conn, name):
if name in self.hosts:
return self.hosts[name]
obj = model.Host.getByName(conn, name)
if obj == None:
obj = model.Host(name)
obj.save(conn)
self.hosts[name] = obj
return obj
def _createService(self, conn, name):
if name in self.services:
return self.services[name]
obj = model.Service.getByName(conn, name)
if obj == None:
obj = model.Service(name)
obj.save(conn)
self.services[name] = obj
return obj
def _createHostService(self, conn, host, service, parent_hostservice):
hostservice_key = (host, service)
if hostservice_key in self.hostservices:
return self.hostservices[hostservice_key]
objs = model.HostService.getByHostAndService(conn, host, service,
parent_hostservice)
if len(objs) == 0:
obj = model.HostService(host, service, parent_hostservice)
obj.save(conn)
else:
obj = objs[0]
self.hostservices[hostservice_key] = obj
return obj
def _createPlot(self, conn, hostservice, name):
plot_key = (hostservice, name)
if plot_key in self.plots:
return self.plots[plot_key]
objs = model.Plot.getByHostServiceAndName(conn, hostservice, name)
if len(objs) == 0:
obj = model.Plot(hostservice, name)
obj.save(conn)
else:
obj = objs[0]
self.plots[plot_key] = obj
return obj
def insertValueBulk(self, updates_raw):
updates = cPickle.loads(updates_raw)
conn = self.engine.connect()
for update in updates:
(host, parent_service, service, plot, timestamp, unit, value, lower_limit, upper_limit, warn_lower, warn_upper, warn_type,
crit_lower, crit_upper, crit_type, pluginstatus) = update
try:
host_obj = self._createHost(conn, host)
if parent_service != None:
parent_service_obj = self._createService(conn, parent_service)
parent_hostservice_obj = self._createHostService(
conn, host_obj, parent_service_obj, None)
else:
parent_hostservice_obj = None
service_obj = self._createService(conn, service)
hostservice_obj = self._createHostService(conn, host_obj,
service_obj,
parent_hostservice_obj)
plot_obj = self._createPlot(conn, hostservice_obj, plot)
queries = plot_obj.buildUpdateQueries(
conn, timestamp, unit, value, value, value, lower_limit,
upper_limit, warn_lower, warn_upper, warn_type, crit_lower,
crit_upper, crit_type)
for query in queries:
self.queryqueue.put(query)
# if pluginstatus in ['warning', 'critical']:
# status_obj = model.PluginStatus(hostservice_obj, timestamp, pluginstatus)
# status_obj.save(conn)
except Exception, e:
print e
continue
conn.close()
return True
def getHosts(self):
hosts = model.Host.getAll(self.engine)
items = []
for host in hosts:
items.append(host.name)
return items
def getHostsFiltered(self, pattern, limit=None, offset=None):
result = model.Host.getByPattern(self.engine,
pattern.replace('*', '%'),
limit, offset)
items = []
for host in result['hosts']:
items.append(host.name)
return {'total': result['total'], 'hosts': items}
def getServices(self, host_pattern, service_pattern=None, limit=None,
offset=None):
result = model.HostService.getByHostAndServicePattern(
self.engine, host_pattern.replace('*', '%'),
service_pattern.replace('*', '%'), limit, offset)
items = []
for hostservice_obj in result['services']:
if hostservice_obj.parent_hostservice == None:
parentservice = None
else:
parentservice = hostservice_obj.parent_hostservice.service.name
item = { 'service': hostservice_obj.service.name,
'parent_service': parentservice }
items.append(item)
return {'total': result['total'], 'services': items}
def _flattenCharts(self, dps):
charts = []
for plot_obj, plot_charts in dps['charts'].iteritems():
for type, data in plot_charts.iteritems():
label = plot_obj.name + '-' + type
hostservice_obj = plot_obj.hostservice
if hostservice_obj.parent_hostservice != None:
label = hostservice_obj.service.name + '-' + label
if hostservice_obj.service.name != '':
svc_id = ' - ' + hostservice_obj.service.name
else:
svc_id = ''
plot_id = hostservice_obj.host.name + svc_id + ' - ' + plot_obj.name + ' - ' + type
charts.append({'host': hostservice_obj.host.name,
'service': hostservice_obj.service.name,
'plot': plot_obj.name, 'type': type,
'label': label, 'unit': plot_obj.unit,
'start_timestamp': dps['start_timestamp'],
'end_timestamp': dps['end_timestamp'],
'granularity': dps['granularity'],
'data': data,
'plot_id': plot_id})
return charts
def getPlotValues2(self, query, start_timestamp=None, end_timestamp=None,
granularity=None, null_tolerance=0):
conn = self.engine.connect()
st = time.time()
charts = []
comments = []
statusdata = []
result = {'comments': comments, 'charts': charts, 'statusdata': statusdata,
'min_timestamp': model.dbload_min_timestamp,
'max_timestamp': time.time()}
if start_timestamp == '':
start_timestamp = None
if end_timestamp == '':
end_timestamp = None
if granularity == '':
granularity = None
vquery = {}
for spec in query:
host = model.Host.getByName(conn, spec['host'])
parent_hostservice = None
if spec['parent_service']:
parent_service = model.Service.getByName(conn, spec['parent_service'])
parent_hostservice = model.HostService.getByHostAndService(conn, host, parent_service, None)
try:
parent_hostservice = parent_hostservice[0]
except IndexError:
parent_hostservice = None
service = model.Service.getByName(conn, spec['service'], None)
hose = model.HostService.getByHostAndService(conn, host, service, parent_hostservice)
try:
hose = hose[0]
except IndexError:
# Not found
continue
plots = model.Plot.getByHostServiceAndName(conn, hose, spec['plot'])
for plot in plots:
if plot not in vquery:
vquery[plot] = []
if spec['type'] not in vquery[plot]:
vquery[plot].append(spec['type'])
dps = model.DataPoint.getValuesByInterval(conn, vquery,
start_timestamp, end_timestamp,
granularity, null_tolerance)
conn.close()
if 'comments' in dps:
comments.extend(dps['comments'])
if 'statusdata' in dps:
statusdata.extend(dps['statusdata'])
if 'charts' in dps:
charts.extend(self._flattenCharts(dps))
et = time.time()
self.logger.debug("Got filtered plot values in %f seconds" % (et - st))
return result
def _optimizePlot(self, plot):
prev = None
same = False
result = []
for nvpair in plot:
if prev != None and prev[1] == nvpair[1]:
same = True
elif prev == None or same:
same = False
result.append({'x': nvpair[0], 'y': nvpair[1]})
else:
result.append({'y': nvpair[1]})
prev = nvpair
return result
def getPlotValues3(self, query, start_timestamp=None, end_timestamp=None,
granularity=None, null_tolerance=0):
data = self.getPlotValues2(query, start_timestamp, end_timestamp,
granularity, null_tolerance)
for chart in data['charts']:
chart['data'] = self._optimizePlot(chart['data'])
return data
def shutdown(self):
self.shutdown_server = True
return True
def addOrUpdateComment(self, comment_id, host, parent_service, service,
timestamp, author, text):
host_obj = self._createHost(self.engine, host)
if comment_id == '':
comment_id = None
if parent_service == '':
parent_service = None
if parent_service != None:
parent_service_obj = self._createService(self.engine,
parent_service)
parent_hostservice_obj = self._createHostService(
self.engine, host_obj, parent_service_obj, None)
else:
parent_hostservice_obj = None
service_obj = self._createService(self.engine, service)
hostservice_obj = self._createHostService(self.engine, host_obj,
service_obj,
parent_hostservice_obj)
if comment_id == None:
comment = model.Comment(hostservice_obj, timestamp, author, text)
else:
comment = model.Comment.getByID(self.engine, comment_id)
comment.hostservice = hostservice_obj
comment.timestamp = timestamp
comment.author = author
comment.text = text
comment.save(self.engine)
return comment.id
def addComment(self, host, parent_service, service, timestamp, author,
text):
return self.addOrUpdateComment(None, host, parent_service, service, timestamp,
author, text)
def deleteComment(self, comment_id):
comment = model.Comment.getByID(self.engine, comment_id)
comment.delete(self.engine)
def updateComment(self, comment_id, host, parent_service, service,
timestamp, author, text):
return self.addOrUpdateComment(comment_id, host, parent_service,
service, timestamp, author, text)
def getPlots(self, host_name, service_name, parent_service_name=None):
res = []
host = model.Host.getByName(self.engine, host_name)
if host_name and not host:
return res
service = model.Service.getByName(self.engine, service_name,
parent_service_name)
if service_name and not service:
return res
parent_hose = None
"""
if parent_service_name:
parent_service = model.Service.getByName(
self.engine, parent_service_name, None)
if not parent_service:
return res
parent_hose = model.HostService.getByHostAndService(
self.engine, host, parent_service, None)
try:
parent_hose = parent_hose[0]
except IndexError:
# Not found
pass
"""
hose = model.HostService.getByHostAndService(
self.engine, host, service, parent_hose)
try:
hose = hose[0]
except IndexError:
# Not found
pass
else:
children = model.HostService.getByHostAndService(
self.engine, hose.host, None, hose)
if children:
for child in children:
if child.parent_hostservice != None:
parent_service_name = child.parent_hostservice.service.name
else:
parent_service_name = None
plots = model.Plot.getByHostServiceAndName(
self.engine, child, None)
for plot in plots:
res.append({
'service': child.service.name,
'plot': plot.name,
'parent_service': parent_service_name
})
else:
if hose.parent_hostservice != None:
parent_service_name = hose.parent_hostservice.service.name
else:
parent_service_name = None
plots = model.Plot.getByHostServiceAndName(
self.engine, hose, None)
for plot in plots:
res.append({
'service': hose.service.name,
'plot': plot.name,
'parent_service': parent_service_name
})
return res
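# --- hedged usage sketch, not part of the original module --------------------
# BackendRPCMethods is registered with an XML-RPC server elsewhere in inGraph;
# a client would then invoke the methods above roughly as follows. Host, port
# and the example pattern are placeholders.
if __name__ == '__main__':
    proxy = xmlrpclib.ServerProxy('http://localhost:5000/')
    print proxy.getTimeFrames()
    print proxy.getHostsFiltered('web*', 10, 0)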
|
gpl-3.0
| 3,848,192,613,168,572,400
| 34.703704
| 134
| 0.527886
| false
| 4.509631
| false
| false
| false
|
spark8103/deploy
|
app/celery_runner.py
|
1
|
4976
|
import subprocess
from subprocess import Popen, PIPE
from . import celery
@celery.task(bind=True)
def deploy_running_task(self, cmd, type='Deploy'):
has_error = False
result = None
output = ""
self.update_state(state='PROGRESS',
meta={'output': output,
'description': "",
'returncode': None})
print(str.format("About to execute: {0}", cmd))
proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(proc.stdout.readline, ''):
print(str(line))
output = output + line
self.update_state(state='PROGRESS', meta={'output': output, 'description': "", 'returncode': None})
return_code = proc.poll()
if return_code is 0:
meta = {'output': output,
'returncode': proc.returncode,
'description': ""
}
self.update_state(state='FINISHED',
meta=meta)
elif return_code is not 0:
# failure
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
self.update_state(state='FAILED',
meta=meta)
if len(output) is 0:
output = "no output, maybe no matching hosts?"
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
return meta
@celery.task(bind=True)
def ansible_running_task(self, cmd, type='Ansible'):
has_error = False
result = None
output = ""
self.update_state(state='PROGRESS',
meta={'output': output,
'description': "",
'returncode': None})
print(str.format("About to execute: {0}", cmd))
proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(proc.stdout.readline, ''):
print(str(line))
output = output + line
self.update_state(state='PROGRESS', meta={'output': output, 'description': "", 'returncode': None})
return_code = proc.poll()
if return_code is 0:
meta = {'output': output,
'returncode': proc.returncode,
'description': ""
}
self.update_state(state='FINISHED',
meta=meta)
elif return_code is not 0:
# failure
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
self.update_state(state='FAILED',
meta=meta)
if len(output) is 0:
output = "no output, maybe no matching hosts?"
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
return meta
@celery.task(bind=True)
def ansible_playbook_task(self, cmd, type='Ansible-Playbook'):
has_error = False
result = None
output = ""
self.update_state(state='PROGRESS',
meta={'output': output,
'description': "",
'returncode': None})
print(str.format("About to execute: {0}", cmd))
proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(proc.stdout.readline, ''):
print(str(line))
output = output + line
self.update_state(state='PROGRESS', meta={'output': output, 'description': "", 'returncode': None})
return_code = proc.poll()
if return_code is 0:
meta = {'output': output,
'returncode': proc.returncode,
'description': ""
}
self.update_state(state='FINISHED',
meta=meta)
elif return_code is not 0:
# failure
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
self.update_state(state='FAILED',
meta=meta)
if len(output) is 0:
output = "no output, maybe no matching hosts?"
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
return meta
@celery.task(name='celery_tasks.cmd')
def schedule_cmd(cmd):
child = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
out, err = child.communicate()
ret = child.wait()
return {'returncode': ret,
'output': out,
'error:': err
}
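# --- hedged usage sketch, not part of the original module --------------------
# Callers normally enqueue one of the tasks above with .delay() and poll the
# returned AsyncResult; the command string below is a placeholder. (The guard
# is purely illustrative, since this module is imported by the application.)
if __name__ == '__main__':
    result = deploy_running_task.delay('ansible all -m ping', type='Ansible')
    print(result.state)   # e.g. 'PROGRESS', then 'FINISHED' or 'FAILED'
    print(result.info)    # dict carrying 'output', 'returncode', 'description'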
|
mit
| 2,258,274,440,267,406,800
| 35.321168
| 107
| 0.527532
| false
| 4.319444
| false
| false
| false
|
talumbau/webapp-public
|
webapp/apps/taxbrain/helpers.py
|
1
|
26371
|
from collections import namedtuple
import taxcalc
import dropq
import os
import requests
from requests.exceptions import Timeout, RequestException
import json
import pandas as pd
import time
#
# Prepare user params to send to DropQ/Taxcalc
#
NUM_BUDGET_YEARS = int(os.environ.get('NUM_BUDGET_YEARS', 10))
START_YEAR = int(os.environ.get('START_YEAR', 2015))
#Hard fail on lack of dropq workers
dropq_workers = os.environ.get('DROPQ_WORKERS', '')
DROPQ_WORKERS = dropq_workers.split(",")
TAXCALC_COMING_SOON_FIELDS = [
'_Dividend_rt1', '_Dividend_thd1',
'_Dividend_rt2', '_Dividend_thd2',
'_Dividend_rt3', '_Dividend_thd3', '_BE_inc', '_BE_sub',
'_BE_cg_per', '_BE_cg_trn'
]
TAXCALC_COMING_SOON_INDEXED_BY_MARS = [
'_CG_thd1', '_CG_thd2', '_Dividend_thd1','_Dividend_thd2', '_Dividend_thd3'
]
TIMEOUT_IN_SECONDS = 1.0
MAX_ATTEMPTS_SUBMIT_JOB = 20
#
# Display TaxCalc result data
#
TAXCALC_RESULTS_START_YEAR = START_YEAR
TAXCALC_RESULTS_MTABLE_COL_LABELS = taxcalc.TABLE_LABELS
TAXCALC_RESULTS_DFTABLE_COL_LABELS = taxcalc.DIFF_TABLE_LABELS
TAXCALC_RESULTS_MTABLE_COL_FORMATS = [
# divisor, unit, decimals
[ 1000, None, 0], # 'Returns',
[1000000000, 'Dollars', 1], # 'AGI',
[ 1000, None, 0], # 'Standard Deduction Filers',
[1000000000, 'Dollars', 1], # 'Standard Deduction',
[ 1000, None, 0], # 'Itemizers',
[1000000000, 'Dollars', 1], # 'Itemized Deduction',
[1000000000, 'Dollars', 1], # 'Personal Exemption',
[1000000000, 'Dollars', 1], # 'Taxable Income',
[1000000000, 'Dollars', 1], # 'Regular Tax',
[1000000000, 'Dollars', 1], # 'AMTI',
[ 1000, None, 0], # 'AMT Filers',
[1000000000, 'Dollars', 1], # 'AMT',
[1000000000, 'Dollars', 1], # 'Tax before Credits',
[1000000000, 'Dollars', 1], # 'Non-refundable Credits',
[1000000000, 'Dollars', 1], # 'Tax before Refundable Credits',
[1000000000, 'Dollars', 1], # 'Refundable Credits',
[1000000000, 'Dollars', 1], # 'Revenue'
]
TAXCALC_RESULTS_DFTABLE_COL_FORMATS = [
[ 1000, None, 0], # "Inds. w/ Tax Cut",
[ 1000, None, 0], # "Inds. w/ Tax Increase",
[ 1000, None, 0], # "Count",
[ 1, 'Dollars', 0], # "Mean Tax Difference",
[1000000000, 'Dollars', 1], # "Total Tax Difference",
[ 1, '%', 1], # "%age Tax Increase",
[ 1, '%', 1], # "%age Tax Decrease",
[ 1, '%', 1], # "Share of Overall Change"
]
TAXCALC_RESULTS_BIN_ROW_KEYS = dropq.dropq.bin_row_names
TAXCALC_RESULTS_BIN_ROW_KEY_LABELS = {
'less_than_10':'Less than 10',
'ten_twenty':'10-20',
'twenty_thirty':'20-30',
'thirty_forty':'30-40',
'forty_fifty':'40-50',
'fifty_seventyfive':'50-75',
'seventyfive_hundred':'75-100',
'hundred_twohundred':'100-200',
'twohundred_fivehundred':'200-500',
'fivehundred_thousand':'500-1000',
'thousand_up':'1000+',
'all':'All'
}
TAXCALC_RESULTS_DEC_ROW_KEYS = dropq.dropq.decile_row_names
TAXCALC_RESULTS_DEC_ROW_KEY_LABELS = {
'perc0-10':'0-10%',
'perc10-20':'10-20%',
'perc20-30':'20-30%',
'perc30-40':'30-40%',
'perc40-50':'40-50%',
'perc50-60':'50-60%',
'perc60-70':'60-70%',
'perc70-80':'70-80%',
'perc80-90':'80-90%',
'perc90-100':'90-100%',
'all':'All'
}
TAXCALC_RESULTS_TABLE_LABELS = {
'mX_dec': 'Base plan tax vars, weighted avg per AGI decile',
'mY_dec': 'User plan tax vars, weighted avg per AGI decile',
'df_dec': 'Difference between Base and User plans by AGI decile',
'mX_bin': 'Base plan tax vars, weighted avg per income bin',
'mY_bin': 'User plan tax vars, weighted avg per income bin',
'df_bin': 'Difference between Base and User plans by income bin',
'fiscal_tots': 'Total Revenue Change by Calendar Year',
}
def expand_1D(x, num_years):
"""
Expand the given data to account for the given number of budget years.
Expanded entries are None by default
"""
if len(x) >= num_years:
return list(x)
else:
ans = [None] * num_years
ans[:len(x)] = x
return ans
def expand_2D(x, num_years):
"""
Expand the given data to account for the given number of budget years.
For 2D arrays, we expand out the number of rows until we have num_years
number of rows. Added rows have all 'None' entries
"""
if len(x) >= num_years:
return list(x)
else:
ans = []
for i in range(0, num_years):
ans.append([None] * len(x[0]))
for i, arr in enumerate(x):
ans[i] = arr
return ans
def expand_list(x, num_years):
"""
Dispatch to either expand_1D or expand2D depending on the dimension of x
Parameters:
-----------
x : value to expand
num_years: int
Number of budget years to expand
Returns:
--------
expanded list
"""
if isinstance(x[0], list):
return expand_2D(x, num_years)
else:
return expand_1D(x, num_years)
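# Hedged illustration (not part of the original module) of the expansion
# behaviour documented above, using made-up values:
#   expand_1D([10, 20], 4)   -> [10, 20, None, None]
#   expand_2D([[1, 2]], 3)   -> [[1, 2], [None, None], [None, None]]
#   expand_list picks between the two based on whether x[0] is itself a list.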
def convert_to_floats(tsi):
"""
A helper function that takes all of the fields of a TaxSaveInputs model
and converts them to floats, or list of floats
"""
def numberfy_one(x):
if isinstance(x, float):
return x
else:
return float(x)
def numberfy(x):
if isinstance(x, list):
return [numberfy_one(i) for i in x]
else:
return numberfy_one(x)
attrs = vars(tsi)
return { k:numberfy(v) for k,v in attrs.items() if v}
def leave_name_in(key, val, dd):
"""
Under certain conditions, we will remove 'key' and its value
from the dictionary we pass to the dropq package. This function
will test those conditions and return a Bool.
Parameters:
-----------
key: a field name to potentially pass to the dropq package
dd: the default dictionary of data in taxcalc Parameters
Returns:
--------
Bool: True if we allow this field to get passed on. False
if it should be removed.
"""
if key in dd:
return True
else:
print "Don't have this pair: ", key, val
underscore_name_in_defaults = "_" + key in dd
is_cpi_name = key.endswith("_cpi")
is_array_name = (key.endswith("_0") or key.endswith("_1") or
key.endswith("_2") or key.endswith("_3"))
if (underscore_name_in_defaults or is_cpi_name or is_array_name):
return True
else:
return False
def package_up_vars(user_values):
dd = taxcalc.parameters.default_data(start_year=START_YEAR)
for k, v in user_values.items():
if not leave_name_in(k, v, dd):
print "Removing ", k, v
del user_values[k]
name_stems = {}
ans = {}
#Find the 'broken out' array values, these have special treatment
for k, v in user_values.items():
if (k.endswith("_0") or k.endswith("_1") or k.endswith("_2")
or k.endswith("_3")):
vals = name_stems.setdefault(k[:-2], [])
vals.append(k)
#For each array value, expand as necessary based on default data
#then add user values. It is acceptable to leave 'blanks' as None.
#This is handled on the taxcalc side
for k, vals in name_stems.items():
if k in dd:
default_data = dd[k]
param = k
else:
#add a leading underscore
default_data = dd["_" + k]
param = "_" + k
# get max number of years to advance
_max = 0
for name in vals:
num_years = len(user_values[name])
if num_years > _max:
_max = num_years
expnded = expand_list(default_data, _max)
#Now copy necessary data to expanded array
for name in vals:
idx = int(name[-1]) # either 0, 1, 2, 3
user_arr = user_values[name]
for new_arr, user_val in zip(expnded, user_arr):
new_arr[idx] = int(user_val)
del user_values[name]
ans[param] = expnded
#Process remaining values set by user
for k, v in user_values.items():
if k in dd:
default_data = dd[k]
param = k
elif k.endswith("_cpi"):
if k[:-4] in dd:
ans[k] = v
else:
ans['_' + k] = v
continue
else:
#add a leading underscore
default_data = dd["_" + k]
param = "_" + k
num_years = len(v)
expnded = expand_list(default_data, num_years)
for i, new_val in enumerate(v):
expnded[i] = new_val
ans[param] = expnded
return ans
#
# Gather data to assist in displaying TaxCalc param form
#
class TaxCalcField(object):
"""
An atomic unit of data for a TaxCalcParam, which can be stored as a field
Used for both CSV float fields (value column data) and boolean fields (cpi)
"""
def __init__(self, id, label, values, param):
self.id = id
self.label = label
self.values = values
self.param = param
self.values_by_year = {}
for i, value in enumerate(values):
year = param.start_year + i
self.values_by_year[year] = value
self.default_value = self.values_by_year[START_YEAR]
class TaxCalcParam(object):
"""
A collection of TaxCalcFields that represents all configurable details
for one of TaxCalc's Parameters
"""
def __init__(self, param_id, attributes):
self.__load_from_json(param_id, attributes)
def __load_from_json(self, param_id, attributes):
values_by_year = attributes['value']
col_labels = attributes['col_label']
self.tc_id = param_id
self.nice_id = param_id[1:] if param_id[0] == '_' else param_id
self.name = attributes['long_name']
self.info = " ".join([
attributes['description'],
attributes.get('irs_ref') or "", # sometimes this is blank
attributes.get('notes') or "" # sometimes this is blank
]).strip()
# Pretend the start year is 2015 (instead of 2013),
# until values for that year are provided by taxcalc
#self.start_year = int(attributes['start_year'])
self.start_year = START_YEAR
self.coming_soon = (self.tc_id in TAXCALC_COMING_SOON_FIELDS)
# normalize single-year default lists [] to [[]]
if not isinstance(values_by_year[0], list):
values_by_year = [values_by_year]
# organize defaults by column [[A1,B1],[A2,B2]] to [[A1,A2],[B1,B2]]
values_by_col = [list(x) for x in zip(*values_by_year)]
#
# normalize and format column labels
#
if self.tc_id in TAXCALC_COMING_SOON_INDEXED_BY_MARS:
col_labels = ["Single", "Married filing Jointly",
"Married filing Separately", "Head of Household"]
values_by_col = ['0','0','0','0']
elif isinstance(col_labels, list):
if col_labels == ["0kids", "1kid", "2kids", "3+kids"]:
col_labels = ["0 Kids", "1 Kid", "2 Kids", "3+ Kids"]
elif col_labels == ["single", "joint", "separate", "head of household",
"widow", "separate"] or col_labels == \
["single", "joint", "separate", "head of household",
"widow", "separate","dependent"]:
col_labels = ["Single", "Married filing Jointly",
"Married filing Separately", "Head of Household"]
else:
if col_labels == "NA" or col_labels == "":
col_labels = [""]
elif col_labels == "0kids 1kid 2kids 3+kids":
col_labels = ["0 Kids", "1 Kid", "2 Kids", "3+ Kids"]
# create col params
self.col_fields = []
if len(col_labels) == 1:
self.col_fields.append(TaxCalcField(
self.nice_id,
col_labels[0],
values_by_col[0],
self
))
else:
for col, label in enumerate(col_labels):
self.col_fields.append(TaxCalcField(
self.nice_id + "_{0}".format(col),
label,
values_by_col[col],
self
))
# we assume we can CPI inflate if first value isn't a ratio
first_value = self.col_fields[0].values[0]
self.inflatable = first_value > 1 and self.tc_id != '_ACTC_ChildNum'
if self.inflatable:
self.cpi_field = TaxCalcField(self.nice_id + "_cpi", "CPI", [True], self)
# Create a list of default parameters
TAXCALC_DEFAULT_PARAMS_JSON = taxcalc.parameters.default_data(metadata=True, start_year=2015)
default_taxcalc_params = {}
for k,v in TAXCALC_DEFAULT_PARAMS_JSON.iteritems():
param = TaxCalcParam(k,v)
default_taxcalc_params[param.nice_id] = param
#Behavior Effects not in params.json yet. Add in the appropriate info so that
#the params dictionary has the right info
# value, col_label, long_name, description, irs_ref, notes
be_params = []
be_inc_param = {'value':[0], 'col_label':['label'], 'long_name':'Income Effect',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_sub_param = {'value':[0], 'col_label':['label'], 'long_name':'Substitution Effect',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_cg_per_param = {'value':[0], 'col_label':['label'], 'long_name':'Persistent',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_cg_trn_param= {'value':[0], 'col_label':['label'], 'long_name':'Transitory',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_params.append(('_BE_inc', be_inc_param))
be_params.append(('_BE_sub', be_sub_param))
be_params.append(('_BE_cg_per', be_cg_per_param))
be_params.append(('_BE_cg_trn', be_cg_trn_param))
for k,v in be_params:
param = TaxCalcParam(k,v)
default_taxcalc_params[param.nice_id] = param
TAXCALC_DEFAULT_PARAMS = default_taxcalc_params
# Debug TaxParams
"""
for k, param in TAXCALC_DEFAULT_PARAMS.iteritems():
print(' -- ' + k + ' -- ')
print('TC id: ' + param.tc_id)
print('Nice id: ' + param.nice_id)
print('name: ' + param.name)
print('info: ' + param.info + '\n')
if param.inflatable:
field = param.cpi_field
print(field.id + ' - ' + field.label + ' - ' + str(field.values))
for field in param.col_fields:
print(field.id + ' - ' + field.label + ' - ' + str(field.values))
print('\n')
"""
def taxcalc_results_to_tables(results):
"""
Take various results from dropq, i.e. mY_dec, mX_bin, df_dec, etc
Return organized and labeled table results for display
"""
num_years = len(results['fiscal_tots'])
years = list(range(TAXCALC_RESULTS_START_YEAR,
TAXCALC_RESULTS_START_YEAR + num_years))
tables = {}
for table_id in results:
# Debug inputs
"""
print('\n ----- inputs ------- ')
print('looking at {0}'.format(table_id))
if table_id == 'fiscal_tots':
print('{0}'.format(results[table_id]))
else:
print('{0}'.format(results[table_id].keys()))
print(' ----- inputs ------- \n')
"""
if table_id in ['mX_dec', 'mY_dec']:
row_keys = TAXCALC_RESULTS_DEC_ROW_KEYS
row_labels = TAXCALC_RESULTS_DEC_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_MTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_MTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id in ['mX_bin', 'mY_bin']:
row_keys = TAXCALC_RESULTS_BIN_ROW_KEYS
row_labels = TAXCALC_RESULTS_BIN_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_MTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_MTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id == 'df_dec':
row_keys = TAXCALC_RESULTS_DEC_ROW_KEYS
row_labels = TAXCALC_RESULTS_DEC_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_DFTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_DFTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id == 'df_bin':
row_keys = TAXCALC_RESULTS_BIN_ROW_KEYS
row_labels = TAXCALC_RESULTS_BIN_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_DFTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_DFTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id == 'fiscal_tots':
# todo - move these into the above TC result param constants
row_keys = ['totals']
row_labels = {'totals': 'Total Revenue'}
col_labels = years
col_formats = [ [1000000000, 'Dollars', 1] for y in years]
table_data = {'totals': results[table_id]}
multi_year_cells = False
table = {
'col_labels': col_labels,
'cols': [],
'label': TAXCALC_RESULTS_TABLE_LABELS[table_id],
'rows': [],
'multi_valued': multi_year_cells
}
for col_key, label in enumerate(col_labels):
table['cols'].append({
'label': label,
'divisor': col_formats[col_key][0],
'units': col_formats[col_key][1],
'decimals': col_formats[col_key][2],
})
col_count = len(col_labels)
for row_key in row_keys:
row = {
'label': row_labels[row_key],
'cells': []
}
for col_key in range(0, col_count):
cell = {
'year_values': {},
'format': {
'divisor': table['cols'][col_key]['divisor'],
'decimals': table['cols'][col_key]['decimals'],
}
}
if multi_year_cells:
for yi, year in enumerate(years):
value = table_data["{0}_{1}".format(row_key, yi)][col_key]
if value[-1] == "%":
value = value[:-1]
cell['year_values'][year] = value
cell['first_value'] = cell['year_values'][TAXCALC_RESULTS_START_YEAR]
else:
value = table_data[row_key][col_key]
if value[-1] == "%":
value = value[:-1]
cell['value'] = value
row['cells'].append(cell)
table['rows'].append(row)
tables[table_id] = table
# Debug results
"""
print('\n ----- result ------- ')
print('{0}'.format(table))
print(' ----- result ------- \n')
"""
tables['result_years'] = years
return tables
def format_csv(tax_results, url_id):
"""
Takes a dictionary with the tax_results, having these keys:
[u'mY_bin', u'mX_bin', u'mY_dec', u'mX_dec', u'df_dec', u'df_bin',
u'fiscal_tots']
And then returns a list of list of strings for CSV output. The format
of the lines is as follows:
#URL: http://www.ospc.org/taxbrain/ID/csv/
#fiscal tots data
YEAR_0, ... YEAR_K
val, val, ... val
#mX_dec
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#mY_dec
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#df_dec
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#mX_bin
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#mY_bin
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#df_bin
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
"""
res = []
#URL
res.append(["#URL: http://www.ospc.org/taxbrain/" + str(url_id) + "/"])
#FISCAL TOTS
res.append(["#fiscal totals data"])
ft = tax_results.get('fiscal_tots', [])
yrs = [START_YEAR + i for i in range(0, len(ft))]
if yrs:
res.append(yrs)
if ft:
res.append(ft)
#MX_DEC
res.append(["#mX_dec"])
mxd = tax_results.get('mX_dec', {})
if mxd:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_DEC_ROW_KEYS:
res.append(mxd[row+"_" + str(count)])
#MY_DEC
res.append(["#mY_dec"])
myd = tax_results.get('mY_dec', {})
if myd:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_DEC_ROW_KEYS:
res.append(myd[row+"_" + str(count)])
#DF_DEC
res.append(["#df_dec"])
dfd = tax_results.get('df_dec', {})
if dfd:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_DFTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_DEC_ROW_KEYS:
res.append(dfd[row+"_" + str(count)])
#MX_BIN
res.append(["#mX_bin"])
mxb = tax_results.get('mX_bin', {})
if mxb:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_BIN_ROW_KEYS:
res.append(mxb[row+"_" + str(count)])
#MY_BIN
res.append(["#mY_bin"])
myb = tax_results.get('mY_bin', {})
if myb:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_BIN_ROW_KEYS:
res.append(myb[row+"_" + str(count)])
#DF_BIN
res.append(["#df_bin"])
dfb = tax_results.get('df_bin', {})
if dfb:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_DFTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_BIN_ROW_KEYS:
res.append(dfb[row+"_" + str(count)])
return res
def submit_dropq_calculation(mods):
print "mods is ", mods
user_mods = package_up_vars(mods)
if not bool(user_mods):
return False
print "user_mods is ", user_mods
print "submit work"
user_mods={START_YEAR:user_mods}
years = list(range(0,NUM_BUDGET_YEARS))
hostnames = DROPQ_WORKERS
num_hosts = len(hostnames)
data = {}
data['user_mods'] = json.dumps(user_mods)
job_ids = []
hostname_idx = 0
for y in years:
year_submitted = False
attempts = 0
while not year_submitted:
data['year'] = str(y)
theurl = "http://{hn}/dropq_start_job".format(hn=hostnames[hostname_idx])
try:
response = requests.post(theurl, data=data, timeout=TIMEOUT_IN_SECONDS)
if response.status_code == 200:
print "submitted: ", str(y), hostnames[hostname_idx]
year_submitted = True
job_ids.append((response.text, hostnames[hostname_idx]))
hostname_idx = (hostname_idx + 1) % num_hosts
else:
print "FAILED: ", str(y), hostnames[hostname_idx]
hostname_idx = (hostname_idx + 1) % num_hosts
attempts += 1
except Timeout:
print "Couldn't submit to: ", hostnames[hostname_idx]
hostname_idx = (hostname_idx + 1) % num_hosts
attempts += 1
except RequestException as re:
print "Something unexpected happened: ", re
hostname_idx = (hostname_idx + 1) % num_hosts
attempts += 1
if attempts > MAX_ATTEMPTS_SUBMIT_JOB:
print "Exceeded max attempts. Bailing out."
raise IOError()
return job_ids
def dropq_results_ready(job_ids):
jobs_done = [False] * len(job_ids)
for idx, id_hostname in enumerate(job_ids):
id_, hostname = id_hostname
result_url = "http://{hn}/dropq_query_result".format(hn=hostname)
job_response = requests.get(result_url, params={'job_id':id_})
if job_response.status_code == 200: # Valid response
rep = job_response.text
if rep == 'YES':
jobs_done[idx] = True
print "got one!: ", id_
return all(jobs_done)
def dropq_get_results(job_ids):
ans = []
for idx, id_hostname in enumerate(job_ids):
id_, hostname = id_hostname
result_url = "http://{hn}/dropq_get_result".format(hn=hostname)
job_response = requests.get(result_url, params={'job_id':id_})
if job_response.status_code == 200: # Valid response
ans.append(job_response.json())
mY_dec = {}
mX_dec = {}
df_dec = {}
mY_bin = {}
mX_bin = {}
df_bin = {}
fiscal_tots = []
for result in ans:
mY_dec.update(result['mY_dec'])
mX_dec.update(result['mX_dec'])
df_dec.update(result['df_dec'])
mY_bin.update(result['mY_bin'])
mX_bin.update(result['mX_bin'])
df_bin.update(result['df_bin'])
fiscal_tots.append(result['fiscal_tots'])
results = {'mY_dec': mY_dec, 'mX_dec': mX_dec, 'df_dec': df_dec,
'mY_bin': mY_bin, 'mX_bin': mX_bin, 'df_bin': df_bin,
'fiscal_tots': fiscal_tots}
return results
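# --- hedged usage sketch, not part of the original module --------------------
# Typical flow through the dropq helpers above: package the user's reform,
# submit one job per budget year, poll until every worker reports completion,
# then merge and format the results. The reform dictionary below is invented.
if __name__ == '__main__':
    example_reform = {'_II_em': [4000.0]}   # hypothetical parameter change
    jobs = submit_dropq_calculation(example_reform)
    while jobs and not dropq_results_ready(jobs):
        time.sleep(5)
    if jobs:
        tables = taxcalc_results_to_tables(dropq_get_results(jobs))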
|
mit
| -2,011,549,738,239,617,500
| 31.881546
| 93
| 0.540101
| false
| 3.344451
| false
| false
| false
|
lucc/alot
|
tests/utils/argparse_test.py
|
1
|
5935
|
# encoding=utf-8
# Copyright © 2017 Dylan Baker
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests for alot.utils.argparse"""
import argparse
import contextlib
import os
import shutil
import tempfile
import unittest
import mock
from alot.utils import argparse as cargparse
# Good descriptive test names often don't fit PEP8, which is meant to cover
# functions meant to be called by humans.
# pylint: disable=invalid-name
# When using mock asserts its possible that many methods will not use self,
# that's fine
# pylint: disable=no-self-use
class TestValidatedStore(unittest.TestCase):
"""Tests for the ValidatedStore action class."""
def _argparse(self, args):
"""Create an argparse instance with a validator."""
def validator(args):
if args == 'fail':
raise cargparse.ValidationFailed
parser = argparse.ArgumentParser()
parser.add_argument(
'foo',
action=cargparse.ValidatedStoreAction,
validator=validator)
with mock.patch('sys.stderr', mock.Mock()):
return parser.parse_args(args)
def test_validates(self):
# Argparse will raise a SystemExit (calls sys.exit) rather than letting
# the exception cause the program to close.
with self.assertRaises(SystemExit):
self._argparse(['fail'])
@contextlib.contextmanager
def temporary_directory(suffix='', prefix='', dir=None): # pylint: disable=redefined-builtin
"""Python3 interface implementation.
Python3 provides a class that can be used as a context manager, which
creates a temporary directory and removes it when the context manager
exits. This function emulates enough of the interface of
TemporaryDirectory, for this module to use, and is designed as a drop in
replacement that can be replaced after the python3 port.
The only user visible difference is that this does not implement the
cleanup method that TemporaryDirectory does.
"""
directory = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
yield directory
shutil.rmtree(directory)
class TestRequireFile(unittest.TestCase):
"""Tests for the require_file validator."""
def test_doesnt_exist(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file(os.path.join(d, 'doesnt-exist'))
def test_dir(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file(d)
def test_file(self):
with tempfile.NamedTemporaryFile() as f:
cargparse.require_file(f.name)
def test_char_special(self):
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file('/dev/null')
def test_fifo(self):
with temporary_directory() as d:
path = os.path.join(d, 'fifo')
os.mkfifo(path)
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file(path)
class TestRequireDir(unittest.TestCase):
"""Tests for the require_dir validator."""
def test_doesnt_exist(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir(os.path.join(d, 'doesnt-exist'))
def test_dir(self):
with temporary_directory() as d:
cargparse.require_dir(d)
def test_file(self):
with tempfile.NamedTemporaryFile() as f:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir(f.name)
def test_char_special(self):
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir('/dev/null')
def test_fifo(self):
with temporary_directory() as d:
path = os.path.join(d, 'fifo')
os.mkfifo(path)
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir(path)
class TestOptionalFileLike(unittest.TestCase):
"""Tests for the optional_file_like validator."""
def test_doesnt_exist(self):
with temporary_directory() as d:
cargparse.optional_file_like(os.path.join(d, 'doesnt-exist'))
def test_dir(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.optional_file_like(d)
def test_file(self):
with tempfile.NamedTemporaryFile() as f:
cargparse.optional_file_like(f.name)
def test_char_special(self):
cargparse.optional_file_like('/dev/null')
def test_fifo(self):
with temporary_directory() as d:
path = os.path.join(d, 'fifo')
os.mkfifo(path)
cargparse.optional_file_like(path)
class TestIntOrPlusOrMinus(unittest.TestCase):
"""Tests for the is_int_or_pm validator."""
def test_int(self):
self.assertTrue(cargparse.is_int_or_pm('5'))
def test_pm(self):
self.assertTrue(cargparse.is_int_or_pm('+'))
self.assertTrue(cargparse.is_int_or_pm('-'))
def test_rubbish(self):
with self.assertRaises(cargparse.ValidationFailed):
cargparse.is_int_or_pm('XX')
|
gpl-3.0
| -1,888,860,210,976,214,300
| 32.337079
| 93
| 0.667678
| false
| 4.138075
| true
| false
| false
|
monkut/deso
|
deso/deso/layers/raster/management/commands/fill_raster_layer_cache.py
|
1
|
2025
|
"""
Make requests for tiles at given zoom levels to fill the tilecache.
"""
from django.core.management.base import BaseCommand
from django.conf import settings
from ...models import RasterAggregatedLayer
WGS84_SRID = settings.WGS84_SRID
def request_layer_tiles(layer_url, layer, zoom):
"""
Request tiles for given layer
    :param layer_url: Absolute URL with a {layer_id} placeholder (for example: http://HOST:PORT/pathtolayer/{layer_id}/)
:param layer: RasterAggregatedLayer object
:param zoom: Zoom level
:return: tile count
"""
    count = 0
    # Tile requests are not implemented in this snapshot; return the (zero)
    # count explicitly so the documented return value is honoured.
    return count
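# --- Illustrative sketch (not part of the original command) --------------------
# One plausible way request_layer_tiles() could be completed: enumerate the
# slippy-map (XYZ) tile indices covering a WGS84 bounding box at the requested
# zoom and issue one GET per tile so the cache gets populated.  The
# "{z}/{x}/{y}/" tile path and the use of the `requests` library are
# assumptions, not something this project necessarily provides.
import math

import requests  # assumed to be available in the deployment environment


def _example_fill_cache(expanded_layer_url, bbox, zoom):
    """Request every XYZ tile covering bbox=(min_lon, min_lat, max_lon, max_lat) at `zoom`."""
    min_lon, min_lat, max_lon, max_lat = bbox

    def tile_index(lon, lat):
        # standard Web Mercator tile arithmetic
        n = 2 ** zoom
        x = int((lon + 180.0) / 360.0 * n)
        lat_rad = math.radians(lat)
        y = int((1.0 - math.log(math.tan(lat_rad) + 1.0 / math.cos(lat_rad)) / math.pi) / 2.0 * n)
        return x, y

    x_a, y_a = tile_index(min_lon, min_lat)
    x_b, y_b = tile_index(max_lon, max_lat)
    count = 0
    for x in range(min(x_a, x_b), max(x_a, x_b) + 1):
        for y in range(min(y_a, y_b), max(y_a, y_b) + 1):
            requests.get("{}{}/{}/{}/".format(expanded_layer_url, zoom, x, y))
            count += 1
    return count
# --------------------------------------------------------------------------------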
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument("-l", "--layers",
type=int,
nargs="+",
required=True,
default=None,
help="RasterAggregatedLayer Id(s) of layers to cache")
parser.add_argument("-z", "--zooms",
type=int,
nargs="+",
default=[14,],
help="Zoom Level(s) to cache [DEFAULT=14]")
DEFAULT_RASTER_LAYERS_URL = "http://{}:{}/raster/layer/{{layer_id}}/".format(settings.HOST,
settings.PORT)
parser.add_argument("-u", "--url",
default=DEFAULT_RASTER_LAYERS_URL,
help="Raster Layers URL to send requests to [DEFAULT='{}']".format(DEFAULT_RASTER_LAYERS_URL))
def handle(self, *args, **options):
layer_ids = sorted(options["layers"])
for layer_id in layer_ids:
try:
layer = RasterAggregatedLayer.objects.get(id=layer_id)
            except RasterAggregatedLayer.DoesNotExist:
                self.stderr.write("Given RasterAggregatedLayer({}) Does Not Exist -- SKIPPING!".format(layer_id))
                continue  # skip this id; otherwise `layer` would be unbound below
            center = layer.get_center()
|
mit
| 2,064,351,220,925,089,800
| 37.207547
| 122
| 0.536296
| false
| 4.336188
| false
| false
| false
|
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/uploadCustomCertificate.py
|
1
|
1685
|
"""Uploads a custom certificate for the console proxy VMs to use for SSL. Can be used to upload a single certificate signed by a known CA. Can also be used, through multiple calls, to upload a chain of certificates from CA to the custom certificate itself."""
from baseCmd import *
from baseResponse import *
class uploadCustomCertificateCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""The certificate to be uploaded."""
"""Required"""
self.certificate = None
self.typeInfo['certificate'] = 'string'
"""DNS domain suffix that the certificate is granted for."""
"""Required"""
self.domainsuffix = None
self.typeInfo['domainsuffix'] = 'string'
"""An integer providing the location in a chain that the certificate will hold. Usually, this can be left empty. When creating a chain, the top level certificate should have an ID of 1, with each step in the chain incrementing by one. Example, CA with id = 1, Intermediate CA with id = 2, Site certificate with ID = 3"""
self.id = None
self.typeInfo['id'] = 'integer'
"""A name / alias for the certificate."""
self.name = None
self.typeInfo['name'] = 'string'
"""The private key for the attached certificate."""
self.privatekey = None
self.typeInfo['privatekey'] = 'string'
self.required = ["certificate", "domainsuffix", ]
class uploadCustomCertificateResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""message of the certificate upload operation"""
self.message = None
self.typeInfo['message'] = 'string'
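# Illustrative sketch (not part of the generated API module): how a caller might
# populate the three commands needed to upload a CA -> intermediate -> site
# certificate chain, as described in the module docstring.  The domain suffix is
# a made-up placeholder and the client object that would actually dispatch each
# command is deliberately left out.
def _example_chain_upload_cmds(root_pem, intermediate_pem, site_pem, site_key):
    cmds = []
    for position, cert in enumerate([root_pem, intermediate_pem, site_pem], start=1):
        cmd = uploadCustomCertificateCmd()
        cmd.certificate = cert
        cmd.domainsuffix = "realhostip.example.com"  # hypothetical DNS domain suffix
        cmd.id = position  # 1 = root CA, 2 = intermediate CA, 3 = site certificate
        cmd.name = "chain-part-{0}".format(position)
        if position == 3:
            cmd.privatekey = site_key  # private key accompanies only the site certificate
        cmds.append(cmd)
    return cmds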
|
apache-2.0
| 2,818,282,309,625,276,000
| 43.342105
| 328
| 0.651632
| false
| 4.469496
| false
| false
| false
|
intelligent-agent/redeem
|
tests/gcode/test_M114.py
|
1
|
1069
|
from __future__ import absolute_import
import mock
from random import random
from .MockPrinter import MockPrinter
from redeem.Gcode import Gcode
class M114_Tests(MockPrinter):
def test_gcodes_M114(self):
A = round(random() * 200, 1)
B = round(random() * 200, 1)
C = round(random() * 200, 1)
X = round(random() * 200, 1)
Y = round(random() * 200, 1)
Z = round(random() * 200, 1)
E = round(random() * 200, 1)
H = round(random() * 200, 1)
self.printer.path_planner.get_current_pos = mock.Mock(return_value={
'A': A,
'C': C,
'B': B,
'E': E,
'H': H,
'Y': Y,
'X': X,
'Z': Z
})
g = Gcode({"message": "M114"})
self.printer.processor.gcodes[g.gcode].execute(g)
self.printer.path_planner.get_current_pos.assert_called_with(
ideal=True, mm=True) # kinda redundant, but hey.
self.assertEqual(
g.answer,
"ok C: X:{:.1f} Y:{:.1f} Z:{:.1f} E:{:.1f} A:{:.1f} B:{:.1f} C:{:.1f} H:{:.1f}".format(
X, Y, Z, E, A, B, C, H))
|
gpl-3.0
| 9,131,512,291,480,535,000
| 27.891892
| 95
| 0.533209
| false
| 2.783854
| false
| false
| false
|
sunnychaudhari/gstudio
|
gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py
|
1
|
226833
|
''' -- imports from python libraries -- '''
# import os -- Keep such imports here
import datetime
import csv
import time
import ast
import json
import math
import multiprocessing
import hashlib  # explicit import; md5 checksums are used below for file de-duplication and module_set hashing
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.http import StreamingHttpResponse
from django.http import Http404
from django.core.paginator import Paginator
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from mongokit import paginator
from django.contrib.sites.models import Site
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS
from gnowsys_ndf.settings import STATIC_ROOT, STATIC_URL
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.models import *
from gnowsys_ndf.ndf.org2any import org2html
from gnowsys_ndf.ndf.views.file import *
from gnowsys_ndf.ndf.views.methods import check_existing_group, get_drawers, get_node_common_fields, get_node_metadata, create_grelation,create_gattribute,create_task,parse_template_data,get_execution_time
from gnowsys_ndf.ndf.views.methods import get_widget_built_up_data, parse_template_data
from gnowsys_ndf.ndf.views.methods import create_grelation, create_gattribute, create_task
from gnowsys_ndf.ndf.templatetags.ndf_tags import get_profile_pic, edit_drawer_widget, get_contents
from gnowsys_ndf.mobwrite.models import ViewObj
from gnowsys_ndf.notification import models as notification
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Theme'})
topic_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Topic'})
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
# This function is used to check (while creating a new group) whether the group already exists.
# It is called on the group_name text box's lost-focus event to check for an existing group,
# in order to avoid duplicate group names.
@get_execution_time
class Encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ObjectId):
return str(obj)
else:
return obj
def checkgroup(request, group_name):
titl = request.GET.get("gname", "")
retfl = check_existing_group(titl)
if retfl:
return HttpResponse("success")
else:
return HttpResponse("failure")
@get_execution_time
def terms_list(request, group_id):
if request.is_ajax() and request.method == "POST":
# page number which have clicked on pagination
page_no = request.POST.get("page_no", '')
terms = []
gapp_GST = node_collection.one({'_type': 'MetaType', 'name': 'GAPP'})
term_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Term', 'member_of':ObjectId(gapp_GST._id) })
# To list all term instances
terms_list = node_collection.find({
'_type': 'GSystem', 'member_of': ObjectId(term_GST._id),
'group_set': ObjectId(group_id)
}).sort('name', 1)
paged_terms = paginator.Paginator(terms_list, page_no, 25)
        # Since "paged_terms" returns a dict, we append its items to a list to forward to the template
for each in paged_terms.items:
terms.append(each)
return render_to_response(
'ndf/terms_list.html',
{
'group_id': group_id, 'groupid': group_id, "paged_terms": terms,
'page_info': paged_terms
},
context_instance=RequestContext(request)
)
# This ajax view renders the output as "node view" when an item in the collection is clicked
@get_execution_time
def collection_nav(request, group_id):
'''
    This ajax function returns the node on the main template when it is clicked in the collection hierarchy
'''
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
curr_node_id = request.POST.get("curr_node", '')
node_type = request.POST.get("nod_type", '')
breadcrumbs_list = []
curr_node_obj = node_collection.one({'_id': ObjectId(curr_node_id)})
if node_type == "Topic":
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
for e in curr_node_obj.prior_node:
prior = node_collection.one({'_id': ObjectId(e)})
if curr_node_obj._id in prior.collection_set and theme_item_GST._id in prior.member_of:
breadcrumbs_list.append((str(prior._id), prior.name))
topic = ""
node_obj = node_collection.one({'_id': ObjectId(node_id)})
nav_list = request.POST.getlist("nav[]", '')
n_list = request.POST.get("nav", '')
        # "n_list" is used to manipulate breadcrumb events and their navigation
        if n_list:
            # Convert the incoming list from the template into a Python list
            n_list = n_list.replace("&#39;", "'")
            n_list = ast.literal_eval(n_list)
            # Remove elements from the breadcrumbs list based on which node was clicked
for e in reversed(n_list):
if e != unicode(node_obj._id):
n_list.remove(e)
else:
break
nav_list = n_list
# Firstly original node should go into breadcrumbs list
breadcrumbs_list.append( (str(curr_node_obj._id), curr_node_obj.name) )
if nav_list:
            # create breadcrumbs list from the navigation list sent from the template.
for each in nav_list:
obj = node_collection.one({'_id': ObjectId(each) })
breadcrumbs_list.append( (str(obj._id), obj.name) )
b_list = []
for each in breadcrumbs_list:
b_list.append(each[0])
if str(node_obj._id) not in b_list:
# Add the tuple if clicked node is not there in breadcrumbs list
breadcrumbs_list.append( (str(node_obj._id), node_obj.name) )
else:
                # Remove breadcrumbs until the clicked node is reached (removal starts in reverse order)
for e in reversed(breadcrumbs_list):
if node_id in e:
break
else:
breadcrumbs_list.remove(e)
# print "breadcrumbs_list: ",breadcrumbs_list,"\n"
return render_to_response('ndf/node_ajax_view.html',
{ 'node': node_obj,
'original_node':curr_node_obj,
'group_id': group_id,
'groupid':group_id,
'breadcrumbs_list':breadcrumbs_list,
'app_id': node_id, 'topic':topic, 'nav_list':nav_list
},
context_instance = RequestContext(request)
)
# This view handles the collection list of resource and its breadcrumbs
@get_execution_time
def collection_view(request, group_id):
'''
This ajax function returns breadcrumbs_list for clicked node in collection hierarchy
'''
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
# breadcrumbs_list = request.POST.get("breadcrumbs_list", '')
node_obj = node_collection.one({'_id': ObjectId(node_id)})
# breadcrumbs_list = breadcrumbs_list.replace("'","'")
# breadcrumbs_list = ast.literal_eval(breadcrumbs_list)
# b_list = []
# for each in breadcrumbs_list:
# b_list.append(each[0])
# if str(node_obj._id) not in b_list:
# # Add the tuple if clicked node is not there in breadcrumbs list
# breadcrumbs_list.append( (str(node_obj._id), node_obj.name) )
# else:
# # To remove breadcrumbs untill clicked node have not reached(Removal starts in reverse order)
# for e in reversed(breadcrumbs_list):
# if node_id in e:
# break
# else:
# breadcrumbs_list.remove(e)
return render_to_response('ndf/collection_ajax_view.html',
{
'node': node_obj, 'group_id': group_id, 'groupid': group_id
},context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def shelf(request, group_id):
if request.is_ajax() and request.method == "POST":
shelf = request.POST.get("shelf_name", '')
shelf_add = request.POST.get("shelf_add", '')
shelf_remove = request.POST.get("shelf_remove", '')
shelf_item_remove = request.POST.get("shelf_item_remove", '')
shelf_available = ""
shelf_item_available = ""
shelf_gst = node_collection.one({'_type': u'GSystemType', 'name': u'Shelf'})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
has_shelf_RT = node_collection.one({'_type': u'RelationType', 'name': u'has_shelf'})
if shelf:
shelf_gs = node_collection.one({'name': unicode(shelf), 'member_of': [ObjectId(shelf_gst._id)] })
if shelf_gs is None:
shelf_gs = node_collection.collection.GSystem()
shelf_gs.name = unicode(shelf)
shelf_gs.created_by = int(request.user.id)
shelf_gs.member_of.append(shelf_gst._id)
shelf_gs.save()
shelf_R = triple_collection.collection.GRelation()
shelf_R.subject = ObjectId(auth._id)
shelf_R.relation_type = has_shelf_RT
shelf_R.right_subject = ObjectId(shelf_gs._id)
shelf_R.save()
else:
if shelf_add:
shelf_item = ObjectId(shelf_add)
if shelf_item in shelf_gs.collection_set:
shelf_Item = node_collection.one({'_id': ObjectId(shelf_item)}).name
shelf_item_available = shelf_Item
return HttpResponse("failure")
else:
node_collection.collection.update({'_id': shelf_gs._id}, {'$push': {'collection_set': ObjectId(shelf_item) }}, upsert=False, multi=False)
shelf_gs.reload()
elif shelf_item_remove:
shelf_item = node_collection.one({'name': unicode(shelf_item_remove)})._id
node_collection.collection.update({'_id': shelf_gs._id}, {'$pull': {'collection_set': ObjectId(shelf_item) }}, upsert=False, multi=False)
shelf_gs.reload()
else:
shelf_available = shelf
elif shelf_remove:
shelf_gs = node_collection.one({'name': unicode(shelf_remove), 'member_of': [ObjectId(shelf_gst._id)] })
shelf_rel = triple_collection.one({'_type': 'GRelation', 'subject': ObjectId(auth._id),'right_subject': ObjectId(shelf_gs._id) })
shelf_rel.delete()
shelf_gs.delete()
else:
shelf_gs = None
shelves = []
shelf_list = {}
if auth:
shelf = triple_collection.find({'_type': 'GRelation', 'subject': ObjectId(auth._id), 'relation_type.$id': has_shelf_RT._id})
if shelf:
for each in shelf:
shelf_name = node_collection.one({'_id': ObjectId(each.right_subject)})
shelves.append(shelf_name)
shelf_list[shelf_name.name] = []
for ID in shelf_name.collection_set:
shelf_item = node_collection.one({'_id': ObjectId(ID)})
shelf_list[shelf_name.name].append(shelf_item.name)
else:
shelves = []
return render_to_response('ndf/shelf.html',
{ 'shelf_obj': shelf_gs,'shelf_list': shelf_list,'shelves': shelves,
'groupid':group_id
},
context_instance = RequestContext(request)
)
@get_execution_time
def drawer_widget(request, group_id):
drawer = None
drawers = None
drawer1 = None
drawer2 = None
dict_drawer = {}
dict1 = {}
dict2 = []
nlist = []
node = None
node_id = request.POST.get("node_id", '')
field = request.POST.get("field", '')
app = request.POST.get("app", '')
page_no = request.POST.get("page_no", '')
if node_id:
node = node_collection.one({'_id': ObjectId(node_id)})
if field == "prior_node":
app = None
nlist = node.prior_node
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
elif field == "teaches":
app = None
relationtype = node_collection.one({"_type": "RelationType", "name": "teaches"})
list_grelations = triple_collection.find({"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
elif field == "assesses":
app = field
relationtype = node_collection.one({"_type": "RelationType", "name": "assesses"})
list_grelations = triple_collection.find({"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
elif field == "collection":
if app == "Quiz":
app = "QuizItem"
elif app == "Theme":
app = "Theme"
elif app == "Theme Item":
            app = "theme_item"
elif app == "Topic":
app = "Topic"
elif app == "Module":
app = "Module"
else:
app = None
nlist = node.collection_set
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
else:
if field == "collection" and app == "Quiz":
app = "QuizItem"
elif field == "collection" and app == "Theme":
app = "Theme"
elif field == "collection" and app == "Theme Item":
app = "theme_item"
elif field == "collection" and app == "Course":
app = "Module"
else:
app = None
nlist = []
drawer, paged_resources = get_drawers(group_id, None, nlist, page_no, app)
drawers = drawer
if not node_id:
drawer1 = drawers
else:
drawer1 = drawers['1']
drawer2 = drawers['2']
return render_to_response('ndf/drawer_widget.html',
{ 'widget_for': field,'drawer1': drawer1, 'drawer2': drawer2,'node_id': node_id,
'group_id': group_id,'groupid': group_id,"page_info": paged_resources
},
context_instance = RequestContext(request)
)
@get_execution_time
def select_drawer(request, group_id):
if request.is_ajax() and request.method == "POST":
drawer = None
drawers = None
drawer1 = None
drawer2 = None
selection_flag = True
node = None
dict_drawer = {}
dict1 = {}
dict2 = []
nlist=[]
check = ""
checked = ""
relationtype = ""
node_id = request.POST.get("node_id", '')
page_no = request.POST.get("page_no", '')
field = request.POST.get("field", '')
checked = request.POST.get("homo_collection", '')
node_type = request.POST.get("node_type", '')
if node_id:
node_id = ObjectId(node_id)
node = node_collection.one({'_id': ObjectId(node_id)})
if node_type:
if len(node.member_of) > 1:
n_type = node_collection.one({'_id': ObjectId(node.member_of[1])})
else:
n_type = node_collection.one({'_id': ObjectId(node.member_of[0])})
checked = n_type.name
if checked:
if checked == "QuizObj" :
quiz = node_collection.one({'_type': 'GSystemType', 'name': "Quiz" })
quizitem = node_collection.one({'_type': 'GSystemType', 'name': "QuizItem"})
elif checked == "Pandora Video":
check = node_collection.one({'_type': 'GSystemType', 'name': 'Pandora_video'})
else:
check = node_collection.one({'_type': 'GSystemType', 'name': unicode(checked)})
if node_id:
if field:
if field == "teaches":
relationtype = node_collection.one({"_type": "RelationType", "name":"teaches"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id,
"relation_type": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "assesses":
relationtype = node_collection.one({"_type": "RelationType", "name":"assesses"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id,
"relation_type": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "prior_node":
nlist = node.prior_node
elif field == "collection":
nlist = node.collection_set
else:
node_id = None
if node_id:
if node.collection_set:
if checked:
for k in node.collection_set:
obj = node_collection.one({'_id': ObjectId(k)})
if check:
if check._id in obj.member_of:
nlist.append(k)
else:
if quiz._id in obj.member_of or quizitem._id in obj.member_of:
nlist.append(k)
else:
nlist = node.collection_set
if field == "assesses":
checked = field
checked = None
drawer, paged_resources = get_drawers(group_id, node_id, nlist, page_no, checked)#get_drawers(group_id, node_id, nlist, checked)
if field == "course_units":
nlist.append("course_units")
selection_flag = False
drawers = get_drawers(group_id, node_id, nlist, checked)
drawers = drawer
if not node_id:
drawer1 = drawers
else:
drawer1 = drawers['1']
drawer2 = drawers['2']
if not field:
field = "collection"
return render_to_response("ndf/drawer_widget.html",
{"widget_for": field, "page_info": paged_resources,
"drawer1": drawer1, 'selection': selection_flag, 'node_id':node_id,
"drawer2": drawer2, "checked": checked,
"groupid": group_id
},
context_instance=RequestContext(request)
)
@get_execution_time
def search_drawer(request, group_id):
if request.is_ajax() and request.method == "POST":
search_name = request.POST.get("search_name", '')
node_id = request.POST.get("node_id", '')
selection = request.POST.get("selection", '')
field = request.POST.get("field", '')
search_drawer = None
drawers = None
drawer1 = None
drawer2 = None
dict_drawer = {}
dict1 = {}
dict2 = []
nlist=[]
node = None
page_no = 1
Page = node_collection.one({'_type': 'GSystemType', 'name': 'Page'})
File = node_collection.one({'_type': 'GSystemType', 'name': 'File'})
Quiz = node_collection.one({'_type': "GSystemType", 'name': "Quiz"})
if node_id:
node = node_collection.one({'_id': ObjectId(node_id)})
node_type = node_collection.one({'_id': ObjectId(node.member_of[0])})
if field:
if field == "teaches":
relationtype = node_collection.one({"_type": "RelationType", "name": "teaches"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "assesses":
relationtype = node_collection.one({"_type": "RelationType", "name": "assesses"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "prior_node":
nlist = node.prior_node
elif field == "collection":
nlist = node.collection_set
node.reload()
search_drawer = node_collection.find({'_type': {'$in' : [u"GSystem", u"File"]},
'member_of':{'$in':[Page._id,File._id,Quiz._id]},
'$and': [
{'name': {'$regex': str(search_name), '$options': "i"}},
{'group_set': {'$all': [ObjectId(group_id)]} }
]
})
else:
search_drawer = node_collection.find({'_type': {'$in' : [u"GSystem", u"File"]},
'member_of':{'$in':[Page._id,File._id,Quiz._id]},
'$and': [
{'name': {'$regex': str(search_name), '$options': "i"}},
{'group_set': {'$all': [ObjectId(group_id)]} }
]
})
if node_id:
for each in search_drawer:
if each._id != node._id:
if each._id not in nlist:
dict1[each._id] = each
for oid in nlist:
obj = node_collection.one({'_id': oid })
dict2.append(obj)
dict_drawer['1'] = dict1
dict_drawer['2'] = dict2
else:
if (node is None) and (not nlist):
for each in search_drawer:
dict_drawer[each._id] = each
drawers = dict_drawer
if not node_id:
drawer1 = drawers
else:
drawer1 = drawers['1']
drawer2 = drawers['2']
return render_to_response("ndf/drawer_widget.html",
{"widget_for": field,
"drawer1": drawer1, 'selection': selection,
"drawer2": drawer2, 'search_name': search_name,
"groupid": group_id, 'node_id': node_id
},
context_instance=RequestContext(request)
)
@get_execution_time
def get_topic_contents(request, group_id):
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
selected = request.POST.get("selected", '')
choice = request.POST.get("choice", '')
# node = node_collection.one({'_id': ObjectId(node_id) })
contents = get_contents(node_id, selected, choice)
return HttpResponse(json.dumps(contents))
#### Below part is for manipulating theme topic hierarchy ####
@get_execution_time
def get_collection_list(collection_list, node):
inner_list = []
error_list = []
if node.collection_set:
for each in node.collection_set:
col_obj = node_collection.one({'_id': ObjectId(each)})
if col_obj:
if theme_item_GST._id in col_obj.member_of or topic_GST._id in col_obj.member_of:
for cl in collection_list:
if cl['id'] == node.pk:
node_type = node_collection.one({'_id': ObjectId(col_obj.member_of[0])}).name
inner_sub_dict = {'name': col_obj.name, 'id': col_obj.pk , 'node_type': node_type}
inner_sub_list = [inner_sub_dict]
inner_sub_list = get_collection_list(inner_sub_list, col_obj)
if inner_sub_list:
inner_list.append(inner_sub_list[0])
else:
inner_list.append(inner_sub_dict)
cl.update({'children': inner_list })
else:
error_message = "\n TreeHierarchyError: Node with given ObjectId ("+ str(each) +") not found!!!\n"
print "\n " + error_message
return collection_list
else:
return collection_list
@get_execution_time
def get_tree_hierarchy(request, group_id, node_id):
node = node_collection.one({'_id':ObjectId(node_id)})
Collapsible = request.GET.get("collapsible", "");
data = ""
collection_list = []
themes_list = []
theme_node = node_collection.one({'_id': ObjectId(node._id) })
# print "\ntheme_node: ",theme_node.name,"\n"
if theme_node.collection_set:
for e in theme_node.collection_set:
objs = node_collection.one({'_id': ObjectId(e) })
for l in objs.collection_set:
themes_list.append(l)
for each in theme_node.collection_set:
obj = node_collection.one({'_id': ObjectId(each) })
if obj._id not in themes_list:
if theme_item_GST._id in obj.member_of or topic_GST._id in obj.member_of:
node_type = node_collection.one({'_id': ObjectId(obj.member_of[0])}).name
collection_list.append({'name': obj.name, 'id': obj.pk, 'node_type': node_type})
collection_list = get_collection_list(collection_list, obj)
if Collapsible:
data = { "name": theme_node.name, "children": collection_list }
else:
data = collection_list
return HttpResponse(json.dumps(data))
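# Illustrative sketch (not part of the original view): the JSON shape produced by
# get_tree_hierarchy() when the "collapsible" flag is passed; names and ids below
# are made-up placeholders.
# {
#     "name": "Mathematics",
#     "children": [
#         {"name": "Algebra", "id": "53af...", "node_type": "theme_item",
#          "children": [
#              {"name": "Linear equations", "id": "53b0...", "node_type": "Topic"}
#          ]}
#     ]
# }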
# ###End of manipulating theme topic hierarchy####
##### Below part is for manipulating nodes' collections #####
@get_execution_time
def get_inner_collection(collection_list, node):
inner_list = []
error_list = []
if node.collection_set:
for each in node.collection_set:
col_obj = node_collection.one({'_id': ObjectId(each)})
if col_obj:
for cl in collection_list:
if cl['id'] == node.pk:
node_type = node_collection.one({'_id': ObjectId(col_obj.member_of[0])}).name
inner_sub_dict = {'name': col_obj.name, 'id': col_obj.pk,'node_type': node_type}
inner_sub_list = [inner_sub_dict]
inner_sub_list = get_inner_collection(inner_sub_list, col_obj)
if inner_sub_list:
inner_list.append(inner_sub_list[0])
else:
inner_list.append(inner_sub_dict)
cl.update({'children': inner_list })
else:
error_message = "\n TreeHierarchyError: Node with given ObjectId ("+ str(each) +") not found!!!\n"
print "\n " + error_message
return collection_list
else:
return collection_list
@get_execution_time
def get_collection(request, group_id, node_id):
node = node_collection.one({'_id':ObjectId(node_id)})
# print "\nnode: ",node.name,"\n"
collection_list = []
if node:
if node.collection_set:
for each in node.collection_set:
obj = node_collection.one({'_id': ObjectId(each) })
if obj:
node_type = node_collection.one({'_id': ObjectId(obj.member_of[0])}).name
collection_list.append({'name': obj.name, 'id': obj.pk,'node_type': node_type})
collection_list = get_inner_collection(collection_list, obj)
data = collection_list
return HttpResponse(json.dumps(data))
# ###End of manipulating nodes collection####
@get_execution_time
def add_sub_themes(request, group_id):
if request.is_ajax() and request.method == "POST":
context_node_id = request.POST.get("context_node", '')
sub_theme_name = request.POST.get("sub_theme_name", '')
themes_list = request.POST.get("nodes_list", '')
        themes_list = themes_list.replace("&quot;", "'")
themes_list = ast.literal_eval(themes_list)
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
context_node = node_collection.one({'_id': ObjectId(context_node_id) })
# Save the sub-theme first
if sub_theme_name:
if not sub_theme_name.upper() in (theme_name.upper() for theme_name in themes_list):
node = node_collection.collection.GSystem()
# get_node_common_fields(request, node, group_id, theme_GST)
node.save(is_changed=get_node_common_fields(request, node, group_id, theme_item_GST))
node.reload()
# Add this sub-theme into context nodes collection_set
node_collection.collection.update({'_id': context_node._id}, {'$push': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
context_node.reload()
return HttpResponse("success")
return HttpResponse("failure")
return HttpResponse("None")
@get_execution_time
def add_theme_item(request, group_id):
if request.is_ajax() and request.method == "POST":
context_theme_id = request.POST.get("context_theme", '')
name =request.POST.get('name','')
context_theme = node_collection.one({'_id': ObjectId(context_theme_id) })
list_theme_items = []
if name and context_theme:
for each in context_theme.collection_set:
obj = node_collection.one({'_id': ObjectId(each) })
if obj.name == name:
return HttpResponse("failure")
theme_item_node = node_collection.collection.GSystem()
theme_item_node.save(is_changed=get_node_common_fields(request, theme_item_node, group_id, theme_item_GST))
theme_item_node.reload()
# Add this theme item into context theme's collection_set
node_collection.collection.update({'_id': context_theme._id}, {'$push': {'collection_set': ObjectId(theme_item_node._id) }}, upsert=False, multi=False)
context_theme.reload()
return HttpResponse("success")
@get_execution_time
def add_topics(request, group_id):
if request.is_ajax() and request.method == "POST":
# print "\n Inside add_topics ajax view\n"
context_node_id = request.POST.get("context_node", '')
add_topic_name = request.POST.get("add_topic_name", '')
topics_list = request.POST.get("nodes_list", '')
        topics_list = topics_list.replace("&quot;", "'")
topics_list = ast.literal_eval(topics_list)
topic_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Topic'})
context_node = node_collection.one({'_id': ObjectId(context_node_id) })
# Save the topic first
if add_topic_name:
# print "\ntopic name: ", add_topic_name
if not add_topic_name.upper() in (topic_name.upper() for topic_name in topics_list):
node = node_collection.collection.GSystem()
# get_node_common_fields(request, node, group_id, topic_GST)
node.save(is_changed=get_node_common_fields(request, node, group_id, topic_GST))
node.reload()
# Add this topic into context nodes collection_set
node_collection.collection.update({'_id': context_node._id}, {'$push': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
context_node.reload()
return HttpResponse("success")
return HttpResponse("failure")
return HttpResponse("None")
@get_execution_time
def add_page(request, group_id):
if request.is_ajax() and request.method == "POST":
context_node_id = request.POST.get("context_node", '')
css_node_id = request.POST.get("css_node", '')
unit_name = request.POST.get("unit_name", '')
context_name = request.POST.get("context_name", '')
gst_page = node_collection.one({'_type': "GSystemType", 'name': "Page"})
name = request.POST.get('name', '')
collection_list = []
context_node = None
response_dict = {"success": False}
context_node = node_collection.one({'_id': ObjectId(context_node_id)})
for each in context_node.collection_set:
obj = node_collection.one({'_id': ObjectId(each), 'group_set': ObjectId(group_id)})
collection_list.append(obj.name)
if name not in collection_list:
page_node = node_collection.collection.GSystem()
page_node.save(is_changed=get_node_common_fields(request, page_node, group_id, gst_page))
context_node.collection_set.append(page_node._id)
context_node.save()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
else:
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
response_dict["success"] = None
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def add_file(request, group_id):
    # the context node id is taken from the URL GET request
context_node_id = request.GET.get('context_node', '')
context_node = node_collection.one({'_id': ObjectId(context_node_id)})
if request.method == "POST":
context_name = request.POST.get("context_name", "")
css_node_id = request.POST.get("css_node_id", "")
course_node = request.POST.get("course_node", "")
unit_name = request.POST.get("unit_name_file", "")
app_id = request.POST.get("app_id", "")
app_set_id = request.POST.get("app_set_id", "")
        if context_name == "Topic":
url_name = "/" + group_id + "/topic_details/" + context_node_id + ""
else:
# i.e if context_name is "Course"
url_name = "/" + group_id + "/course/add_units/?css_node_id=" + \
css_node_id + "&unit_node_id=" + context_node_id + "&course_node="+ course_node
if app_id and app_set_id:
url_name += "&app_id=" + app_id + "&app_set_id=" + app_set_id + ""
if context_node_id:
# set the unit node name
node_collection.collection.update({'_id': ObjectId(context_node_id)}, {'$set': {'name': unit_name }}, upsert=False, multi=False)
new_list = []
        # Check whether the file is already available in GridFS or not
for index, each in enumerate(request.FILES.getlist("doc[]", "")):
fileobj = node_collection.collection.File()
filemd5 = hashlib.md5(each.read()).hexdigest()
if not fileobj.fs.files.exists({"md5": filemd5}):
                # If not available, append it to the list used to build the topic's collection below
new_list.append(each)
else:
if context_name == "Course":
# If file exists, PUBLISH it and add to collection set
cur_oid = gridfs_collection.find_one({"md5": filemd5}, {'docid': 1, '_id': 0})
old_file_node = node_collection.find_one({'_id': ObjectId(str(cur_oid["docid"]))})
if old_file_node._id not in context_node.collection_set:
context_node.collection_set.append(old_file_node._id)
old_file_node.status = u"PUBLISHED"
old_file_node.prior_node.append(context_node._id)
old_file_node.save()
context_node.save()
else:
                    # If available, then return to the topic page
return HttpResponseRedirect(url_name)
        # With new_list collected, now save the files
        submitDoc(request, group_id)
        # After a file is saved, its id should be added to the context topic node's collection_set
for k in new_list:
cur_oid = gridfs_collection.find_one({"md5": filemd5}, {'docid': 1, '_id': 0})
file_obj = node_collection.find_one({'_id': ObjectId(str(cur_oid["docid"]))})
file_obj.prior_node.append(context_node._id)
file_obj.status = u"PUBLISHED"
file_obj.save()
context_node.collection_set.append(file_obj._id)
file_obj.save()
context_node.save()
return HttpResponseRedirect(url_name)
def collection_of_node(node=None, group_id=None):
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
if node.collection_set:
for each in node.collection_set:
each_node = node_collection.one({'_id': ObjectId(each)})
if each_node.collection_set:
collection_of_node(each_node, group_id)
else:
                # After deleting a theme instance, it should also be removed from any collection_set that references it
cur = node_collection.find({'member_of': {'$all': [theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if each_node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(each_node._id) }}, upsert=False, multi=False)
# print "\n node ", each_node.name ,"has been deleted \n"
each_node.delete()
        # After deleting a theme instance, it should also be removed from any collection_set that references it
cur = node_collection.find({'member_of': {'$all': [theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
else:
        # After deleting a theme instance, it should also be removed from any collection_set that references it
cur = node_collection.find({'member_of': {'$all': [theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
return True
@get_execution_time
def theme_node_collection(node=None, group_id=None):
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Theme'})
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
if node.collection_set:
for each in node.collection_set:
each_node = node_collection.one({'_id': ObjectId(each)})
if each_node.collection_set:
collection_of_node(each_node, group_id)
else:
                # After deleting a theme instance, it should also be removed from any collection_set that references it
cur = node_collection.find({'member_of': {'$all': [theme_GST._id,theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if each_node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(each_node._id) }}, upsert=False, multi=False)
# print "\n node ", each_node.name ,"has been deleted \n"
each_node.delete()
        # After deleting a theme instance, it should also be removed from any collection_set that references it
cur = node_collection.find({'member_of': {'$all': [theme_GST._id,theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
else:
        # After deleting a theme instance, it should also be removed from any collection_set that references it
cur = node_collection.find({'member_of': {'$all': [theme_GST._id,theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
return True
@get_execution_time
def delete_themes(request, group_id):
'''delete themes objects'''
send_dict = []
deleteobjects = ""
deleteobj = ""
if request.is_ajax() and request.method =="POST":
context_node_id=request.POST.get('context_theme','')
if context_node_id:
context_theme_node = node_collection.one({'_id': ObjectId(context_node_id)})
confirm = request.POST.get("confirm","")
deleteobj = request.POST.get('deleteobj',"")
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Theme'})
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
if deleteobj:
obj = node_collection.one({'_id': ObjectId(deleteobj) })
obj.delete()
node = node_collection.one({'member_of': {'$in':[theme_GST._id, theme_item_GST._id]}, 'collection_set': ObjectId(deleteobj) })
node_collection.collection.update({'_id': node._id}, {'$pull': {'collection_set': ObjectId(deleteobj) }}, upsert=False, multi=False)
else:
deleteobjects = request.POST['deleteobjects']
if deleteobjects:
for each in deleteobjects.split(","):
node = node_collection.one({ '_id': ObjectId(each)})
# print "\n confirmed objects: ", node.name
if confirm:
if context_node_id:
collection_of_node(node, group_id)
if node._id in context_theme_node.collection_set:
node_collection.collection.update({'_id': context_theme_node._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
else:
theme_node_collection(node, group_id)
else:
send_dict.append({"title":node.name})
return StreamingHttpResponse(json.dumps(send_dict).encode('utf-8'),content_type="text/json", status=200)
@login_required
@get_execution_time
def change_group_settings(request,group_id):
'''
changing group's object data
'''
if request.is_ajax() and request.method == "POST":
try:
edit_policy = request.POST['edit_policy']
group_type = request.POST['group_type']
subscription_policy = request.POST['subscription_policy']
visibility_policy = request.POST['visibility_policy']
disclosure_policy = request.POST['disclosure_policy']
encryption_policy = request.POST['encryption_policy']
# group_id = request.POST['group_id']
group_node = node_collection.one({"_id": ObjectId(group_id)})
if group_node:
group_node.edit_policy = edit_policy
group_node.group_type = group_type
group_node.subscription_policy = subscription_policy
group_node.visibility_policy = visibility_policy
group_node.disclosure_policy = disclosure_policy
group_node.encryption_policy = encryption_policy
group_node.modified_by = int(request.user.id)
group_node.save()
return HttpResponse("changed successfully")
except:
return HttpResponse("failed")
return HttpResponse("failed")
list_of_collection = []
hm_obj = HistoryManager()
@get_execution_time
def get_module_set_list(node):
'''
    Returns the list of collections nested inside the node's collection, preserving their hierarchy
'''
list = []
for each in node.collection_set:
each = node_collection.one({'_id': each})
dict = {}
dict['id'] = unicode(each._id)
dict['version_no'] = hm_obj.get_current_version(each)
if each._id not in list_of_collection:
list_of_collection.append(each._id)
            if each.collection_set:  # checking so that the same collection cannot be traversed again
                dict['collection'] = get_module_set_list(each)  # calling the same function recursively
list.append(dict)
return list
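# Illustrative sketch (not part of the original view code): the nested shape that
# get_module_set_list() produces for a two-level collection.  The ids and version
# numbers are made-up placeholders.
# [
#     {'id': u'53af3faf8e66e33b24b4c3b1', 'version_no': u'1.2',
#      'collection': [
#          {'id': u'53af3faf8e66e33b24b4c3b2', 'version_no': u'1.1'},
#      ]},
#     {'id': u'53af3faf8e66e33b24b4c3b3', 'version_no': u'1.4'},
# ]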
@login_required
@get_execution_time
def make_module_set(request, group_id):
'''
    This method will create a module from a collection and store ObjectIds with version numbers
'''
if request.is_ajax():
try:
GST_MODULE = node_collection.one({"_type": "GSystemType", 'name': GAPPS[8]})
_id = request.GET.get("_id","")
if _id:
node = node_collection.one({'_id':ObjectId(_id)})
if node:
list_of_collection.append(node._id)
dict = {}
dict['id'] = unicode(node._id)
dict['version_no'] = hm_obj.get_current_version(node)
if node.collection_set:
dict['collection'] = get_module_set_list(node) #gives the list of collection with proper hierarchy as they are
                    # creating a new GSystem object and assigning data from the collection object
gsystem_obj = node_collection.collection.GSystem()
gsystem_obj.name = unicode(node.name)
gsystem_obj.content = unicode(node.content)
gsystem_obj.member_of.append(GST_MODULE._id)
gsystem_obj.group_set.append(ObjectId(group_id))
# if usrname not in gsystem_obj.group_set:
# gsystem_obj.group_set.append(int(usrname))
user_id = int(request.user.id)
gsystem_obj.created_by = user_id
gsystem_obj.modified_by = user_id
if user_id not in gsystem_obj.contributors:
gsystem_obj.contributors.append(user_id)
gsystem_obj.module_set.append(dict)
module_set_md5 = hashlib.md5(str(gsystem_obj.module_set)).hexdigest() #get module_set's md5
                    check = check_module_exits(module_set_md5)  # check whether the module already exists
if(check == 'True'):
return HttpResponse("This module already Exists")
else:
gsystem_obj.save()
create_relation_of_module(node._id, gsystem_obj._id)
create_version_of_module(gsystem_obj._id,node._id)
check1 = sotore_md5_module_set(gsystem_obj._id, module_set_md5)
if (check1 == 'True'):
                            return HttpResponse("module successfully created")
else:
gsystem_obj.delete()
                            return HttpResponse("Error occurred while storing md5 of object in attribute")
else:
return HttpResponse("Object not present corresponds to this id")
else:
return HttpResponse("Not a valid id passed")
except Exception as e:
print "Error:",e
return HttpResponse(e)
@get_execution_time
def sotore_md5_module_set(object_id, module_set_md5):
'''
    This method will store the md5 of the module_set of a particular GSystem into an Attribute
'''
    node_at = node_collection.one({'$and':[{'_type': 'AttributeType'},{'name': 'module_set_md5'}]})  # retrieving the attribute type
if node_at is not None:
try:
attr_obj = triple_collection.collection.GAttribute() #created instance of attribute class
attr_obj.attribute_type = node_at
attr_obj.subject = object_id
attr_obj.object_value = unicode(module_set_md5)
attr_obj.save()
except Exception as e:
print "Exception:",e
return 'False'
return 'True'
else:
        print "Run 'python manage.py filldb' command to create AttributeType 'module_set_md5' "
return 'False'
# under construction
@get_execution_time
def create_version_of_module(subject_id, node_id):
'''
    This method will create the module's version_no attribute using the AttributeType 'version'
'''
rt_has_module = node_collection.one({'_type':'RelationType', 'name':'has_module'})
relation = triple_collection.find({'_type': 'GRelation', 'subject': node_id, 'relation_type.$id':rt_has_module._id})
at_version = node_collection.one({'_type':'AttributeType', 'name':'version'})
attr_versions = []
if relation.count() > 0:
for each in relation:
module_id = triple_collection.one({'_id': each['_id']})
if module_id:
attr = triple_collection.one({
'_type': 'GAttribute', 'subject': ObjectId(module_id.right_subject),
'attribute_type.$id': at_version._id
})
if attr:
attr_versions.append(attr.object_value)
if attr_versions:
attr_versions.sort()
attr_ver = float(attr_versions[-1])
attr = triple_collection.collection.GAttribute()
attr.attribute_type = at_version
attr.subject = subject_id
attr.object_value = round((attr_ver+0.1),1)
attr.save()
else:
attr = triple_collection.collection.GAttribute()
attr.attribute_type = at_version
attr.subject = ObjectId(subject_id)
attr.object_value = 1
attr.save()
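# Worked example (illustrative): if the modules already derived from this node
# carry version attributes [1, 1.1, 1.2], the branch above sorts them, takes the
# last value (1.2) and stores round(1.2 + 0.1, 1) == 1.3 on the new module;
# when no prior module exists, the version attribute starts at 1.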
@get_execution_time
def create_relation_of_module(subject_id, right_subject_id):
rt_has_module = node_collection.one({'_type': 'RelationType', 'name': 'has_module'})
if rt_has_module and subject_id and right_subject_id:
relation = triple_collection.collection.GRelation() #instance of GRelation class
relation.relation_type = rt_has_module
relation.right_subject = right_subject_id
relation.subject = subject_id
relation.save()
@get_execution_time
def check_module_exits(module_set_md5):
'''
    This method will check whether the module already exists.
'''
node_at = node_collection.one({'$and':[{'_type': 'AttributeType'},{'name': 'module_set_md5'}]})
attribute = triple_collection.one({'_type':'GAttribute', 'attribute_type.$id': node_at._id, 'object_value': module_set_md5})
if attribute is not None:
return 'True'
else:
return 'False'
@get_execution_time
def walk(node):
hm = HistoryManager()
list = []
for each in node:
dict = {}
node = node_collection.one({'_id':ObjectId(each['id'])})
n = hm.get_version_document(node,each['version_no'])
dict['label'] = n.name
dict['id'] = each['id']
dict['version_no'] = each['version_no']
if "collection" in each.keys():
dict['children'] = walk(each['collection'])
list.append(dict)
return list
@get_execution_time
def get_module_json(request, group_id):
_id = request.GET.get("_id", "")
node = node_collection.one({'_id': ObjectId(_id)})
data = walk(node.module_set)
return HttpResponse(json.dumps(data))
# ------------- For generating graph json data ------------
@get_execution_time
def graph_nodes(request, group_id):
page_node = node_collection.one({'_id': ObjectId(request.GET.get("id"))})
page_node.get_neighbourhood(page_node.member_of)
# print page_node.keys()
coll_relation = {'relation_name': 'has_collection', 'inverse_name': 'member_of_collection'}
prior_relation = {'relation_name': 'prerequisite', 'inverse_name': 'is_required_for'}
def _get_node_info(node_id):
node = node_collection.one( {'_id':node_id} )
# mime_type = "true" if node.structure.has_key('mime_type') else 'false'
return node.name
# def _get_username(id_int):
# return User.objects.get(id=id_int).username
# def _get_node_url(node_id):
# node_url = '/' + str(group_id)
# node = node_collection.one({'_id':node_id})
# if len(node.member_of) > 1:
# if node.mime_type == 'image/jpeg':
# node_url += '/image/image_detail/' + str(node_id)
# elif node.mime_type == 'video':
# node_url += '/video/video_detail/' + str(node_id)
# elif len(node.member_of) == 1:
# gapp_name = (node_collection.one({'_id':node.member_of[0]}).name).lower()
# if gapp_name == 'forum':
# node_url += '/forum/show/' + str(node_id)
# elif gapp_name == 'file':
# node_url += '/image/image_detail/' + str(node_id)
# elif gapp_name == 'page':
# node_url += '/page/details/' + str(node_id)
# elif gapp_name == 'quiz' or 'quizitem':
# node_url += '/quiz/details/' + str(node_id)
# return node_url
# page_node_id = str(id(page_node._id))
node_metadata ='{"screen_name":"' + page_node.name + '", "title":"' + page_node.name + '", "_id":"'+ str(page_node._id) +'", "refType":"GSystem"}, '
node_relations = ''
exception_items = [
"name", "content", "_id", "login_required", "attribute_set",
"member_of", "status", "comment_enabled", "start_publication",
"_type", "contributors", "created_by", "modified_by", "last_update", "url", "featured",
"created_at", "group_set", "type_of", "content_org", "author_set",
"fs_file_ids", "file_size", "mime_type", "location", "language",
"property_order", "rating", "apps_list", "annotations", "instance of"
]
# username = User.objects.get(id=page_node.created_by).username
i = 1
for key, value in page_node.items():
if (key in exception_items) or (not value):
pass
elif isinstance(value, list):
if len(value):
# node_metadata +='{"screen_name":"' + key + '", "_id":"'+ str(i) +'_r"}, '
node_metadata +='{"screen_name":"' + key + '", "_id":"'+ str(abs(hash(key+str(page_node._id)))) +'_r"}, '
node_relations += '{"type":"'+ key +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(key+str(page_node._id)))) +'_r"},'
# key_id = str(i)
key_id = str(abs(hash(key+str(page_node._id))))
# i += 1
# if key in ("modified_by", "author_set"):
# for each in value:
# node_metadata += '{"screen_name":"' + _get_username(each) + '", "_id":"'+ str(i) +'_n"},'
# node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(i) +'_n"},'
# i += 1
# else:
for each in value:
# print "\n====", key, "------", type(each)
if isinstance(each, ObjectId):
node_name = _get_node_info(each)
if key == "collection_set":
inverse = coll_relation['inverse_name']
elif key == "prior_node":
inverse = prior_relation['inverse_name']
else:
inverse = ""
node_metadata += '{"screen_name":"' + node_name + '", "title":"' + page_node.name + '", "_id":"'+ str(each) +'", "refType":"Relation", "inverse":"' + inverse + '", "flag":"1"},'
# node_metadata += '{"screen_name":"' + node_name + '", "_id":"'+ str(each) +'", "refType":"relation"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(each) +'"},'
i += 1
# if "each" is Object of GSystem
elif isinstance(each, GSystem):
node_metadata += '{"screen_name":"' + each.name + '", "title":"' + page_node.name + '", "_id":"'+ str(each._id) + '", "refType":"Relation"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(each._id) +'"},'
else:
node_metadata += '{"screen_name":"' + unicode(each) + '", "_id":"'+ unicode(each) +'_n"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ unicode(each) +'_n"},'
i += 1
else:
# possibly gives GAttribute
node_metadata +='{"screen_name":"' + key + '", "_id":"'+ str(abs(hash(key+str(page_node._id)))) +'_r"},'
node_relations += '{"type":"'+ key +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(key+str(page_node._id)))) +'_r"},'
# key_id = str(i)
key_id = str(abs(hash(key+str(page_node._id))))
if isinstance( value, list):
for each in value:
node_metadata += '{"screen_name":"' + each + '", "_id":"'+ str(i) +'_n"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(i) +'_n"},'
i += 1
else:
node_metadata += '{"screen_name":"' + str(value) + '", "_id":"'+ str(i) +'_n"},'
node_relations += '{"type":"'+ key +'", "from":"'+ str(abs(hash(key+str(page_node._id)))) +'_r", "to": "'+ str(i) +'_n"},'
i += 1
# End of if - else
# End of for loop
# # getting all the relations of current node
# node_rel = page_node.get_possible_relations(page_node.member_of)
# # print "\n\n", node_rel
# for keyy, vall in node_rel.iteritems():
# if vall['subject_or_right_subject_list']:
# for eachnode in vall['subject_or_right_subject_list']:
# if keyy == "event_organised_by":
# pass
# # node_metadata +='{"screen_name":"' + keyy + '", "_id":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# # node_relations += '{"type":"'+ keyy +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# # node_metadata += '{"screen_name":"' + str(vall) + '", "_id":"'+ str(i) +'_n"},'
# # node_relations += '{"type":"'+ keyy +'", "from":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r", "to": "'+ str(i) +'_n"},'
# else:
# node_metadata +='{"screen_name":"' + keyy + '", "_id":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# node_relations += '{"type":"'+ keyy +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# vall = vall.altnames if ( len(vall['altnames'])) else _get_node_info(vall['subject_or_right_subject_list'][0])
# node_metadata += '{"screen_name":"' + str(vall) + '", "_id":"'+ str(i) +'_n"},'
# node_relations += '{"type":"'+ keyy +'", "f**rom":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r", "to": "'+ str(i) +'_n"},'
# print "\nkey : ", key, "=====", val
node_metadata = node_metadata[:-1]
node_relations = node_relations[:-1]
node_graph_data = '{ "node_metadata": [' + node_metadata + '], "relations": [' + node_relations + '] }'
# print node_graph_data
return StreamingHttpResponse(node_graph_data)
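# Illustrative sketch (not part of the original view): the overall shape of the
# JSON string streamed back by graph_nodes(), with made-up ids.  Each list-valued
# key of the node becomes an intermediate "<hash>_r" relation node that fans out
# to the related nodes.
# {
#     "node_metadata": [
#         {"screen_name": "My Page", "title": "My Page", "_id": "53af...", "refType": "GSystem"},
#         {"screen_name": "collection_set", "_id": "8123..._r"},
#         {"screen_name": "Child Page", "title": "My Page", "_id": "53b0...",
#          "refType": "Relation", "inverse": "member_of_collection", "flag": "1"}
#     ],
#     "relations": [
#         {"type": "collection_set", "from": "53af...", "to": "8123..._r"},
#         {"type": "collection_set", "from": "8123..._r", "to": "53b0..."}
#     ]
# }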
# ------ End of processing for graph ------
@get_execution_time
def get_data_for_switch_groups(request,group_id):
coll_obj_list = []
node_id = request.GET.get("object_id", "")
st = node_collection.find({"_type": "Group"})
node = node_collection.one({"_id": ObjectId(node_id)})
for each in node.group_set:
coll_obj_list.append(node_collection.one({'_id': each}))
data_list = set_drawer_widget(st, coll_obj_list)
return HttpResponse(json.dumps(data_list))
@get_execution_time
def get_data_for_drawer(request, group_id):
'''
designer module's drawer widget function
'''
coll_obj_list = []
node_id = request.GET.get("id","")
st = node_collection.find({"_type":"GSystemType"})
node = node_collection.one({"_id":ObjectId(node_id)})
for each in node.collection_set:
coll_obj_list.append(node_collection.one({'_id':each}))
data_list=set_drawer_widget(st,coll_obj_list)
return HttpResponse(json.dumps(data_list))
# This method is not in use
@get_execution_time
def get_data_for_user_drawer(request, group_id,):
# This method will return data for user widget
d1 = []
d2 = []
draw1 = {}
draw2 = {}
drawer1 = []
drawer2 = []
data_list = []
all_batch_user = []
users = []
st_batch_id = request.GET.get('st_batch_id','')
node_id = request.GET.get('_id','')
if st_batch_id:
batch_coll = node_collection.find({'member_of': {'$all': [ObjectId(st_batch_id)]}, 'group_set': {'$all': [ObjectId(group_id)]}})
group = node_collection.one({'_id':ObjectId(group_id)})
if batch_coll:
for each in batch_coll:
users = users+each.author_set
else:
users = []
user_list = list(set(group.author_set) - set(users))
for each in user_list:
user= User.objects.get(id=each)
dic = {}
dic['id'] = user.id
dic['name'] = user.username
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
if node_id:
for each in node_collection.one({'_id':ObjectId(node_id)}).author_set:
user= User.objects.get(id=each)
dic = {}
dic['id'] = user.id
dic['name'] = user.username
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
else:
return HttpResponse("GSystemType for batch required")
@get_execution_time
def set_drawer_widget_for_users(st, coll_obj_list):
'''
    NOTE : this method is used only for user drawers (Django User objects)
'''
draw2={}
draw1={}
data_list=[]
d1=[]
d2=[]
for each in st:
dic = {}
dic['id'] = str(each.id)
dic['name'] = each.email # username
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in coll_obj_list:
dic = {}
dic['id'] = str(each.id)
dic['name'] = each.email # username
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return data_list
@get_execution_time
def get_data_for_batch_drawer(request, group_id):
'''
This method will return data for batch drawer widget
'''
d1 = []
d2 = []
draw1 = {}
draw2 = {}
drawer1 = []
drawer2 = []
data_list = []
st = node_collection.one({'_type':'GSystemType','name':'Student'})
node_id = request.GET.get('_id','')
batch_coll = node_collection.find({"_type": "GSystem", 'member_of':st._id, 'group_set': {'$all': [ObjectId(group_id)]}})
if node_id:
rt_has_batch_member = node_collection.one({'_type':'RelationType','name':'has_batch_member'})
relation_coll = triple_collection.find({'_type':'GRelation', 'right_subject':ObjectId(node_id), 'relation_type.$id':rt_has_batch_member._id})
for each in relation_coll:
dic = {}
n = triple_collection.one({'_id':ObjectId(each.subject)})
drawer2.append(n)
for each in batch_coll:
drawer1.append(each)
drawer_set1 = set(drawer1) - set(drawer2)
drawer_set2 = drawer2
for each in drawer_set1:
dic = {}
dic['id'] = str(each._id)
dic['name'] = each.name
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer_set2:
dic = {}
dic['id'] = str(each._id)
dic['name'] = each.name
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
@get_execution_time
def set_drawer_widget(st, coll_obj_list):
'''
this method will set data for drawer widget
'''
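    # Returns a two-element list: {'drawer1': items still available} and {'drawer2': items already selected}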
stobjs=[]
coll_objs=[]
data_list = []
d1 = []
d2 = []
draw1 = {}
draw2 = {}
drawer1=[]
drawer2=[]
for each in st:
stobjs.append(each['_id'])
for each in coll_obj_list:
coll_objs.append(each['_id'])
drawer1_set = set(stobjs) - set(coll_objs)
lstset=[]
for each in drawer1_set:
obj=node_collection.one({'_id':each})
lstset.append(obj)
drawer1=lstset
drawer2 = coll_obj_list
for each in drawer1:
dic = {}
dic['id'] = str(each['_id'])
dic['name'] = each['name']
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer2:
dic = {}
dic['id'] = str(each['_id'])
dic['name'] = each['name']
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return data_list
@get_execution_time
def get_data_for_event_task(request, group_id):
    # Dates for the task type are built from day, month and year
day_list=[]
append = day_list.append
event_count={}
list31=[1,3,5,7,8,10,12]
list30=[4,6,9,11]
currentYear = datetime.datetime.now().year
    #create the date range in the formats needed for querying the data
    #Task attribute_type start_time's object_value holds only the date
    #in month/date/year format
    #whereas events are queried from nodes which store the date-time in unix format
no = request.GET.get('no','')
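    # 'no' selects what to return: '1' -> events scheduled in the requested month,
    # '2' -> rescheduled event dates, '3' -> tasks for the requested month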
month = request.GET.get('start','')[5:7]
year = request.GET.get('start','')[0:4]
start = datetime.datetime(int(currentYear), int(month), 1)
task_start = str(int(month))+"/"+"01"+"/"+str(int(year))
if int(month) in list31:
end=datetime.datetime(int(currentYear),int(month), 31)
task_end=str(int(month))+"/"+"31"+"/"+str(int(year))
elif int(month) in list30:
end=datetime.datetime(int(currentYear),int(month), 30)
task_end=str(int(month))+"/"+"30"+"/"+str(int(year))
else:
end=datetime.datetime(int(currentYear),int(month), 28)
task_end=str(int(month))+"/"+"28"+"/"+str(int(year))
#day_list of events
if no == '1' or no == '2':
        #search for events only under the above condition so that this block
        #doesn't get executed when we are looking for other data
event = node_collection.one({'_type': "GSystemType", 'name': "Event"})
obj = node_collection.find({'type_of': event._id},{'_id':1})
all_list = [ each_gst._id for each_gst in obj ]
if no == '1':
nodes = node_collection.find({'_type':'GSystem','member_of':{'$in':all_list},'attribute_set.start_time':{'$gte':start,'$lt': end},'group_set':ObjectId(group_id)})
for i in nodes:
attr_value={}
update = attr_value.update
event_url="/"+str(group_id)+"/event/"+str(i.member_of[0]) +"/"+str(i._id)
update({'url':event_url})
update({'id':i._id})
update({'title':i.name})
date=i.attribute_set[0]['start_time']
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
update({'start':formated_date})
for j in i.attribute_set:
if unicode('event_status') in j.keys():
if j['event_status'] == 'Scheduled':
#Default Color Blue would be applied
pass
if j['event_status'] == 'Rescheduled':
update({'backgroundColor':'#ffd700'})
if j['event_status'] == 'Completed':
update({'backgroundColor':'green'})
if j['event_status'] == 'Incomplete':
update({'backgroundColor':'red'})
append(dict(attr_value))
if no == '2':
#All the Rescheduled ones
nodes = node_collection.find({'_type':'GSystem','member_of':{'$in':list(all_list)},'attribute_set.event_edit_reschedule.reschedule_dates':{ '$elemMatch':{'$gt':start}},'group_set':ObjectId(group_id)},{'attribute_set.event_edit_reschedule.reschedule_dates':1,"name":1})
for k in nodes:
for a in k.attribute_set:
if unicode('event_edit_reschedule') in a:
for v in a['event_edit_reschedule']['reschedule_dates']:
attr_value={}
update = attr_value.update
event_url=" "
update({'url':event_url})
update({'id':k._id})
update({'title':k.name})
date = v
try:
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
update({'start':formated_date})
update({'backgroundColor':'#7e7e7e'})
append(dict(attr_value))
except:
pass
date=""
user_assigned=[]
user_append = user_assigned.append
#day_list of task
if no == '3':
groupname=node_collection.find_one({"_id":ObjectId(group_id)})
attributetype_assignee = node_collection.find_one({"_type":'AttributeType', 'name':'Assignee'})
attributetype_key1 = node_collection.find_one({"_type":'AttributeType', 'name':'start_time'})
        #check whether the group is an author group or a common group
if groupname._type == "Group":
GST_TASK = node_collection.one({'_type': "GSystemType", 'name': 'Task'})
task_nodes = node_collection.find({"_type": "GSystem", 'member_of':GST_TASK._id, 'group_set': ObjectId(group_id)})
if groupname._type == "Author":
task_nodes = triple_collection.find({"_type":"GAttribute", "attribute_type.$id":attributetype_assignee._id, "object_value":request.user.id}).sort('last_update',-1)
for attr in task_nodes:
if groupname._type == "Group":
task_node = node_collection.one({'_id':attr._id})
if groupname._type == "Author":
task_node = node_collection.one({'_id':attr.subject})
if task_node:
attr1=triple_collection.find_one({
"_type":"GAttribute", "subject":task_node._id, "attribute_type.$id":attributetype_key1._id,
'object_value':{'$gte':task_start,'$lte':task_end}
})
attr_value={}
update = attr_value.update
task_url="/" + groupname.name +"/" + "task"+"/" + str(task_node._id)
update({'id':task_node._id})
update({'title':task_node.name})
if attr1:
date = attr1.object_value
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
update({'start':formated_date})
else:
date=task_node.created_at
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
attr_value.update({'start':formated_date})
update({'url':task_url})
append(attr_value)
return HttpResponse(json.dumps(day_list,cls=NodeJSONEncoder))
@get_execution_time
def get_data_for_drawer_of_attributetype_set(request, group_id):
'''
this method will fetch data for designer module's drawer widget
'''
data_list = []
d1 = []
d2 = []
draw1 = {}
draw2 = {}
node_id = request.GET.get("id","")
coll_obj_list = []
st = node_collection.find({"_type":"AttributeType"})
node = node_collection.one({"_id":ObjectId(node_id)})
for each in node.attribute_type_set:
coll_obj_list.append(each)
drawer1 = list(set(st) - set(coll_obj_list))
drawer2 = coll_obj_list
for each in drawer1:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer2:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
@get_execution_time
def get_data_for_drawer_of_relationtype_set(request, group_id):
'''
this method will fetch data for designer module's drawer widget
'''
data_list = []
d1 = []
d2 = []
draw1 = {}
draw2 = {}
node_id = request.GET.get("id","")
coll_obj_list = []
st = node_collection.find({"_type":"RelationType"})
node = node_collection.one({"_id":ObjectId(node_id)})
for each in node.relation_type_set:
coll_obj_list.append(each)
drawer1 = list(set(st) - set(coll_obj_list))
drawer2 = coll_obj_list
for each in drawer1:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer2:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
@login_required
@get_execution_time
def deletion_instances(request, group_id):
"""
Deletes the given node(s) and associated GAttribute(s) & GRelation(s)
or provides all information before deleting for confirmation.
"""
send_dict = []
if request.is_ajax() and request.method =="POST":
deleteobjects = request.POST['deleteobjects']
confirm = request.POST.get("confirm", "")
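        # "deleteobjects" is a comma-separated string of ObjectId strings of the node(s) selected for deletion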
for each in deleteobjects.split(","):
delete_list = []
node = node_collection.one({'_id': ObjectId(each)})
left_relations = triple_collection.find({"_type": "GRelation", "subject": node._id})
right_relations = triple_collection.find({"_type": "GRelation", "right_subject": node._id})
attributes = triple_collection.find({"_type": "GAttribute", "subject": node._id})
# When confirm holds "yes" value, all given node(s) is/are deleted.
# Otherwise, required information is provided for confirmation before deletion.
if confirm:
# Deleting GRelation(s) where given node is used as "subject"
for each_left_gr in left_relations:
# Special case
if each_left_gr.relation_type.name == "has_login":
auth_node = node_collection.one(
{'_id': each_left_gr.right_subject},
{'created_by': 1}
)
if auth_node:
node_collection.collection.update(
{'_type': "Group", '$or': [{'group_admin': auth_node.created_by}, {'author_set': auth_node.created_by}]},
{'$pull': {'group_admin': auth_node.created_by, 'author_set': auth_node.created_by}},
upsert=False, multi=True
)
# If given node is used in relationship with any other node (as subject)
# Then given node's ObjectId must be removed from "relation_set" field
# of other node, referred under key as inverse-name of the RelationType
node_collection.collection.update(
{'_id': each_left_gr.right_subject, 'relation_set.'+each_left_gr.relation_type.inverse_name: {'$exists': True}},
{'$pull': {'relation_set.$.'+each_left_gr.relation_type.inverse_name: node._id}},
upsert=False, multi=False
)
each_left_gr.delete()
# Deleting GRelation(s) where given node is used as "right_subject"
for each_right_gr in right_relations:
# If given node is used in relationship with any other node (as subject)
# Then given node's ObjectId must be removed from "relation_set" field
# of other node, referred under key as name of the RelationType
node_collection.collection.update({'_id': each_right_gr.subject, 'relation_set.'+each_right_gr.relation_type.name: {'$exists': True}},
{'$pull': {'relation_set.$.'+each_right_gr.relation_type.name: node._id}},
upsert=False, multi=False
)
each_right_gr.delete()
# Deleting GAttribute(s)
for each_ga in attributes:
each_ga.delete()
# Finally deleting given node
node.delete()
else:
if left_relations :
list_rel = []
for each in left_relations:
rname = node_collection.find_one({"_id":each.right_subject})
if not rname:
continue
rname = rname.name
alt_names = each.relation_type.name
if each.relation_type.altnames:
if ";" in each.relation_type.altnames:
alt_names = each.relation_type.altnames.split(";")[0]
list_rel.append(alt_names + " (Relation): " + rname)
delete_list.append({'left_relations': list_rel})
if right_relations :
list_rel = []
for each in right_relations:
lname = node_collection.find_one({"_id":each.subject})
if not lname:
continue
lname = lname.name
alt_names = each.relation_type.name
if each.relation_type.altnames:
if ";" in each.relation_type.altnames:
alt_names = each.relation_type.altnames.split(";")[1]
list_rel.append(alt_names + " (Inverse-Relation): " + lname)
delete_list.append({'right_relations': list_rel})
if attributes:
list_att = []
for each in attributes:
alt_names = each.attribute_type.name
if each.attribute_type.altnames:
alt_names = each.attribute_type.altnames
list_att.append(alt_names + " (Attribute): " + str(each.object_value))
delete_list.append({'attributes': list_att})
send_dict.append({"title": node.name, "content": delete_list})
if confirm:
return StreamingHttpResponse(str(len(deleteobjects.split(",")))+" objects deleted")
return StreamingHttpResponse(json.dumps(send_dict).encode('utf-8'),content_type="text/json", status=200)
@get_execution_time
def get_visited_location(request, group_id):
usrid = request.user.id
visited_location = ""
if(usrid):
usrid = int(request.user.id)
usrname = unicode(request.user.username)
author = node_collection.one({'_type': "GSystemType", 'name': "Author"})
user_group_location = node_collection.one({'_type': "Author", 'member_of': author._id, 'created_by': usrid, 'name': usrname})
if user_group_location:
visited_location = user_group_location.visited_location
return StreamingHttpResponse(json.dumps(visited_location))
@login_required
@get_execution_time
def get_online_editing_user(request, group_id):
'''
    get users who are currently online and editing the node
'''
if request.is_ajax() and request.method == "POST":
editorid = request.POST.get('editorid', "")
viewobj = ViewObj.objects.filter(filename=editorid)
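        # ViewObj rows record which users currently have this resource (editorid) open; the requesting user is excluded below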
userslist = []
if viewobj:
for each in viewobj:
if not each.username == request.user.username:
blankdict = {}
blankdict['username']=each.username
get_profile = get_profile_pic(each.username)
                    if get_profile:
                        blankdict['pro_img'] = "/"+str(group_id)+"/image/thumbnail/"+str(get_profile._id)
                    else:
                        blankdict['pro_img'] = "no"
userslist.append(blankdict)
if len(userslist) == 0:
userslist.append("No users")
else :
userslist.append("No users")
return StreamingHttpResponse(json.dumps(userslist).encode('utf-8'),content_type="text/json")
@get_execution_time
def view_articles(request, group_id):
if request.is_ajax():
# extracting all the bibtex entries from database
GST_one=node_collection.one({'_type':'AttributeType','name':'Citation'})
list_item=['article','book','booklet','conference','inbook','incollection','inproceedings','manual','masterthesis','misc','phdthesis','proceedings','techreport','unpublished_entry']
response_dict=[]
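        # For every bibtex entry type, collect the group's PUBLISHED entries together with their Citation attribute value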
for each in list_item:
dict2={}
ref=node_collection.one({'_type':'GSystemType','name':each})
ref_entry=node_collection.find({"_type": "GSystem", 'member_of':{'$all':[ref._id]},'group_set':{'$all':[ObjectId(group_id)]},'status':u'PUBLISHED'})
list_entry=[]
for every in ref_entry:
id=every._id
gst_attribute=triple_collection.one({"_type": "GAttribute", 'subject': ObjectId(every._id), 'attribute_type.$id':ObjectId(GST_one._id)})
cite=gst_attribute.object_value
dict1 = {'name': every.name, 'cite': cite}
list_entry.append(dict1)
dict2[each]=list_entry
response_dict.append(dict2)
return StreamingHttpResponse(json.dumps(response_dict))
@get_execution_time
def get_author_set_users(request, group_id):
'''
This ajax function will give all users present in node's author_set field
'''
user_list = []
can_remove = False
if request.is_ajax():
_id = request.GET.get('_id',"")
node = node_collection.one({'_id':ObjectId(_id)})
course_name = ""
rt_has_course = node_collection.one({'_type':'RelationType', 'name':'has_course'})
if rt_has_course and node._id:
course = triple_collection.one({"_type": "GRelation", 'right_subject':node._id, 'relation_type.$id':rt_has_course._id})
if course:
course_name = node_collection.one({'_id':ObjectId(course.subject)}).name
if node.created_by == request.user.id:
can_remove = True
if node.author_set:
for each in node.author_set:
user_list.append(User.objects.get(id = each))
return render_to_response("ndf/refresh_subscribed_users.html",
{"user_list":user_list,'can_remove':can_remove,'node_id':node._id,'course_name':course_name},
context_instance=RequestContext(request)
)
else:
return StreamingHttpResponse("Empty")
else:
return StreamingHttpResponse("Invalid ajax call")
@login_required
@get_execution_time
def remove_user_from_author_set(request, group_id):
'''
    This ajax function removes the user from the node's author_set
'''
user_list = []
can_remove = False
if request.is_ajax():
_id = request.GET.get('_id',"")
user_id = int(request.GET.get('user_id',""))
node = node_collection.one({'_id':ObjectId(_id)})
if node.created_by == request.user.id:
node.author_set.remove(user_id)
can_remove = True
node.save()
if node.author_set:
for each in node.author_set:
user_list.append(User.objects.get(id = each))
return render_to_response("ndf/refresh_subscribed_users.html",
{"user_list":user_list,'can_remove':can_remove,'node_id':node._id},
context_instance=RequestContext(request)
)
else:
return StreamingHttpResponse("You are not authorised to remove user")
else:
return StreamingHttpResponse("Invalid Ajax call")
@get_execution_time
def get_filterd_user_list(request, group_id):
'''
    This function will return (all users) - (users already subscribed to the particular group)
'''
user_list = []
if request.is_ajax():
_id = request.GET.get('_id',"")
node = node_collection.one({'_id':ObjectId(_id)})
all_users_list = [each.username for each in User.objects.all()]
if node._type == 'Group':
for each in node.author_set:
user_list.append(User.objects.get(id = each).username)
filtered_users = list(set(all_users_list) - set(user_list))
return HttpResponse(json.dumps(filtered_users))
@get_execution_time
def search_tasks(request, group_id):
'''
    This function will return all tasks in the group matching the search term
'''
user_list = []
app_id = node_collection.find_one({'_type':"GSystemType", "name":"Task"})
if request.is_ajax():
term = request.GET.get('term',"")
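        # Case-insensitive substring match on task name within the current group, excluding HIDDEN tasks, newest first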
task_nodes = node_collection.find({
'member_of': {'$all': [app_id._id]},
'name': {'$regex': term, '$options': 'i'},
'group_set': {'$all': [ObjectId(group_id)]},
'status': {'$nin': ['HIDDEN']}
}).sort('last_update', -1)
for each in task_nodes :
user_list.append({"label":each.name,"value":each.name,"id":str(each._id)})
return HttpResponse(json.dumps(user_list))
else:
raise Http404
@get_execution_time
def get_group_member_user(request, group_id):
"""Returns member(s) of the group excluding (group-admin(s)) in form of
dictionary that consists of key-value pair:
key: Primary key from Django's User table
value: User-name of that User record
"""
user_list = {}
group = node_collection.find_one({'_id': ObjectId(group_id)})
if request.is_ajax():
if group.author_set:
for each in group.author_set:
user_list[each] = User.objects.get(id=each).username
return HttpResponse(json.dumps(user_list))
else:
raise Http404
@get_execution_time
def annotationlibInSelText(request, group_id):
"""
    This view parses the annotations field of the currently selected node_id and evaluates whether an entry corresponding to this selectedText already exists.
    If it does, it appends the comment to that entry, else it creates a new one.
Arguments:
group_id - ObjectId of the currently selected group
obj_id - ObjectId of the currently selected node_id
comment - The comment added by user
selectedText - text for which comment was added
Returns:
    The updated annotations field
"""
obj_id = str(request.POST["node_id"])
sg_obj = node_collection.one({"_id":ObjectId(obj_id)})
comment = request.POST ["comment"]
comment = json.loads(comment)
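    # Keep only the fields that are stored with each annotation comment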
comment_modified = {
'authorAvatarUrl' : comment['authorAvatarUrl'],
'authorName' : comment['authorName'],
'comment' : comment['comment']
}
selectedText = request.POST['selectedText']
# check if annotations for this text already exist!
flag = False
for entry in sg_obj.annotations:
if (entry['selectedText'].lower() == selectedText.lower()):
entry['comments'].append(comment_modified)
flag = True
break
if(not(flag)):
comment_list = []
comment_list.append(comment_modified)
ann = {
'selectedText' : selectedText,
'sectionId' : str(comment['sectionId']),
'comments' : comment_list
}
sg_obj.annotations.append(ann)
sg_obj.save()
return HttpResponse(json.dumps(sg_obj.annotations))
@get_execution_time
def delComment(request, group_id):
'''
    Delete a comment from a thread (currently a stub that only acknowledges the request)
'''
return HttpResponse("comment deleted")
# Views related to MIS -------------------------------------------------------------
@get_execution_time
def get_students(request, group_id):
"""
This view returns list of students along with required data based on selection criteria
to student_data_review.html
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
try:
if request.is_ajax() and request.method == "POST":
groupid = request.POST.get("groupid", None)
app_id = request.POST.get("app_id", None)
app_set_id = request.POST.get("app_set_id", None)
stud_reg_year = str(request.POST.get("reg_year", None))
university_id = request.POST.get("student_belongs_to_university",None)
college_id = request.POST.get("student_belongs_to_college",None)
person_gst = node_collection.one({'_type': "GSystemType", 'name': "Student"}, {'name': 1, 'type_of': 1})
widget_for = []
query = {}
person_gs = node_collection.collection.GSystem()
person_gs.member_of.append(person_gst._id)
person_gs.get_neighbourhood(person_gs.member_of)
# university_gst = node_collection.one({'_type': "GSystemType", 'name': "University"})
mis_admin = node_collection.one({"_type": "Group", "name": "MIS_admin"}, {"_id": 1})
# univ_cur = node_collection.find({"member_of":university_gst._id,'group_set':mis_admin._id},{'name':1,"_id":1})
# rel_univ = node_collection.one({'_type': "RelationType", 'name': "student_belongs_to_university"}, {'_id': 1})
# rel_colg = node_collection.one({'_type': "RelationType", 'name': "student_belongs_to_college"}, {'_id': 1})
attr_deg_yr = node_collection.one({'_type': "AttributeType", 'name': "degree_year"}, {'_id': 1})
widget_for = ["name",
# rel_univ._id,
# rel_colg._id,
attr_deg_yr._id
]
# 'status'
# ]
widget_for = get_widget_built_up_data(widget_for, person_gs)
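            # widget_for now holds field descriptors (BaseField / AttributeType entries)
            # that are used below to translate the posted form values into the Mongo query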
# Fetch field(s) from POST object
# if request.POST.has_key("student_belongs_to_university"):
# university_id = query_data = request.POST.get("student_belongs_to_university", "")
for each in widget_for:
field_name = each["name"]
if each["_type"] == "BaseField":
if field_name in request.POST:
query_data = request.POST.get(field_name, "")
query_data = parse_template_data(each["data_type"], query_data)
if field_name == "name":
query.update({field_name: {'$regex': query_data, '$options': "i"}})
else:
query.update({field_name: query_data})
elif each["_type"] == "AttributeType":
if field_name in request.POST:
query_data = request.POST.get(field_name, "")
query_data = parse_template_data(each["data_type"], query_data)
query.update({"attribute_set."+field_name: query_data})
# elif each["_type"] == "RelationType":
# if request.POST.has_key(field_name):
# print field_name,"\n\n"
# query_data = request.POST.get(field_name, "")
# query_data = parse_template_data(each["data_type"], query_data, field_instance=each)
# print query_data,"\n\n"
# if field_name == "student_belongs_to_university":
# university_id = query_data
# else:
# query.update({"relation_set."+field_name: query_data})
student = node_collection.one({'_type': "GSystemType", 'name': "Student"}, {'_id': 1})
query["member_of"] = student._id
date_lte = datetime.datetime.strptime("31/12/" + stud_reg_year, "%d/%m/%Y")
date_gte = datetime.datetime.strptime("1/1/" + stud_reg_year, "%d/%m/%Y")
query["attribute_set.registration_date"] = {'$gte': date_gte, '$lte': date_lte}
college_groupid = None
if college_id:
# Get selected college's groupid, where given college should belongs to MIS_admin group
college_groupid = node_collection.one({'_id': ObjectId(college_id), 'group_set': mis_admin._id, 'relation_set.has_group': {'$exists': True}},
{'relation_set.has_group': 1, 'name': 1}
)
response_dict["college"] = college_groupid.name
if college_groupid:
for each in college_groupid.relation_set:
if "has_group" in each.keys():
college_groupid = each["has_group"][0]
break
else:
college_groupid = None
groupid = ObjectId(groupid)
group_set_to_check = []
if groupid == mis_admin._id:
# It means group is either a college group or MIS_admin group
# In either case append MIS_admin group's ObjectId
# and if college_groupid exists, append it's ObjectId too!
if college_groupid:
group_set_to_check.append(college_groupid)
else:
group_set_to_check.append(mis_admin._id)
else:
# Otherwise, append given group's ObjectId
group_set_to_check.append(groupid)
if university_id:
university_id = ObjectId(university_id)
university = node_collection.one({'_id': university_id}, {'name': 1})
if university:
response_dict["university"] = university.name
query.update({'relation_set.student_belongs_to_university': university_id})
query.update({'group_set': {'$in': group_set_to_check}})
query.update({'status': u"PUBLISHED"})
rec = node_collection.collection.aggregate([{'$match': query},
{'$project': {'_id': 0,
'stud_id': '$_id',
'Enrollment Code': '$attribute_set.enrollment_code',
'Name': '$name',
# 'First Name': '$attribute_set.first_name',
# 'Middle Name': '$attribute_set.middle_name',
# 'Last Name': '$attribute_set.last_name',
'Reg# Date': '$attribute_set.registration_date',
'Gender': '$attribute_set.gender',
'Birth Date': '$attribute_set.dob',
'Religion': '$attribute_set.religion',
'Email ID': '$attribute_set.email_id',
'Languages Known': '$attribute_set.languages_known',
'Caste': '$relation_set.student_of_caste_category',
'Contact Number (Mobile)': '$attribute_set.mobile_number',
'Alternate Number / Landline': '$attribute_set.alternate_number',
'House / Street': '$attribute_set.house_street',
'Village': '$attribute_set.village',
'Taluka': '$attribute_set.taluka',
'Town / City': '$attribute_set.town_city',
'District': '$relation_set.person_belongs_to_district',
'State': '$relation_set.person_belongs_to_state',
'Pin Code': '$attribute_set.pin_code',
'Year of Passing 12th Standard': '$attribute_set.12_passing_year',
'Degree Name / Highest Degree': '$attribute_set.degree_name',
'Year of Study': '$attribute_set.degree_year',
'Stream / Degree Specialization': '$attribute_set.degree_specialization',
'College Enrolment Number / Roll No': '$attribute_set.college_enroll_num',
'College ( Graduation )': '$relation_set.student_belongs_to_college',
'Are you registered for NSS?': '$attribute_set.is_nss_registered'
}},
{'$sort': {'Name': 1}}
])
json_data = []
filename = ""
column_header = []
if len(rec["result"]):
                for each_dict in rec["result"]:
                    new_dict = {}
                    for each_key in each_dict:
                        if each_dict[each_key]:
                            # Aggregation projections wrap attribute/relation values in a list; unwrap one level
                            if type(each_dict[each_key]) == list:
                                data = each_dict[each_key][0]
                            else:
                                data = each_dict[each_key]
                            if type(data) == list:
                                # Multi-valued field: join scalar values, or resolve ObjectIds to node names
                                if type(data[0]) in [unicode, basestring, int]:
                                    new_dict[each_key] = ', '.join(str(d) for d in data)
                                elif type(data[0]) in [ObjectId]:
                                    d_list = []
                                    for oid in data:
                                        d = node_collection.one({'_id': oid}, {'name': 1})
                                        d_list.append(str(d.name))
                                    new_dict[each_key] = ', '.join(str(n) for n in d_list)
                                else:
                                    new_dict[each_key] = str(data)
                            elif type(data) == datetime.datetime:
                                new_dict[each_key] = data.strftime("%d/%m/%Y")
                            elif type(data) == long:
                                new_dict[each_key] = str(data)
                            elif type(data) == bool:
                                new_dict[each_key] = "Yes" if data else "No"
                            else:
                                new_dict[each_key] = str(data)
                        else:
                            new_dict[each_key] = ""
                    json_data.append(new_dict)
# Start: CSV file processing -------------------------------------------
column_header = [u"Enrollment Code", u'Name', u'Reg# Date', u'Gender', u'Birth Date', u'Religion', u'Email ID', u'Languages Known', u'Caste', u'Contact Number (Mobile)', u'Alternate Number / Landline', u'House / Street', u'Village', u'Taluka', u'Town / City', u'District', u'State', u'Pin Code', u'Year of Passing 12th Standard', u'Degree Name / Highest Degree', u'Year of Study', u'Stream / Degree Specialization', u'College Enrolment Number / Roll No', u'College ( Graduation )', u'Are you registered for NSS?']
t = time.strftime("%c").replace(":", "_").replace(" ", "_")
filename = "csv/" + "student_registration_data_" + t + ".csv"
filepath = os.path.join(STATIC_ROOT, filename)
filedir = os.path.dirname(filepath)
if not os.path.exists(filedir):
os.makedirs(filedir)
with open(filepath, 'wb') as csv_file:
fw = csv.DictWriter(csv_file, delimiter=',', fieldnames=column_header)
fw.writerow(dict((col,col) for col in column_header))
for row in json_data:
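                        # stud_id is not a CSV column: drop it before writing the row, then restore it for the JSON response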
v = {}
v["stud_id"] = row.pop("stud_id")
fw.writerow(row)
row.update(v)
# End: CSV file processing ----------------------------------------------
# Column headers to be displayed on html
column_headers = [
("Enrollment Code", "Enrollment Code"),
("stud_id", "Edit"),
("Name", "Name"),
("Reg# Date", "Reg# Date"),
("Gender", "Gender"),
("Birth Date", "Birth Date"),
("Email ID", "Email ID"),
]
# college = node_collection.one({'_id': ObjectId(college_id)}, {"name": 1})
students_count = len(json_data)
response_dict["success"] = True
response_dict["groupid"] = groupid
response_dict["app_id"] = app_id
response_dict["app_set_id"] = app_set_id
response_dict["filename"] = filename
response_dict["students_count"] = students_count
response_dict["column_headers"] = column_headers
response_dict["students_data_set"] = json_data
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
else:
error_message = "StudentFindError: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except OSError as oe:
error_message = "StudentFindError: " + str(oe) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except Exception as e:
error_message = "StudentFindError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
@get_execution_time
def get_statewise_data(request, group_id):
"""
This view returns a download link of CSV created consisting of students statistical data based on degree_year for each college.
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
download_link - file path of CSV created
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetching selected state's name
state_val = request.GET.get("state_val", None)
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1}
)
# Fetching selected state's node
state_gst = node_collection.one(
{'_type': "GSystemType", 'name': "State"}
)
state_gs = node_collection.one(
{
'member_of': state_gst._id,
'name': {'$regex': state_val, '$options': "i"},
'group_set': mis_admin._id
}
)
# Fetching universities belonging to that state
university_gst = node_collection.one(
{'_type': "GSystemType", 'name': "University"}
)
university_cur = node_collection.find(
{
'member_of': university_gst._id,
'group_set': mis_admin._id,
'relation_set.organization_belongs_to_state': state_gs._id
},
{
'name': 1,
'relation_set.affiliated_college': 1
}
).sort('name', 1)
student_gst = node_collection.one(
{'_type': "GSystemType", 'name': "Student"}
)
university_wise_data = {}
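            # university_wise_data maps university name -> {college name -> {'I': n, 'II': n, 'III': n, 'Total': n}}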
# Fetching university-wise data
for each_univ in university_cur:
university_wise_data[each_univ.name] = {}
# Fetching college(s) affiliated to given university
colleges_id_list = []
for rel in each_univ.relation_set:
if rel and "affiliated_college" in rel:
colleges_id_list = rel["affiliated_college"]
break
# Fetching college-wise data
college_cur = node_collection.find(
{'_id': {'$in': colleges_id_list}}
).sort('name', 1)
for each_college in college_cur:
university_wise_data[each_univ.name][each_college.name] = {}
rec = node_collection.collection.aggregate([
{
'$match': {
'member_of': student_gst._id,
'relation_set.student_belongs_to_college': each_college._id,
# 'attribute_set.registration_date': {
# '$gte': date_gte, '$lte': date_lte
# },
'status': u"PUBLISHED"
}
},
{
'$group': {
'_id': {
'College': '$each_college.name',
'Degree Year': '$attribute_set.degree_year'
},
'No of students': {'$sum': 1}
}
}
])
data = {}
for res in rec["result"]:
if res["_id"]["Degree Year"]:
data[res["_id"]["Degree Year"][0]] = \
res["No of students"]
if "I" not in data:
data["I"] = 0
if "II" not in data:
data["II"] = 0
if "III" not in data:
data["III"] = 0
data["Total"] = data["I"] + data["II"] + data["III"]
university_wise_data[each_univ.name][each_college.name] = data
response_dict["success"] = True
response_dict["university_wise_data"] = university_wise_data
return HttpResponse(json.dumps(response_dict))
else:
error_message = "CollegeSummaryDataError: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except OSError as oe:
error_message = "CollegeSummaryDataError: " + str(oe) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "CollegeSummaryDataError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_college_wise_students_data(request, group_id):
"""
This view returns a download link of CSV created consisting of students statistical data based on degree_year for each college.
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
download_link - file path of CSV created
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
try:
if request.is_ajax() and request.method == "GET":
groupid = request.GET.get("groupid", None)
mis_admin = node_collection.one({'_type': "Group", 'name': "MIS_admin"}, {'_id': 1})
college_gst = node_collection.one({'_type': "GSystemType", 'name': "College"}, {'_id': 1})
student = node_collection.one({'_type': "GSystemType", 'name': "Student"})
current_year = str(datetime.datetime.today().year)
date_gte = datetime.datetime.strptime("1/1/" + current_year, "%d/%m/%Y")
date_lte = datetime.datetime.strptime("31/12/" + current_year, "%d/%m/%Y")
college_cur = node_collection.find({'member_of': college_gst._id, 'group_set': mis_admin._id},
{'_id': 1, 'name': 1, 'relation_set': 1}).sort('name', 1)
json_data = []
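            # For every college under MIS_admin, count its PUBLISHED students registered in the current year, grouped by degree_year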
for i, each in enumerate(college_cur):
data = {}
college_group_id = None
for each_dict in each.relation_set:
if u"has_group" in each_dict.keys():
college_group_id = each_dict["has_group"]
break
rec = node_collection.collection.aggregate([{'$match': {'member_of': student._id,
'group_set': {'$in': [college_group_id, mis_admin._id]},
'relation_set.student_belongs_to_college': each._id,
'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
'status': u"PUBLISHED"
}},
{'$group': {
'_id': {'College': '$each.name', 'Degree Year': '$attribute_set.degree_year'},
'No of students': {'$sum': 1}
}}
])
data["College"] = each.name
for res in rec["result"]:
data[res["_id"]["Degree Year"][0]] = res["No of students"]
if "I" not in data:
data["I"] = 0
if "II" not in data:
data["II"] = 0
if "III" not in data:
data["III"] = 0
data["Total"] = data["I"] + data["II"] + data["III"]
json_data.append(data)
t = time.strftime("%c").replace(":", "_").replace(" ", "_")
filename = "csv/" + "college_wise_student_data_" + t + ".csv"
filepath = os.path.join(STATIC_ROOT, filename)
filedir = os.path.dirname(filepath)
if not os.path.exists(filedir):
os.makedirs(filedir)
column_header = [u"College", u"Program Officer", u"I", u"II", u"III", u"Total"]
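            # Hard-coded mapping of college name -> Program Officer name(s), used only to fill the "Program Officer" column of the CSV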
PO = {
"Agra College": ["Mr. Rajaram Yadav"],
"Arts College Shamlaji": ["Mr. Ashish Varia"],
"Baba Bhairabananda Mahavidyalaya": ["Mr. Mithilesh Kumar"],
"Balugaon College": ["Mr. Pradeep Pradhan"],
"City Women's College": ["Ms. Rajni Sharma"],
"Comrade Godavari Shamrao Parulekar College of Arts, Commerce & Science": ["Mr. Rahul Sable"],
"Faculty of Arts": ["Mr. Jokhim", "Ms. Tusharika Kumbhar"],
"Gaya College": ["Ms. Rishvana Sheik"],
"Govt. M. H. College of Home Science & Science for Women, Autonomous": [],
"Govt. Mahakoshal Arts and Commerce College": ["Ms. Davis Yadav"],
"Govt. Mahaprabhu Vallabhacharya Post Graduate College": ["Mr. Gaurav Sharma"],
"Govt. Rani Durgavati Post Graduate College": ["Mr. Asad Ullah"],
"Jamshedpur Women's College": ["Mr. Arun Agrawal"],
"Kalyan Post Graduate College": ["Mr. Praveen Kumar"],
"Kamla Nehru College for Women": ["Ms. Tusharika Kumbhar", "Ms. Thaku Pujari"],
"L. B. S. M. College": ["Mr. Charles Kindo"],
"Mahila College": ["Mr. Sonu Kumar"],
"Marwari College": ["Mr. Avinash Anand"],
"Matsyodari Shikshan Sanstha's Arts, Commerce & Science College": ["Ms. Jyoti Kapale"],
"Nirmala College": [],
"Ranchi College": [],
"Ranchi Women's College": ["Mr. Avinash Anand"],
"Shiv Chhatrapati College": ["Mr. Swapnil Sardar"],
"Shri & Smt. PK Kotawala Arts College": ["Mr. Sawan Kumar"],
"Shri VR Patel College of Commerce": ["Mr. Sushil Mishra"],
"Sree Narayana Guru College of Commerce": ["Ms. Bharti Bhalerao"],
"Sri Mahanth Shatanand Giri College": ["Mr. Narendra Singh"],
"St. John's College": ["Mr. Himanshu Guru"],
"The Graduate School College For Women": ["Mr. Pradeep Gupta"],
"Vasant Rao Naik Mahavidyalaya": ["Mr. Dayanand Waghmare"],
"Vivekanand Arts, Sardar Dalip Singh Commerce & Science College": ["Mr. Anis Ambade"]
}
with open(filepath, 'wb') as csv_file:
fw = csv.DictWriter(csv_file, delimiter=',', fieldnames=column_header)
fw.writerow(dict((col,col) for col in column_header))
for row in json_data:
if row[u"College"] not in PO or not PO[row[u"College"]]:
row[u"Program Officer"] = "Not assigned yet"
else:
row[u"Program Officer"] = ", ".join(PO[row[u"College"]])
fw.writerow(row)
response_dict["success"] = True
response_dict["download_link"] = (STATIC_URL + filename)
return HttpResponse(json.dumps(response_dict))
else:
error_message = "CollegeSummaryDataError: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except OSError as oe:
error_message = "CollegeSummaryDataError: " + str(oe) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "CollegeSummaryDataError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def set_user_link(request, group_id):
"""
This view creates a relationship (has_login) between the given node (node_id) and the author node (username);
and also subscribes the user to his/her respective college group
Arguments:
group_id - ObjectId of the currently selected group
node_id - ObjectId of the currently selected node_id
username - Username of the user
Returns:
    A dictionary consisting of following keys:-
    result - a bool variable indicating whether the link was created and the user subscribed to the college group(s)
    message - a string variable giving the status of the link (also the reason if any error occurs)
"""
gr_node = None
try:
if request.is_ajax() and request.method =="POST":
node_id = request.POST.get("node_id", "")
username = request.POST.get("username", "")
# Creating link between user-node and it's login credentials
author = node_collection.one({'_type': "Author", 'name': unicode(username)}, {'created_by': 1})
rt_has_login = node_collection.one({'_type': "RelationType", 'name': u"has_login"})
gr_node = create_grelation(node_id, rt_has_login, author._id)
if gr_node:
# Assigning the userid to respective private college groups's author_set,
# i.e. making user, member of college group to which he/she belongs
# Only after the given user's link (i.e., has_login relation) gets created
node = node_collection.one({'_id': ObjectId(node_id)}, {'member_of': 1})
node_type = node.member_of_names_list
has_group = node_collection.one({'_type': "RelationType", 'name': "has_group"}, {'_id': 1})
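            # has_group maps a college node to its private college group; the user is added to that group's author_set below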
if "Student" in node_type:
student_belonds_to_college = node_collection.one({'_type': "RelationType", 'name': "student_belongs_to_college"}, {'_id': 1})
colleges = triple_collection.find({
'_type': "GRelation", 'subject': node._id,
'relation_type.$id': student_belonds_to_college._id
})
for each in colleges:
g = triple_collection.one({'_type': "GRelation", 'subject': each.right_subject, 'relation_type.$id': has_group._id})
node_collection.collection.update({'_id': g.right_subject}, {'$addToSet': {'author_set': author.created_by}}, upsert=False, multi=False)
elif "Voluntary Teacher" in node_type:
trainer_of_college = node_collection.one({'_type': "RelationType", 'name': "trainer_of_college"}, {'_id': 1})
colleges = triple_collection.find({'_type': "GRelation", 'subject': node._id, 'relation_type.$id': trainer_of_college._id})
for each in colleges:
g = triple_collection.one({'_type': "GRelation", 'subject': each.right_subject, 'relation_type.$id': has_group._id})
node_collection.collection.update({'_id': g.right_subject}, {'$addToSet': {'author_set': author.created_by}}, upsert=False, multi=False)
return HttpResponse(json.dumps({'result': True, 'message': " Link successfully created. \n\n Also subscribed to respective college group(s)."}))
else:
error_message = " UserLinkSetUpError: Either not an ajax call or not a POST request!!!"
return HttpResponse(json.dumps({'result': False, 'message': " Link not created - Something went wrong in ajax call !!! \n\n Please contact system administrator."}))
except Exception as e:
error_message = "\n UserLinkSetUpError: " + str(e) + "!!!"
result = False
if gr_node:
# node_collection.collection.remove({'_id': gr_node._id})
result = True
error_message = " Link created successfully. \n\n But facing problem(s) in subscribing to respective college group(s)!!!\n Please use group's 'Subscribe members' button to do so !!!"
else:
result = False
error_message = " Link not created - May be invalid username entered !!!"
return HttpResponse(json.dumps({'result': result, 'message': error_message}))
@get_execution_time
def set_enrollment_code(request, group_id):
"""
"""
if request.is_ajax() and request.method == "POST":
return HttpResponse("Five digit code")
else:
error_message = " EnrollementCodeError: Either not an ajax call or not a POST request!!!"
raise Exception(error_message)
@get_execution_time
def get_students_assignments(request, group_id):
"""
Arguments:
group_id - ObjectId of the currently selected group
Returns:
"""
gr_node = None
try:
if request.is_ajax() and request.method =="GET":
user_id = 0
if "user_id" in request.GET:
user_id = int(request.GET.get("user_id", ""))
# Fetching college group
college_group = node_collection.one({'_id': ObjectId(group_id)}, {'name': 1, 'tags': 1, 'author_set': 1, 'created_by': 1})
page_res = node_collection.one({'_type': "GSystemType", 'name': "Page"}, {'_id': 1})
file_res = node_collection.one({'_type': "GSystemType", 'name': "File"}, {'_id': 1})
image_res = node_collection.one({'_type': "GSystemType", 'name': "Image"}, {'_id': 1})
video_res = node_collection.one({'_type': "GSystemType", 'name': "Video"}, {'_id': 1})
student_list = []
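            # With a user_id: return that student's resources grouped by type as JSON;
            # without: render per-student resource counts for the whole college group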
if user_id:
# Fetch assignment details of a given student
student_dict = {}
num_pages = []
num_images = []
num_videos = []
num_files = []
# Fetch student's user-group
user_group = node_collection.one({'_type': "Author", 'created_by': user_id})
student_dict["username"] = user_group.name
# Fetch all resources from student's user-group
resources = node_collection.find({'group_set': user_group._id}, {'name': 1, 'member_of': 1, 'created_at': 1})
for res in resources:
if page_res._id in res.member_of:
num_pages.append(res)
elif image_res._id in res.member_of:
num_images.append(res)
elif video_res._id in res.member_of:
num_videos.append(res)
elif file_res._id in res.member_of:
num_files.append(res)
student_dict["Pages"] = num_pages
student_dict["Images"] = num_images
student_dict["Videos"] = num_videos
student_dict["Files"] = num_files
return HttpResponse(json.dumps(student_dict, cls=NodeJSONEncoder))
else:
# Fetch assignment details of all students belonging to the college group
for user_id in college_group.author_set:
if user_id == college_group.created_by:
continue
student_dict = {}
num_pages = 0
num_images = 0
num_videos = 0
num_files = 0
# Fetch student's user-group
user_group = node_collection.one({'_type': "Author", 'created_by': user_id})
# Fetch student's node from his/her has_login relationship
student_has_login_rel = triple_collection.one({'_type': "GRelation", 'right_subject': user_group._id})
student_node = node_collection.one({'_id': student_has_login_rel.subject}, {'name': 1})
student_dict["Name"] = student_node.name
student_dict["user_id"] = user_id
# Fetch all resources from student's user-group
resources = node_collection.find({'group_set': user_group._id}, {'member_of': 1})
for res in resources:
if page_res._id in res.member_of:
num_pages = num_pages + 1
elif image_res._id in res.member_of:
num_images = num_images + 1
elif video_res._id in res.member_of:
num_videos = num_videos + 1
elif file_res._id in res.member_of:
num_files = num_files + 1
student_dict["Pages"] = num_pages
student_dict["Images"] = num_images
student_dict["Videos"] = num_videos
student_dict["Files"] = num_files
student_dict["Total"] = num_pages + num_images + num_videos + num_files
student_list.append(student_dict)
# Outside of above for loop
return render_to_response("ndf/student_statistics.html",
{'node': college_group,'student_list': student_list},
context_instance = RequestContext(request)
)
else:
error_message = "StudentDataGetError: Invalid ajax call!!!"
return StreamingHttpResponse(error_message)
except Exception as e:
print "\n StudentDataGetError: " + str(e)
raise Http404(e)
@get_execution_time
def get_districts(request, group_id):
"""
This view fetches district(s) belonging to given state.
Arguments:
    group_id - ObjectId of the currently selected group
    state_id - ObjectId of the currently selected state
Returns:
A dictionary consisting of following key:-
districts - a list variable consisting of two elements i.e.,
first-element: subject (District's ObjectId), second-element: manipulated-name-value (District's name)
message - a string variable giving the error-message
"""
gr_node = None
try:
if request.is_ajax() and request.method == "GET":
state_id = request.GET.get("state_id", "")
# districts -- [first-element: subject (District's ObjectId), second-element: manipulated-name-value (District's name)]
districts = []
# Fetching RelationType: District - district_of (name) | has_district (inverse_name) - State
rt_district_of = node_collection.one({'_type': "RelationType", 'name': "district_of"})
# Fetching all districts belonging to given state in sorted order by name
if rt_district_of:
cur_districts = triple_collection.find({
'_type': "GRelation", 'right_subject': ObjectId(state_id),
'relation_type.$id': rt_district_of._id
}).sort('name', 1)
if cur_districts.count():
for d in cur_districts:
districts.append([str(d.subject), d.name.split(" -- ")[0]])
else:
error_message = "No districts found"
raise Exception(error_message)
else:
error_message = "RelationType (district_of) doesn't exists"
raise Exception(error_message)
return HttpResponse(json.dumps(districts))
else:
error_message = " DistrictFetchError: Either not an ajax call or not a GET request!!!"
return HttpResponse(json.dumps({'message': " DistrictFetchError - Something went wrong in ajax call !!! \n\n Please contact system administrator."}))
except Exception as e:
error_message = "\n DistrictFetchError: " + str(e) + "!!!"
return HttpResponse(json.dumps({'message': error_message}))
@get_execution_time
def get_affiliated_colleges(request, group_id):
"""
This view returns list of colleges affiliated to given university.
Each element of the list is again a list where,
0th index-element: ObjectId of college
1st index-element: Name of college
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
affiliated_colleges - List consisting of affiliated colleges (ObjectIds & names)
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
university_id = request.GET.get("university_id", "")
req_university = None
req_affiliated_colleges = None
# Check whether any field has missing value or not
if university_id == "":
error_message = "AffiliatedCollegeFindError: Invalid data (No university selected)!!!"
raise Exception(error_message)
# Type-cast fetched field(s) into their appropriate type
university_id = ObjectId(university_id)
# Fetch required university
req_university = node_collection.one({'_id': university_id})
if not req_university:
error_message = "AffiliatedCollegeFindError: No university exists with given ObjectId("+university_id+")!!!"
raise Exception(error_message)
for each in req_university["relation_set"]:
if u"affiliated_college" in each.keys():
req_affiliated_colleges = node_collection.find({'_id': {'$in': each[u"affiliated_college"]}}, {'name': 1}).sort('name', 1)
req_affiliated_colleges_list = []
for each in req_affiliated_colleges:
req_affiliated_colleges_list.append([str(each._id), each.name])
response_dict["affiliated_colleges"] = req_affiliated_colleges_list
response_dict["success"] = True
response_dict["message"] = "This university ("+req_university.name+") has following list of affiliated colleges:"
for i, each in enumerate(req_affiliated_colleges_list):
response_dict["message"] += "\n\n " + str(i+1) + ". " + each[1]
return HttpResponse(json.dumps(response_dict))
else:
error_message = "AffiliatedCollegeFindError: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "AffiliatedCollegeFindError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_courses(request, group_id):
"""
This view returns list of NUSSD-Course(s) belonging to given course type.
Arguments:
group_id - ObjectId of the currently selected group
nussd_course_type - Type of NUSSD Course
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
unset_nc - dictionary consisting of NUSSD-Course(s)
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
nussd_course_type = request.GET.get("nussd_course_type", "")
# Check whether any field has missing value or not
if nussd_course_type == "":
error_message = "Invalid data: No data found in any of the " \
+ "field(s)!!!"
raise Exception(error_message)
# Fetch "Announced Course" GSystemType
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'name': 1}
)
if not mis_admin:
# If not found, throw exception
error_message = "'MIS_admin' (Group) doesn't exists... " \
+ "Please create it first!"
raise Exception(error_message)
# Fetch "Announced Course" GSystemType
nussd_course_gt = node_collection.one(
{'_type': "GSystemType", 'name': "NUSSD Course"}
)
if not nussd_course_gt:
# If not found, throw exception
error_message = "'NUSSD Course' (GSystemType) doesn't exists... " \
+ "Please create it first!"
raise Exception(error_message)
# Type-cast fetched field(s) into their appropriate type
nussd_course_type = unicode(nussd_course_type)
# Fetch registered NUSSD-Courses of given type
nc_cur = node_collection.find(
{
'member_of': nussd_course_gt._id,
'group_set': mis_admin._id,
'attribute_set.nussd_course_type': nussd_course_type
},
{'name': 1}
)
nc_dict = {}
if nc_cur.count():
# If found, append them to a dict
for each in nc_cur:
nc_dict[str(each._id)] = each.name
response_dict["success"] = True
response_dict["unset_nc"] = nc_dict
else:
response_dict["message"] = "No " + nussd_course_type + " type of course exists." \
+ " Please register"
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
else:
error_message = "AnnouncedCourseError: Either not an ajax call or" \
" not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "AnnouncedCourseError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_announced_courses_with_ctype(request, group_id):
"""
This view returns list of announced-course(s) that match given criteria
along with NUSSD-Course(s) for which match doesn't exists.
Arguments:
group_id - ObjectId of the currently selected group
nussd_course_type - Type of NUSSD course
Returns:
A dictionary consisting of following key-value pairs:-
acourse_ctype_list - list consisting of announced-course(s)
and/or NUSSD-Courses [if match not found]
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
nussd_course_type = request.GET.get("nussd_course_type", "")
ann_course_type = request.GET.get("ann_course_type", "0")
acourse_ctype_list = []
ac_of_colg = []
start_enroll = ""
end_enroll = ""
query = {}
# curr_date = datetime.datetime.now()
# Fetch "Announced Course" GSystemType
announced_course_gt = node_collection.one(
{'_type': "GSystemType", 'name': "Announced Course"}
)
if not announced_course_gt:
# If not found, throw exception
error_message = "Announced Course (GSystemType) doesn't " \
+ "exists... Please create it first!"
raise Exception(error_message)
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"}
)
# Type-cast fetched field(s) into their appropriate type
nussd_course_type = unicode(nussd_course_type)
ann_course_type = int(ann_course_type)
if ann_course_type == 1:
# Return all Announced Course(s) for which enrollment not started yet
query = {
"member_of": announced_course_gt._id,
"group_set": ObjectId(mis_admin._id),
"status": "PUBLISHED",
"attribute_set.nussd_course_type": nussd_course_type,
"attribute_set.ann_course_closure": u"Open",
"relation_set.course_has_enrollment": {"$exists": False}
}
college = {}
course = {}
ac_data_set = []
records_list = []
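                # Foundation Courses are announced per subject but displayed as one row per
                # (college, start_time, end_time) group, so their announced-course ids are collected together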
if nussd_course_type == "Foundation Course":
rec = node_collection.collection.aggregate([{
"$match": {
"member_of": announced_course_gt._id,
"group_set": ObjectId(mis_admin._id),
"status": "PUBLISHED",
"attribute_set.nussd_course_type": nussd_course_type,
"attribute_set.ann_course_closure": u"Open",
"relation_set.course_has_enrollment": {"$exists": False}
}
}, {
'$group': {
"_id": {
"start_time": "$attribute_set.start_time",
"end_time": "$attribute_set.end_time",
'college': '$relation_set.acourse_for_college'
},
"foundation_course": {"$addToSet": {'ac_id': "$_id", 'course': '$relation_set.announced_for', 'created_at': "$created_at"}},
"fc_ann_ids": {"$addToSet": "$_id"}
}
}, {
'$sort': {'created_at': 1}
}])
records_list = rec["result"]
if records_list:
for each in records_list:
newrec = {}
if each['_id']["college"]:
colg_id = each['_id']["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1,"attribute_set.enrollment_code":1})
newrec[u"college"] = c.name
newrec[u"college_id"] = c._id
newrec[u"created_at"] = each["foundation_course"][0]["created_at"]
college[colg_id] = {}
college[colg_id]["name"] = newrec[u"college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
newrec[u"university"] = u.name
newrec[u"university_id"] = u._id
else:
newrec["college"] = college[colg_id]["name"]
newrec["college_id"] = ObjectId(colg_id)
newrec["university_id"] = college[colg_id]["university_id"]
newrec["university"] = college[colg_id]["university"]
newrec[u"course"] = "Foundation Course"
newrec[u"ac_id"] = each["fc_ann_ids"]
newrec[u"name"] = "Foundation_Course_" + c["attribute_set"][0]["enrollment_code"] + "_" + each["_id"]["start_time"][0].strftime('%Y') + "_" + each["_id"]["end_time"][0].strftime('%Y')
ac_data_set.append(newrec)
else:
rec = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'course': '$relation_set.announced_for',
'college': '$relation_set.acourse_for_college',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = rec["result"]
if records_list:
for each in rec["result"]:
if each["college"]:
colg_id = each["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1})
each["college"] = c.name
each["college_id"] = c._id
college[colg_id] = {}
college[colg_id]["name"] = each["college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
each["university_id"] = u._id
else:
each["college"] = college[colg_id]["name"]
each["college_id"] = colg_id
each.update({"university": college[colg_id]["university"]})
each.update({"university_id": college[colg_id]["university_id"]})
if each["course"]:
course_id = each["course"][0][0]
if course_id not in course:
each["course"] = node_collection.one({"_id": course_id}).name
course[course_id] = each["course"]
else:
each["course"] = course[course_id]
ac_data_set.append(each)
column_headers = [
("name", "Announced Course Name"),
("course", "Course Name"),
("college", "College"),
("university", "University")
]
if records_list:
# If Announced Course(s) records found
response_dict["column_headers"] = column_headers
response_dict["ac_data_set"] = ac_data_set
else:
# Else, where No Announced Course exist
response_dict["ac_data_set"] = records_list
response_dict["message"] = "No Announced Course found of selected type (" + nussd_course_type + ") !"
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
if(ObjectId(group_id) == mis_admin._id):
ac_cur = node_collection.find({
'member_of': announced_course_gt._id,
'group_set': ObjectId(group_id),
'attribute_set.nussd_course_type': nussd_course_type
}, {
"name": 1, "attribute_set": 1, "relation_set": 1
})
else:
colg_gst = node_collection.one(
{'_type': "GSystemType", 'name': 'College'}
)
# Fetch Courses announced for given college (or college group)
# Get college node & courses announced for it from
# college group's ObjectId
req_colg_id = node_collection.one({
'member_of': colg_gst._id,
'relation_set.has_group': ObjectId(group_id)
}, {
'relation_set.college_has_acourse': 1
})
for rel in req_colg_id.relation_set:
if rel and "college_has_acourse" in rel:
ac_of_colg = rel["college_has_acourse"]
# Keeping only those announced courses which are active
# (i.e. PUBLISHED)
ac_cur = node_collection.find({
'_id': {'$in': ac_of_colg},
'member_of': announced_course_gt._id,
'attribute_set.nussd_course_type': nussd_course_type,
# 'relation_set.course_selected': {'$exists': True, '$not': {'$size': 0}},
'status': u"PUBLISHED"
# 'attribute_set.start_enroll':{'$lte': curr_date},
# 'attribute_set.end_enroll':{'$gte': curr_date}
}, {
"name": 1, "attribute_set": 1, "relation_set": 1
})
if ac_cur.count():
sce_gs_dict = {}
for each_ac in ac_cur:
# NOTE: This ajax-call is used in various templates
# Following is used especially only in new_create_batch.html
# Fetch enrolled students count from announced course node's course_selected
enrolled_stud_count = 0
if ann_course_type != 1:
for rel in each_ac.relation_set:
if rel and "course_has_enrollment" in rel:
if rel["course_has_enrollment"]:
sce_gs_id = rel["course_has_enrollment"][0]
str_sce_gs_id = str(sce_gs_id)
if str_sce_gs_id in sce_gs_dict:
enrolled_stud_count = sce_gs_dict[str_sce_gs_id]
break
sce_gs_node = node_collection.one({
"_id": ObjectId(sce_gs_id)
}, {
"attribute_set.has_approved": 1
})
sce_gs_dict[str_sce_gs_id] = enrolled_stud_count
for attr in sce_gs_node.attribute_set:
if attr and "has_approved" in attr:
if attr["has_approved"]:
enrolled_stud_count = len(attr["has_approved"])
sce_gs_dict[str_sce_gs_id] = enrolled_stud_count
break
break
each_ac["enrolled_stud_count"] = enrolled_stud_count
acourse_ctype_list.append(each_ac)
response_dict["success"] = True
info_message = "Announced Courses are available"
else:
response_dict["success"] = False
info_message = "No Announced Courses are available"
response_dict["message"] = info_message
response_dict["acourse_ctype_list"] = json.dumps(
acourse_ctype_list, cls=NodeJSONEncoder
)
return HttpResponse(json.dumps(response_dict))
else:
error_message = " AnnouncedCourseFetchError - Something went wrong in " \
+ "ajax call !!! \n\n Please contact system administrator."
return HttpResponse(json.dumps({
'message': error_message
}))
except Exception as e:
error_message = "\n AnnouncedCourseFetchError: Either you are in user " \
+ "group or something went wrong!!!"
return HttpResponse(json.dumps({'message': error_message}))
@get_execution_time
def get_colleges(request, group_id, app_id):
"""This view returns HttpResponse with following data:
- List of college(s) affiliated to given university where
Program Officer is not subscribed
- List of college(s) affiliated to given university where
Course(s) is/are already announced for given duration
- List of college(s) affiliated to given university where
Course(s) is/are not announced for given duration
Arguments:
group_id - ObjectId of the currently selected group
univ_id - ObjectId of currently selected University
start_time - Start time of announcement (MM/YYYY)
end_time - End time of announcement (MM/YYYY)
dc_courses_id_list - List of ObjectId(s) of Course(s)
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
unassigned_po_colg_list - List of college(s) affiliated to given university
where Program Officer is not subscribed
already_announced_in_colg_list - List of college(s) affiliated to given
university where Course(s) is/are already announced for given duration
drawer_widget - Drawer containing list of college(s) affiliated to given
university where Course(s) is/are not announced for given duration
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
nussd_course_type = request.GET.get("nussd_course_type", "")
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"}, {'name': 1}
)
if not mis_admin:
# If not found, throw exception
error_message = "'MIS_admin' (Group) doesn't exists... " \
"Please create it first!"
raise Exception(error_message)
univ_id = request.GET.get("univ_id", "")
start_time = request.GET.get("start_time", "")
end_time = request.GET.get("end_time", "")
dc_courses_id_list = request.GET.getlist("dc_courses_id_list[]")
# all_univs = request.GET.get("all_univs", "")
# Check whether any field has missing value or not
if univ_id == "" or start_time == "" or end_time == "":
error_message = "Invalid data: " \
"No data found in any of the field(s)!!!"
raise Exception(error_message)
# Fetch all college groups
college = node_collection.one(
{'_type': "GSystemType", 'name': "College"}, {'name': 1}
)
if not college:
# If not found, throw exception
error_message = "'College' (GSystemType) doesn't exists... "\
"Please create it first!"
raise Exception(error_message)
# Type-cast fetched field(s) into their appropriate type
univ_id = ObjectId(univ_id)
start_time = datetime.datetime.strptime(start_time, "%m/%Y")
end_time = datetime.datetime.strptime(end_time, "%m/%Y")
dc_courses_id_list = [ObjectId(dc) for dc in dc_courses_id_list]
# Fetch the node of selected university
# university_node = node_collection.one(
# {'_id': univ_id},
# {'relation_set': 1, 'name': 1}
# )
# Fetch the list of colleges that are affiliated to
# the selected university (univ_id)
colg_under_univ_id = node_collection.find(
{
'member_of': college._id,
'relation_set.college_affiliated_to': univ_id
},
{
'name': 1, 'member_of': 1, 'created_by': 1,
'created_at': 1, 'content': 1,
'relation_set.has_officer_incharge': 1,
'relation_set.college_has_acourse': 1
}
).sort('name', 1)
list_colg = []
unassigned_po_colg_list = []
already_announced_in_colg_list = []
for each in colg_under_univ_id:
is_po_exists = False
if each.relation_set:
for rel in each.relation_set:
if rel and "has_officer_incharge" in rel:
if rel["has_officer_incharge"]:
is_po_exists = True
if rel and "college_has_acourse" in rel:
if rel["college_has_acourse"]:
if dc_courses_id_list:
acourse_exists = node_collection.find_one({
'_id': {'$in': rel["college_has_acourse"]},
'relation_set.announced_for': {'$in': dc_courses_id_list},
'attribute_set.start_time': start_time,
'attribute_set.end_time': end_time,
'attribute_set.ann_course_closure': "Open",
'status': "PUBLISHED"
})
if acourse_exists:
# the list stores college names, so check membership by name (not by ObjectId)
if each.name not in already_announced_in_colg_list:
already_announced_in_colg_list.append(each.name)
if each.name in already_announced_in_colg_list:
continue
elif is_po_exists:
if each not in list_colg:
list_colg.append(each)
else:
if each not in unassigned_po_colg_list:
unassigned_po_colg_list.append(each.name)
response_dict["already_announced_in_colg_list"] = \
already_announced_in_colg_list
response_dict["unassigned_PO_colg_list"] = unassigned_po_colg_list
if list_colg:
drawer_template_context = edit_drawer_widget(
"RelationType", group_id, None, None,
checked="announced_course_create_edit",
left_drawer_content=list_colg
)
drawer_template_context["widget_for"] = \
"announced_course_create_edit"
drawer_widget = render_to_string(
'ndf/drawer_widget.html', drawer_template_context,
context_instance=RequestContext(request)
)
response_dict["drawer_widget"] = drawer_widget
msg_string = "Following are the list of colleges where " + \
"selected Course(s) should be announced:"
else:
msg_string = "There are no colleges under this university " + \
"where selected Course(s) could be announced!!!"
# nc_dict = {}
if colg_under_univ_id.count():
response_dict["success"] = True
else:
msg_string = "No college is affiliated to under selected " + \
"University!!!"
response_dict["success"] = False
# response_dict["unset_nc"] = nc_dict
response_dict["message"] = msg_string
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "CollegeFetchError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_anncourses_allstudents(request, group_id):
"""
This view returns ...
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
query = {}
try:
if request.is_ajax() and request.method == "GET":
registration_year = str(request.GET.get("registration_year", ""))
all_students = request.GET.get("all_students", "")
degree_year = request.GET.get("degree_year", "")
degree_name = request.GET.get("degree_name", "")
sce_gs_id = request.GET.get("sce_gs_id", "")
acourse_val = request.GET.getlist("acourse_val[]", "")
for i, each in enumerate(acourse_val):
acourse_val[i] = ObjectId(each)
# Following parameters to be used for edit_drawer_widget()
# node = None
# checked = None
enrolled_stud_count = 0
non_enrolled_stud_count = 0
colg_of_acourse_id = None
# Check whether any field has missing value or not
if registration_year == "" or all_students == "":
# registration_year = datetime.datetime.now().year.__str__()
all_students = u"false"
# error_message = "Invalid data: No data found in any of the field(s)!!!"
student = node_collection.one({'_type': "GSystemType", 'name': "Student"})
sce_gs = node_collection.one({'_id':ObjectId(sce_gs_id)},
{'member_of': 1, 'attribute_set.has_enrolled': 1, 'relation_set.for_college':1}
)
# From Announced Course node fetch College's ObjectId
# acourse_node = node_collection.find_one(
# {'_id': {'$in': acourse_val}, 'relation_set.acourse_for_college': {'$exists': True}},
# {'attribute_set': 1, 'relation_set.acourse_for_college': 1}
# )
for rel in sce_gs.relation_set:
if rel:
colg_of_acourse_id = rel["for_college"][0]
break
date_gte = datetime.datetime.strptime("1/1/"+registration_year, "%d/%m/%Y")
date_lte = datetime.datetime.strptime("31/12/"+registration_year, "%d/%m/%Y")
# query = {
# 'member_of': student._id,
# 'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
# # 'attribute_set.degree_year':degree_year,
# # 'attribute_set.degree_name':degree_name,
# 'relation_set.student_belongs_to_college': ObjectId(colg_of_acourse_id)
# }
# If College's ObjectId exists, fetch respective College's group
if colg_of_acourse_id:
colg_of_acourse = node_collection.one(
{'_id': colg_of_acourse_id, 'relation_set.has_group': {'$exists': True}},
{'relation_set.has_group': 1}
)
if colg_of_acourse:
for rel in colg_of_acourse.relation_set:
if rel and "has_group" in rel:
# If rel exists, it means it's has_group
# then update query
query = {
'$or': [
{
'member_of': student._id,
'group_set': rel["has_group"][0],
'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
},
{
'member_of': student._id,
'relation_set.student_belongs_to_college': ObjectId(colg_of_acourse_id),
'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
}
]
}
break
if degree_year:
query.update({'attribute_set.degree_year': degree_year })
if degree_name:
query.update({'attribute_set.degree_name': degree_name })
# Check whether StudentCourseEnrollment created for given acourse_val
# Set node as StudentCourseEnrollment node
# and checked as "has_enrolled", i.e. AT of StudentCourseEnrollment node
enrolled_stud_list = []
if sce_gs:
for attr in sce_gs.attribute_set:
if attr and "has_enrolled" in attr:
enrolled_stud_list = attr["has_enrolled"]
enrolled_stud_count = str(len(attr["has_enrolled"]))
break
# sce_gs.get_neighbourhood(sce_gs.member_of)
# node = sce_gs
# checked = "has_enrolled"
res = None
if all_students == u"true":
all_students_text = "All students (including enrolled ones)"
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 1,
'name': '$name',
'degree_name': '$attribute_set.degree_name',
'degree_year':'$attribute_set.degree_year',
'registration_year':'$attribute_set.registration_year'
}
},
{
'$sort': {'name': 1}
}
])
total_students_count = len(res["result"])
all_students_text += " [Count("+str(total_students_count)+")]"
non_enrolled_stud_count = total_students_count - int(enrolled_stud_count)
elif all_students == u"false":
query.update({'_id': {'$nin': enrolled_stud_list}})
all_students_text = "Only non-enrolled students"
# Find students which are not enrolled in selected announced course
# query.update({'relation_set.selected_course': {'$ne': acourse_node._id}})
query.update({'relation_set.selected_course': {'$nin': acourse_val}})
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 1,
'name': '$name',
'degree_name': '$attribute_set.degree_name',
'degree_year':'$attribute_set.degree_year',
'registration_year':'$attribute_set.registration_year'
}
},
{
'$sort': {'name': 1}
}
])
non_enrolled_stud_count = str(len(res["result"]))
all_students_text += " [Count("+non_enrolled_stud_count+")]"
# response_dict["announced_courses"] = []
column_headers = [
("name", "Name"),
("degree_name", "Degree"),
("degree_year", "Year"),
]
response_dict["column_headers"] = column_headers
response_dict["success"] = True
response_dict["students_data_set"] = res["result"]
if not res["result"]:
response_dict["message"] = "No filtered results found"
response_dict["enrolled_stud_count"] = enrolled_stud_count
response_dict["non_enrolled_stud_count"] = non_enrolled_stud_count
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
else:
error_message = "EnrollInCourseError: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "EnrollInCourseError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_course_details_for_trainer(request, group_id):
"""
This view returns a dictionary holding data required for trainer's enrollment
into given announced course(s).
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
course_enrollement_details - Dictionary that has following structure:
Key: Course-name
Value: A list of dictionary where this dictionary's structure is as follows:
1) Key: ann_course_id; Value: ObjectId of corresponding Announced Course
2) Key: university; Value: University-name
3) Key: college; Value: College GSystem's document
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
course_type = request.GET.get("course_type", "")
trainer_type = request.GET.get("trainer_type", "")
# Check whether any field has missing value or not
if course_type == "" or trainer_type == "":
error_message = "Invalid data: No data found in any of the field(s)!!!"
raise Exception(error_message)
# Using below text variable to fetch specific attribute based on which
# type of trainer we are dealing with
# Voluntary Teacher -- voln_tr_qualifications
# Master Trainer -- mast_tr_qualifications
fetch_attribute_for_trainer = ""
bool_trainer_type = None
if trainer_type == "Voluntary Teacher":
fetch_attribute_for_trainer = "voln_tr_qualifications"
bool_trainer_type = True
elif trainer_type == "Master Trainer":
fetch_attribute_for_trainer = "mast_tr_qualifications"
bool_trainer_type = False
# Fetch required GSystemTypes (NUSSD Course, Announced Course, University, College)
course_gst = node_collection.one({
'_type': "GSystemType", 'name': "NUSSD Course"
}, {
'_id': 1
})
college_gst = node_collection.one({
'_type': "GSystemType", 'name': "College"
}, {
'_id': 1
})
university_gst = node_collection.one({
'_type': "GSystemType", 'name': "University"
}, {
'_id': 1
})
mis_admin = node_collection.one({
'_type': "Group", 'name': "MIS_admin"
}, {
'_id': 1
})
course_enrollement_details = {}
course_requirements = {}
college_dict = {}
university_dict = {}
course_dict = {}
# Fetching NUSSD Course(s) registered under MIS_admin group
nussd_courses_cur = node_collection.find({
"member_of": course_gst._id,
"group_set": mis_admin._id,
"attribute_set.nussd_course_type": course_type
}, {
"name": 1,
"attribute_set." + fetch_attribute_for_trainer: 1
})
for course in nussd_courses_cur:
course_dict[course.name] = course._id
# Set given course's requirements
for requirement in course.attribute_set:
if requirement:
course_requirements[course.name] = requirement[fetch_attribute_for_trainer]
course_enrollement_details[course.name] = []
if nussd_courses_cur.count():
college_cur = node_collection.find({
"member_of": college_gst._id,
"group_set": mis_admin._id
}, {
"name": 1,
"college_affiliated_to": 1
})
for college in college_cur:
university_gs = None
if college._id not in university_dict:
university_gs = node_collection.find_one({
'member_of': university_gst._id,
'relation_set.affiliated_college': college._id
}, {
'_id': 1,
'name': 1
})
if university_gs:
university_dict[college._id] = university_gs
college_data = {}
college_data["college"] = college.name
college_data["university"] = university_gs.name
if bool_trainer_type:
# If bool_trainer_type (True, i.e Voluntary Teacher)
# Set organization_id as College's ObjectId
# As creating linking between Voluntary Teacher & College
college_data["organization_id"] = college._id
else:
# If bool_trainer_type (False, i.e Master Trainer)
# Set organization_id as University's ObjectId
# As creating linking between Master Trainer & University
college_data["organization_id"] = university_gs._id
college_dict[college._id] = college_data
if college._id in university_dict:
for course_name in course_enrollement_details.keys():
data_dict = {}
data_dict["ann_course_id"] = course_dict[course_name]
data_dict.update(college_dict[college._id])
course_enrollement_details[course_name].append(data_dict)
response_dict["course_enrollement_details"] = course_enrollement_details
response_dict["course_requirements"] = course_requirements
response_dict["success"] = True
response_dict["message"] = ""
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
else:
error_message = "TrainerCourseDetailError: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "TrainerCourseDetailError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_students_for_approval(request, group_id):
"""This returns data-review list of students that need approval for Course enrollment.
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "POST":
enrollment_id = request.POST.get("enrollment_id", "")
sce_gst = node_collection.one({'_type': "GSystemType", 'name': "StudentCourseEnrollment"})
if sce_gst:
sce_gs = node_collection.one(
{'_id': ObjectId(enrollment_id), 'member_of': sce_gst._id, 'group_set': ObjectId(group_id), 'status': u"PUBLISHED"},
{'member_of': 1}
)
approval_nodes = []
data = {}
if sce_gs:
sce_gs.get_neighbourhood(sce_gs.member_of)
data["pk"] = str(sce_gs._id)
data["CollegeId"] = sce_gs.for_college[0]._id
data["College"] = sce_gs.for_college[0].name
course_id_list = []
for each in sce_gs.for_acourse:
course_id_list.append(each._id.__str__())
data["CourseId"] = ",".join(course_id_list)
if len(sce_gs.for_acourse) > 1:
# It means it's a Foundation Course's (FC) enrollment
start_time = None
end_time = None
for each in sce_gs.for_acourse[0].attribute_set:
if not each:
pass
elif "start_time" in each:
start_time = each["start_time"]
elif "end_time" in each:
end_time = each["end_time"]
data["Course"] = "Foundation_Course" + "_" + start_time.strftime("%b-%Y") + "_" + end_time.strftime("%b-%Y")
else:
# Courses other than FC
data["Course"] = sce_gs.for_acourse[0].name
# data["CompletedOn"] = sce_gs.completed_on
data["Enrolled"] = len(sce_gs.has_enrolled)
# approve_task = sce_gs.has_current_approval_task[0]
approve_task = sce_gs.has_current_approval_task[0]
approve_task.get_neighbourhood(approve_task.member_of)
# Code should be written in create_task: rename it create_update_task
# Patch: doing here only
# if data["Enrolled"] > 0:
# approve_task.Status = u"In Progress"
# else:
# approve_task.Status = u"Resolved"
# approve_task.save()
data["Status"] = approve_task.Status
if sce_gs.has_key("has_approved"):
if sce_gs.has_approved:
data["Approved"] = len(sce_gs.has_approved)
else:
data["Approved"] = None
if sce_gs.has_key("has_rejected"):
if sce_gs.has_rejected:
data["Rejected"] = len(sce_gs.has_rejected)
else:
data["Rejected"] = None
enrolled_students_list = []
if sce_gs.has_enrolled:
enrolled_students_list = sce_gs.has_enrolled
approved_students_list = []
if sce_gs.has_approved:
approved_students_list = sce_gs.has_approved
rejected_students_list = []
if sce_gs.has_rejected:
rejected_students_list = sce_gs.has_rejected
# Update Enrolled students list
updated_enrolled_students_list = []
for each_id in enrolled_students_list:
if (each_id not in approved_students_list) and (each_id not in rejected_students_list):
updated_enrolled_students_list.append(each_id)
res = node_collection.collection.aggregate([
{
'$match': {
'_id':{"$in":updated_enrolled_students_list}
}
}, {
'$project': {
'_id': 1,
'name': '$name',
'degree_name': '$attribute_set.degree_name',
'degree_year':'$attribute_set.degree_year',
# 'registration_year':{"$date": "$attribute_set.registration_date"}
'registration_year':"$attribute_set.registration_date"
}
},
{
'$sort': {'name': 1}
}
])
# To convert full registration date
for each in res["result"]:
reg_year = each["registration_year"][0]
each["registration_year"] = datetime.datetime.strftime(reg_year,"%Y")
enrollment_columns = [
("name", "Name"),
("degree_name", "Degree"),
("degree_year", "Year of Study"),
("registration_year", "Registration Year")
]
response_dict["success"] = True
response_dict["enrollment_details"] = data
response_dict["column_headers"] = enrollment_columns
response_dict["student_approval_data"] = res["result"]
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except Exception as e:
error_message = "StudentCourseApprovalError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def approve_students(request, group_id):
"""This returns approved and/or rejected students count respectively.
"""
try:
response_dict = {'success': False, 'message': ""}
if request.is_ajax() and request.method == "POST":
approval_state = request.POST.get("approval_state", "")
enrollment_id = request.POST.get("enrollment_id", "")
enrollment_id = ObjectId(enrollment_id)
course_ids = request.POST.get("course_id", "")
course_ids = [(ObjectId(each.strip()), each.strip()) for each in course_ids.split(",")]
course_name = request.POST.get("course_name", "")
students_selected = request.POST.getlist("students_selected[]", "")
students_selected = [ObjectId(each_str_id) for each_str_id in students_selected]
college_id = request.POST.get("college_id", "")
college_id = ObjectId(college_id)
college_name = request.POST.get("college_name", "")
sce_gs = node_collection.collection.aggregate([{
"$match": {
"_id": enrollment_id, "group_set": ObjectId(group_id),
"relation_set.has_current_approval_task": {"$exists": True},
"status": u"PUBLISHED"
}
}, {
"$project": {
"has_enrolled": "$attribute_set.has_enrolled",
"has_approved": "$attribute_set.has_approved",
"has_rejected": "$attribute_set.has_rejected",
"has_approval_task": "$attribute_set.has_approval_task",
"has_current_approval_task": "$relation_set.has_current_approval_task"
}
}])
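# Assumed shape of sce_gs["result"][0] as used below: each projected field comes
# back wrapped in a list (attribute_set/relation_set are arrays), which is why
# the code unwraps with [0] before using the values, e.g. (hypothetical values):
# {"has_enrolled": [[ObjectId("..."), ...]], "has_approved": [[...]],
#  "has_rejected": [[...]], "has_approval_task": [{...}],
#  "has_current_approval_task": [[ObjectId("...")]]}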
user_id = int(request.user.id) # getting django user's id
user_name = request.user.username # getting django user's username
remaining_count = None
enrolled_list = []
approved_list = []
rejected_list = []
error_id_list = []
has_approval_task_dict = {}
approved_or_rejected_list = []
has_approval_task_dict = sce_gs["result"][0]["has_approval_task"]
if has_approval_task_dict:
has_approval_task_dict = has_approval_task_dict[0]
enrolled_list = sce_gs["result"][0]["has_enrolled"]
if enrolled_list:
enrolled_list = enrolled_list[0]
approved_list = sce_gs["result"][0]["has_approved"]
if approved_list:
approved_list = approved_list[0]
rejected_list = sce_gs["result"][0]["has_rejected"]
if rejected_list:
rejected_list = rejected_list[0]
at_name = ""
course_enrollment_status_text = u""
has_approved_or_rejected_at = None
if approval_state == "Approve":
at_name = "has_approved"
course_enrollment_status_text = u"Enrollment Approved"
approved_or_rejected_list = approved_list
elif approval_state == "Reject":
at_name = "has_rejected"
course_enrollment_status_text = u"Enrollment Rejected"
approved_or_rejected_list = rejected_list
course_enrollment_status_at = node_collection.one({
'_type': "AttributeType", 'name': "course_enrollment_status"
})
# For each student, approve enrollment into given course(Domain)/courses(Foundation Course)
# For that update value as "Enrollment Approved" against corresponding course (Course ObjectId)
# in "course_enrollment_status" attribute of respective student
# This should be done only for Course(s) which exists in "selected_course" relation for that student
stud_cur = node_collection.collection.aggregate([{
"$match": {
"_id": {"$in": students_selected}
}
}, {
"$project": {
"_id": 1,
"selected_course": "$relation_set.selected_course",
"course_enrollment_status": "$attribute_set.course_enrollment_status"
}
}])
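# Each projected student document looks roughly like this (keys from the
# $project stage, values hypothetical):
# {"_id": ObjectId("..."),
#  "selected_course": [[ObjectId("..."), ...]],
#  "course_enrollment_status": [{"<course-id-as-string>": u"Enrollment Approved", ...}]}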
# Use multiprocessing to speed up the loop below, i.e. performing
# approval/rejection of students into their respective course(s) in parallel
prev_approved_or_rejected_list = []
new_list = []
prev_approved_or_rejected_list.extend(approved_or_rejected_list)
new_list = mp_approve_students(
stud_cur["result"], course_ids,
course_enrollment_status_text,
course_enrollment_status_at,
prev_approved_or_rejected_list,
num_of_processes=multiprocessing.cpu_count()
)
approved_or_rejected_list.extend(new_list)
has_approved_or_rejected_at = node_collection.one({
'_type': "AttributeType", 'name': at_name
})
try:
attr_node = create_gattribute(
enrollment_id,
has_approved_or_rejected_at,
approved_or_rejected_list
)
except Exception as e:
error_id_list.append(enrollment_id)
# Update student's counts in enrolled, approved & rejecetd list
enrolled_count = len(enrolled_list)
if approval_state == "Approve":
approved_count = len(approved_or_rejected_list)
else:
approved_count = len(approved_list)
if approval_state == "Reject":
rejected_count = len(approved_or_rejected_list)
else:
rejected_count = len(rejected_list)
remaining_count = enrolled_count - (approved_count + rejected_count)
# Update status of Approval task
has_current_approval_task_id = sce_gs["result"][0]["has_current_approval_task"]
if has_current_approval_task_id:
has_current_approval_task_id = has_current_approval_task_id[0][0]
task_status_at = node_collection.one({
'_type': "AttributeType", 'name': "Status"
})
task_status_value = ""
task_status_msg = ""
if remaining_count == 0:
if enrolled_count == (approved_count + rejected_count):
task_status_value = u"Closed"
task_status_msg = "This task has been closed after successful completion " + \
"of approval process of students."
else:
task_status_value = u"In Progress"
task_status_msg = "This task is in progress."
try:
# Update the approval task's status as "Closed"
task_dict = {}
task_dict["_id"] = has_current_approval_task_id
task_dict["Status"] = task_status_value
# Update description of Approval task only at time of it's closure
if task_status_value is u"Closed":
task_dict["created_by_name"] = user_name
task_message = task_status_msg + " Following are the details " + \
"of this approval process:-" + \
"\n Total No. of student(s) enrolled: " + str(enrolled_count) + \
"\n Total No. of student(s) approved: " + str(approved_count) + \
"\n Total No. of student(s) rejected: " + str(rejected_count) + \
"\n Total No. of student(s) remaining: " + str(remaining_count)
task_dict["content_org"] = unicode(task_message)
task_dict["modified_by"] = user_id
task_node = create_task(task_dict)
if task_status_value == u"Closed":
# Update the StudentCourseEnrollment node's status as "CLOSED"
at_type_node = None
at_type_node = node_collection.one({
'_type': "AttributeType",
'name': u"enrollment_status"
})
if at_type_node:
at_node = create_gattribute(enrollment_id, at_type_node, u"CLOSED")
# Set completion status for the closed approval task in the StudentCourseEnrollment node's has_approval_task
completed_on = datetime.datetime.now()
if str(has_current_approval_task_id) in has_approval_task_dict:
has_approval_task_dict[str(has_current_approval_task_id)] = {
"completed_on": completed_on, "completed_by": user_id
}
at_type_node = None
at_type_node = node_collection.one({
'_type': "AttributeType",
'name': u"has_approval_task"
})
if at_type_node:
attr_node = create_gattribute(enrollment_id, at_type_node, has_approval_task_dict)
# Send intimation to PO's and admin to create batches
from_user = user_id
url_link_without_domain_part = ""
url_link = ""
activity_text = "batch creation"
msg = "This is to inform you that approval process of " + \
"students for " + college_name + " college has been " + \
"completed with following details:" + \
"\n\tCourse name: " + course_name + \
"\n\tTotal No. of student(s) enrolled: " + str(enrolled_count) + \
"\n\tTotal No. of student(s) approved: " + str(approved_count) + \
"\n\tTotal No. of student(s) rejected: " + str(rejected_count) + \
"\n\tTotal No. of student(s) remaining: " + str(remaining_count) + \
"\n\nYou can proceed with batch creation for given course in this college."
# Fetch college group to get Program Officers of the college
college_group_node = node_collection.find_one({
"_type": "Group", "relation_set.group_of": college_id
}, {
"created_by": 1, "group_admin": 1
})
to_django_user_list = []
user_id_list = []
user_id_list.extend(college_group_node.group_admin)
user_id_list.append(college_group_node.created_by)
for each_user_id in user_id_list:
user_obj = User.objects.get(id=each_user_id)
if user_obj not in to_django_user_list:
to_django_user_list.append(user_obj)
if url_link_without_domain_part:
site = Site.objects.get(pk=1)
site = site.name.__str__()
domain = "http://" + site
url_link = domain + url_link_without_domain_part
render_label = render_to_string(
"notification/label.html",
{
"sender": from_user,
"activity": activity_text,
"conjunction": "-",
"link": url_link
}
)
notification.create_notice_type(render_label, msg, "notification")
notification.send(to_django_user_list, render_label, {"from_user": from_user})
except Exception as e:
error_id_list.append(has_current_approval_task_id)
response_dict["success"] = True
response_dict["enrolled"] = enrolled_count
response_dict["approved"] = approved_count
response_dict["rejected"] = rejected_count
response_dict["remaining"] = remaining_count
response_dict["task_status"] = task_status_value
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except Exception as e:
error_message = "ApproveStudentsError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def mp_approve_students(student_cur, course_ids, course_enrollment_status_text, course_enrollment_status_at, approved_or_rejected_list, num_of_processes=4):
def worker(student_cur, course_ids, course_enrollment_status_text, course_enrollment_status_at, approved_or_rejected_list, out_q):
updated_approved_or_rejected_list = []
for each_stud in student_cur:
# Fetch student node along with selected_course and course_enrollment_status
student_id = each_stud["_id"]
selected_course = each_stud["selected_course"]
if selected_course:
selected_course = selected_course[0]
# Fetch course_enrollment_status -- Holding Course(s) along with it's enrollment status
course_enrollment_status = each_stud["course_enrollment_status"]
if course_enrollment_status:
course_enrollment_status = course_enrollment_status[0]
else:
course_enrollment_status = {}
for each_course_id, str_course_id in course_ids:
# If ObjectId exists in selected_course and ObjectId(in string format)
# exists as key in course_enrollment_status
# Then only update status as "Enrollment Approved"/"Enrollment Rejected"
if each_course_id in selected_course and str_course_id in course_enrollment_status:
# course_enrollment_status.update({str_course_id: course_enrollment_status_text})
course_enrollment_status[str_course_id] = course_enrollment_status_text
try:
at_node = create_gattribute(student_id, course_enrollment_status_at, course_enrollment_status)
if at_node:
# If status updated, then only update approved_or_rejected_list
# by appending given student's ObjectId into it
if student_id not in approved_or_rejected_list and student_id not in updated_approved_or_rejected_list:
# approved_or_rejected_list.append(student_id)
updated_approved_or_rejected_list.append(student_id)
except Exception as e:
error_id_list.append(student_id)
continue
out_q.put(updated_approved_or_rejected_list)
# Each process gets a 'chunksize'-sized slice of student_cur and a queue
# to put its output list into
out_q = multiprocessing.Queue()
chunksize = int(math.ceil(len(student_cur) / float(num_of_processes)))
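# Worked example of the chunking below (illustrative numbers): with 10 student
# records and 4 processes, chunksize = ceil(10 / 4.0) = 3, so the slices handed
# to the workers are [0:3], [3:6], [6:9] and [9:12] (the last one is shorter).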
procs = []
for i in range(num_of_processes):
p = multiprocessing.Process(
target=worker,
args=(student_cur[chunksize * i:chunksize * (i + 1)], course_ids, course_enrollment_status_text, course_enrollment_status_at, approved_or_rejected_list, out_q)
)
procs.append(p)
p.start()
# Collect all results into a single result list. We know how many lists
# with results to expect.
resultlist = []
for i in range(num_of_processes):
resultlist.extend(out_q.get())
# Wait for all worker processes to finish
for p in procs:
p.join()
return resultlist
@get_execution_time
def get_students_for_batches(request, group_id):
"""
This view returns ...
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
"""
response_dict = {'success': False, 'message': ""}
b_arr=[]
try:
if request.is_ajax() and request.method == "GET":
btn_id = request.GET.get('btn_id', "")
batch_id = request.GET.get('node_id', "")
ac_id = request.GET.get('ac_id', "")
batch_name_index = 1
batches_for_same_course = []
all_batches_in_grp = []
batch_mem_dict = {}
batch_member_list = []
batch_gst = node_collection.one({'_type':"GSystemType", 'name':"Batch"})
batch_for_group = node_collection.find({'member_of': batch_gst._id, 'relation_set.has_course': ObjectId(ac_id)})
for each1 in batch_for_group:
existing_batch = node_collection.one({'_id': ObjectId(each1._id)})
batch_name_index += 1
for each2 in each1.relation_set:
if "has_batch_member" in each2:
batch_member_list.extend(each2['has_batch_member'])
break
each1.get_neighbourhood(each1.member_of)
batch_mem_dict[each1.name] = each1
# College's ObjectId is required, if student record can't be found
# using group's ObjectId
# A use-case where records created via csv file appends MIS_admin group's
# ObjectId in group_set field & not college-group's ObjectId
ann_course = node_collection.one({'_id': ObjectId(ac_id)}, {'relation_set.acourse_for_college': 1,"relation_set.course_has_enrollment":1})
sce_id = None
for rel in ann_course.relation_set:
if rel and "course_has_enrollment" in rel:
sce_id = rel["course_has_enrollment"][0]
break
sce_node = node_collection.one({"_id":ObjectId(sce_id)},{"attribute_set.has_approved":1})
approved_students_list = []
for attr in sce_node.attribute_set:
if attr and "has_approved" in attr:
approved_students_list = attr["has_approved"]
break
approve_not_in_batch_studs = [stud_id for stud_id in approved_students_list if stud_id not in batch_member_list]
student = node_collection.one({'_type': "GSystemType", 'name': "Student"})
res = node_collection.find(
{
'_id': {"$in": approve_not_in_batch_studs},
'member_of': student._id
# '$or': [
# {'group_set': ObjectId(group_id)},
# {'relation_set.student_belongs_to_college': college_id}
# ],
# 'relation_set.selected_course': ObjectId(ac_id)
},
{'_id': 1, 'name': 1, 'member_of': 1, 'created_by': 1, 'created_at': 1, 'content': 1}
).sort("name", 1)
drawer_template_context = edit_drawer_widget("RelationType", group_id, None, None, None, left_drawer_content=res)
drawer_template_context["widget_for"] = "new_create_batch"
drawer_widget = render_to_string(
'ndf/drawer_widget.html',
drawer_template_context,
context_instance = RequestContext(request)
)
response_dict["success"] = True
response_dict["drawer_widget"] = drawer_widget
response_dict["student_count"] = res.count()
response_dict["batch_name_index"] = batch_name_index
response_dict["batches_for_same_course"] = json.dumps(batch_mem_dict, cls=NodeJSONEncoder)
return HttpResponse(json.dumps(response_dict))
else:
error_message = "Batch Drawer: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "Batch Drawer: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
# ====================================================================================================
@get_execution_time
def edit_task_title(request, group_id):
'''
This function will edit task's title
'''
if request.is_ajax() and request.method =="POST":
taskid = request.POST.get('taskid',"")
title = request.POST.get('title',"")
task = node_collection.find_one({'_id':ObjectId(taskid)})
task.name = title
task.save()
return HttpResponse(task.name)
else:
raise Http404
@get_execution_time
def edit_task_content(request, group_id):
'''
This function will edit task's content
'''
if request.is_ajax() and request.method =="POST":
taskid = request.POST.get('taskid',"")
content_org = request.POST.get('content_org',"")
task = node_collection.find_one({'_id':ObjectId(taskid)})
task.content_org = unicode(content_org)
# Required to link temporary files with the current user who is modifying this document
usrname = request.user.username
filename = slugify(task.name) + "-" + usrname + "-"
task.content = org2html(content_org, file_prefix=filename)
task.save()
return HttpResponse(task.content)
else:
raise Http404
@get_execution_time
def insert_picture(request, group_id):
if request.is_ajax():
resource_list=node_collection.find({'_type' : 'File', 'mime_type' : u"image/jpeg" },{'name': 1})
resources=list(resource_list)
n=[]
for each in resources:
each['_id'] =str(each['_id'])
file_obj = node_collection.one({'_id':ObjectId(str(each['_id']))})
if file_obj.fs_file_ids:
grid_fs_obj = file_obj.fs.files.get(file_obj.fs_file_ids[0])
each['fname']=grid_fs_obj.filename
each['name'] = each['name']
n.append(each)
return StreamingHttpResponse(json.dumps(n))
# =============================================================================
@get_execution_time
def close_event(request, group_id, node):
# close_event checks whether the event's start date-time is before or after the current date-time.
# If the current date-time is past the event time, it changes the edit button
# on the GUI to "reschedule" and stores the current date-time in the database as a reference
# for how long the event is still allowed to be rescheduled.
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
create_gattribute(ObjectId(node),reschedule_event,{"reschedule_till":datetime.datetime.today(),"reschedule_allow":False})
return HttpResponse("event closed")
@get_execution_time
def save_time(request, group_id, node):
start_time = request.POST.get('start_time','')
end_time = request.POST.get('end_time','')
reschedule_event_start = node_collection.one({"_type":"AttributeType","name":"start_time"})
reschedule_event_end = node_collection.one({"_type":"AttributeType","name":"end_time"})
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
start_time= parse_template_data(datetime.datetime,start_time, date_format_string="%d/%m/%Y %H:%M")
end_time= parse_template_data(datetime.datetime,end_time, date_format_string="%d/%m/%Y %H:%M")
create_gattribute(ObjectId(node),reschedule_event_start,start_time)
create_gattribute(ObjectId(node),reschedule_event_end,end_time)
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
event_node = node_collection.one({"_id":ObjectId(node)})
# below code gets the old value from the database
# if value exists it append new value to it
# else a new time is assigned to it
a = {}
for i in event_node.attribute_set:
if unicode('event_edit_reschedule') in i.keys():
a = i['event_edit_reschedule']
a['reschedule_till'] = start_time
create_gattribute(ObjectId(node),reschedule_event,a)
#change the name of the event based on new time
if event_node:
name = event_node.name
name_arr = name.split("--")
new_name = unicode(str(name_arr[0]) + "--" + str(name_arr[1]) + "--" + str(start_time))
event_node.name = new_name
event_node.save()
return HttpResponse("Session rescheduled")
@get_execution_time
def check_date(request, group_id, node):
reschedule = request.POST.get('reschedule','')
test_output = node_collection.find({"_id":ObjectId(node),"attribute_set.start_time":{'$gt':datetime.datetime.today()}})
a = {}
if test_output.count() == 0 and reschedule == 'True':
test_output = node_collection.find({"_id":ObjectId(node),"attribute_set.event_edit_reschedule.reschedule_till":{'$gt':datetime.datetime.today()}})
if test_output.count() != 0:
message = "event Open"
if test_output.count() == 0:
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
event_node = node_collection.one({"_id":ObjectId(node)})
a=""
for i in event_node.attribute_set:
if unicode('event_edit_reschedule') in i.keys():
a = i['event_edit_reschedule']
if a:
for i in a:
if unicode('reschedule_allow') in i:
a['reschedule_allow'] = False
create_gattribute(ObjectId(node),reschedule_event,a)
else:
create_gattribute(ObjectId(node),reschedule_event,{'reschedule_allow':False})
event_node = node_collection.one({"_id":ObjectId(node)})
message = "event closed"
return HttpResponse(message)
@get_execution_time
def reschedule_task(request, group_id, node):
task_dict={}
# name of the Program Officer who has initiated this task
'''Required keys: _id[optional], name, group_set, created_by, modified_by, contributors, content_org,
created_by_name, Status, Priority, start_time, end_time, Assignee, has_type
'''
task_groupset=node_collection.one({"_type":"Group","name":"MIS_admin"})
a=[]
b=[]
c=[]
listing=task_groupset.group_admin
listing.append(task_groupset.created_by)
return_message=""
values=[]
if request.user.id in listing:
reschedule_attendance = node_collection.one({"_type":"AttributeType","name":"reschedule_attendance"})
marks_entry = node_collection.find({"_type":"AttributeType","name":"marks_entry_completed"})
reschedule_type = request.POST.get('reschedule_type','')
reshedule_choice = request.POST.get('reshedule_choice','')
session = request.POST.get('session','')
end_time = node_collection.one({"name":"end_time"})
from datetime import date,time,timedelta
date1 = datetime.date.today() + timedelta(2)
ti = datetime.time(0,0)
event_start_time = ""
start_time = request.POST.get('reschedule_date','')
b = parse_template_data(datetime.datetime,start_time, date_format_string="%d/%m/%Y %H:%M")
#fetch event
event_node = node_collection.one({"_id":ObjectId(node)})
reschedule_dates = []
# for any type, change the event status to "Rescheduled" if the request comes
# for generating a task for rescheduling an event
event_status = node_collection.one({"_type":"AttributeType","name":"event_status"})
create_gattribute(ObjectId(node),event_status,unicode('Rescheduled'))
task_id= {}
if reschedule_type == 'event_reschedule' :
for i in event_node.attribute_set:
if unicode('event_edit_reschedule') in i.keys():
if unicode ('reschedule_dates') in i['event_edit_reschedule']:
reschedule_dates = i['event_edit_reschedule']['reschedule_dates']
if unicode("event_date_task") in i.keys():
task_id = i["event_date_task"]
if unicode("start_time") in i.keys():
event_start_time = i["start_time"]
if task_id:
for i in task_id:
if unicode('Task') == i:
tid = i
task_node = node_collection.find({"_id":ObjectId(task_id["Task"])})
task_attribute = node_collection.one({"_type":"AttributeType","name":"Status"})
create_gattribute(ObjectId(task_node[0]._id),task_attribute,unicode("Closed"))
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_date_task"})
task_id['Reschedule_Task'] = True
create_gattribute(ObjectId(node),reschedule_event,task_id)
reschedule_dates.append(event_start_time)
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
create_gattribute(ObjectId(node),reschedule_event,{"reschedule_till":b,"reschedule_allow":True,"reschedule_dates":reschedule_dates})
return_message = "Event Dates Re-Schedule Opened"
else:
event_details = ""
for i in event_node.attribute_set:
if unicode('reschedule_attendance') in i.keys():
if unicode ('reschedule_dates') in i['reschedule_attendance']:
reschedule_dates = i['reschedule_attendance']['reschedule_dates']
if unicode('marks_entry_completed') in i.keys():
marks_entry_completed = i['marks_entry_completed']
if unicode("event_attendance_task") in i.keys():
task_id = i["event_attendance_task"]
if task_id:
for i in task_id:
if unicode('Task') == i:
tid = task_id['Task']
task_node = node_collection.find({"_id":ObjectId(tid)})
task_attribute = node_collection.one({"_type":"AttributeType","name":"Status"})
create_gattribute(ObjectId(task_node[0]._id),task_attribute,unicode("Closed"))
break
reschedule_dates.append(datetime.datetime.today())
if reshedule_choice == "Attendance" or reshedule_choice == "" :
create_gattribute(ObjectId(node),reschedule_attendance,{"reschedule_till":b,"reschedule_allow":True,"reschedule_dates":reschedule_dates})
if session != str(1) and reshedule_choice == "Assessment" :
create_gattribute(ObjectId(node),marks_entry[0],False)
task_id['Reschedule_Task'] = True
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_attendance_task"})
create_gattribute(ObjectId(node),reschedule_event,task_id)
return_message="Event Re-scheduled"
else:
reschedule_type = request.POST.get('reschedule_type','')
reshedule_choice = request.POST.get('reshedule_choice','')
if reschedule_type == "attendance_reschedule":
if reshedule_choice == "Attendance" or reshedule_choice == "":
content = "Attendance"
if reshedule_choice == "Assessment":
content = "Assessment"
else:
content = "start time"
Mis_admin=node_collection.find({"name":"MIS_admin"})
Mis_admin_list=Mis_admin[0].group_admin
Mis_admin_list.append(Mis_admin[0].created_by)
path=request.POST.get('path','')
site = Site.objects.get(pk=1)
site = site.name.__str__()
event_reschedule_link = "http://" + site + path
b.append(task_groupset._id)
glist_gst = node_collection.one({'_type': "GSystemType", 'name': "GList"})
task_type = []
task_type.append(node_collection.one({'member_of': glist_gst._id, 'name':"Re-schedule Event"})._id)
task_dict.update({"has_type" :task_type})
task_dict.update({'name':unicode("Re-schedule Event" + " " + content)})
task_dict.update({'group_set':b})
task_dict.update({'created_by':request.user.id})
task_dict.update({'modified_by':request.user.id})
task_dict.update({'content_org':unicode("Please Re-Schedule the Following event"+" \t " "\n- Please click [[" + event_reschedule_link + "][here]] to reschedule event " + " " + content )})
task_dict.update({'created_by_name':request.user.username})
task_dict.update({'Status':unicode("New")})
task_dict.update({'Priority':unicode('Normal')})
date1=datetime.date.today()
ti=datetime.time(0,0)
Today=datetime.datetime.combine(date1,ti)
task_dict.update({'start_time':Today})
task_dict.update({'Assignee':Mis_admin_list})
task = create_task(task_dict)
if reschedule_type == 'event_reschedule' :
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_date_task"})
create_gattribute(ObjectId(node),reschedule_event,{'Task':ObjectId(task._id),'Reschedule_Task':False})
else:
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_attendance_task"})
create_gattribute(ObjectId(node),reschedule_event,{'Task':ObjectId(task._id),'Reschedule_Task':False})
return_message="Message is sent to central office soon you will get update."
return HttpResponse(return_message)
@get_execution_time
def event_assginee(request, group_id, app_set_instance_id=None):
Event= request.POST.getlist("Event","")
Event_attended_by=request.POST.getlist("Event_attended_by[]","")
marks=request.POST.getlist("marks","")
assessmentdone = request.POST.get("assessmentdone","")
attendancedone = request.POST.get("attendancedone","")
attendancesession = request.POST.get("attendancesession","")
oid=node_collection.find_one({"_type" : "RelationType","name":"has_attended"})
Assignment_rel=node_collection.find({"_type":"AttributeType","name":"Assignment_marks_record"})
Assessmentmarks_rel=node_collection.find({"_type":"AttributeType","name":"Assessment_marks_record"})
performance_record=node_collection.find({"_type":"AttributeType","name":"performance_record"})
student_details=node_collection.find({"_type":"AttributeType","name":"attendance_record"})
marks_entry_completed=node_collection.find({"_type":"AttributeType","name":"marks_entry_completed"})
reschedule_attendance = node_collection.one({"_type":"AttributeType","name":"reschedule_attendance"})
event_node = node_collection.one({"_id":ObjectId(app_set_instance_id)})
# code for saving Attendance and Assessment marks of Assignment and Assessment sessions
attendedlist=[]
for info in Event_attended_by:
a=ast.literal_eval(info)
if (a['Name'] != 'undefined'):
student_dict={}
if (a['save'] == '2' or a['save'] == '3'):
student_dict.update({"marks":a['Attendance_marks'],'Event':ObjectId(Event[0])})
create_gattribute(ObjectId(a['Name']),Assignment_rel[0], student_dict)
if(a['save'] == '2' or a['save'] == '4'):
student_dict.update({"marks":a['Assessment_marks'],'Event':ObjectId(Event[0])})
create_gattribute(ObjectId(a['Name']),Assessmentmarks_rel[0], student_dict)
if(a['save'] == '5'):
student_dict.update({"marks":a['Assessment_marks'],'Event':ObjectId(Event[0])})
create_gattribute(ObjectId(a['Name']),performance_record[0], student_dict)
create_gattribute(ObjectId(a['Name']),student_details[0],{"atandance":a['Presence'],'Event':ObjectId(Event[0])})
if(a['Presence'] == 'True'):
attendedlist.append(a['Name'])
if attendancesession != str(1):
create_gattribute(ObjectId(app_set_instance_id),marks_entry_completed[0],False)
if assessmentdone == 'True':
event_status = node_collection.one({"_type":"AttributeType","name":"event_status"})
create_gattribute(ObjectId(app_set_instance_id),event_status,unicode('Completed'))
create_gattribute(ObjectId(app_set_instance_id),marks_entry_completed[0],True)
reschedule_dates={}
if attendancedone == 'True' or assessmentdone == 'True':
for j in event_node.attribute_set:
if unicode('reschedule_attendance') in j.keys():
reschedule_dates = j['reschedule_attendance']
reschedule_dates["reschedule_allow"] = False
create_gattribute(ObjectId(app_set_instance_id),reschedule_attendance,reschedule_dates)
if attendancesession == str(1):
event_status = node_collection.one({"_type":"AttributeType","name":"event_status"})
create_gattribute(ObjectId(app_set_instance_id),event_status,unicode('Completed'))
create_grelation(ObjectId(app_set_instance_id), oid,attendedlist)
return HttpResponse("Details Entered")
@get_execution_time
def fetch_course_name(request, group_id,Course_type):
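# Returns a JSON list of matching NUSSD Courses shaped like the following
# illustrative example (hypothetical values):
# [{"name": "Course A", "id": "55f0..."}, {"name": "Course B", "id": "55f1..."}]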
courses=node_collection.find({"attribute_set.nussd_course_type":unicode(Course_type)})
course_detail={}
course_list=[]
for i in courses:
course_detail.update({"name":i.name})
course_detail.update({"id":str(i._id)})
course_list.append(course_detail)
course_detail={}
return HttpResponse(json.dumps(course_list))
@get_execution_time
def fetch_course_Module(request, group_id,announced_course):
#Course_name
batch = request.GET.get('batchid','')
superdict={}
module_Detail={}
module_list=[]
event_type_ids=[]
courses = node_collection.one({"_id":ObjectId(announced_course)},{'relation_set.announced_for':1,'relation_set.acourse_for_college':1})
eventtypes = node_collection.find({'_type': "GSystemType", 'name': {'$in': ["Classroom Session", "Exam"]}})
for i in eventtypes:
event_type_ids.append(i._id)
for i in courses.relation_set:
if unicode('announced_for') in i.keys():
announced_for = i['announced_for']
if unicode('acourse_for_college') in i.keys():
for j in i['acourse_for_college']:
group_of = j
courses = node_collection.find({"_id":{'$in':announced_for}})
trainers = node_collection.find({"relation_set.trainer_teaches_course_in_college":[ObjectId(courses[0]._id),ObjectId(group_of)]})
course_modules = node_collection.find({"_id":{'$in':courses[0].collection_set}})
# a module is listed only if it still has sessions that are not yet part of any event for this batch
checklist=[]
for i in course_modules:
checklist = i.collection_set
# check if any session from this module's collection_set is already attached to an event of this batch
event = node_collection.find({"member_of":{'$in':event_type_ids},"relation_set.session_of":{'$elemMatch':{'$in':i.collection_set}}
,'relation_set.event_has_batch':ObjectId(batch)})
for k in event:
for j in k.relation_set:
if unicode('session_of') in j.keys():
if j['session_of'][0] in checklist:
checklist.remove(j['session_of'][0])
if len(checklist) > 0:
module_Detail.update({"name":i.name})
module_Detail.update({"id":str(i._id)})
module_list.append(module_Detail)
module_Detail={}
trainerlist=[]
trainer_detail={}
for i in trainers:
trainer_detail.update({"name":i.name})
trainer_detail.update({"id":str(i._id)})
trainerlist.append(trainer_detail)
trainer_detail={}
superdict['Module']=json.dumps(module_list,cls=NodeJSONEncoder)
superdict['trainer'] = json.dumps(trainerlist,cls=NodeJSONEncoder)
return HttpResponse(json.dumps(superdict))
@get_execution_time
def fetch_batch_student(request, group_id,Course_name):
dict1 = {}
list1 = []
try:
courses = node_collection.one({"_id": ObjectId(Course_name)}, {'relation_set.has_batch_member': 1})
for i in courses.relation_set:
if unicode('has_batch_member') in i.keys():
has_batch = i['has_batch_member']
for i in has_batch:
dict1.update({"id":str(i)})
list1.append(dict1)
dict1={}
return HttpResponse(json.dumps(list1))
except Exception:
return HttpResponse(json.dumps(list1))
@get_execution_time
def fetch_course_session(request, group_id,Course_name):
dict1 = {}
list1 = []
try:
courses = node_collection.one({"_id": ObjectId(Course_name)})
batch = request.GET.get('batchid', '')
checklist = []
event_type_ids = []
checklist = courses.collection_set
eventtypes = node_collection.find({'_type': "GSystemType", 'name': {'$in': ["Classroom Session", "Exam"]}})
for i in eventtypes:
event_type_ids.append(i._id)
module_node = node_collection.find({"member_of":{'$in':event_type_ids},"relation_set.session_of":{'$elemMatch':{'$in':checklist}}
,'relation_set.event_has_batch':ObjectId(batch)})
for i in module_node:
for k in i.relation_set:
if unicode('session_of') in k.keys():
if k['session_of'][0] in checklist:
checklist.remove(k['session_of'][0])
course_modules=node_collection.find({"_id":{'$in':checklist}})
for i in course_modules:
dict1.update({"name":i.name})
dict1.update({"id":str(i._id)})
for j in i.attribute_set:
if "course_structure_minutes" in j.keys() :
dict1.update({"minutes":str(j["course_structure_minutes"])})
list1.append(dict1)
dict1={}
return HttpResponse(json.dumps(list1))
except:
return HttpResponse(json.dumps(list1))
@get_execution_time
def fetch_course_batches(request, group_id,Course_name):
#courses=node_collection.one({"_id":ObjectId(Course_name)})
#courses=node_collection.find({"relation_set.announced_for":ObjectId(Course_name)})
try:
dict1={}
list1=[]
batch=node_collection.find({"_type":"GSystemType","name":"Batch"})
batches=node_collection.find({"member_of":batch[0]._id,"relation_set.has_course":ObjectId(Course_name)})
for i in batches:
dict1.update({"name":i.name})
dict1.update({"id":str(i._id)})
list1.append(dict1)
dict1={}
return HttpResponse(json.dumps(list1))
except:
return HttpResponse(json.dumps(list1))
@get_execution_time
def save_csv(request,group_id,app_set_instance_id=None):
#column_header = [u'Name', 'Presence','Attendance_marks','Assessment_marks']
json_data=request.POST.getlist("attendance[]","")
column_header=request.POST.getlist("column[]","")
t = time.strftime("%c").replace(":", "_").replace(" ", "_")
filename = "csv/" + "Attendance_data_" + t + ".csv"
filepath = os.path.join(STATIC_ROOT, filename)
filedir = os.path.dirname(filepath)
if not os.path.exists(filedir):
os.makedirs(filedir)
data={}
with open(filepath, 'wb') as csv_file:
fw = csv.DictWriter(csv_file, delimiter=',', fieldnames=column_header)
fw.writerow(dict((col,col) for col in column_header))
for row in list(json_data):
v = {}
fw.writerow(ast.literal_eval(row))
return HttpResponse((STATIC_URL + filename))
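# Illustrative request payload handled by save_csv above (the field values are assumed,
# not taken from the project): the client posts two parallel lists,
#
#   column[]     -> ['Name', 'Presence', 'Attendance_marks', 'Assessment_marks']
#   attendance[] -> ["{'Name': 'abc', 'Presence': 'Present', 'Attendance_marks': '5', 'Assessment_marks': '7'}", ...]
#
# Each attendance[] entry is a Python-literal dict string keyed by the column names,
# which is why ast.literal_eval() is applied before writing each CSV row.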
def get_assessment(request,group_id,app_set_instance_id):
node = node_collection.one({'_type': "GSystem", '_id': ObjectId(app_set_instance_id)})
node.get_neighbourhood(node.member_of)
marks_list=[]
Assesslist=[]
val=False
for i in node.has_attendees:
dict1={}
dict1.update({'name':i.name})
for j in i.attribute_set:
if j.keys()[0] == 'performance_record':
if (str(j['performance_record']['Event']) == str(app_set_instance_id)) is True:
val=True
dict1.update({'marks':j['performance_record']['marks']})
else:
dict1.update({'marks':""})
dict1.update({'id':str(i._id)})
if val is True:
marks_list.append(dict1)
else:
dict1.update({'marks':"0"})
marks_list.append(dict1)
return HttpResponse(json.dumps(marks_list))
@get_execution_time
def get_attendees(request,group_id,node):
#get all the ObjectId of the people who would attend the event
node=node_collection.one({'_id':ObjectId(node)})
attendieslist=[]
    #the code below collects the ObjectIds of the possible attendees
for i in node.relation_set:
if ('has_attendees' in i):
for j in i['has_attendees']:
attendieslist.append(j)
attendee_name=[]
    #if a batch or group id is found, fetch the attendees from the members of that batch;
    #if individual members were selected from the interface, their names are returned directly
#attendees_id=node_collection.find({ '_id':{'$in': attendieslist}},{"group_admin":1})
attendees_id=node_collection.find({ '_id':{'$in': attendieslist}})
for i in attendees_id:
#if i["group_admin"]:
# User_info=(collectigeton.Node.find({'_type':"Author",'created_by':{'$in':i["group_admin"]}}))
#else:
User_info=(node_collection.find({'_id':ObjectId(i._id)}))
for i in User_info:
attendee_name.append(i)
attendee_name_list=[]
for i in attendee_name:
if i not in attendee_name_list:
attendee_name_list.append(i)
a=[]
d={}
for i in attendee_name_list:
d={}
d.update({'name':i.name})
d.update({'id':str(i._id)})
a.append(d)
return HttpResponse(json.dumps(a))
@get_execution_time
def get_attendance(request,group_id,node):
#method is written to get the presence and absence of attendees for the event
node=node_collection.one({'_id':ObjectId(node)})
attendieslist=[]
    #the code below collects the ObjectIds of the possible attendees
for i in node.relation_set:
if ('has_attendees' in i):
for j in i['has_attendees']:
attendieslist.append(j)
attendee_name=[]
attendees_id=node_collection.find({ '_id':{'$in': attendieslist}})
for i in attendees_id:
#if i["group_admin"]:
# User_info=(node_collection.find({'_type':"Author",'created_by':{'$in':i["group_admin"]}}))
#else:
User_info=(node_collection.find({'_id':ObjectId(i._id)}))
for i in User_info:
attendee_name.append(i)
attendee_name_list=[]
for i in attendee_name:
if i not in attendee_name_list:
attendee_name_list.append(i)
a=[]
d={}
has_attended_event=node_collection.find({'_id':ObjectId(node.pk)},{'relation_set':1})
#get all the objectid
attendieslist=[]
for i in has_attended_event[0].relation_set:
if ('has_attended' in i):
for j in i['has_attended']:
attendieslist.append(j)
#create the table
count=0
attendance=[]
temp_attendance={}
    #compare the expected attendees with those who actually attended the event
    #and mark each one's presence or absence accordingly
node.get_neighbourhood(node.member_of)
Assess_marks_list=[]
Assign_marks_list=[]
Assesslist=[]
marks_list=[]
val=False
assign=False
asses=False
member_of=node_collection.one({"_id":{'$in':node.member_of}})
for i in attendee_name_list:
if (i._id in attendieslist):
attendees=node_collection.one({"_id":ObjectId(i._id)})
dict1={}
dict2={}
for j in attendees.attribute_set:
if member_of.name != "Exam":
if unicode('Assignment_marks_record') in j.keys():
if (str(j['Assignment_marks_record']['Event']) == str(node._id)) is True:
val=True
assign=True
dict1.update({'marks':j['Assignment_marks_record']['marks']})
else:
dict1.update({'marks':"0"})
if unicode('Assessment_marks_record') in j.keys():
if(str(j['Assessment_marks_record']['Event']) == str(node._id)) is True:
val=True
asses=True
dict2.update({'marks':j['Assessment_marks_record']['marks']})
else:
dict2.update({'marks':"0"})
if member_of.name == "Exam":
dict1.update({'marks':"0"})
if unicode('performance_record') in j.keys():
if(str(j['performance_record']['Event']) == str(node._id)) is True:
val=True
asses=True
dict2.update({'marks':j['performance_record']['marks']})
else:
dict2.update({'marks':"0"})
temp_attendance.update({'id':str(i._id)})
temp_attendance.update({'name':i.name})
temp_attendance.update({'presence':'Present'})
if dict1.has_key('marks'):
temp_attendance.update({'Assignment_marks':dict1['marks']})
if dict2.has_key('marks'):
temp_attendance.update({'Assessment_marks':dict2['marks']})
attendance.append(temp_attendance)
else:
temp_attendance.update({'id':str(i._id)})
temp_attendance.update({'name':i.name})
temp_attendance.update({'presence':'Absent'})
temp_attendance.update({'Assignment_marks':"0"})
temp_attendance.update({'Assessment_marks':"0"})
attendance.append(temp_attendance)
temp_attendance={}
return HttpResponse(json.dumps(attendance))
@get_execution_time
def attendees_relations(request,group_id,node):
test_output = node_collection.find({"_id":ObjectId(node),"attribute_set.start_time":{'$lt':datetime.datetime.today()}})
if test_output.count() != 0:
event_has_attended=node_collection.find({'_id':ObjectId(node)})
column_list=[]
column_count=0
course_assignment=False
course_assessment=False
reschedule = True
#marks = False
marks = True
member_of=node_collection.one({"_id":{'$in':event_has_attended[0].member_of}})
if member_of.name != "Exam":
for i in event_has_attended[0].relation_set:
            #True if the has_attended relation exists (attendance has already been taken)
            #False means attendance has not yet been taken for the event
if ('has_attended' in i):
a = "True"
else:
a = "False"
if ('session_of' in i):
session=node_collection.one({"_id":{'$in':i['session_of']}})
for i in session.attribute_set:
if unicode('course_structure_assignment') in i:
if i['course_structure_assignment'] == True:
course_assignment=True
if unicode('course_structure_assessment') in i:
if i['course_structure_assessment'] == True:
course_assessment=True
                # meaning of the column_count codes:
                #2 :- populate both assessment and assignment marks columns
                #3 :- populate only the assignment marks column
                #4 :- populate only the assessment marks column
                #1 :- populate only the attendance part; do not populate assessment or assignment columns
if course_assessment == True:
column_count = 4
if course_assignment == True:
column_count = 3
if (course_assessment == True and course_assignment == True):
column_count = 2
if (course_assignment == False and course_assessment == False):
column_count = 1
column_list.append(a)
column_list.append(column_count)
else:
column_count=5
column_list.append('True')
column_list.append(column_count)
node = node_collection.one({"_id":ObjectId(node)})
for i in node.attribute_set:
if unicode("reschedule_attendance") in i.keys():
if unicode('reschedule_allow') in i['reschedule_attendance']:
reschedule=i['reschedule_attendance']['reschedule_allow']
if unicode("marks_entry_completed") in i.keys():
marks=i["marks_entry_completed"]
column_list.append(reschedule)
column_list.append(marks)
else:
column_list=[]
return HttpResponse(json.dumps(column_list))
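# Illustrative response from attendees_relations above (the values are assumed): for a
# non-exam session that has both an assignment and an assessment, with attendance
# already taken, rescheduling allowed and marks entry still open, the view returns
#
#   ["True", 2, True, False]
#
# i.e. [attendance_taken, column_code, reschedule_allow, marks_entry_completed].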
@get_execution_time
def page_scroll(request,group_id,page):
Group_Activity = node_collection.find(
{'group_set':ObjectId(group_id)}).sort('last_update', -1)
if Group_Activity.count() >=10:
paged_resources = Paginator(Group_Activity,10)
else:
paged_resources = Paginator(Group_Activity,Group_Activity.count())
files_list = []
user_activity = []
tot_page=paged_resources.num_pages
if int(page) <= int(tot_page):
if int(page)==1:
page='1'
if int(page) != int(tot_page) and int(page) != int(1):
page=int(page)+1
for each in (paged_resources.page(int(page))).object_list:
if each.created_by == each.modified_by :
if each.last_update == each.created_at:
activity = 'created'
else:
activity = 'modified'
else:
activity = 'created'
if each._type == 'Group':
user_activity.append(each)
each.update({'activity':activity})
files_list.append(each)
else:
page=0
return render_to_response('ndf/scrolldata.html',
{ 'activity_list': files_list,
'group_id': group_id,
'groupid':group_id,
'page':page
# 'imageCollection':imageCollection
},
context_instance = RequestContext(request)
)
@get_execution_time
def get_batches_with_acourse(request, group_id):
"""
This view returns list of batches that match given criteria
along with Announced-course for which match doesn't exists.
Arguments:
group_id - ObjectId of the currently selected group
"""
response_dict = {'success': False, 'message': ""}
batches_list = []
batch_gst = node_collection.one({'_type':'GSystemType','name':'Batch'})
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
announced_course_id = request.GET.get("ac_id", "")
mis_admin = node_collection.one({'_type': "Group", 'name': "MIS_admin"})
if(ObjectId(group_id) == mis_admin._id):
pass
else:
colg_gst = node_collection.one({'_type': "GSystemType", 'name': 'College'})
req_colg_id = node_collection.one({'member_of':colg_gst._id,'relation_set.has_group':ObjectId(group_id)})
b = node_collection.find({'member_of':batch_gst._id,'relation_set.has_course':ObjectId(announced_course_id)})
for each in b:
batches_list.append(each)
response_dict["success"] = True
info_message = "Batch for this course is available"
response_dict["message"] = info_message
response_dict["batches_list"] = json.dumps(batches_list, cls=NodeJSONEncoder)
return HttpResponse(json.dumps(response_dict))
else:
error_message = " BatchFetchError: Either not an ajax call or not a GET request!!!"
return HttpResponse(json.dumps({'message': " BatchCourseFetchError - Something went wrong in ajax call !!! \n\n Please contact system administrator."}))
except Exception as e:
error_message = "\n BatchFetchError: " + str(e) + "!!!"
return HttpResponse(json.dumps({'message': error_message}))
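# Illustrative AJAX exchange for get_batches_with_acourse above (the ids are assumed):
# a GET request carrying ac_id=<announced_course_id> receives
#
#   {"success": true,
#    "message": "Batch for this course is available",
#    "batches_list": "[{...Batch node...}, ...]"}
#
# where batches_list is itself a JSON-encoded list of the Batch nodes related to the
# announced course via has_course.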
|
agpl-3.0
| 7,496,256,682,763,233,000
| 40.658953
| 521
| 0.557128
| false
| 3.780928
| false
| false
| false
|
yuhangc/HRI_planner
|
scripts/hri/human_traj_generator.py
|
1
|
5437
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
class HumanTrajGenerator:
def __init__(self, T, dt):
self.T = T
self.dt = dt
# set some parameters
self.v_max = 1.0
self.a_max = 0.6
self.k_v = 0.8
self.k_hr = 0.6
self.th_hr = 2.5
self.u_std = [0.1, 0.1]
def generate_path_ignore_robot(self, x_init, x_goal):
x = []
u = []
x_last = x_init
for t in range(self.T):
# compute the desired velocity first
x_diff = x_goal - x_last[0:2]
vd = self.k_v * x_diff
# clip the velocity
v_dir = np.abs(vd) / np.linalg.norm(vd)
vd = np.clip(vd, -self.v_max * v_dir, self.v_max * v_dir)
# compute desired acceleration and clip
ud = (vd - x_last[2:4]) / self.dt
u_dir = np.abs(ud) / np.linalg.norm(ud)
ud = np.clip(ud, -self.a_max * u_dir, self.a_max * u_dir)
# inject noise into control
dux = np.random.normal(0.0, self.u_std[0], 1)[0]
duy = np.random.normal(0.0, self.u_std[1], 1)[0]
ud += np.array([dux, duy])
# compute the actual velocity and displacement
x_new = np.zeros((4, ))
x_new[0:2] = x_last[0:2] + x_last[2:4] * self.dt + 0.5 * ud * self.dt**2
x_new[2:4] = x_last[2:4] + ud * self.dt
# append to list
x.append(x_new)
u.append(ud)
x_last = x_new
# visualize
x = np.asarray(x)
u = np.asarray(u)
fig, ax = plt.subplots()
ax.plot(x[:, 0], x[:, 1], "-o", color=(0.1, 0.1, 0.1), fillstyle="none", lw=1.5, label="human_traj")
ax.plot(x_goal[0], x_goal[1], 'ok')
ax.axis("equal")
plt.show()
return x, u
def generate_path_avoid_robot(self, x_init, x_goal, x_robot):
x = []
u = []
x_last = x_init
for t in range(self.T):
# compute the desired velocity first
x_diff = x_goal - x_last[0:2]
vd = self.k_v * x_diff
# clip the velocity
v_dir = np.abs(vd) / np.linalg.norm(vd)
vd = np.clip(vd, -self.v_max * v_dir, self.v_max * v_dir)
# compute desired acceleration and clip
ud = (vd - x_last[2:4]) / self.dt
# add in "force/acc" from avoiding robot
x_rh = x_last[0:2] - x_robot
dot = np.dot(-x_rh, x_diff)
if dot > 0 and np.linalg.norm(x_rh) < self.th_hr:
f_hr = self.k_hr * x_rh
# make f_hr perpendicular to ud
f_hr = np.array([-x_diff[1], x_diff[0]]) / np.linalg.norm(x_diff) * np.linalg.norm(f_hr)
else:
f_hr = np.array([0.0, 0.0])
ud += f_hr
u_dir = np.abs(ud) / np.linalg.norm(ud)
ud = np.clip(ud, -self.a_max * u_dir, self.a_max * u_dir)
# inject noise into control
dux = np.random.normal(0.0, self.u_std[0], 1)[0]
duy = np.random.normal(0.0, self.u_std[1], 1)[0]
ud += np.array([dux, duy])
# compute the actual velocity and displacement
x_new = np.zeros((4, ))
x_new[0:2] = x_last[0:2] + x_last[2:4] * self.dt + 0.5 * ud * self.dt**2
x_new[2:4] = x_last[2:4] + ud * self.dt
# append to list
x.append(x_new)
u.append(ud)
x_last = x_new
# visualize
x = np.asarray(x)
u = np.asarray(u)
fig, ax = plt.subplots()
ax.plot(x[:, 0], x[:, 1], "-o", color=(0.1, 0.1, 0.1), fillstyle="none", lw=1.5, label="human_traj")
ax.plot(x_goal[0], x_goal[1], 'ok')
ax.axis("equal")
plt.show()
return x, u
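# A minimal usage sketch (coordinates are assumed, not project data): generate a
# 16-step trajectory from rest at the origin toward a goal, avoiding a robot placed
# halfway along the straight-line path. Shapes follow the methods above:
# x_init = [px, py, vx, vy], x_goal = [gx, gy], x_robot = [rx, ry].
#
#   gen = HumanTrajGenerator(T=16, dt=0.5)
#   x_init = np.array([0.0, 0.0, 0.0, 0.0])
#   x_goal = np.array([4.0, 2.0])
#   x_robot = 0.5 * (x_init[0:2] + x_goal)
#   x, u = gen.generate_path_avoid_robot(x_init, x_goal, x_robot)
#   # x has shape (16, 4): positions and velocities; u has shape (16, 2): accelerations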
def gen_and_save_trajectories(path, trial=-1, method="ignore_robot"):
# load init and goal data
init_data = np.loadtxt(path + "/init.txt", delimiter=",")
goal_data = np.loadtxt(path + "/goal.txt", delimiter=",")
# create a generator
generator = HumanTrajGenerator(16, 0.5)
# generate a single trajectory
if trial == -1:
i = 0
for x_init, x_goal in zip(init_data, goal_data):
if method == "ignore_robot":
x, u = generator.generate_path_ignore_robot(x_init[0:4], x_goal[0:2])
else:
x_robot = 0.5 * (x_init[0:2] + x_goal[0:2])
x, u = generator.generate_path_avoid_robot(x_init[0:4], x_goal[0:2], x_robot)
# save data to file
np.savetxt(path + "/test" + str(i) + ".txt", np.hstack((x, u)), delimiter=',')
i += 1
else:
x_init = init_data[trial]
x_goal = goal_data[trial]
if method == "ignore_robot":
x, u = generator.generate_path_ignore_robot(x_init[0:4], x_goal[0:2])
else:
x_robot = 0.5 * (x_init[0:2] + x_goal[0:2])
x, u = generator.generate_path_avoid_robot(x_init[0:4], x_goal[0:2], x_robot)
# save data to file
np.savetxt(path + "/test" + str(trial) + ".txt", np.hstack((x, u)), delimiter=',')
if __name__ == "__main__":
# gen_and_save_trajectories("/home/yuhang/Documents/hri_log/test_data")
gen_and_save_trajectories("/home/yuhang/Documents/hri_log/test_data", trial=4, method="avoid_robot")
|
apache-2.0
| -6,814,724,475,351,384,000
| 31.363095
| 108
| 0.48924
| false
| 2.948482
| false
| false
| false
|
mromanello/CitationExtractor
|
citation_extractor/settings/svm.py
|
1
|
1036
|
"""Settings for an SVM-based citation extractor."""
import pkg_resources
from sklearn.svm import LinearSVC
# Sets debug on (=true) or off (=false)
DEBUG = False
POS = True
# leave empty to write the log to the console
LOG_FILE = ""
# list of directories containing data (IOB format with .iob extension)
DATA_DIRS = (
pkg_resources.resource_filename(
'citation_extractor',
'data/aph_corpus/goldset/iob/'
),
)
CLASSIFIER = LinearSVC(verbose=False)
TEST_DIR = ()
TRAIN_COLLECTIONS = ()
TEST_COLLECTIONS = ()
DATA_FILE = ""
TEMP_DIR = ""
OUTPUT_DIR = ""
# number of iterations for the k-fold cross validation
CROSS_VAL_FOLDS = 10
CRFPP_TEMPLATE_DIR = pkg_resources.resource_filename(
'citation_extractor',
'crfpp_templates/'
)
CRFPP_TEMPLATE = "template_5.tpl"
# Leave empty to use CRF++'s default value
CRFPP_PARAM_C = ''
# Leave empty to use CRF++'s default value
CRFPP_PARAM_A = ''
# Leave empty to use CRF++'s default value
CRFPP_PARAM_F = ''
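# A minimal sketch of customising this settings module (assumption: any scikit-learn
# estimator exposing fit/predict can be plugged in as CLASSIFIER), e.g. with a
# different regularisation strength:
#
#   from sklearn.svm import LinearSVC
#   CLASSIFIER = LinearSVC(C=0.5, verbose=False)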
|
gpl-3.0
| 4,482,813,493,128,714,000
| 19.313725
| 70
| 0.665058
| false
| 3.158537
| false
| false
| false
|
Astyan-42/skepticalscience
|
skepticalsciencewebsite/custompayment/forms.py
|
1
|
3142
|
from django import forms
from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from custompayment.models import Address, Order
class AddressForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddressForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_addressForm'
self.helper.add_input(Submit('submit', _('Save')))
class Meta:
model = Address
fields = ["first_name", "last_name", "company_name", "street_address_1", "street_address_2", "city",
"city_area", "postal_code", "country", "country_area", "phone"]
class DiscountOrderForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(DiscountOrderForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
        # self.helper.form_class = 'form-inline'  # inline layout disabled: it does not suit this form
# self.helper.field_template = 'bootstrap3/layout/inline_field.html'
self.helper.form_id = 'id_discountorderForm'
self.helper.add_input(Submit('submit', _('Apply')))
    def clean(self):
        # Nothing extra to clean here; the discount date window is validated in is_valid().
        return super(DiscountOrderForm, self).clean()
def is_valid(self):
valid = super(DiscountOrderForm, self).is_valid()
if not valid:
return valid
discount = self.cleaned_data['discount']
if discount is None:
self.add_error('discount', forms.ValidationError(_("Empty value not authorised")))
return False
today = timezone.now().date()
if today < discount.starting_date:
self.add_error('discount', forms.ValidationError(_("This discount code hasn't started yet")))
return False
elif today > discount.ending_date:
self.add_error('discount', forms.ValidationError(_("This discount code has ended")))
return False
return True
class Meta:
model = Order
fields = ["discount"]
widgets = {'discount' : forms.TextInput()}
class PaymentMethodsForm(forms.Form):
method = forms.ChoiceField(
choices=settings.CHECKOUT_PAYMENT_CHOICES, widget=forms.RadioSelect,
initial=settings.CHECKOUT_PAYMENT_CHOICES[0][0])
def __init__(self, *args, **kwargs):
super(PaymentMethodsForm, self).__init__(*args, ** kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_paymentmethodForm'
self.helper.add_input(Submit('submit', _('Proceed to payment')))
class AcceptSellingForm(forms.Form):
    accepted = forms.BooleanField(label="Accept the conditions of sale", initial=False)
def __init__(self, *args, **kwargs):
super(AcceptSellingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_acceptsellingForm'
self.helper.add_input(Submit('submit', _('Accept and pay')))
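# A minimal usage sketch for DiscountOrderForm (the order lookup and its token field
# are assumptions, not part of this module): bind the form to an existing Order and
# let is_valid() enforce the discount date window defined above.
#
#   order = Order.objects.get(token=token)
#   form = DiscountOrderForm(request.POST, instance=order)
#   if form.is_valid():
#       form.save()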
|
agpl-3.0
| -482,083,300,925,337,660
| 36.86747
| 108
| 0.63972
| false
| 4.007653
| false
| false
| false
|
giliam/turbo-songwriter
|
backend/songwriter/migrations/0001_initial.py
|
1
|
5963
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-04 14:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(blank=True, default='', max_length=150)),
('lastname', models.CharField(blank=True, default='', max_length=150)),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Chord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.CharField(default='', max_length=15)),
],
),
migrations.CreateModel(
name='Editor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default='', max_length=150)),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Harmonization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spot_in_verse', models.PositiveIntegerField()),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
('chord', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Chord')),
],
),
migrations.CreateModel(
name='Paragraph',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField()),
('is_refrain', models.BooleanField(default=False, verbose_name='Is a refrain paragraph?')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='', max_length=150)),
('rights_paid', models.BooleanField(default=True, verbose_name='rights paid')),
('secli_number', models.CharField(blank=True, default='', max_length=150)),
('sacem_number', models.CharField(blank=True, default='', max_length=150)),
('comments', models.TextField(verbose_name='Comments')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Author')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Editor')),
],
),
migrations.CreateModel(
name='Theme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=150)),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Verse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField()),
('content', models.TextField()),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.AddField(
model_name='song',
name='theme',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Theme'),
),
migrations.AddField(
model_name='paragraph',
name='song',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paragraphs', to='songwriter.Song'),
),
migrations.AddField(
model_name='harmonization',
name='verse',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='harmonizations', to='songwriter.Verse'),
),
]
|
mit
| 1,539,306,744,246,456,000
| 52.241071
| 135
| 0.584605
| false
| 4.381337
| false
| false
| false
|
hkzhe/ddz_project
|
game_server/client.py
|
1
|
1279
|
import socket
import json
import struct
import sys
import time
def build_login_cmd():
cmd_dict = {}
cmd_dict["userID"] = sys.argv[1]
cmd_dict["cmd"] = "login"
return json.dumps( cmd_dict )
def send_cmd( sock , cmd ):
cmd_len = len( cmd )
send_str = struct.pack( 'i' , cmd_len )
sock.send( send_str )
sock.send( cmd )
def send_out_cards( sock , my_pokes ):
uid = sys.argv[1]
cmd_dict = {}
cmd_dict[ "cmd" ] = "outcard"
cmd_dict["userID"] = sys.argv[1]
cmd_dict[ "outPokes" ] = [ my_pokes[0] , my_pokes[1] ]
print "send pokes = %d , %d" %( my_pokes[0] , my_pokes[1] )
send_cmd( sock , json.dumps( cmd_dict ) )
def recv_cmd( sock ):
head_str = sock.recv( 4 )
tmp_tuple = struct.unpack( 'i' , head_str )
body_len = tmp_tuple[0]
body_str = sock.recv( body_len )
print "recv cmd = " + body_str
return body_str
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 8000))
cmd = build_login_cmd()
send_cmd( sock , cmd )
cmd_str = recv_cmd( sock )
cmd_dict = json.loads( cmd_str )
my_pokes = cmd_dict[ sys.argv[1] ]
boss_id = cmd_dict[ "boss" ]
#if boss_id == sys.argv[1] :
#send_out_cards( sock , my_pokes )
recv_cmd( sock )
time.sleep(10)
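# Wire format used above (a sketch; the byte order follows struct's native 'i' as in
# send_cmd/recv_cmd): every frame is a 4-byte length prefix followed by a JSON body.
# For example, a login command for user "42" is sent as
#
#   body  = '{"userID": "42", "cmd": "login"}'
#   frame = struct.pack('i', len(body)) + body
#
# and the server replies with frames in the same framing.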
|
bsd-3-clause
| 6,844,430,092,311,269,000
| 24.102041
| 60
| 0.591869
| false
| 2.417769
| false
| false
| false
|
dongweiming/web_develop
|
chapter10/section2/server.py
|
1
|
3462
|
# coding=utf-8
import os
import sys
from datetime import datetime
sys.path.append('gen-py')
sys.path.append('/usr/lib/python2.7/site-packages')
from flask_sqlalchemy import SQLAlchemy
from app import app
from models import PasteFile as BasePasteFile
from utils import get_file_md5
db = SQLAlchemy(app)
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from pastefile import PasteFileService
from pastefile.ttypes import PasteFile, UploadImageError, NotFound
class RealPasteFile(db.Model, BasePasteFile):
def __init__(self, *args, **kwargs):
BasePasteFile.__init__(self, *args, **kwargs)
@classmethod
def create_by_upload_file(cls, uploaded_file):
rst = uploaded_file
with open(rst.path) as f:
filemd5 = get_file_md5(f)
uploaded_file = cls.get_by_md5(filemd5)
if uploaded_file:
os.remove(rst.path)
return uploaded_file
filestat = os.stat(rst.path)
rst.size = filestat.st_size
rst.filemd5 = filemd5
return rst
def get_url(self, subtype, is_symlink=False):
hash_or_link = self.symlink if is_symlink else self.filehash
return 'http://%s/{subtype}/{hash_or_link}'.format(
subtype=subtype, hash_or_link=hash_or_link)
class PasteFileHandler(object):
def get_file_info(self, filename, mimetype):
rst = RealPasteFile(filename, mimetype, 0)
return rst.filehash, rst.path
def create(self, request):
width = request.width
height = request.height
upload_file = RealPasteFile(request.filename, request.mimetype, 0,
request.filehash)
try:
if width and height:
paste_file = RealPasteFile.rsize(upload_file, width, height)
else:
paste_file = RealPasteFile.create_by_upload_file(
upload_file)
except:
raise UploadImageError()
db.session.add(paste_file)
db.session.commit()
return self.convert_type(paste_file)
def get(self, pid):
paste_file = RealPasteFile.query.filter_by(id=pid).first()
if not paste_file:
raise NotFound()
return self.convert_type(paste_file)
@classmethod
def convert_type(cls, paste_file):
        '''Convert the model instance into the Thrift struct type.'''
new_paste_file = PasteFile()
for attr in ('id', 'filehash', 'filename', 'filemd5', 'uploadtime',
'mimetype', 'symlink', 'size', 'quoteurl', 'size', 'type',
'url_d', 'url_i', 'url_s', 'url_p'):
val = getattr(paste_file, attr)
if isinstance(val, unicode):
val = val.encode('utf-8')
if isinstance(val, datetime):
val = str(val)
setattr(new_paste_file, attr, val)
return new_paste_file
if __name__ == '__main__':
import logging
logging.basicConfig()
handler = PasteFileHandler()
processor = PasteFileService.Processor(handler)
transport = TSocket.TServerSocket(port=8200)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadPoolServer(
processor, transport, tfactory, pfactory)
print 'Starting the server...'
server.serve()
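# A minimal client-side sketch against this server (assumption: the thrift-generated
# pastefile module is on the Python path, as imported above; host/port match the
# TServerSocket configured in __main__):
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#   from pastefile import PasteFileService
#
#   transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 8200))
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = PasteFileService.Client(protocol)
#   transport.open()
#   paste_file = client.get(1)          # raises NotFound if the id does not exist
#   transport.close()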
|
gpl-3.0
| 7,807,537,685,541,028,000
| 31.130841
| 79
| 0.62071
| false
| 3.782178
| false
| false
| false
|
lambdamusic/OntoSPy
|
ontospy/ontodocs/utils.py
|
1
|
6309
|
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
import json
# ===========
# Utilities
# ===========
def build_D3treeStandard(old, MAX_DEPTH, level=1, toplayer=None):
"""
    For d3's examples all we need is a JSON object with name, children and size, e.g.
{
"name": "flare",
"children": [
{
"name": "analytics",
"children": [
{
"name": "cluster",
"children": [
{"name": "AgglomerativeCluster", "size": 3938},
{"name": "CommunityStructure", "size": 3812},
{"name": "HierarchicalCluster", "size": 6714},
{"name": "MergeEdge", "size": 743}
]
},
etc...
"""
out = []
if not old:
old = toplayer
for x in old:
d = {}
# print "*" * level, x.label
d['qname'] = x.qname
d['name'] = x.bestLabel(quotes=False).replace("_", " ")
d['objid'] = x.id
if x.children() and level < MAX_DEPTH:
d['size'] = len(x.children()) + 5 # fake size
d['realsize'] = len(x.children()) # real size
d['children'] = build_D3treeStandard(x.children(), MAX_DEPTH,
level + 1)
else:
d['size'] = 1 # default size
d['realsize'] = 0 # default size
out += [d]
return out
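# Illustrative call (assumption: `classes` is a list of ontospy entities exposing
# qname, id, bestLabel() and children(), exactly as used above):
#
#   tree = build_D3treeStandard(None, MAX_DEPTH=3, toplayer=classes)
#   print(json.dumps(tree, indent=2))   # nested {qname, name, objid, size, children}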
# note: duplicated from the templatetags module to avoid circular imports
def truncchar_inverse(value, arg):
if len(value) < arg:
return value
else:
x = len(value) - arg
return '...' + value[x:]
def build_D3bubbleChart(old, MAX_DEPTH, level=1, toplayer=None):
"""
    Similar to the standard d3 tree, but nodes with children need to be duplicated,
    otherwise they are not depicted explicitly and are only colour coded.
"name": "all",
"children": [
{"name": "Biological Science", "size": 9000},
{"name": "Biological Science", "children": [
{"name": "Biological techniques", "size": 6939},
{"name": "Cell biology", "size": 4166},
{"name": "Drug discovery X", "size": 3620, "children": [
{"name": "Biochemistry X", "size": 4585},
{"name": "Biochemistry X", "size": 4585 },
]},
{"name": "Drug discovery Y", "size": 3620, "children": [
{"name": "Biochemistry Y", "size": 4585},
{"name": "Biochemistry Y", "size": 4585 },
]},
{"name": "Drug discovery A", "size": 3620, "children": [
{"name": "Biochemistry A", "size": 4585},
]},
{"name": "Drug discovery B", "size": 3620, },
]},
etc...
"""
out = []
if not old:
old = toplayer
for x in old:
d = {}
# print "*" * level, x.label
d['qname'] = x.qname
d['name'] = x.bestLabel(quotes=False).replace("_", " ")
d['objid'] = x.id
if x.children() and level < MAX_DEPTH:
duplicate_row = {}
duplicate_row['qname'] = x.qname
duplicate_row['name'] = x.bestLabel(quotes=False).replace("_", " ")
duplicate_row['objid'] = x.id
duplicate_row['size'] = len(x.children()) + 5 # fake size
duplicate_row['realsize'] = len(x.children()) # real size
out += [duplicate_row]
d['children'] = build_D3bubbleChart(x.children(), MAX_DEPTH,
level + 1)
else:
d['size'] = 1 # default size
d['realsize'] = 0 # default size
out += [d]
return out
def build_D3treepie(old, MAX_DEPTH, level=1, toplayer=None):
"""
Create the JSON needed by the treePie viz
http://bl.ocks.org/adewes/4710330/94a7c0aeb6f09d681dbfdd0e5150578e4935c6ae
Eg
['origin' , [n1, n2],
{ 'name1' :
['name1', [n1, n2],
{'name1-1' : ...}
] ,
} ,
]
"""
d = {}
if not old:
old = toplayer
for x in old:
label = x.bestLabel(quotes=False).replace("_", " ")
if x.children() and level < MAX_DEPTH:
size = len(x.children())
d[x.qname] = [
label, [size, size],
build_D3treepie(x.children(), MAX_DEPTH, level + 1)
]
else:
size = 1
d[x.qname] = [label, [size, size], {}]
return d
##################
#
# TREE DISPLAY FUNCTIONS [from ontospy web]
#
##################
def formatHTML_EntityTreeTable(treedict, element=0):
""" outputs an html tree representation based on the dictionary we get from the Inspector
object....
EG:
<table class=h>
<tr>
<td class="tc" colspan=4><a href="../DataType">DataType</a>
</td>
</tr>
<tr>
<td class="tc" colspan=4><a href="../DataType">DataType</a>
</td>
</tr>
<tr>
<td class="space"></td>
<td class="bar"></td>
<td class="space"></td>
<td>
<table class=h>
<tr><td class="tc" colspan=4><a href="../Boolean">Boolean</a>
</td>
</tr>
<tr><td class="tc" colspan=4><a href="../Boolean">Boolean</a>
</td>
</tr>
</table>
</td>
</tr>
</table>
Note: The top level owl:Thing never appears as a link.
"""
# ontoFile = onto.ontologyMaskedLocation or onto.ontologyPhysicalLocation
# if not treedict:
# treedict = onto.ontologyClassTree()
stringa = """<table class="h">"""
for x in treedict[element]:
if x.qname == "owl:Thing":
stringa += """<tr>
<td class="tc" colspan=4><a>%s</a></td>
</tr>""" % (truncchar_inverse(x.qname, 50))
else:
stringa += """<tr>
<td class="tc" colspan=4><a title=\"%s\" class=\"treelinks\" href=\"%s.html\">%s</a></td>
</tr>""" % (x.uri, x.slug, truncchar_inverse(x.qname, 50))
if treedict.get(x, None):
stringa += """ <tr>
<td class="space"></td>
<td class="bar"></td>
<td class="space"></td>
<td>%s</td>
</tr>""" % formatHTML_EntityTreeTable(treedict, x)
# stringa += formatHTML_ClassTree(onto, treedict, x)
# stringa += "</li>"
stringa += "</table>"
return stringa
def get_onto_for_testing(TEST_ONLINE=False):
"Wrapper for util script used in viz main methods"
if TEST_ONLINE:
from ontospy import Ontospy
g = Ontospy("http://cohere.open.ac.uk/ontology/cohere.owl#")
else:
from ontospy.core.manager import get_random_ontology
uri, g = get_random_ontology(50)
return g
|
gpl-3.0
| -6,854,177,205,183,017,000
| 26.077253
| 96
| 0.5191
| false
| 3.146633
| false
| false
| false
|
wutali/sauron
|
sauron/metrics/RedisMetric.py
|
1
|
6731
|
import redis
from sauron import logger
from sauron.metrics import Metric, MetricException
class RedisMetric(Metric):
@staticmethod
def parseMemory(x):
try:
if 'G' in x:
return (x.replace('G', ''), 'Gigabytes')
elif 'M' in x:
return (x.replace('M', ''), 'Megabytes')
elif 'K' in x:
return (x.replace('K', ''), 'Kilobytes')
else:
return (x, 'Bytes')
except:
return (x, 'Bytes')
infoUnits = {
'redis_version' : lambda x: (int(x.replace('.', '')), 'None'),
'redis_git_sha1' : lambda x: (int(x, 16), 'None'),
        'redis_git_dirty'           : lambda x: (x, 'None'),
'arch_bits' : lambda x: (x, 'Count'),
'process_id' : lambda x: (x, 'None'),
'uptime_in_seconds' : lambda x: (x, 'Seconds'),
'uptime_in_days' : lambda x: (x, 'None'),
'lru_clock' : lambda x: (x, 'Seconds'),
'used_cpu_sys' : lambda x: (x, 'Seconds'),
'used_cpu_user' : lambda x: (x, 'Seconds'),
'used_cpu_sys_children' : lambda x: (x, 'Seconds'),
'used_cpu_user_children' : lambda x: (x, 'Seconds'),
'connected_clients' : lambda x: (x, 'Count'),
'connected_slaves' : lambda x: (x, 'Count'),
'client_longest_output_list': lambda x: (x, 'Count'),
'client_biggest_input_buf' : lambda x: (x, 'Bytes'),
'blocked_clients' : lambda x: (x, 'Count'),
'used_memory' : lambda x: RedisMetric.parseMemory(x),
'used_memory_human' : lambda x: RedisMetric.parseMemory(x),
'used_memory_rss' : lambda x: RedisMetric.parseMemory(x),
        'used_memory_peak'          : lambda x: RedisMetric.parseMemory(x),
'used_memory_peak_human' : lambda x: RedisMetric.parseMemory(x),
'mem_fragmentation_ratio' : lambda x: (x, 'None'),
'loading' : lambda x: (x, 'None'),
'aof_enabled' : lambda x: (x, 'None'),
'changes_since_last_save' : lambda x: (x, 'Count'),
'bgsave_in_progress' : lambda x: (x, 'None'),
'last_save_time' : lambda x: (x, 'Seconds'),
'bgrewriteaof_in_progress' : lambda x: (x, 'None'),
'total_connections_received': lambda x: (x, 'Count'),
'total_commands_processed' : lambda x: (x, 'Count'),
'expired_keys' : lambda x: (x, 'Count'),
'evicted_keys' : lambda x: (x, 'Count'),
'keyspace_hits' : lambda x: (x, 'Count'),
'keyspace_misses' : lambda x: (x, 'Count'),
'pubsub_channels' : lambda x: (x, 'Count'),
'pubsub_patterns' : lambda x: (x, 'Count'),
'latest_fork_usec' : lambda x: (x, 'Microseconds'),
'vm_enabled' : lambda x: (x, 'None'),
'aof_current_size' : lambda x: (x, 'Bytes'),
'aof_base_size' : lambda x: (x, 'Bytes'),
'aof_pending_rewrite' : lambda x: (x, 'None'),
}
def __init__(self, name, **kwargs):
Metric.__init__(self, name, **kwargs)
self.reconfig(name, **kwargs)
def reconfig(self, name, **kwargs):
Metric.reconfig(self, name, **kwargs)
# These are a selection of argument names. If they're
# present, then we'll use them, otherwise, we'll use
# the default provided by the redis module itself
redisArgs = {}
for arg in ['host', 'port', 'db', 'password', 'charset', 'errors', 'unix_socket_path']:
try:
redisArgs[arg] = kwargs[arg]
except KeyError:
pass
self.redis = redis.Redis(**redisArgs)
# The keys we should save from the 'info' command in redis
self.info = kwargs.get('info' , [])
# The keys we should get and interpret as numbers
self.get = kwargs.get('get' , [])
# The keys we should get, and report their length
self.llen = kwargs.get('llen', [])
# The keys we should get and report the hash length
self.hlen = kwargs.get('hlen', [])
# The keys we should get and report the particular key from
self.hget = kwargs.get('hget', {})
# The keys we should get and report the cardinality of
self.scard = kwargs.get('scard', [])
# The keys we should get and report the zcardinality of
self.zcard = kwargs.get('zcard', [])
# The patterns we should count the number of keys of
self.patterns = kwargs.get('patterns', [])
def values(self):
try:
results = {}
info = self.redis.info()
for i in self.info:
try:
results[i] = RedisMetric.infoUnits[i](info[i])
except Exception as e:
print repr(e)
results[i] = (info[i], 'None')
both = list(self.get)
both.extend(self.llen)
both.extend(self.hlen)
both.extend(['%s-%s' % (k, v) for k,v in self.hget.items()])
both.extend(self.scard)
both.extend(self.zcard)
both.extend(self.patterns)
with self.redis.pipeline() as pipe:
for g in self.get:
logger.debug('get %s' % g)
pipe.get(g)
for l in self.llen:
logger.debug('llen %s' % l)
pipe.llen(l)
for h in self.hlen:
logger.debug('hlen %s' % h)
pipe.hlen(h)
for k,v in self.hget.items():
logger.debug('hget %s %s' % (k, v))
pipe.hget(k, v)
for s in self.scard:
logger.debug('scard %s' % s)
pipe.scard(s)
for z in self.zcard:
logger.debug('zcard %s' % z)
pipe.zcard(z)
for pattern in self.patterns:
logger.debug('keys %s' % pattern)
pipe.keys(pattern)
fetched = pipe.execute()
for k, f in zip(both, fetched):
if isinstance(f, list):
results[k] = (len(f), 'Count')
else:
results[k] = (f, 'Count')
return {'results': results}
except redis.RedisError as e:
raise MetricException(e)
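# A minimal usage sketch (key names and connection details are assumptions; sauron
# normally constructs metrics from its configuration file rather than directly):
#
#   m = RedisMetric('redis', host='localhost', port=6379,
#                   info=['connected_clients', 'used_memory'],
#                   llen=['task-queue'], patterns=['cache:*'])
#   print m.values()   # {'results': {'connected_clients': (3, 'Count'), ...}}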
|
mit
| 2,291,710,560,482,880,000
| 44.47973
| 95
| 0.473332
| false
| 3.745687
| false
| false
| false
|
rolandgeider/wger
|
wger/mailer/urls.py
|
1
|
1189
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.conf.urls import include
from django.urls import path
# wger
from wger.mailer.forms import EmailListForm
from wger.mailer.views import gym
# sub patterns for email lists
patterns_email = [
path('overview/gym/<int:gym_pk>',
gym.EmailLogListView.as_view(),
name='overview'),
path('add/gym/<int:gym_pk>',
gym.EmailListFormPreview(EmailListForm),
name='add-gym'),
]
urlpatterns = [
path('email', include((patterns_email, 'email'), namespace="email")),
]
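# Illustrative reversal of the named patterns above (the gym_pk value is an assumption;
# the resulting path also depends on where this URLconf is included):
#
#   from django.urls import reverse
#   reverse('email:overview', kwargs={'gym_pk': 1})
#   reverse('email:add-gym', kwargs={'gym_pk': 1})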
|
agpl-3.0
| 4,714,017,575,843,777,000
| 29.487179
| 78
| 0.721615
| false
| 3.738994
| false
| false
| false
|
rvykydal/blivet
|
blivet/formats/disklabel.py
|
1
|
22009
|
# disklabel.py
# Device format classes for anaconda's storage configuration module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
import gi
import os
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
from ..storage_log import log_exception_info, log_method_call
import parted
import _ped
from ..errors import DiskLabelCommitError, InvalidDiskLabelError, AlignmentError
from .. import arch
from ..events.manager import event_manager
from .. import udev
from .. import util
from ..flags import flags
from ..i18n import _, N_
from . import DeviceFormat, register_device_format
from ..size import Size
import logging
log = logging.getLogger("blivet")
class DiskLabel(DeviceFormat):
""" Disklabel """
_type = "disklabel"
_name = N_("partition table")
_formattable = True # can be formatted
_default_label_type = None
def __init__(self, **kwargs):
"""
:keyword device: full path to the block device node
:type device: str
:keyword str uuid: disklabel UUID
:keyword label_type: type of disklabel to create
:type label_type: str
:keyword exists: whether the formatting exists
:type exists: bool
"""
log_method_call(self, **kwargs)
DeviceFormat.__init__(self, **kwargs)
self._label_type = ""
if not self.exists:
self._label_type = kwargs.get("label_type") or ""
self._size = Size(0)
self._parted_device = None
self._parted_disk = None
self._orig_parted_disk = None
self._supported = True
self._disk_label_alignment = None
self._minimal_alignment = None
self._optimal_alignment = None
if self.parted_device:
# set up the parted objects and raise exception on failure
try:
self.update_orig_parted_disk()
except Exception as e: # pylint: disable=broad-except
self._supported = False
self._label_type = kwargs.get("label_type") or ""
log.warning("error setting up disklabel object on %s: %s", self.device, str(e))
def __deepcopy__(self, memo):
""" Create a deep copy of a Disklabel instance.
We can't do copy.deepcopy on parted objects, which is okay.
"""
return util.variable_copy(self, memo,
shallow=('_parted_device', '_optimal_alignment', '_minimal_alignment',
'_disk_label_alignment'),
duplicate=('_parted_disk', '_orig_parted_disk'))
def __repr__(self):
s = DeviceFormat.__repr__(self)
if flags.testing:
return s
s += (" type = %(type)s partition count = %(count)s"
" sector_size = %(sector_size)s\n"
" align_offset = %(offset)s align_grain = %(grain)s\n"
" parted_disk = %(disk)s\n"
" orig_parted_disk = %(orig_disk)r\n"
" parted_device = %(dev)s\n" %
{"type": self.label_type, "count": len(self.partitions),
"sector_size": self.sector_size,
"offset": self.get_alignment().offset,
"grain": self.get_alignment().grainSize,
"disk": self.parted_disk, "orig_disk": self._orig_parted_disk,
"dev": self.parted_device})
return s
@property
def desc(self):
return "%s %s" % (self.label_type, self.type)
@property
def dict(self):
d = super(DiskLabel, self).dict
if flags.testing:
return d
d.update({"label_type": self.label_type,
"partition_count": len(self.partitions),
"sector_size": self.sector_size,
"offset": self.get_alignment().offset,
"grain_size": self.get_alignment().grainSize})
return d
@property
def supported(self):
return self._supported
def update_parted_disk(self):
""" re-read the disklabel from the device """
self._parted_disk = None
mask = event_manager.add_mask(device=os.path.basename(self.device), partitions=True)
self.update_orig_parted_disk()
udev.settle()
event_manager.remove_mask(mask)
def update_orig_parted_disk(self):
self._orig_parted_disk = self.parted_disk.duplicate()
def reset_parted_disk(self):
""" Set this instance's parted_disk to reflect the disk's contents. """
log_method_call(self, device=self.device)
self._parted_disk = self._orig_parted_disk
def fresh_parted_disk(self):
""" Return a new, empty parted.Disk instance for this device. """
log_method_call(self, device=self.device, label_type=self.label_type)
return parted.freshDisk(device=self.parted_device, ty=self.label_type)
@property
def parted_disk(self):
if not self.parted_device:
return None
if not self._parted_disk and self.supported:
if self.exists:
try:
self._parted_disk = parted.Disk(device=self.parted_device)
except (_ped.DiskLabelException, _ped.IOException, NotImplementedError):
self._supported = False
return None
if self._parted_disk.type == "loop":
# When the device has no partition table but it has a FS,
# it will be created with label type loop. Treat the
# same as if the device had no label (cause it really
# doesn't).
raise InvalidDiskLabelError()
else:
self._parted_disk = self.fresh_parted_disk()
# turn off cylinder alignment
if self._parted_disk.isFlagAvailable(parted.DISK_CYLINDER_ALIGNMENT):
self._parted_disk.unsetFlag(parted.DISK_CYLINDER_ALIGNMENT)
# Set the boot flag on the GPT PMBR, this helps some BIOS systems boot
if self._parted_disk.isFlagAvailable(parted.DISK_GPT_PMBR_BOOT):
# MAC can boot as EFI or as BIOS, neither should have PMBR boot set
if arch.is_efi() or arch.is_mactel():
self._parted_disk.unsetFlag(parted.DISK_GPT_PMBR_BOOT)
log.debug("Clear pmbr_boot on %s", self._parted_disk)
else:
self._parted_disk.setFlag(parted.DISK_GPT_PMBR_BOOT)
log.debug("Set pmbr_boot on %s", self._parted_disk)
else:
log.debug("Did not change pmbr_boot on %s", self._parted_disk)
udev.settle(quiet=True)
return self._parted_disk
@property
def parted_device(self):
if not self._parted_device and self.device:
if os.path.exists(self.device):
# We aren't guaranteed to be able to get a device. In
# particular, built-in USB flash readers show up as devices but
# do not always have any media present, so parted won't be able
# to find a device.
try:
self._parted_device = parted.Device(path=self.device)
except (_ped.IOException, _ped.DeviceException) as e:
log.error("DiskLabel.parted_device: Parted exception: %s", e)
else:
log.info("DiskLabel.parted_device: %s does not exist", self.device)
if not self._parted_device:
log.info("DiskLabel.parted_device returning None")
return self._parted_device
@classmethod
def get_platform_label_types(cls):
label_types = ["msdos", "gpt"]
if arch.is_pmac():
label_types = ["mac"]
elif arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_efi() and arch.is_arm():
label_types = ["msdos", "gpt"]
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_s390():
label_types = ["msdos", "dasd"]
return label_types
@classmethod
def set_default_label_type(cls, labeltype):
cls._default_label_type = labeltype
log.debug("default disklabel has been set to %s", labeltype)
def _label_type_size_check(self, label_type):
if self.parted_device is None:
return False
label = parted.freshDisk(device=self.parted_device, ty=label_type)
return self.parted_device.length < label.maxPartitionStartSector
def _get_best_label_type(self):
label_type = self._default_label_type
label_types = self.get_platform_label_types()[:]
if label_type in label_types:
label_types.remove(label_type)
if label_type:
label_types.insert(0, label_type)
if arch.is_s390():
if blockdev.s390.dasd_is_fba(self.device):
# the device is FBA DASD
return "msdos"
elif self.parted_device.type == parted.DEVICE_DASD:
# the device is DASD
return "dasd"
elif util.detect_virt():
# check for dasds exported into qemu as normal virtio/scsi disks
try:
_parted_disk = parted.Disk(device=self.parted_device)
except (_ped.DiskLabelException, _ped.IOException, NotImplementedError):
pass
else:
if _parted_disk.type == "dasd":
return "dasd"
for lt in label_types:
if self._label_type_size_check(lt):
log.debug("selecting %s disklabel for %s based on size",
label_type, os.path.basename(self.device))
label_type = lt
break
return label_type
@property
def label_type(self):
""" The disklabel type (eg: 'gpt', 'msdos') """
if not self.supported:
return self._label_type
# For new disklabels, user-specified type overrides built-in logic.
# XXX This determines the type we pass to parted.Disk
if not self.exists and not self._parted_disk:
if self._label_type:
lt = self._label_type
else:
lt = self._get_best_label_type()
return lt
try:
lt = self.parted_disk.type
except Exception: # pylint: disable=broad-except
log_exception_info()
lt = self._label_type
return lt
@property
def sector_size(self):
try:
return Size(self.parted_device.sectorSize)
except AttributeError:
log_exception_info()
return None
@property
def name(self):
if self.supported:
_str = "%(name)s (%(type)s)"
else:
# Translators: Name for an unsupported disklabel; e.g. "Unsupported partition table"
_str = _("Unsupported %(name)s")
return _str % {"name": _(self._name), "type": self.label_type.upper()}
@property
def size(self):
size = self._size
if not size:
try:
size = Size(self.parted_device.getLength(unit="B"))
except Exception: # pylint: disable=broad-except
log_exception_info()
size = Size(0)
return size
@property
def status(self):
""" Device status. """
return False
@property
def supports_names(self):
if not self.supported or not self.parted_disk:
return False
return self.parted_disk.supportsFeature(parted.DISK_TYPE_PARTITION_NAME)
def _create(self, **kwargs):
""" Create the device. """
log_method_call(self, device=self.device,
type=self.type, status=self.status)
# We're relying on someone having called reset_parted_disk -- we
# could ensure a fresh disklabel by setting self._parted_disk to
# None right before calling self.commit(), but that might hide
# other problems.
self.commit()
def commit(self):
""" Commit the current partition table to disk and notify the OS. """
log_method_call(self, device=self.device,
numparts=len(self.partitions))
try:
self.parted_disk.commit()
except parted.DiskException as msg:
raise DiskLabelCommitError(msg)
else:
self.update_orig_parted_disk()
udev.settle()
def commit_to_disk(self):
""" Commit the current partition table to disk. """
log_method_call(self, device=self.device,
numparts=len(self.partitions))
try:
self.parted_disk.commitToDevice()
except parted.DiskException as msg:
raise DiskLabelCommitError(msg)
else:
self.update_orig_parted_disk()
def add_partition(self, start, end, ptype=None):
""" Add a partition to the disklabel.
:param int start: start sector
:param int end: end sector
:param ptype: partition type or None
:type ptype: int (parted partition type constant) or NoneType
Partition type will default to either PARTITION_NORMAL or
PARTITION_LOGICAL, depending on whether the start sector is within
an extended partition.
"""
if ptype is None:
extended = self.extended_partition
if extended and extended.geometry.contains(start):
ptype = parted.PARTITION_LOGICAL
else:
ptype = parted.PARTITION_NORMAL
geometry = parted.Geometry(device=self.parted_device,
start=start, end=end)
new_partition = parted.Partition(disk=self.parted_disk,
type=ptype,
geometry=geometry)
constraint = parted.Constraint(exactGeom=geometry)
self.parted_disk.addPartition(partition=new_partition,
constraint=constraint)
def remove_partition(self, partition):
""" Remove a partition from the disklabel.
:param partition: the partition to remove
:type partition: :class:`parted.Partition`
"""
self.parted_disk.removePartition(partition)
@property
def extended_partition(self):
try:
extended = self.parted_disk.getExtendedPartition()
except Exception: # pylint: disable=broad-except
log_exception_info()
extended = None
return extended
@property
def logical_partitions(self):
try:
logicals = self.parted_disk.getLogicalPartitions()
except Exception: # pylint: disable=broad-except
log_exception_info()
logicals = []
return logicals
@property
def primary_partitions(self):
try:
primaries = self.parted_disk.getPrimaryPartitions()
except Exception: # pylint: disable=broad-except
log_exception_info()
primaries = []
return primaries
@property
def first_partition(self):
try:
part = self.parted_disk.getFirstPartition()
except Exception: # pylint: disable=broad-except
log_exception_info()
part = None
return part
@property
def partitions(self):
return getattr(self.parted_disk, "partitions", [])
def _get_disk_label_alignment(self):
""" Return the disklabel's required alignment for new partitions.
:rtype: :class:`parted.Alignment`
"""
if not self._disk_label_alignment:
try:
self._disk_label_alignment = self.parted_disk.partitionAlignment
except (_ped.CreateException, AttributeError):
self._disk_label_alignment = parted.Alignment(offset=0,
grainSize=1)
return self._disk_label_alignment
def get_minimal_alignment(self):
""" Return the device's minimal alignment for new partitions.
:rtype: :class:`parted.Alignment`
"""
if not self._minimal_alignment:
disklabel_alignment = self._get_disk_label_alignment()
try:
minimal_alignment = self.parted_device.minimumAlignment
except (_ped.CreateException, AttributeError):
# handle this in the same place we'd handle an ArithmeticError
minimal_alignment = None
try:
alignment = minimal_alignment.intersect(disklabel_alignment)
except (ArithmeticError, AttributeError):
alignment = disklabel_alignment
self._minimal_alignment = alignment
return self._minimal_alignment
def get_optimal_alignment(self):
""" Return the device's optimal alignment for new partitions.
:rtype: :class:`parted.Alignment`
.. note::
If there is no device-supplied optimal alignment this method
returns the minimal device alignment.
"""
if not self._optimal_alignment:
disklabel_alignment = self._get_disk_label_alignment()
try:
optimal_alignment = self.parted_device.optimumAlignment
except (_ped.CreateException, AttributeError):
# if there is no optimal alignment, use the minimal alignment,
# which has already been intersected with the disklabel
# alignment
alignment = self.get_minimal_alignment()
else:
try:
alignment = optimal_alignment.intersect(disklabel_alignment)
except ArithmeticError:
alignment = disklabel_alignment
self._optimal_alignment = alignment
return self._optimal_alignment
def get_alignment(self, size=None):
""" Return an appropriate alignment for a new partition.
:keyword size: proposed partition size (optional)
:type size: :class:`~.size.Size`
:returns: the appropriate alignment to use
:rtype: :class:`parted.Alignment`
:raises :class:`~.errors.AlignmentError`: if the partition is too
small to be aligned
"""
# default to the optimal alignment
alignment = self.get_optimal_alignment()
if size is None:
return alignment
# use the minimal alignment if the requested size is smaller than the
# optimal io size
minimal_alignment = self.get_minimal_alignment()
optimal_grain_size = Size(alignment.grainSize * self.sector_size)
minimal_grain_size = Size(minimal_alignment.grainSize * self.sector_size)
if size < minimal_grain_size:
raise AlignmentError("requested size cannot be aligned")
elif size < optimal_grain_size:
alignment = minimal_alignment
return alignment
def get_end_alignment(self, size=None, alignment=None):
""" Return an appropriate end-alignment for a new partition.
:keyword size: proposed partition size (optional)
:type size: :class:`~.size.Size`
:keyword alignment: the start alignment (optional)
:type alignment: :class:`parted.Alignment`
:returns: the appropriate alignment to use
:rtype: :class:`parted.Alignment`
:raises :class:`~.errors.AlignmentError`: if the partition is too
small to be aligned
"""
if alignment is None:
alignment = self.get_alignment(size=size)
return parted.Alignment(offset=alignment.offset - 1,
grainSize=alignment.grainSize)
@property
def alignment(self):
return self.get_alignment()
@property
def end_alignment(self):
return self.get_end_alignment()
@property
def free(self):
if self.parted_disk is not None:
free_areas = self.parted_disk.getFreeSpacePartitions()
else:
free_areas = []
return sum((Size(f.getLength(unit="B")) for f in free_areas), Size(0))
@property
def magic_partition_number(self):
""" Number of disklabel-type-specific special partition. """
if self.label_type == "mac":
return 1
elif self.label_type == "sun":
return 3
else:
return 0
register_device_format(DiskLabel)
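# Hedged usage sketch (not part of the original module; `disklabel` is a
# hypothetical, already-instantiated DiskLabel format object and the 500 MiB
# size is an arbitrary example, assuming Size accepts a spec string). It shows
# how the alignment helpers above combine: the start alignment falls back from
# optimal to minimal when the requested size is smaller than the optimal grain,
# and the end alignment is the start alignment shifted back by one sector.
def _example_alignment_choice(disklabel):
    size = Size("500 MiB")
    start = disklabel.get_alignment(size=size)
    end = disklabel.get_end_alignment(alignment=start)
    return start, end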
|
lgpl-2.1
| 3,892,311,891,357,951,500
| 35.927852
| 104
| 0.576809
| false
| 4.274422
| false
| false
| false
|
JianfengYao/python-web-app
|
www/transwarp/db.py
|
1
|
13974
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao'
'''
Database operation module.
'''
import time, uuid, functools, threading, logging
# Dict object:
class Dict(dict):
'''
Simple dict but support access as x.y style.
>>> d1 = Dict()
>>> d1['x'] = 100
>>> d1.x
100
>>> d1.y = 200
>>> d1['y']
200
>>> d2 = Dict(a=1, b=2, c='3')
>>> d2.c
'3'
>>> d2['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> d2.empty
Traceback (most recent call last):
...
AttributeError: 'Dict' object has no attribute 'empty'
>>> d3 = Dict(('a', 'b', 'c'), (1, 2, 3))
>>> d3.a
1
>>> d3.b
2
>>> d3.c
3
'''
def __init__(self, names=(), values=(), **kw):
super(Dict, self).__init__(**kw)
for k, v in zip(names, values):
self[k] = v
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
def next_id(t=None):
'''
Return next id as 50-char string.
Args:
t: unix timestamp, default to None and using time.time().
'''
if t is None:
t = time.time()
return '%015d%s000' % (int(t * 1000), uuid.uuid4().hex)
def _profiling(start, sql=''):
t = time.time() - start
if t > 0.1:
logging.warning('[PROFILING] [DB] %s: %s' % (t, sql))
else:
logging.info('[PROFILING] [DB] %s: %s' % (t, sql))
class DBError(Exception):
pass
class MultiColumnsError(DBError):
pass
class _LasyConnection(object):
def __init__(self):
self.connection = None
def cursor(self):
if self.connection is None:
connection = engine.connect()
logging.info('open connection <%s>...' % hex(id(connection)))
self.connection = connection
return self.connection.cursor()
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def cleanup(self):
if self.connection:
connection = self.connection
self.connection = None
logging.info('close connection <%s>...' % hex(id(connection)))
connection.close()
class _DbCtx(threading.local):
'''
Thread local object that holds connection info.
'''
def __init__(self):
self.connection = None
self.transactions = 0
def is_init(self):
        return self.connection is not None
def init(self):
logging.info('open lazy connection...')
self.connection = _LasyConnection()
self.transactions = 0
def cleanup(self):
self.connection.cleanup()
self.connection = None
def cursor(self):
'''
Return cursor
'''
return self.connection.cursor()
# thread-local db context:
_db_ctx = _DbCtx()
# global engine object:
engine = None
class _Engine(object):
def __init__(self, connect):
self._connect = connect
def connect(self):
return self._connect()
def create_engine(user, password, database, host='127.0.0.1', port=3306, **kw):
import mysql.connector
global engine
if engine is not None:
raise DBError('Engine is already initialized.')
params = dict(user=user, password=password, database=database, host=host, port=port)
defaults = dict(use_unicode=True, charset='utf8', collation='utf8_general_ci', autocommit=False)
for k, v in defaults.iteritems():
params[k] = kw.pop(k, v)
params.update(kw)
params['buffered'] = True
engine = _Engine(lambda: mysql.connector.connect(**params))
# test connection...
logging.info('Init mysql engine <%s> ok.' % hex(id(engine)))
class _ConnectionCtx(object):
'''
    _ConnectionCtx object that can open and close a connection context. _ConnectionCtx objects can be nested, and only the
    outermost connection has effect.
with connection():
pass
with connection():
pass
'''
def __enter__(self):
global _db_ctx
self.should_cleanup = False
if not _db_ctx.is_init():
_db_ctx.init()
self.should_cleanup = True
return self
def __exit__(self, exctype, excvalue, traceback):
global _db_ctx
if self.should_cleanup:
_db_ctx.cleanup()
def connection():
'''
Return _ConnectionCtx object that can be used by 'with' statement:
with connection():
pass
'''
return _ConnectionCtx()
def with_connection(func):
'''
    Decorator for reusing a connection.
@with_connection
def foo(*args, **kw):
f1()
f2()
f3()
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
with _ConnectionCtx():
return func(*args, **kw)
return _wrapper
class _TransactionCtx(object):
'''
_TransactionCtx object that can handle transactions.
with _TransactionCtx():
pass
'''
def __enter__(self):
global _db_ctx
self.should_close_conn = False
if not _db_ctx.is_init():
# needs open a connection first:
_db_ctx.init()
self.should_close_conn = True
_db_ctx.transactions = _db_ctx.transactions + 1
logging.info('begin transaction...' if _db_ctx.transactions==1 else 'join current transaction...')
return self
def __exit__(self, exctype, excvalue, traceback):
global _db_ctx
_db_ctx.transactions = _db_ctx.transactions - 1
try:
if _db_ctx.transactions==0:
if exctype is None:
self.commit()
else:
self.rollback()
finally:
if self.should_close_conn:
_db_ctx.cleanup()
def commit(self):
global _db_ctx
logging.info('commit transaction...')
try:
_db_ctx.connection.commit()
logging.info('commit ok.')
except:
logging.warning('commit failed. try rollback...')
_db_ctx.connection.rollback()
logging.warning('rollback ok.')
raise
def rollback(self):
global _db_ctx
logging.warning('rollback transaction...')
_db_ctx.connection.rollback()
logging.info('rollback ok.')
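# Hedged sketch of how nested transaction contexts compose (grounded in the
# counter logic above; only the outermost context commits or rolls back, inner
# contexts merely join it):
#
#     with transaction():          # transactions == 1: 'begin transaction...'
#         insert('user', **u1)
#         with transaction():      # transactions == 2: 'join current transaction...'
#             insert('user', **u2)
#         # inner __exit__: counter back to 1, nothing committed yet
#     # outer __exit__: counter == 0 -> commit, or rollback if an exception escaped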
def transaction():
'''
    Create a transaction object that can be used with the 'with' statement:
with transaction():
pass
>>> def update_profile(id, name, rollback):
... u = dict(id=id, name=name, email='%s@test.org' % name, passwd=name, last_modified=time.time())
... insert('user', **u)
... r = update('update user set passwd=? where id=?', name.upper(), id)
... if rollback:
... raise StandardError('will cause rollback...')
>>> with transaction():
... update_profile(900301, 'Python', False)
>>> select_one('select * from user where id=?', 900301).name
u'Python'
>>> with transaction():
... update_profile(900302, 'Ruby', True)
Traceback (most recent call last):
...
StandardError: will cause rollback...
>>> select('select * from user where id=?', 900302)
[]
'''
return _TransactionCtx()
def with_transaction(func):
'''
    A decorator that wraps a function in a transaction.
>>> @with_transaction
... def update_profile(id, name, rollback):
... u = dict(id=id, name=name, email='%s@test.org' % name, passwd=name, last_modified=time.time())
... insert('user', **u)
... r = update('update user set passwd=? where id=?', name.upper(), id)
... if rollback:
... raise StandardError('will cause rollback...')
>>> update_profile(8080, 'Julia', False)
>>> select_one('select * from user where id=?', 8080).passwd
u'JULIA'
>>> update_profile(9090, 'Robert', True)
Traceback (most recent call last):
...
StandardError: will cause rollback...
>>> select('select * from user where id=?', 9090)
[]
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
        _start = time.time()
        try:
            with _TransactionCtx():
                return func(*args, **kw)
        finally:
            # log the elapsed time even though the wrapped call returns from
            # inside the `with` block
            _profiling(_start)
return _wrapper
def _select(sql, first, *args):
    'Execute select SQL and return a unique result or a list of results.'
global _db_ctx
cursor = None
sql = sql.replace('?', '%s')
logging.info('SQL: %s, ARGS: %s' % (sql, args))
try:
cursor = _db_ctx.connection.cursor()
cursor.execute(sql, args)
if cursor.description:
names = [x[0] for x in cursor.description]
if first:
values = cursor.fetchone()
if not values:
return None
return Dict(names, values)
return [Dict(names, x) for x in cursor.fetchall()]
finally:
if cursor:
cursor.close()
@with_connection
def select_one(sql, *args):
'''
    Execute select SQL and expect one result.
    If no result is found, return None.
    If multiple results are found, the first one is returned.
>>> u1 = dict(id=100, name='Alice', email='alice@test.org', passwd='ABC-12345', last_modified=time.time())
>>> u2 = dict(id=101, name='Sarah', email='sarah@test.org', passwd='ABC-12345', last_modified=time.time())
>>> insert('user', **u1)
1
>>> insert('user', **u2)
1
>>> u = select_one('select * from user where id=?', 100)
>>> u.name
u'Alice'
>>> select_one('select * from user where email=?', 'abc@email.com')
>>> u2 = select_one('select * from user where passwd=? order by email', 'ABC-12345')
>>> u2.name
u'Alice'
'''
return _select(sql, True, *args)
@with_connection
def select_int(sql, *args):
'''
    Execute select SQL and expect one and only one int result.
>>> n = update('delete from user')
>>> u1 = dict(id=96900, name='Ada', email='ada@test.org', passwd='A-12345', last_modified=time.time())
>>> u2 = dict(id=96901, name='Adam', email='adam@test.org', passwd='A-12345', last_modified=time.time())
>>> insert('user', **u1)
1
>>> insert('user', **u2)
1
>>> select_int('select count(*) from user')
2
>>> select_int('select count(*) from user where email=?', 'ada@test.org')
1
>>> select_int('select count(*) from user where email=?', 'notexist@test.org')
0
>>> select_int('select id from user where email=?', 'ada@test.org')
96900
>>> select_int('select id, name from user where email=?', 'ada@test.org')
Traceback (most recent call last):
...
MultiColumnsError: Expect only one column.
'''
d = _select(sql, True, *args)
if len(d)!=1:
raise MultiColumnsError('Expect only one column.')
return d.values()[0]
@with_connection
def select(sql, *args):
'''
    Execute select SQL and return a list, or an empty list if there is no result.
>>> u1 = dict(id=200, name='Wall.E', email='wall.e@test.org', passwd='back-to-earth', last_modified=time.time())
>>> u2 = dict(id=201, name='Eva', email='eva@test.org', passwd='back-to-earth', last_modified=time.time())
>>> insert('user', **u1)
1
>>> insert('user', **u2)
1
>>> L = select('select * from user where id=?', 900900900)
>>> L
[]
>>> L = select('select * from user where id=?', 200)
>>> L[0].email
u'wall.e@test.org'
>>> L = select('select * from user where passwd=? order by id desc', 'back-to-earth')
>>> L[0].name
u'Eva'
>>> L[1].name
u'Wall.E'
'''
return _select(sql, False, *args)
@with_connection
def _update(sql, *args):
global _db_ctx
cursor = None
sql = sql.replace('?', '%s')
logging.info('SQL: %s, ARGS: %s' % (sql, args))
try:
cursor = _db_ctx.connection.cursor()
cursor.execute(sql, args)
r = cursor.rowcount
if _db_ctx.transactions==0:
            # no transaction environment:
logging.info('auto commit')
_db_ctx.connection.commit()
return r
finally:
if cursor:
cursor.close()
def insert(table, **kw):
'''
Execute insert SQL.
>>> u1 = dict(id=2000, name='Bob', email='bob@test.org', passwd='bobobob', last_modified=time.time())
>>> insert('user', **u1)
1
>>> u2 = select_one('select * from user where id=?', 2000)
>>> u2.name
u'Bob'
>>> insert('user', **u2)
Traceback (most recent call last):
...
IntegrityError: 1062 (23000): Duplicate entry '2000' for key 'PRIMARY'
'''
cols, args = zip(*kw.iteritems())
sql = 'insert into `%s` (%s) values (%s)' % (table, ','.join(['`%s`' % col for col in cols]), ','.join(['?' for i in range(len(cols))]))
return _update(sql, *args)
def update(sql, *args):
r'''
Execute update SQL.
>>> u1 = dict(id=1000, name='Michael', email='michael@test.org', passwd='123456', last_modified=time.time())
>>> insert('user', **u1)
1
>>> u2 = select_one('select * from user where id=?', 1000)
>>> u2.email
u'michael@test.org'
>>> u2.passwd
u'123456'
>>> update('update user set email=?, passwd=? where id=?', 'michael@example.org', '654321', 1000)
1
>>> u3 = select_one('select * from user where id=?', 1000)
>>> u3.email
u'michael@example.org'
>>> u3.passwd
u'654321'
>>> update('update user set passwd=? where id=?', '***', '123\' or id=\'456')
0
'''
return _update(sql, *args)
if __name__=='__main__':
logging.basicConfig(level=logging.DEBUG)
create_engine('www-data', 'www-data', 'test')
update('drop table if exists user')
update('create table user (id int primary key, name text, email text, passwd text, last_modified real)')
import doctest
doctest.testmod()
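# Hedged note: running `python db.py` executes the doctests above via
# doctest.testmod(); they assume a local MySQL server with a 'test' database
# that the 'www-data' user can access (the user table is recreated by the
# __main__ block on each run).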
|
gpl-2.0
| -7,054,294,330,750,922,000
| 27.871901
| 140
| 0.563761
| false
| 3.643807
| true
| false
| false
|
suprotkin/atm
|
atm/atm/settings.py
|
1
|
2842
|
"""
Django settings for atm project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k%f-1_1n5y^c68*(wa^&oq)m6xevu5pgha31i6*v5ssm@6dl*e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'card',
'common',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.security.SecurityMiddleware',
'card.middleware.CardAuthMiddleware',
)
ROOT_URLCONF = 'atm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'atm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
|
gpl-2.0
| -6,900,148,488,778,329,000
| 25.314815
| 74
| 0.69247
| false
| 3.424096
| false
| false
| false
|
foundertherapy/django-users-plus
|
accountsplus/views.py
|
1
|
8660
|
from __future__ import unicode_literals
import logging
from django.utils.translation import ugettext as _
import django.views.decorators.cache
import django.views.decorators.csrf
import django.views.decorators.debug
import django.contrib.auth.decorators
import django.contrib.auth.views
import django.contrib.auth.forms
import django.contrib.auth
import django.contrib.messages
import django.shortcuts
import django.http
import django.template.response
import django.utils.module_loading
import django.core.urlresolvers
from django.conf import settings as app_settings
from axes import utils
import signals
import forms
import settings
logger = logging.getLogger(__name__)
def logout_then_login(request, login_url=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
# if a user is masquerading, don't log them out, just kill the masquerade
if request.session.get('is_masquerading'):
return django.shortcuts.redirect('end_masquerade')
else:
return django.contrib.auth.views.logout_then_login(request, login_url, extra_context)
@django.views.decorators.cache.never_cache
@django.contrib.auth.decorators.login_required
def masquerade(request, user_id=None):
User = django.contrib.auth.get_user_model()
return_page = request.META.get('HTTP_REFERER') or 'admin:index'
if not user_id:
django.contrib.messages.error(request, 'Masquerade failed: no user specified')
return django.shortcuts.redirect(return_page)
if not request.user.has_perm(User.PERMISSION_MASQUERADE):
django.contrib.messages.error(request, 'Masquerade failed: insufficient privileges')
return django.shortcuts.redirect(return_page)
if not (request.user.is_superuser or request.user.is_staff):
django.contrib.messages.error(request, 'Masquerade failed: must be staff or superuser')
return django.shortcuts.redirect(return_page)
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
logger.error('User {} ({}) masquerading failed for user {}'.format(request.user.email, request.user.id, user_id))
django.contrib.messages.error(request, 'Masquerade failed: unknown user {}'.format(user_id))
return django.shortcuts.redirect(return_page)
if user.is_superuser:
logger.warning(
'User {} ({}) cannot masquerade as superuser {} ({})'.format(request.user.email, request.user.id, user.email, user.id))
django.contrib.messages.warning(request, 'Cannot masquerade as a superuser')
return django.shortcuts.redirect(return_page)
admin_user = request.user
user.backend = request.session[django.contrib.auth.BACKEND_SESSION_KEY]
# log the new user in
signals.masquerade_start.send(sender=masquerade, request=request, user=admin_user, masquerade_as=user)
# this is needed to track whether this login is for a masquerade
setattr(user, 'is_masquerading', True)
setattr(user, 'masquerading_user', admin_user)
django.contrib.auth.login(request, user)
request.session['is_masquerading'] = True
request.session['masquerade_user_id'] = admin_user.id
request.session['return_page'] = return_page
request.session['masquerade_is_superuser'] = admin_user.is_superuser
logger.info(
'User {} ({}) masquerading as {} ({})'.format(admin_user.email, admin_user.id, request.user.email, request.user.id))
django.contrib.messages.success(request, 'Masquerading as user {0}'.format(user.email))
return django.http.HttpResponseRedirect(app_settings.LOGIN_REDIRECT_URL)
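# Hedged wiring sketch (hypothetical urls.py of a project using this app; note
# that logout_then_login above redirects to a URL named 'end_masquerade'):
#
#     from django.conf.urls import url
#     from accountsplus import views
#
#     urlpatterns = [
#         url(r'^masquerade/(?P<user_id>\d+)/$', views.masquerade, name='masquerade'),
#         url(r'^masquerade/end/$', views.end_masquerade, name='end_masquerade'),
#     ]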
@django.views.decorators.cache.never_cache
@django.contrib.auth.decorators.login_required
def end_masquerade(request):
User = django.contrib.auth.get_user_model()
if 'is_masquerading' not in request.session:
return django.shortcuts.redirect('admin:index')
if 'masquerade_user_id' in request.session:
try:
masqueraded_user = request.user
user = User.objects.get(
pk=request.session['masquerade_user_id'])
user.backend = request.session[
django.contrib.auth.BACKEND_SESSION_KEY]
# this is needed to track whether this login is for a masquerade
django.contrib.auth.logout(request)
signals.masquerade_end.send(
sender=end_masquerade, request=request, user=user,
masquerade_as=masqueraded_user)
django.contrib.auth.login(request, user)
logging.info('End masquerade user: {} ({}) by: {} ({})'.format(
masqueraded_user.email, masqueraded_user.id,
user.email, user.id))
django.contrib.messages.success(request, 'Masquerade ended')
except User.DoesNotExist as e:
logging.critical(
'Masquerading user {} does not exist'.format(
request.session['masquerade_user_id']))
return django.shortcuts.redirect('admin:index')
@django.views.decorators.debug.sensitive_post_parameters()
@django.views.decorators.csrf.csrf_protect
@django.contrib.auth.decorators.login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=django.contrib.auth.forms.
PasswordChangeForm,
current_app=None, extra_context=None):
if post_change_redirect is None:
post_change_redirect = django.core.urlresolvers.reverse(
'password_change_done')
else:
post_change_redirect = django.shortcuts.resolve_url(
post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one if
# django.contrib.auth.middleware.SessionAuthenticationMiddleware
# is enabled.
django.contrib.auth.update_session_auth_hash(request, form.user)
signals.user_password_change.send(
sender=password_change, request=request, user=form.user)
return django.http.HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return django.template.response.TemplateResponse(request, template_name, context)
@django.views.decorators.csrf.csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=django.contrib.auth.forms.PasswordResetForm,
token_generator=django.contrib.auth.views.default_token_generator,
post_reset_redirect=None,
from_email=None,
current_app=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
User = django.contrib.auth.get_user_model()
response = django.contrib.auth.views.password_reset(
request, template_name, email_template_name,
subject_template_name, password_reset_form, token_generator,
post_reset_redirect, from_email, extra_context,
html_email_template_name, extra_email_context)
if request.method == 'POST':
email = request.POST['email']
try:
user = User.objects.get(email=email)
signals.user_password_reset_request.send(
sender=password_reset, request=request, user=user)
except User.DoesNotExist:
pass
return response
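# Hedged sketch (hypothetical receiver; the signal itself is sent by
# password_reset above when a matching user is found):
#
#     from django.dispatch import receiver
#
#     @receiver(signals.user_password_reset_request)
#     def log_reset_request(sender, request, user, **kwargs):
#         logger.info('password reset requested for %s', user.email)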
class GenericLockedView(django.views.generic.FormView):
template_name = settings.LOCKOUT_TEMPLATE
form_class = forms.CaptchaForm
urlPattern = ''
def get_success_url(self):
return django.urls.reverse_lazy(self.urlPattern)
def form_valid(self, form):
utils.reset(username=form.cleaned_data['username'])
return super(GenericLockedView, self).form_valid(form)
class UserLockedOutView(GenericLockedView):
urlPattern = 'login'
class AdminLockedOutView(GenericLockedView):
urlPattern = 'admin:index'
|
mit
| -1,332,505,607,043,437,000
| 40.238095
| 131
| 0.675751
| false
| 3.992623
| false
| false
| false
|
kevin-intel/scikit-learn
|
sklearn/datasets/_kddcup99.py
|
3
|
12676
|
"""KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import errno
from gzip import GzipFile
import logging
import os
from os.path import dirname, exists, join
import numpy as np
import joblib
from ._base import _fetch_remote
from ._base import _convert_data_dataframe
from . import get_data_home
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
ARCHIVE = RemoteFileMetadata(
filename='kddcup99_data',
url='https://ndownloader.figshare.com/files/5976045',
checksum=('3b6c942aa0356c0ca35b7b595a26c89d'
'343652c9db428893e7494f837b274292'))
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
ARCHIVE_10_PERCENT = RemoteFileMetadata(
filename='kddcup99_10_data',
url='https://ndownloader.figshare.com/files/5976042',
checksum=('8045aca0d84e70e622d1148d7df78249'
'6f6333bf6eb979a1b0837c42a9fd9561'))
logger = logging.getLogger(__name__)
def fetch_kddcup99(*, subset=None, data_home=None, shuffle=False,
random_state=None,
percent10=True, download_if_missing=True, return_X_y=False,
as_frame=False):
"""Load the kddcup99 dataset (classification).
Download it if necessary.
================= ====================================
Classes 23
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
================= ====================================
Read more in the :ref:`User Guide <kddcup99_dataset>`.
.. versionadded:: 0.18
Parameters
----------
subset : {'SA', 'SF', 'http', 'smtp'}, default=None
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
.. versionadded:: 0.19
shuffle : bool, default=False
Whether to shuffle dataset.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and for
selection of abnormal samples if `subset='SA'`. Pass an int for
reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.20
as_frame : bool, default=False
If `True`, returns a pandas Dataframe for the ``data`` and ``target``
objects in the `Bunch` returned object; `Bunch` return object will also
have a ``frame`` member.
.. versionadded:: 0.24
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (494021, 41)
The data matrix to learn. If `as_frame=True`, `data` will be a
pandas DataFrame.
target : {ndarray, series} of shape (494021,)
            The classification target for each sample. If `as_frame=True`, `target`
will be a pandas Series.
frame : dataframe of shape (494021, 42)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
The full description of the dataset.
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
kddcup99 = _fetch_brute_kddcup99(
data_home=data_home,
percent10=percent10,
download_if_missing=download_if_missing
)
data = kddcup99.data
target = kddcup99.target
feature_names = kddcup99.feature_names
target_names = kddcup99.target_names
if subset == 'SA':
s = target == b'normal.'
t = np.logical_not(s)
normal_samples = data[s, :]
normal_targets = target[s]
abnormal_samples = data[t, :]
abnormal_targets = target[t]
n_samples_abnormal = abnormal_samples.shape[0]
# selected abnormal samples:
random_state = check_random_state(random_state)
r = random_state.randint(0, n_samples_abnormal, 3377)
abnormal_samples = abnormal_samples[r]
abnormal_targets = abnormal_targets[r]
data = np.r_[normal_samples, abnormal_samples]
target = np.r_[normal_targets, abnormal_targets]
if subset == 'SF' or subset == 'http' or subset == 'smtp':
# select all samples with positive logged_in attribute:
s = data[:, 11] == 1
data = np.c_[data[s, :11], data[s, 12:]]
feature_names = feature_names[:11] + feature_names[12:]
target = target[s]
data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))
if subset == 'http':
s = data[:, 2] == b'http'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4],
feature_names[5]]
if subset == 'smtp':
s = data[:, 2] == b'smtp'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4],
feature_names[5]]
if subset == 'SF':
data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[2],
feature_names[4], feature_names[5]]
if shuffle:
data, target = shuffle_method(data, target, random_state=random_state)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'kddcup99.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
if as_frame:
frame, data, target = _convert_data_dataframe(
"fetch_kddcup99", data, target, feature_names, target_names
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=fdescr,
)
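# Hedged usage sketch (the first call downloads the archive and caches it under
# data_home; the 'http' subset keeps only the three log-scaled features
# selected above):
#
#     X, y = fetch_kddcup99(subset='http', percent10=True, return_X_y=True)
#     assert X.shape[1] == 3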
def _fetch_brute_kddcup99(data_home=None,
download_if_missing=True, percent10=True):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
target : ndarray of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
DESCR : str
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
dir_suffix = "-py3"
if percent10:
kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
archive = ARCHIVE_10_PERCENT
else:
kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
archive = ARCHIVE
samples_path = join(kddcup_dir, "samples")
targets_path = join(kddcup_dir, "targets")
available = exists(samples_path)
dt = [('duration', int),
('protocol_type', 'S4'),
('service', 'S11'),
('flag', 'S6'),
('src_bytes', int),
('dst_bytes', int),
('land', int),
('wrong_fragment', int),
('urgent', int),
('hot', int),
('num_failed_logins', int),
('logged_in', int),
('num_compromised', int),
('root_shell', int),
('su_attempted', int),
('num_root', int),
('num_file_creations', int),
('num_shells', int),
('num_access_files', int),
('num_outbound_cmds', int),
('is_host_login', int),
('is_guest_login', int),
('count', int),
('srv_count', int),
('serror_rate', float),
('srv_serror_rate', float),
('rerror_rate', float),
('srv_rerror_rate', float),
('same_srv_rate', float),
('diff_srv_rate', float),
('srv_diff_host_rate', float),
('dst_host_count', int),
('dst_host_srv_count', int),
('dst_host_same_srv_rate', float),
('dst_host_diff_srv_rate', float),
('dst_host_same_src_port_rate', float),
('dst_host_srv_diff_host_rate', float),
('dst_host_serror_rate', float),
('dst_host_srv_serror_rate', float),
('dst_host_rerror_rate', float),
('dst_host_srv_rerror_rate', float),
('labels', 'S16')]
column_names = [c[0] for c in dt]
target_names = column_names[-1]
feature_names = column_names[:-1]
if available:
try:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
except Exception as e:
raise IOError(
"The cache for fetch_kddcup99 is invalid, please delete "
f"{str(kddcup_dir)} and run the fetch_kddcup99 again") from e
elif download_if_missing:
_mkdirp(kddcup_dir)
logger.info("Downloading %s" % archive.url)
_fetch_remote(archive, dirname=kddcup_dir)
DT = np.dtype(dt)
logger.debug("extracting archive")
archive_path = join(kddcup_dir, archive.filename)
file_ = GzipFile(filename=archive_path, mode='r')
Xy = []
for line in file_.readlines():
line = line.decode()
Xy.append(line.replace('\n', '').split(','))
file_.close()
logger.debug('extraction done')
os.remove(archive_path)
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
# XXX bug when compress!=0:
# (error: 'Incorrect data length while decompressing[...] the file
# could be corrupted.')
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
else:
raise IOError("Data not found and `download_if_missing` is False")
return Bunch(
data=X,
target=y,
feature_names=feature_names,
target_names=[target_names],
)
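# Hedged sketch of the on-disk cache this loader produces (paths assume the
# default data_home of '~/scikit_learn_data' and percent10=True):
#
#     ~/scikit_learn_data/kddcup99_10-py3/samples   # joblib-pickled X
#     ~/scikit_learn_data/kddcup99_10-py3/targets   # joblib-pickled y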
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
|
bsd-3-clause
| 3,550,723,356,904,437,000
| 32.983914
| 98
| 0.574787
| false
| 3.740336
| false
| false
| false
|
wheeler-microfluidics/dmf-device-ui
|
dmf_device_ui/canvas.py
|
1
|
51678
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import itertools
import functools as ft
import logging
import threading
from cairo_helpers.surface import flatten_surfaces
from logging_helpers import _L
from pygtkhelpers.ui.views.shapes_canvas_view import GtkShapesCanvasView
from pygtkhelpers.utils import gsignal
from pygst_utils.video_view.video_sink import VideoSink
from pygst_utils.video_view import np_to_cairo
from svg_model import compute_shape_centers
from svg_model.color import hex_color_to_rgba
import cairo
import debounce
import gtk
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class Route(object):
'''
Attributes
----------
device : microdrop.dmf_device.DmfDevice
electrode_ids : list
Ordered list of **connected** electrodes ids.
Represents an actuation sequence of electrodes that would support
liquid movement between the first and last electrode.
'''
def __init__(self, device):
self.device = device
self.electrode_ids = []
def __str__(self):
return '<Route electrode_ids=%s>' % self.electrode_ids
def append(self, electrode_id):
'''
Append the specified electrode to the route.
The route is not modified (i.e., electrode is not appended) if
electrode is not connected to the last electrode in the existing route.
Parameters
----------
electrode_id : str
Electrode identifier.
'''
do_append = False
if not self.electrode_ids:
do_append = True
elif self.device.shape_indexes.shape[0] > 0:
source = self.electrode_ids[-1]
target = electrode_id
if not (source == target):
source_id, target_id = self.device.shape_indexes[[source,
target]]
try:
if self.device.adjacency_matrix[source_id, target_id]:
# Electrodes are connected, so append target to current
# route.
do_append = True
except IndexError:
logger.warning('Electrodes `%s` and `%s` are not '
'connected.', source, target)
if do_append:
self.electrode_ids.append(electrode_id)
return do_append
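# Hedged usage sketch (hypothetical electrode ids; `device` is an existing
# microdrop.dmf_device.DmfDevice with an adjacency matrix):
#
#     route = Route(device)
#     route.append('electrode000')   # first electrode is always accepted
#     route.append('electrode001')   # accepted only if adjacent to electrode000
#     route.append('electrode057')   # returns False if not connected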
class DmfDeviceCanvas(GtkShapesCanvasView):
'''
Draw device layout from SVG file.
Mouse events are handled as follows:
- Click and release on the same electrode emits electrode selected signal.
- Click on one electrode, drag, and release on another electrode emits
electrode *pair* selected signal, with *source* electrode and *target*
electrode.
- Moving mouse cursor over electrode emits electrode mouse over signal.
- Moving mouse cursor out of electrode emits electrode mouse out signal.
Signals are emitted as gobject signals. See `emit` calls for payload
formats.
'''
gsignal('device-set', object)
gsignal('electrode-command', str, str, object)
gsignal('electrode-mouseout', object)
gsignal('electrode-mouseover', object)
gsignal('electrode-pair-selected', object)
gsignal('electrode-selected', object)
#: .. versionadded:: 0.13
gsignal('global-command', str, str, object)
gsignal('key-press', object)
gsignal('key-release', object)
gsignal('route-command', str, str, object)
gsignal('route-electrode-added', object)
gsignal('route-selected', object)
#: .. versionadded:: 0.11.3
gsignal('routes-set', object)
gsignal('surface-rendered', str, object)
gsignal('surfaces-reset', object)
# Video signals
gsignal('point-pair-selected', object)
gsignal('video-enabled')
gsignal('video-disabled')
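    # Hedged usage sketch (hypothetical handler): consumers typically connect to
    # these gobject signals after constructing the canvas, e.g.
    #
    #     canvas = DmfDeviceCanvas()
    #     canvas.connect('electrode-selected', lambda canvas_, data: ...)
    #
    # See the corresponding `emit` calls below for each signal's payload format.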
def __init__(self, connections_alpha=1., connections_color=1.,
transport='tcp', target_host='*', port=None, **kwargs):
# Video sink socket info.
self.socket_info = {'transport': transport,
'host': target_host,
'port': port}
# Identifier for video incoming socket check.
self.callback_id = None
self._enabled = False # Video enable
self.start_event = None # Video modify start click event
# Matched corner points between canvas and video frame. Used to
# generate map between coordinate spaces.
self.df_canvas_corners = pd.DataFrame(None, columns=['x', 'y'],
dtype=float)
self.df_frame_corners = pd.DataFrame(None, columns=['x', 'y'],
dtype=float)
# Matrix map from frame coordinates to canvas coordinates.
self.frame_to_canvas_map = None
# Matrix map from canvas coordinates to frame coordinates.
self.canvas_to_frame_map = None
# Shape of canvas (i.e., drawing area widget).
self.shape = None
self.mode = 'control'
# Read SVG polygons into dataframe, one row per polygon vertex.
df_shapes = pd.DataFrame(None, columns=['id', 'vertex_i', 'x', 'y'])
self.device = None
self.shape_i_column = 'id'
# Save alpha for drawing connections.
self.connections_alpha = connections_alpha
# Save color for drawing connections.
self.connections_color = connections_color
        #: .. versionadded:: 0.12
self._dynamic_electrodes = pd.Series()
self.reset_states()
self.reset_routes()
self.connections_attrs = {}
self.last_pressed = None
self.last_hovered = None
self._route = None
self.connections_enabled = (self.connections_alpha > 0)
self.default_corners = {} # {'canvas': None, 'frame': None}
#: .. versionadded:: 0.13
#: Registered global commands
self.global_commands = OrderedDict()
# Registered electrode commands
self.electrode_commands = OrderedDict()
# Register test command
#self.register_electrode_command('ping',
#group='microdrop.device_info_plugin')
# Registered route commands
self.route_commands = OrderedDict()
super(DmfDeviceCanvas, self).__init__(df_shapes, self.shape_i_column,
**kwargs)
@property
def df_routes(self):
'''
.. versionadded:: 0.11.3
'''
return self._df_routes
@df_routes.setter
def df_routes(self, value):
'''
.. versionadded:: 0.11.3
'''
self._df_routes = value
try:
self.emit('routes-set', self._df_routes.copy())
except TypeError:
pass
def reset_canvas_corners(self):
self.df_canvas_corners = (self.default_corners
.get('canvas',
self.default_shapes_corners()))
def reset_frame_corners(self):
self.df_frame_corners = (self.default_corners
.get('frame', self.default_frame_corners()))
def default_shapes_corners(self):
if self.canvas is None:
return self.df_canvas_corners
width, height = self.canvas.source_shape
return pd.DataFrame([[0, 0], [width, 0], [width, height], [0, height]],
columns=['x', 'y'], dtype=float)
def default_frame_corners(self):
if self.video_sink.frame_shape is None:
return self.df_frame_corners
width, height = self.video_sink.frame_shape
return pd.DataFrame([[0, 0], [width, 0], [width, height], [0, height]],
columns=['x', 'y'], dtype=float)
def update_transforms(self):
from opencv_helpers.safe_cv import cv2
if (self.df_canvas_corners.shape[0] == 0 or
self.df_frame_corners.shape[0] == 0):
return
self.canvas_to_frame_map = cv2.findHomography(self.df_canvas_corners
.values,
self.df_frame_corners
.values)[0]
self.frame_to_canvas_map = cv2.findHomography(self.df_frame_corners
.values,
self.df_canvas_corners
.values)[0]
# Translate transform shape coordinate space to drawing area coordinate
# space.
transform = self.frame_to_canvas_map
if self.canvas is not None:
transform = (self.canvas.shapes_to_canvas_transform.values
.dot(transform))
self.video_sink.transform = transform
self.set_surface('registration', self.render_registration())
def create_ui(self):
'''
.. versionchanged:: 0.9
Update device registration in real-time while dragging video
control point to new position.
.. versionchanged:: 0.12
Add ``dynamic_electrode_state_shapes`` layer to show dynamic
electrode actuations.
'''
super(DmfDeviceCanvas, self).create_ui()
self.video_sink = VideoSink(*[self.socket_info[k]
for k in ['transport', 'host', 'port']])
# Initialize video sink socket.
self.video_sink.reset()
# Required to have key-press and key-release events trigger.
self.widget.set_flags(gtk.CAN_FOCUS)
self.widget.add_events(gtk.gdk.KEY_PRESS_MASK |
gtk.gdk.KEY_RELEASE_MASK)
# Create initial (empty) cairo surfaces.
surface_names = ('background', 'shapes', 'connections', 'routes',
'channel_labels', 'static_electrode_state_shapes',
'dynamic_electrode_state_shapes', 'registration')
self.df_surfaces = pd.DataFrame([[self.get_surface(), 1.]
for i in xrange(len(surface_names))],
columns=['surface', 'alpha'],
index=pd.Index(surface_names,
name='name'))
def _update_registration(event):
try:
start_event = self.start_event.copy()
self.start_event = event.copy()
self.emit('point-pair-selected', {'start_event': start_event,
'end_event': event})
except AttributeError:
# Mouse button was released, causing `self.start_event` to be
# `None` before event was handled here.
pass
# Debounce calls to `_update_registration` function to prevent too many
# calls being triggered from mouse movement events.
update_registration = debounce.Debounce(_update_registration, wait=10)
def _on_mouse_move(area, event):
# XXX Need to make a copy of the event here since the original
# event will be deallocated before the debounced
# `update_registration` function is called.
event = event.copy()
if self.mode == 'register_video' and self.start_event is not None:
update_registration(event.copy())
# Connect video registration update event to mouse movement event.
self.widget.connect("motion_notify_event", _on_mouse_move)
def reset_canvas(self, width, height):
super(DmfDeviceCanvas, self).reset_canvas(width, height)
if self.device is None or self.canvas.df_canvas_shapes.shape[0] == 0:
return
self.canvas.df_canvas_shapes =\
compute_shape_centers(self.canvas.df_canvas_shapes
[[self.shape_i_column, 'vertex_i', 'x',
'y']], self.shape_i_column)
self.canvas.df_shape_centers = (self.canvas.df_canvas_shapes
[[self.shape_i_column, 'x_center',
'y_center']].drop_duplicates()
.set_index(self.shape_i_column))
df_shape_connections = self.device.df_shape_connections
self.canvas.df_connection_centers =\
(df_shape_connections.join(self.canvas.df_shape_centers
.loc[df_shape_connections.source]
.reset_index(drop=True))
.join(self.canvas.df_shape_centers.loc[df_shape_connections
.target]
.reset_index(drop=True), lsuffix='_source',
rsuffix='_target'))
def reset_states(self):
self.electrode_states = pd.Series(name='electrode_states')
self.electrode_states.index.name = 'electrode_id'
def reset_routes(self):
self.df_routes = pd.DataFrame(None, columns=['route_i', 'electrode_i',
'transition_i'])
def set_device(self, dmf_device):
self.device = dmf_device
# Index channels by electrode ID for fast look up.
self.electrode_channels = (self.device.df_electrode_channels
.set_index('electrode_id'))
self.df_shapes = self.device.df_shapes
self.reset_routes()
self.reset_states()
x, y, width, height = self.widget.get_allocation()
if width > 0 and height > 0:
self.canvas = None
self._dirty_size = width, height
self.emit('device-set', dmf_device)
def get_labels(self):
if self.device is None:
return pd.Series(None, index=pd.Index([], name='channel'))
return (self.electrode_channels.astype(str)
.groupby(level='electrode_id', axis=0)
.agg(lambda v: ', '.join(v))['channel'])
###########################################################################
# Properties
@property
def connection_count(self):
return self.device.df_shape_connections.shape[0] if self.device else 0
@property
def shape_count(self):
return self.df_shapes[self.shape_i_column].unique().shape[0]
@property
def enabled(self):
return self._enabled
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if value in ('register_video', 'control'):
self._mode = value
###########################################################################
# ## Mutators ##
def insert_surface(self, position, name, surface, alpha=1.):
'''
Insert Cairo surface as new layer.
Args
----
position (int) : Index position to insert layer at.
name (str) : Name of layer.
surface (cairo.Context) : Surface to render.
alpha (float) : Alpha/transparency level in the range `[0, 1]`.
'''
if name in self.df_surfaces.index:
raise NameError('Surface already exists with `name="{}"`.'
.format(name))
self.df_surfaces.loc[name] = surface, alpha
# Reorder layers such that the new surface is placed at the specified
# layer position (relative to the background surface).
surfaces_order = self.df_surfaces.index.values.tolist()
surfaces_order.remove(name)
base_index = surfaces_order.index('background') + 1
if position < 0:
position = len(surfaces_order) + position
surfaces_order.insert(base_index + position, name)
self.reorder_surfaces(surfaces_order)
def append_surface(self, name, surface, alpha=1.):
'''
Append Cairo surface as new layer on top of existing layers.
Args
----
name (str) : Name of layer.
surface (cairo.ImageSurface) : Surface to render.
alpha (float) : Alpha/transparency level in the range `[0, 1]`.
'''
self.insert_surface(position=self.df_surfaces.index.shape[0],
name=name, surface=surface, alpha=alpha)
def remove_surface(self, name):
'''
Remove layer from rendering stack and flatten remaining layers.
Args
----
name (str) : Name of layer.
'''
self.df_surfaces.drop(name, axis=0, inplace=True)
# Order of layers may have changed after removing a layer. Trigger
# refresh of surfaces.
self.reorder_surfaces(self.df_surfaces.index)
def clone_surface(self, source_name, target_name, target_position=-1,
alpha=1.):
'''
Clone surface from existing layer to a new name, inserting new surface
at specified position.
By default, new surface is appended as the top surface layer.
Args
----
source_name (str) : Name of layer to clone.
target_name (str) : Name of new layer.
'''
source_surface = self.df_surfaces.surface.ix[source_name]
source_width = source_surface.get_width()
source_height = source_surface.get_height()
source_format = source_surface.get_format()
target_surface = cairo.ImageSurface(source_format, source_width,
source_height)
target_cairo_context = cairo.Context(target_surface)
target_cairo_context.set_source_surface(source_surface, 0, 0)
target_cairo_context.paint()
self.insert_surface(target_position, target_name, target_surface,
alpha)
def enable(self):
if self.callback_id is None:
self._enabled = True
self.set_surface('shapes', self.render_shapes())
# Add layer to which video frames will be rendered.
if 'video' in self.df_surfaces.index:
self.set_surface('video', self.render_shapes())
else:
self.df_surfaces.loc['video'] = self.render_shapes(), 1.
# Reorder layers such that the video layer is directly on top of
# the background layer.
surfaces_order = self.df_surfaces.index.values.tolist()
surfaces_order.remove('video')
surfaces_order.insert(surfaces_order.index('background') + 1,
'video')
self.reorder_surfaces(surfaces_order)
self.render()
self.callback_id = self.video_sink.connect('frame-update',
self.on_frame_update)
self.emit('video-enabled')
def disable(self):
if self.callback_id is not None:
self._enabled = False
self.set_surface('shapes', self.render_shapes())
self.video_sink.disconnect(self.callback_id)
self.callback_id = None
if 'video' in self.df_surfaces.index:
self.df_surfaces.drop('video', axis=0, inplace=True)
self.reorder_surfaces(self.df_surfaces.index)
self.emit('video-disabled')
self.on_frame_update(None, None)
###########################################################################
# ## Drawing area event handling ##
def check_dirty(self):
if self._dirty_size is not None:
width, height = self._dirty_size
self.set_shape(width, height)
transform_update_required = True
else:
transform_update_required = False
result = super(DmfDeviceCanvas, self).check_dirty()
if transform_update_required:
gtk.idle_add(self.update_transforms)
return result
def set_shape(self, width, height):
logger.debug('[set_shape]: Set drawing area shape to %sx%s', width,
height)
self.shape = width, height
# Set new target size for scaled frames from video sink.
self.video_sink.shape = width, height
self.update_transforms()
if not self._enabled:
gtk.idle_add(self.on_frame_update, None, None)
###########################################################################
# ## Drawing methods ##
def get_surfaces(self):
surface1 = cairo.ImageSurface(cairo.FORMAT_ARGB32, 320, 240)
surface1_context = cairo.Context(surface1)
surface1_context.set_source_rgba(0, 0, 1, .5)
surface1_context.rectangle(0, 0, surface1.get_width(), surface1.get_height())
surface1_context.fill()
surface2 = cairo.ImageSurface(cairo.FORMAT_ARGB32, 800, 600)
surface2_context = cairo.Context(surface2)
surface2_context.save()
surface2_context.translate(100, 200)
surface2_context.set_source_rgba(0, 1, .5, .5)
surface2_context.rectangle(0, 0, surface1.get_width(), surface1.get_height())
surface2_context.fill()
surface2_context.restore()
return [surface1, surface2]
def draw_surface(self, surface, operator=cairo.OPERATOR_OVER):
x, y, width, height = self.widget.get_allocation()
if width <= 0 and height <= 0 or self.widget.window is None:
return
cairo_context = self.widget.window.cairo_create()
cairo_context.set_operator(operator)
cairo_context.set_source_surface(surface)
cairo_context.rectangle(0, 0, width, height)
cairo_context.fill()
###########################################################################
# Render methods
def render_dynamic_electrode_state_shapes(self):
'''
Render **dynamic** states reported by the electrode controller.
**Dynamic** electrode states are only applied while a protocol is
running -- _not_ while in real-time programming mode.
See also :meth:`render_electrode_shapes()`.
.. versionadded:: 0.12
'''
df_shapes = self.canvas.df_canvas_shapes.copy()
# Only include shapes for electrodes reported as actuated.
on_electrodes = self._dynamic_electrodes[self._dynamic_electrodes > 0]
df_shapes = (df_shapes.set_index('id').loc[on_electrodes.index]
.reset_index())
return self.render_electrode_shapes(df_shapes=df_shapes,
shape_scale=0.75,
                                            # Light blue
fill=(136 / 255.,
189 / 255.,
230 / 255.))
def render_static_electrode_state_shapes(self):
'''
Render **static** states reported by the electrode controller.
**Static** electrode states are applied while a protocol is **running**
_or_ while **real-time** control is activated.
See also :meth:`render_electrode_shapes()`.
.. versionadded:: 0.12
'''
df_shapes = self.canvas.df_canvas_shapes.copy()
if self.electrode_states.shape[0]:
df_shapes['state'] = self.electrode_states.ix[df_shapes.id].values
else:
df_shapes['state'] = 0
df_shapes = df_shapes.loc[df_shapes.state > 0].dropna(subset=['state'])
return self.render_electrode_shapes(df_shapes=df_shapes)
def render_electrode_shapes(self, df_shapes=None, shape_scale=0.8,
fill=(1, 1, 1)):
'''
Render electrode state shapes.
By default, draw each electrode shape filled white.
See also :meth:`render_shapes()`.
Parameters
----------
df_shapes = : pandas.DataFrame
.. versionadded:: 0.12
'''
surface = self.get_surface()
if df_shapes is None:
if hasattr(self.canvas, 'df_canvas_shapes'):
df_shapes = self.canvas.df_canvas_shapes
else:
return surface
if 'x_center' not in df_shapes or 'y_center' not in df_shapes:
# No center points have been computed for shapes.
return surface
cairo_context = cairo.Context(surface)
df_shapes = df_shapes.copy()
# Scale shapes to leave shape edges uncovered.
df_shapes[['x', 'y']] = (df_shapes[['x_center', 'y_center']] +
df_shapes[['x_center_offset',
'y_center_offset']].values *
shape_scale)
for path_id, df_path_i in (df_shapes.groupby(self.canvas
.shape_i_columns)[['x',
'y']]):
# Use attribute lookup for `x` and `y`, since it is considerably
# faster than `get`-based lookup using columns name strings.
vertices_x = df_path_i.x.values
vertices_y = df_path_i.y.values
cairo_context.move_to(vertices_x[0], vertices_y[0])
for x, y in itertools.izip(vertices_x[1:], vertices_y[1:]):
cairo_context.line_to(x, y)
cairo_context.close_path()
# Draw filled shape to indicate actuated electrode state.
cairo_context.set_source_rgba(*fill)
cairo_context.fill()
return surface
def render_background(self):
surface = self.get_surface()
context = cairo.Context(surface)
context.set_source_rgb(0, 0, 0)
context.paint()
return surface
def render_connections(self, indexes=None, hex_color='#fff', alpha=1.,
**kwargs):
surface = self.get_surface()
if not hasattr(self.canvas, 'df_connection_centers'):
return surface
cairo_context = cairo.Context(surface)
coords_columns = ['source', 'target',
'x_center_source', 'y_center_source',
'x_center_target', 'y_center_target']
df_connection_coords = (self.canvas.df_connection_centers
[coords_columns])
if indexes is not None:
df_connection_coords = df_connection_coords.loc[indexes].copy()
rgba = hex_color_to_rgba(hex_color, normalize_to=1.)
if rgba[-1] is None:
rgba = rgba[:-1] + (alpha, )
cairo_context.set_line_width(2.5)
for i, (target, source, x1, y1, x2, y2) in (df_connection_coords
.iterrows()):
cairo_context.move_to(x1, y1)
cairo_context.set_source_rgba(*rgba)
for k, v in kwargs.iteritems():
getattr(cairo_context, 'set_' + k)(v)
cairo_context.line_to(x2, y2)
cairo_context.stroke()
return surface
def render_shapes(self, df_shapes=None, clip=False):
'''
Render static electrode shapes (independent of actuation state).
If video is enabled, draw white outline for each electrode (no fill).
If video is disabled, draw white outline for each electrode and fill
blue.
See also :meth:`render_electrode_state_shapes()`.
'''
surface = self.get_surface()
if df_shapes is None:
if hasattr(self.canvas, 'df_canvas_shapes'):
df_shapes = self.canvas.df_canvas_shapes
else:
return surface
cairo_context = cairo.Context(surface)
for path_id, df_path_i in (df_shapes
.groupby(self.canvas
.shape_i_columns)[['x', 'y']]):
# Use attribute lookup for `x` and `y`, since it is considerably
# faster than `get`-based lookup using columns name strings.
vertices_x = df_path_i.x.values
vertices_y = df_path_i.y.values
cairo_context.move_to(vertices_x[0], vertices_y[0])
for x, y in itertools.izip(vertices_x[1:], vertices_y[1:]):
cairo_context.line_to(x, y)
cairo_context.close_path()
if self.enabled:
# Video is enabled.
# Draw white border around electrode.
line_width = 1
if path_id not in self.electrode_channels.index:
# on off on off
dashes = [10, 10]
color = (1, 0, 1)
line_width *= 2
else:
dashes = []
color = (1, 1, 1)
cairo_context.set_dash(dashes)
cairo_context.set_line_width(line_width)
cairo_context.set_source_rgb(*color)
cairo_context.stroke()
else:
                # Video is disabled. Fill electrode blue.
color = ((0, 0, 1) if path_id in self.electrode_channels.index
else (1, 0, 1))
cairo_context.set_source_rgb(*color)
cairo_context.fill_preserve()
# Draw white border around electrode.
cairo_context.set_line_width(1)
cairo_context.set_source_rgba(1, 1, 1)
cairo_context.stroke()
return surface
def render_routes(self):
surface = self.get_surface()
if (not hasattr(self.device, 'df_shape_connections') or
not hasattr(self.canvas, 'df_shape_centers')):
return surface
cairo_context = cairo.Context(surface)
connections = self.device.df_shape_connections
for route_i, df_route in self.df_routes.groupby('route_i'):
source_id = df_route.electrode_i.iloc[0]
source_connections = connections.loc[(connections.source ==
source_id) |
(connections.target ==
source_id)]
# Colors from ["Show me the numbers"][1].
#
# [1]: http://blog.axc.net/its-the-colors-you-have/
# LiteOrange = rgb(251,178,88);
# MedOrange = rgb(250,164,58);
# LiteGreen = rgb(144,205,151);
# MedGreen = rgb(96,189,104);
if source_connections.shape[0] == 1:
# Electrode only has one adjacent electrode, assume reservoir.
color_rgb_255 = np.array([250, 164, 58, 255])
else:
color_rgb_255 = np.array([96, 189, 104, 255])
color = (color_rgb_255 / 255.).tolist()
self.draw_route(df_route, cairo_context, color=color,
line_width=.25)
return surface
def render_channel_labels(self, color_rgba=None):
return self.render_labels(self.get_labels(), color_rgba=color_rgba)
def render_registration(self):
'''
Render pinned points on video frame as red rectangle.
'''
surface = self.get_surface()
if self.canvas is None or self.df_canvas_corners.shape[0] == 0:
return surface
corners = self.df_canvas_corners.copy()
corners['w'] = 1
transform = self.canvas.shapes_to_canvas_transform
canvas_corners = corners.values.dot(transform.T.values).T
points_x = canvas_corners[0]
points_y = canvas_corners[1]
cairo_context = cairo.Context(surface)
cairo_context.move_to(points_x[0], points_y[0])
for x, y in zip(points_x[1:], points_y[1:]):
cairo_context.line_to(x, y)
cairo_context.line_to(points_x[0], points_y[0])
cairo_context.set_source_rgb(1, 0, 0)
cairo_context.stroke()
return surface
def set_surface(self, name, surface):
self.df_surfaces.loc[name, 'surface'] = surface
self.emit('surface-rendered', name, surface)
def set_surface_alpha(self, name, alpha):
if 'alpha' not in self.df_surfaces:
self.df_surfaces['alpha'] = 1.
if name in self.df_surfaces.index:
self.df_surfaces.loc[name, 'alpha'] = alpha
def reorder_surfaces(self, surface_names):
assert(len(surface_names) == self.df_surfaces.shape[0])
self.df_surfaces = self.df_surfaces.ix[surface_names]
self.emit('surfaces-reset', self.df_surfaces)
self.cairo_surface = flatten_surfaces(self.df_surfaces)
def render(self):
'''
.. versionchanged:: 0.12
Add ``dynamic_electrode_state_shapes`` layer to show dynamic
electrode actuations.
'''
# Render each layer and update data frame with new content for each
# surface.
surface_names = ('background', 'shapes', 'connections', 'routes',
'channel_labels', 'static_electrode_state_shapes',
'dynamic_electrode_state_shapes', 'registration')
for k in surface_names:
self.set_surface(k, getattr(self, 'render_' + k)())
self.emit('surfaces-reset', self.df_surfaces)
self.cairo_surface = flatten_surfaces(self.df_surfaces)
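    # Note: each row of `df_surfaces` holds one cairo surface; `flatten_surfaces()`
    # is assumed to composite the surfaces in row order, so the ordering passed to
    # `reorder_surfaces()` determines the z-order of the rendered layers.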
###########################################################################
# Drawing helper methods
def draw_route(self, df_route, cr, color=None, line_width=None):
'''
Draw a line between electrodes listed in a route.
Arguments
---------
- `df_route`:
* A `pandas.DataFrame` containing a column named `electrode_i`.
* For each row, `electrode_i` corresponds to the integer index of
the corresponding electrode.
- `cr`: Cairo context.
        - `color`: Either an RGB or RGBA tuple, with each color channel in the
          range [0, 1]. If `color` is `None`, a semi-transparent green default
          is used.
        - `line_width`: Width of the route line. If `None`, a width is derived
          from the size of the endpoint marker.
'''
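        # Illustrative call (mirrors `render_routes()` above; the color value is
        # an arbitrary example):
        #
        #     cairo_context = cairo.Context(surface)
        #     for route_i, df_route in self.df_routes.groupby('route_i'):
        #         self.draw_route(df_route, cairo_context, color=(0, 0, 1, 1),
        #                         line_width=.25)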
df_route_centers = (self.canvas.df_shape_centers
.ix[df_route.electrode_i][['x_center',
'y_center']])
df_endpoint_marker = (.6 * self.get_endpoint_marker(df_route_centers)
+ df_route_centers.iloc[-1].values)
# Save cairo context to restore after drawing route.
cr.save()
if color is None:
# Colors from ["Show me the numbers"][1].
#
# [1]: http://blog.axc.net/its-the-colors-you-have/
# LiteOrange = rgb(251,178,88);
# MedOrange = rgb(250,164,58);
# LiteGreen = rgb(144,205,151);
# MedGreen = rgb(96,189,104);
color_rgb_255 = np.array([96,189,104, .8 * 255])
color = (color_rgb_255 / 255.).tolist()
if len(color) < 4:
color += [1.] * (4 - len(color))
cr.set_source_rgba(*color)
cr.move_to(*df_route_centers.iloc[0])
for electrode_i, center_i in df_route_centers.iloc[1:].iterrows():
cr.line_to(*center_i)
if line_width is None:
line_width = np.sqrt((df_endpoint_marker.max().values -
df_endpoint_marker.min().values).prod()) * .1
        cr.set_line_width(line_width)
cr.stroke()
cr.move_to(*df_endpoint_marker.iloc[0])
for electrode_i, center_i in df_endpoint_marker.iloc[1:].iterrows():
cr.line_to(*center_i)
cr.close_path()
cr.set_source_rgba(*color)
cr.fill()
# Restore cairo context after drawing route.
cr.restore()
def get_endpoint_marker(self, df_route_centers):
df_shapes = self.canvas.df_canvas_shapes
df_endpoint_electrode = df_shapes.loc[df_shapes.id ==
df_route_centers.index[-1]]
df_endpoint_bbox = (df_endpoint_electrode[['x_center_offset',
'y_center_offset']]
.describe().loc[['min', 'max']])
return pd.DataFrame([[df_endpoint_bbox.x_center_offset['min'],
df_endpoint_bbox.y_center_offset['min']],
[df_endpoint_bbox.x_center_offset['min'],
df_endpoint_bbox.y_center_offset['max']],
[df_endpoint_bbox.x_center_offset['max'],
df_endpoint_bbox.y_center_offset['max']],
[df_endpoint_bbox.x_center_offset['max'],
df_endpoint_bbox.y_center_offset['min']]],
columns=['x_center_offset', 'y_center_offset'])
###########################################################################
# ## Mouse event handling ##
def on_widget__button_press_event(self, widget, event):
'''
Called when any mouse button is pressed.
.. versionchanged:: 0.11
Do not trigger `route-electrode-added` event if `ALT` key is
pressed.
'''
if self.mode == 'register_video' and event.button == 1:
self.start_event = event.copy()
return
elif self.mode == 'control':
shape = self.canvas.find_shape(event.x, event.y)
if shape is None: return
state = event.get_state()
if event.button == 1:
# Start a new route.
self._route = Route(self.device)
self._route.append(shape)
self.last_pressed = shape
if not (state & gtk.gdk.MOD1_MASK):
# `<Alt>` key is not held down.
self.emit('route-electrode-added', shape)
def on_widget__button_release_event(self, widget, event):
'''
Called when any mouse button is released.
.. versionchanged:: 0.11.3
Always reset pending route, regardless of whether a route was
completed. This includes a) removing temporary routes from routes
table, and b) resetting the state of the current route electrode
queue. This fixes
https://github.com/sci-bots/microdrop/issues/256.
'''
event = event.copy()
if self.mode == 'register_video' and (event.button == 1 and
self.start_event is not None):
self.emit('point-pair-selected', {'start_event': self.start_event,
'end_event': event.copy()})
self.start_event = None
return
elif self.mode == 'control':
# XXX Negative `route_i` corresponds to temporary route being
# drawn. Since release of mouse button terminates route drawing,
# clear any rows corresponding to negative `route_i` values from
# the routes table.
self.df_routes = self.df_routes.loc[self.df_routes.route_i >=
0].copy()
shape = self.canvas.find_shape(event.x, event.y)
if shape is not None:
electrode_data = {'electrode_id': shape, 'event': event.copy()}
if event.button == 1:
if gtk.gdk.BUTTON1_MASK == event.get_state():
if self._route.append(shape):
self.emit('route-electrode-added', shape)
if len(self._route.electrode_ids) == 1:
# Single electrode, so select electrode.
self.emit('electrode-selected', electrode_data)
else:
# Multiple electrodes, so select route.
route = self._route
self.emit('route-selected', route)
elif (event.get_state() == (gtk.gdk.MOD1_MASK |
gtk.gdk.BUTTON1_MASK) and
self.last_pressed != shape):
# `<Alt>` key was held down.
self.emit('electrode-pair-selected',
{'source_id': self.last_pressed,
'target_id': shape, 'event': event.copy()})
self.last_pressed = None
elif event.button == 3:
# Create right-click pop-up menu.
menu = self.create_context_menu(event, shape)
# Display menu popup
menu.popup(None, None, None, event.button, event.time)
# Clear route.
self._route = None
def create_context_menu(self, event, shape):
'''
Parameters
----------
event : gtk.gdk.Event
GTK mouse click event.
shape : str
Electrode shape identifier (e.g., `"electrode028"`).
Returns
-------
gtk.Menu
Context menu.
.. versionchanged:: 0.13
- Deprecate hard-coded commands (e.g., clear electrodes, clear
routes).
- Add anonymous global commands section at head of menu (i.e.,
commands not specific to an electrode or route).
- Add "Electrode" and "Route(s)" sub-menus.
'''
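        # Note: the returned menu is shown from `on_widget__button_release_event`
        # via `menu.popup(None, None, None, event.button, event.time)` on
        # right-click (button 3).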
routes = self.df_routes.loc[self.df_routes.electrode_i == shape,
'route_i'].astype(int).unique().tolist()
def _connect_callback(menu_item, command_signal, group, command, data):
callback_called = threading.Event()
def _callback(signal, widget, *args):
if callback_called.is_set():
return
callback_called.set()
_L().debug('`%s`: %s %s %s', signal, group, command, data)
gtk.idle_add(self.emit, command_signal, group, command, data)
menu_item.connect('activate', ft.partial(_callback, 'activate'))
menu_item.connect('button-press-event',
ft.partial(_callback, 'button-press-event'))
if group is not None:
menu_item.set_tooltip_text(group)
menu = gtk.Menu()
# Add menu items/groups for registered global commands.
if self.global_commands:
data = {'event': event.copy()}
command_signal = 'global-command'
for group, commands in self.global_commands.iteritems():
for command, title in commands.iteritems():
menu_item_j = gtk.MenuItem(title)
menu.append(menu_item_j)
_connect_callback(menu_item_j, command_signal, group,
command, data)
# Add menu items/groups for registered electrode commands.
if self.electrode_commands:
separator = gtk.SeparatorMenuItem()
menu.append(separator)
# Add electrode sub-menu.
menu_e = gtk.Menu()
menu_head_e = gtk.MenuItem('_Electrode')
menu_head_e.set_submenu(menu_e)
menu_head_e.set_use_underline(True)
menu.append(menu_head_e)
command_signal = 'electrode-command'
data = {'electrode_id': shape, 'event': event.copy()}
for group, commands in self.electrode_commands.iteritems():
for command, title in commands.iteritems():
menu_item_j = gtk.MenuItem(title)
menu_e.append(menu_item_j)
_connect_callback(menu_item_j, command_signal, group,
command, data)
# Add menu items/groups for registered route commands.
if routes and self.route_commands:
# TODO: Refactor electrode/route command menu code to reduce code
# duplication (i.e., DRY).
separator = gtk.SeparatorMenuItem()
menu.append(separator)
# Add route sub-menu.
menu_r = gtk.Menu()
menu_head_r = gtk.MenuItem('_Route(s)')
menu_head_r.set_submenu(menu_r)
menu_head_r.set_use_underline(True)
menu.append(menu_head_r)
command_signal = 'route-command'
data = {'route_ids': routes, 'event': event.copy()}
for group, commands in self.route_commands.iteritems():
for command, title in commands.iteritems():
menu_item_j = gtk.MenuItem(title)
menu_r.append(menu_item_j)
_connect_callback(menu_item_j, command_signal, group,
command, data)
menu.show_all()
return menu
def on_widget__motion_notify_event(self, widget, event):
'''
Called when mouse pointer is moved within drawing area.
.. versionchanged:: 0.11
Do not trigger `route-electrode-added` event if `ALT` key is
pressed.
'''
if self.canvas is None:
# Canvas has not been initialized. Nothing to do.
return
elif event.is_hint:
pointer = event.window.get_pointer()
x, y, mod_type = pointer
else:
x = event.x
y = event.y
shape = self.canvas.find_shape(x, y)
# Grab focus to [enable notification on key press/release events][1].
#
# [1]: http://mailman.daa.com.au/cgi-bin/pipermail/pygtk/2003-August/005770.html
self.widget.grab_focus()
if shape != self.last_hovered:
if self.last_hovered is not None:
# Leaving shape
self.emit('electrode-mouseout', {'electrode_id':
self.last_hovered,
'event': event.copy()})
self.last_hovered = None
elif shape is not None:
# Entering shape
self.last_hovered = shape
if self._route is not None:
if self._route.append(shape) and not (event.get_state() &
gtk.gdk.MOD1_MASK):
# `<Alt>` key was not held down.
self.emit('route-electrode-added', shape)
self.emit('electrode-mouseover', {'electrode_id':
self.last_hovered,
'event': event.copy()})
def on_widget__key_press_event(self, widget, event):
'''
Called when key is pressed when widget has focus.
'''
self.emit('key-press', {'event': event.copy()})
def on_widget__key_release_event(self, widget, event):
'''
Called when key is released when widget has focus.
'''
self.emit('key-release', {'event': event.copy()})
###########################################################################
# ## Slave signal handling ##
def on_video_sink__frame_shape_changed(self, slave, old_shape, new_shape):
# Video frame is a new shape.
if old_shape is not None:
# Switched video resolution, so scale existing corners to maintain
# video registration.
old_shape = pd.Series(old_shape, dtype=float, index=['width',
'height'])
new_shape = pd.Series(new_shape, dtype=float, index=['width',
'height'])
old_aspect_ratio = old_shape.width / old_shape.height
new_aspect_ratio = new_shape.width / new_shape.height
if old_aspect_ratio != new_aspect_ratio:
# The aspect ratio has changed. The registration will have the
# proper rotational orientation, but the scale will be off and
# will require manual adjustment.
logger.warning('Aspect ratio does not match previous frame. '
'Manual adjustment of registration is required.')
corners_scale = new_shape / old_shape
df_frame_corners = self.df_frame_corners.copy()
df_frame_corners.y = old_shape.height - df_frame_corners.y
df_frame_corners *= corners_scale.values
df_frame_corners.y = new_shape.height - df_frame_corners.y
self.df_frame_corners = df_frame_corners
else:
# No existing frame shape, so nothing to scale from.
self.reset_frame_corners()
self.update_transforms()
def on_frame_update(self, slave, np_frame):
if self.widget.window is None:
return
if np_frame is None or not self._enabled:
if 'video' in self.df_surfaces.index:
self.df_surfaces.drop('video', axis=0, inplace=True)
self.reorder_surfaces(self.df_surfaces.index)
else:
cr_warped, np_warped_view = np_to_cairo(np_frame)
self.set_surface('video', cr_warped)
self.cairo_surface = flatten_surfaces(self.df_surfaces)
# Execute a few gtk main loop iterations to improve responsiveness when
# using high video frame rates.
#
# N.B., Without doing this, for example, some mouse over events may be
# missed, leading to problems drawing routes, etc.
for i in xrange(5):
if not gtk.events_pending():
break
gtk.main_iteration_do()
self.draw()
###########################################################################
# ## Electrode operation registration ##
def register_global_command(self, command, title=None, group=None):
'''
.. versionadded:: 0.13
Register global command (i.e., not specific to electrode or route).
Add global command to context menu.
'''
commands = self.global_commands.setdefault(group, OrderedDict())
if title is None:
title = (command[:1].upper() + command[1:]).replace('_', ' ')
commands[command] = title
def register_electrode_command(self, command, title=None, group=None):
'''
Register electrode command.
Add electrode plugin command to context menu.
'''
commands = self.electrode_commands.setdefault(group, OrderedDict())
if title is None:
title = (command[:1].upper() + command[1:]).replace('_', ' ')
commands[command] = title
###########################################################################
# ## Route operation registration ##
def register_route_command(self, command, title=None, group=None):
'''
Register route command.
Add route plugin command to context menu.
'''
commands = self.route_commands.setdefault(group, OrderedDict())
if title is None:
title = (command[:1].upper() + command[1:]).replace('_', ' ')
commands[command] = title
|
lgpl-2.1
| 1,564,422,671,550,628,900
| 40.079491
| 88
| 0.531251
| false
| 4.264917
| false
| false
| false
|
hobson/pug-dj
|
pug/dj/miner/models.py
|
1
|
13381
|
import datetime
from django.db import models
#from django_hstore import hstore
from jsonfield import JSONField
from pug.nlp.db import representation
# FIXME: simplify circular import/dependencies with miner app
#from pug.dj.miner import explore
from model_mixin import DateMixin
class Connection(models.Model):
"The username, password, IP Address or URL required to access a database"
_IMPORTANT_FIELDS = ('pk', 'uri', 'user')
ip = models.CharField(max_length=15, null=True)
uri = models.CharField(max_length=256, null=True)
fqdn = models.CharField(max_length=128, null=True)
user = models.CharField(max_length=128, null=True)
password = models.CharField(max_length=128, null=True)
port = models.IntegerField(null=False)
def __unicode__(self):
return representation(self)
class AggregatedResults(DateMixin):
"""Storage a results json string that was returned by any restful web-based service
DateMixin adds the fields 'updated' and 'created'.
"""
name = models.CharField(max_length=2000, default='', blank=False)
slug = models.CharField(max_length=2000, default='', blank=False)
uri = models.URLField(max_length=2000, help_text='Base service URI without the GET API query')
get_dict = JSONField(
help_text='Complete GET Request URI')
filter_dict = JSONField(
help_text='The query `filter()` portion of the GET Request URI formatted in a form acceptable as a `queryset.filter(**filter_dict)`')
exclude_dict = JSONField(
        help_text='The query `exclude()` portion of the GET Request URI formatted in a form evaluated as a `for k, v in exclude_dict.items(): queryset = queryset.exclude(**{k: v})`')
results = JSONField(
help_text="The dictionary of data used to display the Queries summary table at the top of the Quick Table with aggregate statistics 'mean' (lag), 'num_matches', 'num_returns', 'num_sales', 'effective_return_rate', 'word_match_rate', 'mura_match_rate', 'nprb_match_rate', 'last_update', 'num_mura_match', 'num_word_match', 'num_nprb_match'")
class Database(models.Model):
"""Metadata about a Database (postgres or Microsoft SQL "USE" argument)"""
_IMPORTANT_FIELDS = ('pk', 'name', 'date')
name = models.CharField(max_length=128, null=False, default='')
    date = models.DateTimeField(help_text='Timestamp when the metadata was calculated', auto_now_add=True, null=False) # default=datetime.datetime.now, <-- mutually exclusive with auto_now_add
connection = models.ForeignKey(Connection, null=True, default=None)
__unicode__ = representation
class Table(models.Model):
"""Metadata about a Database table and its Django model"""
_IMPORTANT_FIELDS = ('pk', 'django_model', 'db_table', 'count')
app = models.CharField(max_length=256, default='', null=False, blank=True)
database = models.ForeignKey(Database, default=None)
db_table = models.CharField(max_length=256, null=True)
django_model = models.CharField(max_length=256, null=True, default=None)
primary_key = models.OneToOneField('Field', null=True, default=None)
count = models.IntegerField(null=True, default=None)
__unicode__ = representation
class ChangeLog(models.Model):
'''Log of hash of `.values()` of records in any database.table (app.model)
Used to track changes to tables across databases.
Facilitates mirroring across databases.
'''
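    # Illustrative write path (an assumed convention that mirrors the
    # `values_hash` help_text below; `SomeModel` and `pk` are placeholders):
    #
    #     record_values = tuple(SomeModel.objects.filter(pk=pk).values_list()[0])
    #     ChangeLog.objects.create(app='myapp', model='somemodel',
    #                              primary_key=pk, values_hash=hash(record_values))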
model = models.CharField(max_length=255, default='', null=False, blank=True)
app = models.CharField(max_length=255, default='', null=False, blank=True)
primary_key = models.IntegerField(default=None, null=True)
values_hash = models.IntegerField(db_index=True, help_text='Integer hash of a tuple of all of the fields, hash(tuple(record.values_list())), for the source data record.', default=None, null=True, blank=True)
class Type(models.Model):
FIND_DJANGO_TYPE = {
'Integer': 'IntegerField',
'long': 'IntegerField',
'LONG': 'IntegerField',
'int': 'IntegerField',
'INT': 'IntegerField',
'float': 'FloatField',
'Float': 'FloatField',
'double': 'FloatField',
'Double': 'FloatField',
'char': 'CharField',
'str': 'CharField',
'CHAR': 'CharField',
'STR': 'CharField',
'string': 'CharField',
'STRING': 'CharField',
'text': 'TextField',
'TEXT': 'TextField',
        '21': '',
        '23': '',
        '25': '',
        '701': '',
        '1043': '',
        '1184': '',
        '1700': '',
'boolean': 'NullBooleanField',
'decimal': 'DecimalField',
'Decimal': 'DecimalField',
'DECIMAL': 'DecimalField',
'VARCHAR': 'CharField',
'NCHAR': 'CharField',
'NVARCHAR': 'CharField',
'SMALLINT': 'IntegerField',
'REAL': 'FloatField',
'DOUBLE PRECISION': 'FloatField',
'NUMERIC': 'FloatField',
'numeric': 'FloatField',
'NUMBER': 'FloatField',
'number': 'FloatField',
'DATE': 'DateField',
'TIME': 'TimeField',
'datetime': 'DateTimeField',
'Datetime': 'DateTimeField',
'TIMESTAMP': 'DateTimeField',
'TIMESTAMPTZ': 'DateTimeField',
}
CHOICES_NATIVE_TYPE = (
('image', 'A Microsoft binary image'),
)
CHOICES_ANSI_TYPE = (
        ('CHAR', 'Fixed-width *n*-character string, padded with spaces as needed'),
('VARCHAR', 'Variable-width string with a maximum size of *n* characters'),
('NCHAR', 'Fixed width string supporting an international character set'),
('NVARCHAR', 'Variable-width string supporting an international character set'),
('BIT', 'A fixed-length array of *n* bits'),
('BIT VARYING', 'An array of up to *n* bits'),
('INTEGER', 'An integer'),
('SMALLINT', 'A reduced-precision integer'),
('FLOAT', 'A floating-point number'),
('REAL', 'A floating-point number'),
('DOUBLE PRECISION', 'A floating-point number with greater precision'),
        ('NUMERIC', 'A number with arbitrary *precision* and *scale*, e.g. 123.45 has a *precision* of 5 and a *scale* of 2'),
        ('DECIMAL', 'A number with arbitrary *precision* and *scale*, e.g. 123.45 has a *precision* of 5 and a *scale* of 2'),
('DATE', 'A date value, e.g. 1970-12-25'),
('TIME', 'A time value, typically with precision of 1 "tick" or 100 nanoseconds, e.g. 06:01:02'),
('TIMESTAMP', 'A naive date and time value (without timezone information), typically with precision of 1 "tick" or 100 nanoseconds, e.g. 1970-12-25 06:01:02'),
('TIMESTAMPTZ', 'A date and time value with timezone, typically with precision of 1 "tick" or 100 nanoseconds, e.g. 1970-12-25 06:01:02'),
)
    CHOICES_DJANGO_TYPE = (
        (None, 'Null'),
        ('FloatField', 'FloatField'),
        ('ForeignKey', 'ForeignKey'),  # inspectdb produces this
        ('CharField', 'CharField'),  # inspectdb produces this
        ('TextField', 'TextField'),  # inspectdb produces this
        ('IntegerField', 'IntegerField'),
        ('NullBooleanField', 'NullBooleanField'),  # inspectdb produces this
        ('BooleanField', 'BooleanField'),
        ('DecimalField', 'DecimalField'),  # inspectdb produces this
        ('DateTimeField', 'DateTimeField'),  # inspectdb produces this
        ('DateField', 'DateField'),
    )
django_type = models.CharField(choices=CHOICES_DJANGO_TYPE, default=None, max_length=20, null=True)
ansi_type = models.CharField(choices=CHOICES_ANSI_TYPE, max_length=20, null=True)
__unicode__ = representation
class Field(models.Model):
"""Metadata about a Database field and its Django Field"""
_IMPORTANT_FIELDS = ('pk', 'db_column', 'db_table', 'type', 'fraction_distinct')
# objects = hstore.HStoreManager()
table_stats = models.ForeignKey(Table)
django_field = models.CharField(max_length=255, null=False, default='', blank=True)
max_length = models.IntegerField(null=True)
blank = models.BooleanField(default=False)
choices = models.TextField(null=True)
django_type = models.ForeignKey(Type, null=True, default=None)
type = models.CharField(max_length=32, null=False, blank=True, default='')
scale = models.IntegerField(null=True)
db_column = models.CharField(max_length=255, null=False, default='', blank=True)
display_size = models.IntegerField(null=True)
min = models.TextField(help_text='Python string representation (repr) of the minimum value', null=True) # repr() of minimum value
max = models.TextField(help_text='Python string representation (repr) of the maximum value', null=True) # repr() of minimum value
shortest = models.TextField(help_text='Shortest string among the field values', null=True)
longest = models.TextField(help_text='Longest string among the field values', null=True)
num_distinct = models.IntegerField(help_text="count of distinct (different) discrete values within the column",
null=True, default=None)
num_null = models.IntegerField(null=True, default=None)
precision = models.IntegerField(null=True, default=None)
fraction_distinct = models.FloatField(help_text="num_distinct / float((count - num_null) or 1)",
null=True, default=None)
internal_size = models.IntegerField(null=True, default=None)
null_ok = models.NullBooleanField(default=None)
primary_key = models.NullBooleanField(default=None)
relative = models.ForeignKey('Field', help_text='A modeled foreign key or one-to-one relationship within the django model.', null=True, related_name='relative_source')
relative_type = models.CharField(choices=(('ForeignKey', 'ForeignKey'), ('OneToOneField', 'OneToOneField'), ('ManyToManyField', 'ManyToManyField')), max_length=20)
peer = models.ManyToManyField('Field', through='Correlation', help_text='A field statistically related to this one in some way other than as a foreign key')
# most_frequent = hstore.DictionaryField(db_index=True, default=None, null=True)
__unicode__ = representation
class Correlation(models.Model):
"Graph edges (connections) between fields. Can be across tables and databases."
source = models.ForeignKey(Field, related_name='source_correlation')
target = models.ForeignKey(Field, related_name='target_correlation')
correlation = models.FloatField(null=True)
mutual_information = models.FloatField(null=True)
shared_distinct_values = models.IntegerField(help_text='For nonreal, discrete-valued fields (strings, dates), the number of unique values that are shared between the two fields')
shared_values = models.IntegerField(help_text='For nonreal, discrete-valued fields (strings, dates), the number of values that are shared between the two fields, including duplicate occurrences of the same value')
    shared_distinct_words = models.IntegerField(help_text='For strings, the number of unique words that are shared between all the strings in each field')
shared_tokens = models.IntegerField(help_text='For strings, the number of unique tokens (words) that are shared between the two fields, including duplicate occurrences of the same value')
__unicode__ = representation
def import_meta(db_meta, db_name, db_date=None, verbosity=1):
    db_obj, db_created = Database.objects.get_or_create(name=db_name, date=db_date or datetime.datetime.now())
for django_model, table_meta in db_meta.iteritems():
pk = table_meta['Meta'].get('primary_key', None)
if pk:
del(table_meta['Meta']['primary_key'])
table_obj, table_created = Table.objects.get_or_create(database=db_obj, django_model=django_model, **table_meta['Meta'])
for django_field, field_meta in table_meta.iteritems():
if django_field == "Meta":
# The table "Meta" has already been imported when Table object was created
continue
if verbosity > 1:
print django_field
if 'name' in field_meta and field_meta['name'] == django_field:
del(field_meta['name'])
if 'most_frequent' in field_meta:
field_meta['most_frequent'] = dict((str(k), '%016d' % v) for (k, v) in field_meta['most_frequent'])
#print field_meta['most_frequent']
del(field_meta['most_frequent']) # DatabaseError: can't adapt type 'HStoreDict'
field_obj, field_created = Field.objects.get_or_create(table_stats=table_obj, django_field=django_field, **field_meta)
if pk and pk in table_meta:
field_obj = Field.objects.get(table_stats=table_obj, django_field=pk, **table_meta[pk])
        table_obj.primary_key = field_obj
table_obj.save()
# def explore_app(app_name='call_center', verbosity=1):
# db_meta = explore.get_db_meta(app_name, verbosity=verbosity)
# try:
# print '&'*100
# print db_meta
# print '&'*100
# return import_meta(db_meta, db_name=app_name)
# except:
# return db_meta
|
mit
| 215,834,773,485,895,000
| 49.116105
| 348
| 0.653613
| false
| 3.896622
| false
| false
| false
|
devilry/devilry-django
|
devilry/devilry_admin/views/dashboard/overview.py
|
1
|
6086
|
# -*- coding: utf-8 -*-
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_cradmin.devilry_listbuilder.period import AdminItemValue
from django.db import models
from itertools import groupby
from django.utils.translation import gettext, gettext_lazy
from django.views.generic import TemplateView
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers import listbuilder
from devilry.devilry_admin.listbuilder import admindashboard_subject_listbuilder
from devilry.apps.core import models as coremodels
from devilry.apps.core.models import Period, Subject
from devilry.devilry_account.models import SubjectPermissionGroup, PeriodPermissionGroup
from devilry.devilry_cradmin.devilry_listfilter.utils import WithResultValueRenderable, RowListWithMatchResults
class SubjectItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
"""
An item frame for the list of subjects in the Administrator Dashboard Overview
"""
valuealias = 'subject'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin_subject_for_periodadmin',
appname='subject_redirect',
roleid=self.subject.id,
viewname=crapp.INDEXVIEW_NAME
)
def get_extra_css_classes_list(self):
return ['devilry-admin-dashboard-overview-subjectitemframe']
class OrderSubjectFilter(listfilter.django.single.select.AbstractOrderBy):
def get_ordering_options(self):
return [
('', { # This will be the default sort order
'label': gettext_lazy('Short Name'),
'order_by': ['short_name'],
}),
('short_name_descending', {
'label': gettext_lazy('Short Name (descending)'),
'order_by': ['-short_name'],
}),
]
class SubjectListMatchResultRenderable(WithResultValueRenderable):
def get_object_name_singular(self, num_matches):
return gettext_lazy('course')
def get_object_name_plural(self, num_matches):
return gettext_lazy('courses')
class RowListBuilder(RowListWithMatchResults):
match_result_value_renderable = SubjectListMatchResultRenderable
class OverviewSubjectListView(listbuilderview.FilterListMixin, listbuilderview.View):
model = coremodels.Subject
template_name = 'devilry_admin/dashboard/overview.django.html'
listbuilder_class = RowListBuilder
frame_renderer_class = SubjectItemFrame
value_renderer_class = devilry_listbuilder.subject.AdminItemValue
paginate_by = 50
def get_pageheading(self):
return gettext("Administrator dashboard")
def get_pagetitle(self):
return self.get_pageheading()
def __get_all_subjects_where_user_is_subjectadmin(self):
return Subject.objects.filter_user_is_admin(user=self.request.user) \
.order_by('long_name') \
.distinct()
def __get_all_periods_where_user_is_subjectadmin_or_periodadmin(self):
groups = []
periods = Period.objects.filter_user_is_admin(user=self.request.user) \
.select_related('parentnode') \
.order_by('short_name', 'parentnode__long_name') \
.distinct()
for key, items in groupby(periods, lambda period: period.short_name):
groups.append(list(items))
return groups
def add_filterlist_items(self, filterlist):
"""
Add the filters to the filterlist.
"""
filterlist.append(listfilter.django.single.textinput.Search(
slug='search',
label='Search',
label_is_screenreader_only=True,
modelfields=['long_name']))
filterlist.append(OrderSubjectFilter(
slug='short_name', label=gettext_lazy('Short name')))
def get_filterlist_url(self, filters_string):
"""
This is used by the filterlist to create URLs.
"""
return self.request.cradmin_app.reverse_appurl(
'filter', kwargs={'filters_string': filters_string})
def get_unfiltered_queryset_for_role(self, site):
"""
Create the queryset, and apply the filters from the filterlist.
"""
        # Return Subjects where the user is admin on the Subject and/or admin on a Period within the Subject
queryset = coremodels.Subject.objects\
.filter_user_is_admin_for_any_periods_within_subject(self.request.user)\
.prefetch_active_period_objects()
# Set unfiltered count on self.
self.num_total = queryset.count()
return queryset
def get_context_data(self, **kwargs):
context = super(OverviewSubjectListView, self).get_context_data(**kwargs)
context['subjects_where_user_is_subjectadmin'] = \
self.__get_all_subjects_where_user_is_subjectadmin()
context['periods_where_user_is_subjectadmin_or_periodadmin'] = \
self.__get_all_periods_where_user_is_subjectadmin_or_periodadmin()
return context
#
# Add support for showing results on the top of the list.
#
def get_listbuilder_list_kwargs(self):
kwargs = super(OverviewSubjectListView, self).get_listbuilder_list_kwargs()
kwargs['num_matches'] = self.num_matches or 0
kwargs['num_total'] = self.num_total or 0
kwargs['page'] = self.request.GET.get('page', 1)
return kwargs
def get_queryset_for_role(self, role):
queryset = super(OverviewSubjectListView, self).get_queryset_for_role(role=role)
# Set filtered count on self.
self.num_matches = queryset.count()
return queryset
class App(crapp.App):
appurls = [
crapp.Url(r'^$', OverviewSubjectListView.as_view(), name=crapp.INDEXVIEW_NAME),
crapp.Url(
r'^filter/(?P<filters_string>.+)?$',
OverviewSubjectListView.as_view(),
name='filter'),
]
|
bsd-3-clause
| -6,426,557,289,801,527,000
| 36.801242
| 111
| 0.670884
| false
| 3.861675
| false
| false
| false
|
jeetsukumaran/Ginkgo
|
ginkgopy/ginkgo/ginkgogrid.py
|
1
|
5281
|
#! /usr/bin/env python
import random
import sys
import os
from ginkgo import argparse
##############################################################################\\
# Grid
class Grid(object):
def __init__(self, **kwargs):
self.ncols = kwargs.get("ncols", None)
self.nrows = kwargs.get("nrows", None)
self.value_type = kwargs.get("value_type", int)
self.values = None
self.matrix = None
if 'values' in kwargs:
self.values = kwargs['values']
elif 'pop_func' in kwargs:
self.populate(kwargs['pop_func'])
elif 'filepath' in kwargs:
self.read(open(kwargs['filepath'], "rU"))
elif 'stream' in kwargs:
self.read(kwargs['stream'])
else:
self.values = {}
self._max_formatted_value_len = None
def __str__(self):
return self.as_string(include_header=True)
def populate(self, func):
self.values = {}
for x in range(self.ncols):
self.values[x] = {}
for y in range(self.nrows):
self.values[x][y] = func(x, y)
def read(self, src):
self.values = []
for line in src:
line = line.replace('\n', '').strip()
parts = line.split(' ',1)
kw = parts[0].lower()
if kw == 'ncols':
assert len(parts) == 2
self.ncols = int(parts[1])
continue
elif kw == 'nrows':
assert len(parts) == 2
self.nrows = int(parts[1])
continue
elif kw in ['xllcorner', 'yllcorner', 'cellsize', 'nodata_value']:
continue
else:
parts = line.split(' ')
self.values.extend([self.value_type(i) for i in parts])
break
assert self.ncols > 0
assert self.nrows > 0
for line in src:
line = line.replace('\n', '').strip()
parts = line.split(' ')
self.values.extend([self.value_type(i) for i in parts])
return self.matrix_from_values()
def matrix_from_values(self):
assert len(self.values) == self.ncols * self.nrows
self.matrix = []
for r in range(self.nrows):
self.matrix.append([])
for c in range(self.ncols):
self.matrix[r].append(self.values[(r * self.ncols) + c])
assert len(self.matrix[r]) == self.ncols
assert len(self.matrix) == self.nrows
return self.matrix
def formatted_value_matrix(self, cell_width=None):
fv = {}
fv_lens = []
for x in range(self.ncols):
fv[x] = {}
for y in range(self.nrows):
v = self.values[x][y]
if isinstance(v, float):
fv[x][y] = "{0:>.4}".format(v)
else:
fv[x][y] = "{0:>}".format(v)
fv_lens.append(len(fv[x][y]))
if cell_width is None:
self._max_formatted_value_len = max(fv_lens)
else:
self._max_formatted_value_len = cell_width
return fv
def ascii_grid_header(self):
return ("""ncols {0}
nrows {1}
xllcorner 0.0
yllcorner 0.0
cellsize 50.0
NODATA_value -9999""").format(self.ncols, self.nrows)
def as_string(self, include_header=True, cell_width=None):
rows = []
if include_header:
            rows.append(self.ascii_grid_header())
fv = self.formatted_value_matrix(cell_width=cell_width)
for y in range(self.nrows):
if y % 5 == 0:
rows.append("")
row = []
for x in range(self.ncols):
# leader = ("{0:{1}}".format(" ", self._max_formatted_value_len)) if (x and (x % 5 == 0)) else ""
leader = " " if (x and (x % 5 == 0)) else ""
v = fv[x][y]
row.append("{2}{0:>{1}}".format(v, self._max_formatted_value_len, leader))
rows.append(" ".join(row))
#rows.append("")
return "\n".join(rows)
###############################################################################\\
# Occurrences
class Occurrences(Grid):
def __init__(self, filepath=None):
Grid.__init__(self)
self.filepath = None
if filepath is not None:
self.read(open(filepath, "rU"))
def __str__(self):
s = []
for r in range(self.nrows):
s.append(" ".join(["{0:>3}".format(self.matrix[r][c]) for c in range(self.ncols)]))
return "\n".join(s)
###############################################################################\\
# Input Grid Generation
def random_gaussian_grid(ncols, nrows, mean=0, sd=1):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: random.gauss(mean, sd))
def random_uniform_real_grid(ncols, nrows, a, b):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: random.uniform(a, b))
def random_uniform_int_grid(ncols, nrows, a, b):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: random.randint(a, b))
def fixed_value_grid(ncols, nrows, val):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: val)
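# Illustrative usage (grid dimensions and value range are arbitrary examples):
#
#     grid = random_uniform_int_grid(ncols=10, nrows=10, a=0, b=100)
#     print(grid.as_string(include_header=True))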
|
gpl-3.0
| 8,019,885,483,017,476,000
| 32.636943
| 112
| 0.492142
| false
| 3.659737
| false
| false
| false
|
mattvonrocketstein/smash
|
smashlib/ipy3x/nbconvert/preprocessors/execute.py
|
1
|
3693
|
"""Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from IPython.utils.traitlets import List, Unicode
from IPython.nbformat.v4 import output_from_msg
from .base import Preprocessor
from IPython.utils.traitlets import Integer
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
timeout = Integer(30, config=True,
help="The time to wait (in seconds) for output from executions."
)
extra_arguments = List(Unicode)
def preprocess(self, nb, resources):
from IPython.kernel import run_kernel
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
self.log.info("Executing notebook with kernel: %s" % kernel_name)
with run_kernel(kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
stderr=open(os.devnull, 'w')) as kc:
self.kc = kc
nb, resources = super(
ExecutePreprocessor, self).preprocess(nb, resources)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each code cell. See base.py for details.
"""
if cell.cell_type != 'code':
return cell, resources
try:
outputs = self.run_cell(
self.kc.shell_channel, self.kc.iopub_channel, cell)
except Exception as e:
self.log.error("failed to run cell: " + repr(e))
self.log.error(str(cell.source))
raise
cell.outputs = outputs
return cell, resources
def run_cell(self, shell, iopub, cell):
msg_id = shell.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
# wait for finish, with timeout
while True:
try:
msg = shell.get_msg(timeout=self.timeout)
except Empty:
self.log.error("Timeout waiting for execute reply")
raise
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
# not our reply
continue
outs = []
while True:
try:
msg = iopub.get_msg(timeout=self.timeout)
except Empty:
self.log.warn("Timeout waiting for IOPub output")
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
|
mit
| -6,775,581,182,042,542,000
| 31.394737
| 86
| 0.541836
| false
| 4.444043
| false
| false
| false
|
pettazz/pygooglevoice
|
examples/parse_sms.py
|
1
|
1581
|
#
# SMS test via Google Voice
#
# John Nagle
# nagle@animats.com
#
from googlevoice import Voice
import BeautifulSoup
def extractsms(htmlsms):
"""
extractsms -- extract SMS messages from BeautifulSoup
tree of Google Voice SMS HTML.
Output is a list of dictionaries, one per message.
"""
msgitems = [] # accum message items here
# Extract all conversations by searching for a DIV with an ID at top level.
tree = BeautifulSoup.BeautifulSoup(htmlsms) # parse HTML into tree
conversations = tree.findAll("div", attrs={"id": True}, recursive=False)
for conversation in conversations:
# For each conversation, extract each row, which is one SMS message.
rows = conversation.findAll(attrs={"class": "gc-message-sms-row"})
for row in rows: # for all rows
# For each row, which is one message, extract all the fields.
# tag this message with conversation ID
msgitem = {"id": conversation["id"]}
spans = row.findAll("span", attrs={"class": True}, recursive=False)
for span in spans: # for all spans in row
cl = span["class"].replace('gc-message-sms-', '')
# put text in dict
msgitem[cl] = (" ".join(span.findAll(text=True))).strip()
msgitems.append(msgitem) # add msg dictionary to list
return msgitems
def run():
voice = Voice()
voice.login()
voice.sms()
for msg in extractsms(voice.sms.html):
print(msg)
__name__ == '__main__' and run()
|
bsd-3-clause
| 4,728,614,197,272,323,000
| 32.638298
| 79
| 0.611638
| false
| 3.856098
| false
| false
| false
|
AlphaSmartDog/DeepLearningNotes
|
Note-6 A3CNet/Note 6 simple ACNet/ACNet_adjust.py
|
1
|
3662
|
import random
import tensorflow as tf
from FCNet import FCNet
LOSS_V = 100
ENTROPY_BETA = 0.05
_EPSILON = 1e-6
L2_P = 1e-5
L2_V = 1e-2
actor_learning_rate = 1e-3
critic_learning_rate = 1e-3
class ACNet(object):
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.action_space = list(range(self.action_size))
self.inputs = tf.placeholder(tf.float32, [None, state_size], 'inputs')
        self.actions = tf.placeholder(tf.int32, [None], 'action')  # which action was taken
self.a_t = tf.one_hot(self.actions, self.action_size, name='action_taken')
        self.targets = tf.placeholder(tf.float32, [None], 'discounted_reward')
# not immediate but n step discounted
self.R = tf.expand_dims(self.targets, axis=1)
# build network
self.actor = FCNet('actor')
self.critic = FCNet('critic')
# policy and deterministic policy
self.P = tf.nn.softmax(self.actor(self.inputs, self.action_size))
self.DP = tf.reduce_sum(self.P * self.a_t, axis=1, keep_dims=True)
# choose action one step, action probability
self.AP = tf.squeeze(self.P, axis=0)
self.log_DP = tf.log(self.DP + _EPSILON)
# value and advantage
self.V = self.critic(self.inputs, 1) # value predicted
self.A = self.R - self.V
# loss
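        # Policy loss: negative log-probability of the taken action weighted by
        # the advantage; `tf.stop_gradient` keeps the actor update from
        # back-propagating into the critic.  Value loss: scaled squared error of
        # the advantage (n-step return minus predicted value).  Entropy term:
        # sum of p*log(p), which when minimized maximizes entropy and encourages
        # exploration.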
self.loss_policy = -tf.reduce_sum(self.log_DP * tf.stop_gradient(self.A))
self.loss_value = LOSS_V * tf.nn.l2_loss(self.A)
self.loss_entropy = ENTROPY_BETA * tf.reduce_sum(self.P * tf.log(self.P + _EPSILON))
# optimizer
#self.actor_optimizer = tf.train.AdamOptimizer(
# actor_learning_rate).minimize(self.loss_policy + self.loss_entropy)
#self.critic_optimizer = tf.train.AdamOptimizer(
# critic_learning_rate).minimize(self.loss_value)
self.l2_policy = L2_P * tf.add_n(self.actor.get_regularizers())
self.l2_value = L2_V * tf.add_n(self.critic.get_regularizers())
self.actor_optimizer = tf.train.AdamOptimizer(
actor_learning_rate).minimize(
self.loss_policy + self.l2_policy + self.loss_entropy)
self.critic_optimizer = tf.train.AdamOptimizer(
critic_learning_rate).minimize(
self.loss_value + self.l2_value)
# session
self.sess = tf.Session()
self.init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init_op)
self.default_graph = tf.get_default_graph()
self.default_graph.finalize()
def predict_value(self, state):
return self.sess.run(self.V, {self.inputs: state})
def predict_policy(self, state):
return self.sess.run(self.P, {self.inputs: state})
def predict_action(self, state):
policy = self.sess.run(self.AP, {self.inputs: state})
return random.choices(self.action_space, policy)[0]
def train_actor(self, states, actions, targets):
self.sess.run(self.actor_optimizer,
{self.inputs: states, self.actions: actions, self.targets: targets})
def train_critic(self, states, targets):
self.sess.run(self.critic_optimizer,
{self.inputs: states, self.targets: targets})
def get_loss(self, states, actions, targets):
fetches = [self.loss_policy, self.loss_entropy, self.l2_policy,
self.loss_value, self.l2_value]
feed_dict = {self.inputs: states, self.actions: actions, self.targets: targets}
return self.sess.run(fetches, feed_dict)
|
mit
| -7,544,644,809,307,700,000
| 39.7
| 100
| 0.629164
| false
| 3.29613
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_operations.py
|
1
|
4744
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Network Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_10_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Network/operations'} # type: ignore
|
mit
| -2,921,255,939,064,702,000
| 42.522936
| 133
| 0.640388
| false
| 4.610301
| false
| false
| false
|
datamicroscopes/lda
|
test/test_reuters.py
|
1
|
4285
|
import os
import numpy as np
from nose.plugins.attrib import attr
from nose.tools import assert_almost_equal, assert_dict_equal
from nose.tools import assert_list_equal
from microscopes.lda import model, runner
from microscopes.lda.definition import model_definition
from microscopes.common.rng import rng
from microscopes.lda import utils
# Based on test_lda_reuters.py in ariddell's lda
# https://github.com/ariddell/lda/blob/57f721b05ffbdec5cb11c2533f72aa1f9e6ed12d/lda/tests/test_lda_reuters.py
class TestLDANewsReuters():
@classmethod
def _load_docs(cls):
test_dir = os.path.dirname(__file__)
reuters_ldac_fn = os.path.join(test_dir, 'data', 'reuters.ldac')
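        # `reuters.ldac` is assumed to be in Blei's LDA-C format: one document
        # per line as "M term_id:count ... term_id:count", which `docs_from_ldac`
        # expands into per-document token lists.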
with open(reuters_ldac_fn, 'r') as f:
cls.docs = utils.docs_from_ldac(f)
cls.V = utils.num_terms(cls.docs)
cls.N = len(cls.docs)
@classmethod
def setup_class(cls):
cls._load_docs()
cls.niters = 100 if os.environ.get('TRAVIS') else 2
cls.defn = model_definition(cls.N, cls.V)
cls.seed = 12345
cls.prng = rng(seed=cls.seed)
cls.latent = model.initialize(cls.defn, cls.docs, cls.prng)
cls.r = runner.runner(cls.defn, cls.docs, cls.latent)
cls.original_perplexity = cls.latent.perplexity()
cls.r.run(cls.prng, cls.niters)
cls.doc_topic = cls.latent.topic_distribution_by_document()
def test_lda_news(self):
assert len(self.doc_topic) == len(self.docs)
@attr('slow')
def test_lda_monotone(self):
# run additional iterations, verify improvement in log likelihood
self.r.run(self.prng, self.niters)
assert self.latent.perplexity() < self.original_perplexity
def test_lda_zero_iter(self):
# compare to model with 0 iterations
prng2 = rng(seed=54321)
latent2 = model.initialize(self.defn, self.docs, prng2)
assert latent2 is not None
r2 = runner.runner(self.defn, self.docs, latent2)
assert r2 is not None
doc_topic2 = latent2.topic_distribution_by_document()
assert doc_topic2 is not None
assert latent2.perplexity() > self.latent.perplexity()
@attr('slow')
def test_lda_random_seed(self):
# ensure that randomness is contained in rng
# by running model twice with same seed
niters = 10
# model 1
prng1 = rng(seed=54321)
latent1 = model.initialize(self.defn, self.docs, prng1)
runner1 = runner.runner(self.defn, self.docs, latent1)
runner1.run(prng1, niters)
# model2
prng2 = rng(seed=54321)
latent2 = model.initialize(self.defn, self.docs, prng2)
runner2 = runner.runner(self.defn, self.docs, latent2)
runner2.run(prng2, niters)
assert_list_equal(latent1.topic_distribution_by_document(),
latent2.topic_distribution_by_document())
for d1, d2 in zip(latent1.word_distribution_by_topic(),
latent2.word_distribution_by_topic()):
assert_dict_equal(d1, d2)
def test_lda_attributes(self):
assert np.array(self.doc_topic).shape == (self.N, self.latent.ntopics())
assert len(self.latent.word_distribution_by_topic()) == self.latent.ntopics()
for dist in self.latent.word_distribution_by_topic():
assert len(dist) == self.V
# check distributions sum to one
for dist in self.latent.word_distribution_by_topic():
assert_almost_equal(sum(dist.values()), 1)
for dist in self.latent.topic_distribution_by_document():
assert_almost_equal(sum(dist), 1)
def test_lda_1transform_basic(self):
n_docs = 3
n_topics = self.latent.ntopics()
docs_test = self.docs[0:n_docs]
doc_topic_test = np.array(self.latent.predict(docs_test, self.prng))
assert doc_topic_test.shape == (n_docs, n_topics)
np.testing.assert_almost_equal(doc_topic_test.sum(axis=1), np.ones(n_docs))
# one document
docs_test = self.docs[0]
doc_topic_test = np.array(self.latent.predict(docs_test, self.prng))
doc_topic_test.shape = (1, n_topics)
np.testing.assert_array_almost_equal(doc_topic_test.sum(axis=1), np.ones(1))
|
bsd-3-clause
| -5,277,405,691,397,815,000
| 37.258929
| 109
| 0.641774
| false
| 3.195377
| true
| false
| false
|
n6g7/django_markdown
|
django_markdown/widgets.py
|
1
|
1852
|
""" Widgets for django-markdown. """
import os
from django import forms
from django.contrib.admin.widgets import AdminTextareaWidget
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils.safestring import mark_safe
from . import settings
from .utils import editor_js_initialization
class MarkdownWidget(forms.Textarea):
""" Widget for a textarea.
Takes two additional optional keyword arguments:
``markdown_set_name``
Name for current set. Default: value of MARKDOWN_SET_NAME setting.
``markdown_skin``
Name for current skin. Default: value of MARKDOWN_EDITOR_SKIN setting.
"""
def __init__(self, attrs=None):
super(MarkdownWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
""" Render widget.
:returns: A rendered HTML
"""
html = super(MarkdownWidget, self).render(name, value, attrs)
attrs = self.build_attrs(attrs)
html += editor_js_initialization("#%s" % attrs['id'])
return mark_safe(html)
class Media:
css = {
'screen': (
staticfiles_storage.url(os.path.join('django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN, 'style.css')),
staticfiles_storage.url(os.path.join(settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css'))
)
}
js = (
staticfiles_storage.url(os.path.join('django_markdown', 'jquery.init.js')),
staticfiles_storage.url(os.path.join('django_markdown', 'jquery.markitup.js')),
staticfiles_storage.url(os.path.join(settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'set.js'))
)
class AdminMarkdownWidget(MarkdownWidget, AdminTextareaWidget):
""" Support markdown widget in Django Admin. """
pass
|
lgpl-3.0
| 4,647,823,019,649,413,000
| 30.389831
| 126
| 0.654428
| false
| 3.923729
| false
| false
| false
|
zpiman/MathIA
|
square.py
|
1
|
1854
|
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 8, 0.01)
myb = []
for i in x:
if np.floor(i)%2 == 0:
myb.append(1)
else:
myb.append(0)
y = np.array(myb)
plt.subplot(121)
plt.plot(x,y)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
myb = []
for i in x:
myb.append(0.5)
yl = np.array(myb)
plt.subplot(122)
plt.plot(x,yl)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
plt.show()
plt.subplot(121)
plt.plot(x,y)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
ys = yl + 2/np.pi*np.sin(x*np.pi)
plt.subplot(122)
plt.plot(x,ys)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
plt.show()
plt.subplot(131)
plt.plot(x,y)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
ys = np.zeros_like(ys)
ys += yl
for i in range(1,4,2):
ys += 2/(np.pi*i)*np.sin(x*np.pi*i)
plt.subplot(132)
plt.plot(x,ys)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
for i in range(5,100,2):
ys += 2/(np.pi*i)*np.sin(x*np.pi*i)
plt.subplot(133)
plt.plot(x,ys)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
plt.show()
|
apache-2.0
| 3,362,402,061,348,584,000
| 20.068182
| 62
| 0.59493
| false
| 2.228365
| false
| false
| false
|
muffato/pyEnsemblRest
|
template/genome.py
|
1
|
1247
|
#__GENERATED_OBJECTS__
__feature_types = {
'gene' : GeneFeature,
'transcript' : TranscriptFeature,
'cds': CDSFeature,
'exon' : ExonFeature,
'repeat' : RepeatFeature,
'simple' : SimpleFeature,
'misc' : MiscFeature,
'variation' : VariationFeature,
'somatic_variation' : VariationFeature,
'structural_variation' : StructuralVariationFeature,
'somatic_structural_variation' : StructuralVariationFeature,
'constrained' : ConstrainedElementFeature,
'regulatory' : RegulatoryFeature,
'motif' : MotifFeature,
'chipseq' : ChipSeqFeature,
'translation' : TranslationFeature,
}
def feature_wrapper(d, r):
"""
    Wrapper around the various types of features.
It automatically selects the appropriate type for the fetched features.
"""
t = d.get('object_type')
if t is None:
t = d.get('feature_type')
if t is None:
print("Unable to find the type of", d)
t = Feature
else:
t = t.lower()
if t not in __feature_types:
print("Unrecognized feature type:", t)
t = Feature
else:
t = __feature_types[t]
return t(d,r)
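# Usage sketch (not part of the original template; the payload dict and the
# "response" placeholder are hypothetical):
#
#   payload = {'object_type': 'Gene', 'id': 'ENSG00000139618'}
#   feature = feature_wrapper(payload, response)   # -> GeneFeature instance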
|
apache-2.0
| -6,060,772,300,965,686,000
| 28
| 75
| 0.587811
| false
| 3.778788
| false
| false
| false
|
johnbywater/eventsourcing
|
setup.py
|
1
|
2434
|
from distutils.core import setup
from eventsourcing import __version__
crypto_requires = ["pycryptodome<=3.9.99999"]
postgresql_requires = ["psycopg2<=2.8.99999"]
postgresql_dev_requires = ["psycopg2-binary<=2.8.99999"]
docs_requires = (
postgresql_dev_requires
+ crypto_requires
+ [
"Sphinx==1.8.5",
"python_docs_theme",
"sphinx_rtd_theme==0.4.3",
]
)
dev_requires = docs_requires + [
"python-coveralls",
"coverage",
"black",
"mypy",
"flake8",
"flake8-bugbear",
"isort",
]
long_description = """
A library for event sourcing in Python.
`Package documentation is now available <http://eventsourcing.readthedocs.io/>`_.
`Please raise issues on GitHub <https://github.com/johnbywater/eventsourcing/issues>`_.
"""
packages = [
"eventsourcing",
"eventsourcing.tests",
"eventsourcing.examples",
"eventsourcing.examples.bankaccounts",
"eventsourcing.examples.cargoshipping",
]
setup(
name="eventsourcing",
version=__version__,
description="Event sourcing in Python",
author="John Bywater",
author_email="john.bywater@appropriatesoftware.net",
url="https://github.com/johnbywater/eventsourcing",
license="BSD-3-Clause",
packages=packages,
package_data={"eventsourcing": ["py.typed"]},
install_requires=[],
extras_require={
"postgres": postgresql_requires,
"postgres_dev": postgresql_dev_requires,
"crypto": crypto_requires,
"docs": docs_requires,
"dev": dev_requires,
},
zip_safe=False,
long_description=long_description,
keywords=[
"event sourcing",
"event store",
"domain driven design",
"domain-driven design",
"ddd",
"cqrs",
"cqs",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
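# Install sketch (hypothetical commands; the extras correspond to the
# extras_require keys defined above):
#   pip install eventsourcing                 # core package only
#   pip install "eventsourcing[postgres]"     # adds psycopg2
#   pip install "eventsourcing[crypto]"       # adds pycryptodome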
|
bsd-3-clause
| -1,621,732,981,441,119,200
| 26.348315
| 87
| 0.620378
| false
| 3.821036
| false
| false
| false
|
XuesongYang/end2end_dialog
|
PipelineLstmModel.py
|
1
|
9106
|
''' Pipelined bi-directional LSTM model.
This model stacks the separate biLSTM NLU and biLSTM SAP models together,
and its weights are initialized with those of the separate models. Besides,
for the SAP task, the decision threshold on the output layer is tuned
on dev data.
Author : Xuesong Yang
Email : xyang45@illinois.edu
Created Date: Dec. 31, 2016
'''
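# Example invocations (hypothetical file names; the options are defined in the
# argparse section below):
#
#   # tune the agent-act threshold on dev data
#   python PipelineLstmModel.py --data-npz data.npz --tag-weights tag.h5 \
#       --intent-weights intent.h5 --act-weights act.h5 \
#       --intent-threshold 0.5 --tune
#
#   # run on test data with a fixed, already-tuned threshold
#   python PipelineLstmModel.py --data-npz data.npz --tag-weights tag.h5 \
#       --intent-weights intent.h5 --act-weights act.h5 \
#       --intent-threshold 0.5 --act-threshold 0.4 --model-folder ./model/pipe_1234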
import numpy as np
from utils import checkExistence, get_windowedVec, eval_intentPredict, getActPred
from AgentActClassifyingModel import writeUtterActTxt
from DataSetCSVagentActPred import DataSetCSVagentActPred
import os
import argparse
def load_model_NLU(model_weights, test_data):
from SlotTaggingModel_multitask import SlotTaggingModel
params = ['train_data', 'dev_data', 'epoch_nb', 'batch_size', 'embedding_size', 'hidden_size',
'dropout_ratio', 'optimizer', 'patience', 'loss', 'test_tag_only', 'test_intent_only', 'threshold']
argparams = {key: None for key in params}
argparams['weights_fname'] = model_weights
argparams['model_folder'] = os.path.dirname(model_weights).replace('/weights', '', 1)
argparams['test_data'] = test_data
model = SlotTaggingModel(**argparams)
model.load_model()
return model
#def load_model_Policy(model_weights, test_data, threshold):
def load_model_Policy(model_weights):
from AgentActClassifyingModel import AgentActClassifying
params = ['train_data', 'dev_data', 'test_data', 'epoch_nb', 'batch_size', 'hidden_size',
'dropout_ratio', 'optimizer', 'patience', 'loss', 'threshold']
argparams = {key: None for key in params}
argparams['weights_fname'] = model_weights
argparams['model_folder'] = os.path.dirname(model_weights).replace('/weights', '', 1)
argparams['threshold'] = 1.0
# argparams['test_data'] = test_data
model = AgentActClassifying(**argparams)
model.load_model()
return model
def readTagPredTxt(tag_pred_txt, userTag2id, sample_nb, userTag_vocab_size):
checkExistence(tag_pred_txt)
indicator = np.zeros((sample_nb, userTag_vocab_size))
with open(tag_pred_txt, 'rb') as f:
for idx, line in enumerate(f):
for tag in line.strip().split():
tag = 'tag-{}'.format(tag)
if tag in userTag2id:
pos = userTag2id[tag] - 1
else:
pos = 0
indicator[idx, pos] = 1.
return indicator
def readIntentPredTxt(intent_pred_txt, userIntent2id, sample_nb, userIntent_vocab_size):
checkExistence(intent_pred_txt)
indicator = np.zeros((sample_nb, userIntent_vocab_size))
with open(intent_pred_txt, 'rb') as f:
for idx, line in enumerate(f):
for intent in line.strip().split(';'):
if intent == 'null':
continue
intent = 'intent-{}'.format(intent)
if intent in userIntent2id:
pos = userIntent2id[intent] - 1
else:
pos = 0
indicator[idx, pos] = 1.
return indicator
def pipelinePrediction(test_data, tag_model_weights, intent_model_weights, act_model_weights, result_folder, tuneTh=True, threshold=None):
# load slot tagging model, and make prediction
tag_model = load_model_NLU(tag_model_weights, test_data)
tag_model.test_tag_flag = True
tag_model.model_folder = result_folder
tag_model.predict()
tag_pred_txt = '{}/test_result/tag_{}.pred'.format(tag_model.model_folder, os.path.basename(tag_model_weights).split('_')[0])
tag_pred_indicator = readTagPredTxt(tag_pred_txt, test_data.userTag2id,
len(test_data.userTag_txt), test_data.userTag_vocab_size)
# load user intent model and make prediction
intent_model = load_model_NLU(intent_model_weights, test_data)
intent_model.test_intent_flag = True
    intent_model.threshold = threshold_intent  # module-level value parsed in __main__, not the 'threshold' argument of this function
intent_model.model_folder = result_folder
intent_model.predict()
intent_pred_txt = '{}/test_result/intent_{}.pred'.format(intent_model.model_folder, os.path.basename(intent_model_weights).split('_')[0])
intent_pred_indicator = readIntentPredTxt(intent_pred_txt, test_data.userIntent2id,
len(test_data.userIntent_txt), test_data.userIntent_vocab_size)
# merge indicators of slot tagging and user intents, and generate windowed tagIntent matrix
assert len(tag_pred_indicator) == len(intent_pred_indicator), 'sample_nb is not equal.'
nlu_vecBin = np.hstack((tag_pred_indicator, intent_pred_indicator))
# load agent act model and make prediction
act_model = load_model_Policy(act_model_weights)
act_model.model_folder = result_folder
nlu_vecBin_windowed = get_windowedVec(nlu_vecBin, act_model.window_size)
if tuneTh:
# tune threshold
print('Tuning threshold on Dev ...')
act_probs = act_model.model.predict(nlu_vecBin_windowed)
precision, recall, fscore, accuracy_frame, act_threshold = eval_intentPredict(act_probs, test_data.agentAct_vecBin)
print('AgentActPred on Dev: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}, threshold={:.4f}'.format(precision, recall, fscore, accuracy_frame, act_threshold))
dev_pred_txt = getActPred(act_probs, act_threshold, test_data.id2agentAct)
dev_results_fname = '{}/act_dev.pred'.format(act_model.model_folder)
writeUtterActTxt(test_data.userUtter_txt, dev_pred_txt, dev_results_fname)
print('Write dev results: {}'.format(dev_results_fname))
return act_threshold
else:
# make prediction based on well-tuned threshold
assert threshold is not None, 'Argument required: threshold for agent action prediction.'
act_model.threshold = threshold
act_model.test_data = test_data
act_model.test_data.userTagIntent_vecBin = nlu_vecBin_windowed
act_model.predict()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data-npz', dest='npz_file', help='.npz file that contains the instance of DataSetCSVagentActPred class')
parser.add_argument('--intent-weights', dest='intent_weights', help='.h5 weights for best user intent model')
parser.add_argument('--tag-weights', dest='tag_weights', help='.h5 weights for best user slot tagging model')
parser.add_argument('--act-weights', dest='act_weights', help='.h5 weights for oracle agent act model')
parser.add_argument('--intent-threshold', dest='intent_threshold', type=float, help='decision threshold for intent model')
parser.add_argument('--tune', dest='tune_threshold', action='store_true', help='tune decision threshold for act model if this option is activated.')
parser.add_argument('--act-threshold', dest='act_threshold', type=float, help='decision threshold for agent act model')
parser.add_argument('--model-folder', dest='model_folder', help='model folder')
args = parser.parse_args()
argparams = vars(args)
pid = os.getpid()
npz_file = argparams['npz_file']
intent_model_weights = argparams['intent_weights']
tag_model_weights = argparams['tag_weights']
act_model_weights = argparams['act_weights']
threshold_intent = argparams['intent_threshold']
tune_threshold = argparams['tune_threshold']
threshold_act = argparams['act_threshold']
# validate params
checkExistence(npz_file)
checkExistence(intent_model_weights)
checkExistence(tag_model_weights)
checkExistence(act_model_weights)
assert threshold_intent is not None, 'Argument required: --intent-threshold'
for key in sorted(argparams.keys()):
print('\t{}={}'.format(key, argparams[key]))
# load test data
data_npz = np.load(npz_file)
if tune_threshold:
dev_result_folder = './model/pipe_{}/dev'.format(pid)
if not os.path.exists(dev_result_folder):
os.makedirs(dev_result_folder)
print('\tdev_result_folder={}'.format(dev_result_folder))
dev_data = data_npz['dev_data'][()]
assert isinstance(dev_data, DataSetCSVagentActPred)
act_threshold = pipelinePrediction(dev_data, tag_model_weights, intent_model_weights, act_model_weights, dev_result_folder, tuneTh=True)
else:
assert threshold_act is not None, 'Argument required: --act-threshold.'
assert argparams['model_folder'] is not None, 'Argument required: --model-folder'
test_result_folder = '{}/test'.format(argparams['model_folder'])
if not os.path.exists(test_result_folder):
os.makedirs(test_result_folder)
print('\ttest_result_folder={}'.format(test_result_folder))
test_data = data_npz['test_data'][()]
assert isinstance(test_data, DataSetCSVagentActPred)
pipelinePrediction(test_data, tag_model_weights, intent_model_weights, act_model_weights, test_result_folder, tuneTh=False, threshold=threshold_act)
|
mit
| 5,262,195,568,939,314,000
| 49.871508
| 190
| 0.669888
| false
| 3.604909
| true
| false
| false
|
alviezhang/leetcode
|
91.decode_ways/solution.py
|
1
|
1077
|
# coding: utf-8
class Solution:
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if s == "" or s[0] == '0':
return 0
# Initialization
before_previous = 0
previous_one = 0
current = 1
for i in range(1, len(s)):
before_previous = previous_one
previous_one = current
            # When the current character is '0': if the previous character is in ['1', '2'],
            # f(i) = f(i-2), otherwise the result is 0
if s[i] == '0':
if s[i-1] not in ['1', '2']:
return 0
                # In particular, when before_previous == 0 (i.e. i == 1), current = 1
current = before_previous if before_previous else 1
continue
# f(i) = f(i - 1)
current = previous_one
if s[i-1] != '0' and int(s[i-1] + s[i]) <= 26:
# f(i) = f(i - 1) + f(i - 2)
current += before_previous if before_previous else 1
return current
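if __name__ == "__main__":
    # Usage sketch (not part of the original solution): a few hand-checked
    # examples of the DP above.
    solver = Solution()
    assert solver.numDecodings("226") == 3  # "2 2 6", "22 6", "2 26"
    assert solver.numDecodings("10") == 1   # "10"
    assert solver.numDecodings("100") == 0  # "00" cannot be decoded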
|
mit
| 6,711,470,115,184,742,000
| 26.615385
| 84
| 0.44104
| false
| 3.792254
| false
| false
| false
|
openelections/openelections-core
|
openelex/us/vt/validate/validates.py
|
1
|
8951
|
import re
from openelex.models import Contest, Candidate, Office, Result
import logging
import time
import os
# if not os.path.isdir("logs"):
# os.makedirs("logs")
# logging.basicConfig(filename=time.strftime("logs/%Y%m%d-%H%M%S-validate.log"),level=logging.DEBUG)
# Generic validation helpers
def _validate_candidate_votes(election_id, reporting_level, contest_slug,
candidate_slug, expected_votes):
"""Sum sub-contest level results and compare them to known totals"""
msg = "Expected {} votes for contest {} and candidate {}, found {}"
votes = Result.objects.filter(election_id=election_id,
contest_slug=contest_slug, candidate_slug=candidate_slug,
reporting_level=reporting_level).sum('votes')
if votes != expected_votes:
logging.debug("db.getCollection('result').find({election_id:\"%s\", \
contest_slug:\"%s\", candidate_slug:\"%s\", \
reporting_level:\"%s\"})", election_id, contest_slug, candidate_slug, reporting_level)
assert votes == expected_votes, msg.format(expected_votes, contest_slug,
candidate_slug, votes)
def _validate_many_candidate_votes(election_id, reporting_level,
candidates):
"""
Sum sub-contest level results and compare them to known totals for
multiple contests and candidates.
Arguments:
election_id - Election ID of the election of interest.
reporting_level - Reporting level to use to aggregate results.
candidates - Tuple of contests slug, candidate slug and expected votes.
"""
for candidate_info in candidates:
contest, candidate, expected = candidate_info
_validate_candidate_votes(election_id, reporting_level,
contest, candidate, expected)
def validate_results_2012_president_general():
"""Sum some county-level results for 2012 general presidential and compare with known totals"""
election_id = 'vt-2012-11-06-general'
known_results = [
('president', 'barack-obama', 199053),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2014_house_general():
"""Sum some county-level results for 2014 general and compare with known totals"""
election_id = 'vt-2014-11-04-general'
known_results = [
('us-house-of-representatives', 'peter-welch', 123349),
('us-house-of-representatives', 'mark-donka', 59432),
('us-house-of-representatives', 'cris-ericson', 2750),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2014_house_primary():
"""Sum some county-level results for 2014 house primary and compare with known totals"""
election_id = 'vt-2014-08-26-primary'
known_results = [
('us-house-of-representatives-d', 'peter-welch', 19248),
('us-house-of-representatives-d', 'writeins', 224),
('us-house-of-representatives-r', 'mark-donka', 4340),
('us-house-of-representatives-r', 'donald-russell', 4026),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2002_lt_gov_general():
"""Sum some county-level results for 2002 lt-gov general and compare with known totals"""
election_id = 'vt-2002-11-05-general'
known_results = [
('lieutenant-governor', 'peter-shumlin', 73501),
('lieutenant-governor', 'brian-e-dubie', 94044),
('lieutenant-governor', 'anthony-pollina', 56564),
('lieutenant-governor', 'sally-ann-jones', 4310),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2002_lt_gov_primary():
"""Sum some county-level results for 2002 lt-gov primary and compare with known totals"""
election_id = 'vt-2002-09-10-primary'
known_results = [
('lieutenant-governor-d', 'peter-shumlin', 22633),
('lieutenant-governor-r', 'brian-e-dubie', 22584),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2004_misc_results_general():
"""Sum some state specific results for 2004 general and compare with known totals"""
election_id = 'vt-2004-11-02-general'
known_results = [
('treasurer', 'jeb-spaulding', 273705),
('secretary-of-state', 'deb-markowitz', 270744),
('auditor', 'randy-brock', 152848),
('auditor', 'elizabeth-m-ready', 122498),
('auditor', 'jerry-levy', 17685),
('attorney-general', 'william-h-sorrell', 169726),
# there is an error on the vermont website, I talked to the VT Sec state and the real result should be 81,285
# ('attorney-general', 'dennis-carver', 90285),
('attorney-general', 'susan-a-davis', 14351),
('attorney-general', 'james-mark-leas', 8769),
('attorney-general', 'karen-kerin', 6357),
('attorney-general', 'boots-wardinski', 2944),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2008_state_senate_primary():
"""Sum some county-level results for 2008 state senate primary and compare with known totals"""
election_id = 'vt-2008-09-08-primary'
known_results = [
('state-senate-orange-d', 'mark-a-macdonald', 557),
('state-senate-franklin-r', 'randy-brock', 879),
('state-senate-franklin-r', 'willard-rowell', 782),
('state-senate-essexorleans-d', 'robert-a-starr', 748),
('state-senate-essexorleans-d', 'writeins', 112),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2010_state_senate_general():
"""Sum some county-level results for 2010 state senate general and compare with known totals"""
election_id = 'vt-2010-11-02-general'
known_results = [
('state-senate-orange', 'mark-a-macdonald', 4524),
('state-senate-orange', 'stephen-w-webster', 3517),
('state-senate-franklin', 'randy-brock', 9014),
('state-senate-franklin', 'peter-d-moss', 793),
('state-senate-essexorleans', 'robert-a-starr', 9902),
('state-senate-essexorleans', 'vincent-illuzzi', 9231),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2012_state_house_primary():
"""Sum some county-level results for 2012 state house primary and compare with known totals"""
election_id = 'vt-2012-03-06-primary'
known_results = [
('house-of-representatives-addison-5-d', 'edward-v-mcguire', 220),
('house-of-representatives-addison-5-r', 'harvey-smith', 75),
('house-of-representatives-addison-1-d', 'betty-a-nuovo', 486),
('house-of-representatives-addison-1-d', 'paul-ralston', 446),
('house-of-representatives-bennington-1-d', 'bill-botzow', 152),
('house-of-representatives-caledonia-1-r', 'leigh-b-larocque', 72),
('house-of-representatives-chittenden-61-d', 'joanna-cole', 658),
('house-of-representatives-chittenden-61-d', 'bill-aswad', 619),
('house-of-representatives-chittenden-61-d', 'robert-hooper', 536),
('house-of-representatives-chittenden-61-r', 'kurt-wright', 116),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2012_state_house_general():
"""Sum some county-level results for 2012 state house general and compare with known totals"""
election_id = 'vt-2012-11-06-general'
known_results = [
('house-of-representatives-addison-5', 'edward-v-mcguire', 982),
('house-of-representatives-addison-5', 'harvey-smith', 1151),
('house-of-representatives-addison-1', 'betty-a-nuovo', 2601),
('house-of-representatives-addison-1', 'paul-ralston', 2378),
('house-of-representatives-bennington-1', 'bill-botzow', 1613),
('house-of-representatives-caledonia-1', 'leigh-b-larocque', 1143),
('house-of-representatives-chittenden-61', 'joanna-cole', 2008),
('house-of-representatives-chittenden-61', 'bill-aswad', 1987),
('house-of-representatives-chittenden-61', 'kurt-wright', 2332),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
|
mit
| -705,504,772,250,037,200
| 45.378238
| 117
| 0.669199
| false
| 3.174113
| true
| false
| false
|
snowflying/messager
|
messager/common/sslutils.py
|
1
|
2833
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from messager.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
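# Usage sketch (not part of the original module; assumes CONF.ssl.cert_file and
# CONF.ssl.key_file point at valid files):
#
#   import socket
#   if is_enabled():
#       listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       listener.bind(("0.0.0.0", 8443))
#       listener.listen(1)
#       conn, _ = listener.accept()
#       conn = wrap(conn)  # TLS-wrapped server-side socket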
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
|
apache-2.0
| -7,891,878,470,662,829,000
| 27.908163
| 78
| 0.61772
| false
| 3.722733
| false
| false
| false
|
Yukarumya/Yukarum-Redfoxes
|
testing/marionette/harness/marionette_harness/tests/unit/test_screen_orientation.py
|
1
|
3917
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver import errors
from mozrunner.devices.emulator_screen import EmulatorScreen
from marionette_harness import MarionetteTestCase, skip_if_desktop, skip_if_mobile
default_orientation = "portrait-primary"
unknown_orientation = "Unknown screen orientation: {}"
class TestScreenOrientation(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.is_mobile = self.marionette.session_capabilities.get("rotatable", False)
def tearDown(self):
if self.is_mobile:
self.marionette.set_orientation(default_orientation)
self.assertEqual(self.marionette.orientation, default_orientation, "invalid state")
MarionetteTestCase.tearDown(self)
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_portrait_primary(self):
self.marionette.set_orientation("portrait-primary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "portrait-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_landscape_primary(self):
self.marionette.set_orientation("landscape-primary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_portrait_secondary(self):
self.marionette.set_orientation("portrait-secondary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "portrait-secondary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_landscape_secondary(self):
self.marionette.set_orientation("landscape-secondary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-secondary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_shorthand_portrait(self):
# Set orientation to something other than portrait-primary first, since the default is
# portrait-primary.
self.marionette.set_orientation("landscape-primary")
self.assertEqual(self.marionette.orientation, "landscape-primary", "invalid state")
self.marionette.set_orientation("portrait")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "portrait-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_shorthand_landscape(self):
self.marionette.set_orientation("landscape")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_with_mixed_casing(self):
self.marionette.set_orientation("lAnDsCaPe")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_invalid_orientation(self):
with self.assertRaisesRegexp(errors.MarionetteException, unknown_orientation.format("cheese")):
self.marionette.set_orientation("cheese")
@skip_if_desktop("Not supported in Firefox")
def test_set_null_orientation(self):
with self.assertRaisesRegexp(errors.MarionetteException, unknown_orientation.format("null")):
self.marionette.set_orientation(None)
@skip_if_mobile("Specific test for Firefox")
def test_unsupported_operation_on_desktop(self):
with self.assertRaises(errors.UnsupportedOperationException):
self.marionette.set_orientation("landscape-primary")
|
mpl-2.0
| -3,902,149,290,655,037,400
| 44.546512
| 103
| 0.725045
| false
| 3.825195
| true
| false
| false
|
YannThorimbert/ThorPy-1.4.1
|
thorpy/elements/browser.py
|
1
|
5542
|
# -*- coding: utf-8 -*-
import os
import pygame
from thorpy.elements.browserlight import BrowserLight
from thorpy.elements._explorerutils._pathelement import PathElement
from thorpy.elements.element import Element
from thorpy.elements.inserter import Inserter
from thorpy.elements.ddlf import DropDownListFast
from thorpy.elements.text import OneLineText
from thorpy.miscgui.storage import Storer
from thorpy.miscgui.reaction import Reaction
from thorpy.miscgui import constants, functions, parameters, style, painterstyle
class Browser(BrowserLight):
"""Either use it dynamically (create a menu based on an explorer that is
created "on fly", or statically adding an explorer to the main element"""
def __init__(self,
path="./",
ddl_size=None,
normal_params=None,
folders=True,
files=True,
file_types=None,
text=""):
ddl_size = style.BROWSERLIGHT_DDL_SIZE if ddl_size is None else ddl_size
super(BrowserLight, self).__init__(normal_params=normal_params)
self.path = path
self._ddl_size = ddl_size
self.file_types = file_types
self.folders = folders
self.files = files
# DropDownListFast
actual_folders, actual_files = self._get_folders_and_files()
actual_files = self._filter_files(actual_files)
if not folders:
actual_folders = None
if not files:
actual_files = []
self._ddlf = DropDownListFast(self._ddl_size, titles=actual_files,
folders=actual_folders, has_lift=True)
## self._ddlf.finish()
# selection button
inserter_width = 3*ddl_size[0]//4
## if inserter_width > style.MAX_INSERTER_WIDTH:
## inserter_width = style.MAX_INSERTER_WIDTH
self._selected = Inserter("Selected : ", size=(inserter_width, None))
self._selected.finish()
if isinstance(text, str):
self.text_element = OneLineText(text)
self.text_element.finish()
else:
self.text_element = text
self._path_element = PathElement(self, True)
self._path_element.finish()
self.add_elements([self.text_element, self._path_element, self._ddlf,
self._selected])
reac_pressed = Reaction(parameters.BUTTON_UNPRESS_EVENT,
self._reaction_press,
reac_name=constants.REAC_PRESSED)
## self._ddlf._force_lift = True
self._ddlf.finish()
self.add_reaction(reac_pressed)
self._clicked = None
self._something_selected = False
painter = functions.obtain_valid_painter(painterstyle.BOX_PAINTER,
pressed=True,
radius=style.BOX_RADIUS)
self.set_painter(painter)
self._last_click = -2 * parameters.DOUBLE_CLICK_DELAY
def finish(self):
self._path_element._set_path_elements()
Element.finish(self)
self.store()
centerx = self.get_fus_rect().centerx
self.text_element.set_center((centerx, None))
ycoord = self._path_element._elements[0].get_storer_rect().centery
self._path_element._set_path_elements(ycoord)
self.set_prison()
## def store(self):
#### r = self.get_family_rect()
#### self.set_size((r.width, r.height))
## storer = Storer(self, margins=style.BROWSERLIGHT_STORE_MARGINS,
## gaps=style.BROWSERLIGHT_STORE_GAPS)
## storer.autoset_framesize()
## def _refresh_ddlf_lift(self):
## if self._ddlf._lift:
## functions.remove_element(self._ddlf._lift)
## if self._ddlf.get_family_rect().height > self._ddlf.get_fus_rect().height:
## self._ddlf.add_lift()
## functions.refresh_current_menu()
def _refresh_ddlf(self):
self.path = self._path_element._path
actual_folders, actual_files = self._get_folders_and_files()
actual_files = self._filter_files(actual_files)
if not self.folders:
actual_folders = None
if not self.files:
actual_files = []
self._ddlf._dv = self._ddlf._get_dirviewer(titles=actual_files,
size=self._ddl_size,
folders=actual_folders)
self._refresh_ddlf_lift()
def _go_to_dir(self, selected):
self._path_element._path = selected
self._path_element._path_list = self._path_element._get_strs()
ycoord = self._path_element._elements[0].get_storer_rect().centery
self._path_element._set_path_elements(ycoord)
functions.refresh_current_menu()
self._refresh_ddlf()
self.unblit()
self.blit()
self.update()
def _reaction_press(self, event):
hit_lift = False
if self._ddlf._lift:
if self._ddlf._lift.get_fus_rect().collidepoint(event.pos):
hit_lift = True
if not hit_lift:
BrowserLight._reaction_press(self, event)
selected = self.get_value()
tick = pygame.time.get_ticks()
if os.path.isdir(selected):
if tick - self._last_click < parameters.DOUBLE_CLICK_DELAY:
self._go_to_dir(selected)
self._last_click = tick
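# Usage sketch (hypothetical; assumes a thorpy application and menu are already
# set up so the element can be displayed):
#
#   browser = Browser(path="./", text="Pick a file", file_types=[".png"])
#   browser.finish()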
|
mit
| -6,465,121,483,309,031,000
| 40.365672
| 84
| 0.579033
| false
| 3.827348
| false
| false
| false
|
avinet/adaptive3-qgis-plugin
|
dlgNewProjectBase.py
|
1
|
2600
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dlgNewProject.ui'
#
# Created: Fri Apr 01 12:27:36 2016
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_NewProjectDialogBase(object):
def setupUi(self, NewProjectDialogBase):
NewProjectDialogBase.setObjectName(_fromUtf8("NewProjectDialogBase"))
NewProjectDialogBase.resize(228, 86)
self.buttonBox = QtGui.QDialogButtonBox(NewProjectDialogBase)
self.buttonBox.setGeometry(QtCore.QRect(20, 50, 201, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.splitter = QtGui.QSplitter(NewProjectDialogBase)
self.splitter.setGeometry(QtCore.QRect(20, 10, 196, 20))
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.labelProjectName = QtGui.QLabel(self.splitter)
self.labelProjectName.setEnabled(True)
self.labelProjectName.setLayoutDirection(QtCore.Qt.LeftToRight)
self.labelProjectName.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.labelProjectName.setObjectName(_fromUtf8("labelProjectName"))
self.lineProjectName = QtGui.QLineEdit(self.splitter)
self.lineProjectName.setObjectName(_fromUtf8("lineProjectName"))
self.retranslateUi(NewProjectDialogBase)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewProjectDialogBase.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewProjectDialogBase.reject)
QtCore.QMetaObject.connectSlotsByName(NewProjectDialogBase)
def retranslateUi(self, NewProjectDialogBase):
NewProjectDialogBase.setWindowTitle(_translate("NewProjectDialogBase", "New project", None))
self.labelProjectName.setText(_translate("NewProjectDialogBase", "Project name", None))
|
mit
| 3,318,942,337,191,141,000
| 46.272727
| 115
| 0.740385
| false
| 3.939394
| false
| false
| false
|
TAMU-CPT/galaxy-tools
|
tools/webapollo/create_or_update_organism.py
|
1
|
3514
|
#!/usr/bin/env python
import sys
import json
import argparse
import time
from webapollo import WAAuth, WebApolloInstance, OrgOrGuess, GuessOrg, AssertUser
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create or update an organism in an Apollo instance"
)
WAAuth(parser)
parser.add_argument("jbrowse", help="JBrowse Data Directory")
parser.add_argument("email", help="User Email")
OrgOrGuess(parser)
parser.add_argument("--genus", help="Organism Genus")
parser.add_argument("--species", help="Organism Species")
parser.add_argument("--public", action="store_true", help="Make organism public")
parser.add_argument("--group", help="Give access to a user group")
args = parser.parse_args()
wa = WebApolloInstance(args.apollo, args.username, args.password)
org_cn = GuessOrg(args, wa)
if isinstance(org_cn, list):
org_cn = org_cn[0]
# User must have an account
gx_user = AssertUser(wa.users.loadUsers(email=args.email))
log.info("Determining if add or update required")
try:
org = wa.organisms.findOrganismByCn(org_cn)
except Exception:
org = None
if org:
has_perms = False
for user_owned_organism in gx_user.organismPermissions:
if "WRITE" in user_owned_organism["permissions"]:
has_perms = True
break
if not has_perms:
print(
"Naming Conflict. You do not have permissions to access this organism. Either request permission from the owner, or choose a different name for your organism."
)
sys.exit(2)
log.info("\tUpdating Organism")
data = wa.organisms.updateOrganismInfo(
org["id"],
org_cn,
args.jbrowse,
# mandatory
genus=args.genus,
species=args.species,
public=args.public,
)
time.sleep(20)
data = [wa.organisms.findOrganismById(org["id"])]
else:
# New organism
log.info("Adding Organism")
try:
data = wa.organisms.addOrganism(
org_cn,
args.jbrowse,
genus=args.genus,
species=args.species,
public=args.public,
)
except Exception as errorOut:
log.info("Exception on Organism Common Name '" + org_cn + "':")
log.info(errorOut)
if str(errorOut)[-3:] == "504":
log.info("\nThe Galaxy server timed out while waiting for Apollo to finish. Your organism was most likely created, but will need to be manually assigned to your account by an administrator. Please submit a bug report for this job and we will get back to you shortly.\n")
exit(2)
# Must sleep before we're ready to handle
time.sleep(20)
log.info("Updating permissions for %s on %s", gx_user, org_cn)
wa.users.updateOrganismPermission(
gx_user, org_cn, write=True, export=True, read=True
)
# Group access
if args.group:
group = wa.groups.loadGroupByName(name=args.group)
res = wa.groups.updateOrganismPermission(
group, org_cn, administrate=False, write=True, read=True, export=True
)
data = [o for o in data if o["commonName"] == org_cn]
print(json.dumps(data, indent=2))
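# Example invocation (hypothetical values; WAAuth and OrgOrGuess register the
# Apollo credential and organism-name options, which are abbreviated to "..."
# here):
#
#   python create_or_update_organism.py ... /path/to/jbrowse/data \
#       user@example.org --genus Escherichia --species coli --public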
|
gpl-3.0
| 1,875,516,418,846,996,000
| 33.792079
| 284
| 0.610985
| false
| 3.750267
| false
| false
| false
|
Com-Mean/MLinAcition
|
chapter2/numpyIntro.py
|
1
|
1099
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# File Name: numpyIntro.py
# Author: lpqiu
# mail: qlp_1018@126.com
# Created Time: 2014-09-06, Saturday, 16:33:05
#########################################################################
import numpy as np
def triangleWave(x, c, c0, hc=1.0):
x = x - int(x)
ret = 0
if x >= c:
ret = 0
elif x < c0:
ret = (hc/c0)*x
else:
ret = (hc/(c0 -c))*(x - c)
return ret
def triangleFunc(c, c0, hc=1.0):
def trgFun(x):
x = x - int(x)
ret = 0
if x >= c:
ret = 0
elif x < c0:
ret = (hc/c0)*x
else:
ret = (hc/(c0 -c))*(x - c)
return ret
return np.frompyfunc(trgFun, 1, 1)
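# Note (added sketch): np.frompyfunc returns arrays with dtype=object, so the
# result usually needs an explicit cast before further numeric work, e.g.:
#
#   y = triangleFunc(0.6, 0.4, 1.0)(x).astype(np.float64)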
if __name__=="__main__":
x = np.linspace(0, 2, 1000)
y = np.array([triangleWave(t, 0.6, 0.4, 1.0) for t in x])
    triangleFun = np.frompyfunc(lambda x: triangleWave(x, 0.6, 0.4, 1.0), 1, 1)
y2 = triangleFun(x)
y3 = triangleFunc(0.6, 0.4, 1.0)(x)
|
gpl-3.0
| -7,973,966,228,759,885,000
| 24.139535
| 76
| 0.427382
| false
| 2.800518
| false
| false
| false
|
Southpaw-TACTIC/Team
|
src/python/Lib/site-packages/PySide/examples/graphicsview/elasticnodes.py
|
1
|
13943
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2006-2006 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## Licensees holding a valid Qt License Agreement may use this file in
## accordance with the rights, responsibilities and obligations
## contained therein. Please consult your licensing agreement or
## contact sales@trolltech.com if any conditions of this licensing
## agreement are not clear to you.
##
## Further information about Qt licensing is available at:
## http://www.trolltech.com/products/qt/licensing.html or by
## contacting info@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
import weakref
import math
from PySide import QtCore, QtGui
class Edge(QtGui.QGraphicsItem):
Pi = math.pi
TwoPi = 2.0 * Pi
Type = QtGui.QGraphicsItem.UserType + 2
def __init__(self, sourceNode, destNode):
QtGui.QGraphicsItem.__init__(self)
self.arrowSize = 10.0
self.sourcePoint = QtCore.QPointF()
self.destPoint = QtCore.QPointF()
self.setAcceptedMouseButtons(QtCore.Qt.NoButton)
self.source = weakref.ref(sourceNode)
self.dest = weakref.ref(destNode)
self.source().addEdge(self)
self.dest().addEdge(self)
self.adjust()
def type(self):
return Edge.Type
def sourceNode(self):
return self.source()
def setSourceNode(self, node):
self.source = weakref.ref(node)
self.adjust()
def destNode(self):
return self.dest()
def setDestNode(self, node):
self.dest = weakref.ref(node)
self.adjust()
def adjust(self):
if not self.source() or not self.dest():
return
line = QtCore.QLineF(self.mapFromItem(self.source(), 0, 0), self.mapFromItem(self.dest(), 0, 0))
length = line.length()
if length == 0.0:
return
edgeOffset = QtCore.QPointF((line.dx() * 10) / length, (line.dy() * 10) / length)
self.prepareGeometryChange()
self.sourcePoint = line.p1() + edgeOffset
self.destPoint = line.p2() - edgeOffset
def boundingRect(self):
if not self.source() or not self.dest():
return QtCore.QRectF()
penWidth = 1
extra = (penWidth + self.arrowSize) / 2.0
return QtCore.QRectF(self.sourcePoint,
QtCore.QSizeF(self.destPoint.x() - self.sourcePoint.x(),
self.destPoint.y() - self.sourcePoint.y())).normalized().adjusted(-extra, -extra, extra, extra)
def paint(self, painter, option, widget):
if not self.source() or not self.dest():
return
# Draw the line itself.
line = QtCore.QLineF(self.sourcePoint, self.destPoint)
if line.length() == 0.0:
return
painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
painter.drawLine(line)
# Draw the arrows if there's enough room.
angle = math.acos(line.dx() / line.length())
if line.dy() >= 0:
angle = Edge.TwoPi - angle
sourceArrowP1 = self.sourcePoint + QtCore.QPointF(math.sin(angle + Edge.Pi / 3) * self.arrowSize,
math.cos(angle + Edge.Pi / 3) * self.arrowSize)
sourceArrowP2 = self.sourcePoint + QtCore.QPointF(math.sin(angle + Edge.Pi - Edge.Pi / 3) * self.arrowSize,
                                                          math.cos(angle + Edge.Pi - Edge.Pi / 3) * self.arrowSize)
destArrowP1 = self.destPoint + QtCore.QPointF(math.sin(angle - Edge.Pi / 3) * self.arrowSize,
math.cos(angle - Edge.Pi / 3) * self.arrowSize)
destArrowP2 = self.destPoint + QtCore.QPointF(math.sin(angle - Edge.Pi + Edge.Pi / 3) * self.arrowSize,
math.cos(angle - Edge.Pi + Edge.Pi / 3) * self.arrowSize)
painter.setBrush(QtCore.Qt.black)
painter.drawPolygon(QtGui.QPolygonF([line.p1(), sourceArrowP1, sourceArrowP2]))
painter.drawPolygon(QtGui.QPolygonF([line.p2(), destArrowP1, destArrowP2]))
class Node(QtGui.QGraphicsItem):
Type = QtGui.QGraphicsItem.UserType + 1
def __init__(self, graphWidget):
QtGui.QGraphicsItem.__init__(self)
self.graph = weakref.ref(graphWidget)
self.edgeList = []
self.newPos = QtCore.QPointF()
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
self.setCacheMode(self.DeviceCoordinateCache)
self.setZValue(-1)
def type(self):
return Node.Type
def addEdge(self, edge):
self.edgeList.append(weakref.ref(edge))
edge.adjust()
def edges(self):
return self.edgeList
def calculateForces(self):
if not self.scene() or self.scene().mouseGrabberItem() is self:
self.newPos = self.pos()
return
# Sum up all forces pushing this item away.
xvel = 0.0
yvel = 0.0
for item in self.scene().items():
if not isinstance(item, Node):
continue
line = QtCore.QLineF(self.mapFromItem(item, 0, 0), QtCore.QPointF(0, 0))
dx = line.dx()
dy = line.dy()
l = 2.0 * (dx * dx + dy * dy)
if l > 0:
xvel += (dx * 150.0) / l
yvel += (dy * 150.0) / l
# Now subtract all forces pulling items together.
weight = (len(self.edgeList) + 1) * 10.0
for edge in self.edgeList:
if edge().sourceNode() is self:
pos = self.mapFromItem(edge().destNode(), 0, 0)
else:
pos = self.mapFromItem(edge().sourceNode(), 0, 0)
xvel += pos.x() / weight
yvel += pos.y() / weight
if QtCore.qAbs(xvel) < 0.1 and QtCore.qAbs(yvel) < 0.1:
xvel = yvel = 0.0
sceneRect = self.scene().sceneRect()
self.newPos = self.pos() + QtCore.QPointF(xvel, yvel)
self.newPos.setX(min(max(self.newPos.x(), sceneRect.left() + 10), sceneRect.right() - 10))
self.newPos.setY(min(max(self.newPos.y(), sceneRect.top() + 10), sceneRect.bottom() - 10))
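    # Layout model implemented above (summarized from the code, not from
    # separate documentation): every other node adds dx*150/(2*d^2) and
    # dy*150/(2*d^2) to the velocity (repulsion), while each incident edge adds
    # the neighbour's relative position divided by (degree + 1) * 10
    # (attraction). Velocities below 0.1 on both axes are zeroed, and the new
    # position is clamped to the scene rectangle.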
def advance(self):
if self.newPos == self.pos():
return False
self.setPos(self.newPos)
return True
def boundingRect(self):
adjust = 2.0
return QtCore.QRectF(-10 - adjust, -10 - adjust,
23 + adjust, 23 + adjust)
def shape(self):
path = QtGui.QPainterPath()
path.addEllipse(-10, -10, 20, 20)
return path
def paint(self, painter, option, widget):
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.darkGray)
painter.drawEllipse(-7, -7, 20, 20)
gradient = QtGui.QRadialGradient(-3, -3, 10)
if option.state & QtGui.QStyle.State_Sunken:
gradient.setCenter(3, 3)
gradient.setFocalPoint(3, 3)
gradient.setColorAt(1, QtGui.QColor(QtCore.Qt.yellow).lighter(120))
gradient.setColorAt(0, QtGui.QColor(QtCore.Qt.darkYellow).lighter(120))
else:
gradient.setColorAt(0, QtCore.Qt.yellow)
gradient.setColorAt(1, QtCore.Qt.darkYellow)
painter.setBrush(QtGui.QBrush(gradient))
painter.setPen(QtGui.QPen(QtCore.Qt.black, 0))
painter.drawEllipse(-10, -10, 20, 20)
def itemChange(self, change, value):
if change == QtGui.QGraphicsItem.ItemPositionChange:
for edge in self.edgeList:
edge().adjust()
self.graph().itemMoved()
return QtGui.QGraphicsItem.itemChange(self, change, value)
def mousePressEvent(self, event):
self.update()
QtGui.QGraphicsItem.mousePressEvent(self, event)
def mouseReleaseEvent(self, event):
self.update()
QtGui.QGraphicsItem.mouseReleaseEvent(self, event)
class GraphWidget(QtGui.QGraphicsView):
def __init__(self):
QtGui.QGraphicsView.__init__(self)
self.timerId = 0
scene = QtGui.QGraphicsScene(self)
scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
scene.setSceneRect(-200, -200, 400, 400)
self.setScene(scene)
self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
self.setRenderHint(QtGui.QPainter.Antialiasing)
self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)
node1 = Node(self)
node2 = Node(self)
node3 = Node(self)
node4 = Node(self)
self.centerNode = Node(self)
node6 = Node(self)
node7 = Node(self)
node8 = Node(self)
node9 = Node(self)
scene.addItem(node1)
scene.addItem(node2)
scene.addItem(node3)
scene.addItem(node4)
scene.addItem(self.centerNode)
scene.addItem(node6)
scene.addItem(node7)
scene.addItem(node8)
scene.addItem(node9)
scene.addItem(Edge(node1, node2))
scene.addItem(Edge(node2, node3))
scene.addItem(Edge(node2, self.centerNode))
scene.addItem(Edge(node3, node6))
scene.addItem(Edge(node4, node1))
scene.addItem(Edge(node4, self.centerNode))
scene.addItem(Edge(self.centerNode, node6))
scene.addItem(Edge(self.centerNode, node8))
scene.addItem(Edge(node6, node9))
scene.addItem(Edge(node7, node4))
scene.addItem(Edge(node8, node7))
scene.addItem(Edge(node9, node8))
node1.setPos(-50, -50)
node2.setPos(0, -50)
node3.setPos(50, -50)
node4.setPos(-50, 0)
self.centerNode.setPos(0, 0)
node6.setPos(50, 0)
node7.setPos(-50, 50)
node8.setPos(0, 50)
node9.setPos(50, 50)
self.scale(0.8, 0.8)
self.setMinimumSize(400, 400)
self.setWindowTitle(self.tr("Elastic Nodes"))
def itemMoved(self):
if not self.timerId:
self.timerId = self.startTimer(1000 / 25)
def keyPressEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Up:
self.centerNode.moveBy(0, -20)
elif key == QtCore.Qt.Key_Down:
self.centerNode.moveBy(0, 20)
elif key == QtCore.Qt.Key_Left:
self.centerNode.moveBy(-20, 0)
elif key == QtCore.Qt.Key_Right:
self.centerNode.moveBy(20, 0)
elif key == QtCore.Qt.Key_Plus:
self.scaleView(1.2)
elif key == QtCore.Qt.Key_Minus:
self.scaleView(1 / 1.2)
elif key == QtCore.Qt.Key_Space or key == QtCore.Qt.Key_Enter:
for item in self.scene().items():
if isinstance(item, Node):
item.setPos(-150 + QtCore.qrand() % 300, -150 + QtCore.qrand() % 300)
else:
QtGui.QGraphicsView.keyPressEvent(self, event)
def timerEvent(self, event):
nodes = [item for item in self.scene().items() if isinstance(item, Node)]
for node in nodes:
node.calculateForces()
itemsMoved = False
for node in nodes:
if node.advance():
itemsMoved = True
if not itemsMoved:
self.killTimer(self.timerId)
self.timerId = 0
def wheelEvent(self, event):
self.scaleView(math.pow(2.0, -event.delta() / 240.0))
def drawBackground(self, painter, rect):
# Shadow.
sceneRect = self.sceneRect()
rightShadow = QtCore.QRectF(sceneRect.right(), sceneRect.top() + 5, 5, sceneRect.height())
bottomShadow = QtCore.QRectF(sceneRect.left() + 5, sceneRect.bottom(), sceneRect.width(), 5)
if rightShadow.intersects(rect) or rightShadow.contains(rect):
painter.fillRect(rightShadow, QtCore.Qt.darkGray)
if bottomShadow.intersects(rect) or bottomShadow.contains(rect):
painter.fillRect(bottomShadow, QtCore.Qt.darkGray)
# Fill.
gradient = QtGui.QLinearGradient(sceneRect.topLeft(), sceneRect.bottomRight())
gradient.setColorAt(0, QtCore.Qt.white)
gradient.setColorAt(1, QtCore.Qt.lightGray)
painter.fillRect(rect.intersect(sceneRect), QtGui.QBrush(gradient))
painter.setBrush(QtCore.Qt.NoBrush)
painter.drawRect(sceneRect)
# Text.
textRect = QtCore.QRectF(sceneRect.left() + 4, sceneRect.top() + 4,
sceneRect.width() - 4, sceneRect.height() - 4)
message = self.tr("Click and drag the nodes around, and zoom with the "
"mouse wheel or the '+' and '-' keys")
font = painter.font()
font.setBold(True)
font.setPointSize(14)
painter.setFont(font)
painter.setPen(QtCore.Qt.lightGray)
painter.drawText(textRect.translated(2, 2), message)
painter.setPen(QtCore.Qt.black)
painter.drawText(textRect, message)
def scaleView(self, scaleFactor):
factor = self.matrix().scale(scaleFactor, scaleFactor).mapRect(QtCore.QRectF(0, 0, 1, 1)).width()
if factor < 0.07 or factor > 100:
return
self.scale(scaleFactor, scaleFactor)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
QtCore.qsrand(QtCore.QTime(0,0,0).secsTo(QtCore.QTime.currentTime()))
widget = GraphWidget()
widget.show()
sys.exit(app.exec_())
|
epl-1.0
| -5,084,416,713,089,854,000
| 34.388325
| 138
| 0.588324
| false
| 3.644276
| false
| false
| false
|
stryder199/RyarkAssignments
|
Assignment2/web2py/gluon/decoder.py
|
1
|
3038
|
import codecs, encodings
"""Caller will hand this library a buffer and ask it to either convert
it or auto-detect the type.
Based on http://code.activestate.com/recipes/52257/
Licensed under the PSF License
"""
# None represents a potentially variable byte. "##" in the XML spec...
autodetect_dict={ # byte pattern : encoding name
(0x00, 0x00, 0xFE, 0xFF) : ("ucs4_be"),
(0xFF, 0xFE, 0x00, 0x00) : ("ucs4_le"),
(0xFE, 0xFF, None, None) : ("utf_16_be"),
(0xFF, 0xFE, None, None) : ("utf_16_le"),
(0x00, 0x3C, 0x00, 0x3F) : ("utf_16_be"),
(0x3C, 0x00, 0x3F, 0x00) : ("utf_16_le"),
(0x3C, 0x3F, 0x78, 0x6D): ("utf_8"),
(0x4C, 0x6F, 0xA7, 0x94): ("EBCDIC")
}
def autoDetectXMLEncoding(buffer):
""" buffer -> encoding_name
The buffer should be at least 4 bytes long.
Returns None if encoding cannot be detected.
Note that encoding_name might not have an installed
decoder (e.g. EBCDIC)
"""
# a more efficient implementation would not decode the whole
# buffer at once but otherwise we'd have to decode a character at
# a time looking for the quote character...that's a pain
encoding = "utf_8" # according to the XML spec, this is the default
# this code successively tries to refine the default
# whenever it fails to refine, it falls back to
# the last place encoding was set.
if len(buffer)>=4:
bytes = (byte1, byte2, byte3, byte4) = tuple(map(ord, buffer[0:4]))
enc_info = autodetect_dict.get(bytes, None)
if not enc_info: # try autodetection again removing potentially
# variable bytes
bytes = (byte1, byte2, None, None)
enc_info = autodetect_dict.get(bytes)
else:
enc_info = None
if enc_info:
encoding = enc_info # we've got a guess... these are
#the new defaults
# try to find a more precise encoding using xml declaration
secret_decoder_ring = codecs.lookup(encoding)[1]
(decoded,length) = secret_decoder_ring(buffer)
first_line = decoded.split("\n")[0]
if first_line and first_line.startswith(u"<?xml"):
encoding_pos = first_line.find(u"encoding")
if encoding_pos!=-1:
# look for double quote
quote_pos=first_line.find('"', encoding_pos)
if quote_pos==-1: # look for single quote
quote_pos=first_line.find("'", encoding_pos)
if quote_pos>-1:
quote_char,rest=(first_line[quote_pos],
first_line[quote_pos+1:])
encoding=rest[:rest.find(quote_char)]
return encoding
def decoder(buffer):
encoding = autoDetectXMLEncoding(buffer)
return buffer.decode(encoding).encode('utf8')
|
mit
| -9,123,745,468,234,891,000
| 40.054054
| 78
| 0.560895
| false
| 3.755253
| false
| false
| false
|
FreeON/spammpack
|
src-C/prototype/stream_multiply/generate_SSE_assembly.py
|
1
|
19562
|
#!/usr/bin/python
#
# Generate SSE assembly code for a kernel operating on a 4x4 blocks.
import math, optparse, sys
class box:
def __init__ (self, i_1, i_2, j_1, j_2):
self.i_1 = i_1
self.i_2 = i_2
self.j_1 = j_1
self.j_2 = j_2
def __str__ (self):
return "box: [%d-%d][%d-%d]" % (self.i_1, self.i_2, self.j_1, self.j_2)
class counter:
  def __init__ (self, initial_value=0):
    self.counter = initial_value
def increment (self):
self.counter += 1
def get (self):
return self.counter
# Generate matrix product with Z-curve ordering.
def generate_Z_curve (A, B, C, block_counter):
if A.i_2-A.i_1 == 1 and A.j_2-A.j_1 == 1:
i = C.i_1
j = C.j_1
k = A.j_1
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
print
print padding + "# Check norm of product A(%d,%d)*B(%d,%d)." % (i+1, k+1, k+1, j+1)
print padding + "movss 0x%x(multiply_stream, base_pointer), B1" % ((i*options.N+k)*4+24)
print padding + "mulss 0x%x(multiply_stream, base_pointer), B1" % ((k*options.N+j+options.N**2)*4+24)
print padding + "comiss tolerance, B1"
print padding + "jb block_%d" % (block_counter.get())
print
print padding + "# Reset C(%d,%d) matrix block accumulators." % (i+1, j+1)
print padding + "xorps C1, C1"
print padding + "xorps C2, C2"
print padding + "xorps C3, C3"
print padding + "xorps C4, C4"
print
print padding + "# Calculate C(%d,%d) = A(%d,%d)*B(%d,%d)." % (i+1, j+1, i+1, k+1, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B1" % (0*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B2" % (1*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B3" % (2*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B4" % (3*4*4, k+1, j+1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+0)*4*4, i+1, k+1, 1, 1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+1)*4*4, i+1, k+1, 1, 2)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+2)*4*4, i+1, k+1, 1, 3)
print padding + "mulps B1, A11"
print padding + "mulps B2, A12"
print padding + "addps A11, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+3)*4*4, i+1, k+1, 1, 4)
print padding + "mulps B3, A13"
print padding + "addps A12, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+0)*4*4, i+1, k+1, 2, 1)
print padding + "mulps B4, A14"
print padding + "addps A13, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+1)*4*4, i+1, k+1, 2, 2)
print padding + "mulps B1, A21"
print padding + "addps A14, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+2)*4*4, i+1, k+1, 2, 3)
print padding + "mulps B2, A22"
print padding + "addps A21, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+3)*4*4, i+1, k+1, 2, 4)
print padding + "mulps B3, A23"
print padding + "addps A22, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+0)*4*4, i+1, k+1, 3, 1)
print padding + "mulps B4, A24"
print padding + "addps A23, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+1)*4*4, i+1, k+1, 3, 2)
print padding + "mulps B1, A31"
print padding + "addps A24, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+2)*4*4, i+1, k+1, 3, 3)
print padding + "mulps B2, A32"
print padding + "addps A31, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+3)*4*4, i+1, k+1, 3, 4)
print padding + "mulps B3, A33"
print padding + "addps A32, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+0)*4*4, i+1, k+1, 4, 1)
print padding + "mulps B4, A34"
print padding + "addps A33, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+1)*4*4, i+1, k+1, 4, 2)
print padding + "mulps B1, A41"
print padding + "addps A34, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+2)*4*4, i+1, k+1, 4, 3)
print padding + "mulps B2, A42"
print padding + "addps A41, C4"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+3)*4*4, i+1, k+1, 4, 4)
print padding + "mulps B3, A43"
print padding + "addps A42, C4"
print padding + "mulps B4, A44"
print padding + "addps A43, C4"
print padding + "addps A44, C4"
print
print padding + "# Multiply C(%d,%d) by alpha." % (i+1, j+1)
print padding + "mulps alpha, C1"
print padding + "mulps alpha, C2"
print padding + "mulps alpha, C3"
print padding + "mulps alpha, C4"
print
print padding + "# Add accumulated C(%d,%d) to already existing." % (i+1, j+1)
print padding + "addps 0x0+C_OFFSET_%d%d(C), C1" % (i+1, j+1)
print padding + "addps 0x10+C_OFFSET_%d%d(C), C2" % (i+1, j+1)
print padding + "addps 0x20+C_OFFSET_%d%d(C), C3" % (i+1, j+1)
print padding + "addps 0x30+C_OFFSET_%d%d(C), C4" % (i+1, j+1)
print
print padding + "# Write out C(%d,%d) submatrix block." % (i+1, j+1)
print padding + "movaps C1, 0x0+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C2, 0x10+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C3, 0x20+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C4, 0x30+C_OFFSET_%d%d(C)" % (i+1, j+1)
else:
A_11 = box(A.i_1, A.i_1+(A.i_2-A.i_1)/2, A.j_1, A.j_1+(A.j_2-A.j_1)/2)
A_12 = box(A.i_1, A.i_1+(A.i_2-A.i_1)/2, A.j_1+(A.j_2-A.j_1)/2, A.j_2)
A_21 = box(A.i_1+(A.i_2-A.i_1)/2, A.i_2, A.j_1, A.j_1+(A.j_2-A.j_1)/2)
A_22 = box(A.i_1+(A.i_2-A.i_1)/2, A.i_2, A.j_1+(A.j_2-A.j_1)/2, A.j_2)
B_11 = box(B.i_1, B.i_1+(B.i_2-B.i_1)/2, B.j_1, B.j_1+(B.j_2-B.j_1)/2)
B_12 = box(B.i_1, B.i_1+(B.i_2-B.i_1)/2, B.j_1+(B.j_2-B.j_1)/2, B.j_2)
B_21 = box(B.i_1+(B.i_2-B.i_1)/2, B.i_2, B.j_1, B.j_1+(B.j_2-B.j_1)/2)
B_22 = box(B.i_1+(B.i_2-B.i_1)/2, B.i_2, B.j_1+(B.j_2-B.j_1)/2, B.j_2)
C_11 = box(C.i_1, C.i_1+(C.i_2-C.i_1)/2, C.j_1, C.j_1+(C.j_2-C.j_1)/2)
C_12 = box(C.i_1, C.i_1+(C.i_2-C.i_1)/2, C.j_1+(C.j_2-C.j_1)/2, C.j_2)
C_21 = box(C.i_1+(C.i_2-C.i_1)/2, C.i_2, C.j_1, C.j_1+(C.j_2-C.j_1)/2)
C_22 = box(C.i_1+(C.i_2-C.i_1)/2, C.i_2, C.j_1+(C.j_2-C.j_1)/2, C.j_2)
generate_Z_curve(A_11, B_11, C_11, block_counter)
generate_Z_curve(A_12, B_21, C_11, block_counter)
generate_Z_curve(A_11, B_12, C_12, block_counter)
generate_Z_curve(A_12, B_22, C_12, block_counter)
generate_Z_curve(A_21, B_11, C_21, block_counter)
generate_Z_curve(A_22, B_21, C_21, block_counter)
generate_Z_curve(A_21, B_12, C_22, block_counter)
generate_Z_curve(A_22, B_22, C_22, block_counter)
# Main program.
parser = optparse.OptionParser(description =
"""This script generates a stream element kernel operating on 4x4 matrix
blocks. The kernel generated is written using assembly instructions assuming a
processor with SSE2.""")
parser.add_option("-N",
metavar = "N",
help = "generate fully unrolled kernel for NxN matrix of 4x4 matrix blocks [default: %default]",
dest = "N",
type = "int",
default = 1)
parser.add_option("--unroll",
metavar = "N",
help = "fully unroll loops only at and below a matrix size of NxN [default: %default]",
dest = "N_unroll",
type = "int",
default = 1)
parser.add_option("--name",
metavar = "func",
help = "set function name to \"func\" [default: %default]",
dest = "functionName",
type = "string",
default = "stream_kernel")
parser.add_option("--no-checks",
action = "store_false",
default = True,
help = "generate code without any norm checks [default: %default]",
dest = "generate_checks")
parser.add_option("--Z-curve",
action = "store_true",
default = False,
help = """layout the multiply along a Z-curve as opposed to regular
row-major ordering [default: %default]""",
dest = "Z_curve_ordering")
( options, arguments ) = parser.parse_args()
# Check N.
if options.N <= 0:
print "N needs to be a positive number > 0"
sys.exit(1)
d = int(math.log(options.N)/math.log(2))
if 2**d != options.N:
print "N needs to be a power of 2"
sys.exit(1)
# Check loop unrolling.
if options.N_unroll <= 0:
options.N_unroll = 1
if options.N_unroll > options.N:
options.N_unroll = options.N
# Assembly code indentation.
padding = " "
# Generate assembly code.
print "# This code was auto-generated by %s." % (sys.argv[0])
print "# The command line given was:"
print "#"
sys.stdout.write("# ")
for i in range(len(sys.argv)):
sys.stdout.write(" %s" % (sys.argv[i]))
sys.stdout.write("\n")
# Define some things.
print
print "# Function ABI."
print "#define number_stream_elements %rdi"
print "#define alpha %xmm0"
print "#define tolerance %xmm1"
print "#define multiply_stream %rsi"
print
print "# Define SSE registers used for C matrix"
print "#define C1 %xmm2"
print "#define C2 %xmm3"
print "#define C3 %xmm4"
print "#define C4 %xmm5"
print
print "# Define SSE registeres used for B matrix"
print "#define B1 %xmm6"
print "#define B2 %xmm7"
print "#define B3 %xmm8"
print "#define B4 %xmm9"
print
print "# Define SSE registeres used for A matrix"
print "#define A11 %xmm10"
print "#define A12 %xmm11"
print "#define A13 %xmm12"
print "#define A14 %xmm13"
print "#define A21 %xmm14"
print "#define A22 %xmm15"
print "#define A23 %xmm10"
print "#define A24 %xmm11"
print "#define A31 %xmm12"
print "#define A32 %xmm13"
print "#define A33 %xmm14"
print "#define A34 %xmm15"
print "#define A41 %xmm10"
print "#define A42 %xmm11"
print "#define A43 %xmm12"
print "#define A44 %xmm13"
print
print "# Define loop variables."
print "#define index %rax"
print "#define base_pointer %rdx"
#print "#define i_outer %r10"
#print "#define j_outer %r11"
print
print "# Define pointers to matrix blocks in stream."
print "#define A %r8"
print "#define B %rcx"
print "#define C %r9"
# Generate offsets.
print
print "# Define offsets into matrix blocks."
print
for i in range(options.N):
for j in range(options.N):
print "#define A_OFFSET_%d%d (%d*%d+%d)*64*4 // %d = 0x%x" % (i+1, j+1, i, options.N, j, (i*options.N+j)*64, (i*options.N+j)*64)
print
for i in range(options.N):
for j in range(options.N):
print "#define B_OFFSET_%d%d (%d*%d+%d)*16*4 // %d = 0x%x" % (i+1, j+1, i, options.N, j, (i*options.N+j)*16, (i*options.N+j)*16)
print
for i in range(options.N):
for j in range(options.N):
print "#define C_OFFSET_%d%d (%d*%d+%d)*16*4 // %d = 0x%x" % (i+1, j+1, i, options.N, j, (i*options.N+j)*16, (i*options.N+j)*16)
# Print some C function declarations.
print
print "# C function declaration"
print "#"
print "# struct multiply_stream_t"
print "# {"
print "# float *A_block;"
print "# float *B_block;"
print "# float *C_block;"
print "# float norm[%d];" % (2*options.N**2)
print "# };"
print "#"
print "# void"
print "# %s (const unsigned int number_stream_elements," % (options.functionName)
print "# float alpha,"
print "# float tolerance,"
print "# struct multiply_stream_t *multiply_stream);"
# Start the function prolog.
print
print padding + "# Function prolog."
print padding + ".text"
print padding + ".align 256"
print padding + ".global %s" % (options.functionName)
print padding + ".type %s, @function" % (options.functionName)
print
print "%s:" % (options.functionName)
print
print padding + "# Push used registers on stack."
print padding + "push index"
print padding + "push base_pointer"
print padding + "push A"
print padding + "push B"
print padding + "push C"
#print padding + "push i_outer"
#print padding + "push j_outer"
print
print padding + "# Copy alpha into all 4 elements of SSE register."
print padding + "shufps $0x0, alpha, alpha"
print
print padding + "# Divide number of stream elements by %d to simulate stride of %d." % (options.N**3, options.N**3)
print padding + "shr $%i, number_stream_elements" % (3*math.log(options.N)/math.log(2))
print
print padding + "# Test whether number_stream_elements is zero."
print padding + "test number_stream_elements, number_stream_elements"
print padding + "jbe done"
print
print padding + "# Set loop index to zero."
print padding + "xor base_pointer, base_pointer"
print padding + "xor index, index"
block_counter = counter(1)
# Beginning of loop.
print
print padding + ".align 16"
print "loop:"
print
print padding + "# Set the base pointer using sizeof(multiply_stream_t) = 0x98."
print padding + "imul $0x98, base_pointer, base_pointer"
print
print padding + "# Load pointers to stream matrix blocks."
print padding + "mov (multiply_stream, base_pointer, 1), A"
print padding + "mov 0x8(multiply_stream, base_pointer, 1), B"
print padding + "mov 0x10(multiply_stream, base_pointer, 1), C"
if options.Z_curve_ordering:
generate_Z_curve(box(0, options.N, 0, options.N),
box(0, options.N, 0, options.N),
box(0, options.N, 0, options.N),
block_counter)
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
else:
#if options.N_unroll < options.N:
# # Generate outer loop code.
# print
# print padding + ".align 16"
# print "outer_i:"
#if options.N_unroll < options.N:
# # Generate outer loop code.
# print
# print padding + ".align 16"
# print "outer_j:"
for i in range(options.N):
for j in range(options.N):
print
print padding + "# Reset C(%d,%d) matrix block accumulators." % (i+1, j+1)
print padding + "xorps C1, C1"
print padding + "xorps C2, C2"
print padding + "xorps C3, C3"
print padding + "xorps C4, C4"
for k in range(options.N):
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
print
print padding + "# Check norm of product."
print padding + "movss 0x%x(multiply_stream, base_pointer), B1" % ((i*options.N+k)*4+24)
print padding + "mulss 0x%x(multiply_stream, base_pointer), B1" % ((k*options.N+j+options.N**2)*4+24)
print padding + "comiss tolerance, B1"
print padding + "jb block_%d" % (block_counter.get())
print
print padding + "# Calculate C(%d,%d) = A(%d,%d)*B(%d,%d)." % (i+1, j+1, i+1, k+1, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B1" % (0*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B2" % (1*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B3" % (2*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B4" % (3*4*4, k+1, j+1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+0)*4*4, i+1, k+1, 1, 1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+1)*4*4, i+1, k+1, 1, 2)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+2)*4*4, i+1, k+1, 1, 3)
print padding + "mulps B1, A11"
print padding + "mulps B2, A12"
print padding + "addps A11, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+3)*4*4, i+1, k+1, 1, 4)
print padding + "mulps B3, A13"
print padding + "addps A12, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+0)*4*4, i+1, k+1, 2, 1)
print padding + "mulps B4, A14"
print padding + "addps A13, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+1)*4*4, i+1, k+1, 2, 2)
print padding + "mulps B1, A21"
print padding + "addps A14, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+2)*4*4, i+1, k+1, 2, 3)
print padding + "mulps B2, A22"
print padding + "addps A21, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+3)*4*4, i+1, k+1, 2, 4)
print padding + "mulps B3, A23"
print padding + "addps A22, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+0)*4*4, i+1, k+1, 3, 1)
print padding + "mulps B4, A24"
print padding + "addps A23, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+1)*4*4, i+1, k+1, 3, 2)
print padding + "mulps B1, A31"
print padding + "addps A24, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+2)*4*4, i+1, k+1, 3, 3)
print padding + "mulps B2, A32"
print padding + "addps A31, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+3)*4*4, i+1, k+1, 3, 4)
print padding + "mulps B3, A33"
print padding + "addps A32, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+0)*4*4, i+1, k+1, 4, 1)
print padding + "mulps B4, A34"
print padding + "addps A33, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+1)*4*4, i+1, k+1, 4, 2)
print padding + "mulps B1, A41"
print padding + "addps A34, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+2)*4*4, i+1, k+1, 4, 3)
print padding + "mulps B2, A42"
print padding + "addps A41, C4"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+3)*4*4, i+1, k+1, 4, 4)
print padding + "mulps B3, A43"
print padding + "addps A42, C4"
print padding + "mulps B4, A44"
print padding + "addps A43, C4"
print padding + "addps A44, C4"
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
print
print padding + "# Multiply C(%d,%d) by alpha." % (i+1, j+1)
print padding + "mulps alpha, C1"
print padding + "mulps alpha, C2"
print padding + "mulps alpha, C3"
print padding + "mulps alpha, C4"
print
print padding + "# Add accumulated C(%d,%d) to already existing." % (i+1, j+1)
print padding + "addps 0x0+C_OFFSET_%d%d(C), C1" % (i+1, j+1)
print padding + "addps 0x10+C_OFFSET_%d%d(C), C2" % (i+1, j+1)
print padding + "addps 0x20+C_OFFSET_%d%d(C), C3" % (i+1, j+1)
print padding + "addps 0x30+C_OFFSET_%d%d(C), C4" % (i+1, j+1)
print
print padding + "# Write out C(%d,%d) submatrix block." % (i+1, j+1)
print padding + "movaps C1, 0x0+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C2, 0x10+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C3, 0x20+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C4, 0x30+C_OFFSET_%d%d(C)" % (i+1, j+1)
# End of loop.
print
print padding + "# Loop end."
print padding + "inc index"
print padding + "mov index, base_pointer"
print padding + "cmp number_stream_elements, index"
print padding + "jb loop"
# Leave function.
print
print padding + ".align 16"
print "done:"
print
print padding + "# Pop registers from stack."
#print padding + "pop j_outer"
#print padding + "pop i_outer"
print padding + "pop C"
print padding + "pop B"
print padding + "pop A"
print padding + "pop base_pointer"
print padding + "pop index"
print
print padding + "# Return from function."
print padding + "ret"
# Start function epilog.
print
print padding + "# Function epilog."
print padding + ".size %s, .-%s" % (options.functionName, options.functionName)
|
bsd-3-clause
| -6,811,387,707,721,082,000
| 35.979206
| 132
| 0.586596
| false
| 2.467146
| false
| false
| false
|
smjurcak/csm
|
csmserver/views/download_dashboard.py
|
1
|
8882
|
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from flask import Blueprint
from flask import abort
from flask import jsonify
from flask import render_template
from flask.ext.login import login_required
from flask.ext.login import current_user
from database import DBSession
from common import can_install
from common import can_delete_install
from common import get_server_by_id
from models import logger
from models import SystemOption
from models import DownloadJob
from models import DownloadJobHistory
from constants import UNKNOWN
from constants import NAME_CSM
from constants import NAME_CSMSERVER
from constants import NAME_CSM_DATA
from constants import NAME_REPOSITORY
from constants import JobStatus
from constants import UserPrivilege
from constants import get_repository_directory
from utils import is_empty
from utils import get_file_list
from utils import get_tarfile_file_list
from utils import datetime_from_local_to_utc
from tarfile import ReadError
import os
import datetime
download_dashboard = Blueprint('download_dashboard', __name__, url_prefix='/download_dashboard')
@download_dashboard.route('/')
@login_required
def home():
if not can_install(current_user):
abort(401)
absolute_path = os.path.abspath('.')
csm_repository_path = absolute_path.replace(NAME_CSM + '/' + NAME_CSMSERVER,
NAME_CSM_DATA + '/' + NAME_REPOSITORY)
return render_template('host/download_dashboard.html',
csm_repository_path=csm_repository_path,
system_option=SystemOption.get(DBSession()))
def get_download_job_json_dict(db_session, download_jobs):
rows = []
for download_job in download_jobs:
if isinstance(download_job, DownloadJob) or isinstance(download_job, DownloadJobHistory):
row = dict()
row['download_job_id'] = download_job.id
row['image_name'] = download_job.cco_filename
row['scheduled_time'] = download_job.scheduled_time
server = get_server_by_id(db_session, download_job.server_id)
if server is not None:
row['server_repository'] = server.hostname
if not is_empty(download_job.server_directory):
row['server_repository'] = row['server_repository'] + \
'<br><span style="color: Gray;"><b>Sub-directory:</b></span> ' + \
download_job.server_directory
else:
row['server_repository'] = UNKNOWN
row['status'] = download_job.status
row['status_time'] = download_job.status_time
row['created_by'] = download_job.created_by
if download_job.trace is not None:
row['trace'] = download_job.id
rows.append(row)
return {'data': rows}
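# Hypothetical caller of the helper above (no such route exists in this module; it is
# shown only to illustrate the expected arguments and the {'data': rows} return shape):
#
# @download_dashboard.route('/api/get_download_jobs/')
# @login_required
# def api_get_download_jobs():
#     db_session = DBSession()
#     download_jobs = db_session.query(DownloadJob).all()
#     return jsonify(**get_download_job_json_dict(db_session, download_jobs))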
@download_dashboard.route('/api/get_files_from_csm_repository/')
@login_required
def api_get_files_from_csm_repository():
rows = []
file_list = get_file_list(get_repository_directory())
for filename in file_list:
if filename.endswith('.tar'):
statinfo = os.stat(get_repository_directory() + filename)
row = dict()
row['image_name'] = filename
row['image_size'] = str(statinfo.st_size)
row['downloaded_time'] = datetime_from_local_to_utc(datetime.datetime.fromtimestamp(statinfo.st_mtime))
rows.append(row)
return jsonify(**{'data': rows})
@download_dashboard.route('/api/image/<image_name>/delete/', methods=['DELETE'])
@login_required
def api_delete_image_from_repository(image_name):
if current_user.privilege != UserPrivilege.ADMIN and current_user.privilege != UserPrivilege.NETWORK_ADMIN:
abort(401)
tar_image_path = get_repository_directory() + image_name
try:
# Remove the tar file contents
file_list = get_tarfile_file_list(tar_image_path)
for filename in file_list:
try:
file_path = get_repository_directory() + filename
if os.path.exists(file_path):
os.remove(file_path)
except:
logger.exception('api_delete_image_from_repository() hit exception filename=' + file_path)
except ReadError:
        # In case it is a partially downloaded TAR file.
pass
try:
# Remove the actual tar file
file_path = tar_image_path
if os.path.exists(file_path):
os.remove(file_path)
# Remove the auxiliary file for the tar file
file_path = tar_image_path + '.size'
if os.path.exists(file_path):
os.remove(file_path)
except:
logger.exception('api_delete_image_from_repository() hit exception filename=' + file_path)
return jsonify({'status': 'Failed'})
return jsonify({'status': 'OK'})
@download_dashboard.route('/hosts/delete_all_failed_downloads/', methods=['DELETE'])
@login_required
def delete_all_failed_downloads():
if not can_delete_install(current_user):
abort(401)
return delete_all_downloads(status=JobStatus.FAILED)
@download_dashboard.route('/hosts/delete_all_scheduled_downloads/', methods=['DELETE'])
@login_required
def delete_all_scheduled_downloads():
if not can_delete_install(current_user):
abort(401)
return delete_all_downloads()
def delete_all_downloads(status=None):
db_session = DBSession()
try:
download_jobs = db_session.query(DownloadJob).filter(DownloadJob.status == status)
for download_job in download_jobs:
db_session.delete(download_job)
db_session.commit()
return jsonify({'status': 'OK'})
except:
logger.exception('delete_download_job() hit exception')
return jsonify({'status': 'Failed: check system logs for details'})
@download_dashboard.route('/delete_download_job/<int:id>/', methods=['DELETE'])
@login_required
def delete_download_job(id):
if not can_delete_install(current_user):
abort(401)
db_session = DBSession()
download_job = db_session.query(DownloadJob).filter(DownloadJob.id == id).first()
if download_job is None:
abort(404)
try:
# Download jobs that are in progress cannot be deleted.
if download_job.status is None or download_job.status == JobStatus.FAILED:
db_session.delete(download_job)
db_session.commit()
return jsonify({'status': 'OK'})
except:
logger.exception('delete_download_job() hit exception')
return jsonify({'status': 'Failed: check system logs for details'})
@download_dashboard.route('/resubmit_download_job/<int:id>/', methods=['POST'])
@login_required
def resubmit_download_job(id):
if not can_install(current_user):
abort(401)
db_session = DBSession()
download_job = db_session.query(DownloadJob).filter(DownloadJob.id == id).first()
if download_job is None:
abort(404)
try:
        # Reset the status fields so the scheduler will pick this download job up again.
download_job.status = None
download_job.status_time = None
db_session.commit()
return jsonify({'status': 'OK'})
except:
logger.exception('resubmit_download_job() hit exception')
return jsonify({'status': 'Failed: check system logs for details'})
|
apache-2.0
| -7,126,437,171,829,432,000
| 34.670683
| 115
| 0.654582
| false
| 4.19556
| false
| false
| false
|
tdickers/mitmproxy
|
mitmproxy/utils.py
|
1
|
1071
|
from __future__ import absolute_import, print_function, division
import netlib.utils
pkg_data = netlib.utils.Data(__name__)
class LRUCache:
"""
A simple LRU cache for generated values.
"""
def __init__(self, size=100):
self.size = size
self.cache = {}
self.cacheList = []
def get(self, gen, *args):
"""
gen: A (presumably expensive) generator function. The identity of
gen is NOT taken into account by the cache.
            *args: A list of immutable arguments, used to establish identity by
            the cache, and passed to gen to generate values.
"""
if args in self.cache:
self.cacheList.remove(args)
self.cacheList.insert(0, args)
return self.cache[args]
else:
ret = gen(*args)
self.cacheList.insert(0, args)
self.cache[args] = ret
if len(self.cacheList) > self.size:
d = self.cacheList.pop()
self.cache.pop(d)
return ret
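# Minimal usage sketch (not part of the original module). The cache keys on the
# positional arguments only; the generator function's identity is ignored, as the
# docstring above notes.
if __name__ == "__main__":
    cache = LRUCache(size=2)

    def expensive(x):
        print("computing %d" % x)  # printed only on a cache miss
        return x * x

    assert cache.get(expensive, 3) == 9  # miss: computes and caches 9
    assert cache.get(expensive, 3) == 9  # hit: returned straight from the cache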
|
mit
| 1,121,692,843,674,526,200
| 27.184211
| 78
| 0.549953
| false
| 4.041509
| false
| false
| false
|
Cladis/wikilabels
|
wikilabels/database/worksets.py
|
1
|
8760
|
import psycopg2
from psycopg2.extras import Json
from .collection import Collection
from .errors import IntegrityError, NotFoundError
class Worksets(Collection):
def get(self, workset_id, stats=False):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT
id, user_id,
campaign_id,
EXTRACT(EPOCH FROM created) AS created,
EXTRACT(EPOCH FROM expires) AS expires
FROM workset
WHERE id = %(workset_id)s
ORDER BY id
""", {'workset_id': workset_id})
try:
doc = next(cursor)
if stats: doc['stats'] = self.stats_for(workset_id)
return doc
except StopIteration:
raise NotFoundError("workset_id={0}".format(workset_id))
def stats_for(self, workset_id):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT
COUNT(workset_task.task_id) AS tasks,
COALESCE(SUM(label.task_id IS NOT NULL::int), 0) AS labeled
FROM workset
INNER JOIN workset_task ON workset_task.workset_id = workset.id
LEFT JOIN label ON
label.task_id = workset_task.task_id AND
label.user_id = workset.user_id
WHERE workset.id = %(workset_id)s
""", {'workset_id': workset_id})
try:
return next(cursor)
except StopIteration:
raise NotFoundError("workset_id={0}".format(workset_id))
def for_campaign(self, campaign_id, stats=False):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT
id, user_id,
campaign_id,
EXTRACT(EPOCH FROM created) AS created,
EXTRACT(EPOCH FROM expires) AS expires
FROM workset
WHERE campaign_id = %(campaign_id)s
ORDER BY id
""", {'campaign_id': campaign_id})
rows = []
for row in cursor:
if stats: row['stats'] = self.stats_for(row['id'])
rows.append(row)
return rows
def for_user(self, user_id, campaign_id=None, stats=False):
with self.db.conn.cursor() as cursor:
conditions = ["workset.user_id = %(user_id)s"]
if campaign_id is not None:
conditions.append("workset.campaign_id = %(campaign_id)s")
where = "\nWHERE " + " AND ".join(conditions) + "\n"
cursor.execute("""
SELECT
id, user_id,
campaign_id,
EXTRACT(EPOCH FROM created) AS created,
EXTRACT(EPOCH FROM expires) AS expires
FROM workset
""" + where + """
ORDER BY id
""", {'user_id': user_id,
'campaign_id': campaign_id})
rows = []
for row in cursor:
if stats: row['stats'] = self.stats_for(row['id'])
rows.append(row)
return rows
def open_workset_for_user(self, campaign_id, user_id):
with self.db.conn.cursor() as cursor:
# Check if this user already has an open workset
cursor.execute("""
SELECT
workset.id
FROM workset
INNER JOIN workset_task ON workset.id = workset_task.workset_id
INNER JOIN task ON workset_task.task_id = task.id
LEFT JOIN label ON
task.id = label.task_id AND
workset.user_id = label.user_id
WHERE workset.user_id = %(user_id)s AND
workset.campaign_id = %(campaign_id)s AND
label.task_id IS NULL
LIMIT 1;
""", {'user_id': user_id,
'campaign_id': campaign_id})
rows = cursor.fetchall()
if len(rows) > 0:
return rows[0]['id']
else:
return None
def assign(self, campaign_id, user_id, stats=False):
with self.db.conn.cursor() as cursor:
campaign = self.db.campaigns.get(campaign_id)
if not campaign['active']:
raise IntegrityError("Campaign {0} not active." \
.format(campaign_id))
workset_id = self.open_workset_for_user(campaign_id, user_id)
if workset_id is not None:
raise IntegrityError(("Incomplete workset_id={0} already " +
"assigned to user_id={1}") \
.format(workset_id, user_id))
if not self.db.campaigns.has_open_tasks(campaign_id, user_id):
raise IntegrityError(("No tasks available for user_id={0} " +
"in campaign_id={1}") \
.format(user_id, campaign_id))
try:
# Create a new workset
cursor.execute("""
INSERT INTO workset VALUES
(DEFAULT, %(campaign_id)s, %(user_id)s, NOW(),
NOW() + INTERVAL '1 DAY') RETURNING id;
""", {'campaign_id': campaign_id,
'user_id': user_id})
workset_id = cursor.fetchone()['id']
# Assign tasks to the workset
cursor.execute("""
INSERT INTO workset_task
SELECT
%(workset_id)s AS workset_id,
task.id AS task_id
FROM campaign
INNER JOIN task ON task.campaign_id = campaign.id
LEFT JOIN label ON
label.task_id = task.id
WHERE campaign.id = %(campaign_id)s
GROUP BY task.id, campaign.labels_per_task
HAVING
COUNT(label.task_id) < campaign.labels_per_task AND
SUM((label.user_id IS NOT NULL AND
label.user_id = %(user_id)s)::int) = 0
ORDER BY RANDOM()
LIMIT %(tasks_per_assignment)s
""", {'campaign_id': campaign_id,
'workset_id': workset_id,
'user_id': user_id,
'tasks_per_assignment': campaign['tasks_per_assignment']})
self.db.conn.commit()
except Exception:
self.db.conn.rollback()
raise
return self.get(workset_id, stats);
def users(self):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT DISTINCT user_id
FROM workset
ORDER BY user_id
""")
return [row['user_id'] for row in cursor]
def abandon(self, workset_id, user_id):
with self.db.conn.cursor() as cursor:
            # Check if this user owns this workset
cursor.execute("""
SELECT 1 FROM workset
WHERE id = %(workset_id)s AND
user_id = %(user_id)s
""", {'workset_id': workset_id, 'user_id': user_id})
if len(cursor.fetchall()) == 0:
raise IntegrityError("workset_id={0} does not belong to user_id={1}" \
.format(workset_id, user_id))
            # Clear incomplete assignments
try:
cursor.execute("""
DELETE FROM workset_task
WHERE
workset_id = %(workset_id)s AND
task_id IN (
SELECT workset_task.task_id
FROM workset_task
LEFT JOIN label ON
workset_task.task_id = label.task_id AND
label.user_id = %(user_id)s
WHERE
workset_id = %(workset_id)s AND
label.task_id IS NULL
)
""", {'workset_id': workset_id, 'user_id': user_id})
self.db.conn.commit()
except Exception:
self.db.conn.rollback()
raise
return self.get(workset_id)
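# Hedged usage sketch (the `db` handle and its `worksets` collection come from the
# wikilabels database layer, which is not part of this file):
#
# workset = db.worksets.assign(campaign_id=1, user_id=42, stats=True)
# print(workset['id'], workset['stats'])  # stats_for() supplies the 'tasks'/'labeled' counts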
|
mit
| 8,908,630,188,383,749,000
| 37.086957
| 86
| 0.457648
| false
| 4.555382
| false
| false
| false
|
ljhandlwt/flask-study
|
flask1/app/models.py
|
1
|
3601
|
#coding=utf-8
from flask import url_for
from flask.ext.sqlalchemy import SQLAlchemy
from app import db,app
from hashlib import md5
from datetime import datetime
import os
user_user_concern=db.Table('user_user_concern',
db.Column('user1_id',db.Integer,db.ForeignKey('user.id'),primary_key=True),
db.Column('user2_id',db.Integer,db.ForeignKey('user.id'),primary_key=True))
user_user_blacklist=db.Table('user_user_blacklist',
db.Column('user1_id',db.Integer,db.ForeignKey('user.id'),primary_key=True),
db.Column('user2_id',db.Integer,db.ForeignKey('user.id'),primary_key=True))
class User(db.Model):
id=db.Column(db.Integer,primary_key=True)
nickname=db.Column(db.String(80),unique=True)
password=db.Column(db.String(120))
email=db.Column(db.String(120),unique=True)
information=db.Column(db.String(250))
posts=db.relationship('Post',backref='author',lazy='dynamic')
has_avatar=db.Column(db.Integer,default=0)
    create_time=db.Column(db.DateTime,default=datetime.now)  # pass the callable so each new row gets its own timestamp
messages=db.relationship('Message',backref='people',lazy='dynamic')
concern=db.relationship('User',secondary=user_user_concern,primaryjoin=id==user_user_concern.c.user1_id,
secondaryjoin=id==user_user_concern.c.user2_id,backref='concerned')
blacklist=db.relationship('User',secondary=user_user_blacklist,primaryjoin=id==user_user_blacklist.c.user1_id,
secondaryjoin=id==user_user_blacklist.c.user2_id,backref='blackedlist')
is_authenticated=True
is_active=True
is_anonymous=False
def __init__(self,nickname,password,email):
self.nickname=nickname
self.password=password
self.email=email
self.information=u'这个人很懒,什么都没有写...'
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
def avatar(self):
if self.has_avatar:
return '/static/avatar/'+self.nickname+'.jpg'
else:
return url_for('static',filename='favicon.ico')
def has_concern(self,user):
return self.concern.count(user)
def has_concerned(self,user):
return self.concerned.count(user)
def has_black(self,user):
return self.blacklist.count(user)
def count_new_message(self):
return Message.query.filter_by(user_id=self.id,has_showed=0).count()
def is_admin(self):
return Admin.query.filter_by(nickname=self.nickname).first() is not None
def __repr__(self):
return '<User %r>' % self.nickname
class Post(db.Model):
id=db.Column(db.Integer,primary_key=True)
title=db.Column(db.String(80))
body=db.Column(db.Text)
date=db.Column(db.DateTime)
user_id=db.Column(db.Integer,db.ForeignKey('user.id'))
def __init__(self,title,body,author):
self.title=title
self.body=body
self.date=datetime.now()
self.author=author
def __repr__(self):
return '<Post %r>' % self.body
class Message(db.Model):
id=db.Column(db.Integer,primary_key=True)
content=db.Column(db.Text)
user_id=db.Column(db.Integer,db.ForeignKey('user.id'))
date=db.Column(db.DateTime)
has_showed=db.Column(db.Integer,default=0)
def __init__(self,content,people):
self.content=content
self.people=people
self.date=datetime.now()
def __repr__(self):
return '<%r>' % self.content
class Admin(db.Model):
id=db.Column(db.Integer,primary_key=True)
nickname=db.Column(db.String(80),unique=True)
def __repr__(self):
return '<User %r>' % self.nickname
class PublicConfig(db.Model):
id=db.Column(db.Integer,primary_key=True)
sicily_message=db.Column(db.String(500))
def __repr__(self):
return '<'+self.sicily_message+'>'
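# Illustrative usage (assumes an initialised Flask application context; not part of
# the original module):
#
# user = User(nickname=u'alice', password=u'secret', email=u'alice@example.com')
# db.session.add(user)
# db.session.add(Post(title=u'hello', body=u'first post', author=user))
# db.session.commit()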
|
mit
| 1,049,869,579,345,582,300
| 29.13913
| 111
| 0.706063
| false
| 2.820331
| false
| false
| false
|
JohnLZeller/dd-agent
|
tests/test_cassandra.py
|
1
|
5263
|
import logging
import unittest
from dogstream.cassandra import parse_cassandra
logger = logging.getLogger(__name__)
class TestCassandraDogstream(unittest.TestCase):
def testStart(self):
events = parse_cassandra(logger, " INFO [main] 2012-12-11 21:46:26,995 StorageService.java (line 687) Bootstrap/Replace/Move completed! Now serving reads.")
self.assertTrue(events is None)
def testInfo(self):
events = parse_cassandra(logger, " INFO [CompactionExecutor:35] 2012-12-02 21:15:03,738 AutoSavingCache.java (line 268) Saved KeyCache (5 items) in 3 ms")
self.assertTrue(events is None)
def testWarn(self):
events = parse_cassandra(logger, " WARN [MemoryMeter:1] 2012-12-03 20:07:47,158 Memtable.java (line 197) setting live ratio to minimum of 1.0 instead of 0.9416553595658074")
self.assertTrue(events is None)
def testError(self):
for line in """\
ERROR [CompactionExecutor:518] 2012-12-11 21:35:29,686 AbstractCassandraDaemon.java (line 135) Exception in thread Thread[CompactionExecutor:518,1,RMI Runtime]
java.util.concurrent.RejectedExecutionException
at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:1768)
at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:767)
at java.util.concurrent.ScheduledThreadPoolExecutor.delayedExecute(ScheduledThreadPoolExecutor.java:215)
at java.util.concurrent.ScheduledThreadPoolExecutor.schedule(ScheduledThreadPoolExecutor.java:397)
at java.util.concurrent.ScheduledThreadPoolExecutor.submit(ScheduledThreadPoolExecutor.java:470)
at org.apache.cassandra.io.sstable.SSTableDeletingTask.schedule(SSTableDeletingTask.java:67)
at org.apache.cassandra.io.sstable.SSTableReader.releaseReference(SSTableReader.java:806)
at org.apache.cassandra.db.DataTracker.removeOldSSTablesSize(DataTracker.java:358)
at org.apache.cassandra.db.DataTracker.postReplace(DataTracker.java:330)
at org.apache.cassandra.db.DataTracker.replace(DataTracker.java:324)
at org.apache.cassandra.db.DataTracker.replaceCompactedSSTables(DataTracker.java:253)
at org.apache.cassandra.db.ColumnFamilyStore.replaceCompactedSSTables(ColumnFamilyStore.java:992)
at org.apache.cassandra.db.compaction.CompactionTask.execute(CompactionTask.java:200)
at org.apache.cassandra.db.compaction.CompactionManager$1.runMayThrow(CompactionManager.java:154)
at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:30)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
at java.util.concurrent.FutureTask.run(FutureTask.java:138)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:662)""".splitlines():
events = parse_cassandra(logger, line)
self.assertTrue(events is None)
def testCompactionStart(self):
events = parse_cassandra(logger, " INFO [CompactionExecutor:2] 2012-12-11 21:46:27,012 CompactionTask.java (line 109) Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-1", 'msg_text': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]", 'auto_priority': 0}])
def testCompactionEnd(self):
events = parse_cassandra(logger, "INFO [CompactionExecutor:2] 2012-12-11 21:46:27,095 CompactionTask.java (line 221) Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 ', 'msg_text': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.', 'auto_priority': 0}])
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| -4,113,671,884,750,322,700
| 86.716667
| 689
| 0.758123
| false
| 3.283219
| true
| false
| false
|
stanleykylee/worldbank-world-development-indicators-distributed-tf
|
src/build_sequences.py
|
1
|
1618
|
import csv
import json
# configuration
DATA_FILE = 'WDI_Data.csv'
INDICATORS_FILE = 'indicators.config'
OUTPUT_FILE = 'population-data.csv'
def make_country_dict():
country = {}
for i in range(0,57):
country[i] = {}
return country
# extract selected indicators and write time series entries of them to csv
def flush(dict):
out_str = ''
for entry in dict:
if len(dict[entry]) < len(selected_indicators):
continue
out_str = ''
for key in dict[entry]:
out_str += dict[entry][key] + ','
out_str = out_str[:-1] + '\n'
with open(OUTPUT_FILE, 'a') as f:
f.write(out_str)
f.flush()
return
# create list of indicators selected from dataset
with open(INDICATORS_FILE) as f:
selected_indicators = f.readlines()
selected_indicators = [elem.strip() for elem in selected_indicators]
with open(DATA_FILE, 'rb') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
idx = 0
for row in csv_reader:
if (idx == 0):
idx += 1
continue;
if (idx == 1):
country_dict = make_country_dict()
country = row[0]
if (row[0] != country):
country = row[0]
flush(country_dict)
country_dict = make_country_dict()
row_idx = 0
row_name = row[3]
if row_name in selected_indicators:
for item in row:
if (row_idx > 3 and item != ''):
country_dict[row_idx - 4][row_name] = item
row_idx += 1
idx += 1
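# Each row written by flush() is one (country, year) observation: the values of the
# selected indicators joined by commas. A row is only emitted when every selected
# indicator has a value for that year (see the length check in flush()). A
# hypothetical two-indicator row would look like:
#
#   65.4,1.2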
|
gpl-3.0
| -3,085,287,364,212,459,000
| 27.892857
| 74
| 0.543263
| false
| 3.611607
| false
| false
| false
|
whitehorse-io/encarnia
|
Encarnia/typeclasses/statue.py
|
1
|
1228
|
"""
Readables
A statue with a readable inscription.
"""
from evennia import DefaultObject, Command, CmdSet
from world import english_utils
from evennia.utils import list_to_string
from random import randint
import time
from typeclasses.objects import Object
# the "read" command
class CmdReadStatue(Command):
"""
Hit a box until it breaks
Usage:
hit box
If the object is breakable, it will eventually
break down after enough hits.
"""
key = "read statue"
#aliases = ["hit", "break box", "break"]
locks = "cmd:all()"
help_category = "General"
def func(self):
        # this Command sits on the statue, so we don't need to search for it
self.caller.msg(self.obj.db.text)
class StatueCmdSet(CmdSet):
key = "read_statue_cmdset"
def at_cmdset_creation(self):
self.add(CmdReadStatue())
class Statue(DefaultObject):
"""
"""
def at_object_creation(self):
# Inherit the object properties.
        super(Statue, self).at_object_creation()
self.aliases.add([])
self.db.desc = False
self.db.text = "Beware: Lions..."
self.cmdset.add(StatueCmdSet, permanent=True)
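# Illustrative in-game usage (assumes a running Evennia server; builder-command
# syntax may differ between Evennia versions):
#
#   @create/drop old monument:typeclasses.statue.Statue
#   read statue
#
# The second command is added by StatueCmdSet and echoes the object's db.text.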
|
mit
| -988,572,104,984,226,600
| 19.578947
| 73
| 0.614821
| false
| 3.622419
| false
| false
| false
|
shastah/spacewalk
|
backend/server/importlib/packageImport.py
|
1
|
22007
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Package import process
#
import rpm
import sys
import os.path
from importLib import GenericPackageImport, IncompletePackage, \
Import, InvalidArchError, InvalidChannelError, \
IncompatibleArchError
from mpmSource import mpmBinaryPackage
from spacewalk.common import rhn_pkg
from spacewalk.common.rhnConfig import CFG
from spacewalk.server import taskomatic
from spacewalk.server.rhnServer import server_packages
class ChannelPackageSubscription(GenericPackageImport):
def __init__(self, batch, backend, caller=None, strict=0, repogen=True):
# If strict, the set of packages that was passed in will be the only
# one in the channels - everything else will be unlinked
GenericPackageImport.__init__(self, batch, backend)
self.affected_channels = []
# A hash keyed on the channel id, and with tuples
# (added_packages, removed_packages) as values (packages are package
# ids)
self.affected_channel_packages = {}
if not caller:
self.caller = "backend.(unknown)"
else:
self.caller = caller
self._strict_subscription = strict
self.repogen = repogen
def preprocess(self):
# Processes the package batch to a form more suitable for database
# operations
for package in self.batch:
# if package object doesn't have multiple checksums (like satellite-sync objects)
# then let's fake it
if 'checksums' not in package:
package['checksums'] = {package['checksum_type']: package['checksum']}
if not isinstance(package, IncompletePackage):
raise TypeError("Expected an IncompletePackage instance, "
"got %s" % package.__class__.__name__)
self._processPackage(package)
def fix(self):
# Look up arches and channels
self.backend.lookupPackageArches(self.package_arches)
self.backend.lookupChannels(self.channels)
# Initialize self.channel_package_arch_compat
self.channel_package_arch_compat = {}
for channel, channel_row in self.channels.items():
if not channel_row:
# Unsupported channel
continue
self.channel_package_arch_compat[channel_row['channel_arch_id']] = None
self.backend.lookupChannelPackageArchCompat(self.channel_package_arch_compat)
self.backend.lookupPackageNames(self.names)
self.backend.lookupEVRs(self.evrs)
self.backend.lookupChecksums(self.checksums)
# Fix the package information up, and uniquify the packages too
uniqdict = {}
for package in self.batch:
if package.ignored:
continue
self._postprocessPackageNEVRA(package)
if not CFG.ENABLE_NVREA:
# nvrea disabled, skip checksum
nevrao = (
package['name_id'],
package['evr_id'],
package['package_arch_id'],
package['org_id'])
else:
# As nvrea is enabled uniquify based on checksum
nevrao = (
package['name_id'],
package['evr_id'],
package['package_arch_id'],
package['org_id'],
package['checksum_id'])
if nevrao not in uniqdict:
# Uniquify the channel names
package['channels'] = {}
# Initialize the channels
# This is a handy way of checking arch compatibility for this
# package with its channels
self.__copyChannels(package, package)
uniqdict[nevrao] = package
else:
# Package is found twice in the same batch
# Are the packages the same?
self._comparePackages(package, uniqdict[nevrao])
# Invalidate it
package.ignored = 1
firstpackage = uniqdict[nevrao]
# Copy any new channels
self.__copyChannels(package, firstpackage)
# Knowing the id of the referenced package
package.first_package = firstpackage
def _comparePackages(self, package1, package2):
# XXX This should probably do a deep compare of the two packages
pass
def submit(self):
self.backend.lookupPackages(self.batch, self.checksums)
try:
affected_channels = self.backend.subscribeToChannels(self.batch,
strict=self._strict_subscription)
except:
self.backend.rollback()
raise
self.compute_affected_channels(affected_channels)
if len(self.batch) < 10:
# update small batch per package
name_ids = [pkg['name_id'] for pkg in self.batch]
else:
# update bigger batch at once
name_ids = []
self.backend.update_newest_package_cache(caller=self.caller,
affected_channels=self.affected_channel_packages, name_ids=name_ids)
# Now that channel is updated, schedule the repo generation
if self.repogen:
taskomatic.add_to_repodata_queue_for_channel_package_subscription(
self.affected_channels, self.batch, self.caller)
self.backend.commit()
def compute_affected_channels(self, affected_channels):
# Fill the list of affected channels
self.affected_channel_packages.clear()
self.affected_channel_packages.update(affected_channels)
for channel_label, channel_row in list(self.channels.items()):
channel_id = channel_row['id']
if channel_id in affected_channels:
affected_channels[channel_id] = channel_label
self.affected_channels = list(affected_channels.values())
def _processPackage(self, package):
GenericPackageImport._processPackage(self, package)
# Process channels
channels = []
channelHash = {}
for channel in package['channels']:
channelName = channel['label']
if channelName not in channelHash:
channels.append(channelName)
channelHash[channelName] = None
self.channels[channelName] = None
# Replace the channel list with the uniquified list
package.channels = channels
# Copies the channels from one package to the other
def __copyChannels(self, sourcePackage, destPackage):
dpHash = destPackage['channels']
for schannelName in sourcePackage.channels:
# Check if the package is compatible with the channel
channel = self.channels[schannelName]
if not channel:
# Unknown channel
sourcePackage.ignored = 1
raise InvalidChannelError(channel,
"Unsupported channel %s" % schannelName)
# Check channel-package compatibility
charch = channel['channel_arch_id']
archCompat = self.channel_package_arch_compat[charch]
if not archCompat:
# Invalid architecture
sourcePackage.ignored = 1
raise InvalidArchError(charch,
"Invalid channel architecture %s" % charch)
# Now check if the source package's arch is compatible with the
# current channel
if sourcePackage['package_arch_id'] not in archCompat:
sourcePackage.ignored = 1
raise IncompatibleArchError(sourcePackage.arch, charch,
"Package arch %s incompatible with channel %s" %
(sourcePackage.arch, schannelName))
dpHash[channel['id']] = schannelName
destPackage.channels = list(dpHash.values())
class PackageImport(ChannelPackageSubscription):
def __init__(self, batch, backend, caller=None, update_last_modified=0):
ChannelPackageSubscription.__init__(self, batch, backend,
caller=caller)
self.ignoreUploaded = 1
self._update_last_modified = update_last_modified
self.capabilities = {}
self.groups = {}
self.sourceRPMs = {}
self.changelog_data = {}
def _rpm_knows(self, tag):
# See if the installed version of RPM understands a given tag
# Assumed attr-format in RPM is 'RPMTAG_<UPPERCASETAG>'
return hasattr(rpm, 'RPMTAG_'+tag.upper())
def _processPackage(self, package):
ChannelPackageSubscription._processPackage(self, package)
# Process package groups
group = package['package_group']
if group not in self.groups:
self.groups[group] = None
sourceRPM = package['source_rpm']
if (sourceRPM is not None) and (sourceRPM not in self.sourceRPMs):
self.sourceRPMs[sourceRPM] = None
# Change copyright to license
# XXX
package['copyright'] = self._fix_encoding(package['license'])
for tag in ('recommends', 'suggests', 'supplements', 'enhances', 'breaks', 'predepends'):
if not self._rpm_knows(tag) or tag not in package or type(package[tag]) != type([]):
                # older Spacewalk servers do not export weak deps,
                # and older RPM does not know about them either,
                # so create an empty list
package[tag] = []
# Creates all the data structures needed to insert capabilities
for tag in ('provides', 'requires', 'conflicts', 'obsoletes', 'recommends', 'suggests', 'supplements', 'enhances', 'breaks', 'predepends'):
depList = package[tag]
if type(depList) != type([]):
sys.stderr.write("!!! packageImport.PackageImport._processPackage: "
"erronous depList for '%s', converting to []\n" % tag)
depList = []
for dep in depList:
nv = []
for f in ('name', 'version'):
nv.append(dep[f])
del dep[f]
nv = tuple(nv)
dep['capability'] = nv
if nv not in self.capabilities:
self.capabilities[nv] = None
# Process files too
fileList = package['files']
for f in fileList:
filename = self._fix_encoding(f['name'])
nv = (filename, '')
del f['name']
f['capability'] = nv
if nv not in self.capabilities:
self.capabilities[nv] = None
fchecksumTuple = (f['checksum_type'], f['checksum'])
if fchecksumTuple not in self.checksums:
self.checksums[fchecksumTuple] = None
# Uniquify changelog entries
unique_package_changelog_hash = {}
unique_package_changelog = []
for changelog in package['changelog']:
key = (changelog['name'], changelog['time'], changelog['text'])
if key not in unique_package_changelog_hash:
self.changelog_data[key] = None
unique_package_changelog.append(changelog)
unique_package_changelog_hash[key] = 1
package['changelog'] = unique_package_changelog
# fix encoding issues in package summary and description
package['description'] = self._fix_encoding(package['description'])
package['summary'] = self._fix_encoding(package['summary'])
def fix(self):
# If capabilities are available, process them
if self.capabilities:
try:
self.backend.processCapabilities(self.capabilities)
except:
# Oops
self.backend.rollback()
raise
# Since this is the bulk of the work, commit
self.backend.commit()
self.backend.processChangeLog(self.changelog_data)
ChannelPackageSubscription.fix(self)
self.backend.lookupSourceRPMs(self.sourceRPMs)
self.backend.lookupPackageGroups(self.groups)
# Postprocess the gathered information
self.__postprocess()
def submit(self):
upload_force = self.uploadForce
if not upload_force and self._update_last_modified:
# # Force it just a little bit - kind of hacky
upload_force = 0.5
try:
self.backend.processPackages(self.batch,
uploadForce=upload_force,
forceVerify=self.forceVerify,
ignoreUploaded=self.ignoreUploaded,
transactional=self.transactional)
self._import_signatures()
except:
# Oops
self.backend.rollback()
raise
self.backend.commit()
if not self._update_last_modified:
            # Go through the list of objects and clear out the ones that have a
# force of 0.5
for p in self.batch:
if p.diff and p.diff.level == 0.5:
# Ignore this difference completely
p.diff = None
# Leave p.diff_result in place
def subscribeToChannels(self):
affected_channels = self.backend.subscribeToChannels(self.batch)
# Fill the list of affected channels
self.compute_affected_channels(affected_channels)
name_ids = [pkg['name_id'] for pkg in self.batch]
self.backend.update_newest_package_cache(caller=self.caller,
affected_channels=self.affected_channel_packages, name_ids=name_ids)
taskomatic.add_to_repodata_queue_for_channel_package_subscription(
self.affected_channels, self.batch, self.caller)
self.backend.commit()
def __postprocess(self):
# Gather the IDs we've found
for package in self.batch:
if package.ignored:
# Skip it
continue
# Only deal with packages
self.__postprocessPackage(package)
def __postprocessPackage(self, package):
""" populate the columns foo_id with id numbers from appropriate hashes """
package['package_group'] = self.groups[package['package_group']]
source_rpm = package['source_rpm']
if source_rpm is not None:
source_rpm = self.sourceRPMs[source_rpm]
else:
source_rpm = ''
package['source_rpm_id'] = source_rpm
package['checksum_id'] = self.checksums[(package['checksum_type'], package['checksum'])]
# Postprocess the dependency information
for tag in ('provides', 'requires', 'conflicts', 'obsoletes', 'files', 'recommends', 'suggests', 'supplements', 'enhances', 'breaks', 'predepends'):
for entry in package[tag]:
nv = entry['capability']
entry['capability_id'] = self.capabilities[nv]
for c in package['changelog']:
c['changelog_data_id'] = self.changelog_data[(c['name'], c['time'], c['text'])]
fileList = package['files']
for f in fileList:
f['checksum_id'] = self.checksums[(f['checksum_type'], f['checksum'])]
def _comparePackages(self, package1, package2):
if (package1['checksum_type'] == package2['checksum_type']
and package1['checksum'] == package2['checksum']):
return
# XXX Handle this better
raise Exception("Different packages in the same batch")
def _cleanup_object(self, object):
ChannelPackageSubscription._cleanup_object(self, object)
if object.ignored:
object.id = object.first_package.id
def _import_signatures(self):
for package in self.batch:
# skip missing files and mpm packages
if package['path'] and not isinstance(package, mpmBinaryPackage):
full_path = os.path.join(CFG.MOUNT_POINT, package['path'])
if os.path.exists(full_path):
header = rhn_pkg.get_package_header(filename=full_path)
server_packages.processPackageKeyAssociations(header,
package['checksum_type'], package['checksum'])
def _fix_encoding(self, text):
if text is None:
return None
try:
return text.decode('utf8')
except UnicodeDecodeError:
return text.decode('iso8859-1')
class SourcePackageImport(Import):
def __init__(self, batch, backend, caller=None, update_last_modified=0):
Import.__init__(self, batch, backend)
self._update_last_modified = update_last_modified
self.ignoreUploaded = 1
self.sourceRPMs = {}
self.groups = {}
self.checksums = {}
def preprocess(self):
for package in self.batch:
self._processPackage(package)
def fix(self):
self.backend.lookupSourceRPMs(self.sourceRPMs)
self.backend.lookupPackageGroups(self.groups)
self.backend.lookupChecksums(self.checksums)
self.__postprocess()
# Uniquify the packages
uniqdict = {}
for package in self.batch:
# Unique key
key = (package['org_id'], package['source_rpm_id'])
if key not in uniqdict:
uniqdict[key] = package
continue
else:
self._comparePackages(package, uniqdict[key])
# And invalidate it
package.ignored = 1
package.first_package = uniqdict[key]
def submit(self):
upload_force = self.uploadForce
if not upload_force and self._update_last_modified:
# # Force it just a little bit - kind of hacky
upload_force = 0.5
try:
self.backend.processSourcePackages(self.batch,
uploadForce=upload_force,
forceVerify=self.forceVerify,
ignoreUploaded=self.ignoreUploaded,
transactional=self.transactional)
except:
# Oops
self.backend.rollback()
raise
self.backend.commit()
if not self._update_last_modified:
            # Go through the list of objects and clear out the ones that have a
# force of 0.5
for p in self.batch:
if p.diff and p.diff.level == 0.5:
# Ignore this difference completely
p.diff = None
# Leave p.diff_result in place
def _comparePackages(self, package1, package2):
if (package1['checksum_type'] == package2['checksum_type']
and package1['checksum'] == package2['checksum']):
return
# XXX Handle this better
raise Exception("Different packages in the same batch")
def _processPackage(self, package):
Import._processPackage(self, package)
# Fix the arch
package.arch = 'src'
package.source_rpm = package['source_rpm']
sourceRPM = package['source_rpm']
if not sourceRPM:
# Should not happen
raise Exception("Source RPM %s does not exist")
self.sourceRPMs[sourceRPM] = None
self.groups[package['package_group']] = None
checksumTuple = (package['checksum_type'], package['checksum'])
if checksumTuple not in self.checksums:
self.checksums[checksumTuple] = None
sigchecksumTuple = (package['sigchecksum_type'], package['sigchecksum'])
if sigchecksumTuple not in self.checksums:
self.checksums[sigchecksumTuple] = None
def __postprocess(self):
# Gather the IDs we've found
for package in self.batch:
if package.ignored:
# Skip it
continue
# Only deal with packages
self.__postprocessPackage(package)
def __postprocessPackage(self, package):
# Set the ids
package['package_group'] = self.groups[package['package_group']]
package['source_rpm_id'] = self.sourceRPMs[package['source_rpm']]
package['checksum_id'] = self.checksums[(package['checksum_type'],
package['checksum'])]
package['sigchecksum_id'] = self.checksums[(package['sigchecksum_type'],
package['sigchecksum'])]
def _cleanup_object(self, object):
Import._cleanup_object(self, object)
if object.ignored:
object.id = object.first_package.id
def packageImporter(batch, backend, source=0, caller=None):
if source:
return SourcePackageImport(batch, backend, caller=caller)
return PackageImport(batch, backend, caller=caller)
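# Hedged usage sketch (the batch and backend objects are constructed by the callers,
# e.g. satellite-sync; only the call sequence defined in this module is shown):
#
# importer = packageImporter(batch, backend, source=0, caller='backend.example')
# importer.preprocess()
# importer.fix()
# importer.submit()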
|
gpl-2.0
| -7,587,173,102,851,040,000
| 40.998092
| 156
| 0.577907
| false
| 4.633053
| false
| false
| false
|
geertj/bluepass
|
bluepass/frontends/qt/passwordbutton.py
|
1
|
10929
|
#
# This file is part of Bluepass. Bluepass is Copyright (c) 2012-2013
# Geert Jansen.
#
# Bluepass is free software available under the GNU General Public License,
# version 3. See the file LICENSE distributed with this file for the exact
# licensing terms.
from __future__ import absolute_import, print_function
from PyQt4.QtCore import QTimer, Signal, Slot, Property, Qt, QPoint
from PyQt4.QtGui import (QPushButton, QStylePainter, QStyleOptionButton,
QStyle, QGridLayout, QWidget, QLabel, QSpinBox, QLineEdit, QFrame,
QApplication, QCheckBox, QFontMetrics)
class NoSelectSpinbox(QSpinBox):
"""This is a SpinBox that:
* Will not select the displayed text when the value changes.
* Does not accept keyboard input.
"""
def __init__(self, parent=None):
super(NoSelectSpinbox, self).__init__(parent)
self.setFocusPolicy(Qt.NoFocus)
def stepBy(self, amount):
super(NoSelectSpinbox, self).stepBy(amount)
self.lineEdit().deselect()
class StrengthIndicator(QLabel):
"""A password strength indicator.
This is a label that gives feedback on the strength of a password.
"""
Poor, Good, Excellent = range(3)
stylesheet = """
StrengthIndicator { border: 1px solid black; }
StrengthIndicator[strength="0"] { background-color: #ff2929; }
StrengthIndicator[strength="1"] { background-color: #4dd133; }
StrengthIndicator[strength="2"] { background-color: #4dd133; }
"""
def __init__(self, parent=None):
super(StrengthIndicator, self).__init__(parent)
self._strength = 0
self.setStyleSheet(self.stylesheet)
def getStrength(self):
return self._strength
def setStrength(self, strength):
self._strength = strength
if strength == self.Poor:
self.setText('Poor')
elif strength == self.Good:
self.setText('Good')
elif strength == self.Excellent:
self.setText('Excellent')
self.setStyleSheet(self.stylesheet)
strength = Property(int, getStrength, setStrength)
class PasswordConfiguration(QFrame):
"""Base class for password configuration popups.
A password popup is installed in a GeneratePasswordButton, and allows
the user to customize the parameters of password generation.
"""
def __init__(self, method, parent=None):
super(PasswordConfiguration, self).__init__(parent)
self.method = method
self.parameters = []
parametersChanged = Signal(str, list)
class DicewarePasswordConfiguration(PasswordConfiguration):
"""Configuration for Diceware password generation."""
stylesheet = """
PasswordConfiguration { border: 1px solid grey; }
"""
def __init__(self, parent=None):
super(DicewarePasswordConfiguration, self).__init__('diceware', parent)
self.parameters = [5]
self.addWidgets()
self.setFixedSize(self.sizeHint())
self.setStyleSheet(self.stylesheet)
def addWidgets(self):
grid = QGridLayout()
self.setLayout(grid)
grid.setColumnMinimumWidth(1, 10)
label = QLabel('Length', self)
grid.addWidget(label, 0, 0)
spinbox = NoSelectSpinbox(self)
spinbox.setSuffix(' words')
spinbox.setMinimum(4)
spinbox.setMaximum(8)
grid.addWidget(spinbox, 0, 2)
label = QLabel('Security', self)
grid.addWidget(label, 1, 0)
strength = StrengthIndicator(self)
grid.addWidget(strength, 1, 2)
self.strength = strength
spinbox.valueChanged.connect(self.setParameters)
spinbox.setValue(self.parameters[0])
@Slot(int)
def setParameters(self, words):
self.parameters[0] = words
self.updateStrength()
@Slot()
def updateStrength(self):
backend = QApplication.instance().backend()
strength = backend.password_strength(self.method, *self.parameters)
# We use Diceware only for locking our vaults. Because we know we
# do proper salting and key stretching, we add 20 extra bits.
strength += 20
if strength < 70:
strength = StrengthIndicator.Poor
elif strength < 94:
strength = StrengthIndicator.Good
else:
strength = StrengthIndicator.Excellent
self.strength.setStrength(strength)
class RandomPasswordConfiguration(PasswordConfiguration):
"""Configuration for random password generation."""
stylesheet = """
PasswordConfiguration { border: 1px solid grey; }
"""
def __init__(self, parent=None):
super(RandomPasswordConfiguration, self).__init__('random', parent)
self.parameters = [12, '[a-z][A-Z][0-9]']
self.addWidgets()
self.setFixedSize(self.sizeHint())
self.setStyleSheet(self.stylesheet)
def addWidgets(self):
grid = QGridLayout()
self.setLayout(grid)
grid.setColumnMinimumWidth(1, 10)
label = QLabel('Length', self)
grid.addWidget(label, 0, 0)
spinbox = NoSelectSpinbox(self)
spinbox.setSuffix(' characters')
spinbox.setMinimum(6)
spinbox.setMaximum(20)
grid.addWidget(spinbox, 0, 2, 1, 2)
label = QLabel('Characters')
grid.addWidget(label, 1, 0)
def updateInclude(s):
def stateChanged(state):
self.updateInclude(state, s)
return stateChanged
lower = QCheckBox('Lower')
grid.addWidget(lower, 1, 2)
lower.stateChanged.connect(updateInclude('[a-z]'))
upper = QCheckBox('Upper')
grid.addWidget(upper, 1, 3)
upper.stateChanged.connect(updateInclude('[A-Z]'))
digits = QCheckBox('Digits')
grid.addWidget(digits, 2, 2)
digits.stateChanged.connect(updateInclude('[0-9]'))
special = QCheckBox('Special')
grid.addWidget(special, 2, 3)
special.stateChanged.connect(updateInclude('[!-/]'))
label = QLabel('Security', self)
grid.addWidget(label, 3, 0)
strength = StrengthIndicator(self)
grid.addWidget(strength, 3, 2)
self.strength = strength
spinbox.valueChanged.connect(self.setLength)
spinbox.setValue(self.parameters[0])
lower.setChecked('[a-z]' in self.parameters[1])
upper.setChecked('[A-Z]' in self.parameters[1])
digits.setChecked('[0-9]' in self.parameters[1])
special.setChecked('[!-/]' in self.parameters[1])
@Slot(int)
def setLength(self, length):
self.parameters[0] = length
self.parametersChanged.emit(self.method, self.parameters)
self.updateStrength()
@Slot()
def updateInclude(self, enable, s):
if enable and s not in self.parameters[1]:
self.parameters[1] += s
elif not enable:
self.parameters[1] = self.parameters[1].replace(s, '')
self.parametersChanged.emit(self.method, self.parameters)
self.updateStrength()
@Slot()
def updateStrength(self):
backend = QApplication.instance().backend()
strength = backend.password_strength(self.method, *self.parameters)
# We do not know if the remote site does key stretching or salting.
# So we only give a Good rating if the entropy takes the password
# out of reach of the largest Rainbow tables.
if strength < 60:
strength = StrengthIndicator.Poor
elif strength < 84:
strength = StrengthIndicator.Good
else:
strength = StrengthIndicator.Excellent
self.strength.setStrength(strength)
class PopupButton(QPushButton):
"""A button with a popup.
The popup will be displayed just below the button after the user
keeps the button pressed for 500 msecs.
"""
def __init__(self, text, parent=None):
super(PopupButton, self).__init__(text, parent)
timer = QTimer()
timer.setSingleShot(True)
timer.setInterval(500)
timer.timeout.connect(self.showPopup)
self.timer = timer
self.popup = None
# I would have preferred to implement the menu indicator by overriding
# initStyleOption(), and nothing else, but it doesn't work. The C++
# ::paintEvent() and ::sizeHint() are not able to call into it. So we need
# to provide our own paintEvent() and sizeHint() too.
def initStyleOption(self, option):
super(PopupButton, self).initStyleOption(option)
option.features |= option.HasMenu
def paintEvent(self, event):
p = QStylePainter(self)
opts = QStyleOptionButton()
self.initStyleOption(opts)
p.drawControl(QStyle.CE_PushButton, opts)
def sizeHint(self):
size = super(PopupButton, self).sizeHint()
fm = QFontMetrics(QApplication.instance().font())
width = fm.width(self.text())
opts = QStyleOptionButton()
self.initStyleOption(opts)
style = self.style()
dw = style.pixelMetric(QStyle.PM_MenuButtonIndicator, opts, self)
size.setWidth(width + dw + 10)
return size
def mousePressEvent(self, event):
self.timer.start()
super(PopupButton, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
self.timer.stop()
super(PopupButton, self).mouseReleaseEvent(event)
def setPopup(self, popup):
popup.setParent(None)
popup.setWindowFlags(Qt.Popup)
popup.hide()
# Install a closeEvent() on the popup that raises the button.
def closeEvent(*args):
self.setDown(False)
popup.closeEvent = closeEvent
self.popup = popup
@Slot()
def showPopup(self):
if not self.popup:
return
pos = QPoint(self.width(), self.height())
pos = self.mapToGlobal(pos)
size = self.popup.size()
self.popup.move(pos.x() - size.width(), pos.y())
self.popup.show()
class GeneratePasswordButton(PopupButton):
"""A password generation button.
A password is generated each time the user clicks the button.
"""
def __init__(self, text, popup, parent=None):
super(GeneratePasswordButton, self).__init__(text, parent)
self.method = popup.method
self.parameters = popup.parameters
self.setPopup(popup)
popup.parametersChanged.connect(self.parametersChanged)
self.clicked.connect(self.generate)
@Slot(str, list)
def parametersChanged(self, method, parameters):
self.method = method
self.parameters = parameters
self.generate()
@Slot()
def generate(self):
backend = QApplication.instance().backend()
password = backend.generate_password(self.method, *self.parameters)
self.passwordGenerated.emit(password)
passwordGenerated = Signal(str)
|
gpl-3.0
| -703,691,127,010,999,800
| 33.046729
| 79
| 0.637753
| false
| 4.037311
| true
| false
| false
|
apophys/freeipa
|
ipalib/__init__.py
|
1
|
34622
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Package containing the core library.
=============================
Tutorial for Plugin Authors
=============================
This tutorial will introduce you to writing plugins for freeIPA v2. It does
not cover every detail, but it provides enough to get you started and is
heavily cross-referenced with further documentation that (hopefully) fills
in the missing details.
In addition to this tutorial, the many built-in plugins in `ipalib.plugins`
and `ipaserver.plugins` provide real-life examples of how to write good
plugins.
----------------------------
How this tutorial is written
----------------------------
The code examples in this tutorial are presented as if entered into a Python
interactive interpreter session. As such, when you create a real plugin in
a source file, a few details will be different (in addition to the fact that
you will never include the ``>>>`` nor ``...`` that the interpreter places at
the beginning of each line of code).
The tutorial examples all have this pattern:
::
>>> from ipalib import Command, create_api
>>> api = create_api()
>>> class my_command(Command):
... pass
...
>>> api.add_plugin(my_command)
>>> api.finalize()
In the tutorial we call `create_api()` to create an *example* instance
of `plugable.API` to work with. But a real plugin will simply use
``ipalib.api``, the standard run-time instance of `plugable.API`.
A real plugin will have this pattern:
::
from ipalib import Command, Registry, api
register = Registry()
@register()
class my_command(Command):
pass
As seen above, also note that in a real plugin you will *not* call
`plugable.API.finalize()`. When in doubt, look at some of the built-in
plugins for guidance, like those in `ipalib.plugins`.
If you don't know what the Python *interactive interpreter* is, or are
confused about what this *Python* is in the first place, then you probably
should start with the Python tutorial:
http://docs.python.org/tutorial/index.html
------------------------------------
First steps: A simple command plugin
------------------------------------
Our first example will create the most basic command plugin possible. This
command will be seen in the list of command plugins, but it won't be capable
of actually doing anything yet.
A command plugin simultaneously adds a new command that can be called through
the command-line ``ipa`` script *and* adds a new XML-RPC method... the two are
one and the same, simply invoked in different ways.
A freeIPA plugin is a Python class, and when you create a plugin, you register
this class itself (instead of an instance of the class). To be a command
plugin, your plugin must subclass from `frontend.Command` (or from a subclass
thereof). Here is our first example:
>>> from ipalib import Command, create_api
>>> api = create_api()
>>> class my_command(Command): # Step 1, define class
... """My example plugin."""
...
>>> api.add_plugin(my_command) # Step 2, register class
Notice that we are registering the ``my_command`` class itself, not an
instance of ``my_command``.
Until `plugable.API.finalize()` is called, your plugin class has not been
instantiated nor does the ``Command`` namespace yet exist. For example:
>>> hasattr(api, 'Command')
False
>>> api.finalize() # plugable.API.finalize()
>>> hasattr(api.Command, 'my_command')
True
>>> api.Command.my_command.doc
Gettext('My example plugin.', domain='ipa', localedir=None)
Notice that your plugin instance is accessed through an attribute named
``my_command``, the same name as your plugin class name.
------------------------------
Make your command do something
------------------------------
The simplest way to make your example command plugin do something is to
implement a ``run()`` method, like this:
>>> class my_command(Command):
... """My example plugin with run()."""
...
... def run(self, **options):
... return dict(result='My run() method was called!')
...
>>> api = create_api()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command
{'result': 'My run() method was called!'}
When `frontend.Command.__call__()` is called, it first validates any arguments
and options your command plugin takes (if any) and then calls its ``run()``
method.
------------------------
Forwarding vs. execution
------------------------
However, unlike the example above, a typical command plugin will implement an
``execute()`` method instead of a ``run()`` method. Your command plugin can
be loaded in two distinct contexts:
1. In a *client* context - Your command plugin is only used to validate
any arguments and options it takes, and then ``self.forward()`` is
called, which forwards the call over XML-RPC to an IPA server where
the actual work is done.
2. In a *server* context - Your same command plugin validates any
arguments and options it takes, and then ``self.execute()`` is called,
which you should implement to perform whatever work your plugin does.
The base `frontend.Command.run()` method simply dispatches the call to
``self.execute()`` if ``self.env.in_server`` is True, or otherwise
dispatches the call to ``self.forward()``.
For example, say you have a command plugin like this:
>>> class my_command(Command):
... """Forwarding vs. execution."""
...
... def forward(self, **options):
... return dict(
... result='forward(): in_server=%r' % self.env.in_server
... )
...
... def execute(self, **options):
... return dict(
... result='execute(): in_server=%r' % self.env.in_server
... )
...
The ``options`` will contain a dict of command options. One option is added
automatically: ``version``. It contains the API version of the client.
In order to maintain forward compatibility, you should always specify the
API version current at the time you're writing your client.
If ``my_command`` is loaded in a *client* context, ``forward()`` will be
called:
>>> api = create_api()
>>> api.env.in_server = False # run() will dispatch to forward()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command plugin
{'result': 'forward(): in_server=False'}
On the other hand, if ``my_command`` is loaded in a *server* context,
``execute()`` will be called:
>>> api = create_api()
>>> api.env.in_server = True # run() will dispatch to execute()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command plugin
{'result': 'execute(): in_server=True'}
Normally there should be no reason to override `frontend.Command.forward()`,
but, as above, it can be done for demonstration purposes. In contrast, there
*is* a reason you might want to override `frontend.Command.run()`: if it only
makes sense to execute your command locally, if it should never be forwarded
to the server. In this case, you should implement your *do-stuff* in the
``run()`` method instead of in the ``execute()`` method.
For example, the ``ipa`` command line script has a ``help`` command
(`ipalib.cli.help`) that is specific to the command-line-interface and should
never be forwarded to the server.
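As an illustrative sketch only (not one of the built-in plugins), such a
local-only command might look roughly like this:
::
    class my_local_note(Command):
        """Hypothetical command that never forwards to the server."""
        def run(self, **options):
            # Do the work right here, regardless of self.env.in_server.
            return dict(result=u'handled entirely on the client')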
---------------
Backend plugins
---------------
There are two types of plugins:
1. *Frontend plugins* - These are loaded in both the *client* and *server*
contexts. These need to be installed with any application built atop
the `ipalib` library. The built-in frontend plugins can be found in
`ipalib.plugins`. The ``my_command`` example above is a frontend
plugin.
2. *Backend plugins* - These are only loaded in a *server* context and
only need to be installed on the IPA server. The built-in backend
plugins can be found in `ipaserver.plugins`.
Backend plugins should provide a set of methods that standardize how IPA
interacts with some external system or library. For example, all interaction
with LDAP is done through the ``ldap`` backend plugin defined in
`ipaserver.plugins.b_ldap`. As a good rule of thumb, anytime you need to
import some package that is not part of the Python standard library, you
should probably interact with that package via a corresponding backend
plugin you implement.
Backend plugins are much more free-form than command plugins. Aside from a
few reserved attribute names, you can define arbitrary public methods on your
backend plugin.
Here is a simple example:
>>> from ipalib import Backend
>>> class my_backend(Backend):
... """My example backend plugin."""
...
... def do_stuff(self):
... """Part of your API."""
... return 'Stuff got done.'
...
>>> api = create_api()
>>> api.add_plugin(my_backend)
>>> api.finalize()
>>> api.Backend.my_backend.do_stuff()
'Stuff got done.'
-------------------------------
How your command should do work
-------------------------------
We now return to our ``my_command`` plugin example.
Plugins are separated into frontend and backend plugins so that there are not
unnecessary dependencies required by an application that only uses `ipalib` and
its built-in frontend plugins (and then forwards over XML-RPC for execution).
But how do we avoid introducing additional dependencies? For example, the
``user_add`` command needs to talk to LDAP to add the user, yet we want to
somehow load the ``user_add`` plugin on client machines without requiring the
``python-ldap`` package (Python bindings to openldap) to be installed. To
answer that, we consult our golden rule:
**The golden rule:** A command plugin should implement its ``execute()``
method strictly via calls to methods on one or more backend plugins.
So the module containing the ``user_add`` command does not itself import the
Python LDAP bindings, only the module containing the ``ldap`` backend plugin
does that, and the backend plugins are only installed on the server. The
``user_add.execute()`` method, which is only called when in a server context,
is implemented as a series of calls to methods on the ``ldap`` backend plugin.
When `plugable.Plugin.__init__()` is called, each plugin stores a reference to
the `plugable.API` instance it has been loaded into. So your plugin can
access the ``my_backend`` plugin as ``self.api.Backend.my_backend``.
Additionally, convenience attributes are set for each namespace, so your
plugin can also access the ``my_backend`` plugin as simply
``self.Backend.my_backend``.
This next example will tie everything together. First we create our backend
plugin:
>>> api = create_api()
>>> api.env.in_server = True # We want to execute, not forward
>>> class my_backend(Backend):
... """My example backend plugin."""
...
... def do_stuff(self):
... """my_command.execute() calls this."""
... return 'my_backend.do_stuff() indeed did do stuff!'
...
>>> api.add_plugin(my_backend)
Second, we have our frontend plugin, the command:
>>> class my_command(Command):
... """My example command plugin."""
...
... def execute(self, **options):
... """Implemented against Backend.my_backend"""
... return dict(result=self.Backend.my_backend.do_stuff())
...
>>> api.add_plugin(my_command)
Lastly, we call ``api.finalize()`` and see what happens when we call
``my_command()``:
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47')
{'result': 'my_backend.do_stuff() indeed did do stuff!'}
When not in a server context, ``my_command.execute()`` never gets called, so
it never tries to access the non-existent backend plugin at
``self.Backend.my_backend``. To emphasize this point, here is one last
example:
>>> api = create_api()
>>> api.env.in_server = False # We want to forward, not execute
>>> class my_command(Command):
... """My example command plugin."""
...
... def execute(self, **options):
... """Same as above."""
... return dict(result=self.Backend.my_backend.do_stuff())
...
... def forward(self, **options):
... return dict(result='Just my_command.forward() getting called here.')
...
>>> api.add_plugin(my_command)
>>> api.finalize()
Notice that the ``my_backend`` plugin has certainly not been registered:
>>> hasattr(api.Backend, 'my_backend')
False
And yet we can call ``my_command()``:
>>> api.Command.my_command(version=u'2.47')
{'result': 'Just my_command.forward() getting called here.'}
----------------------------------------
Calling other commands from your command
----------------------------------------
It can be useful to have your ``execute()`` method call other command plugins.
Among other things, this allows for meta-commands that conveniently call
several other commands in a single operation. For example:
>>> api = create_api()
>>> api.env.in_server = True # We want to execute, not forward
>>> class meta_command(Command):
... """My meta-command plugin."""
...
... def execute(self, **options):
... """Calls command_1(), command_2()"""
... msg = '%s; %s.' % (
... self.Command.command_1()['result'],
... self.Command.command_2()['result'],
... )
... return dict(result=msg)
>>> class command_1(Command):
... def execute(self, **options):
... return dict(result='command_1.execute() called')
...
>>> class command_2(Command):
... def execute(self, **options):
... return dict(result='command_2.execute() called')
...
>>> api.add_plugin(meta_command)
>>> api.add_plugin(command_1)
>>> api.add_plugin(command_2)
>>> api.finalize()
>>> api.Command.meta_command(version=u'2.47')
{'result': 'command_1.execute() called; command_2.execute() called.'}
Because this is quite useful, we are going to revise our golden rule somewhat:
**The revised golden rule:** A command plugin should implement its
``execute()`` method strictly via what it can access through ``self.api``,
most likely via the backend plugins in ``self.api.Backend`` (which can also
be conveniently accessed as ``self.Backend``).
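Sketching the revised rule with the example plugins defined earlier (a
hypothetical command, assuming both ``my_backend`` and ``command_1`` are
registered in the same ``api``):
::
    class my_combined(Command):
        """Hypothetical command using a backend plugin and another command."""
        def execute(self, **options):
            stuff = self.Backend.my_backend.do_stuff()
            other = self.Command.command_1()['result']
            return dict(result='%s %s' % (stuff, other))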
-----------------------------------------------
Defining arguments and options for your command
-----------------------------------------------
You can define a command that will accept specific arguments and options.
For example:
>>> from ipalib import Str
>>> class nudge(Command):
... """Takes one argument, one option"""
...
... takes_args = ('programmer',)
...
... takes_options = (Str('stuff', default=u'documentation'))
...
... def execute(self, programmer, **kw):
... return dict(
... result='%s, go write more %s!' % (programmer, kw['stuff'])
... )
...
>>> api = create_api()
>>> api.env.in_server = True
>>> api.add_plugin(nudge)
>>> api.finalize()
>>> api.Command.nudge(u'Jason', version=u'2.47')
{'result': u'Jason, go write more documentation!'}
>>> api.Command.nudge(u'Jason', stuff=u'unit tests', version=u'2.47')
{'result': u'Jason, go write more unit tests!'}
The ``args`` and ``options`` attributes are `plugable.NameSpace` instances
containing a command's arguments and options, respectively, as you can see:
>>> list(api.Command.nudge.args) # Iterates through argument names
['programmer']
>>> api.Command.nudge.args.programmer
Str('programmer')
>>> list(api.Command.nudge.options) # Iterates through option names
['stuff', 'version']
>>> api.Command.nudge.options.stuff
Str('stuff', default=u'documentation')
>>> api.Command.nudge.options.stuff.default
u'documentation'
The 'version' option is added to commands automatically.
The arguments and options must not contain colliding names. They are both
merged together into the ``params`` attribute, another `plugable.NameSpace`
instance, as you can see:
>>> api.Command.nudge.params
NameSpace(<3 members>, sort=False)
>>> list(api.Command.nudge.params) # Iterates through the param names
['programmer', 'stuff', 'version']
When calling a command, its positional arguments can also be provided as
keyword arguments, and in any order. For example:
>>> api.Command.nudge(stuff=u'lines of code', programmer=u'Jason', version=u'2.47')
{'result': u'Jason, go write more lines of code!'}
When a command plugin is called, the values supplied for its parameters are
put through a sophisticated processing pipeline that includes steps for
normalization, type conversion, validation, and dynamically constructing
the defaults for missing values. The details won't be covered here; however,
here is a quick teaser:
>>> from ipalib import Int
>>> class create_player(Command):
... takes_options = (
... 'first',
... 'last',
... Str('nick',
... normalizer=lambda value: value.lower(),
... default_from=lambda first, last: first[0] + last,
... ),
... Int('points', default=0),
... )
...
>>> cp = create_player()
>>> cp.finalize()
>>> cp.convert(points=u' 1000 ')
{'points': 1000}
>>> cp.normalize(nick=u'NickName')
{'nick': u'nickname'}
>>> cp.get_default(first=u'Jason', last=u'DeRose')
{'nick': u'jderose', 'points': 0}
For the full details on the parameter system, see the
`frontend.parse_param_spec()` function, and the `frontend.Param` and
`frontend.Command` classes.
---------------------------------------
Allowed return values from your command
---------------------------------------
The return values from your command can be rendered by different user
interfaces (CLI, web-UI); furthermore, a call to your command can be
transparently forwarded over the network (XML-RPC, JSON). As such, the return
values from your command must be usable by the least common denominator.
Your command should return only simple data types and simple data structures,
the kinds that can be represented in an XML-RPC request or in the JSON format.
The return values from your command's ``execute()`` method can include only
the following:
Simple scalar values:
These can be ``str``, ``unicode``, ``int``, and ``float`` instances,
plus the ``True``, ``False``, and ``None`` constants.
Simple compound values:
These can be ``dict``, ``list``, and ``tuple`` instances. These
compound values must contain only the simple scalar values above or
other simple compound values. These compound values can also be empty.
For our purposes here, the ``list`` and ``tuple`` types are equivalent
and can be used interchangeably.
Also note that your ``execute()`` method should not contain any ``print``
statements or otherwise cause any output on ``sys.stdout``. Your command can
(and should) produce log messages by using a module-level logger (see below).
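As a purely illustrative sketch (the command name and values are hypothetical),
a conforming return value could look like this:
::
    class list_users(Command):
        """Hypothetical sketch of a conforming return value."""
        def execute(self, **options):
            # Only simple scalars and simple containers, so the result can be
            # serialized for both the XML-RPC and JSON transports:
            return dict(
                result=[
                    dict(uid=u'jderose', active=True, logins=42),
                    dict(uid=u'example', active=False, logins=0),
                ],
                count=2,
                truncated=False,
            )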
To learn more about XML-RPC (XML Remote Procedure Call), see:
http://docs.python.org/library/xmlrpclib.html
http://en.wikipedia.org/wiki/XML-RPC
To learn more about JSON (JavaScript Object Notation), see:
http://docs.python.org/library/json.html
http://www.json.org/
---------------------------------------
How your command should print to stdout
---------------------------------------
As noted above, your command should not print anything while in its
``execute()`` method. So how does your command format its output when
called from the ``ipa`` script?
After the `cli.CLI.run_cmd()` method calls your command, it will call your
command's ``output_for_cli()`` method (if you have implemented one).
If you implement an ``output_for_cli()`` method, it must have the following
signature:
::
output_for_cli(textui, result, *args, **options)
textui
An object implementing methods for outputting to the console.
Currently the `ipalib.cli.textui` plugin is passed, which your method
can also access as ``self.Backend.textui``. However, in case this
changes in the future, your method should use the instance passed to
it in this first argument.
result
This is the return value from calling your command plugin. Depending
upon how your command is implemented, this is probably the return
value from your ``execute()`` method.
args
The arguments your command was called with. If your command takes no
arguments, you can omit this. You can also explicitly list your
arguments rather than using the generic ``*args`` form.
options
The options your command was called with. If your command takes no
options, you can omit this. If your command takes any options, you
must use the ``**options`` form as they will be provided strictly as
keyword arguments.
For example, say we setup a command like this:
>>> class show_items(Command):
...
... takes_args = ('key?',)
...
... takes_options = (Flag('reverse'),)
...
... def execute(self, key, **options):
... items = dict(
... fruit=u'apple',
... pet=u'dog',
... city=u'Berlin',
... )
... if key in items:
... return dict(result=items[key])
... items = [
... (k, items[k]) for k in sorted(items, reverse=options['reverse'])
... ]
... return dict(result=items)
...
... def output_for_cli(self, textui, result, key, **options):
... result = result['result']
... if key is not None:
... textui.print_plain('%s = %r' % (key, result))
... else:
... textui.print_name(self.name)
... textui.print_keyval(result)
... format = '%d items'
... if options['reverse']:
... format += ' (in reverse order)'
... textui.print_count(result, format)
...
>>> api = create_api()
>>> api.bootstrap(in_server=True) # We want to execute, not forward
>>> api.add_plugin(show_items)
>>> api.finalize()
Normally when you invoke the ``ipa`` script, `cli.CLI.load_plugins()` will
register the `cli.textui` backend plugin, but for the sake of our example,
we will just create an instance here:
>>> from ipalib import cli
>>> textui = cli.textui() # We'll pass this to output_for_cli()
Now for what we are concerned with in this example, calling your command
through the ``ipa`` script basically will do the following:
>>> result = api.Command.show_items()
>>> api.Command.show_items.output_for_cli(textui, result, None, reverse=False)
-----------
show-items:
-----------
city = u'Berlin'
fruit = u'apple'
pet = u'dog'
-------
3 items
-------
Similarly, calling it with ``reverse=True`` would result in the following:
>>> result = api.Command.show_items(reverse=True)
>>> api.Command.show_items.output_for_cli(textui, result, None, reverse=True)
-----------
show-items:
-----------
pet = u'dog'
fruit = u'apple'
city = u'Berlin'
--------------------------
3 items (in reverse order)
--------------------------
Lastly, providing a ``key`` would result in the following:
>>> result = api.Command.show_items(u'city')
>>> api.Command.show_items.output_for_cli(textui, result, 'city', reverse=False)
city = u'Berlin'
See the `ipalib.cli.textui` plugin for a description of its methods.
------------------------
Logging from your plugin
------------------------
Plugins should log through a module-level logger.
For example:
>>> import logging
>>> logger = logging.getLogger(__name__)
>>> class paint_house(Command):
...
... takes_args = 'color'
...
... def execute(self, color, **options):
... """Uses logger.error()"""
... if color not in ('red', 'blue', 'green'):
... logger.error("I don't have %s paint!", color) # Log error
... return
... return 'I painted the house %s.' % color
...
Some basic knowledge of the Python ``logging`` module might be helpful. See:
http://docs.python.org/library/logging.html
The important thing to remember is that your plugin should not configure
logging itself, but should instead simply use the module-level logger.
Also see the `plugable.API.bootstrap()` method for details on how the logging
is configured.
---------------------
Environment variables
---------------------
Plugins access configuration variables and run-time information through
``self.api.env`` (or for convenience, ``self.env`` is equivalent). This
attribute is a reference to the `ipalib.config.Env` instance created in
`plugable.API.__init__()`.
After `API.bootstrap()` has been called, the `Env` instance will be populated
with all the environment information used by the built-in plugins.
This will be called before any plugins are registered, so plugin authors can
assume these variables will all exist by the time the module containing their
plugin (or plugins) is imported.
`Env._bootstrap()`, which is called by `API.bootstrap()`, will create several
run-time variables that cannot be overridden in configuration files or through
command-line options. Here is an overview of this run-time information:
============= ============================= =======================
Key Example value Description
============= ============================= =======================
bin '/usr/bin' Dir. containing script
dot_ipa '/home/jderose/.ipa' User config directory
home os.environ['HOME'] User home dir.
ipalib '.../site-packages/ipalib' Dir. of ipalib package
mode 'unit_test' The mode ipalib is in
script sys.argv[0] Path of script
site_packages '.../python2.5/site-packages' Dir. containing ipalib/
============= ============================= =======================
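For instance (a minimal sketch using an example API instance like the ones in
this tutorial), these run-time values can simply be read from ``api.env`` once
`API.bootstrap()` has run:
::
    from ipalib import create_api
    api = create_api()
    api.bootstrap(in_server=False)
    print(api.env.mode)        # 'dummy' for the example instance
    print(api.env.in_server)   # False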
If your plugin requires new environment variables *and* will be included in
the freeIPA built-in plugins, you should add the defaults for your variables
in `ipalib.constants.DEFAULT_CONFIG`. Also, you should consider whether your
new environment variables should have any auto-magic logic to determine their
values if they haven't already been set by the time `config.Env._bootstrap()`,
`config.Env._finalize_core()`, or `config.Env._finalize()` is called.
On the other hand, if your plugin requires new environment variables and will
be installed in a 3rd-party package, your plugin should set these variables
in the module it is defined in.
`config.Env` values work on a first-one-wins basis... after a value has been
set, it can not be overridden with a new value. As any variables can be set
using the command-line ``-e`` global option or set in a configuration file,
your module must check whether a variable has already been set before
setting its default value. For example:
>>> if 'message_of_the_day' not in api.env:
... api.env.message_of_the_day = 'Hello, world!'
...
Your plugin can access any environment variables via ``self.env``.
For example:
>>> class motd(Command):
... """Print message of the day."""
...
... def execute(self, **options):
... return dict(result=self.env.message)
...
>>> api = create_api()
>>> api.bootstrap(in_server=True, message='Hello, world!')
>>> api.add_plugin(motd)
>>> api.finalize()
>>> api.Command.motd(version=u'2.47')
{'result': u'Hello, world!'}
Also see the `plugable.API.bootstrap_with_global_options()` method.
---------------------------------------------
Indispensable ipa script commands and options
---------------------------------------------
The ``console`` command will launch a custom interactive Python interpreter
session. The global environment will have an ``api`` variable, which is the
standard `plugable.API` instance found at ``ipalib.api``. All plugins will
have been loaded (well, except the backend plugins if ``in_server`` is False)
and ``api`` will be fully initialized. To launch the console from within the
top-level directory in the source tree, just run ``ipa console`` from a
terminal, like this:
::
$ ./ipa console
By default, ``in_server`` is False. If you want to start the console in a
server context (so that all the backend plugins are loaded), you can use the
``-e`` option to set the ``in_server`` environment variable, like this:
::
$ ./ipa -e in_server=True console
You can specify multiple environment variables by including the ``-e`` option
multiple times, like this:
::
$ ./ipa -e in_server=True -e mode=dummy console
The space after the ``-e`` is optional. This is equivalent to the above command:
::
$ ./ipa -ein_server=True -emode=dummy console
The ``env`` command will print out the full environment in key=value pairs,
like this:
::
$ ./ipa env
If you use the ``--server`` option, it will forward the call to the server
over XML-RPC and print out what the environment is on the server, like this:
::
$ ./ipa env --server
The ``plugins`` command will show details of all the plugins that are loaded,
like this:
::
$ ./ipa plugins
-----------------------------------
Learning more about freeIPA plugins
-----------------------------------
To learn more about writing freeIPA plugins, you should:
1. Look at some of the built-in plugins, like the frontend plugins in
`ipalib.plugins.f_user` and the backend plugins in
`ipaserver.plugins.b_ldap`.
2. Learn about the base classes for frontend plugins in `ipalib.frontend`.
3. Learn about the core plugin framework in `ipalib.plugable`.
Furthermore, the freeIPA plugin architecture was inspired by the Bazaar plugin
architecture. Although the two are different enough that learning how to
write plugins for Bazaar will not particularly help you write plugins for
freeIPA, some might be interested in the documentation on writing plugins for
Bazaar, available here:
http://bazaar-vcs.org/WritingPlugins
If nothing else, we just want to give credit where credit is deserved!
However, freeIPA does not use any *code* from Bazaar... it merely borrows a
little inspiration.
--------------------------
A note on docstring markup
--------------------------
Lastly, a quick note on markup: All the Python docstrings in freeIPA v2
(including this tutorial) use the *reStructuredText* markup language. For
information on reStructuredText, see:
http://docutils.sourceforge.net/rst.html
For information on using reStructuredText markup with epydoc, see:
http://epydoc.sourceforge.net/manual-othermarkup.html
--------------------------------------------------
Next steps: get involved with freeIPA development!
--------------------------------------------------
The freeIPA team is always interested in feedback and contribution from the
community. To get involved with freeIPA, see the *Contribute* page on
freeIPA.org:
http://freeipa.org/page/Contribute
'''
from ipapython.version import VERSION as __version__
def _enable_warnings(error=False):
"""Enable additional warnings during development
"""
import ctypes
import warnings
# get reference to Py_BytesWarningFlag from Python CAPI
byteswarnings = ctypes.c_int.in_dll( # pylint: disable=no-member
ctypes.pythonapi, 'Py_BytesWarningFlag')
if byteswarnings.value >= 2:
# bytes warnings flag already set to error
return
# default warning mode for all modules: warn once per location
warnings.simplefilter('default', BytesWarning)
if error:
byteswarnings.value = 2
action = 'error'
else:
byteswarnings.value = 1
action = 'default'
module = '(ipa.*|__main__)'
warnings.filterwarnings(action, category=BytesWarning, module=module)
warnings.filterwarnings(action, category=DeprecationWarning,
module=module)
# call this as early as possible
if 'git' in __version__:
_enable_warnings(False)
# noqa: E402
from ipalib import plugable
from ipalib.backend import Backend
from ipalib.frontend import Command, LocalOrRemote, Updater
from ipalib.frontend import Object, Method
from ipalib.crud import Create, Retrieve, Update, Delete, Search
from ipalib.parameters import DefaultFrom, Bool, Flag, Int, Decimal, Bytes, Str, IA5Str, Password, DNParam
from ipalib.parameters import (BytesEnum, StrEnum, IntEnum, AccessTime, File,
DateTime, DNSNameParam)
from ipalib.errors import SkipPluginModule
from ipalib.text import _, ngettext, GettextFactory, NGettextFactory
Registry = plugable.Registry
class API(plugable.API):
bases = (Command, Object, Method, Backend, Updater)
@property
def packages(self):
if self.env.in_server:
# pylint: disable=import-error,ipa-forbidden-import
import ipaserver.plugins
# pylint: enable=import-error,ipa-forbidden-import
result = (
ipaserver.plugins,
)
else:
import ipaclient.remote_plugins
import ipaclient.plugins
result = (
ipaclient.remote_plugins.get_package(self),
ipaclient.plugins,
)
if self.env.context in ('installer', 'updates'):
# pylint: disable=import-error,ipa-forbidden-import
import ipaserver.install.plugins
# pylint: enable=import-error,ipa-forbidden-import
result += (ipaserver.install.plugins,)
return result
def create_api(mode='dummy'):
"""
Return standard `plugable.API` instance.
This standard instance allows plugins that subclass from the following
base classes:
- `frontend.Command`
- `frontend.Object`
- `frontend.Method`
- `backend.Backend`
"""
api = API()
if mode is not None:
api.env.mode = mode
assert mode != 'production'
return api
api = create_api(mode=None)
|
gpl-3.0
| 9,104,041,449,937,767,000
| 34.220753
| 106
| 0.658136
| false
| 3.933871
| false
| false
| false
|
lnls-fac/scripts
|
bin/delete_dups.py
|
1
|
3559
|
#!/usr/bin/python
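# Usage sketch (inferred from main() below):
#   delete_dups.py FOLDER                      list duplicate groups found by fdupes
#   delete_dups.py FOLDER SUBSTRING            also highlight duplicates whose path contains SUBSTRING
#   delete_dups.py FOLDER SUBSTRING delete     delete the highlighted duplicates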
import subprocess
import sys
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
@staticmethod
def purple(string):
return bcolors.HEADER + string + bcolors.ENDC
@staticmethod
def blue(string):
return bcolors.OKBLUE + string + bcolors.ENDC
@staticmethod
def green(string):
return bcolors.OKGREEN + string + bcolors.ENDC
@staticmethod
def yellow(string):
return bcolors.WARNING + string + bcolors.ENDC
@staticmethod
def red(string):
return bcolors.FAIL + string + bcolors.ENDC
def get_size_str(size):
if size < 1024:
return '{0} bytes'.format(size)
elif size < 1024*1024:
return '{0:.1f} Kb'.format(1.0*size/1024.0)
elif size < 1024*1024*1024:
return '{0:.1f} Mb'.format(1.0*size/1024.0/1024.0)
else:
return '{0:.1f} Gb'.format(1.0*size/1024.0/1024.0/1024.0)
def grab_duplicates(folder, pname):
try:
p = subprocess.Popen(['fdupes', '-r', '-S', folder],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
print(''.join([pname, ': could not run fdupes command! Is package installed?']))
sys.exit()
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
lines = output.split('\n')
sizes, files = [],[]
size = None
for line in lines:
if 'bytes each:' in line:
if size is not None:
sizes.append(size)
files.append(dupfiles)
words = line.split(' ')
size = int(words[0])
dupfiles = []
elif len(line)>0:
dupfiles.append(line)
if size is not None:
sizes.append(size)
files.append(dupfiles)
idx = sorted(range(len(sizes)), key=lambda k: sizes[k]*(len(files[k])-1), reverse = True)
dups = [(sizes[i],files[i]) for i in idx]
return dups
def select_files_simple(dups):
size = 0
for dup in dups:
print(bcolors.yellow('size of each file: ' + get_size_str(dup[0])))
for fname in dup[1]:
print(fname)
size += dup[0] * (len(dup[1]) - 1)
print('')
print(bcolors.yellow('selection has ' + get_size_str(size) + ' of duplicates.'))
def select_files_substring(dups, substring):
size = 0
files = []
for dup in dups:
''' checks how many of duplicate files are selected '''
nr_included = 0
for fname in dup[1]:
if substring in fname:
nr_included += 1
if nr_included == 0:
continue
''' loops over files of duplicates that has at least one selection '''
print(bcolors.yellow('size of each file: ' + get_size_str(dup[0])))
for fname in dup[1]:
if substring in fname:
print(bcolors.blue(fname))
files.append(fname)
size += dup[0]
else:
print(fname)
''' in case all duplicate files are selected warns and exits '''
if nr_included == len(dup[1]):
print('')
print(bcolors.red('selection of all files in duplicate is not allowed!'))
sys.exit()
print('')
''' prints size of selection and returns list '''
print(bcolors.yellow('selection has ' + get_size_str(size) + ' of duplicates.'))
return files
def main():
pname = sys.argv[0]
folder = sys.argv[1]
dups = grab_duplicates(folder, pname)
if len(sys.argv) == 2:
select_files_simple(dups)
elif len(sys.argv) == 3:
substring = sys.argv[2]
substring = substring.strip('"')
files = select_files_substring(dups, substring)
elif (len(sys.argv) == 4) and (sys.argv[3] == 'delete'):
substring = sys.argv[2]
files = select_files_substring(dups, substring)
for fname in files:
os.remove(fname)
main()
|
mit
| 112,618,110,065,499,700
| 23.210884
| 90
| 0.650745
| false
| 2.835857
| false
| false
| false
|
akuendig/RxPython
|
rx/linq/takeLast.py
|
1
|
4061
|
from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal import Struct
from rx.observable import Producer
import rx.linq.sink
from collections import deque
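# Producers implementing the takeLast operator: TakeLastCount buffers the last
# `count` items and TakeLastTime buffers the items received within the trailing
# `duration`; both replay the buffer on the configured scheduler once the
# source sequence completes.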
class TakeLastCount(Producer):
def __init__(self, source, count, scheduler):
self.source = source
self.count = count
self.scheduler = scheduler
def run(self, observer, cancel, setSink):
sink = self.Sink(self, observer, cancel)
setSink(sink)
return sink.run()
class Sink(rx.linq.sink.Sink):
def __init__(self, parent, observer, cancel):
super(TakeLastCount.Sink, self).__init__(observer, cancel)
self.parent = parent
self.queue = deque()
def run(self):
self.subscription = SingleAssignmentDisposable()
self.loopDisposable = SingleAssignmentDisposable()
self.subscription.disposable = self.parent.source.subscribeSafe(self)
return CompositeDisposable(self.subscription, self.loopDisposable)
def onNext(self, value):
self.queue.append(value)
if len(self.queue) > self.parent.count:
self.queue.popleft()
def onError(self, exception):
self.observer.onError(exception)
self.dispose()
def onCompleted(self):
self.subscription.dispose()
scheduler = self.parent.scheduler
if scheduler.isLongRunning:
self.loopDisposable.disposable = scheduler.scheduleLongRunning(self.loop)
else:
self.loopDisposable.disposable = scheduler.scheduleRecursive(self.loopRec)
def loopRec(self, recurse):
if len(self.queue) > 0:
self.observer.onNext(self.queue.popleft())
recurse()
else:
self.observer.onCompleted()
self.dispose()
def loop(self, cancel):
while not cancel.isDisposed:
if len(self.queue) == 0:
self.observer.onCompleted()
break
else:
self.observer.onNext(self.queue.popleft())
self.dispose()
class TakeLastTime(Producer):
def __init__(self, source, duration, scheduler):
self.source = source
self.duration = duration
self.scheduler = scheduler
def run(self, observer, cancel, setSink):
sink = self.Sink(self, observer, cancel)
setSink(sink)
return sink.run()
class Sink(rx.linq.sink.Sink):
    def __init__(self, parent, observer, cancel):
      super(TakeLastTime.Sink, self).__init__(observer, cancel)
      self.parent = parent
      # Buffer for the trailing items; TakeLastCount.Sink has the same field.
      self.queue = deque()
def run(self):
self.subscription = SingleAssignmentDisposable()
      # Named loopDisposable so it does not shadow the loop() method below.
      self.loopDisposable = SingleAssignmentDisposable()
self.startTime = self.parent.scheduler.now()
self.subscription.disposable = self.parent.source.subscribeSafe(self)
      return CompositeDisposable(self.subscription, self.loopDisposable)
def elapsed(self):
return self.parent.scheduler.now() - self.startTime
    def trim(self, now):
      # Drop buffered values older than the takeLast duration.
      while len(self.queue) > 0:
        current = self.queue.popleft()
        if now - current.interval < self.parent.duration:
          self.queue.appendleft(current)
          break
def onNext(self, value):
now = self.elapsed()
self.queue.append(Struct(value=value,interval=now))
self.trim(now)
def onError(self, exception):
self.observer.onError(exception)
self.dispose()
def onCompleted(self):
self.subscription.dispose()
now = self.elapsed()
self.trim(now)
scheduler = self.parent.scheduler
if scheduler.isLongRunning:
        self.loopDisposable.disposable = scheduler.scheduleLongRunning(self.loop)
      else:
        self.loopDisposable.disposable = scheduler.scheduleRecursive(self.loopRec)
def loopRec(self, recurse):
if len(self.queue) > 0:
self.observer.onNext(self.queue.popleft().value)
recurse()
else:
self.observer.onCompleted()
self.dispose()
def loop(self, cancel):
while not cancel.isDisposed:
if len(self.queue) == 0:
self.observer.onCompleted()
break
else:
self.observer.onNext(self.queue.popleft().value)
self.dispose()
|
mit
| -6,765,602,060,497,232,000
| 27.013793
| 82
| 0.658951
| false
| 3.871306
| false
| false
| false
|
gem/oq-engine
|
openquake/hazardlib/near_fault.py
|
1
|
18706
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.nearfault` provides methods for near fault
PSHA calculation.
"""
import math
import numpy as np
from openquake.hazardlib.geo import geodetic as geod
import scipy.spatial.distance as dst
def get_xyz_from_ll(projected, reference):
"""
This method computes the x, y and z coordinates of a set of points
provided a reference point
:param projected:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the coordinates of target point to be projected
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the coordinates of the reference point.
:returns:
x
y
z
"""
azims = geod.azimuth(reference.longitude, reference.latitude,
projected.longitude, projected.latitude)
depths = np.subtract(reference.depth, projected.depth)
dists = geod.geodetic_distance(reference.longitude,
reference.latitude,
projected.longitude,
projected.latitude)
return (dists * math.sin(math.radians(azims)),
dists * math.cos(math.radians(azims)),
depths)
def get_plane_equation(p0, p1, p2, reference):
'''
    Define the equation of the target fault plane passing through 3 given
    points, which include two points on the fault trace and one point on the
    fault plane but away from the fault trace. Note: in order to maintain the
    consistency of the fault normal vector direction definition, the order
    of the three given points is strictly defined.
:param p0:
        The point on the fault trace that is closer to the starting point of
        the fault trace.
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the one vertex of the fault patch.
:param p1:
        The point on the fault trace that is further from the starting point of
        the fault trace.
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the one vertex of the fault patch.
:param p2:
The point on the fault plane but away from the fault trace.
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the one vertex of the fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the origin of the Cartesian system used to represent
        objects in a projected reference system
:returns:
normal: normal vector of the plane (a,b,c)
dist_to_plane: d in the plane equation, ax + by + cz = d
'''
p0_xyz = get_xyz_from_ll(p0, reference)
p1_xyz = get_xyz_from_ll(p1, reference)
p2_xyz = get_xyz_from_ll(p2, reference)
p0 = np.array(p0_xyz)
p1 = np.array(p1_xyz)
p2 = np.array(p2_xyz)
u = p1 - p0
v = p2 - p0
    # vector normal to plane, ax+by+cz = d, normal=(a,b,c)
normal = np.cross(u, v)
# Define the d for the plane equation
dist_to_plane = np.dot(p0, normal)
return normal, dist_to_plane
def projection_pp(site, normal, dist_to_plane, reference):
'''
This method finds the projection of the site onto the plane containing
    the slipped area, defined as Pp (i.e. the 'perpendicular projection of the
    site location onto the fault plane', Spudich et al. (2013), page 88),
    given a site.
:param site:
Location of the site, [lon, lat, dep]
:param normal:
Normal to the plane including the fault patch,
describe by a normal vector[a, b, c]
:param dist_to_plane:
D in the plane equation, ax + by + cz = d
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of project reference point
:returns:
        pp, the projection point, [ppx, ppy, ppz], in the xyz domain,
        a numpy array.
'''
# Transform to xyz coordinate
[site_x, site_y, site_z] = get_xyz_from_ll(site, reference)
a = np.array([(1, 0, 0, -normal[0]),
(0, 1, 0, -normal[1]),
(0, 0, 1, -normal[2]),
(normal[0], normal[1], normal[2], 0)])
b = np.array([site_x, site_y, site_z, dist_to_plane])
x = np.linalg.solve(a, b)
pp = np.array([x[0], x[1], x[2]])
return pp
def vectors2angle(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'.
:param v1:
vector, a numpy array
:param v2:
vector, a numpy array
:returns:
        the angle in radians between the two vectors
"""
cosang = np.dot(v1, v2)
sinang = np.linalg.norm(np.cross(v1, v2))
return np.arctan2(sinang, cosang)
def average_s_rad(site, hypocenter, reference, pp,
normal, dist_to_plane, e, p0, p1, delta_slip):
"""
Gets the average S-wave radiation pattern given an e-path as described in:
Spudich et al. (2013) "Final report of the NGA-West2 directivity working
group", PEER report, page 90- 92 and computes: the site to the direct point
distance, rd, and the hypocentral distance, r_hyp.
:param site:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the target site
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of hypocenter
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location
of the reference point for coordinate projection within the
calculation. The suggested reference point is Epicentre.
:param pp:
the projection point pp on the patch plane,
a numpy array
:param normal:
normal of the plane, describe by a normal vector[a, b, c]
:param dist_to_plane:
d is the constant term in the plane equation, e.g., ax + by + cz = d
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param p0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the starting point on fault segment
:param p1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the ending point on fault segment.
:param delta_slip:
slip direction away from the strike direction, in decimal degrees.
A positive angle is generated by a counter-clockwise rotation.
:returns:
fs, float value of the average S-wave radiation pattern.
rd, float value of the distance from site to the direct point.
        r_hyp, float value of the hypocentre distance.
"""
# Obtain the distance of Ps and Pp. If Ps is above the fault plane
# zs is positive, and negative when Ps is below the fault plane
site_xyz = get_xyz_from_ll(site, reference)
zs = dst.pdist([pp, site_xyz])
if site_xyz[0] * normal[0] + site_xyz[1] * normal[1] + site_xyz[2] * \
normal[2] - dist_to_plane > 0:
zs = -zs
# Obtain the distance of Pp and hypocentre
hyp_xyz = get_xyz_from_ll(hypocenter, reference)
hyp_xyz = np.array(hyp_xyz).reshape(1, 3).flatten()
l2 = dst.pdist([pp, hyp_xyz])
rd = ((l2 - e) ** 2 + zs ** 2) ** 0.5
r_hyp = (l2 ** 2 + zs ** 2) ** 0.5
p0_xyz = get_xyz_from_ll(p0, reference)
p1_xyz = get_xyz_from_ll(p1, reference)
u = (np.array(p1_xyz) - np.array(p0_xyz))
v = pp - hyp_xyz
phi = vectors2angle(u, v) - np.deg2rad(delta_slip)
ix = np.cos(phi) * (2 * zs * (l2 / r_hyp - (l2 - e) / rd) -
zs * np.log((l2 + r_hyp) / (l2 - e + rd)))
inn = np.cos(phi) * (-2 * zs ** 2 * (1 / r_hyp - 1 / rd)
- (r_hyp - rd))
iphi = np.sin(phi) * (zs * np.log((l2 + r_hyp) / (l2 - e + rd)))
# Obtain the final average radiation pattern value
fs = (ix ** 2 + inn ** 2 + iphi ** 2) ** 0.5 / e
return fs, rd, r_hyp
def isochone_ratio(e, rd, r_hyp):
"""
Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio
"""
if e == 0.:
c_prime = 0.8
elif e > 0.:
c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e))
return c_prime
def _intersection(seg1_start, seg1_end, seg2_start, seg2_end):
"""
Get the intersection point between two segments. The calculation is in
    the Cartesian coordinate system.
:param seg1_start:
A numpy array,
        representing one end point of the first segment (e.g. segment1).
:param seg1_end:
A numpy array,
representing the other end point of the first segment(e.g. segment1)
:param seg2_start:
A numpy array,
        representing one end point of the second segment (e.g. segment2).
:param seg2_end:
A numpy array,
representing the other end point of the second segment(e.g. segment2)
:returns:
        p_intersect, a numpy ndarray,
        representing the location of the intersection point of the two
        given segments
vector1, a numpy array, vector defined by intersection point and
seg2_end
vector2, a numpy array, vector defined by seg2_start and seg2_end
vector3, a numpy array, vector defined by seg1_start and seg1_end
vector4, a numpy array, vector defined by intersection point
and seg1_start
"""
pa = np.array([seg1_start, seg2_start])
pb = np.array([seg1_end, seg2_end])
si = pb - pa
ni = si / np.power(
np.dot(np.sum(si ** 2, axis=1).reshape(2, 1),
np.ones((1, 3))), 0.5)
nx = ni[:, 0].reshape(2, 1)
ny = ni[:, 1].reshape(2, 1)
nz = ni[:, 2].reshape(2, 1)
sxx = np.sum(nx ** 2 - 1)
syy = np.sum(ny ** 2 - 1)
szz = np.sum(nz ** 2 - 1)
sxy = np.sum(nx * ny)
sxz = np.sum(nx * nz)
syz = np.sum(ny * nz)
s = np.array([sxx, sxy, sxz, sxy, syy, syz, sxz, syz,
szz]).reshape(3, 3)
cx = np.sum(pa[:, 0].reshape(2, 1) * (nx ** 2 - 1) +
pa[:, 1].reshape(2, 1) * [nx * ny] +
pa[:, 2].reshape(2, 1) * (nx * nz))
cy = np.sum(pa[:, 0].reshape(2, 1) * [nx * ny] +
pa[:, 1].reshape(2, 1) * [ny ** 2 - 1] +
pa[:, 2].reshape(2, 1) * [ny * nz])
cz = np.sum(pa[:, 0].reshape(2, 1) * [nx * nz] +
pa[:, 1].reshape(2, 1) * [ny * nz] +
pa[:, 2].reshape(2, 1) * [nz ** 2 - 1])
c = np.array([cx, cy, cz]).reshape(3, 1)
p_intersect = np.linalg.solve(s, c)
vector1 = (p_intersect.flatten() - seg2_end) / \
sum((p_intersect.flatten() - seg2_end) ** 2) ** 0.5
vector2 = (seg2_start - seg2_end) / \
sum((seg2_start - seg2_end) ** 2) ** 0.5
vector3 = (seg1_end - seg1_start) / \
sum((seg1_end - seg1_start) ** 2) ** 0.5
vector4 = (p_intersect.flatten() - seg1_start) / \
sum((p_intersect.flatten() - seg1_start) ** 2) ** 0.5
return p_intersect, vector1, vector2, vector3, vector4
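# A minimal usage sketch (illustrative only, assuming 3-component float
# arrays). For two segments that actually cross, the least-squares point
# above reduces to the ordinary intersection:
#   >>> import numpy as np
#   >>> p, v1, v2, v3, v4 = _intersection(np.array([0., 0., 0.]),
#   ...                                   np.array([2., 0., 0.]),
#   ...                                   np.array([1., -1., 0.]),
#   ...                                   np.array([1., 1., 0.]))
#   >>> p.flatten()    # expected to be close to [1., 0., 0.]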
def directp(node0, node1, node2, node3, hypocenter, reference, pp):
"""
Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
    To define the intersection point (Pd) of the PpPh line segment and the
    fault plane, we obtain the intersection point (Pd) with each side of the
    fault plane and check which intersection point (Pd) is the one fitting
    the definition in the Chiou and Spudich (2014) directivity model.
    There are two possible locations for Pd. In the first case, Pd lies on a
    side of the fault patch when Pp is not inside the fault patch. In the
    second case, Pp is inside the fault patch, and then Pd = Pp.
    The first case must satisfy three conditions:
    1. the PpPh and PdPh line vectors are the same,
    2. PpPh >= PdPh,
    3. Pd is not inside the fault patch.
    If no solution is found among the four possible intersection points for
    the first case, we check whether the intersection point fits the second
    case by checking if Pp is inside the fault patch.
    Because of the coordinate system mapping (from the geographic system to
    the Cartesian system), we allow a tolerance when checking the location.
    The allowed tolerance keeps increasing after each loop in which no
    solution is found for either case, until a solution is obtained.
    :param node0:
        :class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
        segment.
    :param node1:
        :class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
        segment. Note, the order should be clockwise.
    :param node2:
        :class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
        segment. Note, the order should be clockwise.
    :param node3:
        :class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
        segment. Note, the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of the floating hypocentre for each segment
        calculation. In this method, we take the direct point of the
        previous fault patch as the hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
        Pd, a numpy array, representing the location of the direct point.
        E, the distance from the direct point to the hypocentre.
        go_next_patch, a flag indicating whether the calculation continues on
        the next fault patch. True: yes, False: no.
"""
# Find the intersection point Pd, by checking if the PdPh share the
# same vector with PpPh, and PpPh >= PdPh
# Transform to xyz coordinate
node0_xyz = get_xyz_from_ll(node0, reference)
node1_xyz = get_xyz_from_ll(node1, reference)
node2_xyz = get_xyz_from_ll(node2, reference)
node3_xyz = get_xyz_from_ll(node3, reference)
hypocenter_xyz = get_xyz_from_ll(hypocenter, reference)
hypocenter_xyz = np.array(hypocenter_xyz).flatten()
pp_xyz = pp
e = []
# Loop each segments on the patch to find Pd
segment_s = [node0_xyz, node1_xyz, node2_xyz, node3_xyz]
segment_e = [node1_xyz, node2_xyz, node3_xyz, node0_xyz]
    # set the buffer distance and tolerance
buf = 0.0001
atol = 0.0001
loop = True
exit_flag = False
looptime = 0.
while loop:
x_min = np.min(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) - buf
x_max = np.max(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) + buf
y_min = np.min(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) - buf
y_max = np.max(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) + buf
n_seg = 0
exit_flag = False
for (seg_s, seg_e) in zip(segment_s, segment_e):
seg_s = np.array(seg_s).flatten()
seg_e = np.array(seg_e).flatten()
p_intersect, vector1, vector2, vector3, vector4 = _intersection(
seg_s, seg_e, pp_xyz, hypocenter_xyz)
ppph = dst.pdist([pp, hypocenter_xyz])
pdph = dst.pdist([p_intersect.flatten(), hypocenter_xyz])
n_seg = n_seg + 1
            # Check that the directions of the hyp-pp and hyp-pd vectors
            # are the same.
if (np.allclose(vector1.flatten(), vector2,
atol=atol, rtol=0.)):
if ((np.allclose(vector3.flatten(), vector4, atol=atol,
rtol=0.))):
# Check if ppph >= pdph.
if (ppph >= pdph):
if (p_intersect[0] >= x_min) & (p_intersect[0] <=
x_max):
if (p_intersect[1] >= y_min) & (p_intersect[1]
<= y_max):
e = pdph
pd = p_intersect
exit_flag = True
break
# when the pp located within the fault rupture plane, e = ppph
if not e:
if (pp_xyz[0] >= x_min) & (pp_xyz[0] <= x_max):
if (pp_xyz[1] >= y_min) & (pp_xyz[1] <= y_max):
pd = pp_xyz
e = ppph
exit_flag = True
if exit_flag:
break
if not e:
looptime += 1
atol = 0.0001 * looptime
buf = 0.0001 * looptime
# if pd is located at 2nd fault segment, then the DPP calculation will
# keep going on the next fault patch
if n_seg == 2:
go_next_patch = True
else:
go_next_patch = False
return pd, e, go_next_patch
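# A hypothetical call shape (illustrative only; the Point objects, the
# reference point and the projected site coordinates `pp` would normally be
# prepared by the calling directivity routine):
#   pd, e_path, go_next = directp(node0, node1, node2, node3,
#                                 hypocenter, reference, pp)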
|
agpl-3.0
| -4,038,285,033,445,288,400
| 37.64876
| 79
| 0.599326
| false
| 3.541462
| false
| false
| false
|
MySportsFeeds/mysportsfeeds-python
|
ohmysportsfeedspy/v1_0.py
|
1
|
7330
|
import os
import csv
import requests
from datetime import datetime
import simplejson as json
import platform
import base64
import ohmysportsfeedspy
# API class for dealing with v1.0 of the API
class API_v1_0(object):
# Constructor
def __init__(self, verbose, store_type=None, store_location=None):
self.base_url = "https://api.mysportsfeeds.com/v1.0/pull"
self.headers = {
'Accept-Encoding': 'gzip',
'User-Agent': 'MySportsFeeds Python/{} ({})'.format(ohmysportsfeedspy.__version__, platform.platform())
}
self.verbose = verbose
self.store_type = store_type
self.store_location = store_location
self.valid_feeds = [
'cumulative_player_stats',
'full_game_schedule',
'daily_game_schedule',
'daily_player_stats',
'game_boxscore',
'scoreboard',
'game_playbyplay',
'player_gamelogs',
'team_gamelogs',
'roster_players',
'game_startinglineup',
'active_players',
'overall_team_standings',
'conference_team_standings',
'division_team_standings',
'playoff_team_standings',
'player_injuries',
'daily_dfs',
'current_season',
'latest_updates',
]
# Verify a feed
def __verify_feed(self, feedName):
is_valid = False
for feed in self.valid_feeds:
if feed == feedName:
is_valid = True
break
return is_valid
# Verify output format
def __verify_format(self, format):
is_valid = True
if format != 'json' and format != 'xml' and format != 'csv':
is_valid = False
return is_valid
# Feed URL
def determine_url(self, league, season, feed, output_format, params):
if feed == "current_season":
return "{base_url}/{league}/{feed}.{output}".format(base_url=self.base_url, feed=feed, league=league, season=season, output=output_format)
else:
return "{base_url}/{league}/{season}/{feed}.{output}".format(base_url=self.base_url, feed=feed, league=league, season=season, output=output_format)
# Generate the appropriate filename for a feed request
def __make_output_filename(self, league, season, feed, output_format, params):
filename = "{feed}-{league}-{season}".format(league=league.lower(),
season=season,
feed=feed)
if "gameid" in params:
filename += "-" + params["gameid"]
if "fordate" in params:
filename += "-" + params["fordate"]
filename += "." + output_format
return filename
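    # For example (illustrative values only): league='nba', season='2017-playoff',
    # feed='game_boxscore', output_format='json' and
    # params={'gameid': '20170201-BOS-TOR'} would yield
    # 'game_boxscore-nba-2017-playoff-20170201-BOS-TOR.json'.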
# Save a feed response based on the store_type
def __save_feed(self, response, league, season, feed, output_format, params):
# Save to memory regardless of selected method
if output_format == "json":
store_output = response.json()
elif output_format == "xml":
store_output = response.text
elif output_format == "csv":
#store_output = response.content.split('\n')
store_output = response.content.decode('utf-8')
store_output = csv.reader(store_output.splitlines(), delimiter=',')
store_output = list(store_output)
if self.store_type == "file":
if not os.path.isdir(self.store_location):
os.mkdir(self.store_location)
filename = self.__make_output_filename(league, season, feed, output_format, params)
with open(self.store_location + filename, "w") as outfile:
if output_format == "json": # This is JSON
json.dump(store_output, outfile)
elif output_format == "xml": # This is xml
outfile.write(store_output)
elif output_format == "csv": # This is csv
writer = csv.writer(outfile)
for row in store_output:
writer.writerow([row])
else:
raise AssertionError("Could not interpret feed output format")
# Indicate this version does support BASIC auth
def supports_basic_auth(self):
return True
# Establish BASIC auth credentials
def set_auth_credentials(self, username, password):
self.auth = (username, password)
self.headers['Authorization'] = 'Basic ' + base64.b64encode('{}:{}'.format(username,password).encode('utf-8')).decode('ascii')
# Request data (and store it if applicable)
def get_data(self, **kwargs):
        if not getattr(self, 'auth', None):
            raise AssertionError("You must authenticate() before making requests.")
# establish defaults for all variables
league = ""
season = ""
feed = ""
output_format = ""
params = {}
# iterate over args and assign vars
for key, value in kwargs.items():
if str(key) == 'league':
league = value
elif str(key) == 'season':
if kwargs['feed'] == 'players':
params['season'] = value
else:
season = value
elif str(key) == 'feed':
feed = value
elif str(key) == 'format':
output_format = value
else:
params[key] = value
# add force=false parameter (helps prevent unnecessary bandwidth use)
if not "force" in params:
params['force'] = 'false'
if self.__verify_feed(feed) == False:
raise ValueError("Unknown feed '" + feed + "'. Known values are: " + str(self.valid_feeds))
if self.__verify_format(output_format) == False:
raise ValueError("Unsupported format '" + output_format + "'.")
url = self.determine_url(league, season, feed, output_format, params)
if self.verbose:
print("Making API request to '{}'.".format(url))
print(" with headers:")
print(self.headers)
print(" and params:")
print(params)
r = requests.get(url, params=params, headers=self.headers)
if r.status_code == 200:
if self.store_type != None:
self.__save_feed(r, league, season, feed, output_format, params)
if output_format == "json":
data = json.loads(r.content)
elif output_format == "xml":
data = str(r.content)
else:
data = r.content.splitlines()
elif r.status_code == 304:
if self.verbose:
print("Data hasn't changed since last call")
filename = self.__make_output_filename(league, season, feed, output_format, params)
with open(self.store_location + filename) as f:
if output_format == "json":
data = json.load(f)
elif output_format == "xml":
data = str(f.readlines()[0])
else:
data = f.read().splitlines()
else:
raise Warning("API call failed with error:", r.status_code)
return data
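# A hypothetical end-to-end usage sketch (credentials, store path and feed
# values are placeholders, not part of the original file):
#   api = API_v1_0(verbose=True, store_type='file', store_location='results/')
#   api.set_auth_credentials('example-user', 'example-password')
#   data = api.get_data(league='nba', season='2016-2017-regular',
#                       feed='cumulative_player_stats', format='json')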
|
mit
| 4,933,645,844,157,687,000
| 33.413146
| 159
| 0.54543
| false
| 4.276546
| false
| false
| false
|
ua-snap/downscale
|
snap_scripts/epscor_sc/older_epscor_sc_scripts_archive/downscaled_data_to_netcdf_epscor_se.py
|
1
|
9999
|
# convert the downscaled data archive
def run( x ):
''' simple wrapper to open and return a 2-D array from a geotiff '''
import rasterio
return rasterio.open(x).read(1)
def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ):
'''
sort a list of files properly using the month and year parsed
from the filename. This is useful with SNAP data since the standard
is to name files like '<prefix>_MM_YYYY.tif'. If sorted using base
	Python's sort/sorted functions, things will be sorted by the first char
	of the month, which makes things go 1, 11, ... which sucks for a
	timeseries. This sorts it properly following SNAP standards with the
	default settings.
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_month = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-2. For SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sorted `list` by month and year ascending.
'''
import pandas as pd
months = [ int(fn.split('.')[0].split( split_on )[elem_month]) for fn in files ]
years = [ int(fn.split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} )
df_sorted = df.sort_values( ['year', 'month' ] )
return df_sorted.fn.tolist()
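# A small illustrative sketch (hypothetical filenames) of the sorting
# behaviour, assuming SNAP-style '<prefix>_MM_YYYY.tif' names:
#   >>> sort_files(['tas_2_2001.tif', 'tas_11_2000.tif', 'tas_1_2001.tif'])
#   ['tas_11_2000.tif', 'tas_1_2001.tif', 'tas_2_2001.tif']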
def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ):
'''
return new list of filenames where they are truncated to begin:end
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
begin = [int] four digit integer year of the begin time default:1901
end = [int] four digit integer year of the end time default:2100
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sliced `list` to begin and end year.
'''
import pandas as pd
years = [ int(fn.split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( { 'fn':files, 'year':years } )
df_slice = df[ (df.year >= begin ) & (df.year <= end ) ]
return df_slice.fn.tolist()
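# Likewise (hypothetical filenames), slicing to an inclusive year range:
#   >>> only_years(['tas_1_1899.tif', 'tas_1_1950.tif', 'tas_1_2101.tif'],
#   ...            begin=1901, end=2100)
#   ['tas_1_1950.tif']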
# seasonal calculations
def coordinates( fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False ):
'''
take a raster file as input and return the centroid coords for each
of the grid cells as a pair of numpy 2d arrays (longitude, latitude)
'''
import rasterio
import numpy as np
from affine import Affine
from pyproj import Proj, transform
if fn:
# Read raster
with rasterio.open( fn ) as r:
T0 = r.affine # upper-left pixel corner affine transform
p1 = Proj( r.crs )
A = r.read( 1 ) # pixel values
elif (meta is not None) & (numpy_array is not None):
A = numpy_array
if input_crs != None:
p1 = Proj( input_crs )
T0 = meta[ 'affine' ]
else:
p1 = None
T0 = meta[ 'affine' ]
else:
		raise BaseException( 'check inputs' )
# All rows and columns
cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
# Get affine transform for pixel centres
T1 = T0 * Affine.translation( 0.5, 0.5 )
# Function to convert pixel row/column index (from 0) to easting/northing at centre
rc2en = lambda r, c: ( c, r ) * T1
# All eastings and northings (there is probably a faster way to do this)
eastings, northings = np.vectorize(rc2en, otypes=[np.float, np.float])(rows, cols)
if to_latlong == False:
return eastings, northings
elif (to_latlong == True) & (input_crs != None):
# Project all longitudes, latitudes
longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
return longs, lats
else:
		raise BaseException( 'cant reproject to latlong without an input_crs' )
# def cf_attrs( scenario, model, contact='Michael Lindgren - malindgren@alaska.edu', ):
# '''
# generate the cf_metadata convention attributes for the NC file
# CONVENTION SPEC HERE:
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html
# '''
# {'institution': 'Scenarios Network for Alaska + Arctic Planning' ,
# 'institute_id': 'SNAP',
# 'experiment_id':scenario,
# 'source':model,
# 'model_id':model,
# 'forcing':,
# 'parent_experiment_id': ,
# 'parent_experiment_rip': ,
# 'branch_time': ,
# 'contact':contact,
# 'references': ,
# 'initialization_method': ,
# 'physics_version': ,
# 'tracking_id': ,
# 'acknowledgements': ,
# 'cesm_casename': ,
# 'cesm_repotag': ,
# 'cesm_compset': ,
# 'resolution': ,
# 'forcing_note': ,
# 'processed_by': ,
# 'processing_code_information': ,
# 'product': ,
# 'experiment': ,
# 'frequency': ,
# 'creation_date': ,
# 'history': ,
# 'Conventions':'CF-1.6' ,
# 'project_id': ,
# 'table_id': ,
# 'title': ,
# 'parent_experiment': ,
# 'modeling_realm': ,
# 'realization': ,
# 'cmor_version': }
def generate_nc( model, variable, scenario, base_path, output_base_path, begin, end ):
'''
main function to output a netcdf file from a group of
GeoTiff files of downscaled SNAP data.
[MORE DOCS TO COME]
'''
# from pathos.multiprocessing import Pool
from multiprocessing import Pool
import numpy as np
import pandas as pd
import os, glob, rasterio, time, itertools
import xarray as xr
print( 'working on: {} {} {}'.format( variable, model, scenario ) )
# set up pathing
input_path = os.path.join( base_path, model, scenario, variable )
output_path = os.path.join( output_base_path, model, scenario, variable )
try: # try:except to overcome some multiprocessing collision issues
if not os.path.exists( output_path ):
os.makedirs( output_path )
except:
pass
# list the data
l = sort_files( glob.glob( os.path.join( input_path, '*.tif' ) ) )
l = only_years( l, begin=begin, end=end )
# open a pool and turn the list of arrays into an ndarray
pool = Pool( ncpus )
arr = np.array( pool.map( run, l ) )
pool.close()
pool.join()
# mask it
arr = np.ma.masked_where( arr <= np.min( arr ), arr )
# [RECENT ADDITION] swap the axes so we are (lat, lon, time)
arr = np.swapaxes( np.swapaxes(arr, 0, 2), 0, 1)
# get the lons and lats for the NetCDF
lons, lats = coordinates( l[0] )
rst = rasterio.open( l[0] )
# THIS IS A TEST AREA FOR PRODUCING THE *_bnds variables -- NOT IMPLEMENTED
# # the res is standard in both directions.
# res = 2000.0
# half_res = 2000.0 / 2
# lon_bnds = [ [i-half_res,i+half_res ] for i in lons.ravel() ]
# # the lat_bnds variable appears to be the same as the above, but it is
# # forced to the extent of the map so the lat_bnds at the top and bottom are
# # different resolution (half) of the remainder of the rectilinear grid cells.
# # this needs to be taken into account in this calculation.
# # MAYBE JUST HOLD IT TO THE EXTENT FOR THESE LATITUDES?
# lat_bnds = [ [i-half_res,i+half_res ] for i in lats.ravel() ]
# lat_mins, lat_max = rst.bounds
# get some time and date stuff
t = time.time()
# OGC WKT for EPSG:3338 which is the CF standard.
crs_wkt = 'PROJCS["NAD83 / Alaska Albers",GEOGCS["NAD83",DATUM["North_American_Datum_1983",\
SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],\
AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],\
AUTHORITY["EPSG","4269"]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["standard_parallel_1",55],\
PARAMETER["standard_parallel_2",65],PARAMETER["latitude_of_center",50],PARAMETER["longitude_of_center",-154],\
PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],\
AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3338"]]'
# create the dataset in xarray
ds = xr.Dataset( { variable:(['x','y','time'], arr) },
coords={ 'lon': (['x', 'y'], lons),
'lat': (['x', 'y'], lats),
'time': pd.date_range( str(begin), str(end + 1), freq='M' ) },
attrs={ 'units':'Celcius', 'time_interval':'monthly',
'variable':variable, 'model':model, 'scenario':scenario,
'crs_wkt':crs_wkt,
'creation_date':time.ctime( t ), 'creation_date_UTC':t,
'created by':'Michael Lindgren - malindgren@alaska.edu',
'nodata_value':'-3.39999995e+38',
'cell_resolution':'2000 meters' } )
# write it out to disk
encoding = { variable: { '_FillValue':-3.39999995e+38, 'zlib':True } }
output_filename = os.path.join( output_path, '_'.join([ variable, model, scenario, str( begin ), str( end ) ]) + '.nc' )
ds.to_netcdf( output_filename, mode='w', encoding=encoding )
ds.close() # close it
return output_filename
if __name__ == '__main__':
import os, glob
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
args = parser.parse_args()
# setup args
base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5'
output_base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5_netcdf'
units = 'C'
time_interval = 'monthly'
ncpus = 32
if args.scenario == 'historical':
begin = 1900
end = 2005
else:
begin = 2006
end = 2100
# main
_ = generate_nc( args.model, args.variable, args.scenario, base_path, output_base_path, begin, end )
|
mit
| 2,371,635,134,616,050,000
| 36.037037
| 137
| 0.667767
| false
| 2.840625
| false
| false
| false
|
apache/airflow
|
tests/plugins/test_plugins_manager.py
|
2
|
14930
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import importlib
import logging
import os
import sys
import tempfile
from unittest import mock
import pytest
from airflow.hooks.base import BaseHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_plugins import mock_plugin_manager
importlib_metadata_string = 'importlib_metadata'
try:
import importlib_metadata
# If importlib_metadata is installed, it takes precedence over built-in importlib.metadata in PY39
# so we should use the default declared above
except ImportError:
try:
import importlib.metadata
# only when we do not have importlib_metadata, the importlib.metadata is actually used
        importlib_metadata_string = 'importlib.metadata'
except ImportError:
raise Exception(
"Either importlib_metadata must be installed or importlib.metadata must be"
" available in system libraries (Python 3.9+). We seem to have neither."
)
ON_LOAD_EXCEPTION_PLUGIN = """
from airflow.plugins_manager import AirflowPlugin
class AirflowTestOnLoadExceptionPlugin(AirflowPlugin):
name = 'preload'
def on_load(self, *args, **kwargs):
raise Exception("oops")
"""
class TestPluginsRBAC:
@pytest.fixture(autouse=True)
def _set_attrs(self, app):
self.app = app
self.appbuilder = app.appbuilder
def test_flaskappbuilder_views(self):
from tests.plugins.test_plugin import v_appbuilder_package
appbuilder_class_name = str(v_appbuilder_package['view'].__class__.__name__)
plugin_views = [
view for view in self.appbuilder.baseviews if view.blueprint.name == appbuilder_class_name
]
assert len(plugin_views) == 1
# view should have a menu item matching category of v_appbuilder_package
links = [
menu_item
for menu_item in self.appbuilder.menu.menu
if menu_item.name == v_appbuilder_package['category']
]
assert len(links) == 1
# menu link should also have a link matching the name of the package.
link = links[0]
assert link.name == v_appbuilder_package['category']
assert link.childs[0].name == v_appbuilder_package['name']
def test_flaskappbuilder_menu_links(self):
from tests.plugins.test_plugin import appbuilder_mitem, appbuilder_mitem_toplevel
# menu item (category) should exist matching appbuilder_mitem.category
categories = [
menu_item
for menu_item in self.appbuilder.menu.menu
if menu_item.name == appbuilder_mitem['category']
]
assert len(categories) == 1
# menu link should be a child in the category
category = categories[0]
assert category.name == appbuilder_mitem['category']
assert category.childs[0].name == appbuilder_mitem['name']
assert category.childs[0].href == appbuilder_mitem['href']
# a top level link isn't nested in a category
top_levels = [
menu_item
for menu_item in self.appbuilder.menu.menu
if menu_item.name == appbuilder_mitem_toplevel['name']
]
assert len(top_levels) == 1
link = top_levels[0]
assert link.href == appbuilder_mitem_toplevel['href']
assert link.label == appbuilder_mitem_toplevel['label']
def test_app_blueprints(self):
from tests.plugins.test_plugin import bp
# Blueprint should be present in the app
assert 'test_plugin' in self.app.blueprints
assert self.app.blueprints['test_plugin'].name == bp.name
def test_flaskappbuilder_nomenu_views():
from tests.plugins.test_plugin import v_nomenu_appbuilder_package
class AirflowNoMenuViewsPlugin(AirflowPlugin):
appbuilder_views = [v_nomenu_appbuilder_package]
appbuilder_class_name = str(v_nomenu_appbuilder_package['view'].__class__.__name__)
with mock_plugin_manager(plugins=[AirflowNoMenuViewsPlugin()]):
appbuilder = application.create_app(testing=True).appbuilder
plugin_views = [view for view in appbuilder.baseviews if view.blueprint.name == appbuilder_class_name]
assert len(plugin_views) == 1
class TestPluginsManager:
def test_no_log_when_no_plugins(self, caplog):
with mock_plugin_manager(plugins=[]):
from airflow import plugins_manager
plugins_manager.ensure_plugins_loaded()
assert caplog.record_tuples == []
def test_should_load_plugins_from_property(self, caplog):
class AirflowTestPropertyPlugin(AirflowPlugin):
name = "test_property_plugin"
@property
def hooks(self):
class TestPropertyHook(BaseHook):
pass
return [TestPropertyHook]
with mock_plugin_manager(plugins=[AirflowTestPropertyPlugin()]):
from airflow import plugins_manager
caplog.set_level(logging.DEBUG, "airflow.plugins_manager")
plugins_manager.ensure_plugins_loaded()
assert 'AirflowTestPropertyPlugin' in str(plugins_manager.plugins)
assert 'TestPropertyHook' in str(plugins_manager.registered_hooks)
assert caplog.records[-1].levelname == 'DEBUG'
assert caplog.records[-1].msg == 'Loading %d plugin(s) took %.2f seconds'
def test_loads_filesystem_plugins(self, caplog):
from airflow import plugins_manager
with mock.patch('airflow.plugins_manager.plugins', []):
plugins_manager.load_plugins_from_plugin_directory()
assert 5 == len(plugins_manager.plugins)
for plugin in plugins_manager.plugins:
if 'AirflowTestOnLoadPlugin' not in str(plugin):
continue
assert 'postload' == plugin.name
break
else:
pytest.fail("Wasn't able to find a registered `AirflowTestOnLoadPlugin`")
assert caplog.record_tuples == []
def test_loads_filesystem_plugins_exception(self, caplog):
from airflow import plugins_manager
with mock.patch('airflow.plugins_manager.plugins', []):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'testplugin.py'), "w") as f:
f.write(ON_LOAD_EXCEPTION_PLUGIN)
with conf_vars({('core', 'plugins_folder'): tmpdir}):
plugins_manager.load_plugins_from_plugin_directory()
assert plugins_manager.plugins == []
received_logs = caplog.text
assert 'Failed to import plugin' in received_logs
assert 'testplugin.py' in received_logs
def test_should_warning_about_incompatible_plugins(self, caplog):
class AirflowAdminViewsPlugin(AirflowPlugin):
name = "test_admin_views_plugin"
admin_views = [mock.MagicMock()]
class AirflowAdminMenuLinksPlugin(AirflowPlugin):
name = "test_menu_links_plugin"
menu_links = [mock.MagicMock()]
with mock_plugin_manager(
plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
assert caplog.record_tuples == [
(
"airflow.plugins_manager",
logging.WARNING,
"Plugin 'test_admin_views_plugin' may not be compatible with the current Airflow version. "
"Please contact the author of the plugin.",
),
(
"airflow.plugins_manager",
logging.WARNING,
"Plugin 'test_menu_links_plugin' may not be compatible with the current Airflow version. "
"Please contact the author of the plugin.",
),
]
def test_should_not_warning_about_fab_plugins(self, caplog):
class AirflowAdminViewsPlugin(AirflowPlugin):
name = "test_admin_views_plugin"
appbuilder_views = [mock.MagicMock()]
class AirflowAdminMenuLinksPlugin(AirflowPlugin):
name = "test_menu_links_plugin"
appbuilder_menu_items = [mock.MagicMock()]
with mock_plugin_manager(
plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
assert caplog.record_tuples == []
def test_should_not_warning_about_fab_and_flask_admin_plugins(self, caplog):
class AirflowAdminViewsPlugin(AirflowPlugin):
name = "test_admin_views_plugin"
admin_views = [mock.MagicMock()]
appbuilder_views = [mock.MagicMock()]
class AirflowAdminMenuLinksPlugin(AirflowPlugin):
name = "test_menu_links_plugin"
menu_links = [mock.MagicMock()]
appbuilder_menu_items = [mock.MagicMock()]
with mock_plugin_manager(
plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
assert caplog.record_tuples == []
def test_entrypoint_plugin_errors_dont_raise_exceptions(self, caplog):
"""
Test that Airflow does not raise an error if there is any Exception because of a plugin.
"""
from airflow.plugins_manager import import_errors, load_entrypoint_plugins
mock_dist = mock.Mock()
mock_entrypoint = mock.Mock()
mock_entrypoint.name = 'test-entrypoint'
mock_entrypoint.group = 'airflow.plugins'
mock_entrypoint.module = 'test.plugins.test_plugins_manager'
mock_entrypoint.load.side_effect = ImportError('my_fake_module not found')
mock_dist.entry_points = [mock_entrypoint]
with mock.patch(
f'{importlib_metadata_string}.distributions', return_value=[mock_dist]
), caplog.at_level(logging.ERROR, logger='airflow.plugins_manager'):
load_entrypoint_plugins()
received_logs = caplog.text
# Assert Traceback is shown too
assert "Traceback (most recent call last):" in received_logs
assert "my_fake_module not found" in received_logs
assert "Failed to import plugin test-entrypoint" in received_logs
assert ("test.plugins.test_plugins_manager", "my_fake_module not found") in import_errors.items()
def test_registering_plugin_macros(self, request):
"""
Tests whether macros that originate from plugins are being registered correctly.
"""
from airflow import macros
from airflow.plugins_manager import integrate_macros_plugins
def cleanup_macros():
"""Reloads the airflow.macros module such that the symbol table is reset after the test."""
# We're explicitly deleting the module from sys.modules and importing it again
# using import_module() as opposed to using importlib.reload() because the latter
# does not undo the changes to the airflow.macros module that are being caused by
# invoking integrate_macros_plugins()
del sys.modules['airflow.macros']
importlib.import_module('airflow.macros')
request.addfinalizer(cleanup_macros)
def custom_macro():
return 'foo'
class MacroPlugin(AirflowPlugin):
name = 'macro_plugin'
macros = [custom_macro]
with mock_plugin_manager(plugins=[MacroPlugin()]):
# Ensure the macros for the plugin have been integrated.
integrate_macros_plugins()
# Test whether the modules have been created as expected.
plugin_macros = importlib.import_module(f"airflow.macros.{MacroPlugin.name}")
for macro in MacroPlugin.macros:
# Verify that the macros added by the plugin are being set correctly
# on the plugin's macro module.
assert hasattr(plugin_macros, macro.__name__)
# Verify that the symbol table in airflow.macros has been updated with an entry for
# this plugin, this is necessary in order to allow the plugin's macros to be used when
# rendering templates.
assert hasattr(macros, MacroPlugin.name)
class TestPluginsDirectorySource:
def test_should_return_correct_path_name(self):
from airflow import plugins_manager
source = plugins_manager.PluginsDirectorySource(__file__)
assert "test_plugins_manager.py" == source.path
assert "$PLUGINS_FOLDER/test_plugins_manager.py" == str(source)
assert "<em>$PLUGINS_FOLDER/</em>test_plugins_manager.py" == source.__html__()
class TestEntryPointSource:
def test_should_return_correct_source_details(self):
from airflow import plugins_manager
mock_entrypoint = mock.Mock()
mock_entrypoint.name = 'test-entrypoint-plugin'
mock_entrypoint.module = 'module_name_plugin'
mock_dist = mock.Mock()
mock_dist.metadata = {'name': 'test-entrypoint-plugin'}
mock_dist.version = '1.0.0'
mock_dist.entry_points = [mock_entrypoint]
with mock.patch(f'{importlib_metadata_string}.distributions', return_value=[mock_dist]):
plugins_manager.load_entrypoint_plugins()
source = plugins_manager.EntryPointSource(mock_entrypoint, mock_dist)
assert str(mock_entrypoint) == source.entrypoint
assert "test-entrypoint-plugin==1.0.0: " + str(mock_entrypoint) == str(source)
assert "<em>test-entrypoint-plugin==1.0.0:</em> " + str(mock_entrypoint) == source.__html__()
|
apache-2.0
| -9,114,536,456,285,706,000
| 37.981723
| 110
| 0.649565
| false
| 4.330046
| true
| false
| false
|
danielholmstrom/nose-pyversion
|
setup.py
|
1
|
1770
|
"""
~~~~~~~~~~~~~~~~~~~~~
Nose-Pyversion-Plugin
~~~~~~~~~~~~~~~~~~~~~
"""
import os
import sys
from setuptools import find_packages, setup
# Required for nose.collector, see http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except ImportError:
pass
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
# Requirements for the package
install_requires = [
'nose',
]
# Requirement for running tests
test_requires = install_requires
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='Nose-PyVersion',
version='0.1b1',
description="Nose plugin for excluding files based on python version",
long_description=README,
url='http://github.com/danielholmstrom/nose-pyversion/',
license='MIT',
author='Daniel Holmstrom',
author_email='holmstrom.daniel@gmail.com',
platforms='any',
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: '
'Libraries :: Python Modules'],
py_modules=['nose_pyversion'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=test_requires,
test_suite='nose.collector',
entry_points="""
[nose.plugins]
pyversion=nose_pyversion:PyVersion
""",
**extra
)
|
mit
| -7,525,720,973,531,597,000
| 27.548387
| 78
| 0.596045
| false
| 4.078341
| false
| false
| false
|
egbertbouman/tribler-g
|
Tribler/Tools/dirtrackerseeder.py
|
1
|
6798
|
# Written by Arno Bakker
# see LICENSE.txt for license information
#
# Razvan Deaconescu, 2008:
# * corrected problem when running in background
# * added usage and print_version functions
# * uses getopt for command line argument parsing
import sys
import shutil
import time
import tempfile
import random
import os
import getopt
from traceback import print_exc
from Tribler.__init__ import LIBRARYNAME
from Tribler.Core.API import *
from Tribler.Core.BitTornado.__init__ import version, report_email
MAXUPLOAD = 1000 # KB/s or None
checkpointedwhenseeding = False
sesjun = None
def usage():
print "Usage: python dirseeder.py [options] directory"
print "Options:"
print "\t--port <port>"
print "\t-p <port>\t\tuse <port> to listen for connections"
print "\t\t\t\t(default is random value)"
print "\tdirectory (default is current)"
print "\t--seeder\t\t\tseeder only"
print "\t--version"
print "\t-v\t\t\tprint version and exit"
print "\t--help"
print "\t-h\t\t\tprint this help screen"
print
print "Report bugs to <" + report_email + ">"
def print_version():
print version, "<" + report_email + ">"
def states_callback(dslist):
allseeding = True
for ds in dslist:
state_callback(ds)
if ds.get_status() != DLSTATUS_SEEDING:
allseeding = False
global checkpointedwhenseeding
global sesjun
if len(dslist) > 0 and allseeding and not checkpointedwhenseeding:
checkpointedwhenseeding = True
print >>sys.stderr,"All seeding, checkpointing Session to enable quick restart"
sesjun.checkpoint()
return (1.0, False)
def state_callback(ds):
d = ds.get_download()
# print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \
(`d.get_def().get_name()`, \
dlstatus_strings[ds.get_status()], \
ds.get_progress() * 100, \
ds.get_error(), \
ds.get_current_speed(UPLOAD), \
ds.get_current_speed(DOWNLOAD))
return (1.0, False)
def main():
try:
        opts, args = getopt.getopt(sys.argv[1:], "hvp:", ["help", "version", "port=", "seeder"])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
# init to default values
port = 6969
tracking = True
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-p", "--port"):
port = int(a)
elif o in ("-p", "--port"):
port = int(a)
elif o in ("--seeder"):
tracking = False
elif o in ("-v", "--version"):
print_version()
sys.exit(0)
else:
assert False, "unhandled option"
if len(args) > 1:
print "Too many arguments"
usage()
sys.exit(2)
elif len(args) == 0:
torrentsdir = os.getcwd()
else:
torrentsdir = os.path.abspath(args[0])
print "Press Ctrl-C or send SIGKILL or WM_DESTROY to stop seeding"
# setup session
sscfg = SessionStartupConfig()
statedir = os.path.join(torrentsdir,"."+LIBRARYNAME)
sscfg.set_state_dir(statedir)
sscfg.set_listen_port(port)
sscfg.set_megacache(False)
sscfg.set_overlay(False)
sscfg.set_dialback(False)
if tracking:
sscfg.set_internal_tracker(True)
# log full
logfilename = "tracker-"+str(int(time.time()))+".log"
sscfg.set_tracker_logfile(logfilename)
sscfg.set_tracker_log_nat_checks(True)
s = Session(sscfg)
global sesjun
sesjun = s
s.set_download_states_callback(states_callback, getpeerlist=False)
# Restore previous Session
s.load_checkpoint()
# setup and start downloads
dscfg = DownloadStartupConfig()
dscfg.set_dest_dir(torrentsdir)
# Arno, 2010-04-16: STBSPEED: complete BITFIELDS are processed much faster
dscfg.set_breakup_seed_bitfield(False)
if MAXUPLOAD is not None:
dscfg.set_max_speed(UPLOAD,MAXUPLOAD)
##dscfg.set_max_uploads(32)
#
# Scan dir, until exit by CTRL-C (or any other signal/interrupt)
#
try:
while True:
try:
print >>sys.stderr,"Rescanning",`torrentsdir`
for torrent_file in os.listdir(torrentsdir):
if torrent_file.endswith(".torrent") or torrent_file.endswith(".tstream") or torrent_file.endswith(".url"):
print >>sys.stderr,"Found file",`torrent_file`
tfullfilename = os.path.join(torrentsdir,torrent_file)
if torrent_file.endswith(".url"):
f = open(tfullfilename,"rb")
url = f.read()
f.close()
tdef = TorrentDef.load_from_url(url)
else:
tdef = TorrentDef.load(tfullfilename)
# See if already running:
dlist = s.get_downloads()
existing = False
for d in dlist:
existinfohash = d.get_def().get_infohash()
if existinfohash == tdef.get_infohash():
existing = True
break
if existing:
print >>sys.stderr,"Ignoring existing Download",`tdef.get_name()`
if MAXUPLOAD is not None:
d.set_max_speed(UPLOAD,MAXUPLOAD)
else:
if tracking:
s.add_to_internal_tracker(tdef)
d = s.start_download(tdef, dscfg)
# Checkpoint again when new are seeding
global checkpointedwhenseeding
checkpointedwhenseeding = False
except KeyboardInterrupt,e:
raise e
except Exception, e:
print_exc()
time.sleep(30.0)
except Exception, e:
print_exc()
if __name__ == "__main__":
main()
|
lgpl-2.1
| -6,706,108,623,727,171,000
| 32.333333
| 191
| 0.521624
| false
| 3.891242
| false
| false
| false
|
pstiasny/bigos
|
bigos/__init__.py
|
1
|
1522
|
#!/bin/env python2
# encoding: utf8
__version__ = '0.0.3'
import re
import itertools
from bigos.backend import generate_events
watchlist = []
class EventHandler:
def __init__(self, function, regex, dirs=False, types=['created', 'modified']):
'''
:param function: function to run when the event is matched
:param regex: regular expression string to match the
path against
:param dirs: should the handler be run for directory events,
None to run for both dirs and files
:param types: list of types of events to match, or None for
any event
'''
self.f = function
self.regex = re.compile(regex)
self.dirs = dirs
self.types = types
def match(self, ev):
dir_match = self.dirs is None or (ev.is_dir == self.dirs)
types_match = self.types is None or (ev.type in self.types)
return dir_match and types_match and self.regex.match(ev.path)
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
def on(*args, **kwargs):
def decorate(f):
watchlist.append(EventHandler(f, *args, **kwargs))
return f
return decorate
def handle_event(watchlist, ev):
for handler in watchlist:
if handler.match(ev):
handler(ev)
def main(dirpath, watchlist=watchlist):
for ev in itertools.chain.from_iterable(generate_events(dirpath)):
handle_event(watchlist, ev)
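# A hypothetical usage sketch (regex, handler and path are placeholders, not
# part of the original module):
#   @on(r'.*\.py$', types=['modified'])
#   def rerun_tests(ev):
#       print('changed:', ev.path)
#
#   main('/path/to/watched/dir')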
|
gpl-3.0
| 4,430,752,800,128,339,500
| 28.843137
| 83
| 0.603811
| false
| 3.902564
| false
| false
| false
|
blooparksystems/odoo
|
addons/account/models/chart_template.py
|
1
|
43914
|
# -*- coding: utf-8 -*-
import time
import math
from openerp.osv import expression
from openerp.tools.float_utils import float_round as round
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import AccessError, UserError, ValidationError
import openerp.addons.decimal_precision as dp
from openerp import api, fields, models, _
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class AccountAccountTemplate(models.Model):
_name = "account.account.template"
_description = 'Templates for Accounts'
_order = "code"
name = fields.Char(required=True, index=True)
currency_id = fields.Many2one('res.currency', string='Account Currency', help="Forces all moves for this account to have this secondary currency.")
code = fields.Char(size=64, required=True, index=True)
user_type_id = fields.Many2one('account.account.type', string='Type', required=True, oldname='user_type',
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities.")
reconcile = fields.Boolean(string='Allow Invoices & payments Matching', default=False,
help="Check this option if you want the user to reconcile entries in this account.")
note = fields.Text()
tax_ids = fields.Many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', string='Default Taxes')
nocreate = fields.Boolean(string='Optional Create', default=False,
help="If checked, the new chart of accounts will not contain this by default.")
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template',
help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you "
"to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times).")
tag_ids = fields.Many2many('account.account.tag', 'account_account_template_account_tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
@api.multi
@api.depends('name', 'code')
def name_get(self):
res = []
for record in self:
name = record.name
if record.code:
name = record.code + ' ' + name
res.append((record.id, name))
return res
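    # For example (illustrative values), a template with code '4010' and name
    # 'Sales' is displayed as '4010 Sales'.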
class AccountChartTemplate(models.Model):
_name = "account.chart.template"
_description = "Templates for Account Chart"
name = fields.Char(required=True)
company_id = fields.Many2one('res.company', string='Company')
parent_id = fields.Many2one('account.chart.template', string='Parent Chart Template')
code_digits = fields.Integer(string='# of Digits', required=True, default=6, help="No. of Digits to use for account code")
visible = fields.Boolean(string='Can be Visible?', default=True,
help="Set this to False if you don't want this template to be used actively in the wizard that generate Chart of Accounts from "
"templates, this is useful when you want to generate accounts of this template only when loading its child template.")
currency_id = fields.Many2one('res.currency', string='Currency', required=True)
use_anglo_saxon = fields.Boolean(string="Use Anglo-Saxon accounting", default=False)
complete_tax_set = fields.Boolean(string='Complete Set of Taxes', default=True,
help="This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list "
"of taxes. This last choice assumes that the set of tax defined on this template is complete")
account_ids = fields.One2many('account.account.template', 'chart_template_id', string='Associated Account Templates')
tax_template_ids = fields.One2many('account.tax.template', 'chart_template_id', string='Tax Template List',
help='List of all the taxes that have to be installed by the wizard')
bank_account_code_prefix = fields.Char(string='Prefix of the bank accounts', oldname="bank_account_code_char")
cash_account_code_prefix = fields.Char(string='Prefix of the main cash accounts')
transfer_account_id = fields.Many2one('account.account.template', string='Transfer Account', required=True,
domain=lambda self: [('reconcile', '=', True), ('user_type_id.id', '=', self.env.ref('account.data_account_type_current_assets').id)],
help="Intermediary account used when moving money from a liquidity account to another")
income_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Gain Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
expense_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Loss Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
property_account_receivable_id = fields.Many2one('account.account.template', string='Receivable Account', oldname="property_account_receivable")
property_account_payable_id = fields.Many2one('account.account.template', string='Payable Account', oldname="property_account_payable")
property_account_expense_categ_id = fields.Many2one('account.account.template', string='Category of Expense Account', oldname="property_account_expense_categ")
property_account_income_categ_id = fields.Many2one('account.account.template', string='Category of Income Account', oldname="property_account_income_categ")
property_account_expense_id = fields.Many2one('account.account.template', string='Expense Account on Product Template', oldname="property_account_expense")
property_account_income_id = fields.Many2one('account.account.template', string='Income Account on Product Template', oldname="property_account_income")
property_stock_account_input_categ_id = fields.Many2one('account.account.template', string="Input Account for Stock Valuation", oldname="property_stock_account_input_categ")
property_stock_account_output_categ_id = fields.Many2one('account.account.template', string="Output Account for Stock Valuation", oldname="property_stock_account_output_categ")
property_stock_valuation_account_id = fields.Many2one('account.account.template', string="Account Template for Stock Valuation")
@api.one
def try_loading_for_current_company(self):
self.ensure_one()
company = self.env.user.company_id
# If we don't have any chart of account on this company, install this chart of account
if not company.chart_template_id:
wizard = self.env['wizard.multi.charts.accounts'].create({
'company_id': self.env.user.company_id.id,
'chart_template_id': self.id,
'code_digits': self.code_digits,
'transfer_account_id': self.transfer_account_id.id,
'currency_id': self.currency_id.id,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
})
wizard.onchange_chart_template_id()
wizard.execute()
@api.multi
def open_select_template_wizard(self):
# Add action to open wizard to select between several templates
if not self.company_id.chart_template_id:
todo = self.env['ir.actions.todo']
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_wizard_multi_chart')
if action_rec:
todo.create({'action_id': action_rec.id, 'name': _('Choose Accounting Template'), 'type': 'automatic'})
return True
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
"""
This method is used for creating journals.
        :param acc_template_ref: Account templates reference.
        :param company: company record selected from wizard.multi.charts.accounts.
        :param journals_dict: optional list of extra journal value dicts to create.
:returns: True
"""
JournalObj = self.env['account.journal']
for vals_journal in self._prepare_all_journals(acc_template_ref, company, journals_dict=journals_dict):
journal = JournalObj.create(vals_journal)
if vals_journal['type'] == 'general' and vals_journal['code'] == _('EXCH'):
company.write({'currency_exchange_journal_id': journal.id})
return True
@api.multi
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
def _get_default_account(journal_vals, type='debit'):
# Get the default accounts
default_account = False
if journal['type'] == 'sale':
default_account = acc_template_ref.get(self.property_account_income_categ_id.id)
elif journal['type'] == 'purchase':
default_account = acc_template_ref.get(self.property_account_expense_categ_id.id)
elif journal['type'] == 'general' and journal['code'] == _('EXCH'):
if type=='credit':
default_account = acc_template_ref.get(self.income_currency_exchange_account_id.id)
else:
default_account = acc_template_ref.get(self.expense_currency_exchange_account_id.id)
return default_account
journals = [{'name': _('Customer Invoices'), 'type': 'sale', 'code': _('INV'), 'favorite': True, 'sequence': 5},
{'name': _('Vendor Bills'), 'type': 'purchase', 'code': _('BILL'), 'favorite': True, 'sequence': 6},
{'name': _('Miscellaneous Operations'), 'type': 'general', 'code': _('MISC'), 'favorite': False, 'sequence': 7},
{'name': _('Exchange Difference'), 'type': 'general', 'code': _('EXCH'), 'favorite': False, 'sequence': 9},]
if journals_dict != None:
journals.extend(journals_dict)
self.ensure_one()
journal_data = []
for journal in journals:
vals = {
'type': journal['type'],
'name': journal['name'],
'code': journal['code'],
'company_id': company.id,
'default_credit_account_id': _get_default_account(journal, 'credit'),
'default_debit_account_id': _get_default_account(journal, 'debit'),
'refund_sequence': True,
'show_on_dashboard': journal['favorite'],
}
journal_data.append(vals)
return journal_data
@api.multi
def generate_properties(self, acc_template_ref, company):
"""
        This method is used for creating properties.
:param self: chart templates for which we need to create properties
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
self.ensure_one()
PropertyObj = self.env['ir.property']
todo_list = [
('property_account_receivable_id', 'res.partner', 'account.account'),
('property_account_payable_id', 'res.partner', 'account.account'),
('property_account_expense_categ_id', 'product.category', 'account.account'),
('property_account_income_categ_id', 'product.category', 'account.account'),
('property_account_expense_id', 'product.template', 'account.account'),
('property_account_income_id', 'product.template', 'account.account'),
]
for record in todo_list:
account = getattr(self, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = self.env['ir.model.fields'].search([('name', '=', record[0]), ('model', '=', record[1]), ('relation', '=', record[2])], limit=1)
vals = {
'name': record[0],
'company_id': company.id,
'fields_id': field.id,
'value': value,
}
properties = PropertyObj.search([('name', '=', record[0]), ('company_id', '=', company.id)])
if properties:
#the property exist: modify it
properties.write(vals)
else:
#create the property
PropertyObj.create(vals)
stock_properties = [
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for stock_property in stock_properties:
account = getattr(self, stock_property)
value = account and acc_template_ref[account.id] or False
if value:
company.write({stock_property: value})
return True
@api.multi
def _install_template(self, company, code_digits=None, transfer_account_id=None, obj_wizard=None, acc_ref=None, taxes_ref=None):
""" Recursively load the template objects and create the real objects from them.
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param transfer_account_id: reference to the account template that will be used as intermediary account for transfers between 2 liquidity accounts
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:returns: tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
:rtype: tuple(dict, dict, dict)
"""
self.ensure_one()
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if self.parent_id:
tmp1, tmp2 = self.parent_id._install_template(company, code_digits=code_digits, transfer_account_id=transfer_account_id, acc_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tmp1, tmp2 = self._load_template(company, code_digits=code_digits, transfer_account_id=transfer_account_id, account_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
return acc_ref, taxes_ref
@api.multi
def _load_template(self, company, code_digits=None, transfer_account_id=None, account_ref=None, taxes_ref=None):
""" Generate all the objects from the templates
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param transfer_account_id: reference to the account template that will be used as intermediary account for transfers between 2 liquidity accounts
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:returns: tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
:rtype: tuple(dict, dict, dict)
"""
self.ensure_one()
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if not code_digits:
code_digits = self.code_digits
if not transfer_account_id:
transfer_account_id = self.transfer_account_id
AccountTaxObj = self.env['account.tax']
# Generate taxes from templates.
generated_tax_res = self.tax_template_ids._generate_tax(company)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = self.generate_account(taxes_ref, account_ref, code_digits, company)
account_ref.update(account_template_ref)
# writing account values after creation of accounts
company.transfer_account_id = account_template_ref[transfer_account_id.id]
for key, value in generated_tax_res['account_dict'].items():
if value['refund_account_id'] or value['account_id']:
AccountTaxObj.browse(key).write({
'refund_account_id': account_ref.get(value['refund_account_id'], False),
'account_id': account_ref.get(value['account_id'], False),
})
# Create Journals
self.generate_journals(account_ref, company)
# generate properties function
self.generate_properties(account_ref, company)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
self.generate_fiscal_position(taxes_ref, account_ref, company)
return account_ref, taxes_ref
@api.multi
def generate_account(self, tax_template_ref, acc_template_ref, code_digits, company):
""" This method for generating accounts from templates.
:param tax_template_ref: Taxes templates reference for write taxes_id in account_account.
:param acc_template_ref: dictionary with the mappping between the account templates and the real accounts.
:param code_digits: number of digits got from wizard.multi.charts.accounts, this is use for account code.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_tmpl_obj = self.env['account.account.template']
acc_template = account_tmpl_obj.search([('nocreate', '!=', True), ('chart_template_id', '=', self.id)], order='id')
for account_template in acc_template:
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
if code_main > 0 and code_main <= code_digits:
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
vals = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'user_type_id': account_template.user_type_id and account_template.user_type_id.id or False,
'reconcile': account_template.reconcile,
'note': account_template.note,
'tax_ids': [(6, 0, tax_ids)],
'company_id': company.id,
'tag_ids': [(6, 0, [t.id for t in account_template.tag_ids])],
}
new_account = self.env['account.account'].create(vals)
acc_template_ref[account_template.id] = new_account.id
return acc_template_ref
@api.multi
def generate_fiscal_position(self, tax_template_ref, acc_template_ref, company):
""" This method generate Fiscal Position, Fiscal Position Accounts and Fiscal Position Taxes from templates.
:param chart_temp_id: Chart Template Id.
:param taxes_ids: Taxes templates reference for generating account.fiscal.position.tax.
:param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
self.ensure_one()
positions = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)])
for position in positions:
new_fp = self.env['account.fiscal.position'].create({'company_id': company.id, 'name': position.name, 'note': position.note})
for tax in position.tax_ids:
self.env['account.fiscal.position.tax'].create({
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': new_fp.id
})
for acc in position.account_ids:
self.env['account.fiscal.position.account'].create({
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': new_fp.id
})
return True
class AccountTaxTemplate(models.Model):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_order = 'id'
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Tax Name', required=True)
type_tax_use = fields.Selection([('sale', 'Sales'), ('purchase', 'Purchases'), ('none', 'None')], string='Tax Scope', required=True, default="sale",
help="Determines where the tax is selectable. Note : 'None' means a tax can't be used by itself, however it can still be used in a group.")
amount_type = fields.Selection(default='percent', string="Tax Computation", required=True,
selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included')])
active = fields.Boolean(default=True, help="Set active to false to hide the tax without removing it.")
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
children_tax_ids = fields.Many2many('account.tax.template', 'account_tax_template_filiation_rel', 'parent_tax', 'child_tax', string='Children Taxes')
sequence = fields.Integer(required=True, default=1,
help="The sequence field is used to define order in which the tax lines are applied.")
amount = fields.Float(required=True, digits=(16, 4))
account_id = fields.Many2one('account.account.template', string='Tax Account', ondelete='restrict',
help="Account that will be set on invoice tax lines for invoices. Leave empty to use the expense account.", oldname='account_collected_id')
refund_account_id = fields.Many2one('account.account.template', string='Tax Account on Refunds', ondelete='restrict',
help="Account that will be set on invoice tax lines for refunds. Leave empty to use the expense account.", oldname='account_paid_id')
description = fields.Char(string='Display on Invoices')
price_include = fields.Boolean(string='Included in Price', default=False,
help="Check this if the price you use on the product and invoices includes this tax.")
include_base_amount = fields.Boolean(string='Affect Subsequent Taxes', default=False,
help="If set, taxes which are computed after this one will be computed based on the price tax included.")
analytic = fields.Boolean(string="Analytic Cost", help="If set, the amount computed by this tax will be assigned to the same analytic account as the invoice line (if any)")
tag_ids = fields.Many2many('account.account.tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id, type_tax_use)', 'Tax names must be unique !'),
]
@api.multi
@api.depends('name', 'description')
def name_get(self):
res = []
for record in self:
name = record.description and record.description or record.name
res.append((record.id, name))
return res
def _get_tax_vals(self, company):
""" This method generates a dictionnary of all the values for the tax that will be created.
"""
self.ensure_one()
return {
'name': self.name,
'type_tax_use': self.type_tax_use,
'amount_type': self.amount_type,
'active': self.active,
'company_id': company.id,
'sequence': self.sequence,
'amount': self.amount,
'description': self.description,
'price_include': self.price_include,
'include_base_amount': self.include_base_amount,
'analytic': self.analytic,
'tag_ids': [(6, 0, [t.id for t in self.tag_ids])],
}
@api.multi
def _generate_tax(self, company):
""" This method generate taxes from templates.
:param company: the company for which the taxes should be created from templates in self
:returns: {
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
todo_dict = {}
tax_template_to_tax = {}
for tax in self:
# Compute children tax ids
children_ids = []
for child_tax in tax.children_tax_ids:
if tax_template_to_tax.get(child_tax.id):
children_ids.append(tax_template_to_tax[child_tax.id])
vals_tax = tax._get_tax_vals(company)
vals_tax['children_tax_ids'] = children_ids and [(6, 0, children_ids)] or []
new_tax = self.env['account.tax'].create(vals_tax)
tax_template_to_tax[tax.id] = new_tax.id
# Since the accounts have not been created yet, we have to wait before filling these fields
todo_dict[new_tax.id] = {
'account_id': tax.account_id.id,
'refund_account_id': tax.refund_account_id.id,
}
return {
'tax_template_to_tax': tax_template_to_tax,
'account_dict': todo_dict
}
# Fiscal Position Templates
class AccountFiscalPositionTemplate(models.Model):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
name = fields.Char(string='Fiscal Position Template', required=True)
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
account_ids = fields.One2many('account.fiscal.position.account.template', 'position_id', string='Account Mapping')
tax_ids = fields.One2many('account.fiscal.position.tax.template', 'position_id', string='Tax Mapping')
note = fields.Text(string='Notes')
class AccountFiscalPositionTaxTemplate(models.Model):
_name = 'account.fiscal.position.tax.template'
_description = 'Template Tax Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Position', required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax.template', string='Tax Source', required=True)
tax_dest_id = fields.Many2one('account.tax.template', string='Replacement Tax')
class AccountFiscalPositionAccountTemplate(models.Model):
_name = 'account.fiscal.position.account.template'
_description = 'Template Account Fiscal Mapping'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Mapping', required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account.template', string='Account Source', required=True)
account_dest_id = fields.Many2one('account.account.template', string='Account Destination', required=True)
# ---------------------------------------------------------
# Account generation from template wizards
# ---------------------------------------------------------
class WizardMultiChartsAccounts(models.TransientModel):
"""
Create a new account chart for a company.
The wizard asks for:
* a company
* an account chart template
* a number of digits for formatting code of non-view accounts
* a list of bank accounts owned by the company
Then, the wizard:
* generates all accounts from the template and assigns them to the right company
* generates all taxes and tax codes, changing account assignations
* generates all accounting properties and assigns them correctly
"""
_name = 'wizard.multi.charts.accounts'
_inherit = 'res.config'
company_id = fields.Many2one('res.company', string='Company', required=True)
currency_id = fields.Many2one('res.currency', string='Currency', help="Currency as per company's country.", required=True)
only_one_chart_template = fields.Boolean(string='Only One Chart Template Available')
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
bank_account_ids = fields.One2many('account.bank.accounts.wizard', 'bank_account_id', string='Cash and Banks', required=True, oldname="bank_accounts_id")
bank_account_code_prefix = fields.Char('Bank Accounts Prefix', oldname="bank_account_code_char")
cash_account_code_prefix = fields.Char('Cash Accounts Prefix')
code_digits = fields.Integer(string='# of Digits', required=True, help="No. of Digits to use for account code")
sale_tax_id = fields.Many2one('account.tax.template', string='Default Sales Tax', oldname="sale_tax")
purchase_tax_id = fields.Many2one('account.tax.template', string='Default Purchase Tax', oldname="purchase_tax")
sale_tax_rate = fields.Float(string='Sales Tax(%)')
use_anglo_saxon = fields.Boolean(string='Use Anglo-Saxon Accounting', related='chart_template_id.use_anglo_saxon')
transfer_account_id = fields.Many2one('account.account.template', required=True, string='Transfer Account',
domain=lambda self: [('reconcile', '=', True), ('user_type_id.id', '=', self.env.ref('account.data_account_type_current_assets').id)],
help="Intermediary account used when moving money from a liquidity account to another")
purchase_tax_rate = fields.Float(string='Purchase Tax(%)')
complete_tax_set = fields.Boolean('Complete Set of Taxes',
help="This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use "
"the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete")
@api.model
def _get_chart_parent_ids(self, chart_template):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:param browse_record chart_template: the account.chart.template record
:return: the IDS of all ancestor charts, including the chart itself.
"""
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
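# Illustrative sketch (hypothetical ids): for a chart C (id=3) whose parent is P (id=2),
# whose parent is GP (id=1), _get_chart_parent_ids(C) returns [3, 2, 1] -- the chart
# itself first, then each ancestor found by walking parent_id.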
@api.onchange('sale_tax_rate')
def onchange_tax_rate(self):
self.purchase_tax_rate = self.sale_tax_rate or False
@api.onchange('chart_template_id')
def onchange_chart_template_id(self):
res = {}
tax_templ_obj = self.env['account.tax.template']
if self.chart_template_id:
currency_id = self.chart_template_id.currency_id and self.chart_template_id.currency_id.id or self.env.user.company_id.currency_id.id
self.complete_tax_set = self.chart_template_id.complete_tax_set
self.currency_id = currency_id
if self.chart_template_id.complete_tax_set:
# the default tax is the one with the lowest sequence. For equal sequences we take the most recently created one, as is the case for taxes created while installing the generic chart of accounts
chart_ids = self._get_chart_parent_ids(self.chart_template_id)
base_tax_domain = [('chart_template_id', 'in', chart_ids)]
sale_tax_domain = base_tax_domain + [('type_tax_use', '=', 'sale')]
purchase_tax_domain = base_tax_domain + [('type_tax_use', '=', 'purchase')]
sale_tax = tax_templ_obj.search(sale_tax_domain, order="sequence, id desc", limit=1)
purchase_tax = tax_templ_obj.search(purchase_tax_domain, order="sequence, id desc", limit=1)
self.sale_tax_id = sale_tax.id
self.purchase_tax_id = purchase_tax.id
res.setdefault('domain', {})
res['domain']['sale_tax_id'] = repr(sale_tax_domain)
res['domain']['purchase_tax_id'] = repr(purchase_tax_domain)
if self.chart_template_id.transfer_account_id:
self.transfer_account_id = self.chart_template_id.transfer_account_id.id
if self.chart_template_id.code_digits:
self.code_digits = self.chart_template_id.code_digits
if self.chart_template_id.bank_account_code_prefix:
self.bank_account_code_prefix = self.chart_template_id.bank_account_code_prefix
if self.chart_template_id.cash_account_code_prefix:
self.cash_account_code_prefix = self.chart_template_id.cash_account_code_prefix
return res
@api.model
def default_get(self, fields):
context = self._context or {}
res = super(WizardMultiChartsAccounts, self).default_get(fields)
tax_templ_obj = self.env['account.tax.template']
account_chart_template = self.env['account.chart.template']
if 'bank_account_ids' in fields:
res.update({'bank_account_ids': [{'acc_name': _('Cash'), 'account_type': 'cash'}, {'acc_name': _('Bank'), 'account_type': 'bank'}]})
if 'company_id' in fields:
res.update({'company_id': self.env.user.company_id.id})
if 'currency_id' in fields:
company_id = res.get('company_id') or False
if company_id:
company = self.env['res.company'].browse(company_id)
currency_id = company.on_change_country(company.country_id.id)['value']['currency_id']
res.update({'currency_id': currency_id})
chart_templates = account_chart_template.search([('visible', '=', True)])
if chart_templates:
#default to the most recently created chart template by taking the max of the ids
chart_id = max(chart_templates.ids)
if context.get("default_charts"):
model_data = self.env['ir.model.data'].search_read([('model', '=', 'account.chart.template'), ('module', '=', context.get("default_charts"))], ['res_id'])
if model_data:
chart_id = model_data[0]['res_id']
chart = account_chart_template.browse(chart_id)
chart_hierarchy_ids = self._get_chart_parent_ids(chart)
if 'chart_template_id' in fields:
res.update({'only_one_chart_template': len(chart_templates) == 1,
'chart_template_id': chart_id})
if 'sale_tax_id' in fields:
sale_tax = tax_templ_obj.search([('chart_template_id', 'in', chart_hierarchy_ids),
('type_tax_use', '=', 'sale')], limit=1, order='sequence')
res.update({'sale_tax_id': sale_tax and sale_tax.id or False})
if 'purchase_tax_id' in fields:
purchase_tax = tax_templ_obj.search([('chart_template_id', 'in', chart_hierarchy_ids),
('type_tax_use', '=', 'purchase')], limit=1, order='sequence')
res.update({'purchase_tax_id': purchase_tax and purchase_tax.id or False})
res.update({
'purchase_tax_rate': 15.0,
'sale_tax_rate': 15.0,
})
return res
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
context = self._context or {}
res = super(WizardMultiChartsAccounts, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=False)
cmp_select = []
CompanyObj = self.env['res.company']
companies = CompanyObj.search([])
#in the company selection widget, display only the companies that haven't been configured yet (but don't care about the demo chart of accounts)
self._cr.execute("SELECT company_id FROM account_account WHERE deprecated = 'f' AND name != 'Chart For Automated Tests' AND name NOT LIKE '%(test)'")
configured_cmp = [r[0] for r in self._cr.fetchall()]
unconfigured_cmp = list(set(companies.ids) - set(configured_cmp))
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id', 'in', unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in CompanyObj.browse(unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
@api.one
def _create_tax_templates_from_rates(self, company_id):
'''
This function checks if the chosen chart template is configured as containing a full set of taxes, and if
it's not the case, it creates the templates for the account.tax object according to the provided sale/purchase rates.
Then it saves the new tax templates as default taxes to use for this chart template.
:param company_id: id of the company for which the wizard is running
:return: True
'''
obj_tax_temp = self.env['account.tax.template']
all_parents = self._get_chart_parent_ids(self.chart_template_id)
# create tax templates from purchase_tax_rate and sale_tax_rate fields
if not self.chart_template_id.complete_tax_set:
value = self.sale_tax_rate
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'sale'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': value, 'name': _('Tax %.2f%%') % value})
value = self.purchase_tax_rate
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'purchase'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': value, 'name': _('Purchase Tax %.2f%%') % value})
return True
@api.multi
def execute(self):
'''
This function is called at the confirmation of the wizard to generate the COA from the templates. It will read
all the provided information to create the accounts, the banks, the journals, the taxes, the
accounting properties... for the chosen company.
'''
if not self.env.user._is_admin():
raise AccessError(_("Only administrators can change the settings"))
ir_values_obj = self.env['ir.values']
company = self.company_id
self.company_id.write({'currency_id': self.currency_id.id,
'accounts_code_digits': self.code_digits,
'anglo_saxon_accounting': self.use_anglo_saxon,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
'chart_template_id': self.chart_template_id.id})
#set the coa currency to active
self.currency_id.write({'active': True})
# When we install the CoA of first company, set the currency to price types and pricelists
if company.id == 1:
for reference in ['product.list_price', 'product.standard_price', 'product.list0']:
try:
tmp2 = self.env.ref(reference).write({'currency_id': self.currency_id.id})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(company.id)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref = self.chart_template_id._install_template(company, code_digits=self.code_digits, transfer_account_id=self.transfer_account_id)
# write values of default taxes for product as super user
if self.sale_tax_id and taxes_ref:
ir_values_obj.sudo().set_default('product.template', "taxes_id", [taxes_ref[self.sale_tax_id.id]], for_all_users=True, company_id=company.id)
if self.purchase_tax_id and taxes_ref:
ir_values_obj.sudo().set_default('product.template', "supplier_taxes_id", [taxes_ref[self.purchase_tax_id.id]], for_all_users=True, company_id=company.id)
# Create Bank journals
self._create_bank_journals_from_o2m(company, acc_template_ref)
# Create the current year earning account (outside of the CoA)
self.env['account.account'].create({
'code': '9999',
'name': _('Undistributed Profits/Losses'),
'user_type_id': self.env.ref("account.data_unaffected_earnings").id,
'company_id': company.id,})
return {}
@api.multi
def _create_bank_journals_from_o2m(self, company, acc_template_ref):
'''
This function creates bank journals and its accounts for each line encoded in the field bank_account_ids of the
wizard (which is currently only used to create a default bank and cash journal when the CoA is installed).
:param company: the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
'''
self.ensure_one()
# Create the journals that will trigger the account.account creation
for acc in self.bank_account_ids:
self.env['account.journal'].create({
'name': acc.acc_name,
'type': acc.account_type,
'company_id': company.id,
'currency_id': acc.currency_id.id,
})
class AccountBankAccountsWizard(models.TransientModel):
_name = 'account.bank.accounts.wizard'
acc_name = fields.Char(string='Account Name.', required=True)
bank_account_id = fields.Many2one('wizard.multi.charts.accounts', string='Bank Account', required=True, ondelete='cascade')
currency_id = fields.Many2one('res.currency', string='Account Currency',
help="Forces all moves for this account to have this secondary currency.")
account_type = fields.Selection([('cash', 'Cash'), ('bank', 'Bank')])
|
gpl-3.0
| -9,035,051,368,661,658,000
| 56.554391
| 193
| 0.633488
| false
| 4.046627
| false
| false
| false
|
truthcoin/truthcoin-cpp
|
contrib/linearize/linearize-hashes.py
|
1
|
3041
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class TruthcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
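# A minimal sketch of the request objects this script builds (the method and params
# shown are the ones get_block_hashes() below actually uses; the id is just the
# position of the call inside one JSON-RPC batch):
#
#   TruthcoinRPC.build_request(0, 'getblockhash', [123])
#   # -> {'version': '1.1', 'method': 'getblockhash', 'id': 0, 'params': [123]}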
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = TruthcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
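# Example CONFIG-FILE contents (illustrative values; rpcuser/rpcpassword are required,
# the remaining keys fall back to the defaults applied above):
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000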
|
mit
| 8,576,335,789,278,602,000
| 25.911504
| 90
| 0.664255
| false
| 2.961052
| false
| false
| false
|
theJollySin/python_for_scientists
|
classes/14_netcdf/extract_cell_from_3d_ioapi.py
|
1
|
1291
|
import sys
from netCDF4 import Dataset
def main():
print(extract_cell_from_3d_ioapi(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), layer=0, var=None))
def extract_cell_from_3d_ioapi(file_path, row, col, layer=0, var=None):
'''Extract a single grid cell from a GRIDDED IOAPI NetCDF file.
If you don't provide a layer, we'll assume you want the ground layer.
If you don't provide a variable name, we'll assume you want all of them.
This will return a dictionary of each variable's values across the time dimension.
'''
# opening file as read-only
root = Dataset(file_path, 'r', format='NETCDF3_CLASSIC')
# find the variable names (remove TFLAG)
keys = list(root.variables.keys())  # cast to list so .remove() below also works on Python 3
keys.remove('TFLAG')
if var is not None:
# if variable name is provided, and exists in the file
if var not in keys:
raise Exception('The variable ' + str(var) + ' does not exist.')
else:
return {var: root.variables[var][:, layer, row, col]}
else:
# if variable name is not provided, return a dictionary of all variables
results = {}
for key in keys:
results[key] = root.variables[key][:, layer, row, col]
return results
if __name__ == "__main__":
main()
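# Usage sketch (file name, row and column below are hypothetical):
#
#   python extract_cell_from_3d_ioapi.py gridded_ioapi_file.nc 42 17
#
# or, from another script, for a single variable:
#
#   from extract_cell_from_3d_ioapi import extract_cell_from_3d_ioapi
#   series = extract_cell_from_3d_ioapi('gridded_ioapi_file.nc', 42, 17, layer=0, var='O3')
#   # -> {'O3': <values of O3 in that cell across the time dimension>}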
|
gpl-3.0
| -4,119,864,125,831,360,000
| 32.102564
| 105
| 0.633617
| false
| 3.678063
| false
| false
| false
|
daodewang/qingcloud-sdk-python
|
qingcloud/iaas/router_static.py
|
1
|
8191
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import json
from qingcloud.iaas.errors import InvalidRouterStatic
class RouterStaticFactory(object):
TYPE_PORT_FORWARDING = 1
TYPE_VPN = 2
TYPE_TUNNEL = 4
TYPE_FILTERING = 5
PPTP_DEFAULT_CONNS = 100
@classmethod
def create(cls, static_type, router_static_id='', **kw):
""" Create router static.
"""
if static_type not in STATIC_MAPPER:
raise InvalidRouterStatic('invalid static type[%s]' % static_type)
clazz = STATIC_MAPPER[static_type]
kw = clazz.extract(kw)
inst = clazz(**kw)
inst.router_static_id = router_static_id
return inst
@classmethod
def create_from_string(cls, string):
""" Create router static from json formatted string.
"""
data = json.loads(string)
if isinstance(data, dict):
return cls.create(**data)
if isinstance(data, list):
return [cls.create(**item) for item in data]
class _RouterStatic(object):
""" _RouterStatic is used to define static rule in router.
"""
router_static_id = None
static_type = None
def __repr__(self):
return '<%s>%s' % (self.__class__.__name__, self.to_json())
@staticmethod
def extract(kw):
raise NotImplementedError
def extra_props(self):
raise NotImplementedError
def to_json(self):
props = {
'router_static_id': self.router_static_id,
'static_type': self.static_type,
}
props.update(self.extra_props())
return props
class _StaticForPortForwarding(_RouterStatic):
static_type = RouterStaticFactory.TYPE_PORT_FORWARDING
def __init__(self, src_port, dst_ip, dst_port, protocol='tcp',
router_static_name='', **kw):
super(_StaticForPortForwarding, self).__init__()
self.router_static_name = router_static_name
self.src_port = src_port
self.dst_ip = dst_ip
self.dst_port = dst_port
self.protocol = protocol
@staticmethod
def extract(kw):
if 'val1' in kw:
kw['src_port'] = kw.pop('val1')
if 'val2' in kw:
kw['dst_ip'] = kw.pop('val2')
if 'val3' in kw:
kw['dst_port'] = kw.pop('val3')
if 'val4' in kw:
kw['protocol'] = kw.pop('val4')
return kw
def extra_props(self):
return {
'router_static_name': self.router_static_name,
'val1': self.src_port,
'val2': self.dst_ip,
'val3': self.dst_port,
'val4': self.protocol,
}
class _StaticForVPN(_RouterStatic):
class OpenVPN(object):
def __init__(self, ip_network, serv_port='1194', serv_protocol='udp',
**kw):
self.serv_port = serv_port
self.serv_protocol = serv_protocol
self.ip_network = ip_network
def extra_props(self):
return {
'val1': 'openvpn',
'val2': self.serv_port,
'val3': self.serv_protocol,
'val4': self.ip_network,
}
class PPTP(object):
def __init__(self, usr, pwd, ip_network,
max_conn_cnt=RouterStaticFactory.PPTP_DEFAULT_CONNS, **kw):
self.usr = usr
self.pwd = pwd
self.max_conn_cnt = max_conn_cnt
self.ip_network = ip_network
def extra_props(self):
return {
'val1': 'pptp',
'val2': '%s:%s' % (self.usr, self.pwd),
'val3': self.max_conn_cnt,
'val4': self.ip_network,
}
static_type = RouterStaticFactory.TYPE_VPN
def __init__(self, vpn_type='', **kw):
super(_StaticForVPN, self).__init__()
vpn_type = vpn_type or kw.get('val1')
if vpn_type == 'openvpn':
self.inst = _StaticForVPN.OpenVPN(**kw)
elif vpn_type == 'pptp':
self.inst = _StaticForVPN.PPTP(**kw)
else:
raise InvalidRouterStatic('unsupported vpn type[%s]' % vpn_type)
@staticmethod
def extract(kw):
vpn_type = kw.get('val1')
if vpn_type == 'openvpn':
if 'val2' in kw:
kw['serv_port'] = kw.pop('val2')
if 'val3' in kw:
kw['serv_protocol'] = kw.pop('val3')
if 'val4' in kw:
kw['ip_network'] = kw.pop('val4')
elif vpn_type == 'pptp':
if 'entry_set' in kw:
entry_set = kw['entry_set']
kw['usr'] = entry_set[0]['val1']
kw['pwd'] = ''
if 'val3' in kw:
kw['max_conn_cnt'] = kw.pop('val3')
if 'val4' in kw:
kw['ip_network'] = kw.pop('val4')
return kw
def extra_props(self):
return self.inst.extra_props()
class _StaticForTunnel(_RouterStatic):
static_type = RouterStaticFactory.TYPE_TUNNEL
def __init__(self, vxnet_id, tunnel_entries, **kw):
"""
@param tunnel_entries: [(tunnel_type, ip_network, key), ...]
"""
super(_StaticForTunnel, self).__init__()
self.vxnet_id = vxnet_id
self.tunnel_entries = tunnel_entries
@staticmethod
def extract(kw):
if 'val1' in kw:
kw['tunnel_entries'] = [tuple(entry.split('|')) for entry in kw.pop('val1').split(';')]
return kw
def extra_props(self):
return {
'vxnet_id': self.vxnet_id,
'val1': ';'.join('%s|%s|%s' % entry for entry in self.tunnel_entries),
}
class _StaticForFiltering(_RouterStatic):
static_type = RouterStaticFactory.TYPE_FILTERING
def __init__(self, router_static_name='', src_ip='', src_port='',
dst_ip='', dst_port='', priority='1', action='', **kw):
super(_StaticForFiltering, self).__init__()
self.router_static_name = router_static_name
self.src_ip = src_ip
self.src_port = src_port
self.dst_ip = dst_ip
self.dst_port = dst_port
self.priority = priority
self.action = action
@staticmethod
def extract(kw):
if 'val1' in kw:
kw['src_ip'] = kw.pop('val1')
if 'val2' in kw:
kw['src_port'] = kw.pop('val2')
if 'val3' in kw:
kw['dst_ip'] = kw.pop('val3')
if 'val4' in kw:
kw['dst_port'] = kw.pop('val4')
if 'val5' in kw:
kw['priority'] = kw.pop('val5')
if 'val6' in kw:
kw['action'] = kw.pop('val6')
return kw
def extra_props(self):
return {
'router_static_name': self.router_static_name,
'val1': self.src_ip,
'val2': self.src_port,
'val3': self.dst_ip,
'val4': self.dst_port,
'val5': self.priority,
'val6': self.action,
}
STATIC_MAPPER = {
RouterStaticFactory.TYPE_PORT_FORWARDING: _StaticForPortForwarding,
RouterStaticFactory.TYPE_VPN: _StaticForVPN,
RouterStaticFactory.TYPE_TUNNEL: _StaticForTunnel,
RouterStaticFactory.TYPE_FILTERING: _StaticForFiltering,
}
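# Usage sketch (addresses and ports below are made up):
#
#   pf = RouterStaticFactory.create(RouterStaticFactory.TYPE_PORT_FORWARDING,
#                                   src_port='80', dst_ip='192.168.1.2',
#                                   dst_port='8080', protocol='tcp')
#   pf.to_json()
#   # -> {'router_static_id': '', 'static_type': 1, 'router_static_name': '',
#   #     'val1': '80', 'val2': '192.168.1.2', 'val3': '8080', 'val4': 'tcp'}
#
# RouterStaticFactory.create_from_string() accepts the JSON form of such a dict
# (or a list of them) and rebuilds the corresponding rule objects.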
|
apache-2.0
| 8,290,995,983,507,668,000
| 30.503846
| 99
| 0.521182
| false
| 3.790375
| false
| false
| false
|
matplotlib/freetypy
|
docstrings/truetype.py
|
1
|
5852
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
TT_PLATFORM = """
Platform identifier codes.
- `APPLE_UNICODE`: Used by Apple to indicate a Unicode character map
and/or name entry. See `TT_APPLE_ID` for corresponding ‘encoding_id’
values. Note that name entries in this format are coded as
big-endian UCS-2 character codes only.
- `MACINTOSH`: Used by Apple to indicate a MacOS-specific charmap
and/or name entry. See `TT_MAC_ID` for corresponding ‘encoding_id’
values. Note that most TrueType fonts contain an Apple roman charmap
to be usable on MacOS systems (even if they contain a Microsoft
charmap as well).
- `ISO`: This value was used to specify ISO/IEC 10646 charmaps. It is
however now deprecated. See `TT_ISO_ID` for a list of
corresponding ‘encoding_id’ values.
- `MICROSOFT`: Used by Microsoft to indicate Windows-specific
charmaps. See `TT_MS_ID` for a list of corresponding ‘encoding_id’
values. Note that most fonts contain a Unicode charmap using
(`TT_PLATFORM.MICROSOFT`, `TT_MS_ID.UNICODE_CS`).
- `CUSTOM`: Used to indicate application-specific charmaps.
- `ADOBE`: This value isn't part of any font format specification, but
is used by FreeType to report Adobe-specific charmaps in a
`CharMap` object. See `TT_ADOBE_ID`.
"""
TT_APPLE_ID = """
Apple-specific encoding values.
- `DEFAULT`: Unicode version 1.0.
- `UNICODE_1_1`: Unicode 1.1; specifies Hangul characters starting at
U+34xx.
- `ISO_10646`: Deprecated (identical to preceding).
- `UNICODE_2_0`: Unicode 2.0 and beyond (UTF-16 BMP only).
- `UNICODE_32`: Unicode 3.1 and beyond, using UTF-32.
- `VARIANT_SELECTOR`: From Adobe, not Apple. Not a normal
cmap. Specifies variations on a real cmap.
"""
TT_ADOBE_ID = """
Adobe-specific encoding values.
- `STANDARD`: Adobe standard encoding.
- `EXPERT`: Adobe expert encoding.
- `CUSTOM`: Adobe custom encoding.
- `LATIN_1`: Adobe Latin 1 encoding.
"""
TT_ISO_ID = """
Standard ISO encodings.
- `ISO_7BIT_ASCII`: ASCII.
- `ISO_10646`: ISO/10646.
- `ISO_8859_1`: Also known as Latin-1.
"""
TT_MAC_ID = """
Macintosh-specific encoding values.
- `ROMAN`
- `JAPANESE`
- `TRADITIONAL_CHINESE`
- `KOREAN`
- `ARABIC`
- `HEBREW`
- `GREEK`
- `RUSSIAN`
- `RSYMBOL`
- `DEVANAGARI`
- `GURMUKHI`
- `GUJARATI`
- `ORIYA`
- `BENGALI`
- `TAMIL`
- `TELUGU`
- `KANNADA`
- `MALAYALAM`
- `SINHALESE`
- `BURMESE`
- `KHMER`
- `THAI`
- `LAOTIAN`
- `GEORGIAN`
- `ARMENIAN`
- `MALDIVIAN`
- `SIMPLIFIED_CHINESE`
- `TIBETAN`
- `MONGOLIAN`
- `GEEZ`
- `SLAVIC`
- `VIETNAMESE`
- `SINDHI`
- `UNINTERP`
"""
TT_MAC_LANGID = """
Language identifier.
Used in the name records of the TTF "name" table if the "platform"
identifier code is `TT_PLATFORM.MACINTOSH`.
"""
TT_MS_ID = """
Microsoft-specific encoding values.
- `SYMBOL_CS`: Corresponds to Microsoft symbol encoding. See
`ENCODING.MS_SYMBOL`.
- `UNICODE_CS`: Corresponds to a Microsoft WGL4 charmap, matching
Unicode. See `ENCODING.UNICODE`.
- `SJIS`: Corresponds to SJIS Japanese encoding. See `ENCODING.SJIS`.
- `GB2312`: Corresponds to Simplified Chinese as used in Mainland
China. See `ENCODING.GB2312`.
- `BIG_5`: Corresponds to Traditional Chinese as used in Taiwan and
Hong Kong. See `ENCODING.BIG5`.
- `WANSUNG`: Corresponds to Korean Wansung encoding. See
`ENCODING.WANSUNG`.
- `JOHAB`: Corresponds to Johab encoding. See `ENCODING.JOHAB`.
- `UCS_4`: Corresponds to UCS-4 or UTF-32 charmaps. This has been
added to the OpenType specification version 1.4 (mid-2001.)
"""
TT_MS_LANGID = """
Language identifier.
Used in the name records of the TTF "name" table if the "platform"
identifier code is `TT_PLATFORM.MICROSOFT`.
"""
TT_NAME_ID = """
The type of value stored in a `SfntName` record.
- `COPYRIGHT`
- `FONT_FAMILY`
- `FONT_SUBFAMILY`
- `UNIQUE_ID`
- `FULL_NAME`
- `VERSION_STRING`
- `PS_NAME`
- `TRADEMARK`
The following values are from the OpenType spec:
- `MANUFACTURER`
- `DESIGNER`
- `DESCRIPTION`
- `VENDOR_URL`
- `DESIGNER_URL`
- `LICENSE`
- `LICENSE_URL`
- `PREFERRED_FAMILY`
- `PREFERRED_SUBFAMILY`
- `MAC_FULL_NAME`
- `SAMPLE_TEXT`
This is new in OpenType 1.3:
- `CID_FINDFONT_NAME`
This is new in OpenType 1.5:
- `WWS_FAMILY`
- `WWS_SUBFAMILY`
"""
|
bsd-2-clause
| -7,203,823,751,697,514,000
| 25.527273
| 72
| 0.72207
| false
| 3.071579
| false
| false
| false
|
foursquare/pants
|
contrib/errorprone/src/python/pants/contrib/errorprone/tasks/errorprone.py
|
1
|
7607
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from builtins import str
from pants.backend.jvm import argfile
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.java.jar.jar_dependency import JarDependency
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from pants.util.strutil import safe_shlex_split
class ErrorProne(NailgunTask):
"""Check Java code for Error Prone violations. See http://errorprone.info/ for more details."""
_ERRORPRONE_MAIN = 'com.google.errorprone.ErrorProneCompiler'
_JAVA_SOURCE_EXTENSION = '.java'
@classmethod
def register_options(cls, register):
super(ErrorProne, cls).register_options(register)
register('--skip', type=bool, help='Skip Error Prone.')
register('--transitive', default=False, type=bool,
help='Run Error Prone against transitive dependencies of targets '
'specified on the command line.')
register('--command-line-options', type=list, default=[], fingerprint=True,
help='Command line options passed to Error Prone')
register('--exclude-patterns', type=list, default=[], fingerprint=True,
help='Patterns for targets to be excluded from analysis.')
cls.register_jvm_tool(register,
'errorprone',
classpath=[
JarDependency(org='com.google.errorprone',
name='error_prone_core',
rev='2.3.1'),
],
main=cls._ERRORPRONE_MAIN,
custom_rules=[
Shader.exclude_package('com.google.errorprone', recursive=True)
]
)
# The javac version should be kept in sync with the version used by errorprone above.
cls.register_jvm_tool(register,
'errorprone-javac',
classpath=[
JarDependency(org='com.google.errorprone',
name='javac',
rev='9+181-r4173-1'),
])
@classmethod
def prepare(cls, options, round_manager):
super(ErrorProne, cls).prepare(options, round_manager)
round_manager.require_data('runtime_classpath')
@memoized_property
def _exclude_patterns(self):
return [re.compile(x) for x in set(self.get_options().exclude_patterns or [])]
def _is_errorprone_target(self, target):
if not target.has_sources(self._JAVA_SOURCE_EXTENSION):
self.context.log.debug('Skipping [{}] because it has no {} sources'.format(target.address.spec, self._JAVA_SOURCE_EXTENSION))
return False
if target.is_synthetic:
self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
return False
for pattern in self._exclude_patterns:
if pattern.search(target.address.spec):
self.context.log.debug(
"Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
return False
return True
@property
def cache_target_dirs(self):
return True
def execute(self):
if self.get_options().skip:
return
if self.get_options().transitive:
targets = self.context.targets(self._is_errorprone_target)
else:
targets = [t for t in self.context.target_roots if self._is_errorprone_target(t)]
targets = list(set(targets))
target_count = 0
errorprone_failed = False
with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
total_targets = len(invalidation_check.invalid_vts)
for vt in invalidation_check.invalid_vts:
target_count += 1
self.context.log.info('[{}/{}] {}'.format(
str(target_count).rjust(len(str(total_targets))),
total_targets,
vt.target.address.spec))
result = self.errorprone(vt.target)
if result != 0:
errorprone_failed = True
if self.get_options().fail_fast:
break
else:
vt.update()
if errorprone_failed:
raise TaskError('ErrorProne checks failed')
def calculate_sources(self, target):
return {source for source in target.sources_relative_to_buildroot()
if source.endswith(self._JAVA_SOURCE_EXTENSION)}
def errorprone(self, target):
runtime_classpaths = self.context.products.get_data('runtime_classpath')
runtime_classpath = [jar for conf, jar in runtime_classpaths.get_for_targets(target.closure(bfs=True))]
output_dir = os.path.join(self.workdir, target.id)
safe_mkdir(output_dir)
runtime_classpath.append(output_dir)
# Try to run errorprone with the same java version as the target
# The minimum JDK for errorprone is JDK 1.8
min_jdk_version = max(target.platform.target_level, Revision.lenient('1.8'))
if min_jdk_version.components[0] == 1:
max_jdk_version = Revision(min_jdk_version.components[0], min_jdk_version.components[1], '9999')
else:
max_jdk_version = Revision(min_jdk_version.components[0], '9999')
self.set_distribution(minimum_version=min_jdk_version, maximum_version=max_jdk_version, jdk=True)
jvm_options = self.get_options().jvm_options[:]
if self.dist.version < Revision.lenient('9'):
# For Java 8 we need to add the errorprone javac jar to the bootclasspath to
# avoid the "java.lang.NoSuchFieldError: ANNOTATION_PROCESSOR_MODULE_PATH" error
# See https://github.com/google/error-prone/issues/653 for more information
jvm_options.extend(['-Xbootclasspath/p:{}'.format(self.tool_classpath('errorprone-javac')[0])])
args = [
'-d', output_dir,
]
# Errorprone does not recognize source or target 10 yet
if target.platform.source_level < Revision.lenient('10'):
args.extend(['-source', str(target.platform.source_level)])
if target.platform.target_level < Revision.lenient('10'):
args.extend(['-target', str(target.platform.target_level)])
errorprone_classpath_file = os.path.join(self.workdir, '{}.classpath'.format(os.path.basename(output_dir)))
with open(errorprone_classpath_file, 'w') as f:
f.write('-classpath ')
f.write(':'.join(runtime_classpath))
args.append('@{}'.format(errorprone_classpath_file))
for opt in self.get_options().command_line_options:
args.extend(safe_shlex_split(opt))
with argfile.safe_args(self.calculate_sources(target), self.get_options()) as batched_sources:
args.extend(batched_sources)
result = self.runjava(classpath=self.tool_classpath('errorprone'),
main=self._ERRORPRONE_MAIN,
jvm_options=jvm_options,
args=args,
workunit_name='errorprone',
workunit_labels=[WorkUnitLabel.LINT])
self.context.log.debug('java {main} ... exited with result ({result})'.format(
main=self._ERRORPRONE_MAIN, result=result))
return result
|
apache-2.0
| 2,359,390,993,532,608,000
| 40.342391
| 131
| 0.640331
| false
| 3.91508
| false
| false
| false
|
ace02000/pyload
|
module/plugins/accounts/Keep2ShareCc.py
|
1
|
2362
|
# -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class Keep2ShareCc(Account):
__name__ = "Keep2ShareCc"
__type__ = "account"
__version__ = "0.10"
__status__ = "testing"
__description__ = """Keep2Share.cc account plugin"""
__license__ = "GPLv3"
__authors__ = [("aeronaut", "aeronaut@pianoguy.de"),
("Walter Purcaro", "vuolter@gmail.com")]
VALID_UNTIL_PATTERN = r'Premium expires:\s*<b>(.+?)<'
TRAFFIC_LEFT_PATTERN = r'Available traffic \(today\):\s*<b><a href="/user/statistic.html">(.+?)<'
LOGIN_FAIL_PATTERN = r'Please fix the following input errors'
def grab_info(self, user, password, data):
validuntil = None
trafficleft = -1
premium = False
html = self.load("http://keep2share.cc/site/profile.html")
m = re.search(self.VALID_UNTIL_PATTERN, html)
if m:
expiredate = m.group(1).strip()
self.log_debug("Expire date: " + expiredate)
if expiredate == "LifeTime":
premium = True
validuntil = -1
else:
try:
validuntil = time.mktime(time.strptime(expiredate, "%Y.%m.%d"))
except Exception, e:
self.log_error(e)
else:
premium = True if validuntil > time.mktime(time.gmtime()) else False
m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
if m:
try:
trafficleft = self.parse_traffic(m.group(1))
except Exception, e:
self.log_error(e)
return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
def signin(self, user, password, data):
set_cookie(self.req.cj, "keep2share.cc", "lang", "en")
html = self.load("https://keep2share.cc/login.html",
post={'LoginForm[username]' : user,
'LoginForm[password]' : password,
'LoginForm[rememberMe]': 1,
'yt0' : ""})
if re.search(self.LOGIN_FAIL_PATTERN, html):
self.fail_login()
|
gpl-3.0
| -2,316,719,965,670,955,000
| 30.918919
| 101
| 0.516511
| false
| 3.7792
| false
| false
| false
|
namgivu/shared-model-FlaskSqlAlchemy-vs-SQLAlchemy
|
python-app/model/user.py
|
1
|
1292
|
from base_model import BaseModel
import sqlalchemy as db
class User(BaseModel):
#table mapping
__tablename__ = "users"
##region column mapping
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.Text)
primary_email_id = db.Column(db.Integer, db.ForeignKey('user_emails.id') )
#Use model class instead of physical table name for db.ForeignKey() ref. http://stackoverflow.com/a/41633052/248616
from model.address import Address
billing_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
shipping_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
##endregion column mapping
##region relationship obj
emails = db.relationship('UserEmail',
primaryjoin='User.id==UserEmail.user_id',
back_populates='owner')
primaryEmail = db.relationship('UserEmail',
primaryjoin='User.primary_email_id==UserEmail.id')
billingAddress = db.relationship('Address',
primaryjoin='User.billing_address_id==Address.id')
shippingAddress = db.relationship('Address',
primaryjoin='User.shipping_address_id==Address.id')
##endregion relationship obj
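# Query sketch (assumes a configured SQLAlchemy session named `session`, which is not
# defined in this module):
#
#   u = session.query(User).filter(User.user_name == 'alice').first()
#   u.emails           # all UserEmail rows whose user_id points at this user
#   u.billingAddress   # the Address row referenced by billing_address_id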
|
gpl-3.0
| 6,215,019,284,809,545,000
| 37
| 117
| 0.647059
| false
| 4.050157
| false
| false
| false
|
Disiok/poetry-seq2seq
|
utils.py
|
1
|
1442
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
import os

from keras.utils import to_categorical  # used by apply_one_hot(); assumes Keras 2 is installed
VOCAB_SIZE = 6000
SEP_TOKEN = 0
PAD_TOKEN = 5999
DATA_RAW_DIR = 'data/raw'
DATA_PROCESSED_DIR = 'data/processed'
DATA_SAMPLES_DIR = 'data/samples'
MODEL_DIR = 'model'
LOG_DIR = 'log'
if not os.path.exists(DATA_PROCESSED_DIR):
os.mkdir(DATA_PROCESSED_DIR)
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
def embed_w2v(embedding, data_set):
embedded = [map(lambda x: embedding[x], sample) for sample in data_set]
return embedded
def apply_one_hot(data_set):
applied = [map(lambda x: to_categorical(x, num_classes=VOCAB_SIZE)[0], sample) for sample in data_set]
return applied
def apply_sparse(data_set):
applied = [map(lambda x: [x], sample) for sample in data_set]
return applied
def pad_to(lst, length, value):
for i in range(len(lst), length):
lst.append(value)
return lst
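# e.g. pad_to([3, 1, 4], 5, PAD_TOKEN) -> [3, 1, 4, 5999, 5999]  (the list is padded in place)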
def uprint(x):
print repr(x).decode('unicode-escape'),
def uprintln(x):
print repr(x).decode('unicode-escape')
def is_CN_char(ch):
return ch >= u'\u4e00' and ch <= u'\u9fa5'
def split_sentences(line):
sentences = []
i = 0
for j in range(len(line)+1):
if j == len(line) or line[j] in [u',', u'。', u'!', u'?', u'、']:
if i < j:
sentence = u''.join(filter(is_CN_char, line[i:j]))
sentences.append(sentence)
i = j+1
return sentences
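# Illustrative example: split_sentences(u'床前明月光,疑是地上霜。') returns
# [u'床前明月光', u'疑是地上霜'] -- the line is cut at CJK punctuation and only
# CN characters (per is_CN_char) are kept inside each sentence.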
|
mit
| -4,215,054,157,706,704,400
| 20.058824
| 106
| 0.609637
| false
| 2.875502
| false
| false
| false
|
rohitwaghchaure/erpnext_develop
|
erpnext/regional/united_arab_emirates/setup.py
|
1
|
2919
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os, json
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from erpnext.setup.setup_wizard.operations.taxes_setup import create_sales_tax
def setup(company=None, patch=True):
make_custom_fields()
add_print_formats()
if company:
create_sales_tax(company)
def make_custom_fields():
invoice_fields = [
dict(fieldname='vat_section', label='VAT Details', fieldtype='Section Break',
insert_after='select_print_heading', print_hide=1, collapsible=1),
dict(fieldname='permit_no', label='Permit Number',
fieldtype='Data', insert_after='vat_section', print_hide=1),
dict(fieldname='reverse_charge_applicable', label='Reverse Charge Applicable',
fieldtype='Select', insert_after='permit_no', print_hide=1,
options='Y\nN', default='N')
]
purchase_invoice_fields = [
dict(fieldname='company_trn', label='Company TRN',
fieldtype='Read Only', insert_after='shipping_address',
options='company.tax_id', print_hide=1),
dict(fieldname='supplier_name_in_arabic', label='Supplier Name in Arabic',
fieldtype='Read Only', insert_after='supplier_name',
options='supplier.supplier_name_in_arabic', print_hide=1)
]
sales_invoice_fields = [
dict(fieldname='company_trn', label='Company TRN',
fieldtype='Read Only', insert_after='company_address',
options='company.tax_id', print_hide=1),
dict(fieldname='customer_name_in_arabic', label='Customer Name in Arabic',
fieldtype='Read Only', insert_after='customer_name',
options='customer.customer_name_in_arabic', print_hide=1),
]
tax_code_field = dict(fieldname='tax_code', label='Tax Code',
fieldtype='Read Only', options='item_code.tax_code', insert_after='description',
allow_on_submit=1, print_hide=1)
custom_fields = {
'Item': [
dict(fieldname='tax_code', label='Tax Code',
fieldtype='Data', insert_after='item_group'),
],
'Customer': [
dict(fieldname='customer_name_in_arabic', label='Customer Name in Arabic',
fieldtype='Data', insert_after='customer_name'),
],
'Supplier': [
dict(fieldname='supplier_name_in_arabic', label='Supplier Name in Arabic',
fieldtype='Data', insert_after='supplier_name'),
],
'Purchase Invoice': purchase_invoice_fields + invoice_fields,
'Sales Invoice': sales_invoice_fields + invoice_fields,
'Sales Invoice Item': [tax_code_field],
'Purchase Invoice Item': [tax_code_field]
}
create_custom_fields(custom_fields)
def add_print_formats():
frappe.reload_doc("regional", "print_format", "detailed_tax_invoice")
frappe.reload_doc("regional", "print_format", "simplified_tax_invoice")
frappe.db.sql(""" update `tabPrint Format` set disabled = 0 where
name in('Simplified Tax Invoice', 'Detailed Tax Invoice') """)
|
gpl-3.0
| 1,549,981,976,826,451,200
| 37.407895
| 82
| 0.711545
| false
| 3.239734
| false
| false
| false
|
w1ll1am23/home-assistant
|
homeassistant/components/smartthings/sensor.py
|
1
|
12696
|
"""Support for sensors through the SmartThings cloud API."""
from __future__ import annotations
from collections import namedtuple
from typing import Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
AREA_SQUARE_METERS,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLT,
VOLUME_CUBIC_METERS,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
Map = namedtuple("map", "attribute name default_unit device_class")
CAPABILITY_TO_SENSORS = {
Capability.activity_lighting_mode: [
Map(Attribute.lighting_mode, "Activity Lighting Mode", None, None)
],
Capability.air_conditioner_mode: [
Map(Attribute.air_conditioner_mode, "Air Conditioner Mode", None, None)
],
Capability.air_quality_sensor: [
Map(Attribute.air_quality, "Air Quality", "CAQI", None)
],
Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None)],
Capability.audio_volume: [Map(Attribute.volume, "Volume", PERCENTAGE, None)],
Capability.battery: [
Map(Attribute.battery, "Battery", PERCENTAGE, DEVICE_CLASS_BATTERY)
],
Capability.body_mass_index_measurement: [
Map(
Attribute.bmi_measurement,
"Body Mass Index",
f"{MASS_KILOGRAMS}/{AREA_SQUARE_METERS}",
None,
)
],
Capability.body_weight_measurement: [
Map(Attribute.body_weight_measurement, "Body Weight", MASS_KILOGRAMS, None)
],
Capability.carbon_dioxide_measurement: [
Map(
Attribute.carbon_dioxide,
"Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.carbon_monoxide_detector: [
Map(Attribute.carbon_monoxide, "Carbon Monoxide Detector", None, None)
],
Capability.carbon_monoxide_measurement: [
Map(
Attribute.carbon_monoxide_level,
"Carbon Monoxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.dishwasher_operating_state: [
Map(Attribute.machine_state, "Dishwasher Machine State", None, None),
Map(Attribute.dishwasher_job_state, "Dishwasher Job State", None, None),
Map(
Attribute.completion_time,
"Dishwasher Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dryer_mode: [Map(Attribute.dryer_mode, "Dryer Mode", None, None)],
Capability.dryer_operating_state: [
Map(Attribute.machine_state, "Dryer Machine State", None, None),
Map(Attribute.dryer_job_state, "Dryer Job State", None, None),
Map(
Attribute.completion_time,
"Dryer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dust_sensor: [
Map(Attribute.fine_dust_level, "Fine Dust Level", None, None),
Map(Attribute.dust_level, "Dust Level", None, None),
],
Capability.energy_meter: [
Map(Attribute.energy, "Energy Meter", ENERGY_KILO_WATT_HOUR, None)
],
Capability.equivalent_carbon_dioxide_measurement: [
Map(
Attribute.equivalent_carbon_dioxide_measurement,
"Equivalent Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.formaldehyde_measurement: [
Map(
Attribute.formaldehyde_level,
"Formaldehyde Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.gas_meter: [
Map(Attribute.gas_meter, "Gas Meter", ENERGY_KILO_WATT_HOUR, None),
Map(Attribute.gas_meter_calorific, "Gas Meter Calorific", None, None),
Map(Attribute.gas_meter_time, "Gas Meter Time", None, DEVICE_CLASS_TIMESTAMP),
Map(Attribute.gas_meter_volume, "Gas Meter Volume", VOLUME_CUBIC_METERS, None),
],
Capability.illuminance_measurement: [
Map(Attribute.illuminance, "Illuminance", LIGHT_LUX, DEVICE_CLASS_ILLUMINANCE)
],
Capability.infrared_level: [
Map(Attribute.infrared_level, "Infrared Level", PERCENTAGE, None)
],
Capability.media_input_source: [
Map(Attribute.input_source, "Media Input Source", None, None)
],
Capability.media_playback_repeat: [
Map(Attribute.playback_repeat_mode, "Media Playback Repeat", None, None)
],
Capability.media_playback_shuffle: [
Map(Attribute.playback_shuffle, "Media Playback Shuffle", None, None)
],
Capability.media_playback: [
Map(Attribute.playback_status, "Media Playback Status", None, None)
],
Capability.odor_sensor: [Map(Attribute.odor_level, "Odor Sensor", None, None)],
Capability.oven_mode: [Map(Attribute.oven_mode, "Oven Mode", None, None)],
Capability.oven_operating_state: [
Map(Attribute.machine_state, "Oven Machine State", None, None),
Map(Attribute.oven_job_state, "Oven Job State", None, None),
Map(Attribute.completion_time, "Oven Completion Time", None, None),
],
Capability.oven_setpoint: [
Map(Attribute.oven_setpoint, "Oven Set Point", None, None)
],
Capability.power_meter: [Map(Attribute.power, "Power Meter", POWER_WATT, None)],
Capability.power_source: [Map(Attribute.power_source, "Power Source", None, None)],
Capability.refrigeration_setpoint: [
Map(
Attribute.refrigeration_setpoint,
"Refrigeration Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.relative_humidity_measurement: [
Map(
Attribute.humidity,
"Relative Humidity Measurement",
PERCENTAGE,
DEVICE_CLASS_HUMIDITY,
)
],
Capability.robot_cleaner_cleaning_mode: [
Map(
Attribute.robot_cleaner_cleaning_mode,
"Robot Cleaner Cleaning Mode",
None,
None,
)
],
Capability.robot_cleaner_movement: [
Map(Attribute.robot_cleaner_movement, "Robot Cleaner Movement", None, None)
],
Capability.robot_cleaner_turbo_mode: [
Map(Attribute.robot_cleaner_turbo_mode, "Robot Cleaner Turbo Mode", None, None)
],
Capability.signal_strength: [
Map(Attribute.lqi, "LQI Signal Strength", None, None),
Map(Attribute.rssi, "RSSI Signal Strength", None, None),
],
Capability.smoke_detector: [Map(Attribute.smoke, "Smoke Detector", None, None)],
Capability.temperature_measurement: [
Map(
Attribute.temperature,
"Temperature Measurement",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_cooling_setpoint: [
Map(
Attribute.cooling_setpoint,
"Thermostat Cooling Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_fan_mode: [
Map(Attribute.thermostat_fan_mode, "Thermostat Fan Mode", None, None)
],
Capability.thermostat_heating_setpoint: [
Map(
Attribute.heating_setpoint,
"Thermostat Heating Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_mode: [
Map(Attribute.thermostat_mode, "Thermostat Mode", None, None)
],
Capability.thermostat_operating_state: [
Map(
Attribute.thermostat_operating_state,
"Thermostat Operating State",
None,
None,
)
],
Capability.thermostat_setpoint: [
Map(
Attribute.thermostat_setpoint,
"Thermostat Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.three_axis: [],
Capability.tv_channel: [
Map(Attribute.tv_channel, "Tv Channel", None, None),
Map(Attribute.tv_channel_name, "Tv Channel Name", None, None),
],
Capability.tvoc_measurement: [
Map(
Attribute.tvoc_level,
"Tvoc Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.ultraviolet_index: [
Map(Attribute.ultraviolet_index, "Ultraviolet Index", None, None)
],
Capability.voltage_measurement: [
Map(Attribute.voltage, "Voltage Measurement", VOLT, None)
],
Capability.washer_mode: [Map(Attribute.washer_mode, "Washer Mode", None, None)],
Capability.washer_operating_state: [
Map(Attribute.machine_state, "Washer Machine State", None, None),
Map(Attribute.washer_job_state, "Washer Job State", None, None),
Map(
Attribute.completion_time,
"Washer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
}
UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add binary sensors for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
sensors = []
for device in broker.devices.values():
for capability in broker.get_assigned(device.device_id, "sensor"):
if capability == Capability.three_axis:
sensors.extend(
[
SmartThingsThreeAxisSensor(device, index)
for index in range(len(THREE_AXIS_NAMES))
]
)
else:
maps = CAPABILITY_TO_SENSORS[capability]
sensors.extend(
[
SmartThingsSensor(
device, m.attribute, m.name, m.default_unit, m.device_class
)
for m in maps
]
)
async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
return [
capability for capability in CAPABILITY_TO_SENSORS if capability in capabilities
]
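# Illustrative example (assumed values, not part of the original integration):
# passing [Capability.battery, "someUnmappedCapability"] to get_capabilities()
# returns [Capability.battery], because only capabilities that appear as keys
# of CAPABILITY_TO_SENSORS are kept.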
class SmartThingsSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Sensor."""
def __init__(
self, device, attribute: str, name: str, default_unit: str, device_class: str
):
"""Init the class."""
super().__init__(device)
self._attribute = attribute
self._name = name
self._device_class = device_class
self._default_unit = default_unit
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{self._attribute}"
@property
def state(self):
"""Return the state of the sensor."""
return self._device.status.attributes[self._attribute].value
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
unit = self._device.status.attributes[self._attribute].unit
return UNITS.get(unit, unit) if unit else self._default_unit
class SmartThingsThreeAxisSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Three Axis Sensor."""
def __init__(self, device, index):
"""Init the class."""
super().__init__(device)
self._index = index
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {THREE_AXIS_NAMES[self._index]}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{THREE_AXIS_NAMES[self._index]}"
@property
def state(self):
"""Return the state of the sensor."""
three_axis = self._device.status.attributes[Attribute.three_axis].value
try:
return three_axis[self._index]
except (TypeError, IndexError):
return None
|
apache-2.0
| -5,184,662,723,028,571,000
| 32.856
| 88
| 0.604285
| false
| 3.728634
| false
| false
| false
|
pixelpicosean/my-godot-2.1
|
platform/android/detect.py
|
1
|
9664
|
import os
import sys
import string
import platform
def is_active():
return True
def get_name():
return "Android"
def can_build():
import os
if (not os.environ.has_key("ANDROID_NDK_ROOT")):
return False
return True
def get_opts():
return [
('ANDROID_NDK_ROOT', 'the path to Android NDK',
os.environ.get("ANDROID_NDK_ROOT", 0)),
('ndk_platform', 'compile for platform: (android-<api> , example: android-14)', "android-14"),
('android_arch', 'select compiler architecture: (armv7/armv6/x86)', "armv7"),
('android_neon', 'enable neon (armv7 only)', "yes"),
('android_stl', 'enable STL support in android port (for modules)', "no")
]
def get_flags():
return [
('tools', 'no'),
]
def create(env):
tools = env['TOOLS']
if "mingw" in tools:
tools.remove('mingw')
if "applelink" in tools:
tools.remove("applelink")
env.Tool('gcc')
return env.Clone(tools=tools)
def configure(env):
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
import os
if (os.name == "nt"):
import subprocess
def mySubProcess(cmdline, env):
# print "SPAWNED : " + cmdline
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv = 0
if len(cmdline) > 32000 and cmd.endswith("ar"):
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3, len(args)):
rv = mySubProcess(cmdline + args[i], env)
if rv:
break
else:
rv = mySubProcess(cmdline, env)
return rv
env['SPAWN'] = mySpawn
ndk_platform = env['ndk_platform']
if env['android_arch'] not in ['armv7', 'armv6', 'x86']:
env['android_arch'] = 'armv7'
if env['android_arch'] == 'x86':
env["x86_libtheora_opt_gcc"] = True
if env['PLATFORM'] == 'win32':
env.Tool('gcc')
env['SHLIBSUFFIX'] = '.so'
neon_text = ""
if env["android_arch"] == "armv7" and env['android_neon'] == 'yes':
neon_text = " (with neon)"
print("Godot Android!!!!! (" + env['android_arch'] + ")" + neon_text)
env.Append(CPPPATH=['#platform/android'])
if env['android_arch'] == 'x86':
env.extra_suffix = ".x86" + env.extra_suffix
target_subpath = "x86-4.9"
abi_subpath = "i686-linux-android"
arch_subpath = "x86"
elif env['android_arch'] == 'armv6':
env.extra_suffix = ".armv6" + env.extra_suffix
target_subpath = "arm-linux-androideabi-4.9"
abi_subpath = "arm-linux-androideabi"
arch_subpath = "armeabi"
elif env["android_arch"] == "armv7":
target_subpath = "arm-linux-androideabi-4.9"
abi_subpath = "arm-linux-androideabi"
arch_subpath = "armeabi-v7a"
if env['android_neon'] == 'yes':
env.extra_suffix = ".armv7.neon" + env.extra_suffix
else:
env.extra_suffix = ".armv7" + env.extra_suffix
mt_link = True
if (sys.platform.startswith("linux")):
host_subpath = "linux-x86_64"
elif (sys.platform.startswith("darwin")):
host_subpath = "darwin-x86_64"
elif (sys.platform.startswith('win')):
if (platform.machine().endswith('64')):
host_subpath = "windows-x86_64"
else:
mt_link = False
host_subpath = "windows"
compiler_path = env["ANDROID_NDK_ROOT"] + \
"/toolchains/llvm/prebuilt/" + host_subpath + "/bin"
gcc_toolchain_path = env["ANDROID_NDK_ROOT"] + \
"/toolchains/" + target_subpath + "/prebuilt/" + host_subpath
tools_path = gcc_toolchain_path + "/" + abi_subpath + "/bin"
    # For Clang to find the NDK tools in preference to the system-wide ones
env.PrependENVPath('PATH', tools_path)
env['CC'] = compiler_path + '/clang'
env['CXX'] = compiler_path + '/clang++'
env['AR'] = tools_path + "/ar"
env['RANLIB'] = tools_path + "/ranlib"
env['AS'] = tools_path + "/as"
if env['android_arch'] == 'x86':
env['ARCH'] = 'arch-x86'
else:
env['ARCH'] = 'arch-arm'
sysroot = env["ANDROID_NDK_ROOT"] + \
"/platforms/" + ndk_platform + "/" + env['ARCH']
common_opts = ['-fno-integrated-as', '-gcc-toolchain', gcc_toolchain_path]
env.Append(CPPFLAGS=["-isystem", sysroot + "/usr/include"])
env.Append(CPPFLAGS=string.split(
'-fpic -ffunction-sections -funwind-tables -fstack-protector-strong -fvisibility=hidden -fno-strict-aliasing'))
env.Append(CPPFLAGS=string.split('-DANDROID -DNO_STATVFS -DGLES2_ENABLED'))
env['neon_enabled'] = False
if env['android_arch'] == 'x86':
can_vectorize = True
target_opts = ['-target', 'i686-none-linux-android']
        # The NDK adds this flag when targeting API < 21, so it can be dropped once Godot targets at least that API level
env.Append(CPPFLAGS=['-mstackrealign'])
elif env["android_arch"] == "armv6":
can_vectorize = False
target_opts = ['-target', 'armv6-none-linux-androideabi']
env.Append(CPPFLAGS=string.split(
'-D__ARM_ARCH_6__ -march=armv6 -mfpu=vfp -mfloat-abi=softfp'))
elif env["android_arch"] == "armv7":
can_vectorize = True
target_opts = ['-target', 'armv7-none-linux-androideabi']
env.Append(CPPFLAGS=string.split(
'-D__ARM_ARCH_7__ -D__ARM_ARCH_7A__ -march=armv7-a -mfloat-abi=softfp'))
if env['android_neon'] == 'yes':
env['neon_enabled'] = True
env.Append(CPPFLAGS=['-mfpu=neon', '-D__ARM_NEON__'])
else:
env.Append(CPPFLAGS=['-mfpu=vfpv3-d16'])
env.Append(CPPFLAGS=target_opts)
env.Append(CPPFLAGS=common_opts)
env.Append(LIBS=['OpenSLES'])
env.Append(LIBS=['EGL', 'OpenSLES', 'android'])
env.Append(LIBS=['log', 'GLESv1_CM', 'GLESv2', 'z'])
if (sys.platform.startswith("darwin")):
env['SHLIBSUFFIX'] = '.so'
env['LINKFLAGS'] = ['-shared', '--sysroot=' +
sysroot, '-Wl,--warn-shared-textrel']
env.Append(LINKFLAGS=string.split(
'-Wl,--fix-cortex-a8'))
env.Append(LINKFLAGS=string.split(
'-Wl,--no-undefined -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now'))
env.Append(LINKFLAGS=string.split(
'-Wl,-soname,libgodot_android.so -Wl,--gc-sections'))
if mt_link:
env.Append(LINKFLAGS=['-Wl,--threads'])
env.Append(LINKFLAGS=target_opts)
env.Append(LINKFLAGS=common_opts)
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] + '/toolchains/arm-linux-androideabi-4.9/prebuilt/' +
host_subpath + '/lib/gcc/' + abi_subpath + '/4.9.x'])
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] +
'/toolchains/arm-linux-androideabi-4.9/prebuilt/' + host_subpath + '/' + abi_subpath + '/lib'])
if (env["target"].startswith("release")):
env.Append(LINKFLAGS=['-O2'])
env.Append(CPPFLAGS=['-O2', '-DNDEBUG', '-ffast-math',
'-funsafe-math-optimizations', '-fomit-frame-pointer'])
if (can_vectorize):
env.Append(CPPFLAGS=['-ftree-vectorize'])
if (env["target"] == "release_debug"):
env.Append(CPPFLAGS=['-DDEBUG_ENABLED'])
elif (env["target"] == "debug"):
env.Append(LINKFLAGS=['-O0'])
env.Append(CPPFLAGS=['-O0', '-D_DEBUG', '-UNDEBUG', '-DDEBUG_ENABLED',
'-DDEBUG_MEMORY_ENABLED', '-g', '-fno-limit-debug-info'])
env.Append(CPPFLAGS=['-DANDROID_ENABLED',
'-DUNIX_ENABLED', '-DNO_FCNTL', '-DMPC_FIXED_POINT'])
# TODO: Move that to opus module's config
if("module_opus_enabled" in env and env["module_opus_enabled"] != "no"):
if (env["android_arch"] == "armv6" or env["android_arch"] == "armv7"):
env.Append(CFLAGS=["-DOPUS_ARM_OPT"])
env.opus_fixed_point = "yes"
if (env['android_stl'] == 'yes'):
env.Append(CPPPATH=[env["ANDROID_NDK_ROOT"] +
"/sources/cxx-stl/gnu-libstdc++/4.9/include"])
env.Append(CPPPATH=[env["ANDROID_NDK_ROOT"] +
"/sources/cxx-stl/gnu-libstdc++/4.9/libs/" + arch_subpath + "/include"])
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] +
"/sources/cxx-stl/gnu-libstdc++/4.9/libs/" + arch_subpath])
env.Append(LIBS=["gnustl_static"])
else:
env.Append(CXXFLAGS=['-fno-rtti', '-fno-exceptions', '-DNO_SAFE_CAST'])
import methods
env.Append(BUILDERS={'GLSL120': env.Builder(
action=methods.build_legacygl_headers, suffix='glsl.gen.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL': env.Builder(
action=methods.build_glsl_headers, suffix='glsl.gen.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL120GLES': env.Builder(
action=methods.build_gles2_headers, suffix='glsl.gen.h', src_suffix='.glsl')})
env.use_windows_spawn_fix()
|
mit
| 7,101,317,733,062,213,000
| 35.467925
| 119
| 0.561465
| false
| 3.274822
| false
| false
| false
|
Endika/connector-accountedge
|
hr_expense_accountedge/hr_expense_accountedge.py
|
1
|
8226
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from osv import orm, fields
from datetime import datetime
class hr_expense_expense(orm.Model):
_inherit = 'hr.expense.expense'
def _create_csv_report(self, cr, uid, ids, context={}):
res = {}
for id in ids:
this = self.browse(cr, uid, id)
output = this.employee_id.name
output += "\r\n"
output += "Employee\tCard ID\tDate\tVendor Invoice #\tAccount Number\tAmount\tDescription\
\tTax Code\tCurrency Code\tExchange Rate\r\n"
# Comment the previous line and uncomment the next one
# if you want to import taxes with their amount, instead of their code
# \tTax Code\tGST Amount\tPST/QST Amount\tCurrency Code\tExchange Rate\r\n"
for l in this.line_ids:
taxes = self._compute_taxes(cr, uid, l, context)
# output += u"%s\t%s\t%s\t%s\t%s\t%.2f\t%s\t%s\t%.2f\t%.2f\t%s\t%.2f\r\n" % (
output += u"%s\t%s\t%s\t%s\t%s\t%.2f\t%s\t%s\t%s\t%.2f\r\n" % (
this.employee_id.name,
this.employee_id.supplier_id_accountedge,
datetime.today().strftime("%d-%m-%Y"),
l.expense_id.id,
l.account_id.code,
taxes['amount_before_tax'],
l.name,
(l.tax_id.tax_code_accountedge or '000'),
# Comment the previous line and uncomment the next two ones
# if you want to import taxes with their amount, instead of their code
# taxes['amount_gst'],
# taxes['amount_pst'],
(l.expense_id.currency_id.name or 'CAD'),
(float(l.expense_id.currency_id.rate) or '1.0'))
byte_string = output.encode('utf-8-sig')
res[id] = base64.encodestring(byte_string)
self.write(cr, uid, ids, {'csv_file': res[id]}, context=context)
self._add_attachment(cr, uid, id, byte_string, context)
return True
def _compute_taxes(self, cr, uid, expense_line, context={}):
res = {
'amount_before_tax': expense_line.total_amount,
'amount_gst': 0.0, # Goods and Services Tax, federal
'amount_pst': 0.0 # Provincial Sales Tax
}
tax = expense_line.tax_id
if not tax.amount:
return res
        # Apply the half-tax factor when the expense account is flagged for it
tax_factor = 1.0
if expense_line.account_id.tax_halftax:
tax_factor = 0.5
if tax.child_ids:
for child_tax in tax.child_ids: # TODO: the detection of the two taxes should be more reliable
if 'TPS' in child_tax.name or 'GST' in child_tax.name:
res['amount_gst'] = float(child_tax.amount) * tax_factor
else:
res['amount_pst'] = float(child_tax.amount) * tax_factor
else:
res['amount_gst'] = float(tax.amount)
res['amount_before_tax'] = expense_line.total_amount / (1 + res['amount_gst'] + res['amount_pst'])
res['amount_gst'] = res['amount_before_tax'] * res['amount_gst']
res['amount_pst'] = res['amount_before_tax'] * res['amount_pst']
return res
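    # Worked example (hypothetical figures, assuming Quebec-style child taxes of
    # GST 5% and QST 9.975% on the parent tax, and no half-tax account):
    #   total_amount      = 114.98
    #   amount_before_tax = 114.98 / (1 + 0.05 + 0.09975) ~ 100.00
    #   amount_gst        ~ 100.00 * 0.05     = 5.00
    #   amount_pst        ~ 100.00 * 0.09975  = 9.98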
def _add_attachment(self, cr, uid, ids, content, context={}):
file_name = 'export_'+time.strftime('%Y%m%d_%H%M%S')+'.tsv'
self.pool.get('ir.attachment').create(cr, uid, {
'name': file_name,
'datas': base64.encodestring(content),
'datas_fname': file_name,
'res_model': self._name,
'res_id': ids,
},
context=context
)
return True
def action_exported(self, cr, uid, ids, *args):
if not len(ids):
return False
# Employee must have a recordID matching his supplier account
# in Accountedge to generate an expense sheet
for id in ids:
this = self.browse(cr, uid, id)
if not this.employee_id.supplier_id_accountedge:
raise orm.except_orm(
'Accountedge Supplier ID missing',
'Please add the Accountedge supplier ID on the employee before exporting the sheet.'
)
self._create_csv_report(cr, uid, ids, {})
self.write(cr, uid, ids, {'state': 'exported'})
return True
def action_imported(self, cr, uid, ids, *args):
if not len(ids):
return False
for id in ids:
self.write(cr, uid, ids, {'state': 'imported'})
return True
def _get_cur_account_manager(self, cr, uid, ids, field_name, arg, context):
res = {}
for id in ids:
emails = ''
grp_ids = self.pool.get('res.groups').search(
cr, uid, [
('name', '=', u'Manager'),
('category_id.name', '=', u'Accounting & Finance')
]
)
usr_ids = self.pool.get('res.users').search(cr, uid, [('groups_id', '=', grp_ids[0])])
usrs = self.pool.get('res.users').browse(cr, uid, usr_ids)
for user in usrs:
if user.user_email:
emails += user.user_email
emails += ','
else:
empl_id = self.pool.get('hr.employee').search(cr, uid, [('login', '=', user.login)])[0]
empl = self.pool.get('hr.employee').browse(cr, uid, empl_id)
if empl.work_email:
emails += empl.work_email
emails += ','
emails = emails[:-1]
res[id] = emails
return res
_columns = {
'manager': fields.function(_get_cur_account_manager, string='Manager', type='char', size=128, readonly=True),
'state': fields.selection([
('draft', 'New'),
('confirm', 'Waiting Approval'),
('accepted', 'Approved'),
('exported', 'Exported'),
('imported', 'Imported'),
('cancelled', 'Refused'), ],
'State', readonly=True,
help="When the expense request is created the state is 'Draft'.\n"
"It is confirmed by the user and request is sent to admin, the state is 'Waiting Confirmation'.\n"
"If the admin accepts it, the state is 'Accepted'.\n"
"If the admin refuses it, the state is 'Refused'.\n"
"If a csv file has been generated for the expense request, the state is 'Exported'.\n"
"If the expense request has been imported in AccountEdge, the state is 'Imported'."
),
}
class hr_expense_line(orm.Model):
_inherit = 'hr.expense.line'
def _get_parent_state(self, cr, uid, ids, field_name, arg, context):
res = {}
for id in ids:
expense_line = self.pool.get('hr.expense.line').browse(cr, uid, id)
res[id] = expense_line.expense_id.state
return res
_columns = {
'state': fields.function(_get_parent_state, string='Expense State', type='char', size=128, readonly=True),
}
|
agpl-3.0
| -4,562,259,662,920,168,000
| 40.336683
| 117
| 0.531972
| false
| 3.818942
| false
| false
| false
|
srcole/qwm
|
usa_map/map_util.py
|
1
|
4006
|
"""
map_util.py
Visualizing US state data on a geographical colormap
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap as Basemap
from matplotlib.colors import rgb2hex
from matplotlib.patches import Polygon
def usa_state_colormap(state_dict, title='', colorbar_title=''):
"""
Plot data as a function of US state onto a geographical colormap
Parameters
----------
state_dict : dict
Keys are states, and values are the feature value to be converted to color
title : str
Title of plot
colorbar_title : str
Colorbar axis label
Code adapted from:
https://stackoverflow.com/questions/39742305/how-to-use-basemap-python-to-plot-us-with-50-states
    Required shape files (st99_d00...) acquired from:
https://github.com/matplotlib/basemap/tree/master/examples
"""
# Lambert Conformal map of lower 48 states.
plt.figure(figsize=(10,8))
m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49,
projection='lcc',lat_1=33,lat_2=45,lon_0=-95)
# draw state boundaries.
# data from U.S Census Bureau
# http://www.census.gov/geo/www/cob/st2000.html
shp_info = m.readshapefile('st99_d00','states',drawbounds=True)
# choose a color for each state based on population density.
colors={}
statenames=[]
    cmap = plt.cm.viridis  # use the 'viridis' colormap
vmin = np.min(list(state_dict.values()))
vmax = np.max(list(state_dict.values()))
for shapedict in m.states_info:
statename = shapedict['NAME']
# skip DC and Puerto Rico.
if statename not in ['District of Columbia','Puerto Rico']:
pop = state_dict[statename]
            # Calling the colormap with a value between 0 and 1 returns an
            # rgba value, so normalize this state's value linearly between
            # vmin and vmax before looking up its color.
colors[statename] = cmap((pop-vmin)/(vmax-vmin))[:3]
statenames.append(statename)
# cycle through state names, color each one.
ax = plt.gca() # get current axes instance
for nshape,seg in enumerate(m.states):
# skip DC and Puerto Rico.
if statenames[nshape] not in ['Puerto Rico', 'District of Columbia']:
# Offset Alaska and Hawaii to the lower-left corner.
if statenames[nshape] == 'Alaska':
                # Alaska is too big. Scale it down to 30% first, then translate it.
seg = list(map(alaska_transform, seg))
if statenames[nshape] == 'Hawaii':
seg = list(map(hawaii_transform, seg))
color = rgb2hex(colors[statenames[nshape]])
poly = Polygon(seg,facecolor=color,edgecolor=color)
ax.add_patch(poly)
plt.title(title, size=15)
# Make colorbar
# Make a figure and axes with dimensions as desired.
fig = plt.figure(figsize=(8.5, 1))
ax1 = fig.add_axes([0.05, 0.4, 0.9, 0.15])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation='horizontal')
cb1.set_label(colorbar_title, size=15)
return ax
def alaska_transform(xy):
"""Transform Alaska's geographical placement so fits on US map"""
x, y = xy
return (0.3*x + 1000000, 0.3*y-1100000)
def hawaii_transform(xy):
"""Transform Hawaii's geographical placement so fits on US map"""
x, y = xy
return (x + 5250000, y-1400000)
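if __name__ == '__main__':
    # Minimal sketch, not part of the original module: the transform helpers are
    # plain coordinate maps, so they can be exercised without the shapefiles that
    # usa_state_colormap() itself needs. The projected coordinates below are made up.
    x, y = 200000.0, 2500000.0
    print(alaska_transform((x, y)))  # scaled to 30% and shifted toward the lower left
    print(hawaii_transform((x, y)))  # shifted only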
|
mit
| -7,728,630,059,513,854,000
| 36.439252
| 100
| 0.638293
| false
| 3.557726
| false
| false
| false
|
protwis/protwis
|
signprot/migrations/0008_auto_20200829_1739.py
|
1
|
3041
|
# Generated by Django 3.0.3 on 2020-08-29 15:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0003_citation_page_name'),
('structure', '0028_auto_20200829_1704'),
('protein', '0009_auto_20200511_1818'),
('signprot', '0007_auto_20190711_1811'),
]
operations = [
migrations.RemoveField(
model_name='signprotstructure',
name='PDB_code',
),
migrations.AddField(
model_name='signprotstructure',
name='pdb_code',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='common.WebLink'),
preserve_default=False,
),
migrations.AddField(
model_name='signprotstructure',
name='publication',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='common.Publication'),
),
migrations.AddField(
model_name='signprotstructure',
name='publication_date',
field=models.DateField(default=None),
preserve_default=False,
),
migrations.AddField(
model_name='signprotstructure',
name='stabilizing_agents',
field=models.ManyToManyField(to='structure.StructureStabilizingAgent'),
),
migrations.AddField(
model_name='signprotstructure',
name='structure_type',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='structure.StructureType'),
preserve_default=False,
),
migrations.CreateModel(
name='SignprotStructureExtraProteins',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=20)),
('note', models.CharField(max_length=50, null=True)),
('chain', models.CharField(max_length=1)),
('category', models.CharField(max_length=20)),
('wt_coverage', models.IntegerField(null=True)),
('protein_conformation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='protein.ProteinConformation')),
('structure', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='signprot.SignprotStructure')),
('wt_protein', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='protein.Protein')),
],
options={
'db_table': 'signprot_extra_proteins',
},
),
migrations.AddField(
model_name='signprotstructure',
name='extra_proteins',
field=models.ManyToManyField(related_name='extra_proteins', to='signprot.SignprotStructureExtraProteins'),
),
]
|
apache-2.0
| 4,604,085,215,076,593,000
| 41.830986
| 150
| 0.599474
| false
| 4.211911
| false
| false
| false
|
Fizzadar/pyinfra
|
pyinfra/api/connectors/winrm.py
|
1
|
7851
|
from __future__ import print_function, unicode_literals
import base64
import ntpath
import click
from pyinfra import logger
from pyinfra.api import Config
from pyinfra.api.exceptions import ConnectError, PyinfraError
from pyinfra.api.util import get_file_io, memoize, sha1_hash
from .pyinfrawinrmsession import PyinfraWinrmSession
from .util import make_win_command
def _raise_connect_error(host, message, data):
message = '{0} ({1})'.format(message, data)
raise ConnectError(message)
@memoize
def show_warning():
logger.warning('The @winrm connector is alpha!')
def _make_winrm_kwargs(state, host):
kwargs = {
}
for key, value in (
('username', host.data.winrm_user),
('password', host.data.winrm_password),
('winrm_port', int(host.data.winrm_port or 0)),
('winrm_transport', host.data.winrm_transport or 'plaintext'),
('winrm_read_timeout_sec', host.data.winrm_read_timeout_sec or 30),
('winrm_operation_timeout_sec', host.data.winrm_operation_timeout_sec or 20),
):
if value:
kwargs[key] = value
# FUTURE: add more auth
# pywinrm supports: basic, certificate, ntlm, kerberos, plaintext, ssl, credssp
# see https://github.com/diyan/pywinrm/blob/master/winrm/__init__.py#L12
return kwargs
def make_names_data(hostname):
show_warning()
yield '@winrm/{0}'.format(hostname), {'winrm_hostname': hostname}, []
def connect(state, host):
'''
Connect to a single host. Returns the winrm Session if successful.
'''
kwargs = _make_winrm_kwargs(state, host)
logger.debug('Connecting to: %s (%s)', host.name, kwargs)
# Hostname can be provided via winrm config (alias), data, or the hosts name
hostname = kwargs.pop(
'hostname',
host.data.winrm_hostname or host.name,
)
try:
# Create new session
host_and_port = '{}:{}'.format(hostname, host.data.winrm_port)
logger.debug('host_and_port: %s', host_and_port)
session = PyinfraWinrmSession(
host_and_port,
auth=(
kwargs['username'],
kwargs['password'],
),
transport=kwargs['winrm_transport'],
read_timeout_sec=kwargs['winrm_read_timeout_sec'],
operation_timeout_sec=kwargs['winrm_operation_timeout_sec'],
)
return session
# TODO: add exceptions here
except Exception as e:
auth_kwargs = {}
for key, value in kwargs.items():
if key in ('username', 'password'):
auth_kwargs[key] = value
auth_args = ', '.join(
'{0}={1}'.format(key, value)
for key, value in auth_kwargs.items()
)
logger.debug('%s', e)
_raise_connect_error(host, 'Authentication error', auth_args)
def run_shell_command(
state, host, command,
env=None,
success_exit_codes=None,
print_output=False,
print_input=False,
return_combined_output=False,
shell_executable=Config.SHELL,
**ignored_command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
success_exit_codes (list): all values in the list that will return success
print_output (boolean): print the output
        print_input (boolean): print the input
return_combined_output (boolean): combine the stdout and stderr lists
shell_executable (string): shell to use - 'cmd'=cmd, 'ps'=powershell(default)
env (dict): environment variables to set
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_win_command(command)
logger.debug('Running command on %s: %s', host.name, command)
if print_input:
click.echo('{0}>>> {1}'.format(host.print_prefix, command), err=True)
# get rid of leading/trailing quote
tmp_command = command.strip("'")
if print_output:
click.echo(
'{0}>>> {1}'.format(host.print_prefix, command),
err=True,
)
if not shell_executable:
shell_executable = 'ps'
logger.debug('shell_executable:%s', shell_executable)
# we use our own subclassed session that allows for env setting from open_shell.
if shell_executable in ['cmd']:
response = host.connection.run_cmd(tmp_command, env=env)
else:
response = host.connection.run_ps(tmp_command, env=env)
return_code = response.status_code
logger.debug('response:%s', response)
std_out_str = response.std_out.decode('utf-8')
std_err_str = response.std_err.decode('utf-8')
# split on '\r\n' (windows newlines)
std_out = std_out_str.split('\r\n')
std_err = std_err_str.split('\r\n')
logger.debug('std_out:%s', std_out)
logger.debug('std_err:%s', std_err)
if print_output:
click.echo(
'{0}>>> {1}'.format(host.print_prefix, '\n'.join(std_out)),
err=True,
)
if success_exit_codes:
status = return_code in success_exit_codes
else:
status = return_code == 0
logger.debug('Command exit status: %s', status)
if return_combined_output:
std_out = [('stdout', line) for line in std_out]
std_err = [('stderr', line) for line in std_err]
return status, std_out + std_err
return status, std_out, std_err
def get_file(
state, host, remote_filename, filename_or_io,
**command_kwargs
):
raise PyinfraError('Not implemented')
def _put_file(state, host, filename_or_io, remote_location, chunk_size=2048):
# this should work fine on smallish files, but there will be perf issues
    # on larger files due to the full read, the base64 encoding, and
# the latency when sending chunks
with get_file_io(filename_or_io) as file_io:
data = file_io.read()
for i in range(0, len(data), chunk_size):
chunk = data[i:i + chunk_size]
ps = (
'$data = [System.Convert]::FromBase64String("{0}"); '
'{1} -Value $data -Encoding byte -Path "{2}"'
).format(
base64.b64encode(chunk).decode('utf-8'),
'Set-Content' if i == 0 else 'Add-Content',
remote_location)
status, _stdout, stderr = run_shell_command(state, host, ps)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
return True
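# Illustrative sketch of the chunking scheme above (made-up sizes, not part of
# the connector): with chunk_size=2048 a 5000 byte payload is split as
#   chunks = [data[i:i + 2048] for i in range(0, 5000, 2048)]  # 2048 + 2048 + 904 bytes
# and each chunk is base64-encoded and written with Set-Content (first chunk)
# or Add-Content (subsequent chunks) in its own PowerShell call.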
def put_file(
state, host, filename_or_io, remote_filename,
print_output=False, print_input=False,
**command_kwargs
):
'''
Upload file by chunking and sending base64 encoded via winrm
'''
# Always use temp file here in case of failure
temp_file = ntpath.join(
host.fact.windows_temp_dir(),
'pyinfra-{0}'.format(sha1_hash(remote_filename)),
)
if not _put_file(state, host, filename_or_io, temp_file):
return False
# Execute run_shell_command w/sudo and/or su_user
command = 'Move-Item -Path {0} -Destination {1} -Force'.format(temp_file, remote_filename)
status, _, stderr = run_shell_command(
state, host, command,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
if print_output:
click.echo(
'{0}file uploaded: {1}'.format(host.print_prefix, remote_filename),
err=True,
)
return True
EXECUTION_CONNECTOR = True
|
mit
| -1,413,492,209,220,742,400
| 28.851711
| 94
| 0.61075
| false
| 3.724383
| false
| false
| false
|
silmeth/Simple-Library-Manager
|
server/slm_db_interface/views.py
|
1
|
10693
|
from django.shortcuts import render
from django.http import HttpResponse
from django.db import transaction, connection
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login
from slm_db_interface.models import Book, Author, Publisher, Borrower, SLMUser
from bisect import bisect_left # to do binary search on sorted lists
import re
import json
def create_json_from_books(books, additional_dic=None):
books_list = []
for i, book in enumerate(books):
book_dict = {
'title': book.title,
'author': book.author.name,
'author_id': book.author.id,
'isbn10': book.isbn10,
'isbn13': book.isbn13,
'publisher': book.publisher.name,
'publisher_id': book.publisher.id,
'pub_date': book.published_year,
'book_id': book.id
}
if additional_dic is not None: # add additional non-standard fields
for key in additional_dic[i]:
book_dict[key] = additional_dic[i][key]
books_list.append(book_dict)
return json.JSONEncoder(indent=2, ensure_ascii=False).encode(books_list)
def create_book_from_json(json_obj):
book = None
book_author = None
book_publisher = None
with transaction.atomic():
json_decoded = json.JSONDecoder().decode(json_obj)
if 'author' in json_decoded and json_decoded['author'] is None:
return None
elif 'author_new' in json_decoded and json_decoded['author_new']:
book_author = Author(name=json_decoded['author_name'])
book_author.save()
elif 'author_id' in json_decoded:
book_author = Author.objects.get(id=json_decoded['author_id'])
if 'publisher' in json_decoded and json_decoded['publisher'] is None:
return None
elif 'publisher_new' in json_decoded and json_decoded['publisher_new']:
book_publisher = Publisher(name=json_decoded['publisher_name'])
book_publisher.save()
elif 'publisher_id' in json_decoded:
book_publisher = Publisher.objects.get(id=json_decoded['publisher_id'])
if 'title' not in json_decoded:
return None
book = Book(title=json_decoded['title'], author=book_author, publisher=book_publisher,
borrower=None, borrow_date=None, return_date=None)
if 'isbn10' in json_decoded:
book.isbn10 = json_decoded['isbn10']
if 'isbn13' in json_decoded:
book.isbn13 = json_decoded['isbn13']
if 'pub_date' in json_decoded:
book.published_year = json_decoded['pub_date']
book.save()
return book
def create_3grams(s):
assert(type(s) is str)
list_3grams = []
for pos in range(len(s)-2):
list_3grams.append(s[pos:pos+3])
list_3grams.sort()
return list_3grams
def compare_3grams(first, second): # Jaccard's similarity
assert(type(first) is list and type(second) is list)
intersect = 0
len1 = len(first)
len2 = len(second)
for val in first: # find number of elements in the intersection of two lists of 3-grams
pos = bisect_left(second, val, 0, len2)
if pos != len2 and second[pos] == val:
intersect += 1
return float(intersect)/(len1+len2-intersect)
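# Worked example (illustrative): create_3grams('library') gives 5 sorted 3-grams
# and create_3grams('librarian') gives 7; they share 'bra', 'ibr', 'lib' and 'rar',
# so compare_3grams returns 4 / (5 + 7 - 4) = 0.5.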
def get_books_by_isbn(request, isbn): # no login required ATM, may change
sisbn = str(isbn)
results = None
if len(sisbn) == 10:
results = Book.objects.filter(isbn10=sisbn)
elif len(sisbn) == 13:
results = Book.objects.filter(isbn13=sisbn)
return HttpResponse(content=create_json_from_books(results), content_type='application/json; charset=utf-8')
def search(request, attr, attr_val): # no login required ATM, may change
    regexp_whitespace = re.compile(r'\s+')
    regexp_punctuation = re.compile(r'[^\w\s]+')
attr_val = regexp_whitespace.sub(' ', attr_val.lower())
attr_val = regexp_punctuation.sub('', attr_val)
query_3grams = create_3grams(attr_val)
results = []
similarities = []
for book in Book.objects.all():
if attr == 'title':
book_attr_val = book.title.lower()
elif attr == 'author':
book_attr_val = book.author.name.lower()
else:
return HttpResponse(content='cannot search by this attribute', status=404)
book_attr_val = regexp_whitespace.sub(' ', book_attr_val)
book_attr_val = regexp_punctuation.sub('', book_attr_val)
book_3grams = create_3grams(book_attr_val)
similarity = compare_3grams(query_3grams, book_3grams)
if similarity > 0.21:
pos = bisect_left(similarities, similarity, 0, len(similarities))
results.insert(pos, book)
similarities.insert(pos, similarity)
sim_dic_list = []
for sim in similarities:
sim_dic_list.append({'similarity': sim})
return HttpResponse(content=create_json_from_books(results[::-1], sim_dic_list[::-1]),
content_type='application/json; charset=utf-8')
def search_authors(request, name):
if request.user.is_authenticated():
        regexp_whitespace = re.compile(r'\s+')
        regexp_punctuation = re.compile(r'[^\w\s]+')
name = regexp_whitespace.sub(' ', name.lower())
name = regexp_punctuation.sub('', name)
query_3grams = create_3grams(name)
results = []
similarities = []
for author in Author.objects.all():
result = author.name.lower()
result = regexp_whitespace.sub(' ', result)
result = regexp_punctuation.sub('', result)
result_3grams = create_3grams(result)
similarity = compare_3grams(query_3grams, result_3grams)
if similarity > 0.21:
pos = bisect_left(similarities, similarity, 0, len(similarities))
results.insert(pos, author)
similarities.insert(pos, similarity)
results = results[::-1]
similarities = similarities[::-1]
json_results_list = []
for i, res in enumerate(results):
json_results_list.append({'name': res.name, 'author_id': res.id, 'similarity': similarities[i]})
json_results = json.JSONEncoder(indent=2, ensure_ascii=False).encode(json_results_list)
return HttpResponse(content=json_results,
content_type='application/json; charset=utf-8')
else:
return HttpResponse(content='error: not authenticated', content_type='text/plain') # TODO change to error dict
def search_publishers(request, name):
if request.user.is_authenticated():
        regexp_whitespace = re.compile(r'\s+')
        regexp_punctuation = re.compile(r'[^\w\s]+')
name = regexp_whitespace.sub(' ', name.lower())
name = regexp_punctuation.sub('', name)
query_3grams = create_3grams(name)
results = []
similarities = []
for publisher in Publisher.objects.all():
result = publisher.name.lower()
result = regexp_whitespace.sub(' ', result)
result = regexp_punctuation.sub('', result)
result_3grams = create_3grams(result)
similarity = compare_3grams(query_3grams, result_3grams)
# if similarity > 0.21: # listing all publishers makes more sense
pos = bisect_left(similarities, similarity, 0, len(similarities))
results.insert(pos, publisher)
similarities.insert(pos, similarity)
results = results[::-1]
similarities = similarities[::-1]
json_results_list = []
for i, res in enumerate(results):
json_results_list.append({'name': res.name, 'publisher_id': res.id, 'similarity': similarities[i]})
json_results = json.JSONEncoder(indent=2, ensure_ascii=False).encode(json_results_list)
return HttpResponse(content=json_results,
content_type='application/json; charset=utf-8')
else:
return HttpResponse(content='error: not authenticated', content_type='text/plain')
@csrf_exempt
def add_book(request):
if request.user.is_authenticated():
if request.user.slm_user.can_manage_books:
# book data comes in json through a POST request
if request.method == 'POST':
try:
print(request.body.decode('utf8'))
book = create_book_from_json(request.body.decode('utf8'))
return HttpResponse(content=create_json_from_books([book]),
content_type='application/json; charset=utf-8')
except ValueError as err: # TODO change to error dict
return HttpResponse(
content='error: request not a valid json\n' + str(err),
content_type='text/plain'
)
else:
return HttpResponse(content='error: something went wrong', content_type='text/plain')
else:
return HttpResponse(content='error: lack of manage book permission')
else:
return HttpResponse(content='error: not authenticated', content_type='text/plain')
@csrf_exempt
def log_user_in(request):
if request.method == 'POST':
try:
credentials = json.JSONDecoder().decode(request.body.decode('utf8'))
user = authenticate(username=credentials['username'], password=credentials['password'])
if user is not None:
if user.is_active:
login(request, user)
resp_json = {'logged_in': True,
'username': str(user)}
if user.slm_user.can_manage_books:
resp_json['can_manage_books'] = True
if user.slm_user.can_lend:
resp_json['can_lend'] = True
if user.slm_user.can_borrow:
resp_json['can_borrow'] = True
resp = json.JSONEncoder(indent=2, ensure_ascii=False).encode(resp_json)
return HttpResponse(content=resp, content_type='application/json; charset=utf-8')
else: # TODO change to error dict
return HttpResponse(content='error: user inactive', content_type='text/plain')
else:
return HttpResponse(content='error: wrong credentials', content_type='text/plain')
except ValueError:
return HttpResponse(content='error: request not a valid json', content_type='text/plain')
|
lgpl-3.0
| 4,883,945,105,540,650,000
| 38.899254
| 119
| 0.600673
| false
| 3.991415
| false
| false
| false
|
davidjrichardson/uwcs-zarya
|
newsletter/tasks.py
|
1
|
2020
|
import gc
from celery.decorators import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.urls import reverse
from blog.models import Sponsor
from .models import Subscription, Mail
def mail_newsletter(recipients, mail):
email_context = {
'title': mail.subject,
'message': mail.text,
'base_url': settings.EMAIL_ABS_URL,
'sponsors': Sponsor.objects.all(),
}
email_html = render_to_string('newsletter/email_newsletter.html', email_context)
email_plaintext = render_to_string('newsletter/email_newsletter.txt', email_context)
to = [x.email for x in recipients]
# Create a map of emails to unsub tokens for the email merge
unsub_tokens = {recipient.email: {
'unsub_url': '{hostname}{path}'.format(hostname=settings.EMAIL_ABS_URL,
path=reverse('unsub_with_id', kwargs={
'token': recipient.unsubscribe_token
}))} for recipient in recipients}
sender = '{name} <{email}>'.format(name=mail.sender_name, email=mail.sender_email)
email = EmailMultiAlternatives(mail.subject, email_plaintext, sender, to)
email.attach_alternative(email_html, 'text/html')
email.merge_data = unsub_tokens
email.merge_global_data = {
'subject': mail.subject
}
email.template_id = '615bcf44-fdfd-4632-8403-38987eb9074b'
email.send()
# Create a function called "chunks" with two arguments, l and n:
def chunks(l, n):
# For item i in a range that is a length of l,
for i in range(0, len(l), n):
# Create an index range for l of n items:
yield l[i:i+n]
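# Illustrative example (not part of the original module):
# list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]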
@task(name='send_newsletter')
def send_newsletter(mail_id):
subscriptions = chunks(Subscription.objects.all(), 100)
for chunk in subscriptions:
mail_newsletter(chunk, Mail.objects.get(id=mail_id))
|
mit
| -830,727,445,206,895,400
| 37.113208
| 88
| 0.64703
| false
| 3.726937
| false
| false
| false
|
ianmiell/shutit-distro
|
screen/screen.py
|
1
|
1148
|
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class screen(ShutItModule):
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/screen')
shutit.send('cd /tmp/build/screen')
shutit.send('wget -qO- http://ftp.gnu.org/gnu/screen/screen-4.2.1.tar.gz | tar -zxf -')
shutit.send('cd screen*')
shutit.send('./configure --prefix=/usr --infodir=/usr/share/info --mandir=/usr/share/man --with-socket-dir=/run/screen --with-pty-group=5 --with-sys-screenrc=/etc/screenrc')
shutit.send('sed -i -e "s%/usr/local/etc/screenrc%/etc/screenrc%" {etc,doc}/*')
shutit.send('make')
shutit.send('make install')
shutit.send('install -m 644 etc/etcscreenrc /etc/screenrc')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/screen')
return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return screen(
'shutit.tk.sd.screen.screen', 158844782.0237,
description='',
maintainer='',
depends=['shutit.tk.setup']
)
|
gpl-2.0
| 4,438,800,849,325,293,000
| 26.333333
| 175
| 0.682927
| false
| 2.512035
| false
| false
| false
|
PROGRAM-IX/vectorwars
|
vw_game_engine.py
|
1
|
10587
|
import pygame
from pygame.locals import *
from pystroke.hud import *
from pystroke.game_engine import GameEngine
from pystroke.vector2 import Vector2
from pystroke.vex import Vex
from pystroke.input_engine import InputEngine
from pystroke.event_engine import EventEngine
from pystroke.draw_engine import DrawEngine
from vw_beh_engine import VWBehaviourEngine
from enemy import gen
from bullet import BulletD, BulletP
from player import Player
from random import randint
class VWGameEngine(GameEngine):
def __init__(self, screen, event_e):
GameEngine.__init__(self, screen, event_e)
self.beh_e = VWBehaviourEngine()
self.FPS = 60
self.player = Player(400, 300, pygame.Color(0, 255, 0),
[Vector2(0, -5), Vector2(-15, -20),
Vector2(-10, 10), Vector2(0, 20), Vector2(10, 10),
Vector2(15, -20), Vector2(0, -5)],
1)
self.combo_ticks = self.FPS*3
self.combo_timer = 0
self.combo_num = 0
self.enemies = []
self.bullets = []
self.score = 0
self.high_score = 0
self.rep_interval = self.FPS * 10 / 3
#self.rep_interval = self.FPS/10
self.rep_count = 1
self.shoot_interval = self.FPS/10
self.shoot_count = 0
self.player_speed = 5
def spawn(self, num):
for i in xrange(num):
x = randint(100, 700)
y = randint(100, 500)
self.enemies.append(gen(x, y))
def reset_game(self):
del self.enemies
self.enemies = []
del self.bullets
self.bullets = []
self.shoot_count = 0
self.combo_timer = 0
self.combo_num = 0
combo = self._hud.get("Combo")
combo.visible = False
if combo is not None:
combo.text = "combo "+str(self.combo_num)
def set_end_screen(self, visible):
self._hud.get("GameOver1").visible = visible
self._hud.get("GameOver2").visible = visible
self._hud.get("GameOver3").visible = visible
def populate(self):
self.spawn(4)
def game_over(self):
self.set_end_screen(True)
self.reset_game()
self.reset_score()
def combo_tick(self):
if self.combo_timer > 0:
self.combo_timer -= 1
else:
self.combo_num = 0
combo = self._hud.get("Combo")
combo.visible = False
if combo is not None:
combo.text = "combo "+str(self.combo_num)
#print self.combo_num, self.combo_timer
def update(self):
p_move_x = 0 # How much the player will move (H)
p_move_y = 0 # How much the player will move (V)
self.event_e.update()
if self.event_e.input.keys[K_ESCAPE] == True:
return 1
elif self.event_e.input.keys[K_q] == True:
return 0
if self.event_e.input.keys[K_SPACE] == True:
self.score_inc(5)
if self.event_e.input.keys[K_c] == True:
self.set_end_screen(False)
self.reset_game()
self.populate()
if self.event_e.input.keys[K_DOWN] == True:
# Fire down
self.player_shoot_dir(0)
elif self.event_e.input.keys[K_UP] == True:
# Fire up
self.player_shoot_dir(2)
elif self.event_e.input.keys[K_LEFT] == True:
# Fire left
self.player_shoot_dir(3)
elif self.event_e.input.keys[K_RIGHT] == True:
# Fire right
self.player_shoot_dir(1)
elif self.event_e.input.mouse_buttons[1] == True:
# Fire towards the mouse cursor
self.player_shoot_point(Vector2(self.event_e.input.mouse_pos[0],
self.event_e.input.mouse_pos[1]))
else:
self.shoot_count = 0
if self.event_e.input.keys[K_w] == True:
# Move up
p_move_y -= self.player_speed
elif self.event_e.input.keys[K_s] == True:
# Move down
p_move_y += self.player_speed
if self.event_e.input.keys[K_a] == True:
# Move left
p_move_x -= self.player_speed
elif self.event_e.input.keys[K_d] == True:
# Move right
p_move_x += self.player_speed
self.player.rotate_to_face_point(Vector2(
self.event_e.input.mouse_pos[0],
self.event_e.input.mouse_pos[1]))
self.beh_e.update(self.enemies, self.player, self.screen)
self.player.move_abs(p_move_x, p_move_y, self.screen)
self.bullet_update()
if len(self.enemies) > 1:
self.rep()
elif len(self.enemies) == 0 and self.score > 0:
self.game_over()
#else:
#self.spawn(4)
self.collide()
self.combo_tick()
self.clock.tick(self.FPS)
return 2
def score_inc(self, pts):
self.combo_timer = self.combo_ticks
self.combo_num += 1
if self.combo_num > 1:
pts = pts * self.combo_num
print "COMBO " + str(self.combo_num)
combo = self._hud.get("Combo")
combo.visible = True
if combo is not None:
combo.text = "combo "+str(self.combo_num)
self.score += 50*pts
sc = self._hud.get("Score")
if sc is not None:
sc.text = "score "+str(self.score)
go = self._hud.get("GameOver2")
if go is not None:
go.text = "score "+str(self.score)
if self.score > self.high_score:
self.high_score = self.score
hsc = self._hud.get("HighScore")
if hsc is not None:
hsc.text = "high score "+str(self.high_score)
def reset_score(self):
print "SCORE RESET FROM", self.score
self.score = 0
sc = self._hud.get("Score")
if(sc is not None):
sc.text = "score "+str(self.score)
def collide(self):
dead_enemies = []
dead_bullets = []
for e in self.enemies:
if e.lifetime >= 30:
for b in self.bullets:
if e.point_inside(Vector2(b.x, b.y)):
#print "COLLIDE2"
self.score_inc(len(e.points))
if e not in dead_enemies:
dead_enemies.append(e)
if b not in dead_bullets:
dead_bullets.append(b)
for e in dead_enemies:
#print self.player.distance_to(Vector2(e.x, e.y))
self.enemies.remove(e)
for b in dead_bullets:
self.bullets.remove(b)
for p in self.player.points:
for e in self.enemies:
if e.lifetime >= 30:
if e.point_inside(p+Vector2(self.player.x, self.player.y)):
self.game_over()
def draw(self):
self.draw_e.begin_draw(pygame.Color(0,0,0))
self.draw_e.draw(self.enemies)
self.draw_e.draw(self.bullets)
self.draw_e.draw([self.player])
self.draw_e.draw([self._hud])
self.draw_e.end_draw()
def run(self):
self._hud.add(HUDPolygon("Box1", pygame.Color(255, 255, 255),
((50, 50), (750, 50),
(750, 550), (50, 550), 2)))
self._hud.add(HUDText("Score", pygame.Color(255, 255, 255),
"score "+str(self.score), (15, 20), 1, 2))
self._hud.add(HUDText("HighScore", pygame.Color(255, 255, 255),
"high score "+str(self.high_score), (15, 575),
1, 2))
self._hud.add(HUDText("GameOver1", pygame.Color(255, 0, 255),
"game over", (100, 200),
5, 2, False))
self._hud.add(HUDText("GameOver2", pygame.Color(255, 0, 255),
"score "+str(self.score),
(200, 300),
2, 2, False))
self._hud.add(HUDText("GameOver3", pygame.Color(255, 0, 255),
"c to restart",
(200, 360),
2, 2, False))
self._hud.add(HUDText("Combo", pygame.Color(255, 255, 255),
"combo "+str(self.combo_num),
(650, 575),
1, 2, True))
self.spawn(4)
while True:
r = self.update()
if r == 0 or r == 1:
return r
self.draw()
def rep(self):
if(self.rep_count % self.rep_interval == 0 and len(self.enemies)>1):
p1 = randint(0, len(self.enemies)-1)
p2 = p1
while (p1 == p2):
p2 = randint(0, len(self.enemies)-1)
if self.enemies[p1].x < self.enemies[p2].x:
x = randint(self.enemies[p1].x, self.enemies[p2].x)
else:
x = randint(self.enemies[p2].x, self.enemies[p1].x)
if self.enemies[p1].y < self.enemies[p2].y:
y = randint(self.enemies[p1].y, self.enemies[p2].y)
else:
y = randint(self.enemies[p2].y, self.enemies[p1].y)
self.enemies.append(
self.enemies[p1].reproduce(self.enemies[p2], x, y))
elif len(self.enemies) < 2:
self.spawn(2)
self.rep_count += 1
#print self.rep_count
def bullet_update(self):
for b in self.bullets:
if b.x > 800 or b.x < 0 or b.y > 600 or b.y < 0:
self.bullets.remove(b)
b.move()
def player_shoot_dir(self, direction):
if self.shoot_count % self.shoot_interval == 0:
b = BulletD(self.player.x, self.player.y, direction)
self.bullets.append(b)
self.shoot_count += 1
def player_shoot_point(self, point):
if self.shoot_count % self.shoot_interval == 0:
b = BulletP(self.player.x, self.player.y, point)
self.bullets.append(b)
self.shoot_count += 1
|
mit
| 8,833,229,971,831,664,000
| 34.891525
| 79
| 0.489657
| false
| 3.516108
| false
| false
| false
|
liqd/a4-meinberlin
|
meinberlin/apps/polls/migrations/0006_copy_poll_data_to_a4_polls.py
|
1
|
2716
|
# Generated by Django 2.2.24 on 2021-06-18 12:18
from django.db import migrations
def copy_data(apps, schema_editor):
Item = apps.get_model('a4modules', 'Item')
MBPoll = apps.get_model('meinberlin_polls', 'MBPoll')
Poll = apps.get_model('a4polls', 'Poll')
MBQuestion = apps.get_model('meinberlin_polls', 'MBQuestion')
Question = apps.get_model('a4polls', 'Question')
MBChoice = apps.get_model('meinberlin_polls', 'MBChoice')
Choice = apps.get_model('a4polls', 'Choice')
MBVote = apps.get_model('meinberlin_polls', 'MBVote')
Vote = apps.get_model('a4polls', 'Vote')
for mb_poll in MBPoll.objects.all():
item = Item.objects.get(id=mb_poll.item_ptr_id)
poll = Poll(item_ptr_id=mb_poll.item_ptr_id)
poll.__dict__.update(item.__dict__)
poll.save()
mb_questions = MBQuestion.objects.filter(poll=mb_poll)
for mb_question in mb_questions:
question = Question.objects.create(
label = mb_question.label,
weight = mb_question.weight,
multiple_choice = mb_question.multiple_choice,
poll = poll)
mb_choices = MBChoice.objects.filter(question=mb_question)
for mb_choice in mb_choices:
choice = Choice.objects.create(
label = mb_choice.label,
question = question)
mb_votes = MBVote.objects.filter(choice=mb_choice)
for mb_vote in mb_votes:
Vote.objects.create(
created = mb_vote.created,
modified = mb_vote.modified,
creator = mb_vote.creator,
choice = choice)
Comment = apps.get_model('a4comments', 'Comment')
ContentType = apps.get_model('contenttypes', 'ContentType')
mb_poll_content_type = ContentType.objects.get_for_model(MBPoll)
poll_content_type = ContentType.objects.get_for_model(Poll)
comments = Comment.objects.filter(content_type_id=mb_poll_content_type.id)
for comment in comments:
comment.content_type = poll_content_type
comment.save()
Phase = apps.get_model('a4phases', 'Phase')
phases = Phase.objects.filter(type='meinberlin_polls:voting')
for phase in phases:
phase.type='a4polls:voting'
phase.save()
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0005_rename_mb_poll_models'),
('a4polls', '0001_initial'),
('a4comments', '0007_comment_is_moderator_marked'),
('a4phases', '0007_order_phases_also_by_id')
]
operations = [
migrations.RunPython(copy_data)
]
|
agpl-3.0
| -491,148,125,555,496,700
| 38.362319
| 78
| 0.60162
| false
| 3.564304
| false
| false
| false
|
ulrikpedersen/toggl-gnome-applet
|
toggl.py
|
1
|
5391
|
#!/usr/bin/env python
import logging
from datetime import datetime
logging.basicConfig(level=logging.WARNING)
import os
import urllib2, base64, json
import dateutil.parser
def from_ISO8601( str_iso8601 ):
return dateutil.parser.parse(str_iso8601)
def to_ISO8601( timestamp ):
return timestamp.isoformat()
def convert_time_strings(toggl_dicts):
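    """Parse the ISO 8601 timestamp fields of each Toggl dict into
    datetime objects and return the updated dicts."""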
timestamp_fields = ['at',
'created_at',
'start',
'stop']
result = []
for tdict in toggl_dicts:
d = tdict
for tsf in timestamp_fields:
            if tsf in tdict:
d[tsf] = from_ISO8601(tdict[tsf])
result.append(d)
return result
class Toggl:
def __init__(self, api_token=None):
self.log = logging.getLogger("Toggl")
self.log.setLevel(logging.DEBUG)
self.toggl_domain = "www.toggl.com"
self.toggl_api = "https://%s/api/v8/" % self.toggl_domain
self.report_api = "https://%s/reports/api/v2" % self.toggl_domain
self._api_token = api_token
        # Search for a Toggl API token in a list of files
# No validation of the collected token
# TODO: encryption of tokenfiles could be nice
tokenfiles = [os.path.expanduser(f) for f in ['.toggltoken', '~/.toggltoken', '~/.togglapplet/.toggltoken']]
for tf in tokenfiles:
if os.path.exists( tf ):
try:
f = open(tf)
self._api_token = f.read().strip()
f.close()
except:
self.log.exception("Could not read token from " + tf)
self._api_token = None
if self._api_token: break
def send_request( self, api_call_url ):
''' Send a request or command to Toggl, retrieve and parse the json response.
returns a list of dictionary objects.
Throws an exception if the http response is not OK (200) or if no JSON can be decoded from the response.
'''
request = urllib2.Request( api_call_url )
self.log.debug("http request url = \'%s\'", request.get_full_url())
# username:password
# Use base64.standard_b64encode instead of replace...
user_pass = base64.encodestring('%s:%s' % (self._api_token, 'api_token')).replace('\n', '')
request.add_header("Authorization", "Basic %s" % user_pass)
opener = urllib2.build_opener(
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.ProxyHandler({'https': 'http://wwwcache.rl.ac.uk:8080'}))
urllib2.install_opener(opener)
result = urllib2.urlopen(request, timeout = 3.0) # with no data, this is a http GET.
self.log.debug("http request result: code=%s url=\'%s\'", result.getcode(), result.geturl())
js = json.load(result)
#self.log.debug("JSON raw result: %s" % json.dumps(js,sort_keys=True, indent=4, separators=(',', ': ')))
return js
def get_workspaces(self):
self.log.debug("get_workspaces()")
js = self.send_request(self.toggl_api + "workspaces")
js = convert_time_strings(js)
return js
def get_default_workspace(self):
self.log.debug("get_default_workspace()")
wid = self.get_user()['default_wid']
js = self.send_request(self.toggl_api + "workspaces/%s"%str(wid))
js = convert_time_strings([js['data']])
return js[0]
def get_default_workspace_id(self):
self.log.debug("get_default_workspace_id()")
ws = self.get_default_workspace()
self.log.debug(ws)
return ws['id']
def get_projects(self, wid=None):
self.log.debug("get_projects(wid=%s)"%str(wid))
if wid:
js = self.send_request(self.toggl_api + "workspaces/%s/projects"%str(wid))
else:
js = []
for w in self.get_workspaces():
js += self.send_request(self.toggl_api + "workspaces/%s/projects"%str(w['id']))
js = convert_time_strings(js)
return js
def get_current_entry(self):
'''get the currently active time entry'''
self.log.debug("get_current_entry()")
js = self.send_request(self.toggl_api + "time_entries/current")
self.log.debug( js )
        # the current-entry endpoint appears to wrap a single entry (or null) in 'data'
        if not js.get('data'):
            return None
        return convert_time_strings([js['data']])[0]
def get_range_entries(self, start_end=None):
'''Get a list of entries in a range (max 1000 entries).
If no start-end range is defined, the default is to return all entries
from the last 9 days.
start_end: tuple with start and end date'''
self.log.debug("get_range_entries()")
query = "time_entries"
if start_end:
start, end = start_end
            if isinstance(start, datetime):
                start = to_ISO8601(start)
            if isinstance(end, datetime):
                end = to_ISO8601(end)
query += "?start_date=%s&end_date=%s"%(start, end)
js = self.send_request(self.toggl_api + query)
js = convert_time_strings(js)
return js
def get_user(self):
self.log.debug("get_user()")
js = self.send_request(self.toggl_api + "me")
return js['data']
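# Example usage (sketch; assumes a token file such as ~/.toggltoken exists):
#   toggl = Toggl()
#   for entry in toggl.get_range_entries():
#       print entry.get('description'), entry['start']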
|
unlicense
| 6,207,641,037,173,916,000
| 37.241135
| 116
| 0.565758
| false
| 3.679863
| false
| false
| false
|
fancycode/dnsadmin
|
scripts/dnsadmin.py
|
1
|
4877
|
#
# Copyright (C) 2016 Joachim Bauch <mail@joachim-bauch.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import cookielib
import functools
import json
import sys
import urllib2
class parse_response(object):
"""Decorator that parses returned data and checks contents for success."""
def __init__(self, action):
self.action = action
def __call__(self, f):
@functools.wraps(f)
def do_parse_response(*args, **kw):
try:
data = f(*args, **kw)
except urllib2.HTTPError, e:
print >> sys.stderr, '%s failed: %s (%s)' \
% (self.action, e.reason, e.code)
print >> sys.stderr, 'Server response: %s' % (e.read().strip())
return None
if data is None:
return None
elif not isinstance(data, basestring):
data = data.read()
try:
decoded = json.loads(data)
except Exception, e:
print >> sys.stderr, 'Server didn\'t return valid JSON: %s' \
% (e)
print >> sys.stderr, 'Server response: %r' % (data)
return None
if not isinstance(decoded, dict):
print >> sys.stderr, 'Server didn\'t return a map'
print >> sys.stderr, 'Server response: %r' % (data)
return None
if decoded.get('status') != 'ok':
print >> sys.stderr, 'Server didn\'t return a success status'
print >> sys.stderr, 'Server response: %r' % (decoded)
return None
return decoded['result']
return do_parse_response
class MethodAwareRequest(urllib2.Request):
"""Request that supports setting a custom HTTP method."""
def __init__(self, *args, **kw):
self.method = kw.pop('method', None)
urllib2.Request.__init__(self, *args, **kw)
def get_method(self):
if self.method is not None:
return self.method
return urllib2.Request.get_method(self)
class DnsAdminClient(object):
"""Client implementation for the DNS admin service."""
API_VERSION = 'v1'
def __init__(self, base_url):
# Remove any trailing slashes from base url.
if base_url[-1:] == '/':
base_url = base_url[:-1]
self.base_url = base_url + "/api/" + self.API_VERSION
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
def _perform_request(self, url, data=None, method=None):
"""Send GET/POST request to the server with correct headers."""
if data is not None:
data = json.dumps(data)
headers = {
'Content-Type': 'application/json',
}
req = MethodAwareRequest(url, data, headers, method=method)
else:
req = MethodAwareRequest(url, method=method)
return self.opener.open(req)
@parse_response('Login')
def login(self, username, password):
"""Authenticate user with the service."""
data = {
'username': username,
'password': password,
}
return self._perform_request(self.base_url + '/user/login', data)
@parse_response('Change password')
def changePassword(self, new_password):
"""Change password of logged in user."""
data = {
'password': new_password,
}
return self._perform_request(self.base_url + '/user/change-password', data)
@parse_response('List')
def listDomains(self):
"""Return list of registered domains."""
return self._perform_request(self.base_url + '/domain/list')
@parse_response('Register')
def registerSlave(self, domain, master):
"""Register slave domain."""
data = {
'master': master,
}
return self._perform_request(self.base_url + '/slave/' + domain,
data=data, method='PUT')
@parse_response('Unregister')
def unregisterSlave(self, domain):
"""Unregister slave domain."""
return self._perform_request(self.base_url + '/slave/' + domain,
method='DELETE')
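# Example usage (sketch; hypothetical endpoint and credentials):
#   client = DnsAdminClient('https://dns.example.com/')
#   if client.login('admin', 'secret') is not None:
#       print client.listDomains()
#       client.registerSlave('example.org', '192.0.2.1')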
|
agpl-3.0
| 8,245,141,423,036,300,000
| 33.835714
| 83
| 0.58294
| false
| 4.193465
| false
| false
| false
|
pinterest/mysql_utils
|
mysql_backup_csv.py
|
1
|
33689
|
#!/usr/bin/env python
import argparse
import datetime
import json
import logging
import multiprocessing
import os
import subprocess
import threading
import time
import traceback
import uuid
import boto
import _mysql_exceptions
import psutil
import safe_uploader
import mysql_backup_status
from lib import backup
from lib import environment_specific
from lib import host_utils
from lib import mysql_lib
ACTIVE = 'active'
CSV_BACKUP_LOCK_TABLE_NAME = 'backup_locks'
CSV_BACKUP_LOCK_TABLE = """CREATE TABLE IF NOT EXISTS {db}.{tbl} (
`lock_identifier` varchar(36) NOT NULL,
`lock_active` enum('active') DEFAULT 'active',
`created_at` datetime NOT NULL,
`expires` datetime DEFAULT NULL,
`released` datetime DEFAULT NULL,
`table_name` varchar(255) NOT NULL,
`partition_number` INT UNSIGNED NOT NULL DEFAULT 0,
`hostname` varchar(90) NOT NULL DEFAULT '',
`port` int(11) NOT NULL DEFAULT '0',
PRIMARY KEY (`lock_identifier`),
UNIQUE KEY `lock_active` (`table_name`,`partition_number`,`lock_active`),
INDEX `backup_location` (`hostname`, `port`),
INDEX `expires` (`expires`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1"""
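# The unique key on (table_name, partition_number, lock_active) guarantees at
# most one active lock per table/partition across all backup hosts.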
MAX_THREAD_ERROR = 5
LOCKS_HELD_TIME = '5 MINUTE'
# How long locks are held and updated
LOCK_EXTEND_FREQUENCY = 10
# LOCK_EXTEND_FREQUENCY in seconds
PATH_PITR_DATA = 'pitr/{replica_set}/{db_name}/{table}/{date}'
SUCCESS_ENTRY = 'YAY_IT_WORKED'
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--db',
default=None,
help='DB to export, default is all databases.')
parser.add_argument('--force_table',
default=None,
help='Table to export, default is all tables.')
parser.add_argument('--force_reupload',
default=False,
action='store_true',
                        help='Ignore existing uploads, reupload everything')
parser.add_argument('--loglevel',
default='INFO',
help='Change logging verbosity',
choices=set(['INFO', 'DEBUG']))
parser.add_argument('--dev_bucket',
default=False,
action='store_true',
help='Use the dev bucket, useful for testing')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.loglevel.upper(), None))
# Nope, don't even start.
if os.path.isfile(backup.CSV_BACKUP_SKIP_FILE):
log.info('Found {}. Skipping CSV backup '
'run.'.format(backup.CSV_BACKUP_SKIP_FILE))
return
    # If we ever want to run multi-instance, this will need to be updated
backup_obj = mysql_backup_csv(host_utils.HostAddr(host_utils.HOSTNAME),
args.db, args.force_table,
args.force_reupload, args.dev_bucket)
backup_obj.backup_instance()
class mysql_backup_csv:
def __init__(self, instance,
db=None, force_table=None,
force_reupload=False, dev_bucket=False):
""" Init function for backup, takes all args
Args:
instance - A hostAddr obect of the instance to be baced up
db - (option) backup only specified db
force_table - (option) backup only specified table
force_reupload - (optional) force reupload of backup
"""
self.instance = instance
self.session_id = None
self.timestamp = datetime.datetime.utcnow()
# datestamp is for s3 files which are by convention -1 day
self.datestamp = (self.timestamp -
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
self.tables_to_backup = multiprocessing.Queue()
self.tables_to_retry = multiprocessing.Queue()
if db:
table_list = ['{}.{}'.format(db, x) for x in mysql_lib.get_tables(instance, db, True)]
else:
table_list = mysql_lib.get_all_tables_by_instance(instance)
for t in backup.filter_tables_to_csv_backup(instance, table_list):
self.tables_to_backup.put(t)
self.dev_bucket = dev_bucket
self.force_table = force_table
self.force_reupload = force_reupload
self.table_count = 0
self.upload_bucket = environment_specific.S3_CSV_BUCKET_DEV \
if dev_bucket else environment_specific.S3_CSV_BUCKET
def backup_instance(self):
""" Back up a replica instance to s3 in csv """
log.info('Backup for instance {i} started at {t}'
''.format(t=str(self.timestamp),
i=self.instance))
log.info('Checking heartbeat to make sure replication is not too '
'lagged.')
self.check_replication_for_backup()
log.info('Taking host backup lock')
host_lock = host_utils.bind_lock_socket(backup.CSV_BACKUP_LOCK_SOCKET)
log.info('Setting up export directory structure')
self.setup_and_get_tmp_path()
log.info('Will temporarily dump inside of {path}'
''.format(path=self.dump_base_path))
log.info('Releasing any invalid shard backup locks')
self.ensure_backup_locks_sanity()
log.info('Deleting old expired locks')
self.purge_old_expired_locks()
log.info('Stopping replication SQL thread to get a snapshot')
mysql_lib.stop_replication(self.instance,
mysql_lib.REPLICATION_THREAD_SQL)
# starting a consistent snapshot here and retrieving the thread ID
conn = mysql_lib.connect_mysql(self.instance,
backup.USER_ROLE_MYSQLDUMP)
mysql_lib.start_consistent_snapshot(conn, read_only=True)
cursor = conn.cursor()
cursor.execute('SET SESSION wait_timeout=28800')
cursor.execute("SELECT VARIABLE_VALUE AS conn_id FROM "
"INFORMATION_SCHEMA.SESSION_VARIABLES "
"WHERE VARIABLE_NAME='pseudo_thread_id'")
self.session_id = cursor.fetchone()['conn_id']
workers = []
for _ in range(multiprocessing.cpu_count() / 2):
proc = multiprocessing.Process(target=self.mysql_backup_csv_tables)
proc.daemon = True
proc.start()
workers.append(proc)
# throw in a sleep to make sure all threads have started dumps
time.sleep(2)
log.info('Restarting replication')
mysql_lib.start_replication(self.instance,
mysql_lib.REPLICATION_THREAD_SQL)
for worker in workers:
worker.join()
if not (self.tables_to_backup.empty() and self.tables_to_retry.empty()):
raise Exception('All worker processes have completed, but '
'work remains in the queue')
log.info('CSV backup is complete, will run a check')
self.release_expired_locks()
mysql_backup_status.verify_csv_instance_backup(
self.instance,
self.datestamp,
self.dev_bucket)
host_utils.release_lock_socket(host_lock)
def mysql_backup_csv_tables(self):
""" Worker for backing up a queue of tables """
proc_id = multiprocessing.current_process().name
conn = mysql_lib.connect_mysql(self.instance,
backup.USER_ROLE_MYSQLDUMP)
mysql_lib.start_consistent_snapshot(conn, read_only=True,
session_id=self.session_id)
pitr_data = mysql_lib.get_pitr_data(self.instance)
err_count = 0
while not (self.tables_to_backup.empty() and self.tables_to_retry.empty()):
table_tuple = self.tables_to_retry.get() if not self.tables_to_retry.empty() \
else self.tables_to_backup.get()
try:
# if this is a partitioned table, and it is already
# being backed up on some other host, we do not want to attempt
# to back it up here.
#
if table_tuple[1] and self.partition_lock_exists(table_tuple):
log.debug('Partitioned table {} is already being '
'backed up elsewhere, so we cannot do it '
'here.'.format(table_tuple[0]))
else:
self.mysql_backup_csv_table_wrapper(table_tuple, conn, pitr_data)
self.table_count = self.table_count + 1
if (self.table_count % 50) == 0:
self.release_expired_locks()
except:
self.tables_to_retry.put(table_tuple)
log.error('{proc_id}: Could not dump {tbl}, partition {p} - '
'error: {e}'.format(tbl=table_tuple[0], p=table_tuple[2],
e=traceback.format_exc(),
proc_id=proc_id))
err_count = err_count + 1
if err_count > MAX_THREAD_ERROR:
log.error('{}: Error count in thread > MAX_THREAD_ERROR. '
'Aborting :('.format(proc_id))
return
def mysql_backup_csv_table_wrapper(self, table_tuple, conn, pitr_data):
""" Back up a single table or partition
Args:
table_tuple - A tuple containing the fully-qualified table name,
the partition name, and the partition number
            conn - a connection to the mysql instance
pitr_data - data describing the position of the db data in replication
"""
proc_id = multiprocessing.current_process().name
if not self.force_reupload and self.already_backed_up(table_tuple):
log.info('{proc_id}: {tbl} partition {p} is already backed up, '
'skipping'.format(proc_id=proc_id,
tbl=table_tuple[0],
p=table_tuple[2]))
return
# attempt to take lock by writing a lock to the master
tmp_dir_db = None
lock_identifier = None
extend_lock_thread = None
try:
self.release_expired_locks()
lock_identifier = self.take_backup_lock(table_tuple)
extend_lock_stop_event = threading.Event()
extend_lock_thread = threading.Thread(
target=self.extend_backup_lock,
args=(lock_identifier, extend_lock_stop_event))
extend_lock_thread.daemon = True
extend_lock_thread.start()
if not lock_identifier:
return
log.info('{proc_id}: {tbl} table, partition {p} backup start'
''.format(tbl=table_tuple[0], p=table_tuple[2],
proc_id=proc_id))
tmp_dir_db = os.path.join(self.dump_base_path, table_tuple[0].split('.')[0])
if not os.path.exists(tmp_dir_db):
os.makedirs(tmp_dir_db)
host_utils.change_owner(tmp_dir_db, 'mysql', 'mysql')
self.upload_pitr_data(*table_tuple[0].split('.'), pitr_data=pitr_data)
self.mysql_backup_one_partition(table_tuple, tmp_dir_db, conn)
log.info('{proc_id}: {tbl} table, partition {p} backup complete'
''.format(tbl=table_tuple[0], p=table_tuple[2],
proc_id=proc_id))
finally:
if extend_lock_thread:
extend_lock_stop_event.set()
log.debug('{proc_id}: {tbl} table, partition {p} waiting for '
'lock expiry thread to end'.format(tbl=table_tuple[0],
p=table_tuple[2],
proc_id=proc_id))
extend_lock_thread.join()
if lock_identifier:
log.debug('{proc_id}: {tbl} table, partition {p} releasing lock'
''.format(tbl=table_tuple[0], p=table_tuple[2],
proc_id=proc_id))
self.release_table_backup_lock(lock_identifier)
def mysql_backup_one_partition(self, table_tuple, tmp_dir_db, conn):
""" Back up a single partition of a single table
Args:
table_tuple - the table_tuple (db, partition name, partition number)
to be backed up
tmp_dir_db - temporary storage used for all tables in the db
            conn - a connection to the mysql instance
"""
proc_id = multiprocessing.current_process().name
(_, data_path, _) = backup.get_csv_backup_paths(self.instance,
*table_tuple[0].split('.'),
date=self.datestamp,
partition_number=table_tuple[2])
log.debug('{proc_id}: {tbl} partition {p} dump to {path} started'
''.format(proc_id=proc_id,
tbl=table_tuple[0],
p=table_tuple[2],
path=data_path))
self.upload_schema(*table_tuple[0].split('.'), tmp_dir_db=tmp_dir_db)
fifo = os.path.join(tmp_dir_db,
'{tbl}{part}'.format(tbl=table_tuple[0].split('.')[1],
part=table_tuple[2]))
procs = dict()
try:
# giant try so we can try to clean things up in case of errors
self.create_fifo(fifo)
# Start creating processes
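            # Pipeline: cat streams the SELECT ... INTO OUTFILE output from the
            # fifo, nullescape (an in-house filter, presumably escaping NUL
            # bytes) sanitises it, and lzop compresses it before the S3 upload.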
procs['cat'] = subprocess.Popen(['cat', fifo],
stdout=subprocess.PIPE)
procs['nullescape'] = subprocess.Popen(['nullescape'],
stdin=procs['cat'].stdout,
stdout=subprocess.PIPE)
procs['lzop'] = subprocess.Popen(['lzop'],
stdin=procs['nullescape'].stdout,
stdout=subprocess.PIPE)
# Start dump query
return_value = set()
query_thread = threading.Thread(target=self.run_dump_query,
args=(table_tuple, fifo, conn,
procs['cat'], return_value))
query_thread.daemon = True
query_thread.start()
# And run the upload
safe_uploader.safe_upload(precursor_procs=procs,
stdin=procs['lzop'].stdout,
bucket=self.upload_bucket,
key=data_path,
check_func=self.check_dump_success,
check_arg=return_value)
os.remove(fifo)
log.debug('{proc_id}: {tbl} partition {p} clean up complete'
''.format(proc_id=proc_id,
tbl=table_tuple[0],
p=table_tuple[2]))
except:
log.debug('{}: in exception handling for failed table '
'upload'.format(proc_id))
if os.path.exists(fifo):
self.cleanup_fifo(fifo)
raise
def create_fifo(self, fifo):
""" Create a fifo to be used for dumping a mysql table
Args:
fifo - The path to the fifo
"""
if os.path.exists(fifo):
self.cleanup_fifo(fifo)
log.debug('{proc_id}: creating fifo {fifo}'
''.format(proc_id=multiprocessing.current_process().name,
fifo=fifo))
os.mkfifo(fifo)
# Could not get os.mkfifo(fifo, 0777) to work due to umask
host_utils.change_owner(fifo, 'mysql', 'mysql')
def cleanup_fifo(self, fifo):
""" Safely cleanup a fifo that is an unknown state
Args:
fifo - The path to the fifo
"""
log.debug('{proc_id}: Cleanup of {fifo} started'
''.format(proc_id=multiprocessing.current_process().name,
fifo=fifo))
cat_proc = subprocess.Popen('timeout 5 cat {} >/dev/null'.format(fifo),
shell=True)
cat_proc.wait()
os.remove(fifo)
log.debug('{proc_id}: Cleanup of {fifo} complete'
''.format(proc_id=multiprocessing.current_process().name,
fifo=fifo))
def run_dump_query(self, table_tuple, fifo, conn, cat_proc, return_value):
""" Run a SELECT INTO OUTFILE into a fifo
Args:
table_tuple - A tuple of (table_name, partition_name, partition_number)
fifo - The fifo to dump the table.db into
conn - The connection to MySQL
cat_proc - The process reading from the fifo
            return_value - A set used to populate the return status. This is
                           a semi-ugly hack required because threads cannot
                           return data directly but can modify shared objects
                           (like a set).
"""
log.debug('{proc_id}: {tbl} partition {p} dump started'
''.format(proc_id=multiprocessing.current_process().name,
tbl=table_tuple[0],
p=table_tuple[2]))
extra = '' if not table_tuple[1] else " PARTITION ({})".format(table_tuple[1])
(db, tbl) = table_tuple[0].split('.')
sql = ("SELECT * "
"INTO OUTFILE '{fifo}' "
"FROM {db}.`{tbl}` {extra} "
"").format(fifo=fifo,
db=db,
tbl=tbl,
extra=extra)
cursor = conn.cursor()
try:
cursor.execute(sql)
except Exception as detail:
# if we have not output any data, then the cat proc will never
# receive an EOF, so we will be stuck
if psutil.pid_exists(cat_proc.pid):
cat_proc.kill()
log.error('{proc_id}: dump query encountered an error: {er}'
''.format(
er=detail,
proc_id=multiprocessing.current_process().name))
log.debug('{proc_id}: {tbl} partition {p} dump complete'
''.format(proc_id=multiprocessing.current_process().name,
tbl=table_tuple[0], p=table_tuple[2]))
return_value.add(SUCCESS_ENTRY)
def check_dump_success(self, return_value):
""" Check to see if a dump query succeeded
Args:
return_value - A set which if it includes SUCCESS_ENTRY shows that
the query succeeded
"""
if SUCCESS_ENTRY not in return_value:
raise Exception('{}: dump failed'
''.format(multiprocessing.current_process().name))
def upload_pitr_data(self, db, tbl, pitr_data):
""" Upload a file of PITR data to s3 for each table
Args:
db - the db that was backed up.
tbl - the table that was backed up.
pitr_data - a dict of various data that might be helpful for running a
PITR
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
s3_path = PATH_PITR_DATA.format(replica_set=replica_set,
date=self.datestamp,
db_name=db, table=tbl)
log.debug('{proc_id}: {db}.{tbl} Uploading pitr data to {s3_path}'
''.format(s3_path=s3_path,
proc_id=multiprocessing.current_process().name,
db=db, tbl=tbl))
boto_conn = boto.connect_s3()
bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
key = bucket.new_key(s3_path)
key.set_contents_from_string(json.dumps(pitr_data))
def upload_schema(self, db, table, tmp_dir_db):
""" Upload the schema of a table to s3
Args:
db - the db to be backed up
table - the table to be backed up
tmp_dir_db - temporary storage used for all tables in the db
"""
(schema_path, _, _) = backup.get_csv_backup_paths(
self.instance, db, table, self.datestamp)
create_stm = mysql_lib.show_create_table(self.instance, db, table)
log.debug('{proc_id}: Uploading schema to {schema_path}'
''.format(schema_path=schema_path,
proc_id=multiprocessing.current_process().name))
boto_conn = boto.connect_s3()
bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
key = bucket.new_key(schema_path)
key.set_contents_from_string(create_stm)
def partition_lock_exists(self, table_tuple):
""" Find out if there is already a lock on one partition of a
partitioned table from a host other than us. If so, we
cannot backup that table here.
Args:
table_tuple - the tuple of table information.
Returns:
True if there is such a lock, False if not.
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(
replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
params = {'table_name': table_tuple[0],
'hostname': self.instance.hostname,
'port': self.instance.port,
'active': ACTIVE}
sql = ("SELECT COUNT(*) AS cnt FROM {db}.{tbl} WHERE "
"lock_active = %(active)s AND "
"table_name = %(table_name)s AND "
"hostname <> %(hostname)s AND "
"port = %(port)s").format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql, params)
row = int(cursor.fetchone()['cnt'])
return (row > 0)
def take_backup_lock(self, table_tuple):
""" Write a lock row on to the master
Args:
table_tuple - the tuple containing info about the table/partition
to be backed up.
Returns:
a uuid lock identifier
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(
replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
lock_identifier = str(uuid.uuid4())
log.debug('Taking backup lock: {replica_set} {tbl} partition {p}'
''.format(replica_set=replica_set,
tbl=table_tuple[0], p=table_tuple[2]))
params = {'lock': lock_identifier,
'table_name': table_tuple[0],
'partition_number': table_tuple[2],
'hostname': self.instance.hostname,
'port': self.instance.port,
'active': ACTIVE}
sql = ("INSERT INTO {db}.{tbl} "
"SET "
"lock_identifier = %(lock)s, "
"lock_active = %(active)s, "
"created_at = NOW(), "
"expires = NOW() + INTERVAL {locks_held_time}, "
"released = NULL, "
"table_name = %(table_name)s, "
"partition_number = %(partition_number)s, "
"hostname = %(hostname)s, "
"port = %(port)s"
"").format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME,
locks_held_time=LOCKS_HELD_TIME)
cursor = master_conn.cursor()
try:
cursor.execute(sql, params)
master_conn.commit()
except _mysql_exceptions.IntegrityError:
lock_identifier = None
sql = ("SELECT hostname, port, expires "
"FROM {db}.{tbl} "
"WHERE "
" lock_active = %(active)s AND "
" table_name = %(table_name)s AND "
" partition_number = %(partition_number)s"
"").format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql,
{'table_name': table_tuple[0],
'partition_number': table_tuple[2],
'active': ACTIVE})
ret = cursor.fetchone()
log.debug('Table {tbl} (partition {p}) is being backed '
'up on {hostname}:{port}, '
'lock will expire at {expires}.'
''.format(tbl=table_tuple[0],
p=table_tuple[2],
hostname=ret['hostname'],
port=ret['port'],
expires=str(ret['expires'])))
log.debug(cursor._executed)
return lock_identifier
def extend_backup_lock(self, lock_identifier, extend_lock_stop_event):
""" Extend a backup lock. This is to be used by a thread
Args:
            lock_identifier - Corresponds to a lock identifier row in the
CSV_BACKUP_LOCK_TABLE_NAME.
extend_lock_stop_event - An event that will be used to inform this
thread to stop extending the lock
"""
        # Assumption is that this is called right after creating the lock
last_update = time.time()
while(not extend_lock_stop_event.is_set()):
if (time.time() - last_update) > LOCK_EXTEND_FREQUENCY:
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
params = {'lock_identifier': lock_identifier}
sql = ('UPDATE {db}.{tbl} '
'SET expires = NOW() + INTERVAL {locks_held_time} '
'WHERE lock_identifier = %(lock_identifier)s'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME,
locks_held_time=LOCKS_HELD_TIME)
cursor.execute(sql, params)
master_conn.commit()
log.debug(cursor._executed)
last_update = time.time()
extend_lock_stop_event.wait(.5)
def release_table_backup_lock(self, lock_identifier):
""" Release a backup lock created by take_backup_lock
Args:
lock_identifier - a uuid to identify a lock row
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
params = {'lock_identifier': lock_identifier}
sql = ('UPDATE {db}.{tbl} '
'SET lock_active = NULL, released = NOW() '
'WHERE lock_identifier = %(lock_identifier)s AND '
' lock_active is NOT NULL'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql, params)
master_conn.commit()
log.debug(cursor._executed)
def ensure_backup_locks_sanity(self):
""" Release any backup locks that aren't sane. This means locks
created by the same host as the caller. The instance level lock
should allow this assumption to be correct.
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
if not mysql_lib.does_table_exist(master, mysql_lib.METADATA_DB,
CSV_BACKUP_LOCK_TABLE_NAME):
log.debug('Creating missing metadata table')
cursor.execute(CSV_BACKUP_LOCK_TABLE.format(
db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME))
params = {'hostname': self.instance.hostname,
'port': self.instance.port}
sql = ('UPDATE {db}.{tbl} '
'SET lock_active = NULL, released = NOW() '
'WHERE hostname = %(hostname)s AND '
' port = %(port)s'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql, params)
master_conn.commit()
def release_expired_locks(self):
""" Release any expired locks """
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
sql = ('UPDATE {db}.{tbl} '
'SET lock_active = NULL, released = NOW() '
'WHERE expires < NOW() AND lock_active IS NOT NULL'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql)
master_conn.commit()
log.debug(cursor._executed)
def purge_old_expired_locks(self):
""" Delete any locks older than 2 days """
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
sql = ('DELETE FROM {db}.{tbl} '
'WHERE expires < NOW() - INTERVAL 2 DAY'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql)
master_conn.commit()
log.debug(cursor._executed)
def already_backed_up(self, table_tuple):
""" Check to see if a particular partition has already been uploaded
to s3
Args:
table_tuple - (table, partition name, part number)
Returns:
bool - True if the partition has already been backed up,
False otherwise
"""
boto_conn = boto.connect_s3()
bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
(_, data_path, _) = backup.get_csv_backup_paths(self.instance,
*table_tuple[0].split('.'),
date=self.datestamp,
partition_number=table_tuple[2])
if not bucket.get_key(data_path):
return False
return True
def check_replication_for_backup(self):
""" Confirm that replication is caught up enough to run """
while True:
heartbeat = mysql_lib.get_heartbeat(self.instance)
if heartbeat.date() < self.timestamp.date():
log.warning('Replication is too lagged ({}) to run daily '
'backup, sleeping'.format(heartbeat))
time.sleep(10)
elif heartbeat.date() > self.timestamp.date():
raise Exception('Replication is later than expected day')
else:
log.info('Replication is ok ({}) to run daily '
'backup'.format(heartbeat))
return
def setup_and_get_tmp_path(self):
""" Figure out where to temporarily store csv backups,
and clean it up
"""
tmp_dir_root = os.path.join(host_utils.find_root_volume(),
'csv_export',
str(self.instance.port))
if not os.path.exists(tmp_dir_root):
os.makedirs(tmp_dir_root)
host_utils.change_owner(tmp_dir_root, 'mysql', 'mysql')
self.dump_base_path = tmp_dir_root
if __name__ == "__main__":
environment_specific.initialize_logger()
main()
|
gpl-2.0
| -410,670,963,058,634,600
| 42.808843
| 98
| 0.531123
| false
| 4.236544
| false
| false
| false
|
docusign/docusign-python-client
|
docusign_esign/models/workflow.py
|
1
|
5166
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Workflow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'current_workflow_step_id': 'str',
'workflow_status': 'str',
'workflow_steps': 'list[WorkflowStep]'
}
attribute_map = {
'current_workflow_step_id': 'currentWorkflowStepId',
'workflow_status': 'workflowStatus',
'workflow_steps': 'workflowSteps'
}
def __init__(self, current_workflow_step_id=None, workflow_status=None, workflow_steps=None): # noqa: E501
"""Workflow - a model defined in Swagger""" # noqa: E501
self._current_workflow_step_id = None
self._workflow_status = None
self._workflow_steps = None
self.discriminator = None
if current_workflow_step_id is not None:
self.current_workflow_step_id = current_workflow_step_id
if workflow_status is not None:
self.workflow_status = workflow_status
if workflow_steps is not None:
self.workflow_steps = workflow_steps
@property
def current_workflow_step_id(self):
"""Gets the current_workflow_step_id of this Workflow. # noqa: E501
# noqa: E501
:return: The current_workflow_step_id of this Workflow. # noqa: E501
:rtype: str
"""
return self._current_workflow_step_id
@current_workflow_step_id.setter
def current_workflow_step_id(self, current_workflow_step_id):
"""Sets the current_workflow_step_id of this Workflow.
# noqa: E501
:param current_workflow_step_id: The current_workflow_step_id of this Workflow. # noqa: E501
:type: str
"""
self._current_workflow_step_id = current_workflow_step_id
@property
def workflow_status(self):
"""Gets the workflow_status of this Workflow. # noqa: E501
# noqa: E501
:return: The workflow_status of this Workflow. # noqa: E501
:rtype: str
"""
return self._workflow_status
@workflow_status.setter
def workflow_status(self, workflow_status):
"""Sets the workflow_status of this Workflow.
# noqa: E501
:param workflow_status: The workflow_status of this Workflow. # noqa: E501
:type: str
"""
self._workflow_status = workflow_status
@property
def workflow_steps(self):
"""Gets the workflow_steps of this Workflow. # noqa: E501
# noqa: E501
:return: The workflow_steps of this Workflow. # noqa: E501
:rtype: list[WorkflowStep]
"""
return self._workflow_steps
@workflow_steps.setter
def workflow_steps(self, workflow_steps):
"""Sets the workflow_steps of this Workflow.
# noqa: E501
:param workflow_steps: The workflow_steps of this Workflow. # noqa: E501
:type: list[WorkflowStep]
"""
self._workflow_steps = workflow_steps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Workflow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Workflow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
mit
| 5,507,776,582,154,696,000
| 28.861272
| 140
| 0.577429
| false
| 4.136109
| false
| false
| false
|
nttcom/eclcli
|
eclcli/orchestration/heatclient/common/http.py
|
1
|
13729
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import logging
import os
import socket
from oslo_serialization import jsonutils
from oslo_utils import encodeutils, importutils
import requests
import six
from six.moves.urllib import parse
from eclcli.orchestration.heatclient.common import utils
from eclcli.orchestration.heatclient import exc
from eclcli.orchestration.heatclient.openstack.common._i18n import _
from eclcli.orchestration.heatclient.openstack.common._i18n import _LW
from keystoneauth1 import adapter
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-heatclient'
CHUNKSIZE = 1024 * 64 # 64kB
SENSITIVE_HEADERS = ('X-Auth-Token',)
osprofiler_web = importutils.try_import("osprofiler.web")
def get_system_ca_file():
"""Return path to system default CA file."""
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca
ca_path = ['/etc/ssl/certs/ca-certificates.crt',
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/ssl/ca-bundle.pem',
'/etc/ssl/cert.pem',
'/System/Library/OpenSSL/certs/cacert.pem',
requests.certs.where()]
for ca in ca_path:
LOG.debug("Looking for ca file %s", ca)
if os.path.exists(ca):
LOG.debug("Using ca file %s", ca)
return ca
LOG.warning(_LW("System ca file could not be found."))
class HTTPClient(object):
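    """HTTP client for the orchestration (Heat) API; authenticates with a
    pre-obtained token or username/password headers and optionally verifies
    SSL certificates."""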
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.auth_url = kwargs.get('auth_url')
self.auth_token = kwargs.get('token')
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.region_name = kwargs.get('region_name')
self.include_pass = kwargs.get('include_pass')
self.endpoint_url = endpoint
self.cert_file = kwargs.get('cert_file')
self.key_file = kwargs.get('key_file')
self.timeout = kwargs.get('timeout')
self.ssl_connection_params = {
'ca_file': kwargs.get('ca_file'),
'cert_file': kwargs.get('cert_file'),
'key_file': kwargs.get('key_file'),
'insecure': kwargs.get('insecure'),
}
self.verify_cert = None
if parse.urlparse(endpoint).scheme == "https":
if kwargs.get('insecure'):
self.verify_cert = False
else:
self.verify_cert = kwargs.get('ca_file', get_system_ca_file())
# FIXME(shardy): We need this for compatibility with the oslo apiclient
# we should move to inheriting this class from the oslo HTTPClient
self.last_request_id = None
def safe_header(self, name, value):
if name in SENSITIVE_HEADERS:
            # encode to bytes before hashing (required under Python 3)
v = value.encode('utf-8')
h = hashlib.sha1(v)
d = h.hexdigest()
return encodeutils.safe_decode(name), "{SHA1}%s" % d
else:
return (encodeutils.safe_decode(name),
encodeutils.safe_decode(value))
def log_curl_request(self, method, url, kwargs):
curl = ['curl -g -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % self.safe_header(key, value)
curl.append(header)
conn_params_fmt = [
('key_file', '--key %s'),
('cert_file', '--cert %s'),
('ca_file', '--cacert %s'),
]
for (key, fmt) in conn_params_fmt:
value = self.ssl_connection_params.get(key)
if value:
curl.append(fmt % value)
if self.ssl_connection_params.get('insecure'):
curl.append('-k')
if 'data' in kwargs:
curl.append('-d \'%s\'' % kwargs['data'])
curl.append('%s%s' % (self.endpoint, url))
LOG.debug(' '.join(curl))
@staticmethod
def log_http_response(resp):
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.headers.items()])
dump.append('')
if resp.content:
content = resp.content
if isinstance(content, six.binary_type):
content = content.decode()
dump.extend([content, ''])
LOG.debug('\n'.join(dump))
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around requests.request to handle tasks such as
setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
if self.auth_token:
kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
else:
kwargs['headers'].update(self.credentials_headers())
if self.auth_url:
kwargs['headers'].setdefault('X-Auth-Url', self.auth_url)
if self.region_name:
kwargs['headers'].setdefault('X-Region-Name', self.region_name)
if self.include_pass and 'X-Auth-Key' not in kwargs['headers']:
kwargs['headers'].update(self.credentials_headers())
if osprofiler_web:
kwargs['headers'].update(osprofiler_web.get_trace_id_headers())
self.log_curl_request(method, url, kwargs)
if self.cert_file and self.key_file:
kwargs['cert'] = (self.cert_file, self.key_file)
if self.verify_cert is not None:
kwargs['verify'] = self.verify_cert
if self.timeout is not None:
kwargs['timeout'] = float(self.timeout)
# Allow caller to specify not to follow redirects, in which case we
# just return the redirect response. Useful for using stacks:lookup.
redirect = kwargs.pop('redirect', True)
        # requests does not follow the RFC when redirecting: it should resend
        # the same method to the new location, but for a DELETE/POST/PUT that
        # gets a 302 it sends a GET instead. We therefore bypass its redirect
        # handling and reissue the request ourselves.
# Hopefully this could be fixed as they say in a comment in a future
# point version i.e.: 3.x
# See issue: https://github.com/kennethreitz/requests/issues/1704
allow_redirects = False
try:
resp = requests.request(
method,
self.endpoint_url + url,
allow_redirects=allow_redirects,
**kwargs)
except socket.gaierror as e:
message = (_("Error finding address for %(url)s: %(e)s") %
{'url': self.endpoint_url + url, 'e': e})
raise exc.InvalidEndpoint(message=message)
except (socket.error, socket.timeout) as e:
endpoint = self.endpoint
message = (_("Error communicating with %(endpoint)s %(e)s") %
{'endpoint': endpoint, 'e': e})
raise exc.CommunicationError(message=message)
self.log_http_response(resp)
if not ('X-Auth-Key' in kwargs['headers']) and (
resp.status_code == 401 or
(resp.status_code == 500 and "(HTTP 401)" in resp.content)):
raise exc.HTTPUnauthorized(_("Authentication failed: %s")
% resp.content)
elif 400 <= resp.status_code < 600:
raise exc.from_response(resp)
elif resp.status_code in (301, 302, 305):
# Redirected. Reissue the request to the new location,
# unless caller specified redirect=False
if redirect:
location = resp.headers.get('location')
path = self.strip_endpoint(location)
resp = self._http_request(path, method, **kwargs)
elif resp.status_code == 300:
raise exc.from_response(resp)
return resp
def strip_endpoint(self, location):
if location is None:
message = _("Location not returned with 302")
raise exc.InvalidEndpoint(message=message)
elif location.lower().startswith(self.endpoint.lower()):
return location[len(self.endpoint):]
else:
message = _("Prohibited endpoint redirect %s") % location
raise exc.InvalidEndpoint(message=message)
def credentials_headers(self):
creds = {}
# NOTE(dhu): (shardy) When deferred_auth_method=password, Heat
# encrypts and stores username/password. For Keystone v3, the
# intent is to use trusts since SHARDY is working towards
# deferred_auth_method=trusts as the default.
        # TODO(dhu): Make Keystone v3 work in Heat standalone mode. Maybe
# require X-Auth-User-Domain.
if self.username:
creds['X-Auth-User'] = self.username
if self.password:
creds['X-Auth-Key'] = self.password
return creds
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'data' in kwargs:
kwargs['data'] = jsonutils.dumps(kwargs['data'])
resp = self._http_request(url, method, **kwargs)
body = utils.get_response_body(resp)
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
return self._http_request(url, method, **kwargs)
def client_request(self, method, url, **kwargs):
resp, body = self.json_request(method, url, **kwargs)
return resp
def head(self, url, **kwargs):
return self.client_request("HEAD", url, **kwargs)
def get(self, url, **kwargs):
return self.client_request("GET", url, **kwargs)
def post(self, url, **kwargs):
return self.client_request("POST", url, **kwargs)
def put(self, url, **kwargs):
return self.client_request("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self.raw_request("DELETE", url, **kwargs)
def patch(self, url, **kwargs):
return self.client_request("PATCH", url, **kwargs)
class SessionClient(adapter.LegacyJsonAdapter):
"""HTTP client based on Keystone client session."""
def __init__(self, *args, **kwargs):
self.username = kwargs.pop('username', None)
self.password = kwargs.pop('password', None)
super(SessionClient, self).__init__(*args, **kwargs)
def request(self, url, method, **kwargs):
redirect = kwargs.get('redirect')
kwargs.setdefault('user_agent', USER_AGENT)
try:
kwargs.setdefault('json', kwargs.pop('data'))
except KeyError:
pass
resp, body = super(SessionClient, self).request(
url, method,
raise_exc=False,
**kwargs)
if 400 <= resp.status_code < 600:
raise exc.from_response(resp)
elif resp.status_code in (301, 302, 305):
if redirect:
location = resp.headers.get('location')
path = self.strip_endpoint(location)
resp = self.request(path, method, **kwargs)
elif resp.status_code == 300:
raise exc.from_response(resp)
return resp
def credentials_headers(self):
return {}
def strip_endpoint(self, location):
if location is None:
message = _("Location not returned with 302")
raise exc.InvalidEndpoint(message=message)
if (self.endpoint_override is not None and
location.lower().startswith(self.endpoint_override.lower())):
return location[len(self.endpoint_override):]
else:
return location
def _construct_http_client(endpoint=None, username=None, password=None,
include_pass=None, endpoint_type=None,
auth_url=None, **kwargs):
session = kwargs.pop('session', None)
auth = kwargs.pop('auth', None)
if session:
kwargs['endpoint_override'] = endpoint
if username:
kwargs.update({'username': username})
if password:
kwargs.update({'password': password})
return SessionClient(session, auth=auth, **kwargs)
else:
return HTTPClient(endpoint=endpoint, username=username,
password=password, include_pass=include_pass,
endpoint_type=endpoint_type, auth_url=auth_url,
**kwargs)
|
apache-2.0
| -2,518,008,748,685,360,600
| 37.564607
| 79
| 0.592541
| false
| 4.067852
| false
| false
| false
|
ramineni/my_congress
|
congress_dashboard/policies/views.py
|
1
|
5367
|
# Copyright 2014 VMware.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import dictsort
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from congress_dashboard.api import congress
import congress_dashboard.datasources.utils as ds_utils
from congress_dashboard.policies import forms as policies_forms
from congress_dashboard.policies.rules import tables as rules_tables
from congress_dashboard.policies import tables as policies_tables
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
"""List policies."""
table_class = policies_tables.PoliciesTable
template_name = 'admin/policies/index.html'
def get_data(self):
try:
policies = congress.policies_list(self.request)
except Exception as e:
msg = _('Unable to get policies list: %s') % str(e)
LOG.error(msg)
messages.error(self.request, msg)
return []
return policies
class CreateView(forms.ModalFormView):
form_class = policies_forms.CreatePolicy
template_name = 'admin/policies/create.html'
success_url = reverse_lazy('horizon:admin:policies:index')
class DetailView(tables.DataTableView):
"""List details about and rules in a policy."""
table_class = rules_tables.PolicyRulesTable
template_name = 'admin/policies/detail.html'
def get_data(self):
policy_name = self.kwargs['policy_name']
try:
policy_rules = congress.policy_rules_list(self.request,
policy_name)
except Exception as e:
msg_args = {'policy_name': policy_name, 'error': str(e)}
msg = _('Unable to get rules in policy "%(policy_name)s": '
'%(error)s') % msg_args
LOG.error(msg)
messages.error(self.request, msg)
redirect = reverse('horizon:admin:policies:index')
raise exceptions.Http302(redirect)
for r in policy_rules:
r.set_id_as_name_if_empty()
return policy_rules
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
policy_name = kwargs['policy_name']
try:
policy = congress.policy_get(self.request, policy_name)
except Exception as e:
msg_args = {'policy_name': policy_name, 'error': str(e)}
msg = _('Unable to get policy "%(policy_name)s": '
'%(error)s') % msg_args
LOG.error(msg)
messages.error(self.request, msg)
redirect = reverse('horizon:admin:policies:index')
raise exceptions.Http302(redirect)
context['policy'] = policy
# Alphabetize and convert list of data source tables and columns into
# JSON formatted string consumable by JavaScript. Do this here instead
# of in the Create Rule form so that the tables and columns lists
# appear in the HTML document before the JavaScript that uses them.
all_tables = ds_utils.get_datasource_tables(self.request)
sorted_datasources = dictsort(all_tables, 'datasource')
tables = []
for ds in sorted_datasources:
datasource_tables = ds['tables']
datasource_tables.sort()
for table in ds['tables']:
tables.append('%s%s%s' % (ds['datasource'],
congress.TABLE_SEPARATOR, table))
context['tables'] = json.dumps(tables)
datasource_columns = ds_utils.get_datasource_columns(self.request)
sorted_datasources = dictsort(datasource_columns, 'datasource')
columns = []
for ds in sorted_datasources:
sorted_tables = dictsort(ds['tables'], 'table')
for tbl in sorted_tables:
# Ignore service-derived tables, which are already included.
if congress.TABLE_SEPARATOR in tbl['table']:
continue
table_columns = tbl['columns']
if table_columns:
table_columns.sort()
else:
# Placeholder name for column when the table has none.
table_columns = ['_']
for column in table_columns:
columns.append('%s%s%s %s' % (ds['datasource'],
congress.TABLE_SEPARATOR,
tbl['table'], column))
context['columns'] = json.dumps(columns)
return context
|
apache-2.0
| -726,373,775,188,115,800
| 39.353383
| 78
| 0.618968
| false
| 4.428218
| false
| false
| false
|
JohnPeel/Sarabi
|
misc.py
|
1
|
2767
|
import os
import shutil
import stat
import yaml
from strings import *
ignored_paths = ['.git']
class EPackageNotFound(Exception):
pass
class EAmbiguousAtom(Exception):
def __init__(self, valid_packages):
self.valid_packages = valid_packages
message = PACKAGE_TOO_AMBIGUOUS % len(valid_packages)
message += '\n\n'
for package in valid_packages:
message += '\n '.join(package.info())
message += '\n'
message += '\n'
super(EAmbiguousAtom, self).__init__(message)
def get_default_config(program):
(path, executable) = os.path.split(program)
return os.path.abspath(os.path.join(path, os.path.splitext(executable)[0] + '.yml'))
def get_repo(repo):
if (repo[:7] == 'github:'):
return 'https://github.com/%s.git' % repo[7:]
if (repo[:4] == 'git:'):
return repo[4:]
return repo
def parse_package_atom(package):
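    """Split a package atom of the form [catagory/]name[-version][::remote]
    into a dict of its components (missing parts are None)."""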
remote = None
if ('::' in package):
        (package, remote) = package.split('::', 1)
catagory = None
if ('/' in package):
        (catagory, package) = package.split('/', 1)
version = None
if ('-' in package):
        (package, version) = package.split('-', 1)
return {
'catagory': catagory,
'name': package,
'version': version,
'remote': remote
}
def listdir(dir):
return [os.path.relpath(os.path.join(dp, f), dir) for dp, dn, fn in os.walk(dir) for f in fn]
def copytree(src, dst, symlinks = False, ignore = None):
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
class ConfDict(dict):
def __init__(self, filename = None, **kwds):
super(ConfDict, self).__init__(**kwds)
self.set_defaults()
if filename and os.path.exists(filename):
self.load(filename)
def __del__(self):
if hasattr(self, 'filename') and self.filename:
self.save()
def set_defaults(self):
pass
def load(self, filename):
if (os.path.exists(filename)):
self.filename = filename
with open(filename, 'r') as file:
                self.update(yaml.safe_load(file))
def save(self, filename = None):
if (not filename):
filename = self.filename
with open(filename, 'w') as file:
yaml.dump(dict(self), file, default_flow_style=False)
|
gpl-3.0
| 873,124,693,430,433,500
| 23.927928
| 95
| 0.606433
| false
| 3.202546
| false
| false
| false
|
ianmiell/shutit-distro
|
postgresql/postgresql.py
|
1
|
1264
|
"""ShutIt module. See http://shutit.tk
"""
# See http://www.linuxfromscratch.org/blfs/view/svn/postlfs/mitkrb.html for configuration notes and context
from shutit_module import ShutItModule
class postgresql(ShutItModule):
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/postgresql')
shutit.send('cd /tmp/build/postgresql')
shutit.send('wget -qO- http://ftp.postgresql.org/pub/source/v9.4.0/postgresql-9.4.0.tar.bz2 | bunzip2 -c | tar -xf -')
shutit.send('cd postgres*')
shutit.send('''sed -i '/DEFAULT_PGSOCKET_DIR/s@/tmp@/run/postgresql@' src/include/pg_config_manual.h''')
shutit.send('./configure --prefix=/usr --enable-thread-safety --docdir=/usr/share/doc/postgresql-9.4.0')
shutit.send('make')
shutit.send('make install')
shutit.send('make install-docs')
# TODO: server http://www.linuxfromscratch.org/blfs/view/svn/server/postgresql.html
return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/postgresql')
return True
def module():
return postgresql(
'shutit.tk.sd.postgresql.postgresql', 158844782.0255,
description='',
maintainer='',
depends=['shutit.tk.sd.tcl.tcl','shutit.tk.sd.open_ldap.open_ldap','shutit.tk.sd.linux_pam.linux_pam','shutit.tk.sd.mit_kerberos_v5.mit_kerberos_v5']
)
|
gpl-2.0
| 7,476,906,327,428,438,000
| 37.30303
| 151
| 0.719146
| false
| 2.61157
| false
| false
| false
|
cprn/samegame
|
model.py
|
1
|
1390
|
import random
data = []
NUM_COLOURS = 5
GRID_SIZE = 20
def init():
for y in range(GRID_SIZE):
r = []
for x in range(GRID_SIZE):
r.append(random.randint(0, NUM_COLOURS - 1))
data.append(r)
def get_block_colour(x, y):
if x in range(get_width()) and y in range(get_height()):
return data[x][y]
return None
def get_siblings(x, y, siblings=None):
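    """Recursively collect the coordinates of every block connected to (x, y)
    that shares its colour (a flood fill)."""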
colour = get_block_colour(x, y)
if colour is None:
return []
if siblings is None:
siblings = [(x, y)]
for neighbour in [(x-1, y), (x, y-1), (x+1, y), (x, y+1)]:
if neighbour in siblings:
continue
if get_block_colour(*neighbour) == colour:
siblings.append(neighbour)
siblings = get_siblings(*neighbour, siblings=siblings)
return siblings
def get_height():
if len(data):
return len(data[0])
return 0
def get_width():
return len(data)
def remove_block(x, y):
for block in get_siblings(x, y):
data[block[0]][block[1]] = None
def run_gravity():
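    """Shift blocks down by one cell into empty spaces (bottom-up) and drop
    columns that have become completely empty."""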
for x in reversed(range(get_width())):
if data[x] == [None] * get_height():
data.pop(x)
continue
for y in reversed(range(get_height())):
if get_block_colour(x, y) is None and y != 0:
data[x][y] = data[x][y-1]
data[x][y-1] = None
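# Minimal driver sketch (hypothetical): build a grid, clear a group, let blocks fall.
#   init()
#   remove_block(3, 4)
#   run_gravity()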
|
unlicense
| 6,598,916,812,739,268,000
| 22.166667
| 66
| 0.539568
| false
| 3.159091
| false
| false
| false
|