repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
tempbottle/kosmosfs-1 | refs/heads/master | scripts/kfssetup.py | 13 | #!/usr/bin/env python
#
# $Id: kfssetup.py 36 2007-11-12 02:43:36Z sriramsrao $
#
# Copyright 2007 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Script to setup KFS servers on a set of nodes
# This scripts reads a machines.cfg file that describes the meta/chunk
# servers configurations and installs the binaries/scripts and creates
# the necessary directory hierarchy.
#
import os,sys,os.path,getopt
import socket,threading,popen2
import md5
from ConfigParser import ConfigParser
# Use the python config parser to parse out machines setup
# Input file format for machines.cfg
# [metaserver]
# type: metaserver
# clusterkey: <cluster name>
# node: <value>
# rundir: <dir>
# baseport: <port>
#
# [chunkserver1]
# node: <value>
# rundir: <dir>
# baseport: <port>
# space: <space exported by the server> (n m/g)
# {chunkdir: <dir>}
# [chunkserver2]
# ...
# [chunkserverN]
# ...
#
# where, space is expressed in units of MB/GB or bytes.
#
# Install on each machine with the following directory hierarchy:
# rundir/
# bin/ -- binaries, config file, kfscp/kfslog/kfschunk dirs
# logs/ -- log output from running the binary
# scripts/ -- all the helper scripts
# If a path for storing the chunks isn't specified, then it defaults to bin
#
# Multipliers converting a "space" spec's unit letter to bytes.
unitsScale = {'g' : 1 << 30, 'm' : 1 << 20, 'k' : 1 << 10, 'b' : 1}
# Maximum number of installer threads started in one batch.
maxConcurrent = 25
# When non-zero, skip the metaserver and only deploy chunkservers (-c flag).
chunkserversOnly = 0
# Tar binary used to build install packages; may be overridden via -r/--tar.
tarProg = 'gtar'
# MD5 of the deployed binaries/libraries; filled in by getFiles().
md5String = ""
def which(program):
    """Locate an executable like the Unix `which` command.

    Returns `program` itself when it contains a directory component and is
    executable; otherwise searches each $PATH entry in order. Returns None
    when no executable match is found.
    """
    def is_exe(fpath):
        # os.path.isfile (not exists): a directory with the execute bit set
        # must not be mistaken for a program.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        # Explicit path given: check it directly, no $PATH search.
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
def setupMeta(section, config, outputFn, packageFn):
    """ Setup the metaserver binaries/config files on a node.

    Writes the metaserver config to outputFn, tars binaries + config into
    packageFn and returns the argument string for kfsinstall.sh.
    NOTE(review): returns None (not install args) when chunkserversOnly is
    set -- callers must tolerate that.
    """
    global chunkserversOnly
    if chunkserversOnly > 0:
        print "Chunkservers only is set; not doing meta"
        return
    key = config.get(section, 'clusterkey')
    baseport = config.getint(section, 'baseport')
    rundir = config.get(section, 'rundir')
    fh = open(outputFn, 'w')
    print >> fh, "metaServer.clientPort = %d" % baseport
    # Chunkservers connect on a fixed offset above the client port.
    print >> fh, "metaServer.chunkServerPort = %d" % (baseport + 100)
    print >> fh, "metaServer.clusterKey = %s" % (key)
    print >> fh, "metaServer.cpDir = %s/bin/kfscp" % rundir
    print >> fh, "metaServer.logDir = %s/bin/kfslog" % rundir
    # Optional settings, emitted only when present in machines.cfg.
    if config.has_option(section, 'loglevel'):
        print >> fh, "metaServer.loglevel = %s" % config.get(section, 'loglevel')
    if config.has_option(section, 'worm'):
        print >> fh, "metaServer.wormMode = 1"
    if config.has_option(section, 'numservers'):
        print >> fh, "metaServer.minChunkservers = %s" % config.get(section, 'numservers')
    if config.has_option(section, 'md5sumfilename'):
        print >> fh, "metaServer.md5sumFilename = %s" % config.get(section, 'md5sumfilename')
    fh.close()
    # Optionally generate the web-UI monitoring server config alongside.
    if config.has_option(section, 'webuiConfFile'):
        confFile = config.get(section, 'webuiConfFile')
        fh = open(confFile, 'w')
        print >> fh, "[webserver]"
        print >> fh, "webServer.metaserverPort = %d" % baseport
        print >> fh, "webServer.port = %d" % (baseport + 50)
        print >> fh, "webServer.allMachinesFn = %s/webui/all-machines.txt" % rundir
        print >> fh, "webServer.docRoot = %s/webui/files" % rundir
        fh.close()
    # Bundle the metaserver binaries, the generated config and helper scripts.
    cmd = "%s -zcf %s bin/logcompactor bin/metaserver %s lib webui scripts/*" % (tarProg, packageFn, outputFn)
    os.system(cmd)
    installArgs = "-r %s -d %s -m" % (tarProg, rundir)
    return installArgs
def setupChunkConfig(section, config, outputFn):
    """ Setup the chunkserver binaries/config files on a node.

    Writes the chunkserver properties file for `section` to outputFn.
    """
    metaNode = config.get('metaserver', 'node')
    # Chunkservers talk to the metaserver on baseport + 100 (see setupMeta).
    metaToChunkPort = config.getint('metaserver', 'baseport') + 100
    hostname = config.get(section, 'node')
    # for rack-aware replication, we assume that nodes on different racks are on different subnets
    s = socket.gethostbyname(hostname)
    ipoctets = s.split('.')
    # Third octet of the node's IP is used as the rack id.
    rackId = int(ipoctets[2])
    #
    fh = open (outputFn, 'w')
    print >> fh, "chunkServer.metaServer.hostname = %s" % metaNode
    print >> fh, "chunkServer.metaServer.port = %d" % metaToChunkPort
    print >> fh, "chunkServer.clientPort = %d" % config.getint(section, 'baseport')
    print >> fh, "chunkServer.clusterKey = %s" % config.get('metaserver', 'clusterkey')
    print >> fh, "chunkServer.rackId = %d" % (rackId)
    print >> fh, "chunkServer.md5sum = %s" % (md5String)
    # Parse "space" as "<value> [units]"; units is g/m/k/b, defaulting to bytes.
    space = config.get(section, 'space')
    s = space.split()
    if (len(s) >= 2):
        units = s[1].lower()
    else:
        units = 'b'
    value = int(s[0]) * unitsScale[ units[0] ]
    print >> fh, "chunkServer.totalSpace = %d" % value
    rundir = config.get(section, 'rundir')
    if config.has_option(section, 'chunkdir'):
        chunkDir = config.get(section, 'chunkdir')
    else:
        # Default chunk storage location when none is configured.
        chunkDir = "%s/bin/kfschunk" % (rundir)
    print >> fh, "chunkServer.chunkDir = %s" % (chunkDir)
    print >> fh, "chunkServer.logDir = %s/bin/kfslog" % (rundir)
    if config.has_option(section, 'loglevel'):
        print >> fh, "chunkServer.loglevel = %s" % config.get(section, 'loglevel')
    fh.close()
def setupChunk(section, config, outputFn, packageFn):
    """ Build the chunkserver install package for one node.

    Generates the chunkserver config into outputFn, tars it together with
    the chunkserver binaries into packageFn, and returns the argument
    string for kfsinstall.sh on the remote node.
    """
    setupChunkConfig(section, config, outputFn)
    # Package binaries, the freshly written config and the helper scripts.
    os.system("%s -zcf %s bin/chunkscrubber bin/chunkserver %s lib scripts/*" %
              (tarProg, packageFn, outputFn))
    runDir = config.get(section, 'rundir')
    if config.has_option(section, 'chunkdir'):
        storageDir = config.get(section, 'chunkdir')
    else:
        storageDir = "%s/bin/kfschunk" % (runDir)
    return "-r %s -d %s -c \"%s\" " % (tarProg, runDir, storageDir)
def usage():
    """ Print out the usage for this program. """
    # Note: the string continues across the backslash; its leading spaces
    # are part of the literal.
    print "%s [-f, --file <server.cfg>] [-m , --machines <chunkservers.txt>] [-r, --tar <tar|gtar>] \
[-w, --webui <webui dir>] [ [-b, --bin <dir with binaries>] {-u, --upgrade} | [-U, --uninstall] ]\n" % sys.argv[0]
    return
def copyDir(srcDir, dstDir):
    """Recursively copy srcDir into dstDir by shelling out to `cp -r`."""
    os.system("cp -r %s %s" % (srcDir, dstDir))
def computeMD5(datadir, digest):
    """Update `digest` with the contents of every regular file in `datadir`.

    Files are visited in sorted name order so the resulting digest is
    deterministic across machines. Subdirectories are skipped, not
    recursed into.
    """
    for f in sorted(os.listdir(datadir)):
        path = os.path.join(datadir, f)
        if os.path.isdir(path):
            continue
        # Binary mode: hashing through text mode would corrupt the digest
        # on platforms with newline translation. try/finally guarantees the
        # handle is closed (the original leaked it).
        fh = open(path, 'rb')
        try:
            while 1:
                buf = fh.read(4096)
                # `not buf` is the correct EOF test for binary reads
                # (the original compared against the text-mode "" sentinel).
                if not buf:
                    break
                digest.update(buf)
        finally:
            fh.close()
def getFiles(buildDir, webuidir):
    """ Copy files from buildDir/bin, buildDir/lib and . to ./bin, ./lib, and ./scripts
    respectively.

    Also computes the combined MD5 of the staged binaries and libraries
    into the module-level md5String, which is later written into each
    chunkserver config.
    """
    global md5String
    cmd = "mkdir -p ./scripts; cp ./* scripts; chmod u+w scripts/*"
    os.system(cmd)
    # Prefer the amd64 subdirectory of bin/ when present.
    s = "%s/bin" % buildDir
    if (os.path.exists(s + "/amd64")):
        s += "/amd64"
    copyDir(s, './bin')
    # NOTE(review): `md5.new()` is the Python 2-only md5 module;
    # hashlib.md5() is the modern equivalent.
    digest = md5.new()
    computeMD5('./bin', digest)
    # Same amd64 preference for lib/.
    s = "%s/lib" % buildDir
    if (os.path.exists(s + "/amd64")):
        s += "/amd64"
    copyDir(s, './lib')
    computeMD5('./lib', digest)
    md5String = digest.hexdigest()
    copyDir(webuidir, './webui')
def cleanup(fn):
    """Remove the staging directories created in the current directory by
    getFiles(), plus any extra paths listed in `fn`."""
    os.system("rm -rf ./scripts ./bin ./lib ./webui %s " % fn)
class InstallWorker(threading.Thread):
"""InstallWorker thread that runs a command on remote node"""
def __init__(self, sec, conf, tmpdir, i, m):
threading.Thread.__init__(self)
self.section = sec
self.config = conf
self.tmpdir = tmpdir
self.id = i
self.mode = m
self.doBuildPkg = 1
def singlePackageForAll(self, packageFn, installArgs):
self.doBuildPkg = 0
self.packageFn = packageFn
self.installArgs = installArgs
def buildPackage(self):
if (self.section == 'metaserver'):
self.installArgs = setupMeta(self.section, self.config, self.configOutputFn, self.packageFn)
else:
self.installArgs = setupChunk(self.section, self.config, self.configOutputFn, self.packageFn)
def doInstall(self):
fn = os.path.basename(self.packageFn)
if (self.section == 'metaserver'):
if chunkserversOnly > 0:
return
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.dest, self.dest, fn, self.mode, self.installArgs)
else:
# chunkserver
configFn = os.path.basename(self.configOutputFn)
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; mv /tmp/%s /tmp/ChunkServer.prp; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.configOutputFn, self.dest, self.dest, fn, configFn, self.mode, self.installArgs)
p = popen2.Popen3(c, True)
for out in p.fromchild:
if len(out) > 1:
print '[%s]: %s' % (self.dest, out[:-1])
def cleanup(self):
if self.doBuildPkg > 0:
# if we built the package, nuke it
c = "rm -f %s %s" % (self.configOutputFn, self.packageFn)
else:
c = "rm -f %s" % (self.configOutputFn)
os.system(c)
c = "ssh -o StrictHostKeyChecking=no %s 'rm -f /tmp/install.sh /tmp/kfspkg.tgz' " % self.dest
popen2.Popen3(c, True)
def run(self):
self.configOutputFn = "%s/fn.%d" % (self.tmpdir, self.id)
if self.doBuildPkg > 0:
self.packageFn = "%s/kfspkg.%d.tgz" % (self.tmpdir, self.id)
self.buildPackage()
else:
setupChunkConfig(self.section, self.config, self.configOutputFn)
self.dest = config.get(self.section, 'node')
self.doInstall()
self.cleanup()
def doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode):
    """Install (or upgrade) KFS on every node in `config`.

    Stages binaries/scripts locally, builds per-node (or one shared
    chunkserver) package and runs one InstallWorker per config section --
    serially, or in parallel batches of maxConcurrent.
    """
    if not config.has_section('metaserver'):
        # NOTE(review): Python 2 raise syntax; also NoSectionError is
        # defined on the ConfigParser *module*, not the parser instance,
        # so this line itself would raise AttributeError -- verify.
        raise config.NoSectionError, "No metaserver section"
    if not os.path.exists(builddir):
        print "%s : directory doesn't exist\n" % builddir
        sys.exit(-1)
    # Stage binaries/libs/scripts into ./bin, ./lib, ./scripts, ./webui.
    getFiles(builddir, webuidir)
    if os.path.exists('webui'):
        webuiconfFile = os.path.join(webuidir, "server.conf")
        config.set('metaserver', 'webuiConfFile', webuiconfFile)
    workers = []
    i = 0
    sections = config.sections()
    if upgrade == 1:
        mode = "-u"
    else:
        mode = "-i"
    chunkPkgFn = ""
    cleanupFn = ""
    for s in sections:
        w = InstallWorker(s, config, tmpdir, i, mode)
        workers.append(w)
        if serialMode == 1:
            w.start()
            w.join()
        else:
            # same package for all chunkservers
            if (s != 'metaserver'):
                if chunkPkgFn == "":
                    # Build the shared chunkserver package exactly once.
                    configOutputFn = "%s/fn.common" % (tmpdir)
                    chunkPkgFn = "kfspkg-chunk.tgz"
                    cleanupFn = "%s %s" % (configOutputFn, chunkPkgFn)
                    installArgs = setupChunk(s, config, configOutputFn, chunkPkgFn)
                w.singlePackageForAll(chunkPkgFn, installArgs)
        i = i + 1
    if serialMode == 0:
        # Start workers in batches of maxConcurrent and wait per batch.
        for i in xrange(0, len(workers), maxConcurrent):
            #start a bunch
            for j in xrange(maxConcurrent):
                idx = i + j
                if idx >= len(workers):
                    break
                workers[idx].start()
            #wait for each one to finish
            for j in xrange(maxConcurrent):
                idx = i + j
                if idx >= len(workers):
                    break
                workers[idx].join()
            print "Done with %d workers" % idx
    # Final bounded join as a safety net for stragglers.
    for i in xrange(len(workers)):
        workers[i].join(120.0)
    cleanup(cleanupFn)
class UnInstallWorker(threading.Thread):
    """UnInstallWorker thread that runs a command on remote node"""
    def __init__(self, c, n):
        threading.Thread.__init__(self)
        self.cmd = c    # full ssh command line to execute
        self.node = n   # node name, used to tag output lines
    def run(self):
        # capture stderr and ignore the hostkey has changed message
        p = popen2.Popen3(self.cmd, True)
        for out in p.fromchild:
            if len(out) > 1:
                print '[%s]: %s' % (self.node, out[:-1])
def doUninstall(config):
    """Uninstall KFS from every node in `config` by running
    kfsinstall.sh -U remotely, one UnInstallWorker thread per node.
    Exits the process when done."""
    sections = config.sections()
    workers = []
    for s in sections:
        rundir = config.get(s, 'rundir')
        node = config.get(s, 'node')
        if (s == 'metaserver'):
            otherArgs = '-m'
        else:
            # This is a chunkserver; so nuke out chunk dir as well
            if config.has_option(s, 'chunkdir'):
                chunkDir = config.get(s, 'chunkdir')
            else:
                chunkDir = "%s/bin/kfschunk" % (rundir)
            otherArgs = "-c \"%s\"" % (chunkDir)
        cmd = "ssh -o StrictHostKeyChecking=no %s 'cd %s; sh scripts/kfsinstall.sh -U -d %s %s' " % \
            (node, rundir, rundir, otherArgs)
        # print "Uninstall cmd: %s\n" % cmd
        # os.system(cmd)
        w = UnInstallWorker(cmd, node)
        workers.append(w)
        w.start()
    print "Started all the workers..waiting for them to finish"
    # Bounded join so a hung ssh cannot block shutdown forever.
    for i in xrange(len(workers)):
        workers[i].join(120.0)
    sys.exit(0)
def readChunkserversFile(machinesFn):
    '''Given a list of chunkserver node names, one per line, construct a config
    for each chunkserver and add that to the config based on the defaults.

    Each node gets a "chunkserver_<name>" section populated from the
    "chunkserver_defaults" section, which is removed afterwards.
    '''
    global config
    defaultChunkOptions = config.options("chunkserver_defaults")
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(machinesFn, 'r') as fh:
        for l in fh:
            line = l.strip()
            # Ignore commented-out node names and blank lines (a blank line
            # would otherwise create a bogus "chunkserver_" section).
            if line.startswith('#') or not line:
                continue
            section_name = "chunkserver_" + line
            config.add_section(section_name)
            config.set(section_name, "node", line)
            for o in defaultChunkOptions:
                config.set(section_name, o, config.get("chunkserver_defaults", o))
    config.remove_section("chunkserver_defaults")
if __name__ == '__main__':
    # Parse command line; see usage() for the option summary.
    (opts, args) = getopt.getopt(sys.argv[1:], "cb:f:m:r:t:w:hsUu",
                                 ["chunkserversOnly", "build=", "file=", "machines=", "tar=", "tmpdir=",
                                  "webui=", "help", "serialMode", "uninstall", "upgrade"])
    filename = ""
    builddir = ""
    uninstall = 0
    upgrade = 0
    serialMode = 0
    machines = ""
    webuidir = ""
    chunkserversOnly = 0
    # Script probably won't work right if you change tmpdir from /tmp location
    tmpdir = "/tmp"
    for (o, a) in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit(2)
        if o in ("-f", "--file"):
            filename = a
        elif o in ("-b", "--build"):
            builddir = a
        elif o in ("-c", "--chunkserversOnly"):
            chunkserversOnly = 1
        elif o in ("-m", "--machines"):
            machines = a
        elif o in ("-r", "--tar"):
            tarProg = a
        elif o in ("-w", "--webuidir"):
            # NOTE(review): getopt above declares the long option as
            # "webui=", so getopt returns "--webui" and this branch only
            # ever matches the short -w form -- confirm and reconcile.
            webuidir = a
        elif o in ("-t", "--tmpdir"):
            tmpdir = a
        elif o in ("-U", "--uninstall"):
            uninstall = 1
        elif o in ("-u", "--upgrade"):
            upgrade = 1
        elif o in ("-s", "--serialMode"):
            serialMode = 1
    # Fall back to gtar/tar when the requested tar program isn't on PATH.
    if not which(tarProg):
        if (which('gtar')):
            tarProg = 'gtar'
        else:
            tarProg = 'tar'
    if not os.path.exists(filename):
        # NOTE(review): message says "directory" but `filename` is the
        # machines.cfg config file given via -f.
        print "%s : directory doesn't exist\n" % filename
        sys.exit(-1)
    config = ConfigParser()
    config.readfp(open(filename, 'r'))
    # Optional flat file of chunkserver hostnames, one per line (-m).
    if machines != "":
        readChunkserversFile(machines)
    if uninstall == 1:
        doUninstall(config)
    else:
        doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode)
|
agiovann/Constrained_NMF | refs/heads/master | use_cases/CaImAnpaper/compare_gt_cnmf_CNN.py | 2 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 14:49:36 2017
@author: agiovann
"""
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import map
from builtins import range
from past.utils import old_div
import cv2
import pickle
from caiman.components_evaluation import select_components_from_metrics
from caiman.base.rois import nf_match_neurons_in_binary_masks
from caiman.utils.utils import apply_magic_wand
from caiman.base.rois import detect_duplicates_and_subsets
# Force OpenCV single-threaded so it does not compete with the
# multiprocessing cluster set up below.
try:
    cv2.setNumThreads(1)
except:
    print('Open CV is naturally single threaded')
# Enable IPython autoreload when running interactively.
try:
    if __IPYTHON__:
        print(1)
        # this is used for debugging purposes only. allows to reload classes
        # when changed
        get_ipython().magic('load_ext autoreload')
        get_ipython().magic('autoreload 2')
except NameError:
    print('Not launched under iPython')
import caiman as cm
import numpy as np
import os
import time
import pylab as pl
import scipy
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.components_evaluation import estimate_components_quality_auto
from caiman.cluster import setup_cluster
#%%
def precision_snr(snr_gt, snr_gt_fn, snr_cnmf, snr_cnmf_fp, snr_thrs):
    """Precision/recall/F1 between ground-truth and CNMF components as a
    function of an SNR threshold.

    Parameters
    ----------
    snr_gt : SNR values of ground-truth components matched by CNMF
    snr_gt_fn : SNR of ground-truth components missed by CNMF (false negatives)
    snr_cnmf : CNMF-side SNR of the matched components, aligned with snr_gt
    snr_cnmf_fp : SNR of CNMF components without a ground-truth match (false positives)
    snr_thrs : iterable of SNR thresholds to evaluate

    Returns
    -------
    Three arrays of shape (len(snr_thrs), 3) with columns
    [precision, recall, f1], computed under three matching conventions
    ("fake", OR, AND -- see inline comments).
    """
    # The concatenated vectors are aligned index-by-index; a 0 entry marks
    # a component absent on that side. They are loop-invariant, so build
    # them once instead of on every threshold iteration.
    snr_all_gt = np.array(list(snr_gt) + list(snr_gt_fn) + [0] * len(snr_cnmf_fp))
    snr_all_cnmf = np.array(list(snr_cnmf) + [0] * len(snr_gt_fn) + list(snr_cnmf_fp))
    all_results_fake = []
    all_results_OR = []
    all_results_AND = []
    for snr_thr in snr_thrs:
        ind_gt = np.where(snr_all_gt > snr_thr)[0]      # comps in gt above threshold
        ind_cnmf = np.where(snr_all_cnmf > snr_thr)[0]  # same for cnmf
        # precision: how many detected components above a given SNR are true
        prec = np.sum(snr_all_gt[ind_cnmf] > 0) / len(ind_cnmf)
        # recall: how many gt components with SNR above the threshold are detected
        rec = np.sum(snr_all_cnmf[ind_gt] > 0) / len(ind_gt)
        f1 = 2 * prec * rec / (prec + rec)
        all_results_fake.append([prec, rec, f1])
        # f1 score with OR condition: a component participates if it is
        # above threshold on either side
        ind_OR = np.union1d(ind_gt, ind_cnmf)
        ind_gt_OR = np.where(snr_all_gt[ind_OR] > 0)[0]      # gt components
        ind_cnmf_OR = np.where(snr_all_cnmf[ind_OR] > 0)[0]  # cnmf components
        prec_OR = np.sum(snr_all_gt[ind_OR][ind_cnmf_OR] > 0) / len(ind_cnmf_OR)
        rec_OR = np.sum(snr_all_cnmf[ind_OR][ind_gt_OR] > 0) / len(ind_gt_OR)
        f1_OR = 2 * prec_OR * rec_OR / (prec_OR + rec_OR)
        all_results_OR.append([prec_OR, rec_OR, f1_OR])
        # f1 score with AND condition: only components above threshold on
        # both sides count as matches
        ind_AND = np.intersect1d(ind_gt, ind_cnmf)
        ind_fp = np.intersect1d(ind_cnmf, np.where(snr_all_gt == 0)[0])
        ind_fn = np.intersect1d(ind_gt, np.where(snr_all_cnmf == 0)[0])
        prec_AND = len(ind_AND) / (len(ind_AND) + len(ind_fp))
        rec_AND = len(ind_AND) / (len(ind_AND) + len(ind_fn))
        f1_AND = 2 * prec_AND * rec_AND / (prec_AND + rec_AND)
        all_results_AND.append([prec_AND, rec_AND, f1_AND])
    return np.array(all_results_fake), np.array(all_results_OR), np.array(all_results_AND)
#%%
# Parameters shared across all datasets.
global_params = {'min_SNR': 2,  # minimum SNR when considering adding a new neuron
                 'gnb': 2,  # number of background components
                 'rval_thr': 0.80,  # spatial correlation threshold
                 'min_cnn_thresh': 0.95,
                 'p': 1,
                 # NOTE(review): the original comment on the next key described the
                 # OnACID mini-batch length, which does not match the key name -- verify.
                 'min_rval_thr_rejected': 0,
                 # NOTE(review): original comment here said "flag for motion
                 # correction", which does not match the key name -- verify.
                 'max_classifier_probability_rejected': 0.1,
                 'max_fitness_delta_accepted': -20,
                 'Npeaks': 5,
                 'min_SNR_patch': -10,
                 'min_r_val_thr_patch': 0.5,
                 'fitness_delta_min_patch': -5,
                 'update_background_components': True,  # whether to update the background components in the spatial phase
                 'low_rank_background': True,  # whether to update the background using a low rank approximation.
                 # In the False case all the nonzero elements of the background components
                 # are updated using hals (to be used with one background per patch)
                 'only_init_patch': True,
                 'is_dendrites': False,  # if dendritic. In this case you need to set init_method to sparse_nmf
                 'alpha_snmf': None,
                 'init_method': 'greedy_roi',
                 'filter_after_patch': False
                 }
#%%
# Per-dataset parameter sets; each dict is appended to params_movies.
params_movies = []
#%% neurofinder 03.00.test
params_movie = {'fname': '/mnt/ceph/neuro/labeling/neurofinder.03.00.test/images/final_map/Yr_d1_498_d2_467_d3_1_order_C_frames_2250_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/neurofinder.03.00.test/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
                'rf': 25,  # half-size of the patches in pixels. rf=25, patches are 50x50
                'stride_cnmf': 10,  # amount of overlap between the patches in pixels
                'K': 4,  # number of components per patch
                'gSig': [8, 8],  # expected half size of neurons
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 0,
                'fr': 7,
                'decay_time': 0.4,
                }
params_movies.append(params_movie.copy())
#%% neurofinder 04.00.test
params_movie = {'fname': '/mnt/ceph/neuro/labeling/neurofinder.04.00.test/images/final_map/Yr_d1_512_d2_512_d3_1_order_C_frames_3000_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/neurofinder.04.00.test/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 5,
                'gSig': [5, 5],
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 0,
                'fr': 8,
                'decay_time': 0.5,  # rough length of a transient
                }
params_movies.append(params_movie.copy())
#%% yuste
params_movie = {'fname': '/mnt/ceph/neuro/labeling/yuste.Single_150u/images/final_map/Yr_d1_200_d2_256_d3_1_order_C_frames_3000_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/yuste.Single_150u/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 15,
                'stride_cnmf': 10,
                'K': 8,
                'gSig': [5, 5],
                'fr': 10,
                'decay_time': 0.75,
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 0
                }
params_movies.append(params_movie.copy())
#%% neurofinder 00.00
params_movie = {'fname': '/mnt/ceph/neuro/labeling/neurofinder.00.00/images/final_map/Yr_d1_512_d2_512_d3_1_order_C_frames_2936_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/neurofinder.00.00/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 6,
                'gSig': [6, 6],
                'decay_time': 0.4,
                'fr': 8,
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 10
                }
params_movies.append(params_movie.copy())
#%% neurofinder 01.01
params_movie = {'fname': '/mnt/ceph/neuro/labeling/neurofinder.01.01/images/final_map/Yr_d1_512_d2_512_d3_1_order_C_frames_1825_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/neurofinder.01.01/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.9,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 6,
                'gSig': [6, 6],
                'decay_time': 1.4,
                'fr': 8,
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 2,
                }
params_movies.append(params_movie.copy())
#%% neurofinder 02.00
params_movie = {#'fname': '/opt/local/Data/labeling/neurofinder.02.00/Yr_d1_512_d2_512_d3_1_order_C_frames_8000_.mmap',
                'fname': '/mnt/ceph/neuro/labeling/neurofinder.02.00/images/final_map/Yr_d1_512_d2_512_d3_1_order_C_frames_8000_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/neurofinder.02.00/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 6,
                'gSig': [5, 5],
                'fr': 30,  # imaging rate in Hz
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 10,
                'decay_time': 0.3,
                }
params_movies.append(params_movie.copy())
#%% Sue Ann k53
params_movie = {#'fname': '/opt/local/Data/labeling/k53_20160530/Yr_d1_512_d2_512_d3_1_order_C_frames_116043_.mmap',
                'fname': '/mnt/ceph/neuro/labeling/k53_20160530/images/final_map/Yr_d1_512_d2_512_d3_1_order_C_frames_116043_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/k53_20160530/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 9,
                'gSig': [6, 6],
                'fr': 30,
                'decay_time': 0.3,
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 2,
                }
params_movies.append(params_movie.copy())
#%% J115
params_movie = {#'fname': '/opt/local/Data/labeling/J115_2015-12-09_L01_ELS/Yr_d1_463_d2_472_d3_1_order_C_frames_90000_.mmap',
                'fname': '/mnt/ceph/neuro/labeling/J115_2015-12-09_L01_ELS/images/final_map/Yr_d1_463_d2_472_d3_1_order_C_frames_90000_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/J115_2015-12-09_L01_ELS/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 7,
                'gSig': [7, 7],
                'fr': 30,
                'decay_time': 0.4,
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 2,
                }
params_movies.append(params_movie.copy())
#%% J123
params_movie = {#'fname': '/opt/local/Data/labeling/J123_2015-11-20_L01_0/Yr_d1_458_d2_477_d3_1_order_C_frames_41000_.mmap',
                'fname': '/mnt/ceph/neuro/labeling/J123_2015-11-20_L01_0/images/final_map/Yr_d1_458_d2_477_d3_1_order_C_frames_41000_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/J123_2015-11-20_L01_0/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 40,
                'stride_cnmf': 20,
                'K': 10,
                'gSig': [10, 10],
                'decay_time': 0.5,
                'fr': 30,
                'n_chunks': 10,
                'swap_dim': False,
                'crop_pix': 2,
                }
params_movies.append(params_movie.copy())
#%% k37
params_movie = {#'fname': '/opt/local/Data/labeling/k37_20160109_AM_150um_65mW_zoom2p2_00001_1-16/Yr_d1_512_d2_512_d3_1_order_C_frames_48000_.mmap',
                'fname': '/mnt/ceph/neuro/labeling/k37_20160109_AM_150um_65mW_zoom2p2_00001_1-16/images/final_map/Yr_d1_512_d2_512_d3_1_order_C_frames_48000_.mmap',
                'gtname': '/mnt/ceph/neuro/labeling/k37_20160109_AM_150um_65mW_zoom2p2_00001_1-16/regions/joined_consensus_active_regions.npy',
                'merge_thresh': 0.8,
                'rf': 20,
                'stride_cnmf': 10,
                'K': 5,
                'gSig': [6, 6],
                'fr': 30,
                'decay_time': 0.3,
                'n_chunks': 30,
                'swap_dim': False,
                'crop_pix': 8,
                }
params_movies.append(params_movie.copy())
#%%
def myfun(x):
    """Run constrained deconvolution on one argument tuple and return the
    denoised trace and inferred spikes (elements 0 and 5 of the result)."""
    from caiman.source_extraction.cnmf.deconvolution import constrained_foopsi
    result = constrained_foopsi(*x)
    return (result[0], result[5])
def myfun_dff(x):
    """Compute DF/F for one component and deconvolve it.

    `x` is a tuple (A column, C trace, background A, background C, residual
    trace, then the constrained_foopsi arguments nd0..nd3, p, method).
    Returns (denoised trace, spikes, DF/F trace).
    """
    from caiman.source_extraction.cnmf.deconvolution import constrained_foopsi
    # compute dff and then extract spikes
    aa, cc, b_gt, f_gt, yra, nd0, nd1, nd2, nd3, p, method = x
    # (removed a stray no-op list-expression statement that evaluated
    # [aa.T, cc[None,:], ...] and discarded the result)
    f_dff = cm.source_extraction.cnmf.utilities.detrend_df_f_auto(aa, b_gt, cc, f_gt, YrA = yra)
    dc = constrained_foopsi(f_dff[0], nd0, nd1, nd2, nd3, p, method)
    return (dc[0], dc[5], f_dff[0])
def fun_exc(x):
    """Convert event exceptionality into an SNR-like z-score per trace.

    `x` is a tuple (fluorescence trace(s), parameter dict with 'fr' and
    'decay_time'). Returns the negated normal quantile of the per-sample
    exceptionality probability.
    """
    from scipy.stats import norm
    from caiman.components_evaluation import compute_event_exceptionality
    fluo, param = x
    # np.int was removed in NumPy >= 1.24; the builtin int is equivalent.
    N_samples = np.ceil(param['fr'] * param['decay_time']).astype(int)
    ev = compute_event_exceptionality(np.atleast_2d(fluo), N=N_samples)
    return -norm.ppf(np.exp(np.array(ev[1]) / N_samples))
#%%
# Result accumulators filled by the per-dataset loop below.
all_perfs = []
all_rvalues = []
all_comp_SNR_raw = []
all_comp_SNR_delta = []
all_predictions = []
all_labels = []
all_results = dict()
# Run-control flags. NOTE: `reload` shadows the Python 2 builtin of the
# same name; when True the loop below skips re-running CNMF (see
# `if not reload:` further down).
reload = True
plot_on = False
save_on = False
skip_refinement = False
# Cluster backends for the patch and refinement phases.
backend_patch = 'local'
backend_refine = 'local'
n_processes = 24
n_pixels_per_process = 4000
block_size = 4000
num_blocks_per_run = 10
ALL_CCs = []
# Main per-dataset loop: for each movie either run the full CNMF pipeline
# (patch init -> refinement -> quality evaluation) or reload saved results,
# threshold spatial footprints, remove duplicate components, and score
# precision/recall/F1 against the manually curated ground truth.
for params_movie in np.array(params_movies)[:]:
    # params_movie['gnb'] = 3
    params_display = {
        'downsample_ratio': .2,
        'thr_plot': 0.8
    }
    # @params fname name of the movie
    fname_new = params_movie['fname']
    print(fname_new)
    # %% LOAD MEMMAP FILE
    # fname_new='Yr_d1_501_d2_398_d3_1_order_F_frames_369_.mmap'
    Yr, dims, T = cm.load_memmap(fname_new)
    d1, d2 = dims
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # TODO: needinfo
    Y = np.reshape(Yr, dims + (T,), order='F')
    m_images = cm.movie(images)
    # TODO: show screenshot 10
    #%%
    # stop any running cluster and (re)start one for the patch phase
    try:
        cm.stop_server()
        dview.terminate()
    except:
        print('No clusters to stop')
    c, dview, n_processes = setup_cluster(
        backend=backend_patch, n_processes=n_processes, single_thread=False)
    print('Not RELOADING')
    #%%
    if not reload:
        # %% RUN ANALYSIS
        # %% correlation image
        if (plot_on or save_on):
            if False and m_images.shape[0]<10000:
                Cn = m_images.local_correlations(swap_dim = params_movie['swap_dim'], frames_per_chunk = 1500)
                Cn[np.isnan(Cn)] = 0
            else:
                Cn = np.array(cm.load(('/'.join(params_movie['gtname'].split('/')[:-2]+['projections','correlation_image.tif'])))).squeeze()
        #pl.imshow(Cn, cmap='gray', vmax=.95)
        check_nan = False
        # %% some parameter settings
        # order of the autoregressive fit to calcium imaging in general one (slow gcamps) or two (fast gcamps fast scanning)
        p = global_params['p']
        # merging threshold, max correlation allowed
        merge_thresh = params_movie['merge_thresh']
        # half-size of the patches in pixels. rf=25, patches are 50x50
        rf = params_movie['rf']
        # amounpl.it of overlap between the patches in pixels
        stride_cnmf = params_movie['stride_cnmf']
        # number of components per patch
        K = params_movie['K']
        # if dendritic. In this case you need to set init_method to sparse_nmf
        is_dendrites = global_params['is_dendrites']
        # iinit method can be greedy_roi for round shapes or sparse_nmf for denritic data
        init_method = global_params['init_method']
        # expected half size of neurons
        gSig = params_movie['gSig']
        # this controls sparsity
        alpha_snmf = global_params['alpha_snmf']
        # frame rate of movie (even considering eventual downsampling)
        final_frate = params_movie['fr']
        if global_params['is_dendrites'] == True:
            # NOTE(review): 'is not' does an identity comparison against a
            # str literal; '!=' is almost certainly what was meant -- confirm
            if global_params['init_method'] is not 'sparse_nmf':
                raise Exception('dendritic requires sparse_nmf')
            if global_params['alpha_snmf'] is None:
                raise Exception('need to set a value for alpha_snmf')
        # %% Extract spatial and temporal components on patches
        t1 = time.time()
        # TODO: todocument
        # TODO: warnings 3
        # NOTE(review): typo 'Strating' in the runtime message left untouched
        print('Strating CNMF')
        cnm = cnmf.CNMF(n_processes=n_processes, nb_patch = 1, k=K, gSig=gSig, merge_thresh=params_movie['merge_thresh'], p=global_params['p'],
                    dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
                    method_init=init_method, alpha_snmf=alpha_snmf, only_init_patch=global_params['only_init_patch'],
                    gnb=global_params['gnb'], method_deconvolution='oasis',border_pix = params_movie['crop_pix'],
                    low_rank_background = global_params['low_rank_background'], rolling_sum = True, check_nan=check_nan,
                    block_size=block_size, num_blocks_per_run=num_blocks_per_run)
        cnm = cnm.fit(images)
        A_tot = cnm.A
        C_tot = cnm.C
        YrA_tot = cnm.YrA
        b_tot = cnm.b
        f_tot = cnm.f
        sn_tot = cnm.sn
        print(('Number of components:' + str(A_tot.shape[-1])))
        t_patch = time.time() - t1
        # restart the cluster for the refinement phase
        try:
            dview.terminate()
        except:
            pass
        c, dview, n_processes = cm.cluster.setup_cluster(
            backend=backend_refine, n_processes=n_processes, single_thread=False)
        # %%
        if plot_on:
            pl.figure()
            crd = plot_contours(A_tot, Cn, thr=params_display['thr_plot'])
        # %% rerun updating the components to refine
        t1 = time.time()
        cnm = cnmf.CNMF(n_processes=n_processes, k=A_tot.shape, gSig=gSig, merge_thresh=merge_thresh, p=p, dview=dview, Ain=A_tot,
                    Cin=C_tot, b_in = b_tot,
                    f_in=f_tot, rf=None, stride=None, method_deconvolution='oasis',gnb = global_params['gnb'],
                    low_rank_background = global_params['low_rank_background'],
                    update_background_components = global_params['update_background_components'], check_nan=check_nan,
                    n_pixels_per_process=n_pixels_per_process, block_size= block_size, num_blocks_per_run=num_blocks_per_run, skip_refinement=skip_refinement)
        cnm = cnm.fit(images)
        t_refine = time.time() - t1
        A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
        # %% again recheck quality of components, stricter criteria
        t1 = time.time()
        idx_components, idx_components_bad, comp_SNR, r_values, predictionsCNN = estimate_components_quality_auto(
            Y, A, C, b, f, YrA, params_movie['fr'], params_movie['decay_time'], gSig, dims,
            dview = dview, min_SNR=global_params['min_SNR'],
            r_values_min = global_params['rval_thr'], r_values_lowest = global_params['min_rval_thr_rejected'],
            Npeaks = global_params['Npeaks'], use_cnn = True, thresh_cnn_min = global_params['min_cnn_thresh'],
            thresh_cnn_lowest = global_params['max_classifier_probability_rejected'],
            thresh_fitness_delta = global_params['max_fitness_delta_accepted'], gSig_range = None)
        # [list(np.add(i,a)) for i,a in zip(range(0,1),[gSig]*3)]
        t_eva_comps = time.time() - t1
        print(' ***** ')
        print((len(C)))
        print((len(idx_components)))
        #%%
        # all_matches = False
        # filter_SNR = False
        # load the curated ground-truth masks/traces matched to this movie
        gt_file = os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[1][:-4] + 'match_masks.npz')
        with np.load(gt_file, encoding = 'latin1') as ld:
            print(ld.keys())
            # locals().update(ld)
            C_gt = ld['C_gt']
            YrA_gt = ld['YrA_gt']
            b_gt = ld['b_gt']
            f_gt = ld['f_gt']
            A_gt = scipy.sparse.coo_matrix(ld['A_gt'][()])
            dims_gt = (ld['d1'],ld['d2'])
        # t1 = time.time()
        # evaluate the ground-truth components with the same criteria
        idx_components_gt, idx_components_bad_gt, comp_SNR_gt, r_values_gt, predictionsCNN_gt = estimate_components_quality_auto(
            Y, A_gt, C_gt, b_gt, f_gt, YrA_gt, params_movie['fr'], params_movie['decay_time'], gSig, dims_gt,
            dview = dview, min_SNR=global_params['min_SNR'],
            r_values_min = global_params['rval_thr'], r_values_lowest = global_params['min_rval_thr_rejected'],
            Npeaks = global_params['Npeaks'], use_cnn = True, thresh_cnn_min = global_params['min_cnn_thresh'],
            thresh_cnn_lowest = global_params['max_classifier_probability_rejected'],
            thresh_fitness_delta = global_params['max_fitness_delta_accepted'], gSig_range = None)
        # [list(np.add(i,a)) for i,a in zip(range(0,1),[gSig]*3)]
        print(' ***** ')
        print((len(C)))
        print((len(idx_components_gt)))
        #%%
        min_size_neuro = 3*2*np.pi
        max_size_neuro = (2*gSig[0])**2*np.pi
        # binarize ground-truth spatial footprints for mask matching
        A_gt_thr = cm.source_extraction.cnmf.spatial.threshold_components(A_gt.tocsc()[:,:], dims_gt, medw=None, thr_method='max', maxthr=0.2, nrgthr=0.99, extract_cc=True,
                         se=None, ss=None, dview=dview)
        A_gt_thr = A_gt_thr > 0
        # size_neurons_gt = A_gt_thr.sum(0)
        # idx_size_neuro_gt = np.where((size_neurons_gt>min_size_neuro) & (size_neurons_gt<max_size_neuro) )[0]
        # #A_thr = A_thr[:,idx_size_neuro]
        print(A_gt_thr.shape)
        #%%
        # binarize the inferred spatial footprints the same way
        A_thr = cm.source_extraction.cnmf.spatial.threshold_components(A.tocsc()[:,:].toarray(), dims, medw=None, thr_method='max', maxthr=0.2, nrgthr=0.99, extract_cc=True,
                         se=None, ss=None, dview=dview)
        A_thr = A_thr > 0
        size_neurons = A_thr.sum(0)
        # idx_size_neuro = np.where((size_neurons>min_size_neuro) & (size_neurons<max_size_neuro) )[0]
        # A_thr = A_thr[:,idx_size_neuro]
        print(A_thr.shape)
        # %% save results
        if save_on:
            np.savez(os.path.join(os.path.split(fname_new)[0],
                     os.path.split(fname_new)[1][:-4] +
                     'results_analysis_new_dev.npz'),
                     Cn=Cn, fname_new = fname_new,
                     A=A, C=C, b=b, f=f, YrA=YrA, sn=sn, d1=d1,
                     d2=d2, idx_components=idx_components,
                     idx_components_bad=idx_components_bad,
                     comp_SNR=comp_SNR, r_values=r_values,
                     predictionsCNN = predictionsCNN,
                     params_movie = params_movie,
                     A_gt=A_gt, A_gt_thr=A_gt_thr, A_thr=A_thr,
                     C_gt=C_gt, f_gt=f_gt, b_gt=b_gt, YrA_gt=YrA_gt,
                     idx_components_gt=idx_components_gt,
                     idx_components_bad_gt=idx_components_bad_gt,
                     comp_SNR_gt=comp_SNR_gt,r_values_gt=r_values_gt,
                     predictionsCNN_gt=predictionsCNN_gt,
                     t_patch=t_patch, t_eva_comps=t_eva_comps,
                     t_refine=t_refine)
        # %%
        if plot_on:
            pl.subplot(1, 2, 1)
            crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot'])
            pl.subplot(1, 2, 2)
            crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=params_display['thr_plot'])
        # %%
        # TODO: needinfo
        view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components]), C[idx_components, :], b, f, dims[0], dims[1],
                         YrA=YrA[idx_components, :], img=Cn)
        # %%
        view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, idx_components_bad]), C[idx_components_bad, :], b, f, dims[0],
                         dims[1], YrA=YrA[idx_components_bad, :], img=Cn)
    #%% LOAD DATA
    else:
        #%%
        params_display = {
            'downsample_ratio': .2,
            'thr_plot': 0.8
        }
        fn_old = fname_new
        #analysis_file = '/mnt/ceph/neuro/jeremie_analysis/neurofinder.03.00.test/Yr_d1_498_d2_467_d3_1_order_C_frames_2250_._results_analysis.npz'
        # =============================================================================
        # print(os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[1][:-4] + 'results_analysis_after_merge_5.npz'))
        # =============================================================================
        # restore all previously saved arrays into module-level names
        with np.load(os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[1][:-4] + 'results_analysis_new_dev.npz'), encoding = 'latin1') as ld:
            ld1 = {k:ld[k] for k in ['d1','d2','A','params_movie','fname_new',
                             'C','idx_components','idx_components_bad','Cn','b','f','YrA',
                             'sn','comp_SNR','r_values','predictionsCNN','A_gt','A_gt_thr',
                             'A_thr','C_gt','b_gt','f_gt','YrA_gt','idx_components_gt',
                             'idx_components_bad_gt', 'comp_SNR_gt', 'r_values_gt', 'predictionsCNN_gt',
                             't_eva_comps', 't_patch', 't_refine']}
            # NOTE(review): works here only because this runs at module scope,
            # where locals() is globals(); inside a function it would be a no-op
            locals().update(ld1)
        dims_off = d1,d2
        # unwrap the 0-d object arrays produced by np.savez
        A = scipy.sparse.coo_matrix(A[()])
        A_gt = A_gt[()]
        dims = (d1,d2)
        try:
            params_movie = params_movie[()]
        except:
            pass
        gSig = params_movie['gSig']
        fname_new = fn_old
        print([A.shape])
        print([ t_patch, t_refine,t_eva_comps])
        print(t_eva_comps+t_patch + t_refine)
        # print(C_gt.shape)
        # print(Y.shape)
        # comp_SNR_trace = np.concatenate(dview.map(fun_exc,[[fluor, params_movie] for fluor in (C_gt+YrA_gt)]),0)
        # pl.hist(np.sum(comp_SNR_trace>2.5,0)/len(comp_SNR_trace),15)
        # np.savez(fname_new.split('/')[-4]+'_act.npz', comp_SNR_trace=comp_SNR_trace, C_gt=C_gt, A_gt=A_gt, dims=(d1,d2))
        # NO F_dff = cm.source_extraction.cnmf.utilities.detrend_df_f_auto(A_gt, b_gt, C_gt, f_gt, YrA=None, dview=dview)
        # #NO S_gt_old = dview.map(myfun,[[fluor, None, None, None, None, 2, 'oasis'] for fluor in F_dff])
        # S_gt = dview.map(myfun_dff,[[aa.T, cc[None,:], b_gt, f_gt, yra[None,:], None, None, None, None, 2, 'oasis'] for aa,cc,yra in zip(A_gt.tocsc().T,C_gt, YrA_gt)])
        # S = [s[1] for s in S_gt]
        # C = [s[0] for s in S_gt]
        # F = [s[2] for s in S_gt]
        #
        #
        # np.savez(fname_new.split('/')[-4]+'_spikes_DFF.npz', S=S, C=C, A_gt=A_gt, F = F, dims=(d1,d2))
        # continue
        # gt_file = os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[1][:-4] + 'match_masks.npz')
        # with np.load(gt_file, encoding = 'latin1') as ld:
        # print(ld.keys())
        # # locals().update(ld)
        # C_gt = ld['C_gt']
        # YrA_gt = ld['YrA_gt']
        # b_gt = ld['b_gt']
        # f_gt = ld['f_gt']
        # A_gt = scipy.sparse.coo_matrix(ld['A_gt'][()])
        # dims_gt = (ld['d1'],ld['d2'])
        #
        # t1 = time.time()
        # idx_components_gt, idx_components_bad_gt, comp_SNR_gt, r_values_gt, predictionsCNN_gt = estimate_components_quality_auto(
        # Y, A_gt, C_gt, b_gt, f_gt, YrA_gt, params_movie['fr'], params_movie['decay_time'], gSig, dims_gt,
        # dview = dview, min_SNR=global_params['min_SNR'],
        # r_values_min = global_params['rval_thr'], r_values_lowest = global_params['min_rval_thr_rejected'],
        # Npeaks = global_params['Npeaks'], use_cnn = True, thresh_cnn_min = global_params['min_cnn_thresh'],
        # thresh_cnn_lowest = global_params['max_classifier_probability_rejected'],
        # thresh_fitness_delta = global_params['max_fitness_delta_accepted'], gSig_range = None)
        # # [list(np.add(i,a)) for i,a in zip(range(0,1),[gSig]*3)]
        #
        #
        # t_eva_comps = time.time() - t1
        # print(' ***** ')
        # print((len(C)))
        # print((len(idx_components_gt)))
    #%%
    if plot_on:
        pl.figure()
        crd = plot_contours(A_gt_thr, Cn, thr=.99)
    #%%
    if plot_on:
        # NOTE(review): final_crops/predictions are not defined in this part
        # of the file -- confirm they are produced by an earlier cell
        threshold = .95
        from caiman.utils.visualization import matrixMontage
        pl.figure()
        matrixMontage(np.squeeze(final_crops[np.where(predictions[:,1]>=threshold)[0]]))
        pl.figure()
        matrixMontage(np.squeeze(final_crops[np.where(predictions[:,0]>=threshold)[0]]))
        #%
        cm.movie(final_crops).play(gain=3,magnification = 6,fr=5)
        #%
        cm.movie(np.squeeze(final_crops[np.where(predictions[:,1]>=0.95)[0]])).play(gain=2., magnification = 8,fr=5)
        #%
        cm.movie(np.squeeze(final_crops[np.where(predictions[:,0]>=0.95)[0]])
                 ).play(gain=4., magnification = 8,fr=5)
    #%%
    # print(C_gt.shape)
    # try:
    # np.savez(os.path.join(os.path.split(fname_new)[0],
    # os.path.split(fname_new)[1][:-4] +
    # 'results_analysis_after_merge_5.npz'),
    # Cn=Cn, fname_new = fname_new,
    # A=A, C=C, b=b, f=f, YrA=YrA, sn=sn, d1=d1,
    # d2=d2, idx_components=idx_components,
    # idx_components_bad=idx_components_bad,
    # comp_SNR=comp_SNR, r_values=r_values,
    # predictionsCNN = predictionsCNN,
    # params_movie = params_movie,
    # A_gt=A_gt, A_gt_thr=A_gt_thr, A_thr=A_thr,
    # C_gt=C_gt, f_gt=f_gt, b_gt=b_gt, YrA_gt=YrA_gt,
    # idx_components_gt=idx_components_gt,
    # idx_components_bad_gt=idx_components_bad_gt,
    # comp_SNR_gt=comp_SNR_gt,r_values_gt=r_values_gt,
    # predictionsCNN_gt=predictionsCNN_gt,
    # t_patch=t_patch, t_eva_comps=t_eva_comps,
    # t_refine=t_refine)
    # except:
    #
    # np.savez(os.path.join(os.path.split(fname_new[()])[0].decode("utf-8"),
    # os.path.split(fname_new[()])[1][:-4].decode("utf-8")
    # + 'results_analysis_after_merge_5.npz'), Cn=Cn,
    # fname_new = fname_new,
    # A=A, C=C, b=b, f=f, YrA=YrA, sn=sn, d1=d1, d2=d2,
    # idx_components=idx_components,
    # idx_components_bad=idx_components_bad,
    # comp_SNR=comp_SNR, r_values=r_values,
    # predictionsCNN=predictionsCNN,
    # params_movie=params_movie,
    # A_gt=A_gt, A_gt_thr=A_gt_thr, A_thr=A_thr,
    # C_gt=C_gt, f_gt=f_gt, b_gt=b_gt, YrA_gt=YrA_gt,
    # idx_components_gt=idx_components_gt,
    # idx_components_bad_gt=idx_components_bad_gt,
    # comp_SNR_gt=comp_SNR_gt, r_values_gt=r_values_gt,
    # predictionsCNN_gt=predictionsCNN_gt,
    # t_patch=t_patch, t_eva_comps=t_eva_comps,
    # t_refine=t_refine)
    #
    #%%
    # masks_thr_bin = apply_magic_wand(A, gSig, np.array(dims), A_thr=A_thr, coms=None,
    # dview=dview, min_frac=0.7, max_frac=1.2)
    # #%%
    # masks_gt_thr_bin = apply_magic_wand(A_gt, gSig, np.array(dims), A_thr=(A_gt>0).toarray(), coms=None,
    # dview=dview, min_frac=0.7, max_frac=1.2,roughness=2, zoom_factor=1,
    # center_range=2)
    #%%
    # re-select accepted components from the stored quality metrics
    thresh_fitness_raw_reject = 0.5
    # global_params['max_classifier_probability_rejected'] = .1
    gSig_range = [list(np.add(i,a)) for i,a in zip(range(0,1),[gSig]*1)]
    # global_params['max_classifier_probability_rejected'] = .2
    idx_components, idx_components_bad, cnn_values =\
        select_components_from_metrics(
            A, dims, gSig, r_values, comp_SNR , global_params['rval_thr'],
            global_params['min_rval_thr_rejected'], global_params['min_SNR'],
            thresh_fitness_raw_reject, global_params['min_cnn_thresh'],
            global_params['max_classifier_probability_rejected'], True, gSig_range)
    print((len(idx_components)))
    #%%
    if plot_on:
        pl.figure()
        pl.subplot(1, 2, 1)
        crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot'], vmax = 0.85)
        pl.subplot(1, 2, 2)
        crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=params_display['thr_plot'], vmax = 0.85)
    #%% detect duplicates
    # remove components that substantially overlap a higher-scoring one
    thresh_subset = 0.6
    duplicates, indices_keep, indices_remove, D, overlap = detect_duplicates_and_subsets(
        A_thr[:,idx_components].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1., predictionsCNN[idx_components], r_values = None,
        dist_thr=0.1, min_dist = 10,thresh_subset = thresh_subset)
    idx_components_cnmf = idx_components.copy()
    if len(duplicates) > 0:
        if plot_on:
            pl.figure()
            pl.subplot(1,3,1)
            pl.imshow(A_thr[:,idx_components].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])[np.unique(duplicates).flatten()].sum(0))
            pl.colorbar()
            pl.subplot(1,3,2)
            pl.imshow(A_thr[:,idx_components].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])[np.array(indices_keep)[:]].sum(0))
            pl.colorbar()
            pl.subplot(1,3,3)
            pl.imshow(A_thr[:,idx_components].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])[np.array(indices_remove)[:]].sum(0))
            pl.colorbar()
            pl.pause(1)
        idx_components_cnmf = np.delete(idx_components_cnmf,indices_remove)
    print('Duplicates CNMF:'+str(len(duplicates)))
    # same duplicate detection on the ground-truth footprints
    duplicates_gt, indices_keep_gt, indices_remove_gt, D_gt, overlap_gt = detect_duplicates_and_subsets(
        A_gt_thr.reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1., predictions = None, r_values = None,
        dist_thr=0.1, min_dist = 10,thresh_subset = thresh_subset)
    idx_components_gt = np.arange(A_gt_thr.shape[-1])
    if len(duplicates_gt) > 0:
        if plot_on:
            pl.figure()
            pl.subplot(1,3,1)
            pl.imshow(A_gt_thr.reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])[np.array(duplicates_gt).flatten()].sum(0))
            pl.colorbar()
            pl.subplot(1,3,2)
            pl.imshow(A_gt_thr.reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])[np.array(indices_keep_gt)[:]].sum(0))
            pl.colorbar()
            pl.subplot(1,3,3)
            pl.imshow(A_gt_thr.reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])[np.array(indices_remove_gt)[:]].sum(0))
            pl.colorbar()
            pl.pause(1)
        idx_components_gt = np.delete(idx_components_gt,indices_remove_gt)
    print('Duplicates gt:'+str(len(duplicates_gt)))
    #%%
    remove_small_neurons = False
    if remove_small_neurons:
        min_size_neuro = 3*2*np.pi
        max_size_neuro = (2*gSig[0])**2*np.pi
        size_neurons_gt = A_gt_thr.sum(0)
        idx_size_neuro_gt = np.where((size_neurons_gt>min_size_neuro) & (size_neurons_gt<max_size_neuro) )[0]
        idx_components_gt = np.intersect1d(idx_components_gt,idx_size_neuro_gt)
        size_neurons = A_thr.sum(0)
        idx_size_neuro = np.where((size_neurons>min_size_neuro) & (size_neurons<max_size_neuro) )[0]
        idx_components_cnmf = np.intersect1d(idx_components_cnmf,idx_size_neuro)
    plot_results = plot_on
    if plot_results:
        pl.figure(figsize=(30,20))
        Cn_ = Cn
    else:
        Cn_ = None
    # idx_components = range(len(r_values))
    # tp_gt, tp_comp, fn_gt, fp_comp, performance_cons_off =\
    # nf_match_neurons_in_binary_masks(
    # masks_gt_thr_bin*1.,#A_gt_thr[:,:].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1.,
    # masks_thr_bin[idx_components]*1., #A_thr[:,idx_components].reshape([dims[0],dims[1],-1],order = 'F')\
    # thresh_cost=.8, min_dist = 10,
    # print_assignment= False, plot_results=plot_results, Cn=Cn, labels=['GT','Offline'])
    # match binarized inferred masks against binarized ground-truth masks
    tp_gt, tp_comp, fn_gt, fp_comp, performance_cons_off =\
        nf_match_neurons_in_binary_masks(
            A_gt_thr[:,idx_components_gt].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1.,
            A_thr[:,idx_components_cnmf].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1.,
            thresh_cost=.8, min_dist = 10,
            print_assignment= False, plot_results=plot_results, Cn=Cn_, labels=['GT','Offline'])
    pl.rcParams['pdf.fonttype'] = 42
    font = {'family' : 'Arial',
            'weight' : 'regular',
            'size'   : 20}
    pl.rc('font', **font)
    print({a:b.astype(np.float16) for a,b in performance_cons_off.items()})
    # accumulate per-dataset metrics for the summary figures below
    performance_cons_off['fname_new'] = fname_new
    all_perfs.append(performance_cons_off)
    all_rvalues.append(r_values)
    all_comp_SNR_raw.append( comp_SNR)
    all_predictions.append(predictionsCNN )
    lbs = np.zeros(len(r_values))
    lbs[tp_comp] = 1
    all_labels.append(lbs)
    print({pf['fname_new'].split('/')[-4]+pf['fname_new'].split('/')[-2]:pf['f1_score'] for pf in all_perfs})
    performance_tmp = performance_cons_off.copy()
    performance_tmp['comp_SNR_gt'] = comp_SNR_gt
    performance_tmp['comp_SNR'] = comp_SNR
    performance_tmp['A_gt_thr'] = A_gt_thr
    performance_tmp['A_thr'] = A_thr
    performance_tmp['A'] = A
    performance_tmp['A_gt'] = A_gt
    performance_tmp['C'] = C
    performance_tmp['C_gt'] = C_gt
    performance_tmp['YrA'] = YrA
    performance_tmp['YrA_gt'] = YrA_gt
    performance_tmp['predictionsCNN'] = predictionsCNN
    performance_tmp['predictionsCNN_gt'] = predictionsCNN_gt
    performance_tmp['r_values'] = r_values
    performance_tmp['r_values_gt'] = r_values_gt
    performance_tmp['idx_components_gt'] = idx_components_gt
    performance_tmp['idx_components_cnmf'] = idx_components_cnmf
    performance_tmp['tp_gt'] = tp_gt
    performance_tmp['tp_comp'] = tp_comp
    performance_tmp['fn_gt'] = fn_gt
    performance_tmp['fp_comp'] = fp_comp
    # correlation between matched GT and inferred temporal traces
    ALL_CCs.append([scipy.stats.pearsonr(a,b)[0] for a, b in zip(C_gt[idx_components_gt[tp_gt]],C[idx_components_cnmf[tp_comp]])])
    # performance_SNRs = []
    # performance_SNRs.append(performance_tmp)
    # pltt = False
    # print('*')
    # for snrs in range(1,10):
    # print('******************************')
    ## idx_components_gt_filt = np.delete(idx_components_gt,np.where(comp_SNR_gt[idx_components_gt]<snrs))
    ## idx_components_cnmf_filt = np.delete(idx_components_cnmf,np.where(comp_SNR[idx_components_cnmf]<snrs))
    # idx_components_gt_filt = np.delete(idx_components_gt,np.where(comp_SNR_gt[idx_components_gt]<snrs))
    # idx_components_cnmf_filt = np.delete(idx_components_cnmf,np.where(comp_SNR[idx_components_cnmf]<snrs))
    # print(len(idx_components_cnmf_filt))
    # print(len(idx_components_gt_filt))
    #
    # tp_gt, tp_comp, fn_gt, fp_comp, performance_tmp =\
    # nf_match_neurons_in_binary_masks(
    # A_gt_thr[:,idx_components_gt_filt].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1.,
    # A_thr[:,idx_components_cnmf_filt].reshape([dims[0],dims[1],-1],order = 'F').transpose([2,0,1])*1.,
    # thresh_cost=.8, min_dist = 10,
    # print_assignment= False, plot_results=pltt, Cn=Cn, labels=['GT','Offline'])
    #
    # performance_tmp['SNR'] = snrs
    # performance_tmp['idx_components_gt_filt'] = idx_components_gt_filt
    # performance_tmp['idx_components_cnmf_filt'] = idx_components_cnmf_filt
    # performance_tmp['tp_gt'] = tp_gt
    # performance_tmp['tp_comp'] = tp_comp
    # performance_tmp['fn_gt'] = fn_gt
    # performance_tmp['fp_comp'] = fp_comp
    #
    #
    # performance_SNRs.append(performance_tmp.copy())
    all_results[fname_new.split('/')[-4]+fname_new.split('/')[-2]] = performance_tmp
    print([ t_patch, t_refine,t_eva_comps])
    print(t_eva_comps+t_patch + t_refine)
#%% CREATE FIGURES
# Figure scratchpad (disabled by the if False guard): contour overlays of
# matched/unmatched components and montages of the top-scoring footprints.
if False:
    #%%
    pl.figure()
    pl.subplot(1,2,1)
    a1 = plot_contours(A.tocsc()[:, idx_components_cnmf[tp_comp]], Cn, thr=0.9, colors='yellow', vmax = 0.75, display_numbers=False,cmap = 'gray')
    a2 = plot_contours(A_gt.tocsc()[:, idx_components_gt[tp_gt]], Cn, thr=0.9, vmax = 0.85, colors='r', display_numbers=False,cmap = 'gray')
    pl.subplot(1,2,2)
    a3 = plot_contours(A.tocsc()[:, idx_components_cnmf[fp_comp]], Cn, thr=0.9, colors='yellow', vmax = 0.75, display_numbers=False,cmap = 'gray')
    a4 = plot_contours(A_gt.tocsc()[:, idx_components_gt[fn_gt]], Cn, thr=0.9, vmax = 0.85, colors='r', display_numbers=False,cmap = 'gray')
    #%%
    pl.figure()
    pl.ylabel('spatial components')
    # pick five high-scoring (CNN prediction) matched components
    idx_comps_high_r = [np.argsort(predictionsCNN[idx_components_cnmf[tp_comp]])[[-6,-5,-4,-3,-2]]]
    idx_comps_high_r_cnmf = idx_components_cnmf[tp_comp][idx_comps_high_r]
    idx_comps_high_r_gt = idx_components_gt[tp_gt][idx_comps_high_r]
    images_nice = (A.tocsc()[:,idx_comps_high_r_cnmf].toarray().reshape(dims+(-1,),order = 'F')).transpose(2,0,1)
    images_nice_gt = (A_gt.tocsc()[:,idx_comps_high_r_gt].toarray().reshape(dims+(-1,),order = 'F')).transpose(2,0,1)
    # NOTE(review): np.int was removed in NumPy 1.24; astype(int) is the safe spelling
    cms = np.array([scipy.ndimage.center_of_mass(img) for img in images_nice]).astype(np.int)
    # 30x30 pixel crops centered on each component's center of mass
    images_nice_crop = [img[cm_[0]-15:cm_[0]+15,cm_[1]-15:cm_[1]+15] for cm_,img in zip(cms,images_nice)]
    images_nice_crop_gt = [img[cm_[0]-15:cm_[0]+15,cm_[1]-15:cm_[1]+15] for cm_,img in zip(cms,images_nice_gt)]
    indexes = [1,3,5,7,9,2,4,6,8,10]
    count = 0
    for img in images_nice_crop:
        pl.subplot(5,2,indexes[count])
        pl.imshow(img)
        pl.axis('off')
        count += 1
    for img in images_nice_crop_gt:
        pl.subplot(5,2,indexes[count])
        pl.imshow(img)
        pl.axis('off')
        count += 1
    #%%
    pl.figure()
    traces_gt = C_gt[idx_comps_high_r_gt]# + YrA_gt[idx_comps_high_r_gt]
    traces_cnmf = C[idx_comps_high_r_cnmf]# + YrA[idx_comps_high_r_cnmf]
    # normalize each trace to its maximum before plotting
    traces_gt/=np.max(traces_gt,1)[:,None]
    traces_cnmf /=np.max(traces_cnmf,1)[:,None]
    pl.plot(scipy.signal.decimate(traces_cnmf,10,1).T-np.arange(5)*1,'y')
    pl.plot(scipy.signal.decimate(traces_gt,10,1).T-np.arange(5)*1,'k', linewidth = .5 )
#%% mmap timing
# Benchmark memory-mapping: concatenate a raw tif stack into a single
# C-order memmap and time the operation.
import glob
try:
    dview.terminate()
except:
    print('No clusters to stop')
# NOTE(review): 'backend' is not defined anywhere in this section (earlier
# cells use backend_patch / backend_refine) -- confirm it is set elsewhere
# in the file, otherwise this raises NameError
c, dview, n_processes = setup_cluster(
    backend=backend, n_processes=n_processes, single_thread=False)
t1 = time.time()
ffllss = list(glob.glob('/opt/local/Data/Sue/k53/orig_tifs/*.tif')[:])
ffllss.sort()
print(ffllss)
fname_new = cm.save_memmap(ffllss, base_name='memmap_', order='C',
                           border_to_0=0, dview = dview, n_chunks = 80)  # exclude borders
t2 = time.time() - t1
#%%
# Persist / restore the accumulated per-dataset performance dictionary.
if False:
    np.savez('/mnt/home/agiovann/Dropbox/FiguresAndPapers/PaperCaiman/all_results_Jan_2018.npz',all_results = all_results)
    # allow_pickle=True is required to read back object (dict) arrays on
    # NumPy >= 1.16.3, where it became False by default
    with np.load('/mnt/home/agiovann/Dropbox/FiguresAndPapers/PaperCaiman/all_results_Jan_2018.npz', allow_pickle=True) as ld:
        # np.savez wraps the dict in a 0-d object array; '[()]' unwraps it
        # back to a plain dict (same idiom used elsewhere in this script)
        all_results = ld['all_results'][()]
#%%
# Inspect the consensus-labeler comparison files for each dataset folder.
f1s = []
names = []
# NOTE(review): 'folders_out' is not defined in this part of the file --
# confirm it is set by an earlier cell
for folder_out in folders_out[:1]:
    projection_img_median = folder_out + '/projections/median_projection.tif'
    projection_img_correlation = folder_out + '/projections/correlation_image.tif'
    folder_in = folder_out + '/regions'
    print('********' + folder_out)
    # print the metric keys available for the first labeler entry
    with np.load(folder_in + '/comparison_labelers_consensus.npz', encoding='latin1') as ld:
        pf = ld['performance_all'][()]
        print(pf[list(pf.keys())[0]].keys())
#%%
from matplotlib.pyplot import cm as cmap
pl.figure()
color=cmap.jet(np.linspace(0,1,10))
i = 0
legs = []
all_ys = []
SNRs = np.arange(0,10)
pl.subplot(1,3,1)
for k,fl_results in all_results.items():
print(k)
nm = k[:]
nm = nm.replace('neurofinder','NF')
nm = nm.replace('final_map','')
nm = nm.replace('.','')
nm = nm.replace('Data','')
idx_components_gt = fl_results['idx_components_gt']
idx_components_cnmf = fl_results['idx_components_cnmf']
tp_gt = fl_results['tp_gt']
tp_comp = fl_results['tp_comp']
fn_gt = fl_results['fn_gt']
fp_comp = fl_results['fp_comp']
comp_SNR = fl_results['comp_SNR']
comp_SNR_gt = fl_results['comp_SNR_gt']
snr_gt = comp_SNR_gt[idx_components_gt[tp_gt]]
snr_gt_fn = comp_SNR_gt[idx_components_gt[fn_gt]]
snr_cnmf = comp_SNR[idx_components_cnmf[tp_comp]]
snr_cnmf_fp = comp_SNR[idx_components_cnmf[fp_comp]]
all_results_fake, all_results_OR, all_results_AND = precision_snr(snr_gt, snr_gt_fn, snr_cnmf, snr_cnmf_fp, SNRs)
all_ys.append(all_results_fake[:,-1])
pl.fill_between(SNRs,all_results_OR[:,-1],all_results_AND[:,-1], color=color[i], alpha = .1)
pl.plot(SNRs,all_results_fake[:,-1], '.-',color=color[i])
# pl.plot(x[::1]+np.random.normal(scale=.07,size=10),y[::1], 'o',color=color[i])
pl.ylim([0.5,1])
legs.append(nm[:7])
i += 1
# break
pl.plot(SNRs,np.mean(all_ys,0),'k--', alpha=1, linewidth = 2)
pl.legend(legs+['average'], fontsize=10)
pl.xlabel('SNR threshold')
pl.ylabel('F1 SCORE')
#%
# Detail panels for the k53 dataset: GT-vs-CaImAn SNR scatter and
# precision/recall/F1 as a function of the SNR threshold.
i = 0
legs = []
for k,fl_results in all_results.items():
    x = []
    y = []
    if 'k53' in k:
        idx_components_gt = fl_results['idx_components_gt']
        idx_components_cnmf = fl_results['idx_components_cnmf']
        tp_gt = fl_results['tp_gt']
        tp_comp = fl_results['tp_comp']
        fn_gt = fl_results['fn_gt']
        fp_comp = fl_results['fp_comp']
        comp_SNR = fl_results['comp_SNR']
        comp_SNR_gt = fl_results['comp_SNR_gt']
        snr_gt = comp_SNR_gt[idx_components_gt[tp_gt]]
        snr_gt_fn = comp_SNR_gt[idx_components_gt[fn_gt]]
        snr_cnmf = comp_SNR[idx_components_cnmf[tp_comp]]
        snr_cnmf_fp = comp_SNR[idx_components_cnmf[fp_comp]]
        all_results_fake, all_results_OR, all_results_AND = precision_snr(snr_gt, snr_gt_fn, snr_cnmf, snr_cnmf_fp, SNRs)
        # column indices into the precision_snr result arrays
        prec_idx = 0
        recall_idx = 1
        f1_idx = 2
        pl.subplot(1,3,2)
        # matched pairs: GT SNR vs CaImAn SNR; unmatched jittered at zero
        pl.scatter(snr_gt,snr_cnmf,color='k', alpha = .15)
        pl.scatter(snr_gt_fn,np.random.normal(scale = .25, size=len(snr_gt_fn)),color='g', alpha = .15)
        pl.scatter(np.random.normal(scale = .25, size=len(snr_cnmf_fp)),snr_cnmf_fp,color='g', alpha = .15)
        pl.fill_between([20,40],[-2,-2],[40,40], alpha = .05, color='r')
        pl.fill_between([-2,40],[20,20],[40,40], alpha = .05 ,color='b')
        pl.xlabel('SNR GT')
        pl.ylabel('SNR CaImAn')
        pl.subplot(1,3,3)
        pl.fill_between(SNRs,all_results_OR[:,prec_idx],all_results_AND[:,prec_idx], color='b', alpha = .1)
        pl.plot(SNRs,all_results_fake[:,prec_idx], '.-',color='b')
        pl.fill_between(SNRs,all_results_OR[:,recall_idx],all_results_AND[:,recall_idx], color='r', alpha = .1)
        pl.plot(SNRs,all_results_fake[:,recall_idx], '.-',color= 'r')
        pl.fill_between(SNRs,all_results_OR[:,f1_idx],all_results_AND[:,f1_idx], color='g', alpha = .1)
        pl.plot(SNRs,all_results_fake[:,f1_idx], '.-',color='g')
        pl.legend(['precision','recall','f-1 score'], fontsize = 10)
        pl.xlabel('SNR threshold')
#%% performance on desktop
# Timing comparison across platforms (cluster / workstation / laptop /
# online): total execution time vs dataset size, plus per-phase breakdowns.
# Each array holds one entry per benchmark dataset (NaN = not measured).
import pylab as plt
plt.close('all')
import numpy as np
plt.rcParams['pdf.fonttype'] = 42
# NOTE(review): this font dict is defined but never applied here
# (no plt.rc('font', **font) call) -- confirm whether that was intended
font = {'family' : 'Arial',
        'weight' : 'regular',
        'size'   : 20}
t_mmap = dict()          # memory-mapping time (s) per platform
t_patch = dict()         # patch-wise initialization time (s)
t_refine = dict()        # refinement time (s)
t_filter_comps = dict()  # component quality-filtering time (s)
# dataset sizes as log10(MB)
size = np.log10(np.array([2.1, 3.1,0.6,3.1,8.4,1.9,121.7,78.7,35.8,50.3])*1000)
# number of components per dataset (a duplicated identical assignment
# of this line was removed)
components= np.array([368,935,476,1060,1099,1387,1541,1013,398,1064])
t_mmap['cluster'] = np.array([np.nan,41,np.nan,np.nan,109,np.nan,561,378,135,212])
t_patch['cluster'] = np.array([np.nan,46,np.nan,np.nan,92,np.nan,1063,469,142,372])
t_refine['cluster'] = np.array([np.nan,225,np.nan,np.nan,256,np.nan,1065,675,265,422])
t_filter_comps['cluster'] = np.array([np.nan,7,np.nan,np.nan,11,np.nan,143,77,30,57])
t_mmap['desktop'] = np.array([25,41,11,41,135,23,690,510,176,163])
t_patch['desktop'] = np.array([21,43,16,48,85,45,2150,949,316,475])
t_refine['desktop'] = np.array([105,205,43,279,216,254,1749,837,237,493])
t_filter_comps['desktop'] = np.array([3,5,2,5,9,7,246,81,36,38])
t_mmap['laptop'] = np.array([4.7,27,3.6,18,144,11,731,287,125,248])
t_patch['laptop'] = np.array([58,84,47,77,174,85,2398,1587,659,1203])
t_refine['laptop'] = np.array([195,321,87,236,414,354,5129,3087,807,1550])
t_filter_comps['laptop'] = np.array([5,10,5,7,15,11,719,263,74,100])
t_mmap['online'] = np.array([18.98, 85.21458578, 40.50961256, 17.71901989, 85.23642993, 30.11493444, 34.09690762, 18.95380235, 10.85061121, 31.97082043])
t_patch['online'] = np.array([75.73,266.81324172, 332.06756997, 114.17053413, 267.06141853, 147.59935951, 3297.18628764, 2573.04009032, 578.88080835, 1725.74687123])
t_refine['online'] = np.array([12.41, 91.77891779, 84.74378371, 31.84973955, 89.29527831, 25.1676743 , 1689.06246471, 1282.98535109, 61.20671248, 322.67962313])
t_filter_comps['online'] = np.array([0,0, 0, 0, 0, 0, 0, 0, 0, 0])
# panel 1: total time vs size, marker area proportional to component count
pl.subplot(1,4,1)
for key in ['cluster','desktop', 'laptop','online']:
    # (a discarded no-op np.log10(...) expression was removed here)
    plt.scatter((size),np.log10(t_mmap[key]+t_patch[key]+t_refine[key]+t_filter_comps[key]),s=np.array(components)/10)
    plt.xlabel('size log_10(MB)')
    plt.ylabel('time log_10(s)')
# dashed line: acquisition time assuming 31.45 MB/s
plt.plot((np.sort(size)),np.log10((np.sort(10**size))/31.45),'--.k')
plt.legend(['acquisition-time','cluster (112 CPUs)','workstation (24 CPUs)', 'laptop (6 CPUs)','online (6 CPUs)'])
pl.title('Total execution time')
pl.xlim([3.8,5.2])
pl.ylim([2.35,4.2])
# panels 2-4: stacked per-phase bars for each platform
counter=2
for key in ['cluster','laptop','online']:
    pl.subplot(1,4,counter)
    counter+=1
    if counter == 3:
        pl.title('Time per phase (cluster)')
        plt.ylabel('time (10^3 s)')
    elif counter == 2:
        pl.title('Time per phase (workstation)')
    else:
        pl.title('Time per phase (online)')
    plt.bar((size),(t_mmap[key]), width = 0.12, bottom = 0)
    plt.bar((size),(t_patch[key]), width = 0.12, bottom = (t_mmap[key]))
    plt.bar((size),(t_refine[key]), width = 0.12, bottom = (t_mmap[key]+t_patch[key]))
    plt.bar((size),(t_filter_comps[key]), width = 0.12, bottom = (t_mmap[key]+t_patch[key]+t_refine[key]))
    plt.xlabel('size log_10(MB)')
    if counter == 5:
        plt.legend(['Initialization','track activity','update shapes'])
    else:
        plt.legend(['mem mapping','patch init','refine sol','quality filter','acquisition time'])
    plt.plot((np.sort(size)),(10**np.sort(size))/31.45,'--k')
    pl.xlim([3.6,5.2])
#%% performance labelers and caiman
# Compare F1-scores of the CaImAn batch/online pipelines against four human
# labelers (L1-L4) on the nine benchmark datasets listed in `names`.
import pylab as plt
plt.figure()
# Dataset identifiers; order matches the per-dataset score lists below.
names = ['0300.T',
         '0400.T',
         'YT',
         '0000',
         '0200',
         '0101',
         'k53',
         'J115',
         'J123']
f1s = dict()
# F1-scores per dataset; np.nan marks datasets a labeler did not annotate.
f1s['batch'] = [0.77777,0.67,0.7623,0.72391,0.778739,0.7731,0.76578,0.77386,0.6783]
f1s['online'] = [0.76,0.678,0.783,0.721,0.769,0.725,0.818,0.805,0.803]
f1s['L1'] = [np.nan,np.nan,0.78,np.nan,0.89,0.8,0.89,np.nan,0.85]
f1s['L2'] = [0.9,0.69,0.9,0.92,0.87,0.89,0.92,0.93,0.83]
f1s['L3'] = [0.85,0.75,0.82,0.83,0.84,0.78,0.93,0.94,0.9]
f1s['L4'] = [0.78,0.87,0.79,0.87,0.82,0.75,0.83,0.83,0.91]
# Rows: L1, L2, L3, L4, batch, online; columns: the nine datasets.
all_of = ((np.vstack([f1s['L1'],f1s['L2'],f1s['L3'],f1s['L4'],f1s['batch'],f1s['online']])))
for i in range(6):
    # Jittered scatter of the nine per-dataset scores for source i ...
    pl.plot(i+np.random.random(9)*.2, all_of[i,:],'.')
    # ... plus a horizontal bar at the mean (nanmean skips unannotated sets).
    pl.plot([i-.5, i+.5], [np.nanmean(all_of[i,:])]*2,'k')
plt.xticks(range(6), ['L1','L2','L3','L4','batch','online'], rotation=45)
pl.ylabel('F1-score')
#%%
# Same jitter plot as the previous cell, restricted to the four human
# labelers. NOTE(review): relies on `f1s` defined in the previous cell.
some_of = ((np.vstack([f1s['L1'],f1s['L2'],f1s['L3'],f1s['L4']])))
for i in range(4):
    pl.plot(i+np.random.random(9)*.2, some_of[i,:],'.')
    pl.plot([i-.5, i+.5], [np.nanmean(some_of[i,:])]*2,'k')
plt.xticks(range(4), ['L1','L2','L3','L4'], rotation=45)
pl.ylabel('F1-score')
#%% check correlation against gounrd truth
# (sic: "ground truth") Compare correlation coefficients of online vs
# offline (batch) results against the consensus annotation.
pl.rcParams['pdf.fonttype'] = 42  # embed TrueType fonts so saved PDFs stay editable
with np.load('/mnt/home/agiovann/Dropbox/FiguresAndPapers/PaperCaiman/ALL_CORRELATIONS_ONLINE_CONSENSUS.npz') as ld:
    xcorr_online = ld['ALL_CCs']
# NOTE(review): the next line reads `xcorr_offline` BEFORE it is assigned (it
# is only loaded in the following `with` block), so as written it raises
# NameError unless a stale value survives from an earlier interactive run.
# It was probably meant to reorder `xcorr_online` — confirm the intended
# dataset ordering before relying on this cell.
xcorr_offline = list(np.array(xcorr_offline[[0,1,3,4,5,2,6,7,8,9]]))
with np.load('/mnt/home/agiovann/Dropbox/FiguresAndPapers/PaperCaiman/ALL_CORRELATIONS_OFFLINE_CONSENSUS.npz') as ld:
    xcorr_offline = ld['ALL_CCs']
names = ['0300.T',
         '0400.T',
         'YT',
         '0000',
         '0200',
         '0101',
         'k53',
         'J115',
         'J123']
pl.subplot(1,2,1)
# Throwaway histograms: only the bin counts are reused below to build
# empirical CDFs; the figure is closed again before plotting them.
pl.hist(np.concatenate(xcorr_online),bins = np.arange(0,1,.01))
pl.hist(np.concatenate(xcorr_offline),bins = np.arange(0,1,.01))
a = pl.hist(np.concatenate(xcorr_online[:]),bins = np.arange(0,1,.01))
a1 = pl.hist(np.concatenate(xcorr_online[:6]),bins = np.arange(0,1,.01))
# NOTE(review): slicing differs between a1 and a2 — `[:6]` above selects the
# first six datasets BEFORE concatenation, `[6:]` below drops the first six
# concatenated values AFTER; one of the two is probably unintended.
a2 = pl.hist(np.concatenate(xcorr_online)[6:],bins = np.arange(0,1,.01))
a3 = pl.hist(np.concatenate(xcorr_offline)[:],bins = np.arange(0,1,.01))
# Empirical cumulative distributions from the normalized bin counts.
a_on_1 = np.cumsum(a2[0]/a2[0].sum())
a_on_2 = np.cumsum(a1[0]/a1[0].sum())
a_on = np.cumsum(a[0]/a[0].sum())
a_off = np.cumsum(a3[0]/a3[0].sum())
pl.close()
pl.plot(np.arange(0.01,1,.01),a_on);pl.plot(np.arange(0.01,1,.01),a_off)
pl.legend(['online', 'offline'])
pl.xlabel('correlation (r)')
pl.ylabel('cumulative probability')
pl.subplot(1,2,2)
# Per-dataset medians and interquartile ranges of the correlation values.
medians_on = []
iqrs_on = []
medians_off = []
iqrs_off = []
for onn,off in zip(xcorr_online,xcorr_offline):
    medians_on.append(np.median(onn))
    iqrs_on.append(np.percentile(onn,[25,75]))
    medians_off.append(np.median(off))
    iqrs_off.append(np.percentile(off,[25,75]))
#%% activity MITYA
# Distribution of per-frame population activity (fraction of neurons whose
# SNR trace exceeds a threshold) across the *_act.npz result files.
import glob
import numpy as np
import pylab as pl
from scipy.signal import savgol_filter
ffllss = glob.glob('/mnt/home/agiovann/SOFTWARE/CaImAnOld/*_act.npz')
ffllss.sort()
pl.figure("Distribution of population activity (fluorescence)")
print(ffllss)
for thresh in np.arange(1.5,4.5,1):
    count = 1
    pl.pause(0.1)
    for ffll in ffllss:
        with np.load(ffll) as ld:
            print(ld.keys())
            # HACK: dumps every array stored in the npz into this scope
            # (provides `comp_SNR_trace` below); explicit names would be safer.
            locals().update(ld)
            pl.subplot(3,4,count)
            count += 1
            # Histogram of the fraction of components above `thresh` per frame.
            a = np.histogram(np.sum(comp_SNR_trace>thresh,0)/len(comp_SNR_trace),100)
            # Smooth the non-empty bins (Savitzky-Golay, window 5, order 3).
            pl.plot(np.log10(a[1][1:][a[0]>0]),savgol_filter(a[0][a[0]>0]/comp_SNR_trace.shape[-1],5,3))
            pl.title(ffll.split('/')[-1])
# One legend entry per threshold value iterated above.
pl.legend(np.arange(1.5,4.5,1))
pl.xlabel('fraction active')
pl.ylabel('fraction of frames')
#%% remove outliers
def remove_outliers(data, num_iqr = 5):
    """Return a copy of ``data`` with extreme values removed.

    Values farther than ``num_iqr`` interquartile ranges below the 25th
    percentile or above the 75th percentile are dropped. Both bounds are
    strict inequalities, so a constant array (IQR == 0) comes back empty.

    Parameters
    ----------
    data : np.ndarray
        1-D array of samples.
    num_iqr : float, optional
        Width of the acceptance band, in units of the interquartile range.

    Returns
    -------
    np.ndarray
        Filtered copy of ``data``.
    """
    # Fix: dropped the unused local `median = np.median(data)` that the
    # original computed and never read.
    q75, q25 = np.percentile(data, [75 ,25])
    iqr = q75 - q25
    min_ = q25 - (iqr*num_iqr)
    max_ = q75 + (iqr*num_iqr)
    new_data = data[(data>min_) & (data<max_)]
    return new_data
#%%
from caiman.components_evaluation import mode_robust
from scipy.stats import norm # quantile function
def estimate_stats(log_values, n_bins = 30, delta = 3 ):
    """Fit a Gaussian around the mode of a histogram of ``log_values``.

    Builds an ``n_bins`` histogram, takes the most populated bin as the mean
    estimate, and infers sigma by inverting the normal CDF on the probability
    mass found within ``delta`` bins of the mode.

    Returns a tuple ``(bins, pdf, mean, sigma)`` where ``bins`` are the right
    bin edges and ``pdf`` is the density-normalized histogram.
    """
    a = np.histogram(log_values,bins=n_bins)
    mu_est = np.argmax(a[0])  # index of the modal (most populated) bin
    bins = a[1][1:]  # right edges of the bins
    bin_size = np.diff(a[1][1:])[0]  # uniform bin width
    pdf = a[0]/a[0].sum()/bin_size  # normalize counts so the pdf integrates to 1
    # compute the area around the mean and from that infer the standard deviation (see pic on phone June 7 2018)
    # NOTE(review): the two slices below are offset by one (`bins` starts at
    # mu_est-delta-1, `pdf` at mu_est-delta), and a negative start index wraps
    # around when the mode lies within `delta+1` bins of the left edge —
    # confirm both are intended.
    area_PDF = np.sum(np.diff(bins[mu_est-delta-1:mu_est+delta])*pdf[mu_est-delta:mu_est+delta])
    alpha = delta*bin_size  # half-width of the integration window
    # Invert Phi(alpha/sigma) = (area_PDF + 1) / 2 for sigma (norm is
    # scipy.stats.norm, imported in this script above).
    sigma = alpha / norm.ppf( (area_PDF+1)/2 )
    mean = bins[mu_est]-bin_size/2  # center of the modal bin
    return bins, pdf, mean, sigma
#%% spikes MITYA population synchrony
# For each *_spikes_DFF.npz result file, plot the distribution of one of:
# population synchrony, per-neuron firing rates, or pairwise correlations,
# together with a Gaussian fit from estimate_stats() (defined above).
import glob
import numpy as np
import pylab as pl
from scipy.signal import savgol_filter
from sklearn.preprocessing import normalize
ffllss = glob.glob('/mnt/home/agiovann/SOFTWARE/CaImAn/use_cases/CaImAnpaper/*_spikes_DFF.npz')
# mode = 'inter_spike_interval'
mode = 'synchrony' # 'synchrony', 'firing_rates'
mode = 'firing_rates' # 'synchrony', 'firing_rates'
# NOTE(review): the assignments above are dead; only this last one is active.
mode = 'correlation'
pl.figure("Distribution of " + mode + " (spikes)", figsize=(10,10))
ffllss.sort()
print(ffllss)
for thresh in [0]:
    count = 1
    for ffll in ffllss[:]:
        with np.load(ffll) as ld:
            print(ffll.split('/')[-1])
            print(ld['S'].shape)
            # HACK: injects every array in the npz (notably S, presumably the
            # deconvolved spike matrix, neurons x frames — TODO confirm) into
            # this scope; explicit names would be safer.
            locals().update(ld)
            if mode == 'synchrony':
                pl.subplot(3,4,count)
                S = np.maximum(S,0)
                if True:
                    # normalize by the global maximum (the else-branch is dead)
                    S /= np.max(S)
                else:
                    S /= np.max(S,1)[:,None] # normalized to maximum activity of each neuron
                S[np.isnan(S)] = 0
                # Per-frame summed activity; drop outliers and near-zero frames
                # before fitting a Gaussian in log10 space.
                activ_neur_fraction = np.sum(S,0)
                activ_neur_fraction = remove_outliers(activ_neur_fraction)
                activ_neur_fraction = np.delete(activ_neur_fraction,np.where(activ_neur_fraction<=1e-4)[0])
                activ_neur_fraction_log = np.log10(activ_neur_fraction )
                bins, pdf, mean, sigma = estimate_stats(activ_neur_fraction_log, n_bins = 30, delta = 3 )
                pl.plot(bins,norm.pdf(bins,loc = mean, scale = sigma))
                pl.plot(bins,pdf,'-.')
                pl.legend(['fit','data'])
                pl.xlabel('fraction of max firing')
                pl.ylabel('probability')
            elif mode == 'firing_rates':
                pl.subplot(3,4,count)
                # Mean activity per neuron; prune outliers/silent cells, fit
                # a Gaussian in log10 space.
                fir_rate = np.mean(S,1)
                fir_rate = remove_outliers(fir_rate, num_iqr = 30)
                fir_rate = np.delete(fir_rate,np.where(fir_rate==0)[0])
                fir_rate_log = np.log10(fir_rate)
                bins, pdf, mean, sigma = estimate_stats(fir_rate_log, n_bins = 20, delta = 3)
                pl.plot(bins,norm.pdf(bins,loc = mean, scale = sigma))
                pl.plot(bins,pdf,'-.')
                pl.legend(['fit','data'])
                pl.xlabel('firing rate')
                pl.ylabel('number of neurons')
            elif mode == 'correlation':
                pl.subplot(3,4,count)
                # Pairwise correlations via unit-normalized rows: S @ S.T gives
                # cosine similarity; keep the strict upper triangle (unique pairs).
                S = normalize(S,axis=1)
                cc = S.dot(S.T)
                cc = cc[np.triu_indices(cc.shape[0], k = 1)]
                cc[np.isnan(cc)] = 0
                cc = np.delete(cc, np.where(cc<1e-5)[0])
                cc_log = np.log10(cc)
                bins, pdf, mean, sigma = estimate_stats(cc_log, n_bins = 30, delta = 3)
                pl.plot(bins,norm.pdf(bins,loc = mean, scale = sigma))
                pl.plot(bins,pdf,'-.')
                pl.legend(['fit','data'])
                pl.xlabel('corr. coeff.')
                pl.ylabel('count')
            count += 1
            # pl.plot(np.log10(a[1][1:][a[0]>0]),savgol_filter(a[0][a[0]>0]/S.shape[-1],5,3))
            pl.title(ffll.split('/')[-1][:18])
            pl.pause(0.1)
pl.tight_layout(w_pad = 0.05)
pl.savefig('/mnt/xfs1/home/agiovann/ceph/LinuxDropbox/Dropbox (Simons Foundation)/Lab Meetings & Pres/MITYA JUNE 2018/'+mode+'.pdf')
#%%
# Inter-spike-interval histograms (log10 of the frame gaps between spikes),
# plotted for every 20th neuron. Relies on `S` from the previous cell.
isis = ([np.histogram(np.log10(np.diff(np.where(s>0)[0]))) for s in S])
for isi in isis[::20]:
    pl.plot((isi[1][1:][isi[0]>0]),savgol_filter(isi[0][isi[0]>0]/S.shape[-1],5,3))
|
tuturto/breakfast | refs/heads/master | ui.py | 1 | # Copyright 2012 Tuukka Turto
#
# This file is part of breakfast, a behave demo.
#
# breakfast is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# breakfast is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with breakfast. If not, see <http://www.gnu.org/licenses/>.
class BreakfastUI(object):
    """No-op user interface for the breakfast demo.

    Acts as a sink for notifications from the breakfast logic; subclasses
    may override the hooks below to actually present output to a user.
    """

    def __init__(self):
        """Initialize the UI stub."""
        super(BreakfastUI, self).__init__()

    def eggs_boiled(self, amount, hardness):
        """Hook called when *amount* eggs have been boiled to *hardness*."""
        pass

    def error(self, message):
        """Hook called with a human-readable error *message*."""
        pass
|
Dino0631/RedRain-Bot | refs/heads/develop | cogs/lib/pip/_vendor/html5lib/filters/whitespace.py | 353 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    """Collapse runs of whitespace in the token stream to single spaces.

    Content nested inside ``pre``, ``textarea`` or any RCDATA element is
    passed through untouched.
    """

    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Nesting depth of elements whose whitespace must be preserved.
        depth = 0
        for token in base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag" and (
                    depth or token["name"] in self.spacePreserveElements):
                depth += 1
            elif token_type == "EndTag" and depth:
                depth -= 1
            elif not depth and token_type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above to not introduce spaces where there were not
                token["data"] = " "
            elif not depth and token_type == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace every run of whitespace in *text* with a single space."""
    collapsed = SPACES_REGEX.sub(' ', text)
    return collapsed
|
acenario/Payable | refs/heads/master | lib/python2.7/site-packages/django/db/migrations/operations/special.py | 374 | from __future__ import unicode_literals
from django.db import router
from .base import Operation
class SeparateDatabaseAndState(Operation):
    """
    Takes two lists of operations - ones that will be used for the database,
    and ones that will be used for the state change. This allows operations
    that don't support state change to have it applied, or have operations
    that affect the state or not the database, or so on.
    """

    serialization_expand_args = ['database_operations', 'state_operations']

    def __init__(self, database_operations=None, state_operations=None):
        self.database_operations = database_operations or []
        self.state_operations = state_operations or []

    def deconstruct(self):
        # Only emit the kwargs that were actually supplied so the serialized
        # migration stays minimal.
        kwargs = {}
        if self.database_operations:
            kwargs['database_operations'] = self.database_operations
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        # Project-state changes come exclusively from state_operations.
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # We calculate state separately in here since our state functions aren't useful
        # Each database operation advances its own from_state/to_state pair by
        # applying that operation's state change to a fresh clone.
        for database_operation in self.database_operations:
            to_state = from_state.clone()
            database_operation.state_forwards(app_label, to_state)
            database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
            from_state = to_state

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # We calculate state separately in here since our state functions aren't useful
        base_state = to_state
        # Walk the database operations in reverse. For each one: to_state is
        # the base state with every *earlier* operation's state change
        # replayed; from_state is the base state plus this operation's own
        # state change.
        for pos, database_operation in enumerate(reversed(self.database_operations)):
            to_state = base_state.clone()
            for dbop in self.database_operations[:-(pos + 1)]:
                dbop.state_forwards(app_label, to_state)
            from_state = base_state.clone()
            database_operation.state_forwards(app_label, from_state)
            database_operation.database_backwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        return "Custom state/database change combination"
class RunSQL(Operation):
    """
    Runs some raw SQL. A reverse SQL statement may be provided.

    Also accepts a list of operations that represent the state change effected
    by this SQL change, in case it's custom column/table creation/deletion.
    """
    # Sentinel meaning "run nothing in this direction" while keeping the
    # operation reversible (compared against in _run_sql below).
    noop = ''

    def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None):
        self.sql = sql
        self.reverse_sql = reverse_sql
        self.state_operations = state_operations or []
        self.hints = hints or {}

    def deconstruct(self):
        # Only serialize the optional arguments that were actually supplied.
        kwargs = {
            'sql': self.sql,
        }
        if self.reverse_sql is not None:
            kwargs['reverse_sql'] = self.reverse_sql
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        # Reversible only when reverse SQL was provided (noop '' counts).
        return self.reverse_sql is not None

    def state_forwards(self, app_label, state):
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Honor database routers: skip entirely when this app/database
        # combination is not allowed to migrate.
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self._run_sql(schema_editor, self.sql)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_sql is None:
            raise NotImplementedError("You cannot reverse this operation")
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self._run_sql(schema_editor, self.reverse_sql)

    def describe(self):
        return "Raw SQL operation"

    def _run_sql(self, schema_editor, sqls):
        # `sqls` is either a list/tuple whose items are SQL strings or
        # (sql, params) 2-tuples, or a single string that is split into
        # individual statements by the backend before execution.
        if isinstance(sqls, (list, tuple)):
            for sql in sqls:
                params = None
                if isinstance(sql, (list, tuple)):
                    elements = len(sql)
                    if elements == 2:
                        sql, params = sql
                    else:
                        raise ValueError("Expected a 2-tuple but got %d" % elements)
                schema_editor.execute(sql, params=params)
        elif sqls != RunSQL.noop:
            statements = schema_editor.connection.ops.prepare_sql_script(sqls)
            for statement in statements:
                schema_editor.execute(statement, params=None)
class RunPython(Operation):
    """
    Runs Python code in a context suitable for doing versioned ORM operations.
    """
    # Python code cannot be introspected, so it can never be reduced to SQL.
    reduces_to_sql = False

    def __init__(self, code, reverse_code=None, atomic=True, hints=None):
        self.atomic = atomic
        # The forward callable is mandatory.
        if not callable(code):
            raise ValueError("RunPython must be supplied with a callable")
        self.code = code
        # The reverse callable is optional, but must be callable when given.
        if reverse_code is not None and not callable(reverse_code):
            raise ValueError("RunPython must be supplied with callable arguments")
        self.reverse_code = reverse_code
        self.hints = hints or {}

    def deconstruct(self):
        # Only serialize the optional arguments that differ from the defaults.
        kwargs = {'code': self.code}
        if self.reverse_code is not None:
            kwargs['reverse_code'] = self.reverse_code
        if self.atomic is not True:
            kwargs['atomic'] = self.atomic
        if self.hints:
            kwargs['hints'] = self.hints
        return (self.__class__.__name__, [], kwargs)

    @property
    def reversible(self):
        return self.reverse_code is not None

    def state_forwards(self, app_label, state):
        # RunPython objects have no state effect. To add some, combine this
        # with SeparateDatabaseAndState.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if not router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            return
        # We now execute the Python code in a context that contains a 'models'
        # object, representing the versioned models as an app registry.
        # We could try to override the global cache, but then people will still
        # use direct imports, so we go with a documentation approach instead.
        self.code(from_state.apps, schema_editor)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_code is None:
            raise NotImplementedError("You cannot reverse this operation")
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self.reverse_code(from_state.apps, schema_editor)

    def describe(self):
        return "Raw Python operation"

    @staticmethod
    def noop(apps, schema_editor):
        """Do-nothing callable usable as either direction's code."""
        return None
|
altanawealth/django-otp | refs/heads/master | django_otp/models.py | 1 | from __future__ import absolute_import, division, print_function, unicode_literals
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import six
class DeviceManager(models.Manager):
    """
    The :class:`~django.db.models.Manager` object installed as
    ``Device.objects``.
    """

    def devices_for_user(self, user, confirmed=None):
        """
        Returns a queryset for all devices of this class that belong to the
        given user.

        :param user: The user.
        :type user: :class:`~django.contrib.auth.models.User`

        :param confirmed: If ``None``, all matching devices are returned.
            Otherwise, this can be any true or false value to limit the query
            to confirmed or unconfirmed devices, respectively.
        """
        qs = self.model.objects.filter(user=user)
        if confirmed is not None:
            qs = qs.filter(confirmed=bool(confirmed))
        return qs
class Device(models.Model):
    """
    Abstract base model for a :term:`device` attached to a user. Plugins must
    subclass this to define their OTP models.

    .. _unsaved_device_warning:

    .. warning::

        OTP devices are inherently stateful. For example, verifying a token is
        logically a mutating operation on the device, which may involve
        incrementing a counter or otherwise consuming a token. A device must be
        committed to the database before it can be used in any way.

    .. attribute:: user

        *ForeignKey*: Foreign key to your user model, as configured by
        :setting:`AUTH_USER_MODEL` (:class:`~django.contrib.auth.models.User`
        by default).

    .. attribute:: name

        *CharField*: A human-readable name to help the user identify their
        devices.

    .. attribute:: confirmed

        *BooleanField*: A boolean value that tells us whether this device has
        been confirmed as valid. It defaults to ``True``, but subclasses or
        individual deployments can force it to ``False`` if they wish to create
        a device and then ask the user for confirmation. As a rule, built-in
        APIs that enumerate devices will only include those that are confirmed.

    .. attribute:: objects

        A :class:`~django_otp.models.DeviceManager`.
    """
    user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'), help_text="The user that this device belongs to.")
    name = models.CharField(max_length=64, help_text="The human-readable name of this device.")
    confirmed = models.BooleanField(default=True, help_text="Is this device ready for use?")

    objects = DeviceManager()

    class Meta(object):
        # Never instantiated directly; concrete plugin models own the tables.
        abstract = True

    def __str__(self):
        # Python 2/3 compatibility shim: on Python 2, __str__ must return
        # bytes, so delegate to __unicode__ and encode.
        if six.PY3:
            return self.__unicode__()
        else:
            return self.__unicode__().encode('utf-8')

    def __unicode__(self):
        try:
            user = self.user
        except ObjectDoesNotExist:
            # The related user row may be absent; render as "name (None)".
            user = None

        return "{0} ({1})".format(self.name, user)

    @property
    def persistent_id(self):
        # Stable identifier combining the concrete class path with the pk,
        # e.g. "myapp.models.SomeDevice/42"; see from_persistent_id().
        return '{0}/{1}'.format(self.import_path, self.id)

    @property
    def import_path(self):
        # Dotted path of the concrete device class.
        return '{0}.{1}'.format(self.__module__, self.__class__.__name__)

    @classmethod
    def from_persistent_id(cls, path):
        """
        Loads a device from its persistent id::

            device == Device.from_persistent_id(device.persistent_id)
        """
        from . import import_class

        try:
            device_type, device_id = path.rsplit('/', 1)

            device_cls = import_class(device_type)
            device = device_cls.objects.get(id=device_id)
        except Exception:
            # Deliberately broad: a malformed path, an unimportable class or
            # a missing row all yield None rather than raising.
            device = None

        return device

    def is_interactive(self):
        """
        Returns ``True`` if this is an interactive device. The default
        implementation returns ``True`` if
        :meth:`~django_otp.models.Device.generate_challenge` has been
        overridden, but subclasses are welcome to provide smarter
        implementations.

        :rtype: bool
        """
        return not hasattr(self.generate_challenge, 'stub')

    def generate_challenge(self):
        """
        Generates a challenge value that the user will need to produce a token.
        This method is permitted to have side effects, such as transmitting
        information to the user through some other channel (email or SMS,
        perhaps). And, of course, some devices may need to commit the
        challenge to the database.

        :returns: A message to the user. This should be a string that fits
            comfortably in the template ``'OTP Challenge: {0}'``. This may
            return ``None`` if this device is not interactive.

        :rtype: string or ``None``

        :raises: Any :exc:`~exceptions.Exception` is permitted. Callers should
            trap ``Exception`` and report it to the user.
        """
        return None
    # Marker consumed by is_interactive(): its presence means the default
    # (non-interactive) implementation has not been overridden.
    generate_challenge.stub = True

    def verify_token(self, token):
        """
        Verifies a token. As a rule, the token should no longer be valid if
        this returns ``True``.

        :param string token: The OTP token provided by the user.

        :rtype: bool
        """
        return False
AntoineCezar/flockcontext | refs/heads/master | flockcontext/__init__.py | 1 | # -*- coding: utf-8 -*-
# Package metadata for flockcontext.
__author__ = 'Antoine Cezar'
__email__ = 'antoine@cezar.fr'
__version__ = '0.3.1'

# Public API: re-export the context managers from the submodules so callers
# can write ``from flockcontext import Flock``.
from .flock import Flock
from .flock_open import FlockOpen

# Names exported by ``from flockcontext import *``.
__all__ = [
    'Flock',
    'FlockOpen',
]
|
Kelfast/mamba-framework | refs/heads/master | mamba/test/dummy_app/__init__.py | 3 | # Application Package
|
jreinhardt/manual-labour | refs/heads/master | src/manuallabour/layouts/html_single/basic/__init__.py | 1 | # Manual labour - a library for step-by-step instructions
# Copyright (C) 2014 Johannes Reinhardt <jreinhardt@ist-dein-freund.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
"""
Basic layout for the HTML Single Page exporter
"""
|
CollabQ/CollabQ | refs/heads/master | .google_appengine/google/appengine/api/labs/__init__.py | 1333 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
seanli9jan/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/bijectors/cholesky_outer_product.py | 18 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CholeskyOuterProduct bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"CholeskyOuterProduct",
]
class CholeskyOuterProduct(bijector.Bijector):
  """Compute `g(X) = X @ X.T`; X is lower-triangular, positive-diagonal matrix.

  Note: the upper-triangular part of X is ignored (whether or not its zero).

  The surjectivity of g as a map from  the set of n x n positive-diagonal
  lower-triangular matrices to the set of SPD matrices follows immediately from
  executing the Cholesky factorization algorithm on an SPD matrix A to produce a
  positive-diagonal lower-triangular matrix L such that `A = L @ L.T`.

  To prove the injectivity of g, suppose that L_1 and L_2 are lower-triangular
  with positive diagonals and satisfy `A = L_1 @ L_1.T = L_2 @ L_2.T`. Then
    `inv(L_1) @ A @ inv(L_1).T = [inv(L_1) @ L_2] @ [inv(L_1) @ L_2].T = I`.
  Setting `L_3 := inv(L_1) @ L_2`, that L_3 is a positive-diagonal
  lower-triangular matrix follows from `inv(L_1)` being positive-diagonal
  lower-triangular (which follows from the diagonal of a triangular matrix being
  its spectrum), and that the product of two positive-diagonal lower-triangular
  matrices is another positive-diagonal lower-triangular matrix.

  A simple inductive argument (proceeding one column of L_3 at a time) shows
  that, if `I = L_3 @ L_3.T`, with L_3 being lower-triangular with positive-
  diagonal, then `L_3 = I`. Thus, `L_1 = L_2`, proving injectivity of g.

  #### Examples

  ```python
  bijector.CholeskyOuterProduct().forward(x=[[1., 0], [2, 1]])
  # Result: [[1., 2], [2, 5]], i.e., x @ x.T

  bijector.CholeskyOuterProduct().inverse(y=[[1., 2], [2, 5]])
  # Result: [[1., 0], [2, 1]], i.e., cholesky(y).
  ```

  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self, validate_args=False, name="cholesky_outer_product"):
    """Instantiates the `CholeskyOuterProduct` bijector.

    Args:
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
    # This bijector is parameter-free, so there are no graph parents to track.
    self._graph_parents = []
    self._name = name
    super(CholeskyOuterProduct, self).__init__(
        forward_min_event_ndims=2,
        validate_args=validate_args,
        name=name)

  def _forward(self, x):
    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(x, 2)
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(shape[-2], shape[-1])
      x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
    # For safety, explicitly zero-out the upper triangular part.
    x = array_ops.matrix_band_part(x, -1, 0)
    return math_ops.matmul(x, x, adjoint_b=True)

  def _inverse(self, y):
    # The Cholesky factor is exactly the positive-diagonal lower-triangular
    # X with y = X @ X.T (see the uniqueness argument in the class docstring).
    return linalg_ops.cholesky(y)

  def _forward_log_det_jacobian(self, x):
    # Let Y be a symmetric, positive definite matrix and write:
    #   Y = X X.T
    # where X is lower-triangular.
    #
    # Observe that,
    #   dY[i,j]/dX[a,b]
    #   = d/dX[a,b] { X[i,:] X[j,:] }
    #   = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
    #
    # To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
    # symmetric and X is lower-triangular, we need vectors of dimension:
    #   d = p (p + 1) / 2
    # where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
    #   k = { i (i + 1) / 2 + j   i>=j
    #       { undef               i<j
    # and assume zero-based indexes. When k is undef, the element is dropped.
    # Example:
    #           j      k
    #        0 1 2 3  /
    #    0 [ 0 . . . ]
    # i  1 [ 1 2 . . ]
    #    2 [ 3 4 5 . ]
    #    3 [ 6 7 8 9 ]
    # Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
    # slight abuse: k(i,j)=undef means the element is dropped.)
    #
    # We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
    # defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
    # In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
    # (1) j<=i<a thus i,j!=a.
    # (2) i=a>j  thus i,j!=a.
    #
    # Since the Jacobian is lower-triangular, we need only compute the product
    # of diagonal elements:
    #   d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
    #   = X[j,j] + I[i=j] X[i,j]
    #   = 2 X[j,j].
    # Since there is a 2 X[j,j] term for every lower-triangular element of X we
    # conclude:
    #   |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
    diag = array_ops.matrix_diag_part(x)

    # We now ensure diag is columnar. Eg, if `diag = [1, 2, 3]` then the output
    # is `[[1], [2], [3]]` and if `diag = [[1, 2, 3], [4, 5, 6]]` then the
    # output is unchanged.
    diag = self._make_columnar(diag)

    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(
          x, 2, message="Input must be a (batch of) matrix.")
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(
          shape[-2], shape[-1],
          message="Input must be a (batch of) square matrix.")
      # Assuming lower-triangular means we only need check diag>0.
      is_positive_definite = check_ops.assert_positive(
          diag, message="Input must be positive definite.")
      x = control_flow_ops.with_dependencies(
          [is_matrix, is_square, is_positive_definite], x)

    # Create a vector equal to: [p, p-1, ..., 2, 1].
    # Static shape is preferred; fall back to dynamic ops when the trailing
    # dimension is unknown at graph-construction time.
    if x.get_shape().ndims is None or x.get_shape().dims[-1].value is None:
      p_int = array_ops.shape(x)[-1]
      p_float = math_ops.cast(p_int, dtype=x.dtype)
    else:
      p_int = x.get_shape().dims[-1].value
      p_float = np.array(p_int, dtype=x.dtype.as_numpy_dtype)
    exponents = math_ops.linspace(p_float, 1., p_int)

    sum_weighted_log_diag = array_ops.squeeze(
        math_ops.matmul(math_ops.log(diag),
                        exponents[..., array_ops.newaxis]),
        axis=-1)
    fldj = p_float * np.log(2.) + sum_weighted_log_diag

    # We finally need to undo adding an extra column in non-scalar cases
    # where there is a single matrix as input.
    if x.get_shape().ndims is not None:
      if x.get_shape().ndims == 2:
        fldj = array_ops.squeeze(fldj, axis=-1)
      return fldj

    # Rank unknown statically: squeeze the synthetic column at runtime only
    # when the input was a single (rank-2) matrix.
    shape = array_ops.shape(fldj)
    maybe_squeeze_shape = array_ops.concat([
        shape[:-1],
        distribution_util.pick_vector(
            math_ops.equal(array_ops.rank(x), 2),
            np.array([], dtype=np.int32), shape[-1:])], 0)
    return array_ops.reshape(fldj, maybe_squeeze_shape)

  def _make_columnar(self, x):
    """Ensures non-scalar input has at least one column.

    Example:
      If `x = [1, 2, 3]` then the output is `[[1], [2], [3]]`.

      If `x = [[1, 2, 3], [4, 5, 6]]` then the output is unchanged.

      If `x = 1` then the output is unchanged.

    Args:
      x: `Tensor`.

    Returns:
      columnar_x: `Tensor` with at least two dimensions.
    """
    if x.get_shape().ndims is not None:
      if x.get_shape().ndims == 1:
        x = x[array_ops.newaxis, :]
      return x
    # Rank unknown statically: conditionally insert the axis at runtime.
    shape = array_ops.shape(x)
    maybe_expanded_shape = array_ops.concat([
        shape[:-1],
        distribution_util.pick_vector(
            math_ops.equal(array_ops.rank(x), 1),
            [1], np.array([], dtype=np.int32)),
        shape[-1:],
    ], 0)
    return array_ops.reshape(x, maybe_expanded_shape)
|
smmribeiro/intellij-community | refs/heads/master | python/testData/completion/reexportModules/a.after.py | 166 | import b
b.da<caret> |
Crystalnix/house-of-life-chromium | refs/heads/master | third_party/simplejson/encoder.py | 62 | """
Implementation of JSONEncoder
"""
import re
try:
from simplejson import _speedups
except ImportError:
_speedups = None
# Characters that must be replaced in non-ASCII-only string encoding.
# Fix: the range previously ended at \x19, leaving the control characters
# \x1a-\x1f unescaped in encode_basestring() output — invalid JSON, since the
# spec requires all of U+0000..U+001F to be escaped. Upstream simplejson uses
# \x1f here.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For ASCII-only output: also escape '/', '\\', '"' and everything outside
# the printable ASCII range.
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
# Map of characters to their JSON escape sequences.
ESCAPE_DCT = {
    # escape all forward slashes to prevent </script> attack
    '/': '\\/',
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Fill in \uXXXX escapes for the remaining control characters.
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')

def floatstr(o, allow_nan=True):
    """Return the JSON text for the float *o*.

    NaN and the infinities are rendered as the JavaScript-style constants
    ``NaN``, ``Infinity`` and ``-Infinity``; every other float is rendered
    with ``repr``. When *allow_nan* is false, the special values raise
    ``ValueError`` instead, since they are not valid JSON.
    """
    if o != o:  # NaN is the only float that compares unequal to itself
        special = 'NaN'
    elif o == INFINITY:
        special = 'Infinity'
    elif o == -INFINITY:
        special = '-Infinity'
    else:
        return repr(o)
    if allow_nan:
        return special
    raise ValueError("Out of range float values are not JSON compliant: %r"
                     % (o,))
def encode_basestring(s):
    """Return *s* as a double-quoted JSON string literal.

    Characters matched by ESCAPE are replaced with their escape sequences
    from ESCAPE_DCT; everything else is passed through unchanged.
    """
    return '"' + ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s) + '"'
def encode_basestring_ascii(s):
    """Return an ASCII-only, double-quoted JSON string literal for *s*.

    Characters outside printable ASCII (plus backslash, quote and slash)
    become ``\\uXXXX`` escapes; code points above the BMP are emitted as a
    UTF-16 surrogate pair.
    """
    def replace(match):
        ch = match.group(0)
        if ch in ESCAPE_DCT:
            return ESCAPE_DCT[ch]
        codepoint = ord(ch)
        if codepoint < 0x10000:
            return '\\u%04x' % (codepoint,)
        # Above the BMP: encode as a UTF-16 surrogate pair.
        codepoint -= 0x10000
        high = 0xd800 | ((codepoint >> 10) & 0x3ff)
        low = 0xdc00 | (codepoint & 0x3ff)
        return '\\u%04x\\u%04x' % (high, low)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
    # Prefer the C implementation when the _speedups extension imported above.
    encode_basestring_ascii = _speedups.encode_basestring_ascii
    _need_utf8 = True
except AttributeError:
    # _speedups is None (import failed): keep the pure-Python version.
    _need_utf8 = False
class JSONEncoder(object):
    """
    Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python | JSON |
    +===================+===============+
    | dict | object |
    +-------------------+---------------+
    | list, tuple | array |
    +-------------------+---------------+
    | str, unicode | string |
    +-------------------+---------------+
    | int, long, float | number |
    +-------------------+---------------+
    | True | true |
    +-------------------+---------------+
    | False | false |
    +-------------------+---------------+
    | None | null |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # NOTE: Python 2 implementation -- it relies on basestring, long and
    # dict.iteritems(), none of which exist on Python 3.
    __all__ = ['__init__', 'default', 'encode', 'iterencode']
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8'):
        """
        Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level. An indent level of 0 will only insert newlines.
        None is the most compact representation.
        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        # Mutable encoder state: tracks nesting depth during pretty-printing.
        self.current_indent_level = 0
        if separators is not None:
            self.item_separator, self.key_separator = separators
        self.encoding = encoding
    def _newline_indent(self):
        """Return a newline plus whitespace for the current indent level."""
        return '\n' + (' ' * (self.indent * self.current_indent_level))
    def _iterencode_list(self, lst, markers=None):
        """Yield JSON text fragments for a list or tuple.

        ``markers`` maps id() -> container for circular-reference detection.
        """
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        yield '['
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            separator = self.item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                yield separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(self, dct, markers=None):
        """Yield JSON text fragments for a dict.

        Non-string keys are coerced to strings where JavaScript would
        accept them (numbers, booleans, null); other keys raise TypeError
        unless skipkeys is set.
        """
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        key_separator = self.key_separator
        if self.indent is not None:
            self.current_indent_level += 1
            newline_indent = self._newline_indent()
            item_separator = self.item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = self.item_separator
        first = True
        if self.ensure_ascii:
            encoder = encode_basestring_ascii
        else:
            encoder = encode_basestring
        allow_nan = self.allow_nan
        if self.sort_keys:
            # Python 2 idiom: dict.keys() returns a list sortable in place.
            keys = dct.keys()
            keys.sort()
            items = [(k, dct[k]) for k in keys]
        else:
            items = dct.iteritems()
        _encoding = self.encoding
        # Decode str keys up front unless the C speedup already expects UTF-8.
        _do_decode = (_encoding is not None
            and not (_need_utf8 and _encoding == 'utf-8'))
        for key, value in items:
            if isinstance(key, str):
                if _do_decode:
                    key = key.decode(_encoding)
            elif isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = floatstr(key, allow_nan)
            # BUGFIX: booleans and None must be tested before (int, long) --
            # bool is a subclass of int, so the previous ordering stringified
            # True/False keys as 'True'/'False', which is not valid JSON.
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif self.skipkeys:
                continue
            else:
                raise TypeError("key %r is not a string" % (key,))
            if first:
                first = False
            else:
                yield item_separator
            yield encoder(key)
            yield key_separator
            for chunk in self._iterencode(value, markers):
                yield chunk
        if newline_indent is not None:
            self.current_indent_level -= 1
            yield self._newline_indent()
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(self, o, markers=None):
        """Yield JSON text fragments for an arbitrary object.

        Dispatches on type; unknown types are routed through .default().
        """
        if isinstance(o, basestring):
            if self.ensure_ascii:
                encoder = encode_basestring_ascii
            else:
                encoder = encode_basestring
            _encoding = self.encoding
            if (_encoding is not None and isinstance(o, str)
                    and not (_need_utf8 and _encoding == 'utf-8')):
                o = o.decode(_encoding)
            yield encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield floatstr(o, self.allow_nan)
        elif isinstance(o, (list, tuple)):
            for chunk in self._iterencode_list(o, markers):
                yield chunk
        elif isinstance(o, dict):
            for chunk in self._iterencode_dict(o, markers):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            for chunk in self._iterencode_default(o, markers):
                yield chunk
            if markers is not None:
                del markers[markerid]
    def _iterencode_default(self, o, markers=None):
        """Encode ``o`` via the user-overridable .default() hook."""
        newobj = self.default(o)
        return self._iterencode(newobj, markers)
    def default(self, o):
        """
        Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError("%r is not JSON serializable" % (o,))
    def encode(self, o):
        """
        Return a JSON string representation of a Python data structure.
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks...
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8' and _need_utf8)):
                    o = o.decode(_encoding)
            return encode_basestring_ascii(o)
        # This doesn't pass the iterator directly to ''.join() because it
        # sucks at reporting exceptions. It's going to do this internally
        # anyway because it uses PySequence_Fast or similar.
        chunks = list(self.iterencode(o))
        return ''.join(chunks)
    def iterencode(self, o):
        """
        Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        return self._iterencode(o, markers)
# Public API of this module.
__all__ = ['JSONEncoder']
|
AnhellO/DAS_Sistemas | refs/heads/development | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/contrib/messages/apps.py | 130 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class MessagesConfig(AppConfig):
    """App configuration for ``django.contrib.messages``."""
    # Dotted path identifying the application this config belongs to.
    name = 'django.contrib.messages'
    # Human-readable name, marked for translation.
    verbose_name = _("Messages")
|
tsrnnash/bg8-cdw11 | refs/heads/master | users/a/g8/ag8_40323131_task1.py | 9 | # 各組分別在各自的 .py 程式中建立應用程式 (第1步/總共3步)
from flask import Blueprint, render_template, make_response
# 利用 Blueprint建立 ag1, 並且 url 前綴為 /ag1, 並設定 template 存放目錄
ag8_40323131 = Blueprint('ag8_40323131', __name__, url_prefix='/ag8_40323131', template_folder='templates')
@ag8_40323131.route('/task1_31')
def task1_31():
    """Serve a self-contained HTML page that draws chain-link letter glyphs.

    The page loads Brython and the Cango 2D canvas libraries from remote
    hosts; the embedded ``text/python`` script runs client-side and draws
    four copies of the letter 'A' (green, blue, red, green), each after a
    cgo.setWorldCoords call that shifts the drawing origin.  The returned
    string is raw HTML; its contents (including the non-English comments)
    are runtime data and must not be altered.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-315, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="blue")
cgo.setWorldCoords(-385, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="red")
cgo.setWorldCoords(-445, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
</script>
</body></html>
'''
    return outstring
@ag8_40323131.route('/task1b')
def task1():
    """Serve an HTML page drawing the letters B, A, C, D as chain glyphs.

    Same Brython/Cango setup as task1_31; each letter is positioned by a
    fresh cgo.setWorldCoords call.  NOTE(review): the route is '/task1b'
    but the function is named ``task1`` -- confirm this is intentional,
    since ``url_for`` uses the function name.  The returned string is raw
    HTML runtime data; do not modify its contents.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-40, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, 0, 0, color="green")
cgo.setWorldCoords(-107.5, -250, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-50, -250, 500, 500)
# 畫 C
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0, color="green")
cgo.setWorldCoords(-55, -250, 500, 500)
# 畫 D
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
mychain.basic(x10, y10, 0+65*3, 0, color="green")
</script>
</body></html>
'''
    return outstring
@ag8_40323131.route('/task1c')
def task1c():
    """Serve an HTML page drawing the letters B, A, D, C as chain glyphs.

    Variant of task1 with different world-coordinate offsets per letter.
    The Brython script runs client-side on the canvas; the returned string
    is raw HTML runtime data and must not be modified.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/Cango2D-6v13.js"></script>
<script type="text/javascript" src="http://cptocadp-2015fallhw.rhcloud.com/static/CangoAxes-1v33.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
from javascript import JSConstructor
from browser import alert
from browser import window
import math
cango = JSConstructor(window.Cango)
cobj = JSConstructor(window.Cobj)
shapedefs = window.shapeDefs
obj2d = JSConstructor(window.Obj2D)
cgo = cango("plotarea")
cgo.setWorldCoords(-250, -250, 500, 500)
# 畫軸線
cgo.drawAxes(0, 240, 0, 240, {
"strokeColor":"#aaaaaa",
"fillColor": "#aaaaaa",
"xTickInterval": 20,
"xLabelInterval": 20,
"yTickInterval": 20,
"yLabelInterval": 20})
deg = math.pi/180
# 將繪製鏈條輪廓的內容寫成 class 物件
class chain():
# 輪廓的外型設為成員變數
chamber = "M -6.8397, -1.4894 \
A 7, 7, 0, 1, 0, 6.8397, -1.4894 \
A 40, 40, 0, 0, 1, 6.8397, -18.511 \
A 7, 7, 0, 1, 0, -6.8397, -18.511 \
A 40, 40, 0, 0, 1, -6.8397, -1.4894 z"
cgoChamber = window.svgToCgoSVG(chamber)
# 利用鏈條起點與終點定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic(self, x1, y1, x2, y2, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(math.atan2(y2-y1, x2-x1)/deg+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
# 利用鏈條起點與旋轉角度定義繪圖, 使用內定的 color, border 與 linewidth 變數
def basic_rot(self, x1, y1, rot, color="green", border=True, linewidth=4, scale=1):
self.x1 = x1
self.y1 = y1
self.rot = rot
self.color = color
self.border = border
self.linewidth = linewidth
self.scale = scale
# 注意, cgo.Chamber 為成員變數
cmbr = cobj(self.cgoChamber, "SHAPE", {
"fillColor": color,
"border": border,
"strokeColor": "tan",
"lineWidth": linewidth })
# hole 為原點位置
hole = cobj(shapedefs.circle(4), "PATH")
cmbr.appendPath(hole)
# 根據旋轉角度, 計算 x2 與 y2
x2 = x1 + 20*math.cos(rot*deg)
y2 = y1 + 20*math.sin(rot*deg)
# 複製 cmbr, 然後命名為 basic1
basic1 = cmbr.dup()
# 因為鏈條的角度由原點向下垂直, 所以必須轉 90 度, 再考量 atan2 的轉角
basic1.rotate(rot+90)
# 放大 scale 倍
cgo.render(basic1, x1, y1, scale, 0)
return x2, y2
# 利用 chain class 建立案例, 對應到 mychain 變數
mychain = chain()
# 畫 B
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 右上垂直向下單元
x7, y7 = mychain.basic_rot(x6, y6, -90)
# 右斜 240 度
x8, y8 = mychain.basic_rot(x7, y7, 210)
# 中間水平
mychain.basic(x8, y8, x2, y2)
# 右下斜 -30 度
x10, y10 = mychain.basic_rot(x8, y8, -30)
# 右下垂直向下單元
x11, y11 = mychain.basic_rot(x10, y10, -90)
# 右下斜 240 度
x12, y12 = mychain.basic_rot(x11, y11, 210)
# 水平接回起點
mychain.basic(x12,y12, 0, 0, color="green")
cgo.setWorldCoords(-247.5, -350, 500, 500)
# 畫 A
# 左邊兩個垂直單元
x1, y1 = mychain.basic_rot(0, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
# 左斜邊兩個單元
x3, y3 = mychain.basic_rot(x2, y2, 80)
x4, y4 = mychain.basic_rot(x3, y3, 71)
# 最上方水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜邊兩個單元
x6, y6 = mychain.basic_rot(x5, y5, -71)
x7, y7 = mychain.basic_rot(x6, y6, -80)
# 右邊兩個垂直單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
x9, y9 = mychain.basic_rot(x8, y8, -90)
# 中間兩個水平單元
x10, y10 = mychain.basic_rot(x8, y8, -180)
mychain.basic(x10, y10, x1, y1, color="green")
cgo.setWorldCoords(-55, -50, 500, 500)
# 畫 D
# 左邊四個垂直單元
x1, y1 = mychain.basic_rot(0+65*3, 0, 90)
x2, y2 = mychain.basic_rot(x1, y1, 90)
x3, y3 = mychain.basic_rot(x2, y2, 90)
x4, y4 = mychain.basic_rot(x3, y3, 90)
# 上方一個水平單元
x5, y5 = mychain.basic_rot(x4, y4, 0)
# 右斜 -40 度
x6, y6 = mychain.basic_rot(x5, y5, -40)
x7, y7 = mychain.basic_rot(x6, y6, -60)
# 右中垂直向下單元
x8, y8 = mychain.basic_rot(x7, y7, -90)
# -120 度
x9, y9 = mychain.basic_rot(x8, y8, -120)
# -140
x10, y10 = mychain.basic_rot(x9, y9, -140)
# 水平接回原點
mychain.basic(x10, y10, 0+65*3, 0, color="green")
cgo.setWorldCoords(-120, -150, 500, 500)
# 畫 C
# 上半部
# 左邊中間垂直起點, 圓心位於線段中央, y 方向再向上平移兩個鏈條圓心距單位
x1, y1 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), 90)
# 上方轉 80 度
x2, y2 = mychain.basic_rot(x1, y1, 80)
# 上方轉 30 度
x3, y3 = mychain.basic_rot(x2, y2, 30)
# 上方水平
x4, y4 = mychain.basic_rot(x3, y3, 0)
# 下半部, 從起點開始 -80 度
x5, y5 = mychain.basic_rot(0+65*2, -10+10+20*math.sin(80*deg)+20*math.sin(30*deg), -80)
# 下斜 -30 度
x6, y6 = mychain.basic_rot(x5, y5, -30)
# 下方水平單元
x7, y7 = mychain.basic_rot(x6, y6, -0, color="green")
</script>
</body></html>
'''
    return outstring
|
nirzari18/Query-Analysis-Application-on-Google-App-Engine | refs/heads/master | lib/oauth2client/crypt.py | 36 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crypto-related routines for oauth2client."""
import base64
import imp
import json
import logging
import os
import sys
import time
import six
# Time constants, all in seconds.
# NOTE(review): presumably consumed by the JWT verification helpers later
# in this file (outside this excerpt) -- confirm before changing.
CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class AppIdentityError(Exception):
    """Error type raised by oauth2client's crypt routines."""
    pass
def _TryOpenSslImport():
    """Import OpenSSL, avoiding the explicit import where possible.
    Importing OpenSSL 0.14 can take up to 0.5s, which is a large price
    to pay at module import time. However, it's also possible for
    ``imp.find_module`` to fail to find the module, even when it's
    installed. (This is the case in various exotic environments,
    including some relevant for Google.) So we first try a fast-path,
    and fall back to the slow import as needed.
    Args:
        None
    Returns:
        None
    Raises:
        ImportError if OpenSSL is unavailable.
    """
    try:
        # Fast path: locate the module on disk without executing it.
        _ = imp.find_module('OpenSSL')
        return
    except ImportError:
        # Slow path: find_module can miss installed modules; a real import
        # is authoritative and re-raises ImportError when OpenSSL is
        # genuinely absent.
        # NOTE(review): the 'imp' module is deprecated (removed in Python
        # 3.12); importlib.util.find_spec is the modern replacement.
        import OpenSSL
# PyOpenSSL-backed implementations.  If PyOpenSSL is unavailable the whole
# suite is skipped and the `except ImportError` at the bottom publishes
# None placeholders so the backend selection below can fall back.
try:
    _TryOpenSslImport()
    class OpenSSLVerifier(object):
        """Verifies the signature on a message."""
        def __init__(self, pubkey):
            """Constructor.
            Args:
                pubkey, OpenSSL.crypto.PKey, The public key to verify with.
            """
            self._pubkey = pubkey
        def verify(self, message, signature):
            """Verifies a message against a signature.
            Args:
                message: string, The message to verify.
                signature: string, The signature on the message.
            Returns:
                True if message was signed by the private key associated with the public
                key that this object was constructed with.
            """
            from OpenSSL import crypto
            try:
                if isinstance(message, six.text_type):
                    message = message.encode('utf-8')
                crypto.verify(self._pubkey, signature, message, 'sha256')
                return True
            except:
                # NOTE(review): bare except deliberately maps any OpenSSL
                # error to "signature invalid" instead of propagating.
                return False
        @staticmethod
        def from_string(key_pem, is_x509_cert):
            """Construct a Verified instance from a string.
            Args:
                key_pem: string, public key in PEM format.
                is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
                    expected to be an RSA key in PEM format.
            Returns:
                Verifier instance.
            Raises:
                OpenSSL.crypto.Error if the key_pem can't be parsed.
            """
            from OpenSSL import crypto
            if is_x509_cert:
                pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
            else:
                pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
            return OpenSSLVerifier(pubkey)
    class OpenSSLSigner(object):
        """Signs messages with a private key."""
        def __init__(self, pkey):
            """Constructor.
            Args:
                pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
            """
            self._key = pkey
        def sign(self, message):
            """Signs a message.
            Args:
                message: bytes, Message to be signed.
            Returns:
                string, The signature of the message for the given key.
            """
            from OpenSSL import crypto
            if isinstance(message, six.text_type):
                message = message.encode('utf-8')
            return crypto.sign(self._key, message, 'sha256')
        @staticmethod
        def from_string(key, password=b'notasecret'):
            """Construct a Signer instance from a string.
            Args:
                key: string, private key in PKCS12 or PEM format.
                password: string, password for the private key file.
            Returns:
                Signer instance.
            Raises:
                OpenSSL.crypto.Error if the key can't be parsed.
            """
            from OpenSSL import crypto
            parsed_pem_key = _parse_pem_key(key)
            if parsed_pem_key:
                pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
            else:
                # Not PEM: assume PKCS12 and unlock it with the password.
                if isinstance(password, six.text_type):
                    password = password.encode('utf-8')
                pkey = crypto.load_pkcs12(key, password).get_privatekey()
            return OpenSSLSigner(pkey)
    def pkcs12_key_as_pem(private_key_text, private_key_password):
        """Convert the contents of a PKCS12 key to PEM using OpenSSL.
        Args:
            private_key_text: String. Private key.
            private_key_password: String. Password for PKCS12.
        Returns:
            String. PEM contents of ``private_key_text``.
        """
        from OpenSSL import crypto
        decoded_body = base64.b64decode(private_key_text)
        if isinstance(private_key_password, six.string_types):
            private_key_password = private_key_password.encode('ascii')
        pkcs12 = crypto.load_pkcs12(decoded_body, private_key_password)
        return crypto.dump_privatekey(crypto.FILETYPE_PEM,
                                      pkcs12.get_privatekey())
except ImportError:
    # PyOpenSSL unavailable: publish placeholders so the backend selection
    # further down can detect it and fall back to PyCrypto.
    OpenSSLVerifier = None
    OpenSSLSigner = None
    def pkcs12_key_as_pem(*args, **kwargs):
        raise NotImplementedError('pkcs12_key_as_pem requires OpenSSL.')
# PyCrypto-backed implementations, used when PyOpenSSL is not installed.
try:
    from Crypto.PublicKey import RSA
    from Crypto.Hash import SHA256
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Util.asn1 import DerSequence
    class PyCryptoVerifier(object):
        """Verifies the signature on a message."""
        def __init__(self, pubkey):
            """Constructor.
            Args:
                pubkey, OpenSSL.crypto.PKey (or equiv), The public key to verify with.
            """
            self._pubkey = pubkey
        def verify(self, message, signature):
            """Verifies a message against a signature.
            Args:
                message: string, The message to verify.
                signature: string, The signature on the message.
            Returns:
                True if message was signed by the private key associated with the public
                key that this object was constructed with.
            """
            try:
                return PKCS1_v1_5.new(self._pubkey).verify(
                    SHA256.new(message), signature)
            except:
                # NOTE(review): bare except maps any parsing/crypto error to
                # "signature invalid" rather than propagating.
                return False
        @staticmethod
        def from_string(key_pem, is_x509_cert):
            """Construct a Verified instance from a string.
            Args:
                key_pem: string, public key in PEM format.
                is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
                    expected to be an RSA key in PEM format.
            Returns:
                Verifier instance.
            """
            if is_x509_cert:
                if isinstance(key_pem, six.text_type):
                    key_pem = key_pem.encode('ascii')
                # Strip the PEM armor lines and base64-decode the DER body,
                # then walk the X.509 ASN.1 structure to the public key.
                # NOTE(review): index 6 is assumed to be
                # subjectPublicKeyInfo -- verify against the RFC 5280 layout.
                pemLines = key_pem.replace(b' ', b'').split()
                certDer = _urlsafe_b64decode(b''.join(pemLines[1:-1]))
                certSeq = DerSequence()
                certSeq.decode(certDer)
                tbsSeq = DerSequence()
                tbsSeq.decode(certSeq[0])
                pubkey = RSA.importKey(tbsSeq[6])
            else:
                pubkey = RSA.importKey(key_pem)
            return PyCryptoVerifier(pubkey)
    class PyCryptoSigner(object):
        """Signs messages with a private key."""
        def __init__(self, pkey):
            """Constructor.
            Args:
                pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
            """
            self._key = pkey
        def sign(self, message):
            """Signs a message.
            Args:
                message: string, Message to be signed.
            Returns:
                string, The signature of the message for the given key.
            """
            if isinstance(message, six.text_type):
                message = message.encode('utf-8')
            return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
        @staticmethod
        def from_string(key, password='notasecret'):
            """Construct a Signer instance from a string.
            Args:
                key: string, private key in PEM format.
                password: string, password for private key file. Unused for PEM files.
            Returns:
                Signer instance.
            Raises:
                NotImplementedError if they key isn't in PEM format.
            """
            parsed_pem_key = _parse_pem_key(key)
            if parsed_pem_key:
                pkey = RSA.importKey(parsed_pem_key)
            else:
                raise NotImplementedError(
                    'PKCS12 format is not supported by the PyCrypto library. '
                    'Try converting to a "PEM" '
                    '(openssl pkcs12 -in xxxxx.p12 -nodes -nocerts > privatekey.pem) '
                    'or using PyOpenSSL if native code is an option.')
            return PyCryptoSigner(pkey)
except ImportError:
    # PyCrypto unavailable: placeholders for the backend selection below.
    PyCryptoVerifier = None
    PyCryptoSigner = None
# Pick the crypto backend for the module-level Signer/Verifier aliases:
# prefer PyOpenSSL, fall back to PyCrypto, else fail loudly at import time.
if OpenSSLSigner:
    Signer = OpenSSLSigner
    Verifier = OpenSSLVerifier
elif PyCryptoSigner:
    Signer = PyCryptoSigner
    Verifier = PyCryptoVerifier
else:
    raise ImportError('No encryption library found. Please install either '
                      'PyOpenSSL, or PyCrypto 2.6 or later')
def _parse_pem_key(raw_key_input):
"""Identify and extract PEM keys.
Determines whether the given key is in the format of PEM key, and extracts
the relevant part of the key if it is.
Args:
raw_key_input: The contents of a private key file (either PEM or PKCS12).
Returns:
string, The actual key if the contents are from a PEM file, or else None.
"""
offset = raw_key_input.find(b'-----BEGIN ')
if offset != -1:
return raw_key_input[offset:]
def _urlsafe_b64encode(raw_bytes):
    """Base64url-encode *raw_bytes* (str is UTF-8 encoded first) and strip
    the trailing '=' padding, per the JWT convention."""
    if isinstance(raw_bytes, six.text_type):
        raw_bytes = raw_bytes.encode('utf-8')
    return base64.urlsafe_b64encode(raw_bytes).decode('ascii').rstrip('=')
def _urlsafe_b64decode(b64string):
    """Decode base64url data whose trailing '=' padding may have been
    stripped (the inverse of _urlsafe_b64encode).

    Args:
        b64string: str or bytes, the base64url-encoded data.

    Returns:
        bytes, the decoded data.
    """
    # Guard against unicode strings, which base64 can't handle.
    if isinstance(b64string, six.text_type):
        b64string = b64string.encode('ascii')
    # Re-pad to a multiple of 4. The previous expression `4 - len % 4`
    # appended four '=' characters when the input length was already a
    # multiple of 4, which only worked because binascii ignores data after
    # the padding; (-len) % 4 adds exactly the padding that is missing.
    padded = b64string + b'=' * (-len(b64string) % 4)
    return base64.urlsafe_b64decode(padded)
def _json_encode(data):
return json.dumps(data, separators=(',', ':'))
def make_signed_jwt(signer, payload):
    """Make a signed JWT.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
        signer: crypt.Signer, Cryptographic signer.
        payload: dict, Dictionary of data to convert to JSON and then sign.

    Returns:
        string, The JWT for the payload.
    """
    header = {'typ': 'JWT', 'alg': 'RS256'}
    encoded_header = _urlsafe_b64encode(_json_encode(header))
    encoded_payload = _urlsafe_b64encode(_json_encode(payload))
    # The signature covers "header.payload".
    signing_input = encoded_header + '.' + encoded_payload
    signature = signer.sign(signing_input)
    segments = [encoded_header, encoded_payload, _urlsafe_b64encode(signature)]
    logger.debug(str(segments))
    return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
    """Verify a JWT against public certs.

    See http://self-issued.info/docs/draft-jones-json-web-token.html.

    Args:
        jwt: string, A JWT.
        certs: dict, Dictionary where values of public keys in PEM format.
        audience: string, The audience, 'aud', that this JWT should contain. If
            None then the JWT's 'aud' parameter is not verified.

    Returns:
        dict, The deserialized JSON payload in the JWT.

    Raises:
        AppIdentityError if any checks are failed.
    """
    segments = jwt.split('.')
    if len(segments) != 3:
        raise AppIdentityError('Wrong number of segments in token: %s' % jwt)
    signed = '%s.%s' % (segments[0], segments[1])
    signature = _urlsafe_b64decode(segments[2])

    # Parse token.
    json_body = _urlsafe_b64decode(segments[1])
    try:
        parsed = json.loads(json_body.decode('utf-8'))
    except ValueError:
        # json.loads raises ValueError on malformed JSON and .decode()
        # raises UnicodeDecodeError (a ValueError subclass) on bad UTF-8.
        # The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, which must be allowed to propagate.
        raise AppIdentityError('Can\'t parse token: %s' % json_body)

    # Check signature: the token is accepted if any provided cert verifies it.
    verified = False
    for pem in certs.values():
        verifier = Verifier.from_string(pem, True)
        if verifier.verify(signed, signature):
            verified = True
            break
    if not verified:
        raise AppIdentityError('Invalid token signature: %s' % jwt)

    # Check creation timestamp, allowing CLOCK_SKEW_SECS of clock drift.
    iat = parsed.get('iat')
    if iat is None:
        raise AppIdentityError('No iat field in token: %s' % json_body)
    earliest = iat - CLOCK_SKEW_SECS

    # Check expiration timestamp.
    now = int(time.time())
    exp = parsed.get('exp')
    if exp is None:
        raise AppIdentityError('No exp field in token: %s' % json_body)
    if exp >= now + MAX_TOKEN_LIFETIME_SECS:
        raise AppIdentityError('exp field too far in future: %s' % json_body)
    latest = exp + CLOCK_SKEW_SECS
    if now < earliest:
        raise AppIdentityError('Token used too early, %d < %d: %s' %
                               (now, earliest, json_body))
    if now > latest:
        raise AppIdentityError('Token used too late, %d > %d: %s' %
                               (now, latest, json_body))

    # Check audience.
    if audience is not None:
        aud = parsed.get('aud')
        if aud is None:
            raise AppIdentityError('No aud field in token: %s' % json_body)
        if aud != audience:
            raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                                   (aud, audience, json_body))
    return parsed
|
luogangyi/Ceilometer-oVirt | refs/heads/stable/juno | ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py | 10 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import VARCHAR
def upgrade(migrate_engine):
    """Widen the event string columns (unique_name.key, trait.t_string)
    from VARCHAR(32) to VARCHAR(255)."""
    meta = MetaData(bind=migrate_engine)
    name = Table('unique_name', meta, autoload=True)
    name.c.key.alter(type=VARCHAR(length=255))
    trait = Table('trait', meta, autoload=True)
    trait.c.t_string.alter(type=VARCHAR(length=255))
def downgrade(migrate_engine):
    """Revert the event string columns back to VARCHAR(32).

    NOTE(review): values longer than 32 characters written after the upgrade
    will be truncated or rejected by the backend on downgrade.
    """
    meta = MetaData(bind=migrate_engine)
    name = Table('unique_name', meta, autoload=True)
    name.c.key.alter(type=VARCHAR(length=32))
    trait = Table('trait', meta, autoload=True)
    trait.c.t_string.alter(type=VARCHAR(length=32))
|
SUSE/azure-sdk-for-python | refs/heads/master | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/plan.py | 16 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Plan(Model):
    """Plan for the resource.

    :param name: The plan ID.
    :type name: str
    :param publisher: The publisher ID.
    :type publisher: str
    :param product: The offer ID.
    :type product: str
    :param promotion_code: The promotion code.
    :type promotion_code: str
    """

    # Maps Python attribute names to their wire (JSON) keys and types for
    # msrest (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'publisher': {'key': 'publisher', 'type': 'str'},
        'product': {'key': 'product', 'type': 'str'},
        'promotion_code': {'key': 'promotionCode', 'type': 'str'},
    }

    def __init__(self, name=None, publisher=None, product=None, promotion_code=None):
        # NOTE(review): AutoRest-generated code; Model.__init__ is not called
        # here — confirm msrest does not require it for this model.
        self.name = name
        self.publisher = publisher
        self.product = product
        self.promotion_code = promotion_code
|
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | scripts/dl_cleanup.py | 131 | #!/usr/bin/env python
"""
# OpenWRT download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (c) 2010 Michael Buesch <mb@bu3sch.de>
"""
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False  # set by -d/--dry-run: report actions without deleting
def parseVer_1234(match, filepath):
    """Parse a four-component version (a.b.c.d) into (name, sortable int)."""
    progname = match.group(1)
    major, minor, patch, sub = (int(match.group(i)) for i in (2, 3, 4, 5))
    progversion = (major << 64) | (minor << 48) | (patch << 32) | (sub << 16)
    return (progname, progversion)
def parseVer_123(match, filepath):
    """Parse a three-component version (a.b.c, optional trailing letter)
    into (name, sortable int); the letter's ordinal is the low field."""
    progname = match.group(1)
    try:
        suffix = match.group(5)
    except IndexError:
        # Pattern without a suffix group (e.g. the 1_2_3 form).
        suffix = None
    patchlevel = ord(suffix[0]) if suffix else 0
    progversion = (int(match.group(2)) << 64) | \
                  (int(match.group(3)) << 48) | \
                  (int(match.group(4)) << 32) | \
                  patchlevel
    return (progname, progversion)
def parseVer_12(match, filepath):
    """Parse a two-component version (a.b, optional trailing letter)
    into (name, sortable int)."""
    progname = match.group(1)
    try:
        suffix = match.group(4)
    except IndexError:
        # Pattern without a suffix group.
        suffix = None
    patchlevel = ord(suffix[0]) if suffix else 0
    progversion = (int(match.group(2)) << 64) | \
                  (int(match.group(3)) << 48) | \
                  patchlevel
    return (progname, progversion)
def parseVer_r(match, filepath):
    """Parse a single revision number (rNNNN) into (name, sortable int)."""
    return (match.group(1), int(match.group(2)) << 64)
def parseVer_ymd(match, filepath):
    """Parse a date-based version (YYYY-MM-DD) into (name, sortable int)."""
    year, month, day = (int(match.group(i)) for i in (2, 3, 4))
    return (match.group(1), (year << 64) | (month << 48) | (day << 32))
def parseVer_GIT(match, filepath):
    """Version a GIT-SHA tarball by its file modification time, since the
    hash itself carries no ordering."""
    progname = match.group(1)
    mtime = os.stat(filepath).st_mtime
    return (progname, int(mtime) << 64)
# Tarball extensions recognized when stripping a download file name.
extensions = (
    ".tar.gz",
    ".tar.bz2",
    ".orig.tar.gz",
    ".orig.tar.bz2",
    ".zip",
    ".tgz",
    ".tbz",
)

# Ordered (pattern, parser) pairs. The first regex that matches the
# extension-stripped filename wins, so more specific forms come first.
versionRegex = (
    (re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT),  # xxx-GIT_SHASUM
    (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234),  # xxx-1.2.3.4
    (re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd),  # xxx-YYYY-MM-DD
    (re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123),  # xxx-1.2.3a
    (re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123),  # xxx-1_2_3
    (re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12),  # xxx-1.2a
    (re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r),  # xxx-r1111
)

# Files matching any of these (name, pattern) entries are never deleted;
# entries can be removed at runtime with -w/--whitelist.
blacklist = [
    ("linux", re.compile(r"linux-.*")),
    ("gcc", re.compile(r"gcc-.*")),
    ("wl_apsta", re.compile(r"wl_apsta.*")),
    (".fw", re.compile(r".*\.fw")),
    (".arm", re.compile(r".*\.arm")),
    (".bin", re.compile(r".*\.bin")),
    ("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception):
    """Raised when a download file name cannot be parsed."""
class Entry:
    """One download file, split into program name, version and extension.

    Raises EntryParseError from __init__ when the extension or the version
    pattern is not recognized. (Python 2 code: uses print statements.)
    """
    def __init__(self, directory, filename):
        self.directory = directory
        self.filename = filename
        self.progname = ""
        self.fileext = ""
        # Strip a known tarball extension; for/else rejects unknown ones.
        for ext in extensions:
            if filename.endswith(ext):
                filename = filename[0:0-len(ext)]
                self.fileext = ext
                break
        else:
            print self.filename, "has an unknown file-extension"
            raise EntryParseError("ext")
        # First matching version pattern wins (versionRegex is ordered).
        for (regex, parseVersion) in versionRegex:
            match = regex.match(filename)
            if match:
                (self.progname, self.version) = parseVersion(
                    match, directory + "/" + filename + self.fileext)
                break
        else:
            print self.filename, "has an unknown version pattern"
            raise EntryParseError("ver")

    def deleteFile(self):
        # Honors the global dry-run flag: only prints in that mode.
        path = (self.directory + "/" + self.filename).replace("//", "/")
        print "Deleting", path
        if not opt_dryrun:
            os.unlink(path)

    def __eq__(self, y):
        # Identity is the full file name (including extension).
        return self.filename == y.filename

    def __ge__(self, y):
        # Ordering is by the parsed, bit-packed version number.
        return self.version >= y.version
def usage():
    """Print command-line help to stdout."""
    print "OpenWRT download directory cleanup utility"
    print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>"
    print ""
    print " -d|--dry-run Do a dry-run. Don't delete any files"
    print " -B|--show-blacklist Show the blacklist and exit"
    print " -w|--whitelist ITEM Remove ITEM from blacklist"
def main(argv):
    """Parse options, scan the download directory, and delete every tarball
    except the newest version of each program. Returns a process exit code."""
    global opt_dryrun
    try:
        (opts, args) = getopt.getopt(argv[1:],
            "hdBw:",
            [ "help", "dry-run", "show-blacklist", "whitelist=", ])
        if len(args) != 1:
            usage()
            return 1
    except getopt.GetoptError:
        usage()
        return 1
    directory = args[0]
    for (o, v) in opts:
        if o in ("-h", "--help"):
            usage()
            return 0
        if o in ("-d", "--dry-run"):
            opt_dryrun = True
        if o in ("-w", "--whitelist"):
            # Remove the named entry from the blacklist; for/else reports
            # an unknown item.
            for i in range(0, len(blacklist)):
                (name, regex) = blacklist[i]
                if name == v:
                    del blacklist[i]
                    break
            else:
                print "Whitelist error: Item", v,\
                      "is not in blacklist"
                return 1
        if o in ("-B", "--show-blacklist"):
            for (name, regex) in blacklist:
                print name
            return 0
    # Create a directory listing and parse the file names.
    entries = []
    for filename in os.listdir(directory):
        if filename == "." or filename == "..":
            continue
        # for/else: only parse files that match no blacklist pattern;
        # unparsable files are silently skipped.
        for (name, regex) in blacklist:
            if regex.match(filename):
                if opt_dryrun:
                    print filename, "is blacklisted"
                break
        else:
            try:
                entries.append(Entry(directory, filename))
            except (EntryParseError), e: pass
    # Create a map of programs
    progmap = {}
    for entry in entries:
        if entry.progname in progmap.keys():
            progmap[entry.progname].append(entry)
        else:
            progmap[entry.progname] = [entry,]
    # Traverse the program map and delete everything but the last version
    for prog in progmap:
        lastVersion = None
        versions = progmap[prog]
        for version in versions:
            if lastVersion is None or version >= lastVersion:
                lastVersion = version
        if lastVersion:
            for version in versions:
                if version != lastVersion:
                    version.deleteFile()
            if opt_dryrun:
                print "Keeping", lastVersion.filename
    return 0
# Script entry point.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
austinharris/gem5-riscv | refs/heads/master | src/arch/x86/X86NativeTrace.py | 42 | # Copyright (c) 2009 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
from m5.params import *
from CPUTracers import NativeTrace
class X86NativeTrace(NativeTrace):
    # SimObject declaration binding the Python config name to the C++
    # tracer implementation declared in arch/x86/nativetrace.hh.
    type = 'X86NativeTrace'
    cxx_class = 'Trace::X86NativeTrace'
    cxx_header = 'arch/x86/nativetrace.hh'
|
JetBrains/intellij-community | refs/heads/master | python/testData/completion/superInitKwParamsNoCompletion.after.py | 83 | class A:
def __init__(self, first=True, second=False): pass
class B(A):
def __init__(self, **kwargs): A.__init__(self, first=False)
b = B(fir) |
kcwu/2048-python | refs/heads/master | ai_modules/kcwu_short2.py | 1 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from multiprocessing import *
import sys
range4 = range(4)  # shared row/column index sequence (Python 2: a list)
job_table = {}  # (encoded grid, depth) -> 1; dedupes jobs sent to workers
def rotateRight(grid):
    """Return a new 4x4 grid rotated by 90 degrees (column 3-c, read top to
    bottom, becomes row c)."""
    rotated = []
    for c in range(4):
        rotated.append([grid[r][3 - c] for r in range(4)])
    return rotated
def move_row(row):
    """Slide and merge one row to the left, 2048-style.

    Non-empty tiles are packed to the front; each adjacent equal pair merges
    once (left to right) into a doubled tile. The result is padded with None
    back to length 4.
    """
    tiles = [v for v in row if v]
    merged = []
    src = 0
    while src < len(tiles):
        if src + 1 < len(tiles) and tiles[src] == tiles[src + 1]:
            merged.append(tiles[src] * 2)
            src += 2
        else:
            merged.append(tiles[src])
            src += 1
    merged.extend([None] * (4 - len(merged)))
    return merged
def move(grid, rot):
    """Rotate the grid `rot` times, then slide every row left.

    Returns (new_grid, moved) where moved is True when sliding changed
    anything relative to the rotated grid. (List comprehension replaces the
    original map() call — identical behavior under Python 2.)
    """
    rotated = grid
    for _ in range(rot):
        rotated = rotateRight(rotated)
    slid = [move_row(r) for r in rotated]
    return slid, slid != rotated
def eval_monotone_L(grid):
    """Score left-to-right monotonicity per row: growing non-increasing runs
    earn quadratically (4 * run^2 per step); each break costs 1.5x the
    absolute difference and resets the run."""
    score = 0
    for row in grid:
        run = 0
        for left, right in zip(row, row[1:]):
            left = left or 0
            right = right or 0
            if left and left >= right:
                run += 1
                score += run * run * 4
            else:
                score -= abs(left - right) * 1.5
                run = 0
    return score
def eval_monotone_LR(grid):
    """Monotonicity score, taking the better of the two horizontal
    directions (the 180-degree rotation scores right-to-left)."""
    flipped = rotateRight(rotateRight(grid))
    return max(eval_monotone_L(grid), eval_monotone_L(flipped))
def eval_smoothness(grid):
    """Penalize each tile by its smallest absolute difference to an
    orthogonal neighbour (empty cells count as 2). The 1e8 sentinel covers
    the no-neighbour case, which cannot occur on a 4x4 board."""
    total = 0
    for x in range(4):
        for y in range(4):
            here = grid[x][y] or 2
            best = 1e8
            for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                nx, ny = x + dx, y + dy
                if 0 <= nx < 4 and 0 <= ny < 4:
                    diff = abs(here - (grid[nx][ny] or 2))
                    if diff < best:
                        best = diff
            total += best
    return -total
def EVAL(grid):
    """Static evaluation: monotonicity in both axes, smoothness, and a
    quadratic penalty on the number of occupied cells."""
    mono = eval_monotone_LR(grid) + eval_monotone_LR(rotateRight(grid))
    smooth = eval_smoothness(grid)
    occupied = 16 - sum(row.count(None) for row in grid)
    return mono + smooth - occupied ** 2
def encode(grid):
    """Flatten the 4x4 grid into a hashable tuple (used as a memo key)."""
    flat = []
    for row in grid:
        flat.extend(row)
    return tuple(flat)
def search_max(grid, depth, nodep):
    """Max node of expectimax: best chance-node score over all legal moves.

    Returns -1e8 when no move changes the board (dead position). The
    original evaluated move(grid, m) twice per direction — once to test
    legality and once for the result; this computes it once.
    """
    best = -1e8
    for direction in range4:
        new_grid, moved = move(grid, direction)
        if moved:
            best = max(best, search_min(new_grid, depth - 1, nodep))
    return best
table = {}  # per-process memo: (encoded grid, depth) -> expected score

def worker(jq, rq):
    """Worker-process loop: pull (grid, depth, nodep) jobs from jq forever,
    evaluate each with a freshly cleared memo table, and push
    ((encoded grid, depth), score) results onto rq."""
    while 1:
        grid, depth, nodep = jq.get()
        table.clear()
        rq.put(((encode(grid), depth), search_min(grid, depth, nodep)))
def search_min(grid, depth, nodep):
    """Chance node of expectimax: average score over all random tile spawns.

    Each empty cell is tried with a 2 (p=.9) and a 4 (p=.1); the cell is
    mutated in place and restored to None afterwards. Results are memoized
    in the module-level `table` keyed by (encoded grid, depth).
    NOTE(review): assumes at least one empty cell, otherwise len(scores) is
    0 and the average divides by zero — a chance node is only reached after
    a legal move, which frees a cell.
    """
    if depth == 0:
        return EVAL(grid)
    key = encode(grid), depth
    if key in table:
        return table[key]
    scores = []
    for i in range4:
        row = grid[i]
        for j in range4:
            if not row[j]:
                score = 0
                for v, p in ((2, .9), (4, .1)):
                    row[j] = v  # place the hypothetical tile in place
                    score += p * search_max(grid, depth, p * nodep)
                row[j] = None  # restore the empty cell
                scores.append(score)
    b = sum(scores) / len(scores)
    table[key] = b
    return b
def gen_job3(grid, depth, nodep, jq):
    """Queue one search job per legal move of `grid`, deduplicating via the
    module-level job_table so each (position, depth) is solved once."""
    for m in range4:
        g2, moved = move(grid, m)
        key = encode(g2), depth - 1
        if moved and key not in job_table:
            job_table[key] = 1
            jq.put((g2, depth - 1, nodep))
def gen_job2(grid, depth, nodep, jq):
    """Expand every possible random tile spawn (2 with p=.9, 4 with p=.1)
    into `grid` and queue the follow-up move jobs for each position."""
    for i in range4:
        row = grid[i]
        for j in range4:
            if not row[j]:
                for v, p in ((2, .9), (4, .1)):
                    row[j] = v  # temporarily place the spawned tile
                    gen_job3(grid, depth, p * nodep, jq)
                row[j] = None  # restore the empty cell
class AI:
    """Expectimax 2048 player that fans the first search ply out to a pool
    of 30 worker processes."""
    def __init__(self):
        # Manager-backed queues so jobs/results can cross process boundaries.
        self.mg = Manager()
        self.jq = self.mg.Queue()  # job queue: (grid, depth, nodep)
        self.rq = self.mg.Queue()  # result queue: ((encoded, depth), score)
        self.pp = []
        for i in range(30):
            p = Process(target=worker, args=(self.jq, self.rq))
            self.pp.append(p)
            p.start()
    def __del__(self):
        # NOTE(review): workers unpack jobs as 3-tuples, so these 0
        # sentinels terminate them via an unpacking error rather than a
        # clean shutdown protocol.
        for i in range(30):
            self.jq.put(0)
    def getNextMove(self, grid):
        """Return 'up'/'left'/'down'/'right' for the best move from `grid`."""
        table.clear()
        job_table.clear()
        # Seed the distributed first ply: for each legal move, queue all
        # depth-2 sub-searches of the possible tile spawns.
        for m in range4:
            move(grid, m)[1] and gen_job2(move(grid, m)[0], 2, 1, self.jq)
        # Collect exactly one result per queued job into the local memo,
        # then finish the shallow root search locally (memo hits do the
        # heavy lifting).
        for i in job_table:
            key, value = self.rq.get()
            table[key] = value
        return ['up', 'left', 'down', 'right'][max((search_min(move(grid, m)[0], 2, 1), m) for m in range4 if move(grid, m)[1])[1]]
# vim:sw=4:expandtab:softtabstop=4
|
Health123/ansible | refs/heads/devel | v1/tests/TestModuleUtilsDatabase.py | 325 | import collections
import mock
import os
import re
from nose.tools import eq_
try:
    from nose.tools import assert_raises_regexp
except ImportError:
    # Python < 2.7
    def assert_raises_regexp(expected, regexp, callable, *a, **kw):
        """Backport of nose's assert_raises_regexp: assert that calling
        `callable(*a, **kw)` raises `expected` with a message matching
        `regexp` (string or compiled pattern)."""
        try:
            callable(*a, **kw)
        except expected as e:
            if isinstance(regexp, basestring):
                regexp = re.compile(regexp)
            if not regexp.search(str(e)):
                raise Exception('"%s" does not match "%s"' %
                                (regexp.pattern, str(e)))
        else:
            # Nothing was raised at all: report the missing exception.
            if hasattr(expected, '__name__'): excName = expected.__name__
            else: excName = str(expected)
            raise AssertionError("%s not raised" % excName)
from ansible.module_utils.database import (
pg_quote_identifier,
SQLParseError,
)
# Note: Using nose's generator test cases here so we can't inherit from
# unittest.TestCase
class TestQuotePgIdentifier(object):
    """Generator-style nose tests for pg_quote_identifier (cannot inherit
    from unittest.TestCase, which does not support test generators)."""

    # These are all valid strings
    # The results are based on interpreting the identifier as a table name
    # Maps input identifier -> expected fully-quoted form.
    valid = {
        # User quoted
        '"public.table"': '"public.table"',
        '"public"."table"': '"public"."table"',
        '"schema test"."table test"': '"schema test"."table test"',
        # We quote part
        'public.table': '"public"."table"',
        '"public".table': '"public"."table"',
        'public."table"': '"public"."table"',
        'schema test.table test': '"schema test"."table test"',
        '"schema test".table test': '"schema test"."table test"',
        'schema test."table test"': '"schema test"."table test"',
        # Embedded double quotes
        'table "test"': '"table ""test"""',
        'public."table ""test"""': '"public"."table ""test"""',
        'public.table "test"': '"public"."table ""test"""',
        'schema "test".table': '"schema ""test"""."table"',
        '"schema ""test""".table': '"schema ""test"""."table"',
        '"""wat"""."""test"""': '"""wat"""."""test"""',
        # Sigh, handle these as well:
        '"no end quote': '"""no end quote"',
        'schema."table': '"schema"."""table"',
        '"schema.table': '"""schema"."table"',
        'schema."table.something': '"schema"."""table"."something"',
        # Embedded dots
        '"schema.test"."table.test"': '"schema.test"."table.test"',
        '"schema.".table': '"schema."."table"',
        '"schema."."table"': '"schema."."table"',
        'schema.".table"': '"schema".".table"',
        '"schema".".table"': '"schema".".table"',
        '"schema.".".table"': '"schema.".".table"',
        # These are valid but maybe not what the user intended
        '."table"': '".""table"""',
        'table.': '"table."',
    }

    # Maps (identifier, id_type) -> expected SQLParseError message regex.
    invalid = {
        ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
        ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
        ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
        ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
        ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
        ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
        ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
        ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
        ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
        ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes',
        ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
    }

    def check_valid_quotes(self, identifier, quoted_identifier):
        # One positive case: quoting as a table name yields the expected form.
        eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)

    def test_valid_quotes(self):
        # Nose test generator: one sub-test per entry in `valid`.
        for identifier in self.valid:
            yield self.check_valid_quotes, identifier, self.valid[identifier]

    def check_invalid_quotes(self, identifier, id_type, msg):
        # One negative case: quoting must raise SQLParseError matching msg.
        assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))

    def test_invalid_quotes(self):
        # Nose test generator: one sub-test per entry in `invalid`.
        for test in self.invalid:
            yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]

    def test_how_many_dots(self):
        # Boundary checks: each identifier type allows a fixed number of
        # dotted components; one past the limit must raise.
        eq_(pg_quote_identifier('role', 'role'), '"role"')
        assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
        eq_(pg_quote_identifier('db', 'database'), '"db"')
        assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
        eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
        assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
        eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
        assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
        eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
        assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
|
adamcharnock/swiftwind | refs/heads/master | swiftwind/costs/exceptions.py | 2 |
class CannotEnactUnenactableRecurringCostError(Exception):
    """Raised on an attempt to enact a recurring cost that is unenactable."""


class CannotRecreateTransactionOnRecurredCost(Exception):
    """Raised on an attempt to recreate a transaction on a recurred cost."""


class NoSplitsFoundForRecurringCost(Exception):
    """Raised when no splits are found for a recurring cost."""


class ProvidedBillingCycleBeginsBeforeInitialBillingCycle(Exception):
    """Raised when the provided billing cycle begins before the initial one."""


class RecurringCostAlreadyEnactedForBillingCycle(Exception):
    """Raised when a recurring cost is already enacted for the billing cycle."""
|
ElitosGon/medgoproject | refs/heads/master | medgointranet/signals/doctor_signal.py | 1 | from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from medgointranet import models
@receiver(pre_save, sender=models.Doctor)
def model_pre_save_doctor(sender, instance, **kwargs):
    """Pre-save signal: sync a Doctor's estado_doctor (status) with changes
    to its is_active flag.

    NOTE(review): EstadoDoctor pk=1/pk=2 presumably map to active/inactive
    states — confirm against the EstadoDoctor fixtures.
    """
    # Only updates (existing pk) are compared against the stored row;
    # new doctors are left untouched.
    if instance.id:
        old_doctor = models.Doctor.objects.get(pk=instance.id)
        # Doctor was just activated.
        if instance.is_active and (instance.is_active != old_doctor.is_active):
            instance.estado_doctor = models.EstadoDoctor.objects.get(pk=1)
        # Doctor was just deactivated.
        if instance.is_active == False and (instance.is_active != old_doctor.is_active):
            instance.estado_doctor = models.EstadoDoctor.objects.get(pk=2)
|
Kazade/NeHe-Website | refs/heads/master | google_appengine/lib/django-1.2/django/db/backends/creation.py | 44 | import sys
import time
from django.conf import settings
from django.core.management import call_command
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
    def __init__(self, connection):
        # The DatabaseWrapper whose ops/settings this creation helper uses.
        self.connection = connection
    def _digest(self, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # NOTE(review): Python 2 only (`L` long literal); hash() of a tuple
        # is also not stable across interpreters, so digests are only
        # consistent within one process — fine for generated DDL.
        return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
    def sql_create_model(self, model, style, known_models=set()):
        """
        Returns the SQL required to create a single model, as a tuple of:
            (list_of_sql, pending_references_dict)

        Foreign keys to models not yet in `known_models` are deferred into
        pending_references_dict for later ALTER TABLE statements.
        """
        from django.db import models
        opts = model._meta
        # Unmanaged and proxy models get no DDL.
        if not opts.managed or opts.proxy:
            return [], {}
        final_output = []
        table_output = []
        pending_references = {}
        qn = self.connection.ops.quote_name
        for f in opts.local_fields:
            col_type = f.db_type(connection=self.connection)
            tablespace = f.db_tablespace or opts.db_tablespace
            if col_type is None:
                # Skip ManyToManyFields, because they're not represented as
                # database columns in this table.
                continue
            # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
            field_output = [style.SQL_FIELD(qn(f.column)),
                style.SQL_COLTYPE(col_type)]
            if not f.null:
                field_output.append(style.SQL_KEYWORD('NOT NULL'))
            if f.primary_key:
                field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
            elif f.unique:
                field_output.append(style.SQL_KEYWORD('UNIQUE'))
            if tablespace and f.unique:
                # We must specify the index tablespace inline, because we
                # won't be generating a CREATE INDEX statement for this field.
                field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
            if f.rel:
                ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
                if pending:
                    # Target table doesn't exist yet; record for a later
                    # ALTER TABLE (see sql_for_pending_references).
                    pr = pending_references.setdefault(f.rel.to, []).append((model, f))
                else:
                    field_output.extend(ref_output)
            table_output.append(' '.join(field_output))
        for field_constraints in opts.unique_together:
            table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
                ", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
        full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
        for i, line in enumerate(table_output): # Combine and add commas.
            full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
        full_statement.append(')')
        if opts.db_tablespace:
            full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
        full_statement.append(';')
        final_output.append('\n'.join(full_statement))
        if opts.has_auto_field:
            # Add any extra SQL needed to support auto-incrementing primary keys.
            auto_column = opts.auto_field.db_column or opts.auto_field.name
            autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
            if autoinc_sql:
                for stmt in autoinc_sql:
                    final_output.append(stmt)
        return final_output, pending_references
    def sql_for_inline_foreign_key_references(self, field, known_models, style):
        """Return (sql_snippets, pending) for a field's foreign key reference;
        pending is True when the target table hasn't been created yet."""
        qn = self.connection.ops.quote_name
        if field.rel.to in known_models:
            output = [style.SQL_KEYWORD('REFERENCES') + ' ' + \
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' + \
                style.SQL_FIELD(qn(field.rel.to._meta.get_field(field.rel.field_name).column)) + ')' +
                self.connection.ops.deferrable_sql()
            ]
            pending = False
        else:
            # We haven't yet created the table to which this field
            # is related, so save it for later.
            output = []
            pending = True
        return output, pending
    def sql_for_pending_references(self, model, style, pending_references):
        "Returns any ALTER TABLE statements to add constraints after the fact."
        from django.db.backends.util import truncate_name
        # Unmanaged and proxy models get no DDL.
        if not model._meta.managed or model._meta.proxy:
            return []
        qn = self.connection.ops.quote_name
        final_output = []
        opts = model._meta
        if model in pending_references:
            for rel_class, f in pending_references[model]:
                rel_opts = rel_class._meta
                r_table = rel_opts.db_table
                r_col = f.column
                table = opts.db_table
                col = opts.get_field(f.rel.field_name).column
                # For MySQL, r_name must be unique in the first 64 characters.
                # So we are careful with character usage here.
                r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
                final_output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % \
                    (qn(r_table), qn(truncate_name(r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            # All references to this model have been emitted; drop the entry.
            del pending_references[model]
        return final_output
    def sql_for_many_to_many(self, model, style):
        "Return the CREATE TABLE statements for all the many-to-many tables defined on a model"
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            PendingDeprecationWarning
        )
        output = []
        for f in model._meta.local_many_to_many:
            # Emit DDL only when at least one side of the relation is managed.
            if model._meta.managed or f.rel.to._meta.managed:
                output.extend(self.sql_for_many_to_many_field(model, f, style))
        return output
    def sql_for_many_to_many_field(self, model, f, style):
        "Return the CREATE TABLE statements for a single m2m field"
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            PendingDeprecationWarning
        )
        from django.db import models
        from django.db.backends.util import truncate_name
        output = []
        # Only auto-created join tables get DDL; explicit through models are
        # created like any other model.
        if f.auto_created:
            opts = model._meta
            qn = self.connection.ops.quote_name
            tablespace = f.db_tablespace or opts.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace, inline=True)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''
            table_output = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + \
                style.SQL_TABLE(qn(f.m2m_db_table())) + ' (']
            # Surrogate auto-increment primary key for the join table.
            table_output.append(' %s %s %s%s,' %
                (style.SQL_FIELD(qn('id')),
                style.SQL_COLTYPE(models.AutoField(primary_key=True).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL PRIMARY KEY'),
                tablespace_sql))
            deferred = []
            inline_output, deferred = self.sql_for_inline_many_to_many_references(model, f, style)
            table_output.extend(inline_output)
            # Each (source, target) pair may appear only once.
            table_output.append(' %s (%s, %s)%s' %
                (style.SQL_KEYWORD('UNIQUE'),
                style.SQL_FIELD(qn(f.m2m_column_name())),
                style.SQL_FIELD(qn(f.m2m_reverse_name())),
                tablespace_sql))
            table_output.append(')')
            if opts.db_tablespace:
                # f.db_tablespace is only for indices, so ignore its value here.
                table_output.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
            table_output.append(';')
            output.append('\n'.join(table_output))
            # Deferred FK constraints are added via ALTER TABLE afterwards.
            for r_table, r_col, table, col in deferred:
                r_name = '%s_refs_%s_%s' % (r_col, col, self._digest(r_table, table))
                output.append(style.SQL_KEYWORD('ALTER TABLE') + ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
                    (qn(r_table),
                    qn(truncate_name(r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            # Add any extra SQL needed to support auto-incrementing PKs
            autoinc_sql = self.connection.ops.autoinc_sql(f.m2m_db_table(), 'id')
            if autoinc_sql:
                for stmt in autoinc_sql:
                    output.append(stmt)
        return output
    def sql_for_inline_many_to_many_references(self, model, field, style):
        """Create the two FK column definitions (forward and reverse) required
        by a many-to-many join table; always returns an empty deferred list."""
        import warnings
        warnings.warn(
            'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
            PendingDeprecationWarning
        )
        from django.db import models
        opts = model._meta
        qn = self.connection.ops.quote_name
        table_output = [
            ' %s %s %s %s (%s)%s,' %
                (style.SQL_FIELD(qn(field.m2m_column_name())),
                style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(opts.db_table)),
                style.SQL_FIELD(qn(opts.pk.column)),
                self.connection.ops.deferrable_sql()),
            ' %s %s %s %s (%s)%s,' %
                (style.SQL_FIELD(qn(field.m2m_reverse_name())),
                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL REFERENCES'),
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)),
                style.SQL_FIELD(qn(field.rel.to._meta.pk.column)),
                self.connection.ops.deferrable_sql())
        ]
        deferred = []
        return table_output, deferred
def sql_indexes_for_model(self, model, style):
    """Return every CREATE INDEX statement needed for *model*."""
    meta = model._meta
    # Proxy and unmanaged models own no database table, hence no indexes.
    if meta.proxy or not meta.managed:
        return []
    statements = []
    for field in meta.local_fields:
        statements += self.sql_indexes_for_field(model, field, style)
    return statements
def sql_indexes_for_field(self, model, f, style):
    """Return the CREATE INDEX statement for one field, or [] if none is
    needed."""
    from django.db.backends.util import truncate_name
    # Unique fields already get an index from their UNIQUE constraint.
    if not f.db_index or f.unique:
        return []
    ops = self.connection.ops
    qn = ops.quote_name
    tablespace = f.db_tablespace or model._meta.db_tablespace
    tablespace_sql = ''
    if tablespace:
        ts = ops.tablespace_sql(tablespace)
        if ts:
            tablespace_sql = ' ' + ts
    index_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
    statement = (
        style.SQL_KEYWORD('CREATE INDEX') + ' ' +
        style.SQL_TABLE(qn(truncate_name(index_name, ops.max_name_length()))) + ' ' +
        style.SQL_KEYWORD('ON') + ' ' +
        style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
        "(%s)" % style.SQL_FIELD(qn(f.column)) +
        "%s;" % tablespace_sql
    )
    return [statement]
def sql_destroy_model(self, model, references_to_delete, style):
"Return the DROP TABLE and restraint dropping statements for a single model"
if not model._meta.managed or model._meta.proxy:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
    """Return ALTER TABLE statements dropping every FK that references
    *model*, and mark the model's entry as handled."""
    from django.db.backends.util import truncate_name
    meta = model._meta
    if meta.proxy or not meta.managed:
        return []
    ops = self.connection.ops
    qn = ops.quote_name
    statements = []
    for rel_class, field in references_to_delete[model]:
        referencing_table = rel_class._meta.db_table
        referenced_col = meta.get_field(field.rel.field_name).column
        constraint = '%s_refs_%s_%s' % (
            field.column, referenced_col,
            self._digest(referencing_table, meta.db_table))
        statements.append('%s %s %s %s;' % (
            style.SQL_KEYWORD('ALTER TABLE'),
            style.SQL_TABLE(qn(referencing_table)),
            style.SQL_KEYWORD(ops.drop_foreignkey_sql()),
            style.SQL_FIELD(qn(truncate_name(constraint, ops.max_name_length())))))
    # Remove the entry so callers do not emit these constraints twice.
    del references_to_delete[model]
    return statements
def sql_destroy_many_to_many(self, model, f, style):
    """Return DROP TABLE statements for one auto-created m2m join table."""
    import warnings
    warnings.warn(
        'Database creation API for m2m tables has been deprecated. M2M models are now automatically generated',
        PendingDeprecationWarning
    )
    ops = self.connection.ops
    statements = []
    # Only join tables Django created itself are Django's to destroy.
    if f.auto_created:
        statements.append("%s %s;" % (
            style.SQL_KEYWORD('DROP TABLE'),
            style.SQL_TABLE(ops.quote_name(f.m2m_db_table()))))
        drop_seq = ops.drop_sequence_sql(
            "%s_%s" % (model._meta.db_table, f.column))
        if drop_seq:
            statements.append(drop_seq)
    return statements
def create_test_db(self, verbosity=1, autoclobber=False):
    """
    Creates a test database, prompting the user for confirmation if the
    database already exists. Returns the name of the test database created.
    """
    if verbosity >= 1:
        print "Creating test database '%s'..." % self.connection.alias
    test_database_name = self._create_test_db(verbosity, autoclobber)
    self.connection.close()
    # Point this connection alias at the freshly created database for the
    # rest of the test run.
    self.connection.settings_dict["NAME"] = test_database_name
    # Probe whether this backend really rolls transactions back; the test
    # runner reads this flag to pick its fixture-teardown strategy.
    can_rollback = self._rollback_works()
    self.connection.settings_dict["SUPPORTS_TRANSACTIONS"] = can_rollback
    call_command('syncdb',
        verbosity=verbosity,
        interactive=False,
        database=self.connection.alias,
        load_initial_data=False)
    # We need to then do a flush to ensure that any data installed by
    # custom SQL has been removed. The only test data should come from
    # test fixtures, or autogenerated from post_syncdb triggers.
    # This has the side effect of loading initial data (which was
    # intentionally skipped in the syncdb).
    call_command('flush',
        verbosity=verbosity,
        interactive=False,
        database=self.connection.alias)
    # Database-backed caching needs its table inside the test database too,
    # subject to the router allowing it on this alias.
    if settings.CACHE_BACKEND.startswith('db://'):
        from django.core.cache import parse_backend_uri, cache
        from django.db import router
        if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
            _, cache_name, _ = parse_backend_uri(settings.CACHE_BACKEND)
            call_command('createcachetable', cache_name, database=self.connection.alias)
    # Get a cursor (even though we don't need one yet). This has
    # the side effect of initializing the test database.
    cursor = self.connection.cursor()
    return test_database_name
def _create_test_db(self, verbosity, autoclobber):
    "Internal implementation - creates the test db tables."
    suffix = self.sql_table_creation_suffix()
    # An explicit TEST_NAME setting overrides the "test_<NAME>" default.
    if self.connection.settings_dict['TEST_NAME']:
        test_database_name = self.connection.settings_dict['TEST_NAME']
    else:
        test_database_name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    qn = self.connection.ops.quote_name
    # Create the test database and connect to it. We need to autocommit
    # if the database supports it because PostgreSQL doesn't allow
    # CREATE/DROP DATABASE statements within transactions.
    cursor = self.connection.cursor()
    self.set_autocommit()
    try:
        cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
    except Exception, e:
        # Creation failed — most likely the database already exists. Offer
        # to drop and recreate it (or do so silently under autoclobber).
        sys.stderr.write("Got an error creating the test database: %s\n" % e)
        if not autoclobber:
            confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
        if autoclobber or confirm == 'yes':
            try:
                if verbosity >= 1:
                    print "Destroying old test database..."
                cursor.execute("DROP DATABASE %s" % qn(test_database_name))
                if verbosity >= 1:
                    print "Creating test database..."
                cursor.execute("CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
            except Exception, e:
                # Recreation failed too; nothing sensible left to do.
                sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                sys.exit(2)
        else:
            print "Tests cancelled."
            sys.exit(1)
    return test_database_name
def _rollback_works(self):
    """Probe the backend: create a scratch table, roll back an INSERT, and
    report whether the rolled-back row really vanished (i.e. whether the
    backend supports transactions)."""
    conn = self.connection
    cursor = conn.cursor()
    cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
    conn._commit()
    cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
    conn._rollback()
    cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
    (row_count,) = cursor.fetchone()
    # Clean up the scratch table before reporting.
    cursor.execute('DROP TABLE ROLLBACK_TEST')
    conn._commit()
    return row_count == 0
def destroy_test_db(self, old_database_name, verbosity=1):
    """
    Destroy a test database, prompting the user for confirmation if the
    database already exists. Returns the name of the test database created.
    """
    if verbosity >= 1:
        print "Destroying test database '%s'..." % self.connection.alias
    self.connection.close()
    # Restore the original database name on this alias before dropping:
    # the DROP has to run while connected to a *different* database.
    test_database_name = self.connection.settings_dict['NAME']
    self.connection.settings_dict['NAME'] = old_database_name
    self._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
    "Internal implementation - remove the test db tables."
    # Remove the test database to clean up after
    # ourselves. Connect to the previous database (not the test database)
    # to do so, because it's not allowed to delete a database while being
    # connected to it.
    cursor = self.connection.cursor()
    # Autocommit is required: some backends (notably PostgreSQL) refuse to
    # run DROP DATABASE inside a transaction.
    self.set_autocommit()
    time.sleep(1) # To avoid "database is being accessed by other users" errors.
    cursor.execute("DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name))
    self.connection.close()
def set_autocommit(self):
    """Put the underlying DB-API connection into autocommit mode.

    Drivers expose this differently: some offer an ``autocommit`` method,
    some a plain attribute, and some (e.g. psycopg2) an isolation level
    where 0 means autocommit.
    """
    raw = self.connection.connection
    if hasattr(raw, "autocommit"):
        if callable(raw.autocommit):
            raw.autocommit(True)
        else:
            raw.autocommit = True
    elif hasattr(raw, "set_isolation_level"):
        raw.set_isolation_level(0)
def sql_table_creation_suffix(self):
    """Hook for backends that must append extra SQL (encoding, collation,
    tablespace, ...) to CREATE DATABASE statements. Default: nothing."""
    return ''
def test_db_signature(self):
    """
    Returns a tuple with elements of self.connection.settings_dict (a
    DATABASES setting value) that uniquely identify a database
    accordingly to the RDBMS particularities.
    """
    conf = self.connection.settings_dict
    # Host + port + engine + name is enough to tell two databases apart.
    return tuple(conf[key] for key in ('HOST', 'PORT', 'ENGINE', 'NAME'))
|
PlayCircular/play_circular | refs/heads/master | apps/actividades/forms.py | 1 | # coding=utf-8
# Copyright (C) 2014 by Víctor Romero Blanco <info at playcircular dot com>.
# http://playcircular.com/
# It's licensed under the AFFERO GENERAL PUBLIC LICENSE unless stated otherwise.
# You can get copies of the licenses here: http://www.affero.org/oagpl.html
# AFFERO GENERAL PUBLIC LICENSE is also included in the file called "LICENSE".
from django import forms
from actividades.models import *
from django.utils.translation import ugettext as _
from utilidades.combox import *
from actividades.models import *
from grupos.models import *
from django.forms import ModelMultipleChoiceField
from django.forms.models import BaseInlineFormSet
from django.db.models import Q
############################################################################################################################
class Form_valorar_actividad(forms.Form):
    """Rating form for an activity: a personal score, a common-good score
    and an optional free-text comment."""
    # Hidden bookkeeping: target object id plus pagination/return state so
    # the view can redirect back to the right listing page after saving.
    objeto = forms.IntegerField(widget=forms.HiddenInput())
    pagina = forms.IntegerField(widget=forms.HiddenInput())
    n_paginas = forms.IntegerField(widget=forms.HiddenInput())
    retorno = forms.IntegerField(widget=forms.HiddenInput())
    # "For me" valuation (like + rating).
    me_gusta_para_mi = forms.IntegerField()
    rating_para_mi = forms.IntegerField()
    # "Common good" valuation (like + rating).
    bien_comun = forms.IntegerField()
    rating_para_bien_comun = forms.IntegerField()
    comentario = forms.CharField(widget=forms.Textarea(), label=_(u"Comentario"),max_length=400,required=False)
############################################################################################################################
class Form_busqueda(forms.Form):
    """Search/filter form for the activities listing (free text plus
    class/type/group filters and a sort order)."""
    busqueda = forms.CharField(max_length=100, label=_(u"Búsqueda"),required=False)
    # Choice tuples (CLASE_ACTIVIDAD, TIPO_ACTIVIDAD, GRUPOS_BUSQUEDA,
    # ORDEN) come from the model modules imported at the top of this file.
    clase = forms.ChoiceField(choices=CLASE_ACTIVIDAD, label=_(u"Clase"),required=False)
    tipo = forms.ChoiceField(choices=TIPO_ACTIVIDAD, label=_(u"Tipo"),required=False)
    grupos = forms.ChoiceField(choices=GRUPOS_BUSQUEDA, label=_(u"En qué grupos"),required=False)
    orden = forms.ChoiceField(choices=ORDEN, label=_(u"Ordenar por"),required=False)
############################################################################################################################
class Form_categoria_actividad_admin(forms.ModelForm):
    """Admin form for Categoria that restricts the selectable groups to the
    ones the logged-in user administers (superusers see everything)."""
    class Meta:
        model = Categoria
    def __init__(self, *args, **kwargs):
        # request/user are injected by the admin site; pop them before the
        # stock ModelForm sees the kwargs.
        self.request = kwargs.pop('request', None) # Now you can access request anywhere in your form methods by using self.request.
        user = kwargs.pop('user', None)
        super(Form_categoria_actividad_admin, self).__init__(*args, **kwargs)
        if user.is_superuser:
            self.fields['grupo'].queryset = Grupo.objects.all()
        else:
            # Groups where this user is an active administrator.
            qs_grupos_administrados = Miembro.objects.filter(usuario=user,activo=True,nivel=u'Administrador').values_list('grupo', flat=True)
            if len(qs_grupos_administrados)>0:
                self.fields['grupo'].queryset = Grupo.objects.filter(pk__in=qs_grupos_administrados,activo=True)
            else:
                # FIX: the original computed qs_grupos and then discarded
                # it, leaving the default queryset (all groups) visible to
                # plain members. Mirror Form_actividad_admin below and
                # restrict the choices to the user's own groups.
                qs_grupos = Miembro.objects.filter(usuario=user).values_list('grupo', flat=True)
                self.fields['grupo'].queryset = Grupo.objects.filter(pk__in=qs_grupos,activo=True)
    def clean(self):
        try:
            grupos = self.cleaned_data['grupo']
            usuario = self.request.user
            #----- Groups --------
            n_grupos = len(grupos)
            if n_grupos == 0 and not usuario.is_superuser:
                # The superadmin may publish without belonging to any group
                # so that group admins cannot moderate him.
                raise forms.ValidationError(_(u"El campo grupo es obligatorio."))
            return self.cleaned_data
        except KeyError:
            raise forms.ValidationError(_("Soluciona estos problemas"), code='invalid')
############################################################################################################################
class Form_actividad_admin(forms.ModelForm):
    """Admin form for Actividad: limits group/user/category choices to what
    the logged-in user may administer, and cross-validates that the chosen
    user and category actually belong to the chosen groups."""
    class Meta:
        model = Actividad
        fields = ('grupo','superadmin','usuario','clase','tipo','categoria','precio_moneda_social','activo')
    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None) # Now you can access request anywhere in your form methods by using self.request.
        user = kwargs.pop('user', None)
        super(Form_actividad_admin, self).__init__(*args, **kwargs)
        if user.is_superuser:
            # Superusers see every group, user and category.
            self.fields['grupo'].queryset = Grupo.objects.all()
            self.fields['usuario'].queryset = User.objects.all()
            self.fields['categoria'].queryset = Categoria.objects.all()
        else:
            # Groups where this user is an active administrator.
            qs_grupos_administrados = Miembro.objects.filter(usuario=user,activo=True,nivel=u'Administrador').values_list('grupo', flat=True)
            if len(qs_grupos_administrados)>0:
                # Group admin: offer their groups, the active members of
                # those groups, and those groups' categories (plus global
                # superadmin categories).
                qs_miembros_administrados = Miembro.objects.filter(grupo__in=qs_grupos_administrados,activo=True).values_list('usuario', flat=True)
                self.fields['grupo'].queryset = Grupo.objects.filter(pk__in=qs_grupos_administrados,activo=True)
                self.fields['usuario'].queryset = User.objects.filter(pk__in=qs_miembros_administrados)
                self.fields['categoria'].queryset = Categoria.objects.filter(Q(grupo__in=qs_grupos_administrados) | Q(superadmin=True))
            else:
                # Plain member: only their own groups, themselves, and the
                # categories visible to those groups.
                qs_grupos = Miembro.objects.filter(usuario=user).values_list('grupo', flat=True)
                self.fields['grupo'].queryset = Grupo.objects.filter(pk__in=qs_grupos,activo=True)
                self.fields['usuario'].queryset = User.objects.filter(pk=user.pk)
                self.fields['categoria'].queryset = Categoria.objects.filter(Q(grupo__in=qs_grupos) | Q(superadmin=True))
    def clean_precio_moneda_social(self):
        # Price in social currency must be strictly positive.
        precio_moneda_social = self.cleaned_data['precio_moneda_social']
        if precio_moneda_social <= 0:
            raise forms.ValidationError(_(u"Introduce una cantidad positiva."))
        return self.cleaned_data['precio_moneda_social']
    def clean(self):
        try:
            grupos = self.cleaned_data['grupo']
            usuario = self.cleaned_data['usuario']
            categoria = self.cleaned_data['categoria']
            #----- Groups --------
            n_grupos = len(grupos)
            if n_grupos == 0 and not usuario.is_superuser:
                # The superadmin may publish without belonging to any group
                # so that group admins cannot moderate him.
                raise forms.ValidationError(_(u"El campo grupo es obligatorio."))
            #----- User --------
            n_usuario = Miembro.objects.filter(grupo__in=grupos,usuario=usuario,activo=True).count()
            if n_usuario == 0 and not usuario.is_superuser:
                # Same superadmin exemption as above.
                raise forms.ValidationError(_(u"El usuario elegido no pertenece a ninguno de los grupos seleccionados."))
            #------ Category ---------
            if categoria and categoria.superadmin == False:
                # Global (superadmin) categories are always allowed; group
                # categories must belong to one of the selected groups.
                if n_grupos > 0:
                    n_categoria = Categoria.objects.filter(pk=categoria.pk,grupo__in=grupos).count()
                    if n_categoria == 0:
                        raise forms.ValidationError(_(u"La categoria elegida no pertenece a ninuno de los grupos selecionados"))
            return self.cleaned_data
        except KeyError:
            raise forms.ValidationError(_("Soluciona estos problemas"), code='invalid')
############################################################################################################################
class Form_Actividad(forms.ModelForm):
    """Public (non-admin) Actividad form limited to the user's own groups."""
    class Meta:
        model = Actividad
    def __init__(self, *args, **kwargs):
        user = kwargs.pop('user', None)
        super(Form_Actividad, self).__init__(*args, **kwargs)
        # NOTE(review): .values('grupo') yields dicts, not Grupo instances,
        # unlike the Grupo.objects.filter(pk__in=...) pattern used by the
        # admin forms above — confirm this renders/validates as intended.
        self.fields['grupo'].queryset = Miembro.objects.filter(usuario=user).values('grupo')
#######################################################################################################################################################
class Idioma_requerido_formset(BaseInlineFormSet):
    """Inline formset that refuses to validate unless at least one
    non-deleted idioma (language) row has been filled in."""

    def clean(self):
        super(Idioma_requerido_formset, self).clean()
        # With per-form errors present, cleaned_data is unreliable; let
        # those errors surface first.
        if any(self.errors):
            return
        filled = sum(
            1 for data in self.cleaned_data
            if data and not data.get('DELETE', False)
        )
        if filled < 1:
            raise forms.ValidationError(_("Es necesario introducir al menos un idioma para la actividad."))
############################################################################################################################
|
duralog/repo | refs/heads/master | color.py | 36 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pager
# Color and attribute names accepted in git-style color configuration,
# mapped to their ANSI SGR numbers; -1 means "not set".
COLORS = {None: -1,
          'normal': -1,
          'black': 0,
          'red': 1,
          'green': 2,
          'yellow': 3,
          'blue': 4,
          'magenta': 5,
          'cyan': 6,
          'white': 7}

ATTRS = {None: -1,
         'bold': 1,
         'dim': 2,
         'ul': 4,
         'blink': 5,
         'reverse': 7}

RESET = "\033[m"  # pylint: disable=W1401
                  # backslash is not anomalous


def is_color(s):
    """Return True if *s* names a known foreground/background color."""
    return s in COLORS


def is_attr(s):
    """Return True if *s* names a known text attribute."""
    return s in ATTRS


def _Color(fg=None, bg=None, attr=None):
    """Build the ANSI escape sequence for the given color/attribute names.

    Returns '' when nothing is requested.
    """
    fg = COLORS[fg]
    bg = COLORS[bg]
    attr = ATTRS[attr]
    if fg < 0 and bg < 0 and attr < 0:
        return ''
    parts = []
    if attr >= 0:
        parts.append(chr(ord('0') + attr))
    if fg >= 0:
        # Basic 8-color SGR code, or the 256-color form for larger values.
        parts.append('3%c' % (ord('0') + fg) if fg < 8 else '38;5;%d' % fg)
    if bg >= 0:
        parts.append('4%c' % (ord('0') + bg) if bg < 8 else '48;5;%d' % bg)
    return '\033[%sm' % ';'.join(parts)  # pylint: disable=W1401
class Coloring(object):
    """Writes optionally-colorized output to a stream, honoring the user's
    git-style color configuration ('color.<section>' / 'color.ui')."""
    def __init__(self, config, section_type):
        self._section = 'color.%s' % section_type
        self._config = config
        self._out = sys.stdout
        # Per-section setting wins; fall back to the global 'color.ui'.
        on = self._config.GetString(self._section)
        if on is None:
            on = self._config.GetString('color.ui')
        if on == 'auto':
            # Only colorize when writing to a pager or a real terminal.
            if pager.active or os.isatty(1):
                self._on = True
            else:
                self._on = False
        elif on in ('true', 'always'):
            self._on = True
        else:
            self._on = False
    def redirect(self, out):
        # Swap the output stream (e.g. to a buffer).
        self._out = out
    @property
    def is_on(self):
        # True when escape sequences will actually be emitted.
        return self._on
    def write(self, fmt, *args):
        self._out.write(fmt % args)
    def flush(self):
        self._out.flush()
    def nl(self):
        self._out.write('\n')
    def printer(self, opt=None, fg=None, bg=None, attr=None):
        # Return a printf-style function that writes colorized output.
        s = self
        c = self.colorer(opt, fg, bg, attr)
        def f(fmt, *args):
            s._out.write(c(fmt, *args))
        return f
    def nofmt_printer(self, opt=None, fg=None, bg=None, attr=None):
        # Like printer(), but the argument is written verbatim (no '%').
        s = self
        c = self.nofmt_colorer(opt, fg, bg, attr)
        def f(fmt):
            s._out.write(c(fmt))
        return f
    def colorer(self, opt=None, fg=None, bg=None, attr=None):
        # Return a function that %-formats its arguments and, when color is
        # enabled, wraps the result in the resolved escape codes.
        if self._on:
            c = self._parse(opt, fg, bg, attr)
            def f(fmt, *args):
                output = fmt % args
                return ''.join([c, output, RESET])
            return f
        else:
            def f(fmt, *args):
                return fmt % args
            return f
    def nofmt_colorer(self, opt=None, fg=None, bg=None, attr=None):
        # Same as colorer(), but without %-formatting of the text.
        if self._on:
            c = self._parse(opt, fg, bg, attr)
            def f(fmt):
                return ''.join([c, fmt, RESET])
            return f
        else:
            def f(fmt):
                return fmt
            return f
    def _parse(self, opt, fg, bg, attr):
        # Resolve the escape sequence for *opt*: a user-configured
        # 'color.<section>.<opt>' value (e.g. "bold red") overrides the
        # supplied fg/bg/attr defaults.
        if not opt:
            return _Color(fg, bg, attr)
        v = self._config.GetString('%s.%s' % (self._section, opt))
        if v is None:
            return _Color(fg, bg, attr)
        v = v.strip().lower()
        if v == "reset":
            return RESET
        elif v == '':
            return _Color(fg, bg, attr)
        # NOTE(review): have_fg is never set to True, so a second color
        # name overwrites the foreground and bg is never assigned from the
        # config — looks like a latent bug (git semantics: first color is
        # foreground, second is background); confirm against upstream repo.
        have_fg = False
        for a in v.split(' '):
            if is_color(a):
                if have_fg:
                    bg = a
                else:
                    fg = a
            elif is_attr(a):
                attr = a
        return _Color(fg, bg, attr)
|
pools/JaikuEngine | refs/heads/master | common/properties.py | 34 | #!/usr/bin/python2.4
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'termie@google.com (Andy Smith)'
import re
import time, datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from google.appengine.ext import db
from google.appengine.api.datastore_types import Blob
# strptime patterns matching Django's default date/time string formats.
DJANGO_DATE = "%Y-%m-%d"
DJANGO_TIME = "%H:%M:%S"
class DateTimeProperty(db.DateTimeProperty):
    """DateTimeProperty that also accepts Django-formatted
    "YYYY-MM-DD HH:MM:SS[.fraction]" strings."""
    def validate(self, value):
        """Validate a datetime, attempt to convert from string
        Returns:
          A valid datetime object
        """
        # XXX termie: pretty naive at this point, ask for forgiveness
        try:
            us = 0
            # Split off a trailing ".123" fractional-seconds suffix and
            # scale it to microseconds: right-pad to 6 digits, then
            # truncate anything beyond microsecond precision.
            m_fractional = re.search('(.*)\.(\d+)$', value)
            if (m_fractional):
                value = m_fractional.group(1)
                fractional_s = m_fractional.group(2)
                scaled_to_us = fractional_s + '0' * (6 - len(fractional_s))
                truncated_to_us = scaled_to_us[0:6]
                us = int(truncated_to_us)
            t = time.strptime(value, "%s %s"%(DJANGO_DATE, DJANGO_TIME))
            # Rebuild a datetime from the parsed Y/M/D H:M:S fields plus
            # the microseconds computed above.
            t = (t)[0:6] + (us,)
            d = datetime.datetime(*t)
            value = d
        except ValueError, e:
            # eat the error: leave value untouched and let the base class
            # decide whether it is acceptable
            pass
        except TypeError, e:
            # we passed it a datetime, probably, let the orignal handle this
            pass
        value = super(DateTimeProperty, self).validate(value)
        return value
class DictProperty(db.Property):
    """Model property that stores an arbitrary Python dict, pickled into a
    datastore Blob."""

    def validate(self, value):
        """Run base validation, then require the value to be a dict.

        Raises:
          db.BadValueError: if the value is not a dict.
        """
        value = super(DictProperty, self).validate(value)
        if not isinstance(value, dict):
            # FIX: was a bare Exception("NOT A DICT ..."); BadValueError is
            # the documented type for property validation failures and is
            # still caught by any existing `except Exception` handlers.
            raise db.BadValueError(
                'Property %s must be a dict, got %r' % (self.name, value))
        return value

    def default_value(self):
        # A fresh dict per entity — never share a mutable default.
        return {}

    def datastore_type(self):
        return Blob

    def get_value_for_datastore(self, model_instance):
        """Pickle the dict into a Blob for storage."""
        value = super(DictProperty, self).get_value_for_datastore(model_instance)
        return Blob(pickle.dumps(value, protocol=-1))

    def make_value_from_datastore(self, value):
        """Unpickle the stored Blob back into a dict.

        (FIX: the parameter was misleadingly named ``model_instance``; this
        hook receives the raw datastore value. The framework calls it
        positionally, so the rename is backward-compatible.)
        """
        value = super(DictProperty, self).make_value_from_datastore(value)
        return pickle.loads(str(value))
|
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | refs/heads/master | tests/unit/dataactvalidator/test_a32_appropriations.py | 1 | from dataactcore.models.stagingModels import Appropriation
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
# SQL rule fixture name and the TAS prefix used to build unique test values.
_FILE = 'a32_appropriations'
_TAS = 'a32_appropriations_tas'
def test_column_headers(database):
    """The rule's SQL must expose at least the full set of TAS columns."""
    expected_subset = {'row_number', 'allocation_transfer_agency', 'agency_identifier',
                       'beginning_period_of_availa', 'ending_period_of_availabil',
                       'availability_type_code', 'main_account_code', 'sub_account_code'}
    actual = set(query_columns(_FILE, database))
    # Subset check: the rule may expose extra columns beyond these.
    assert expected_subset <= actual
def test_success(database):
    """ Tests that TAS values in File A are not unique """
    tas = _TAS + "_success"
    rows = [
        Appropriation(job_id=1, row_number=1, tas=tas),
        Appropriation(job_id=1, row_number=2, tas='1'),
    ]
    # Two distinct TAS values: the uniqueness rule must not fire.
    assert number_of_errors(_FILE, database, models=rows) == 0
def test_failure(database):
    """ Tests that TAS values in File A are unique """
    tas = _TAS + "_failure"
    rows = [
        Appropriation(job_id=1, row_number=1, tas=tas),
        Appropriation(job_id=1, row_number=2, tas=tas),
    ]
    # Both rows share one TAS, so both rows are flagged.
    assert number_of_errors(_FILE, database, models=rows) == 2
|
gregdek/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_ironic_inspect.py | 31 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_ironic_inspect
short_description: Explicitly triggers baremetal node introspection in ironic.
extends_documentation_fragment: openstack
author: "Julia Kreger (@juliakreger)"
version_added: "2.1"
description:
- Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
This command may be out of band or in-band depending on the ironic driver configuration.
This is only possible on nodes in 'manageable' and 'available' state.
options:
mac:
description:
- unique mac address that is used to attempt to identify the host.
uuid:
description:
- globally unique identifier (UUID) to identify the host.
name:
description:
- unique name identifier to identify the host in Ironic.
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
Use with "auth" and "auth_type" settings set to None.
timeout:
description:
- A timeout in seconds to tell the role to wait for the node to complete introspection if wait is set to True.
default: 1200
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
RETURN = '''
ansible_facts:
    description: Dictionary of new facts representing discovered properties of the node.
    returned: changed
    type: complex
    contains:
        memory_mb:
            description: Amount of node memory as updated in the node properties
            type: str
            sample: "1024"
        cpu_arch:
            description: Detected CPU architecture type
            type: str
            sample: "x86_64"
        local_gb:
            description: Total size of local disk storage as updated in node properties.
            type: str
            sample: "10"
        cpus:
            description: Count of cpu cores defined in the updated node properties.
            type: str
            sample: "1"
'''
EXAMPLES = '''
# Invoke node inspection
- os_ironic_inspect:
name: "testnode1"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _choose_id_value(module):
    """Pick the host identifier to look up: a truthy uuid wins over a
    truthy name; None when neither is set."""
    return module.params['uuid'] or module.params['name'] or None
def main():
    """Ansible entry point: locate the baremetal node and trigger its
    introspection, returning the discovered properties as facts."""
    argument_spec = openstack_full_argument_spec(
        auth_type=dict(required=False),
        uuid=dict(required=False),
        name=dict(required=False),
        mac=dict(required=False),
        ironic_url=dict(required=False),
        timeout=dict(default=1200, type='int', required=False),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    # In noauth mode an explicit ironic_url is mandatory.
    if (module.params['auth_type'] in [None, 'None'] and
            module.params['ironic_url'] is None):
        module.fail_json(msg="Authentication appears to be disabled, "
                             "Please define an ironic_url parameter")
    # With auth disabled, feed the bare endpoint through as the auth dict.
    if (module.params['ironic_url'] and
            module.params['auth_type'] in [None, 'None']):
        module.params['auth'] = dict(
            endpoint=module.params['ironic_url']
        )
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        # Resolve the node by name/uuid first, falling back to MAC lookup.
        if module.params['name'] or module.params['uuid']:
            server = cloud.get_machine(_choose_id_value(module))
        elif module.params['mac']:
            server = cloud.get_machine_by_mac(module.params['mac'])
        else:
            module.fail_json(msg="The worlds did not align, "
                                 "the host was not found as "
                                 "no name, uuid, or mac was "
                                 "defined.")
        if server:
            cloud.inspect_machine(server['uuid'], module.params['wait'])
            # TODO(TheJulia): diff properties, ?and ports? and determine
            # if a change occurred. In theory, the node is always changed
            # if introspection is able to update the record.
            module.exit_json(changed=True,
                             ansible_facts=server['properties'])
        else:
            module.fail_json(msg="node not found.")
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == "__main__":
    main()
|
ayushagrawal288/zamboni | refs/heads/master | mkt/websites/indexers.py | 8 | import re
from operator import attrgetter
from urlparse import urlparse
from mkt.search.indexers import BaseIndexer
from mkt.tags.models import attach_tags
from mkt.translations.models import attach_trans_dict
# Pattern of URL cruft stripped before tokenizing a site URL for search.
URL_RE = re.compile(
    r'^www\.|^m\.|^mobile\.|' # Remove common subdomains.
    r'\.com$|\.net$|\.org$|\.\w{2}$' # Remove common TLDs incl. ccTLDs.
)
class WebsiteIndexer(BaseIndexer):
translated_fields = ('description', 'name', 'short_name', 'title')
fields_with_language_analyzers = ('description', 'name', 'short_name')
hidden_fields = (
'*.raw',
'*_sort',
'popularity_*',
'trending_*',
'boost',
# 'name', 'short_name' and 'description', as well as their locale
# variants ('name_l10n_<language>', etc.) are only used for the query
# matches, and are never returned to the client through the API. The
# fields that are returned to the API are '*_translations'.
'description',
'name',
'short_name',
'description_l10n_*',
'name_l10n_*',
'short_name_l10n_*',
# Title is not analyzed with language-specific analyzers but again, we
# need `title_translations` for the API, `title` is only used for
# querying.
'title',
)
@classmethod
def get_mapping_type_name(cls):
return 'website'
@classmethod
def get_model(cls):
"""Returns the Django model this MappingType relates to"""
from mkt.websites.models import Website
return Website
@classmethod
def get_mapping(cls):
"""Returns an Elasticsearch mapping for this MappingType"""
doc_type = cls.get_mapping_type_name()
mapping = {
doc_type: {
'_all': {'enabled': False},
'properties': {
'id': {'type': 'long'},
'category': cls.string_not_analyzed(),
'created': {'type': 'date', 'format': 'dateOptionalTime'},
'description': {'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100},
'default_locale': cls.string_not_indexed(),
'device': {'type': 'byte'},
'icon_hash': cls.string_not_indexed(),
'icon_type': cls.string_not_indexed(),
'is_disabled': {'type': 'boolean'},
'last_updated': {'format': 'dateOptionalTime',
'type': 'date'},
# 'url' is already indexed, no need to also index the
# mobile-specific URL.
'mobile_url': cls.string_not_indexed(),
'modified': {'type': 'date', 'format': 'dateOptionalTime'},
'name': {
'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100,
# For exact matches. Referenced as `name.raw`.
'fields': {
'raw': cls.string_not_analyzed(
position_offset_gap=100)
},
},
# Name for sorting.
'name_sort': cls.string_not_analyzed(doc_values=True),
'preferred_regions': {'type': 'short'},
'promo_img_hash': cls.string_not_indexed(),
'reviewed': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
'short_name': {'type': 'string',
'analyzer': 'default_icu'},
'status': {'type': 'byte'},
'tags': cls.string_not_analyzed(),
'title': {
'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100,
},
'url': cls.string_not_analyzed(),
'url_tokenized': {'type': 'string', 'analyzer': 'simple'},
}
}
}
# Attach boost field, because we are going to need search by relevancy.
cls.attach_boost_mapping(mapping)
# Attach popularity and trending.
cls.attach_trending_and_popularity_mappings(mapping)
# Add extra mapping for translated fields, containing the "raw"
# translations.
cls.attach_translation_mappings(mapping, cls.translated_fields)
# Add language-specific analyzers.
cls.attach_language_specific_analyzers(
mapping, cls.fields_with_language_analyzers)
return mapping
@classmethod
def extract_document(cls, pk=None, obj=None):
    """Build the Elasticsearch document dict for a single website.

    Either *obj* (a model instance) or *pk* (its primary key) must be
    given; when only *pk* is supplied, the instance is fetched here.
    """
    if obj is None:
        obj = cls.get_model().objects.get(pk=pk)
    # Attach translations for searching and indexing.
    attach_trans_dict(cls.get_model(), [obj])
    # Attach tags (keywords).
    attach_tags([obj])
    simple_attrs = ('created', 'default_locale', 'id', 'icon_hash',
                    'icon_type', 'is_disabled', 'last_updated', 'mobile_url',
                    'modified', 'promo_img_hash', 'status', 'url')
    doc = {attr: getattr(obj, attr) for attr in simple_attrs}
    doc['category'] = obj.categories or []
    doc['device'] = obj.devices or []
    doc['name_sort'] = unicode(obj.name).lower()
    doc['preferred_regions'] = obj.preferred_regions or []
    doc['tags'] = getattr(obj, 'keywords_list', [])
    doc['url_tokenized'] = cls.strip_url(obj.url)
    # Websites are not reviewed for now (data is injected manually), so
    # reuse last_updated as the review date.
    doc['reviewed'] = obj.last_updated
    # Boost, popularity and trending values.
    doc.update(cls.extract_popularity_trending_boost(obj))
    # Localized fields: adds both the field used for search and the one
    # carrying all translations for the API.
    for field in cls.translated_fields:
        doc.update(cls.extract_field_translations(
            obj, field, include_field_for_search=True))
    # Language-specific analyzers.
    for field in cls.fields_with_language_analyzers:
        doc.update(cls.extract_field_analyzed_translations(obj, field))
    return doc
@classmethod
def strip_url(cls, url):
    """Reduce *url* to the parts worth indexing.

    Drops the scheme, common subdomains and TLDs, keeping the remaining
    netloc plus the path: 'https://m.domain.com/topic/' becomes
    'domain/topic/', which Elasticsearch then tokenizes into 'domain'
    and 'topic'. Best-effort only — it removes a majority of the cruft,
    not all of it.
    """
    parsed = urlparse(url)
    # Start from the netloc alone, with common subdomains/TLDs stripped
    # by the module-level URL_RE pattern.
    stripped = URL_RE.sub('', parsed.netloc)
    # Keep whatever was in the path.
    return stripped + parsed.path
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source - Copy/Lib/importlib/_bootstrap_external.py | 14 | """Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
# `make regen-importlib` followed by `make` in order to get the frozen version
# of the module updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module in the early
# stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
# Platforms with case-insensitive filesystems, split by whether their
# environment uses a str key ('win') or a bytes key (cygwin/darwin) to
# look up PYTHONCASEOK in _make_relax_case() below.
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY
                               + _CASE_INSENSITIVE_PLATFORMS_STR_KEY)
def _make_relax_case():
    """Return a closure saying whether filename matching is case-insensitive.

    On case-insensitive platforms the returned function reports True only
    when PYTHONCASEOK is present in the environment (str key on Windows,
    bytes key on cygwin/darwin); elsewhere it is a constant False.
    """
    if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
        if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY):
            key = 'PYTHONCASEOK'
        else:
            key = b'PYTHONCASEOK'

        def _relax_case():
            """True if filenames must be checked case-insensitively."""
            # Re-checked on every call so changes to os.environ take effect.
            return key in _os.environ
    else:
        def _relax_case():
            """True if filenames must be checked case-insensitively."""
            return False
    return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
def _path_join(*path_parts):
    """Replacement for os.path.join()."""
    # Empty components are dropped and trailing separators stripped from
    # each part before joining with the platform's primary separator.
    return path_sep.join([part.rstrip(path_separators)
                          for part in path_parts if part])
def _path_split(path):
    """Replacement for os.path.split()."""
    if len(path_separators) == 1:
        # Fast path: only one possible separator (POSIX).
        front, _, tail = path.rpartition(path_sep)
        return front, tail
    # Several possible separators (Windows): split on the last one found.
    for x in reversed(path):
        if x in path_separators:
            front, tail = path.rsplit(x, maxsplit=1)
            return front, tail
    # No separator at all: the whole path is the tail.
    return '', path
def _path_stat(path):
    """Stat the path.

    Made a separate function to make it easier to override in experiments
    (e.g. cache stat results).
    """
    # Uses the injected _os module; must not be called at import time.
    return _os.stat(path)
def _path_is_mode_type(path, mode):
    """Test whether the path is the specified mode type."""
    try:
        stat_info = _path_stat(path)
    except OSError:
        return False
    # Compare only the file-type bits (0o170000 == stat.S_IFMT mask).
    return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
    """Replacement for os.path.isfile."""
    # 0o100000 == stat.S_IFREG (regular file).
    return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
    """Replacement for os.path.isdir."""
    if not path:
        # An empty path means the current working directory.
        path = _os.getcwd()
    # 0o040000 == stat.S_IFDIR (directory).
    return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
    """Best-effort function to write data to a path atomically.
    Be prepared to handle a FileExistsError if concurrent writing of the
    temporary file is attempted."""
    # id() is used to generate a pseudo-random filename.
    path_tmp = '{}.{}'.format(path, id(path))
    # O_EXCL makes the open fail if another writer races us to the same
    # temporary name; permission bits beyond 0o666 are masked off.
    fd = _os.open(path_tmp,
                  _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
    try:
        # We first write data to a temporary file, and then use os.replace() to
        # perform an atomic rename.
        with _io.FileIO(fd, 'wb') as file:
            file.write(data)
        _os.replace(path_tmp, path)
    except OSError:
        # Clean up the temporary file on failure; ignore errors doing so.
        try:
            _os.unlink(path_tmp)
        except OSError:
            pass
        raise


# The concrete code-object type, used to recognize unmarshalled code objects.
_code_type = type(_write_atomic.__code__)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT
#3021)
# Python 3.1a1: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD #2183)
# Python 3.1a1: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
#4715)
# Python 3.2a1: 3160 (add SETUP_WITH #6101)
# tag: cpython-32
# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225)
# tag: cpython-32
# Python 3.2a3 3180 (add DELETE_DEREF #4617)
# Python 3.3a1 3190 (__class__ super closure changed)
# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448)
# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645)
# Python 3.3a2 3220 (changed PEP 380 implementation #14230)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults #16967)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars #17853)
# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation #19301)
# Python 3.4a4 3300 (more changes to __qualname__ computation #19301)
# Python 3.4rc2 3310 (alter __qualname__ computation #20625)
# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176)
# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292)
# Python 3.5b2 3340 (fix dictionary display evaluation order #11205)
# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400)
# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286)
# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483)
# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107)
# Python 3.6a2 3370 (16 bit wordcode #26647)
# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140)
# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE
# #27095)
# Python 3.6b1 3373 (add BUILD_STRING opcode #27078)
# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes
# #27985)
# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL
#27213)
# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722)
# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257)
# Python 3.6rc1 3379 (more thorough __class__ validation #23722)
# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110)
# Python 3.7a2 3391 (update GET_AITER #31709)
# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650)
# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550)
#     Python 3.7b5  3394 (restored docstring as the first stmt in the body;
#                         this might have affected the first line number #32911)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
#
# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
# in PC/launcher.c must also be updated.
# The b'\r\n' tail catches pyc files corrupted by text-mode I/O (see the
# comment block above).
MAGIC_NUMBER = (3394).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little')  # For import.c
_PYCACHE = '__pycache__'
_OPT = 'opt-'
SOURCE_SUFFIXES = ['.py']  # _setup() adds .pyw as needed.
BYTECODE_SUFFIXES = ['.pyc']
# Deprecated.
DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES
def cache_from_source(path, debug_override=None, *, optimization=None):
    """Given the path to a .py file, return the path to its .pyc file.

    The .py file does not need to exist; this simply returns the path to the
    .pyc file calculated as if the .py file were imported.

    The 'optimization' parameter controls the presumed optimization level of
    the bytecode file. If 'optimization' is not None, the string representation
    of the argument is taken and verified to be alphanumeric (else ValueError
    is raised).

    The debug_override parameter is deprecated. If debug_override is not None,
    a True value is the same as setting 'optimization' to the empty string
    while a False value is equivalent to setting 'optimization' to '1'.

    If sys.implementation.cache_tag is None then NotImplementedError is raised.
    """
    if debug_override is not None:
        _warnings.warn('the debug_override parameter is deprecated; use '
                       "'optimization' instead", DeprecationWarning)
        if optimization is not None:
            message = 'debug_override or optimization must be set to None'
            raise TypeError(message)
        # Map the legacy flag onto the modern optimization level.
        optimization = '' if debug_override else 1
    path = _os.fspath(path)
    head, tail = _path_split(path)
    base, sep, rest = tail.rpartition('.')
    tag = sys.implementation.cache_tag
    if tag is None:
        raise NotImplementedError('sys.implementation.cache_tag is None')
    # e.g. 'foo.py' -> 'foo.cpython-37'; a dotless filename keeps its name.
    almost_filename = ''.join([(base if base else rest), sep, tag])
    if optimization is None:
        # Default to the interpreter's current optimization level.
        if sys.flags.optimize == 0:
            optimization = ''
        else:
            optimization = sys.flags.optimize
    optimization = str(optimization)
    if optimization != '':
        if not optimization.isalnum():
            raise ValueError('{!r} is not alphanumeric'.format(optimization))
        # PEP 488: append '.opt-<level>' before the suffix.
        almost_filename = '{}.{}{}'.format(almost_filename, _OPT, optimization)
    return _path_join(head, _PYCACHE, almost_filename + BYTECODE_SUFFIXES[0])
def source_from_cache(path):
    """Given the path to a .pyc file, return the path to its .py file.

    The .pyc file does not need to exist; this simply returns the path to
    the .py file calculated to correspond to the .pyc file. If path does
    not conform to PEP 3147/488 format, ValueError will be raised. If
    sys.implementation.cache_tag is None then NotImplementedError is raised.
    """
    if sys.implementation.cache_tag is None:
        raise NotImplementedError('sys.implementation.cache_tag is None')
    path = _os.fspath(path)
    head, pycache_filename = _path_split(path)
    head, pycache = _path_split(head)
    # The file must live directly inside a __pycache__ directory.
    if pycache != _PYCACHE:
        raise ValueError('{} not bottom-level directory in '
                         '{!r}'.format(_PYCACHE, path))
    # 2 dots: 'name.tag.pyc'; 3 dots: 'name.tag.opt-N.pyc' (PEP 488).
    dot_count = pycache_filename.count('.')
    if dot_count not in {2, 3}:
        raise ValueError('expected only 2 or 3 dots in '
                         '{!r}'.format(pycache_filename))
    elif dot_count == 3:
        optimization = pycache_filename.rsplit('.', 2)[-2]
        if not optimization.startswith(_OPT):
            raise ValueError("optimization portion of filename does not start "
                             "with {!r}".format(_OPT))
        opt_level = optimization[len(_OPT):]
        if not opt_level.isalnum():
            raise ValueError("optimization level {!r} is not an alphanumeric "
                             "value".format(optimization))
    base_filename = pycache_filename.partition('.')[0]
    return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
def _get_sourcefile(bytecode_path):
    """Convert a bytecode file path to a source path (if possible).

    This function exists purely for backwards-compatibility for
    PyImport_ExecCodeModuleWithFilenames() in the C API.
    """
    if len(bytecode_path) == 0:
        return None
    rest, _, extension = bytecode_path.rpartition('.')
    # Only handle extensions that look like 'py?' (e.g. .pyc, .pyo).
    if not rest or extension.lower()[-3:-1] != 'py':
        return bytecode_path
    try:
        source_path = source_from_cache(bytecode_path)
    except (NotImplementedError, ValueError):
        # Legacy layout: assume the .py lives alongside, minus the final 'c'.
        source_path = bytecode_path[:-1]
    return source_path if _path_isfile(source_path) else bytecode_path
def _get_cached(filename):
    """Return the cached bytecode path for *filename*, or None.

    A source file maps to its __pycache__ pyc (when the implementation
    supports one); a bytecode file maps to itself; anything else to None.
    """
    if filename.endswith(tuple(SOURCE_SUFFIXES)):
        try:
            return cache_from_source(filename)
        except NotImplementedError:
            return None
    if filename.endswith(tuple(BYTECODE_SUFFIXES)):
        return filename
    return None
def _calc_mode(path):
    """Calculate the mode permissions for a bytecode file."""
    try:
        source_mode = _path_stat(path).st_mode
    except OSError:
        source_mode = 0o666
    # We always ensure write access so we can update cached files
    # later even when the source files are read-only on Windows (#6074).
    return source_mode | 0o200
def _check_name(method):
    """Decorator to verify that the module being requested matches the one the
    loader can handle.

    The first argument (self) must define _name which the second argument is
    compared against. If the comparison fails then ImportError is raised.
    """
    def _check_name_wrapper(self, name=None, *args, **kwargs):
        if name is None:
            name = self.name
        elif self.name != name:
            raise ImportError('loader for %s cannot handle %s' %
                              (self.name, name), name=name)
        return method(self, name, *args, **kwargs)
    # During early bootstrap _bootstrap._wrap may not exist yet; fall back
    # to a local copy that mirrors its attribute-copying behavior.
    try:
        _wrap = _bootstrap._wrap
    except NameError:
        # XXX yuck
        def _wrap(new, old):
            for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
                if hasattr(old, replace):
                    setattr(new, replace, getattr(old, replace))
            new.__dict__.update(old.__dict__)
    _wrap(_check_name_wrapper, method)
    return _check_name_wrapper
def _find_module_shim(self, fullname):
    """Try to find a loader for the specified module by delegating to
    self.find_loader().

    This method is deprecated in favor of finder.find_spec().
    """
    # Call find_loader(). If it returns a string (indicating this
    # is a namespace package portion), generate a warning and
    # return None.
    loader, portions = self.find_loader(fullname)
    if loader is None and len(portions):
        msg = 'Not importing directory {}: missing __init__'
        _warnings.warn(msg.format(portions[0]), ImportWarning)
    return loader
def _classify_pyc(data, name, exc_details):
    """Perform basic validity checking of a pyc header and return the flags field,
    which determines how the pyc should be further validated against the source.

    *data* is the contents of the pyc file. (Only the first 16 bytes are
    required, though.)

    *name* is the name of the module being imported. It is used for logging.

    *exc_details* is a dictionary passed to ImportError if it raised for
    improved debugging.

    ImportError is raised when the magic number is incorrect or when the flags
    field is invalid. EOFError is raised when the data is found to be truncated.
    """
    # PEP 552 header layout: magic (4 bytes), flags (4), then two words
    # whose meaning depends on the flags (mtime/size or source hash).
    magic = data[:4]
    if magic != MAGIC_NUMBER:
        message = f'bad magic number in {name!r}: {magic!r}'
        _bootstrap._verbose_message('{}', message)
        raise ImportError(message, **exc_details)
    if len(data) < 16:
        message = f'reached EOF while reading pyc header of {name!r}'
        _bootstrap._verbose_message('{}', message)
        raise EOFError(message)
    flags = _r_long(data[4:8])
    # Only the first two flags are defined.
    if flags & ~0b11:
        message = f'invalid flags {flags!r} in {name!r}'
        raise ImportError(message, **exc_details)
    return flags
def _validate_timestamp_pyc(data, source_mtime, source_size, name,
                            exc_details):
    """Validate a pyc against the source last-modified time.

    *data* is the contents of the pyc file. (Only the first 16 bytes are
    required.)

    *source_mtime* is the last modified timestamp of the source file.

    *source_size* is None or the size of the source file in bytes.

    *name* is the name of the module being imported. It is used for logging.

    *exc_details* is a dictionary passed to ImportError if it raised for
    improved debugging.

    An ImportError is raised if the bytecode is stale.
    """
    # Header words 3 and 4 (bytes 8-16) carry mtime and size, each stored
    # modulo 2**32 — hence the masking before comparison.
    if _r_long(data[8:12]) != (source_mtime & 0xFFFFFFFF):
        message = f'bytecode is stale for {name!r}'
        _bootstrap._verbose_message('{}', message)
        raise ImportError(message, **exc_details)
    if (source_size is not None and
            _r_long(data[12:16]) != (source_size & 0xFFFFFFFF)):
        raise ImportError(f'bytecode is stale for {name!r}', **exc_details)
def _validate_hash_pyc(data, source_hash, name, exc_details):
    """Validate a hash-based pyc by checking the real source hash against the one in
    the pyc header.

    *data* is the contents of the pyc file. (Only the first 16 bytes are
    required.)

    *source_hash* is the importlib.util.source_hash() of the source file.

    *name* is the name of the module being imported. It is used for logging.

    *exc_details* is a dictionary passed to ImportError if it raised for
    improved debugging.

    An ImportError is raised if the bytecode is stale.
    """
    # Bytes 8-16 of a hash-based pyc hold the expected 8-byte source hash.
    if data[8:16] != source_hash:
        raise ImportError(
            f'hash in bytecode doesn\'t match hash of source {name!r}',
            **exc_details,
        )
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
    """Compile bytecode as found in a pyc."""
    code = marshal.loads(data)
    if isinstance(code, _code_type):
        _bootstrap._verbose_message('code object from {!r}', bytecode_path)
        if source_path is not None:
            # Make co_filename point at the source file, not the pyc.
            _imp._fix_co_filename(code, source_path)
        return code
    else:
        raise ImportError('Non-code object in {!r}'.format(bytecode_path),
                          name=name, path=bytecode_path)
def _code_to_timestamp_pyc(code, mtime=0, source_size=0):
    "Produce the data for a timestamp-based pyc."
    data = bytearray(MAGIC_NUMBER)
    data.extend(_w_long(0))  # flags == 0 marks a timestamp-based pyc.
    data.extend(_w_long(mtime))
    data.extend(_w_long(source_size))
    data.extend(marshal.dumps(code))
    return data
def _code_to_hash_pyc(code, source_hash, checked=True):
    "Produce the data for a hash-based pyc."
    data = bytearray(MAGIC_NUMBER)
    # Bit 0 marks the pyc as hash-based; bit 1 records whether the hash
    # should still be checked against the source at import time (PEP 552).
    flags = 0b1 | checked << 1
    data.extend(_w_long(flags))
    assert len(source_hash) == 8
    data.extend(source_hash)
    data.extend(marshal.dumps(code))
    return data
def decode_source(source_bytes):
    """Decode bytes representing source code and return the string.

    Universal newline support is used in the decoding.
    """
    import tokenize  # To avoid bootstrap issues.
    source_bytes_readline = _io.BytesIO(source_bytes).readline
    # detect_encoding() honours a PEP 263 coding cookie and the BOM.
    encoding = tokenize.detect_encoding(source_bytes_readline)
    newline_decoder = _io.IncrementalNewlineDecoder(None, True)
    return newline_decoder.decode(source_bytes.decode(encoding[0]))
# Module specifications #######################################################
# Sentinel distinguishing "not passed" from an explicit None argument.
_POPULATE = object()


def spec_from_file_location(name, location=None, *, loader=None,
                            submodule_search_locations=_POPULATE):
    """Return a module spec based on a file location.

    To indicate that the module is a package, set
    submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
    import system.

    The loader must take a spec as its only __init__() arg.
    """
    if location is None:
        # The caller may simply want a partially populated location-
        # oriented spec. So we set the location to a bogus value and
        # fill in as much as we can.
        location = '<unknown>'
        if hasattr(loader, 'get_filename'):
            # ExecutionLoader
            try:
                location = loader.get_filename(name)
            except ImportError:
                pass
    else:
        location = _os.fspath(location)

    # If the location is on the filesystem, but doesn't actually exist,
    # we could return None here, indicating that the location is not
    # valid. However, we don't have a good way of testing since an
    # indirect location (e.g. a zip file or URL) will look like a
    # non-existent file relative to the filesystem.

    spec = _bootstrap.ModuleSpec(name, loader, origin=location)
    spec._set_fileattr = True

    # Pick a loader if one wasn't provided.
    if loader is None:
        for loader_class, suffixes in _get_supported_file_loaders():
            if location.endswith(tuple(suffixes)):
                loader = loader_class(name, location)
                spec.loader = loader
                break
        else:
            # No loader handles this suffix: the location is unusable.
            return None

    # Set submodule_search_paths appropriately.
    if submodule_search_locations is _POPULATE:
        # Check the loader.
        if hasattr(loader, 'is_package'):
            try:
                is_package = loader.is_package(name)
            except ImportError:
                pass
            else:
                if is_package:
                    spec.submodule_search_locations = []
    else:
        spec.submodule_search_locations = submodule_search_locations
    if spec.submodule_search_locations == []:
        # A package with no explicit search path defaults to its own dir.
        if location:
            dirname = _path_split(location)[0]
            spec.submodule_search_locations.append(dirname)

    return spec
# Loaders #####################################################################
class WindowsRegistryFinder:

    """Meta path finder for modules declared in the Windows registry."""

    REGISTRY_KEY = (
        'Software\\Python\\PythonCore\\{sys_version}'
        '\\Modules\\{fullname}')
    REGISTRY_KEY_DEBUG = (
        'Software\\Python\\PythonCore\\{sys_version}'
        '\\Modules\\{fullname}\\Debug')
    DEBUG_BUILD = False  # Changed in _setup()

    @classmethod
    def _open_registry(cls, key):
        # Prefer the per-user hive; fall back to the machine-wide one.
        try:
            return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
        except OSError:
            return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)

    @classmethod
    def _search_registry(cls, fullname):
        # Return the registered file path for the module, or None.
        if cls.DEBUG_BUILD:
            registry_key = cls.REGISTRY_KEY_DEBUG
        else:
            registry_key = cls.REGISTRY_KEY
        key = registry_key.format(fullname=fullname,
                                  sys_version='%d.%d' % sys.version_info[:2])
        try:
            with cls._open_registry(key) as hkey:
                filepath = _winreg.QueryValue(hkey, '')
        except OSError:
            return None
        return filepath

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        filepath = cls._search_registry(fullname)
        if filepath is None:
            return None
        try:
            # Make sure the registered file actually exists.
            _path_stat(filepath)
        except OSError:
            return None
        for loader, suffixes in _get_supported_file_loaders():
            if filepath.endswith(tuple(suffixes)):
                spec = _bootstrap.spec_from_loader(fullname,
                                                   loader(fullname, filepath),
                                                   origin=filepath)
                return spec

    @classmethod
    def find_module(cls, fullname, path=None):
        """Find module named in the registry.

        This method is deprecated. Use find_spec() instead.
        """
        spec = cls.find_spec(fullname, path)
        if spec is not None:
            return spec.loader
        else:
            return None
class _LoaderBasics:

    """Base class of common code needed by both SourceLoader and
    SourcelessFileLoader."""

    def is_package(self, fullname):
        """Concrete implementation of InspectLoader.is_package by checking if
        the path returned by get_filename has a filename of '__init__.py'."""
        filename = _path_split(self.get_filename(fullname))[1]
        filename_base = filename.rsplit('.', 1)[0]
        tail_name = fullname.rpartition('.')[2]
        return filename_base == '__init__' and tail_name != '__init__'

    def create_module(self, spec):
        """Use default semantics for module creation."""

    def exec_module(self, module):
        """Execute the module."""
        code = self.get_code(module.__name__)
        if code is None:
            raise ImportError('cannot load module {!r} when get_code() '
                              'returns None'.format(module.__name__))
        # Run with import-machinery frames stripped from tracebacks.
        _bootstrap._call_with_frames_removed(exec, code, module.__dict__)

    def load_module(self, fullname):
        """This method is deprecated."""
        return _bootstrap._load_module_shim(self, fullname)
class SourceLoader(_LoaderBasics):

    """Abstract loader for modules that have Python source available.

    Subclasses must provide get_filename() and get_data(); implementing
    path_stats() (or path_mtime()) enables reading cached bytecode, and
    set_data() enables writing it.
    """

    def path_mtime(self, path):
        """Optional method that returns the modification time (an int) for the
        specified path (a str).

        Raises OSError when the path cannot be handled.
        """
        raise OSError

    def path_stats(self, path):
        """Optional method returning a metadata dict for the specified
        path (a str).

        Possible keys:
        - 'mtime' (mandatory) is the numeric timestamp of last source
          code modification;
        - 'size' (optional) is the size in bytes of the source code.

        Implementing this method allows the loader to read bytecode files.
        Raises OSError when the path cannot be handled.
        """
        return {'mtime': self.path_mtime(path)}

    def _cache_bytecode(self, source_path, cache_path, data):
        """Optional method which writes data (bytes) to a file path (a str).

        Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
        """
        # For backwards compatibility, we delegate to set_data()
        return self.set_data(cache_path, data)

    def set_data(self, path, data):
        """Optional method which writes data (bytes) to a file path (a str).

        Implementing this method allows for the writing of bytecode files.
        """

    def get_source(self, fullname):
        """Concrete implementation of InspectLoader.get_source."""
        path = self.get_filename(fullname)
        try:
            source_bytes = self.get_data(path)
        except OSError as exc:
            raise ImportError('source not available through get_data()',
                              name=fullname) from exc
        return decode_source(source_bytes)

    def source_to_code(self, data, path, *, _optimize=-1):
        """Return the code object compiled from source.

        The 'data' argument can be any object type that compile() supports.
        """
        return _bootstrap._call_with_frames_removed(compile, data, path, 'exec',
                                                    dont_inherit=True,
                                                    optimize=_optimize)

    def get_code(self, fullname):
        """Concrete implementation of InspectLoader.get_code.

        Reading of bytecode requires path_stats to be implemented. To write
        bytecode, set_data must also be implemented.
        """
        source_path = self.get_filename(fullname)
        source_mtime = None
        source_bytes = None
        source_hash = None
        hash_based = False
        check_source = True
        try:
            bytecode_path = cache_from_source(source_path)
        except NotImplementedError:
            bytecode_path = None
        else:
            try:
                st = self.path_stats(source_path)
            except OSError:
                pass
            else:
                source_mtime = int(st['mtime'])
                try:
                    data = self.get_data(bytecode_path)
                except OSError:
                    pass
                else:
                    exc_details = {
                        'name': fullname,
                        'path': bytecode_path,
                    }
                    try:
                        flags = _classify_pyc(data, fullname, exc_details)
                        bytes_data = memoryview(data)[16:]
                        hash_based = flags & 0b1 != 0
                        if hash_based:
                            check_source = flags & 0b10 != 0
                            if (_imp.check_hash_based_pycs != 'never' and
                                (check_source or
                                 _imp.check_hash_based_pycs == 'always')):
                                source_bytes = self.get_data(source_path)
                                source_hash = _imp.source_hash(
                                    _RAW_MAGIC_NUMBER,
                                    source_bytes,
                                )
                                _validate_hash_pyc(data, source_hash, fullname,
                                                   exc_details)
                        else:
                            _validate_timestamp_pyc(
                                data,
                                source_mtime,
                                st['size'],
                                fullname,
                                exc_details,
                            )
                    except (ImportError, EOFError):
                        # Stale/corrupt bytecode: fall through and recompile.
                        pass
                    else:
                        _bootstrap._verbose_message('{} matches {}',
                                                    bytecode_path, source_path)
                        return _compile_bytecode(bytes_data, name=fullname,
                                                 bytecode_path=bytecode_path,
                                                 source_path=source_path)
        if source_bytes is None:
            source_bytes = self.get_data(source_path)
        code_object = self.source_to_code(source_bytes, source_path)
        _bootstrap._verbose_message('code object from {}', source_path)
        if (not sys.dont_write_bytecode and bytecode_path is not None and
                source_mtime is not None):
            if hash_based:
                if source_hash is None:
                    # Bug fix: _imp.source_hash() requires the raw magic
                    # number as its key argument (as in the checked-hash
                    # branch above); the previous single-argument call
                    # raised TypeError when writing an unchecked
                    # hash-based pyc (bpo-34351).
                    source_hash = _imp.source_hash(_RAW_MAGIC_NUMBER,
                                                   source_bytes)
                data = _code_to_hash_pyc(code_object, source_hash,
                                         check_source)
            else:
                data = _code_to_timestamp_pyc(code_object, source_mtime,
                                              len(source_bytes))
            try:
                self._cache_bytecode(source_path, bytecode_path, data)
                _bootstrap._verbose_message('wrote {!r}', bytecode_path)
            except NotImplementedError:
                pass
        return code_object
class FileLoader:

    """Base file loader class which implements the loader protocol methods that
    require file system usage."""

    def __init__(self, fullname, path):
        """Cache the module name and the path to the file found by the
        finder."""
        self.name = fullname
        self.path = path

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                self.__dict__ == other.__dict__)

    def __hash__(self):
        return hash(self.name) ^ hash(self.path)

    @_check_name
    def load_module(self, fullname):
        """Load a module from a file.

        This method is deprecated. Use exec_module() instead.
        """
        # The only reason for this method is for the name check.
        # Issue #14857: Avoid the zero-argument form of super so the implementation
        # of that form can be updated without breaking the frozen module
        return super(FileLoader, self).load_module(fullname)

    @_check_name
    def get_filename(self, fullname):
        """Return the path to the source file as found by the finder."""
        return self.path

    def get_data(self, path):
        """Return the data from path as raw bytes."""
        with _io.FileIO(path, 'r') as file:
            return file.read()

    # ResourceReader ABC API.

    @_check_name
    def get_resource_reader(self, module):
        """Return self as the resource reader, but only for packages."""
        if self.is_package(module):
            return self
        return None

    def open_resource(self, resource):
        """Open *resource* (relative to this module's directory) for reading."""
        path = _path_join(_path_split(self.path)[0], resource)
        return _io.FileIO(path, 'r')

    def resource_path(self, resource):
        """Return the file-system path of *resource*, if it exists."""
        if not self.is_resource(resource):
            raise FileNotFoundError
        path = _path_join(_path_split(self.path)[0], resource)
        return path

    def is_resource(self, name):
        """True if *name* is a file next to this module (no separators)."""
        if path_sep in name:
            return False
        path = _path_join(_path_split(self.path)[0], name)
        return _path_isfile(path)

    def contents(self):
        """Iterate over the names of entries in this module's directory."""
        return iter(_os.listdir(_path_split(self.path)[0]))
class SourceFileLoader(FileLoader, SourceLoader):

    """Concrete implementation of SourceLoader using the file system."""

    def path_stats(self, path):
        """Return the metadata for the path."""
        st = _path_stat(path)
        return {'mtime': st.st_mtime, 'size': st.st_size}

    def _cache_bytecode(self, source_path, bytecode_path, data):
        # Adapt between the two APIs: derive the pyc's permission bits
        # from the source file's mode.
        mode = _calc_mode(source_path)
        return self.set_data(bytecode_path, data, _mode=mode)

    def set_data(self, path, data, *, _mode=0o666):
        """Write bytes data to a file."""
        parent, filename = _path_split(path)
        path_parts = []
        # Figure out what directories are missing.
        while parent and not _path_isdir(parent):
            parent, part = _path_split(parent)
            path_parts.append(part)
        # Create needed directories.
        for part in reversed(path_parts):
            parent = _path_join(parent, part)
            try:
                _os.mkdir(parent)
            except FileExistsError:
                # Probably another Python process already created the dir.
                continue
            except OSError as exc:
                # Could be a permission error, read-only filesystem: just forget
                # about writing the data.
                _bootstrap._verbose_message('could not create {!r}: {!r}',
                                            parent, exc)
                return
        try:
            _write_atomic(path, data, _mode)
            _bootstrap._verbose_message('created {!r}', path)
        except OSError as exc:
            # Same as above: just don't write the bytecode.
            _bootstrap._verbose_message('could not create {!r}: {!r}', path,
                                        exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
    """Loader which handles sourceless file imports."""
    def get_code(self, fullname):
        """Return the code object compiled from the .pyc file's payload."""
        path = self.get_filename(fullname)
        data = self.get_data(path)
        # Call _classify_pyc to do basic validation of the pyc but ignore the
        # result. There's no source to check against.
        exc_details = {
            'name': fullname,
            'path': path,
        }
        _classify_pyc(data, fullname, exc_details)
        # Skip the 16-byte pyc header (magic, flags, validation fields)
        # before unmarshalling; memoryview avoids copying the payload.
        return _compile_bytecode(
            memoryview(data)[16:],
            name=fullname,
            bytecode_path=path,
        )
    def get_source(self, fullname):
        """Return None as there is no source code."""
        return None
# Filled in by _setup().
# File-name suffixes recognized for extension modules; populated at
# bootstrap time from _imp.extension_suffixes().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader(FileLoader, _LoaderBasics):
    """Loader for extension modules.
    The constructor is designed to work with FileFinder.
    """
    def __init__(self, name, path):
        # Same state as FileLoader.__init__, kept explicit here.
        self.name = name
        self.path = path
    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                self.__dict__ == other.__dict__)
    def __hash__(self):
        # Paired with __eq__ above so equal loaders hash alike.
        return hash(self.name) ^ hash(self.path)
    def create_module(self, spec):
        """Create an unitialized extension module"""
        # _call_with_frames_removed keeps importlib frames out of tracebacks.
        module = _bootstrap._call_with_frames_removed(
            _imp.create_dynamic, spec)
        _bootstrap._verbose_message('extension module {!r} loaded from {!r}',
                                    spec.name, self.path)
        return module
    def exec_module(self, module):
        """Initialize an extension module"""
        _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module)
        _bootstrap._verbose_message('extension module {!r} executed from {!r}',
                                    self.name, self.path)
    def is_package(self, fullname):
        """Return True if the extension module is a package."""
        # A package's extension init file is named __init__ plus one of the
        # registered extension suffixes.
        file_name = _path_split(self.path)[1]
        return any(file_name == '__init__' + suffix
                   for suffix in EXTENSION_SUFFIXES)
    def get_code(self, fullname):
        """Return None as an extension module cannot create a code object."""
        return None
    def get_source(self, fullname):
        """Return None as extension modules have no source code."""
        return None
    @_check_name
    def get_filename(self, fullname):
        """Return the path to the source file as found by the finder."""
        return self.path
class _NamespacePath:
    """Represents a namespace package's path. It uses the module name
    to find its parent module, and from there it looks up the parent's
    __path__. When this changes, the module's own path is recomputed,
    using path_finder. For top-level modules, the parent module's path
    is sys.path."""
    def __init__(self, name, path, path_finder):
        self._name = name
        self._path = path
        # Snapshot of the parent's path, used to detect changes lazily.
        self._last_parent_path = tuple(self._get_parent_path())
        self._path_finder = path_finder
    def _find_parent_path_names(self):
        """Returns a tuple of (parent-module-name, parent-path-attr-name)"""
        parent, dot, me = self._name.rpartition('.')
        if dot == '':
            # This is a top-level module. sys.path contains the parent path.
            return 'sys', 'path'
        # Not a top-level module. parent-module.__path__ contains the
        # parent path.
        return parent, '__path__'
    def _get_parent_path(self):
        parent_module_name, path_attr_name = self._find_parent_path_names()
        return getattr(sys.modules[parent_module_name], path_attr_name)
    def _recalculate(self):
        # If the parent's path has changed, recalculate _path
        parent_path = tuple(self._get_parent_path()) # Make a copy
        if parent_path != self._last_parent_path:
            spec = self._path_finder(self._name, parent_path)
            # Note that no changes are made if a loader is returned, but we
            # do remember the new parent path
            if spec is not None and spec.loader is None:
                if spec.submodule_search_locations:
                    self._path = spec.submodule_search_locations
            self._last_parent_path = parent_path     # Save the copy
        return self._path
    def __iter__(self):
        return iter(self._recalculate())
    def __setitem__(self, index, path):
        # Direct assignment intentionally skips recalculation.
        self._path[index] = path
    def __len__(self):
        return len(self._recalculate())
    def __repr__(self):
        return '_NamespacePath({!r})'.format(self._path)
    def __contains__(self, item):
        return item in self._recalculate()
    def append(self, item):
        self._path.append(item)
# We use this exclusively in module_from_spec() for backward-compatibility.
class _NamespaceLoader:
    """Loader for namespace packages (packages without an __init__ file)."""
    def __init__(self, name, path, path_finder):
        self._path = _NamespacePath(name, path, path_finder)
    @classmethod
    def module_repr(cls, module):
        """Return repr for the module.
        The method is deprecated. The import machinery does the job itself.
        """
        return '<module {!r} (namespace)>'.format(module.__name__)
    def is_package(self, fullname):
        # A namespace module is always a package.
        return True
    def get_source(self, fullname):
        # Empty source: namespace packages have no code of their own.
        return ''
    def get_code(self, fullname):
        return compile('', '<string>', 'exec', dont_inherit=True)
    def create_module(self, spec):
        """Use default semantics for module creation."""
    def exec_module(self, module):
        # Nothing to execute for a namespace package.
        pass
    def load_module(self, fullname):
        """Load a namespace module.
        This method is deprecated. Use exec_module() instead.
        """
        # The import system never calls this method.
        _bootstrap._verbose_message('namespace module loaded with path {!r}',
                                    self._path)
        return _bootstrap._load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
    """Meta path finder for sys.path and package __path__ attributes."""
    @classmethod
    def invalidate_caches(cls):
        """Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_caches (where implemented)."""
        # Iterate over a copy so entries can be deleted during the loop.
        for name, finder in list(sys.path_importer_cache.items()):
            if finder is None:
                del sys.path_importer_cache[name]
            elif hasattr(finder, 'invalidate_caches'):
                finder.invalidate_caches()
    @classmethod
    def _path_hooks(cls, path):
        """Search sys.path_hooks for a finder for 'path'.

        Returns None when no hook accepts the path.
        """
        if sys.path_hooks is not None and not sys.path_hooks:
            _warnings.warn('sys.path_hooks is empty', ImportWarning)
        for hook in sys.path_hooks:
            try:
                return hook(path)
            except ImportError:
                # ImportError means "this hook does not handle that path".
                continue
        else:
            return None
    @classmethod
    def _path_importer_cache(cls, path):
        """Get the finder for the path entry from sys.path_importer_cache.
        If the path entry is not in the cache, find the appropriate finder
        and cache it. If no finder is available, store None.
        """
        if path == '':
            try:
                path = _os.getcwd()
            except FileNotFoundError:
                # Don't cache the failure as the cwd can easily change to
                # a valid directory later on.
                return None
        try:
            finder = sys.path_importer_cache[path]
        except KeyError:
            finder = cls._path_hooks(path)
            sys.path_importer_cache[path] = finder
        return finder
    @classmethod
    def _legacy_get_spec(cls, fullname, finder):
        # This would be a good place for a DeprecationWarning if
        # we ended up going that route.
        # Adapts pre-PEP-451 finders (find_loader/find_module) to specs.
        if hasattr(finder, 'find_loader'):
            loader, portions = finder.find_loader(fullname)
        else:
            loader = finder.find_module(fullname)
            portions = []
        if loader is not None:
            return _bootstrap.spec_from_loader(fullname, loader)
        spec = _bootstrap.ModuleSpec(fullname, None)
        spec.submodule_search_locations = portions
        return spec
    @classmethod
    def _get_spec(cls, fullname, path, target=None):
        """Find the loader or namespace_path for this module/package name."""
        # If this ends up being a namespace package, namespace_path is
        # the list of paths that will become its __path__
        namespace_path = []
        for entry in path:
            if not isinstance(entry, (str, bytes)):
                continue
            finder = cls._path_importer_cache(entry)
            if finder is not None:
                if hasattr(finder, 'find_spec'):
                    spec = finder.find_spec(fullname, target)
                else:
                    spec = cls._legacy_get_spec(fullname, finder)
                if spec is None:
                    continue
                if spec.loader is not None:
                    return spec
                portions = spec.submodule_search_locations
                if portions is None:
                    raise ImportError('spec missing loader')
                # This is possibly part of a namespace package.
                #  Remember these path entries (if any) for when we
                #  create a namespace package, and continue iterating
                #  on path.
                namespace_path.extend(portions)
        else:
            # No concrete module anywhere on path; report the accumulated
            # namespace portions (possibly empty) via a loaderless spec.
            spec = _bootstrap.ModuleSpec(fullname, None)
            spec.submodule_search_locations = namespace_path
            return spec
    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """Try to find a spec for 'fullname' on sys.path or 'path'.
        The search is based on sys.path_hooks and sys.path_importer_cache.
        """
        if path is None:
            path = sys.path
        spec = cls._get_spec(fullname, path, target)
        if spec is None:
            return None
        elif spec.loader is None:
            namespace_path = spec.submodule_search_locations
            if namespace_path:
                # We found at least one namespace path.  Return a spec which
                # can create the namespace package.
                spec.origin = None
                spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
                return spec
            else:
                return None
        else:
            return spec
    @classmethod
    def find_module(cls, fullname, path=None):
        """find the module on sys.path or 'path' based on sys.path_hooks and
        sys.path_importer_cache.
        This method is deprecated. Use find_spec() instead.
        """
        spec = cls.find_spec(fullname, path)
        if spec is None:
            return None
        return spec.loader
class FileFinder:
    """File-based finder.
    Interactions with the file system are cached for performance, being
    refreshed when the directory the finder is handling has been modified.
    """
    def __init__(self, path, *loader_details):
        """Initialize with the path to search on and a variable number of
        2-tuples containing the loader and the file suffixes the loader
        recognizes."""
        loaders = []
        for loader, suffixes in loader_details:
            loaders.extend((suffix, loader) for suffix in suffixes)
        self._loaders = loaders
        # Base (directory) path
        self.path = path or '.'
        # -1 forces a cache fill on the first find_spec() call.
        self._path_mtime = -1
        self._path_cache = set()
        self._relaxed_path_cache = set()
    def invalidate_caches(self):
        """Invalidate the directory mtime."""
        self._path_mtime = -1
    find_module = _find_module_shim
    def find_loader(self, fullname):
        """Try to find a loader for the specified module, or the namespace
        package portions. Returns (loader, list-of-portions).
        This method is deprecated. Use find_spec() instead.
        """
        spec = self.find_spec(fullname)
        if spec is None:
            return None, []
        return spec.loader, spec.submodule_search_locations or []
    def _get_spec(self, loader_class, fullname, path, smsl, target):
        # Build a spec with a freshly constructed loader for the found file.
        loader = loader_class(fullname, path)
        return spec_from_file_location(fullname, path, loader=loader,
                                       submodule_search_locations=smsl)
    def find_spec(self, fullname, target=None):
        """Try to find a spec for the specified module.
        Returns the matching spec, or None if not found.
        """
        is_namespace = False
        tail_module = fullname.rpartition('.')[2]
        try:
            mtime = _path_stat(self.path or _os.getcwd()).st_mtime
        except OSError:
            mtime = -1
        # Refresh the directory listing cache when the directory changed.
        if mtime != self._path_mtime:
            self._fill_cache()
            self._path_mtime = mtime
        # tail_module keeps the original casing, for __file__ and friends
        if _relax_case():
            cache = self._relaxed_path_cache
            cache_module = tail_module.lower()
        else:
            cache = self._path_cache
            cache_module = tail_module
        # Check if the module is the name of a directory (and thus a package).
        if cache_module in cache:
            base_path = _path_join(self.path, tail_module)
            for suffix, loader_class in self._loaders:
                init_filename = '__init__' + suffix
                full_path = _path_join(base_path, init_filename)
                if _path_isfile(full_path):
                    return self._get_spec(loader_class, fullname, full_path, [base_path], target)
            else:
                # If a namespace package, return the path if we don't
                # find a module in the next section.
                is_namespace = _path_isdir(base_path)
        # Check for a file w/ a proper suffix exists.
        for suffix, loader_class in self._loaders:
            full_path = _path_join(self.path, tail_module + suffix)
            _bootstrap._verbose_message('trying {}', full_path, verbosity=2)
            if cache_module + suffix in cache:
                if _path_isfile(full_path):
                    return self._get_spec(loader_class, fullname, full_path,
                                          None, target)
        if is_namespace:
            _bootstrap._verbose_message('possible namespace for {}', base_path)
            spec = _bootstrap.ModuleSpec(fullname, None)
            spec.submodule_search_locations = [base_path]
            return spec
        return None
    def _fill_cache(self):
        """Fill the cache of potential modules and packages for this directory."""
        path = self.path
        try:
            contents = _os.listdir(path or _os.getcwd())
        except (FileNotFoundError, PermissionError, NotADirectoryError):
            # Directory has either been removed, turned into a file, or made
            # unreadable.
            contents = []
        # We store two cached versions, to handle runtime changes of the
        # PYTHONCASEOK environment variable.
        if not sys.platform.startswith('win'):
            self._path_cache = set(contents)
        else:
            # Windows users can import modules with case-insensitive file
            # suffixes (for legacy reasons). Make the suffix lowercase here
            # so it's done once instead of for every import. This is safe as
            # the specified suffixes to check against are always specified in a
            # case-sensitive manner.
            lower_suffix_contents = set()
            for item in contents:
                name, dot, suffix = item.partition('.')
                if dot:
                    new_name = '{}.{}'.format(name, suffix.lower())
                else:
                    new_name = name
                lower_suffix_contents.add(new_name)
            self._path_cache = lower_suffix_contents
        if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
            self._relaxed_path_cache = {fn.lower() for fn in contents}
    @classmethod
    def path_hook(cls, *loader_details):
        """A class method which returns a closure to use on sys.path_hook
        which will return an instance using the specified loaders and the path
        called on the closure.
        If the path called on the closure is not a directory, ImportError is
        raised.
        """
        def path_hook_for_FileFinder(path):
            """Path hook for importlib.machinery.FileFinder."""
            if not _path_isdir(path):
                raise ImportError('only directories are supported', path=path)
            return cls(path, *loader_details)
        return path_hook_for_FileFinder
    def __repr__(self):
        return 'FileFinder({!r})'.format(self.path)
# Import setup ###############################################################
def _fix_up_module(ns, name, pathname, cpathname=None):
    # This function is used by PyImport_ExecCodeModuleObject().
    # It fills in __spec__/__loader__/__file__/__cached__ on the module
    # namespace, inferring a loader from the path names when needed.
    loader = ns.get('__loader__')
    spec = ns.get('__spec__')
    if not loader:
        if spec:
            loader = spec.loader
        elif pathname == cpathname:
            # Source path equals cache path: a bytecode-only module.
            loader = SourcelessFileLoader(name, pathname)
        else:
            loader = SourceFileLoader(name, pathname)
    if not spec:
        spec = spec_from_file_location(name, pathname, loader=loader)
    try:
        ns['__spec__'] = spec
        ns['__loader__'] = loader
        ns['__file__'] = pathname
        ns['__cached__'] = cpathname
    except Exception:
        # Not important enough to report.
        pass
def _get_supported_file_loaders():
    """Return the file-based module loaders.

    Each entry is a (loader, suffixes) pair; extension modules take
    precedence over source modules, which take precedence over bytecode.
    """
    return [
        (ExtensionFileLoader, _imp.extension_suffixes()),
        (SourceFileLoader, SOURCE_SUFFIXES),
        (SourcelessFileLoader, BYTECODE_SUFFIXES),
    ]
def _setup(_bootstrap_module):
    """Setup the path-based importers for importlib by importing needed
    built-in modules and injecting them into the global namespace.
    Other components are extracted from the core bootstrap module.
    """
    global sys, _imp, _bootstrap
    _bootstrap = _bootstrap_module
    sys = _bootstrap.sys
    _imp = _bootstrap._imp
    # Directly load built-in modules needed during bootstrap.
    self_module = sys.modules[__name__]
    for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
        if builtin_name not in sys.modules:
            builtin_module = _bootstrap._builtin_from_name(builtin_name)
        else:
            builtin_module = sys.modules[builtin_name]
        setattr(self_module, builtin_name, builtin_module)
    # Directly load the os module (needed during bootstrap).
    # Pick whichever OS built-in exists: posix (with '/') or nt (with both).
    os_details = ('posix', ['/']), ('nt', ['\\', '/'])
    for builtin_os, path_separators in os_details:
        # Assumption made in _path_join()
        assert all(len(sep) == 1 for sep in path_separators)
        path_sep = path_separators[0]
        if builtin_os in sys.modules:
            os_module = sys.modules[builtin_os]
            break
        else:
            try:
                os_module = _bootstrap._builtin_from_name(builtin_os)
                break
            except ImportError:
                continue
    else:
        raise ImportError('importlib requires posix or nt')
    setattr(self_module, '_os', os_module)
    setattr(self_module, 'path_sep', path_sep)
    setattr(self_module, 'path_separators', ''.join(path_separators))
    # Directly load the _thread module (needed during bootstrap).
    thread_module = _bootstrap._builtin_from_name('_thread')
    setattr(self_module, '_thread', thread_module)
    # Directly load the _weakref module (needed during bootstrap).
    weakref_module = _bootstrap._builtin_from_name('_weakref')
    setattr(self_module, '_weakref', weakref_module)
    # Directly load the winreg module (needed during bootstrap).
    if builtin_os == 'nt':
        winreg_module = _bootstrap._builtin_from_name('winreg')
        setattr(self_module, '_winreg', winreg_module)
    # Constants
    setattr(self_module, '_relax_case', _make_relax_case())
    EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
    if builtin_os == 'nt':
        SOURCE_SUFFIXES.append('.pyw')
        if '_d.pyd' in EXTENSION_SUFFIXES:
            WindowsRegistryFinder.DEBUG_BUILD = True
def _install(_bootstrap_module):
    """Install the path-based import machinery.

    Bootstraps this module via _setup(), then registers a FileFinder
    path hook on sys.path_hooks and PathFinder on sys.meta_path.
    """
    _setup(_bootstrap_module)
    hook = FileFinder.path_hook(*_get_supported_file_loaders())
    sys.path_hooks.append(hook)
    sys.meta_path.append(PathFinder)
|
lukassup/route-ctl | refs/heads/master | tests/test_cli.py | 1 | # -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
print_function,
unicode_literals,
with_statement,
)
import contextlib
import os
import shlex
import sys
import unittest
from route_ctl import cli
try:
from io import StringIO
except ImportError:
# NOTE: PY2 compat
from cStringIO import StringIO
try:
unicode, basestring
except NameError:
# NOTE: PY2 compat
unicode = basestring = str
@contextlib.contextmanager
def suppress_output():
    """Redirect stdout and stderr to ``os.devnull``.

    Yields the two devnull file objects.  The original ``sys.stdout`` and
    ``sys.stderr`` are always restored, even when the body of the ``with``
    block raises an exception (the original version leaked the redirect
    on error because it lacked a try/finally around the yield).
    """
    with open(os.devnull, 'w') as stdout:
        with open(os.devnull, 'w') as stderr:
            _stdout, sys.stdout = sys.stdout, stdout
            _stderr, sys.stderr = sys.stderr, stderr
            try:
                yield stdout, stderr
            finally:
                sys.stdout = _stdout
                sys.stderr = _stderr
@contextlib.contextmanager
def capture_output():
    """Redirect stdout and stderr to in-memory ``StringIO`` buffers.

    Yields ``(stdout, stderr)`` buffers; call ``getvalue()`` inside the
    ``with`` block, since the buffers are closed on exit.  The original
    streams are always restored, even when the body raises an exception
    (the original version lacked a try/finally around the yield).
    """
    with StringIO() as stdout:
        with StringIO() as stderr:
            _stdout, sys.stdout = sys.stdout, stdout
            _stderr, sys.stderr = sys.stderr, stderr
            try:
                yield stdout, stderr
            finally:
                sys.stdout = _stdout
                sys.stderr = _stderr
class TestCLI(unittest.TestCase):
    """Perform CLI tests."""
    def test_displays_usage(self):
        """CLI should display usage without any arguments."""
        command = ''
        with suppress_output():
            # check if exits
            self.assertRaises(SystemExit,
                              cli.parser.parse_args,
                              shlex.split(command))
            # check exit status
            # argparse exits with status 2 for a usage error.
            try:
                cli.parser.parse_args(shlex.split(command))
            except SystemExit as err:
                self.assertEqual(err.code, 2)
        # NOTE: this always fails on Python 2 because argparse prints
        # help in str not unicode.
        # self.assertRegexpMatches(
        #     stderr.getvalue(),
        #     r'^usage:',
        # )
    def test_displays_help_with_subcommand(self):
        """CLI should display help with the `help` subcommand."""
        # argparse subcommand support needs Python >= 2.7.
        if sys.version_info < (2, 7):
            return
        command = 'help'
        args = cli.parser.parse_args(shlex.split(command))
        with capture_output() as (stdout, stderr):
            # check if exits
            self.assertRaises(SystemExit, args.action, vars(args))
            # check output
            self.assertRegexpMatches(stdout.getvalue(), r'^usage:')
            # check exit status
            # NOTE(review): called with **vars(args) here but positionally
            # above -- confirm which calling convention cli.action expects.
            try:
                args.action(**vars(args))
            except SystemExit as err:
                self.assertEqual(err.code, 0)
    def test_displays_help_with_option(self):
        """CLI should display help with the `-h` option for all subcommands."""
        if sys.version_info < (2, 7):
            return
        commands = [
            '-h',
            'list -h',
            'find -h',
            'batch-insert -h',
            'batch-replace -h',
            'create -h',
        ]
        for command in commands:
            with capture_output() as (stdout, stderr):
                # check if exits
                self.assertRaises(SystemExit,
                                  cli.parser.parse_args,
                                  shlex.split(command))
                # check output
                self.assertRegexpMatches(stdout.getvalue(), r'^usage:')
                # check exit code
                # argparse exits with status 0 after printing help.
                try:
                    cli.parser.parse_args(shlex.split(command))
                except SystemExit as err:
                    self.assertEqual(err.code, 0)
|
ivandavid77/learnetic_monitoreo_mcourserpro | refs/heads/master | apps/learnetic_monitoreo_mcourserpro/priv/utils/obtener_datos_dgespe_accesos.py | 1 | # -*- coding: utf-8 -*-
import sys
import csv
import datetime
import yaml
import pymysql.cursors
def get_config():
    """Load the database configuration from ``../config/database.yaml``.

    Returns the parsed YAML document (expected to contain a 'db' mapping).
    """
    with open('../config/database.yaml') as f:
        # safe_load parses plain YAML only; plain yaml.load() can execute
        # arbitrary Python via YAML tags and is unsafe on any file input.
        return yaml.safe_load(f.read())
def get_connection(config):
    """Open a MySQL connection using the 'db' section of the config mapping."""
    db = config['db']
    return pymysql.connect(db=db['name'],
                           user=db['user'],
                           password=db['password'],
                           host=db['host'],
                           charset='utf8mb4')
                           #cursorclass=pymysql.cursors.DictCursor)
# Export the dgespe_accesos table to dgespe_accesos.csv.
# NOTE(review): mode 'wb' is the Python 2 csv convention; on Python 3 this
# would need mode 'w' with newline='' -- confirm target interpreter.
with open('dgespe_accesos.csv', 'wb') as f:
    writer = csv.writer(f)
    # Header row matches the selected columns below.
    writer.writerow(('username', 'unidad', 'tipo_dispositivo','sistema_operativo','modo','plataforma','horas_invertidas'))
    config = get_config()
    conn = get_connection(config)
    with conn.cursor() as cursor:
        cursor.execute((
            'SELECT '
            'username,'
            'unidad,'
            'tipo_dispositivo,'
            'sistema_operativo,'
            'modo,'
            'plataforma,'
            'horas_invertidas '
            'FROM dgespe_accesos'
        ))
        # Stream every fetched row straight into the CSV.
        writer.writerows(cursor.fetchall())
|
40223205/w16b_test- | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/test/__init__.py | 2547 | # Dummy file to make this directory a package.
|
SpectraLogic/samba | refs/heads/master | buildtools/wafsamba/hpuxcc.py | 67 | # compiler definition for HPUX
# based on suncc.py from waf
import os, optparse, sys
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
import gcc
@conftest
def gcc_modifier_hpux(conf):
    # Configure waf's gcc environment for HP-UX: compile/link flag
    # templates, library naming patterns ('lib%s.sl' shared libs), and
    # PIC options.
    v=conf.env
    v['CCFLAGS_DEBUG']=['-g']
    v['CCFLAGS_RELEASE']=['-O2']
    v['CC_SRC_F']=''
    v['CC_TGT_F']=['-c','-o','']
    v['CPPPATH_ST']='-I%s'
    if not v['LINK_CC']:v['LINK_CC']=v['CC']
    v['CCLNK_SRC_F']=''
    v['CCLNK_TGT_F']=['-o','']
    v['LIB_ST']='-l%s'
    v['LIBPATH_ST']='-L%s'
    v['STATICLIB_ST']='-l%s'
    v['STATICLIBPATH_ST']='-L%s'
    v['RPATH_ST']='-Wl,-rpath,%s'
    v['CCDEFINES_ST']='-D%s'
    v['SONAME_ST']='-Wl,-h,%s'
    v['SHLIB_MARKER']=[]
#    v['STATICLIB_MARKER']='-Wl,-Bstatic'
    v['FULLSTATIC_MARKER']='-static'
    v['program_PATTERN']='%s'
    v['shlib_CCFLAGS']=['-fPIC','-DPIC']
    v['shlib_LINKFLAGS']=['-shared']
    v['shlib_PATTERN']='lib%s.sl'
#    v['staticlib_LINKFLAGS']=['-Wl,-Bstatic']
    v['staticlib_PATTERN']='lib%s.a'
# Replace waf's stock gcc modifier with the HP-UX variant above.
gcc.gcc_modifier_hpux = gcc_modifier_hpux
from TaskGen import feature, after
@feature('cprogram', 'cshlib')
@after('apply_link', 'apply_lib_vars', 'apply_obj_vars')
def hpux_addfullpath(self):
    # On HP-UX 11, rewrite relative -L library search paths in the link
    # flags into absolute paths anchored at the build's source node.
    if sys.platform == 'hp-ux11':
        link = getattr(self, 'link_task', None)
        if link:
            lst = link.env.LINKFLAGS
            buf = []
            for x in lst:
                if x.startswith('-L'):
                    p2 = x[2:]
                    if not os.path.isabs(p2):
                        x = x[:2] + self.bld.srcnode.abspath(link.env) + "/../" + x[2:].lstrip('.')
                buf.append(x)
            link.env.LINKFLAGS = buf
IV-GII/Ingenia | refs/heads/master | Versiones antiguas/proyecto_con_template/pedidos/models.py | 2 | from django.db import models
# Create your models here.
class Usuarios (models.Model):
    # Application user record.
    # NOTE(review): password is stored as a plain CharField -- confirm it is
    # hashed before save, or switch to Django's auth framework.
    nombre = models.CharField (max_length=100)
    correo_electronico = models.CharField (max_length=300)
    password = models.CharField (max_length=30)
    rol = models.CharField (max_length=30)
    def __unicode__(self):
        # Python 2 string representation (admin lists, shell, etc.).
        return self.nombre
class Pedidos (models.Model):
    # An order placed by a Usuarios record.
    usuario = models.ForeignKey (Usuarios)
    num_pedido = models.CharField (max_length=15)
    concepto = models.CharField(max_length=200)
    estado = models.CharField (max_length=20)
    telefono_tecnico = models.CharField (max_length=12, blank=True)
    #fecha_instalacion = models.DateTimeField (null=True, blank=True)
    forma_de_recepcion = models.CharField (max_length=30, blank=True)
    def __unicode__(self):
        # Python 2 string representation: the order number.
        return self.num_pedido
|
DanteOnline/free-art | refs/heads/master | venv/lib/python3.4/site-packages/django/contrib/messages/api.py | 512 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
    """Raised when a message cannot be recorded and fail_silently is False."""
    pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
    """Attempt to record a message on the request via the 'messages' app.

    Raises ``TypeError`` for non-``HttpRequest`` arguments.  When the
    messages middleware has not attached a storage to the request, raises
    ``MessageFailure`` unless ``fail_silently`` is true.
    """
    if not isinstance(request, HttpRequest):
        raise TypeError("add_message() argument must be an HttpRequest object, "
                        "not '%s'." % request.__class__.__name__)
    try:
        backend = request._messages
    except AttributeError:
        if not fail_silently:
            raise MessageFailure('You cannot add messages without installing '
                                 'django.contrib.messages.middleware.MessageMiddleware')
    else:
        return backend.add(level, message, extra_tags)
def get_messages(request):
    """Return the message storage attached to ``request``.

    Falls back to an empty list when the messages middleware has not
    installed a storage on the request.
    """
    return getattr(request, '_messages', [])
def get_level(request):
    """Return the minimum level of messages to be recorded.

    Reads the level from the storage already attached to the request, or
    from a freshly created default storage otherwise.  The default level
    is the ``MESSAGE_LEVEL`` setting, falling back to ``INFO``.
    """
    try:
        storage = request._messages
    except AttributeError:
        storage = default_storage(request)
    return storage.level
def set_level(request, level):
    """Set the minimum level of messages to be recorded.

    Returns ``True`` when the level was stored on the request's message
    storage, ``False`` when no storage is attached.  Passing ``None``
    restores the storage's default level.
    """
    _missing = object()
    storage = getattr(request, '_messages', _missing)
    if storage is _missing:
        return False
    storage.level = level
    return True
def debug(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``DEBUG`` level."""
    add_message(request, constants.DEBUG, message, extra_tags,
                fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``INFO`` level."""
    add_message(request, constants.INFO, message, extra_tags,
                fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``SUCCESS`` level."""
    add_message(request, constants.SUCCESS, message, extra_tags,
                fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``WARNING`` level."""
    add_message(request, constants.WARNING, message, extra_tags,
                fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``ERROR`` level."""
    add_message(request, constants.ERROR, message, extra_tags,
                fail_silently)
|
codermoji-contrib/python | refs/heads/master | start/Intro to Dicts/001/mkfs.py | 18 | # Things that are available:
# U
# fs
|
tectronics/photivo | refs/heads/master | scons-local-2.2.0/SCons/Tool/midl.py | 14 | """SCons.Tool.midl
Tool-specific initialization for midl (Microsoft IDL compiler).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/midl.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner.IDL
import SCons.Util
from MSCommon import msvc_exists
def midl_emitter(target, source, env):
    """Produces a list of outputs from the MIDL compiler"""
    # Base name (without suffix) of the first target drives all generated
    # file names: .tlb, header, interface stub, proxy and dlldata sources.
    base, ext = SCons.Util.splitext(str(target[0]))
    tlb = target[0]
    incl = base + '.h'
    interface = base + '_i.c'
    t = [tlb, incl, interface]
    midlcom = env['MIDLCOM']
    # Extra outputs are only declared when the configured command line
    # actually asks MIDL to emit them.
    if midlcom.find('/proxy') != -1:
        proxy = base + '_p.c'
        t.append(proxy)
    if midlcom.find('/dlldata') != -1:
        dlldata = base + '_data.c'
        t.append(dlldata)
    return (t,source)
# Builder wiring: scan .idl sources for dependencies, run $MIDLCOM, and let
# midl_emitter declare the generated .tlb/.h/.c outputs.
idl_scanner = SCons.Scanner.IDL.IDLScan()
midl_action = SCons.Action.Action('$MIDLCOM', '$MIDLCOMSTR')
midl_builder = SCons.Builder.Builder(action = midl_action,
                                     src_suffix = '.idl',
                                     suffix='.tlb',
                                     emitter = midl_emitter,
                                     source_scanner = idl_scanner)
def generate(env):
    """Add Builders and construction variables for midl to an Environment."""
    env['MIDL']          = 'MIDL.EXE'
    env['MIDLFLAGS']     = SCons.Util.CLVar('/nologo')
    # The command line always names all five outputs; midl_emitter decides
    # which of them become tracked targets.
    env['MIDLCOM']       = '$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} /iid ${TARGETS[2]} /proxy ${TARGETS[3]} /dlldata ${TARGETS[4]} $SOURCE 2> NUL'
    env['BUILDERS']['TypeLibrary'] = midl_builder
def exists(env):
    # MIDL ships with MSVC, so the tool is available iff MSVC is found.
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rvalyi/geraldo | refs/heads/master | site/newsite/django_1_0/tests/regressiontests/requests/__init__.py | 272 | """
Tests for Django's various Request objects.
"""
|
bhansa/fireball | refs/heads/master | pyvenv/Lib/site-packages/pygame/tests/test_utils/endian.py | 21 | # Module pygame.tests.test_utils.endian
#
# Machine independent conversion to little-endian and big-endian Python
# integer values.
import struct
def little_endian_uint32(i):
    """Return the 32 bit unsigned integer little-endian representation of i.

    The value is serialized in little-endian byte order and read back in
    the machine's native byte order, so on a big-endian host the bytes of
    the result appear swapped.
    """
    packed = struct.pack('<I', i)
    (value,) = struct.unpack('=I', packed)
    return value
def big_endian_uint32(i):
    """Return the 32 bit unsigned integer big-endian representation of i.

    The value is serialized in big-endian byte order and read back in the
    machine's native byte order, so on a little-endian host the bytes of
    the result appear swapped.
    """
    packed = struct.pack('>I', i)
    (value,) = struct.unpack('=I', packed)
    return value
|
Tjorriemorrie/trading | refs/heads/master | 09_scalping/s-takeprofit.py | 1 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import linear_model
from features import calculateTargets
from simulate import backtest
from pprint import pprint
# Python 2 script: sweep take-profit levels for a backtest on EURUSD H1
# candles and chart net-pips-per-trade and win ratio per level.
currency = 'EURUSD'
interval = '60'
# Pip conversion factor for a 4-decimal quote currency.
factor = 10000
df = pd.read_csv(
    r'../data/' + currency.upper() + interval + '.csv',
    names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
    dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'},
    #parse_dates=[[0, 1]],
    # index_col=0,
)
# df = df.iloc[-14400:].reset_index()
print df.tail()
print 'calculating targets...'
calculateTargets(df)
#bullMean = df['targetBull'].mean()
#bullStd = df['targetBull'].std()
#print 'high mean std', bullMean, bullStd
bearMean = df['targetBear'].mean()
bearStd = df['targetBear'].std()
print 'bear mean std', bearMean, bearStd
print 'backtesting...'
# Sweep take-profit from 400 to 600 in steps of 10 (as floats).
takeProfits = [tp + 0. for tp in range(400, 601, 10)]
stopLoss = 540.
entry = 20.
waitFor = 95.
exitAt = 530.
totalNpt = []
totalRat = []
for takeProfit in takeProfits:
    print '\ntakeProfit', takeProfit
    wins, losses = backtest(df, takeProfit, stopLoss, entry, waitFor, exitAt, factor)
    profit = sum([w['outcome'] for w in wins])
    print 'wins', len(wins), round(profit, 4)
    loss = sum([l['outcome'] for l in losses])
    print 'loss', len(losses), round(loss, 4)
    net = profit - loss
    # Net pips per trade across all closed trades.
    npt = int((net / len(wins + losses)) * factor)
    # '+ 0.' forces float division under Python 2 integer semantics.
    ratio = len(wins) / (len(wins) + len(losses) + 0.)
    print 'net', round(net, 4), 'npt', npt, '%', int(ratio * 100)
    totalNpt.append(npt)
    totalRat.append(ratio)
print '\n'
#pprint(totalNpt)
N = len(totalNpt)
#totalNpt = (20, 35, 30, 35, 27)
#menStd = (2, 3, 4, 1, 2)
ind = np.arange(N)  # the x locations for the groups
width = 0.35       # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, totalNpt, width, color='r')
#womenMeans = (25, 32, 34, 20, 25)
#womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, totalRat, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Pips')
ax.set_title('Results')
ax.set_xticks(ind + width)
ax.set_xticklabels(map(int, takeProfits))
#ax.legend(
#    (rects1[0]),
#    ('Npt',),
#)
def autolabel(rects):
    """Annotate each bar in *rects* with its integer height.

    Labels are drawn just above each bar, horizontally centred, on the
    module-level ``ax`` axes object.
    """
    for bar in rects:
        h = bar.get_height()
        x_mid = bar.get_x() + bar.get_width() / 2.
        ax.text(x_mid, 1.05 * h, '%d' % int(h), ha='center', va='bottom')
#autolabel(rects1)
#autolabel(rects2)
# Render the chart window (blocks until the window is closed).
plt.show()
|
vizual54/MissionPlanner | refs/heads/master | Lib/distutils/command/build_py.py | 54 | """distutils.command.build_py
Implements the Distutils 'build_py' command."""
__revision__ = "$Id$"
import os
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsFileError
from distutils.util import convert_path
from distutils import log
class build_py(Command):
    """Implement the Distutils 'build_py' command.

    Copies pure Python modules and package data files into the build
    directory, optionally byte-compiling the .py files afterwards.
    """
    description = "\"build\" pure Python modules (copy to build directory)"
    user_options = [
        ('build-lib=', 'd', "directory to \"build\" (copy) to"),
        ('compile', 'c', "compile .py to .pyc"),
        ('no-compile', None, "don't compile .py files [default]"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ]
    boolean_options = ['compile', 'force']
    # '--no-compile' is the inverse of '--compile'.
    negative_opt = {'no-compile' : 'compile'}
    def initialize_options(self):
        # All options start unset; finalize_options() fills them in from
        # the 'build' command and from the distribution metadata.
        self.build_lib = None
        self.py_modules = None
        self.package = None
        self.package_data = None
        self.package_dir = None
        self.compile = 0
        self.optimize = 0
        self.force = None
    def finalize_options(self):
        # Inherit 'build_lib' and 'force' from the parent 'build' command.
        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))
        # Get the distribution options that are aliases for build_py
        # options -- list of packages and list of modules.
        self.packages = self.distribution.packages
        self.py_modules = self.distribution.py_modules
        self.package_data = self.distribution.package_data
        self.package_dir = {}
        if self.distribution.package_dir:
            for name, path in self.distribution.package_dir.items():
                self.package_dir[name] = convert_path(path)
        self.data_files = self.get_data_files()
        # Ick, copied straight from install_lib.py (fancy_getopt needs a
        # type system!  Hell, *everything* needs a type system!!!)
        if not isinstance(self.optimize, int):
            try:
                self.optimize = int(self.optimize)
                assert 0 <= self.optimize <= 2
            except (ValueError, AssertionError):
                raise DistutilsOptionError("optimize must be 0, 1, or 2")
    def run(self):
        """Copy modules/packages (and their data) into the build tree."""
        # XXX copy_file by default preserves atime and mtime.  IMHO this is
        # the right thing to do, but perhaps it should be an option -- in
        # particular, a site administrator might want installed files to
        # reflect the time of installation rather than the last
        # modification time before the installed release.
        # XXX copy_file by default preserves mode, which appears to be the
        # wrong thing to do: if a file is read-only in the working
        # directory, we want it to be installed read/write so that the next
        # installation of the same module distribution can overwrite it
        # without problems.  (This might be a Unix-specific issue.)  Thus
        # we turn off 'preserve_mode' when copying to the build directory,
        # since the build directory is supposed to be exactly what the
        # installation will look like (ie. we preserve mode when
        # installing).
        # Two options control which modules will be installed: 'packages'
        # and 'py_modules'.  The former lets us work with whole packages, not
        # specifying individual modules at all; the latter is for
        # specifying modules one-at-a-time.
        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
            self.build_package_data()
        self.byte_compile(self.get_outputs(include_bytecode=0))
    def get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        data = []
        if not self.packages:
            return data
        for package in self.packages:
            # Locate package source directory
            src_dir = self.get_package_dir(package)
            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))
            # Length of path to strip from found files
            plen = 0
            if src_dir:
                plen = len(src_dir)+1
            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
                ]
            data.append((package, src_dir, build_dir, filenames))
        return data
    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        # Global ('' key) patterns apply to every package.
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = []
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            filelist = glob(os.path.join(src_dir, convert_path(pattern)))
            # Files that match more than one pattern are only added once
            files.extend([fn for fn in filelist if fn not in files])
        return files
    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                self.copy_file(os.path.join(src_dir, filename), target,
                               preserve_mode=False)
    def get_package_dir(self, package):
        """Return the directory, relative to the top of the source
        distribution, where package 'package' should be found
        (at least according to the 'package_dir' option, if any)."""
        path = package.split('.')
        if not self.package_dir:
            if path:
                return os.path.join(*path)
            else:
                return ''
        else:
            tail = []
            # Walk from the full dotted name towards its prefixes, looking
            # for the longest entry in package_dir that applies.
            while path:
                try:
                    pdir = self.package_dir['.'.join(path)]
                except KeyError:
                    tail.insert(0, path[-1])
                    del path[-1]
                else:
                    tail.insert(0, pdir)
                    return os.path.join(*tail)
            else:
                # Oops, got all the way through 'path' without finding a
                # match in package_dir.  If package_dir defines a directory
                # for the root (nameless) package, then fallback on it;
                # otherwise, we might as well have not consulted
                # package_dir at all, as we just use the directory implied
                # by 'tail' (which should be the same as the original value
                # of 'path' at this point).
                pdir = self.package_dir.get('')
                if pdir is not None:
                    tail.insert(0, pdir)
                if tail:
                    return os.path.join(*tail)
                else:
                    return ''
    def check_package(self, package, package_dir):
        """Validate a package directory; return its __init__.py path or None."""
        # Empty dir name means current directory, which we can probably
        # assume exists.  Also, os.path.exists and isdir don't know about
        # my "empty string means current dir" convention, so we have to
        # circumvent them.
        if package_dir != "":
            if not os.path.exists(package_dir):
                raise DistutilsFileError(
                      "package directory '%s' does not exist" % package_dir)
            if not os.path.isdir(package_dir):
                raise DistutilsFileError(
                       "supposed package directory '%s' exists, "
                       "but is not a directory" % package_dir)
        # Require __init__.py for all but the "root package"
        if package:
            init_py = os.path.join(package_dir, "__init__.py")
            if os.path.isfile(init_py):
                return init_py
            else:
                log.warn(("package init file '%s' not found " +
                          "(or not a regular file)"), init_py)
        # Either not in a package at all (__init__.py not expected), or
        # __init__.py doesn't exist -- so don't return the filename.
        return None
    def check_module(self, module, module_file):
        """Return True if 'module_file' exists; warn and return False otherwise."""
        if not os.path.isfile(module_file):
            log.warn("file %s (for module %s) not found", module_file, module)
            return False
        else:
            return True
    def find_package_modules(self, package, package_dir):
        """Return (package, module, file) tuples for all .py files in a package."""
        self.check_package(package, package_dir)
        module_files = glob(os.path.join(package_dir, "*.py"))
        modules = []
        setup_script = os.path.abspath(self.distribution.script_name)
        for f in module_files:
            abs_f = os.path.abspath(f)
            # The setup script itself is never treated as a module to build.
            if abs_f != setup_script:
                module = os.path.splitext(os.path.basename(f))[0]
                modules.append((package, module, f))
            else:
                self.debug_print("excluding %s" % setup_script)
        return modules
    def find_modules(self):
        """Finds individually-specified Python modules, ie. those listed by
        module name in 'self.py_modules'.  Returns a list of tuples (package,
        module_base, filename): 'package' is a tuple of the path through
        package-space to the module; 'module_base' is the bare (no
        packages, no dots) module name, and 'filename' is the path to the
        ".py" file (relative to the distribution root) that implements the
        module.
        """
        # Map package names to tuples of useful info about the package:
        #    (package_dir, checked)
        # package_dir - the directory where we'll find source files for
        #   this package
        # checked - true if we have checked that the package directory
        #   is valid (exists, contains __init__.py, ... ?)
        packages = {}
        # List of (package, module, filename) tuples to return
        modules = []
        # We treat modules-in-packages almost the same as toplevel modules,
        # just the "package" for a toplevel is empty (either an empty
        # string or empty list, depending on context).  Differences:
        #   - don't check for __init__.py in directory for empty package
        for module in self.py_modules:
            path = module.split('.')
            package = '.'.join(path[0:-1])
            module_base = path[-1]
            try:
                (package_dir, checked) = packages[package]
            except KeyError:
                package_dir = self.get_package_dir(package)
                checked = 0
            if not checked:
                init_py = self.check_package(package, package_dir)
                packages[package] = (package_dir, 1)
                if init_py:
                    modules.append((package, "__init__", init_py))
            # XXX perhaps we should also check for just .pyc files
            # (so greedy closed-source bastards can distribute Python
            # modules too)
            module_file = os.path.join(package_dir, module_base + ".py")
            if not self.check_module(module, module_file):
                continue
            modules.append((package, module_base, module_file))
        return modules
    def find_all_modules(self):
        """Compute the list of all modules that will be built, whether
        they are specified one-module-at-a-time ('self.py_modules') or
        by whole packages ('self.packages').  Return a list of tuples
        (package, module, module_file), just like 'find_modules()' and
        'find_package_modules()' do."""
        modules = []
        if self.py_modules:
            modules.extend(self.find_modules())
        if self.packages:
            for package in self.packages:
                package_dir = self.get_package_dir(package)
                m = self.find_package_modules(package, package_dir)
                modules.extend(m)
        return modules
    def get_source_files(self):
        """Return the list of source .py filenames that will be built."""
        return [module[-1] for module in self.find_all_modules()]
    def get_module_outfile(self, build_dir, package, module):
        """Return the build-tree path for 'module' of 'package' (a sequence)."""
        outfile_path = [build_dir] + list(package) + [module + ".py"]
        return os.path.join(*outfile_path)
    def get_outputs(self, include_bytecode=1):
        """Return all files this command produces, optionally with .pyc/.pyo."""
        modules = self.find_all_modules()
        outputs = []
        for (package, module, module_file) in modules:
            package = package.split('.')
            filename = self.get_module_outfile(self.build_lib, package, module)
            outputs.append(filename)
            if include_bytecode:
                if self.compile:
                    outputs.append(filename + "c")
                if self.optimize > 0:
                    outputs.append(filename + "o")
        # Data files are outputs too.
        outputs += [
            os.path.join(build_dir, filename)
            for package, src_dir, build_dir, filenames in self.data_files
            for filename in filenames
            ]
        return outputs
    def build_module(self, module, module_file, package):
        """Copy a single module source file into the build tree."""
        if isinstance(package, str):
            package = package.split('.')
        elif not isinstance(package, (list, tuple)):
            raise TypeError(
                  "'package' must be a string (dot-separated), list, or tuple")
        # Now put the module source file into the "build" area -- this is
        # easy, we just copy it somewhere under self.build_lib (the build
        # directory for Python source).
        outfile = self.get_module_outfile(self.build_lib, package, module)
        dir = os.path.dirname(outfile)
        self.mkpath(dir)
        return self.copy_file(module_file, outfile, preserve_mode=0)
    def build_modules(self):
        """Build every individually-listed module from 'self.py_modules'."""
        modules = self.find_modules()
        for (package, module, module_file) in modules:
            # Now "build" the module -- ie. copy the source file to
            # self.build_lib (the build directory for Python source).
            # (Actually, it gets copied to the directory for this package
            # under self.build_lib.)
            self.build_module(module, module_file, package)
    def build_packages(self):
        """Build every module of every package in 'self.packages'."""
        for package in self.packages:
            # Get list of (package, module, module_file) tuples based on
            # scanning the package directory.  'package' is only included
            # in the tuple so that 'find_modules()' and
            # 'find_package_tuples()' have a consistent interface; it's
            # ignored here (apart from a sanity check).  Also, 'module' is
            # the *unqualified* module name (ie. no dots, no package -- we
            # already know its package!), and 'module_file' is the path to
            # the .py file, relative to the current directory
            # (ie. including 'package_dir').
            package_dir = self.get_package_dir(package)
            modules = self.find_package_modules(package, package_dir)
            # Now loop over the modules we found, "building" each one (just
            # copy it to self.build_lib).
            for (package_, module, module_file) in modules:
                assert package == package_
                self.build_module(module, module_file, package)
    def byte_compile(self, files):
        """Byte-compile 'files' under the build prefix per compile/optimize."""
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return
        from distutils.util import byte_compile
        prefix = self.build_lib
        if prefix[-1] != os.sep:
            prefix = prefix + os.sep
        # XXX this code is essentially the same as the 'byte_compile()
        # method of the "install_lib" command, except for the determination
        # of the 'prefix' string.  Hmmm.
        if self.compile:
            byte_compile(files, optimize=0,
                         force=self.force, prefix=prefix, dry_run=self.dry_run)
        if self.optimize > 0:
            byte_compile(files, optimize=self.optimize,
                         force=self.force, prefix=prefix, dry_run=self.dry_run)
|
def fail_acquire_settings(log_printer, settings_names_dict, section):
    """
    This method throws an exception if any setting needs to be acquired.

    :param log_printer:         Printer responsible for logging the messages.
    :param settings_names_dict: A dictionary with the setting name as key and
                                a list containing a description in [0] and the
                                name of the bears who need this setting in [1]
                                and following.
    :param section:             The section the settings belong to. (Unused
                                here; accepted for interface compatibility.)
    :raises AssertionError:     If any setting is required.
    :raises TypeError:          If ``settings_names_dict`` is not a dictionary.
    """
    if not isinstance(settings_names_dict, dict):
        raise TypeError('The settings_names_dict parameter has to be a '
                        'dictionary.')

    required_settings = settings_names_dict.keys()
    if len(required_settings) != 0:
        msg = ('During execution, we found that some required '
               'settings were not provided. They are:\n')
        # Join entries with newlines so multiple missing settings do not
        # run together on one line (the original appended them with no
        # separator at all).
        msg += '\n'.join(
            '{} (from {}) - {}'.format(name, setting[1], setting[0])
            for name, setting in settings_names_dict.items())
        log_printer.err(msg)
        raise AssertionError(msg)
|
katstalk/android_external_chromium_org | refs/heads/kk44 | chrome/browser/extensions/PRESUBMIT_test.py | 58 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import difflib
import os
import re
import unittest
import PRESUBMIT
class MockLogging(object):
    """Test double for a logger: every message, regardless of level, is
    appended to ``self.lines`` in call order."""

    def __init__(self):
        self.lines = []

    def _append(self, message):
        self.lines.append(message)

    # Both log levels share the same recording behaviour.
    info = _append
    debug = _append
class MockInputApi(object):
    """Test double for the presubmit InputApi.

    Exposes the real ``re`` and ``os.path`` modules, a mutable ``files``
    list, and a mock logger.
    """

    def __init__(self):
        self.files = []
        self.is_committing = False
        self.logging = MockLogging()
        # Hand the real modules through, as the production InputApi does.
        self.re = re
        self.os_path = os.path

    def AffectedFiles(self, include_deletes=None):
        # ``include_deletes`` is accepted for API compatibility but ignored.
        return self.files
class MockOutputApi(object):
    """Test double for the presubmit OutputApi.

    Each nested result class delegates to PresubmitResult and tags the
    instance with a severity string in ``type``.
    """

    class PresubmitResult(object):
        def __init__(self, message, items=None, long_text=''):
            self.message = message
            self.items = items
            self.long_text = long_text

    class PresubmitError(PresubmitResult):
        def __init__(self, message, items, long_text=''):
            super(MockOutputApi.PresubmitError, self).__init__(
                message, items, long_text)
            self.type = 'error'

    class PresubmitPromptWarning(PresubmitResult):
        def __init__(self, message, items, long_text=''):
            super(MockOutputApi.PresubmitPromptWarning, self).__init__(
                message, items, long_text)
            self.type = 'warning'

    class PresubmitNotifyResult(PresubmitResult):
        def __init__(self, message, items, long_text=''):
            super(MockOutputApi.PresubmitNotifyResult, self).__init__(
                message, items, long_text)
            self.type = 'notify'
class MockFile(object):
    """Test double for a presubmit AffectedFile backed by in-memory
    old/new contents.

    NOTE: the original defined ``ChangedContents`` twice; the first
    definition was dead code (shadowed by the second) and referenced a
    ``self._changed_contents`` attribute that was never set, so it has
    been removed.
    """

    def __init__(self, local_path, old_contents, new_contents):
        self._local_path = local_path
        self._new_contents = new_contents
        self._old_contents = old_contents
        # Lazily-computed result of ChangedContents().
        self._cached_changed_contents = None

    def NewContents(self):
        return self._new_contents

    def LocalPath(self):
        return self._local_path

    def IsDirectory(self):
        return False

    def GenerateScmDiff(self):
        """Return a unified diff between the old and new contents."""
        result = ""
        for line in difflib.unified_diff(self._old_contents, self._new_contents,
                                         self._local_path, self._local_path):
            result += line
        return result

    # NOTE: This method is a copy of ChangedContents method of AffectedFile in
    # presubmit_support.py
    def ChangedContents(self):
        """Returns a list of tuples (line number, line text) of all new lines.

        This relies on the scm diff output describing each changed code section
        with a line of the form

        ^@@ <old line num>,<old size> <new line num>,<new size> @@$
        """
        if self._cached_changed_contents is not None:
            return self._cached_changed_contents[:]
        self._cached_changed_contents = []
        line_num = 0
        if self.IsDirectory():
            return []
        for line in self.GenerateScmDiff().splitlines():
            m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
            if m:
                line_num = int(m.groups(1)[0])
                continue
            if line.startswith('+') and not line.startswith('++'):
                self._cached_changed_contents.append((line_num, line[1:]))
            if not line.startswith('-'):
                line_num += 1
        return self._cached_changed_contents[:]
class MockChange(object):
    """Test double for a presubmit Change holding a fixed list of paths."""

    def __init__(self, changed_files):
        # Keep the caller's list; LocalPaths() returns this same object.
        self._paths = changed_files

    def LocalPaths(self):
        return self._paths
class HistogramValueCheckerTest(unittest.TestCase):
    """Tests for PRESUBMIT.HistogramValueChecker.

    Each test replays one edit of the checked histogram file: the "old"
    contents come from PRESUBMIT_test_old_file.txt and the "new" contents
    from one of the numbered PRESUBMIT_test_new_file_*.txt fixtures.

    Fixes over the original: typos in assertion messages corrected and
    deprecated assertEquals replaced with assertEqual.
    """

    TEST_FILE_PATTERN = "PRESUBMIT_test_new_file_%s.txt"

    def _ReadTextFileContents(self, path):
        """Given a path, returns a list of strings corresponding to the text lines
        in the file. Reads files in text format.
        """
        fo = open(path, 'r')
        try:
            contents = fo.readlines()
        finally:
            fo.close()
        return contents

    def _ReadInputFile(self):
        """Return the baseline ("old") file contents used by every test."""
        return self._ReadTextFileContents("PRESUBMIT_test_old_file.txt")

    def _PrepareTest(self, new_file_path):
        """Build (input_api, output_api) mocks describing one file edit.

        A falsy new_file_path simulates deletion of the checked file.
        """
        old_contents = self._ReadInputFile()
        if not new_file_path:
            new_contents = []
        else:
            new_contents = self._ReadTextFileContents(new_file_path)
        input_api = MockInputApi()
        mock_file = MockFile(PRESUBMIT.HistogramValueChecker.LOCAL_PATH,
                             old_contents,
                             new_contents)
        input_api.files.append(mock_file)
        output_api = MockOutputApi()
        return input_api, output_api

    def _RunTest(self, new_file_path):
        """Run the checker against one simulated edit; return its results."""
        input_api, output_api = self._PrepareTest(new_file_path)
        checker = PRESUBMIT.HistogramValueChecker(input_api, output_api)
        results = checker.Run()
        return results

    def testDeleteFile(self):
        results = self._RunTest(new_file_path=None)
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a single warning about file deletion.")

    def testSimpleValidEdit(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "1")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(0, len(results),
                         "We should get no warning for simple edits.")

    def testSingleDeletionOfEntry(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "2")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a warning for an entry deletion.")

    def testSingleRenameOfEntry(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "3")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a warning for an entry rename, even "
                         "though it is not optimal.")

    def testMissingEnumStartOfEntry(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "4")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a warning for a missing enum marker.")

    def testMissingEnumEndOfEntry(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "5")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a warning for a missing enum marker.")

    def testInvertedEnumMarkersOfEntry(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "6")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a warning for inverted enum markers.")

    def testMultipleInvalidEdits(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "7")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(3, len(results),
                         "We should get 3 warnings (one per edit).")

    def testSingleInvalidInserts(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "8")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(1, len(results),
                         "We should get a warning for a single invalid "
                         "insertion inside the enum.")

    def testMulitpleValidInserts(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "9")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(0, len(results),
                         "We should not get a warning for multiple valid edits")

    def testSingleValidDeleteOutsideOfEnum(self):
        results = self._RunTest(self.TEST_FILE_PATTERN % "10")
        # TODO(rpaquay) How to check it's the expected warning?
        self.assertEqual(0, len(results),
                         "We should not get a warning for a deletion outside of "
                         "the enum")
if __name__ == '__main__':
unittest.main()
|
jjneely/current | refs/heads/master | cwebapp/cwebapp/controllers.py | 1 | import xmlrpclib
import turbogears
import auth
import cherrypy.config
from turbogears import controllers
class SubDir(object):
    """Base class for URL sub-controllers that share one XML-RPC proxy.

    The proxy is stored on ``self._api`` for subclasses to call.
    """

    def __init__(self, api):
        self._api = api
class Policy(SubDir):
    # OU/policy tree browser backed by the server's XML-RPC "policy" API.
    @turbogears.expose(html="cwebapp.templates.policy")
    @auth.needsLogin
    def index(self, userInfo):
        # userInfo is presumably injected by @auth.needsLogin and carries
        # the XML-RPC session token under 'session' -- see Root.index.
        myOU = self._api.policy.myOU(userInfo['session'])
        tree = self._api.policy.showTree(userInfo['session'])
        # Annotate each OU row with its number of client profiles.
        for row in tree:
            clients = self._api.policy.countProfilesOfOU(userInfo['session'],
                                                         row['ou_id'])
            row['num_clients'] = clients
        return dict(OU=myOU, tree=tree)
class Systems(SubDir):
    # System-profile listing and per-profile detail pages.
    @turbogears.expose(html="cwebapp.templates.systems")
    @auth.needsLogin
    def index(self, userInfo):
        # All profiles visible to this session.
        systems = self._api.cadmin.findProfile(userInfo['session'])
        return dict(systems=systems)
    @turbogears.expose(html="cwebapp.templates.systemDetail")
    @auth.needsLogin
    def details(self, userInfo, profileID):
        # Detail view for one profile, identified by profileID.
        system = self._api.systems.systemDetail(userInfo['session'],
                                                profileID)
        return dict(system=system)
class Channels(SubDir):
    # Software-channel listing and per-channel detail pages.
    @turbogears.expose(html="cwebapp.templates.channels")
    @auth.needsLogin
    def index(self, userInfo):
        channels = self._api.channels.listChannels(userInfo['session'])
        return dict(channels=channels)
    @turbogears.expose(html="cwebapp.templates.channelDetail")
    @auth.needsLogin
    def detail(self, userInfo, label):
        # 'label' is the channel's unique label, not its display name.
        detail = self._api.channels.getChannelDetail(userInfo['session'],
                                                     label)
        return dict(channel=detail)
class Root(controllers.Root):
    # Top-level TurboGears controller; mounts the sub-controllers and owns
    # the XML-RPC connection to the Current server.
    def __init__(self):
        controllers.Root.__init__(self)
        # Server URL comes from the CherryPy config key "current".
        self._api = xmlrpclib.Server(cherrypy.config.get("current"))
        self.systems = Systems(self._api)
        self.channels = Channels(self._api)
        self.policy = Policy(self._api)
    def doLoginCall(self, userid, password):
        # Validates credentials server-side; used by the auth module.
        return self._api.policy.login(userid, password)
    @turbogears.expose(html="cwebapp.templates.index")
    @auth.needsLogin
    def index(self, userInfo):
        # NOTE(review): debugging leftover -- prints to the server console.
        print userInfo
        return dict(systemTotal=self._api.systems.systemCount(
                                    userInfo['session']),
                    userID=userInfo['userid'])
    @turbogears.expose(html="cwebapp.templates.login")
    def login(self, redirect="/", message=None):
        # Clear any stale auth cookie before showing the login form.
        auth.removeCookie()
        return dict(redirect=redirect, message=message)
|
smmribeiro/intellij-community | refs/heads/master | python/testData/psi/FStringBackslashBeforeExpression.py | 15 | s = f'foo{\ 42}bar' |
yaph/z3lstore | refs/heads/master | store_tests.py | 1 | # -*- coding: utf-8 -*-
"""
Store Tests
~~~~~~~~~~~~
Tests the store application.
"""
import os
import store
import unittest
class StoreTestCase(unittest.TestCase):
    """Exercise the public routes of the store application."""

    def setUp(self):
        # Enable Flask testing mode and grab a test client for requests.
        store.app.config['TESTING'] = True
        self.app = store.app.test_client()

    def test_search(self):
        rv = self.app.get('/search/geek')
        assert rv.status_code == 200
        assert rv.data.find('geek') > 0

    def test_tag(self):
        rv = self.app.get('/tag/geek/')
        assert rv.status_code == 200
        assert rv.data.find('geek') > 0

    def test_unknowntag(self):
        # A tag that does not exist must yield a 404.
        rv = self.app.get('/tag/unknowntag/')
        assert rv.status_code == 404

    def test_404(self):
        rv = self.app.get('/404')
        assert rv.status_code == 404
if __name__ == '__main__':
unittest.main()
|
nemesisdesign/django | refs/heads/master | tests/middleware_exceptions/tests.py | 24 | from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
from . import middleware as mw
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class MiddlewareTests(SimpleTestCase):
    # Exercises Django's middleware hooks (process_view,
    # process_template_response, process_exception); each test installs a
    # purpose-built middleware from the sibling ``middleware`` module.
    def tearDown(self):
        # The test middlewares append to the module-level ``mw.log`` list;
        # reset it so tests stay independent.
        mw.log = []
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewNoneMiddleware'])
    def test_process_view_return_none(self):
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, ['processed view normal_view'])
        self.assertEqual(response.content, b'OK')
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewMiddleware'])
    def test_process_view_return_response(self):
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.LogMiddleware',
    ])
    def test_templateresponse_from_process_view_rendered(self):
        """
        TemplateResponses returned from process_view() must be rendered before
        being passed to any middleware that tries to access response.content,
        such as middleware_exceptions.middleware.LogMiddleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view\nProcessViewTemplateResponseMiddleware')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.TemplateResponseMiddleware',
    ])
    def test_templateresponse_from_process_view_passed_to_process_template_response(self):
        """
        TemplateResponses returned from process_view() should be passed to any
        template response middleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        expected_lines = [
            b'Processed view normal_view',
            b'ProcessViewTemplateResponseMiddleware',
            b'TemplateResponseMiddleware',
        ]
        self.assertEqual(response.content, b'\n'.join(expected_lines))
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.TemplateResponseMiddleware'])
    def test_process_template_response(self):
        response = self.client.get('/middleware_exceptions/template_response/')
        self.assertEqual(response.content, b'template_response OK\nTemplateResponseMiddleware')
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.LogMiddleware'])
    def test_view_exception_converted_before_middleware(self):
        # PermissionDenied becomes a 403 response before middleware sees it.
        response = self.client.get('/middleware_exceptions/permission_denied/')
        self.assertEqual(mw.log, [(response.status_code, response.content)])
        self.assertEqual(response.status_code, 403)
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_view_exception_handled_by_process_exception(self):
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(response.content, b'Exception caught')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessExceptionLogMiddleware',
        'middleware_exceptions.middleware.ProcessExceptionMiddleware',
    ])
    def test_response_from_process_exception_short_circuits_remainder(self):
        # The inner middleware handles the exception, so the outer one's
        # process_exception never runs (mw.log stays empty).
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(mw.log, [])
        self.assertEqual(response.content, b'Exception caught')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.LogMiddleware',
        'middleware_exceptions.middleware.NotFoundMiddleware',
    ])
    def test_exception_in_middleware_converted_before_prior_middleware(self):
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, [(404, response.content)])
        self.assertEqual(response.status_code, 404)
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_exception_in_render_passed_to_process_exception(self):
        response = self.client.get('/middleware_exceptions/exception_in_render/')
        self.assertEqual(response.content, b'Exception caught')
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
    # Verifies error behaviour when the ROOT_URLCONF setting is absent.
    @override_settings(ROOT_URLCONF=None)
    def test_missing_root_urlconf(self):
        # Removing ROOT_URLCONF is safe, as override_settings will restore
        # the previously defined settings.
        del settings.ROOT_URLCONF
        with self.assertRaises(AttributeError):
            self.client.get("/middleware_exceptions/view/")
class MyMiddleware(object):
    """Middleware fixture that opts out of the stack by raising
    MiddlewareNotUsed from its constructor."""

    def __init__(self, get_response=None):
        raise MiddlewareNotUsed

    def process_request(self, request):
        # Never reached: construction always fails.
        pass
class MyMiddlewareWithExceptionMessage(object):
    """Like MyMiddleware, but raises MiddlewareNotUsed with a message so
    the logged output can be asserted against."""

    def __init__(self, get_response=None):
        raise MiddlewareNotUsed('spam eggs')

    def process_request(self, request):
        # Never reached: construction always fails.
        pass
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='middleware_exceptions.urls',
    MIDDLEWARE=['django.middleware.common.CommonMiddleware'],
)
class MiddlewareNotUsedTests(SimpleTestCase):
    # Verifies that MiddlewareNotUsed raised in a middleware constructor is
    # logged at debug level (only when DEBUG=True) and otherwise ignored.
    rf = RequestFactory()
    def test_raise_exception(self):
        request = self.rf.get('middleware_exceptions/view/')
        with self.assertRaises(MiddlewareNotUsed):
            MyMiddleware().process_request(request)
    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddleware'])
    def test_log(self):
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
        )
    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'])
    def test_log_custom_message(self):
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
        )
    @override_settings(DEBUG=False)
    def test_do_not_log_when_debug_is_false(self):
        # The MiddlewareNotUsed debug message is suppressed when DEBUG=False.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 0)
|
bigswitch/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/networks/subnets/tables.py | 61 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets \
import tables as proj_tables
LOG = logging.getLogger(__name__)
class DeleteSubnet(proj_tables.SubnetPolicyTargetMixin, tables.DeleteAction):
    # Table action that deletes one or more subnets via the Neutron API.
    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Delete Subnet",
            u"Delete Subnets",
            count
        )
    @staticmethod
    def action_past(count):
        # Past-tense label used in success notifications.
        return ungettext_lazy(
            u"Deleted Subnet",
            u"Deleted Subnets",
            count
        )
    policy_rules = (("network", "delete_subnet"),)
    def delete(self, request, obj_id):
        # Delete a single subnet; on failure, report the error and redirect
        # back to the parent network's detail page.
        try:
            api.neutron.subnet_delete(request, obj_id)
        except Exception:
            msg = _('Failed to delete subnet %s') % obj_id
            LOG.info(msg)
            network_id = self.table.kwargs['network_id']
            redirect = reverse('horizon:admin:networks:detail',
                               args=[network_id])
            exceptions.handle(request, msg, redirect=redirect)
class CreateSubnet(proj_tables.SubnetPolicyTargetMixin, tables.LinkAction):
    """Link action opening the admin 'create subnet' modal for a network."""
    name = "create"
    verbose_name = _("Create Subnet")
    url = "horizon:admin:networks:addsubnet"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_subnet"),)
    def get_link_url(self, datum=None):
        # The create URL only needs the parent network id from the table kwargs.
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id,))
class UpdateSubnet(proj_tables.SubnetPolicyTargetMixin, tables.LinkAction):
    """Link action opening the admin 'edit subnet' modal for a table row."""
    name = "update"
    verbose_name = _("Edit Subnet")
    url = "horizon:admin:networks:editsubnet"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_subnet"),)
    def get_link_url(self, subnet):
        # The edit URL needs both the parent network id and the subnet id.
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id, subnet.id))
class SubnetsTable(tables.DataTable):
    """Admin table listing the subnets belonging to a single network."""
    name = tables.Column("name_or_id", verbose_name=_("Name"),
                         link='horizon:admin:networks:subnets:detail')
    cidr = tables.Column("cidr", verbose_name=_("CIDR"))
    ip_version = tables.Column("ipver_str", verbose_name=_("IP Version"))
    gateway_ip = tables.Column("gateway_ip", verbose_name=_("Gateway IP"))
    # Where to send the user when the owning network cannot be retrieved.
    failure_url = reverse_lazy('horizon:admin:networks:index')
    def get_object_display(self, subnet):
        # Rows are identified by subnet UUID (e.g. in confirmation messages).
        return subnet.id
    @memoized.memoized_method
    def _get_network(self):
        # Fetch the network that owns these subnets (memoized per table).
        try:
            network_id = self.kwargs['network_id']
            network = api.neutron.network_get(self.request, network_id)
            network.set_id_as_name_if_empty(length=0)
        except Exception:
            # NOTE(review): if the kwargs lookup itself raised, network_id is
            # unbound here; presumably 'network_id' is always present -- verify.
            msg = _('Unable to retrieve details for network "%s".') \
                % (network_id)
            exceptions.handle(self.request, msg, redirect=self.failure_url)
        return network
    class Meta(object):
        name = "subnets"
        verbose_name = _("Subnets")
        table_actions = (CreateSubnet, DeleteSubnet)
        row_actions = (UpdateSubnet, DeleteSubnet,)
        hidden_title = False
|
ndebuhr/thermo-state-solver | refs/heads/master | thermo-env/lib/python3.5/site-packages/pip/_vendor/distlib/scripts.py | 333 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
def _enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
# otherwise whole
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
class ScriptMaker(object):
    """
    A class to copy or create scripts from source scripts or callable
    specifications.
    """
    # Template for scripts generated from export entries; may be overridden.
    script_template = SCRIPT_TEMPLATE
    executable = None  # for shebangs; None means derive from the interpreter
    def __init__(self, source_dir, target_dir, add_launchers=True,
                 dry_run=False, fileop=None):
        """
        :param source_dir: directory containing source scripts (_copy_script)
        :param target_dir: directory generated/copied scripts are written to
        :param add_launchers: on Windows, wrap scripts in .exe launchers
        :param dry_run: if true, the FileOperator performs no real writes
        :param fileop: optional FileOperator instance to use
        """
        self.source_dir = source_dir
        self.target_dir = target_dir
        self.add_launchers = add_launchers
        self.force = False
        self.clobber = False
        # It only makes sense to set mode bits on POSIX.
        self.set_mode = (os.name == 'posix') or (os.name == 'java' and
                                                 os._name == 'posix')
        self.variants = set(('', 'X.Y'))
        self._fileop = fileop or FileOperator(dry_run)
        self._is_nt = os.name == 'nt' or (
            os.name == 'java' and os._name == 'nt')
    def _get_alternate_executable(self, executable, options):
        # For GUI scripts on Windows, swap python for pythonw so that no
        # console window is opened.
        if options.get('gui', False) and self._is_nt:  # pragma: no cover
            dn, fn = os.path.split(executable)
            fn = fn.replace('python', 'pythonw')
            executable = os.path.join(dn, fn)
        return executable
    if sys.platform.startswith('java'):  # pragma: no cover
        # Jython-only helpers, conditionally defined at class-creation time.
        def _is_shell(self, executable):
            """
            Determine if the specified executable is a script
            (contains a #! line)
            """
            try:
                with open(executable) as fp:
                    return fp.read(2) == '#!'
            except (OSError, IOError):
                logger.warning('Failed to open %s', executable)
                return False
        def _fix_jython_executable(self, executable):
            # Decide whether the executable can be used directly in a shebang
            # or must be invoked through /usr/bin/env.
            if self._is_shell(executable):
                # Workaround for Jython is not needed on Linux systems.
                import java
                if java.lang.System.getProperty('os.name') == 'Linux':
                    return executable
            elif executable.lower().endswith('jython.exe'):
                # Use wrapper exe for Jython on Windows
                return executable
            return '/usr/bin/env %s' % executable
    def _get_shebang(self, encoding, post_interp=b'', options=None):
        """
        Build the shebang line (as bytes, including trailing newline) for a
        generated script, validating that it is decodable both as UTF-8 and
        as the target script's own encoding.
        """
        enquote = True
        if self.executable:
            executable = self.executable
            enquote = False  # assume this will be taken care of
        elif not sysconfig.is_python_build():
            executable = get_executable()
        elif in_venv():  # pragma: no cover
            executable = os.path.join(sysconfig.get_path('scripts'),
                            'python%s' % sysconfig.get_config_var('EXE'))
        else:  # pragma: no cover
            executable = os.path.join(
                sysconfig.get_config_var('BINDIR'),
               'python%s%s' % (sysconfig.get_config_var('VERSION'),
                               sysconfig.get_config_var('EXE')))
        if options:
            executable = self._get_alternate_executable(executable, options)
        if sys.platform.startswith('java'):  # pragma: no cover
            executable = self._fix_jython_executable(executable)
        # Normalise case for Windows
        executable = os.path.normcase(executable)
        # If the user didn't specify an executable, it may be necessary to
        # cater for executable paths with spaces (not uncommon on Windows)
        if enquote:
            executable = _enquote_executable(executable)
        # Issue #51: don't use fsencode, since we later try to
        # check that the shebang is decodable using utf-8.
        executable = executable.encode('utf-8')
        # in case of IronPython, play safe and enable frames support
        if (sys.platform == 'cli' and '-X:Frames' not in post_interp
            and '-X:FullFrames' not in post_interp):  # pragma: no cover
            post_interp += b' -X:Frames'
        shebang = b'#!' + executable + post_interp + b'\n'
        # Python parser starts to read a script using UTF-8 until
        # it gets a #coding:xxx cookie. The shebang has to be the
        # first line of a file, the #coding:xxx cookie cannot be
        # written before. So the shebang has to be decodable from
        # UTF-8.
        try:
            shebang.decode('utf-8')
        except UnicodeDecodeError:  # pragma: no cover
            raise ValueError(
                'The shebang (%r) is not decodable from utf-8' % shebang)
        # If the script is encoded to a custom encoding (use a
        # #coding:xxx cookie), the shebang has to be decodable from
        # the script encoding too.
        if encoding != 'utf-8':
            try:
                shebang.decode(encoding)
            except UnicodeDecodeError:  # pragma: no cover
                raise ValueError(
                    'The shebang (%r) is not decodable '
                    'from the script encoding (%r)' % (shebang, encoding))
        return shebang
    def _get_script_text(self, entry):
        # Render the stub script for an export entry (module/function pair).
        return self.script_template % dict(module=entry.prefix,
                                           func=entry.suffix)
    # Manifest used for Windows launcher executables (see get_manifest).
    manifest = _DEFAULT_MANIFEST
    def get_manifest(self, exename):
        # Fill the manifest template with the executable's base name.
        base = os.path.basename(exename)
        return self.manifest % base
    def _write_script(self, names, shebang, script_bytes, filenames, ext):
        """
        Write *script_bytes* under every name in *names* inside target_dir,
        appending each written path to *filenames*. On Windows (with
        add_launchers), the script is zipped and prefixed with a launcher exe.
        """
        use_launcher = self.add_launchers and self._is_nt
        linesep = os.linesep.encode('utf-8')
        if not use_launcher:
            script_bytes = shebang + linesep + script_bytes
        else:  # pragma: no cover
            # 't' = console launcher, 'w' = GUI launcher (for .pyw scripts)
            if ext == 'py':
                launcher = self._get_launcher('t')
            else:
                launcher = self._get_launcher('w')
            stream = BytesIO()
            with ZipFile(stream, 'w') as zf:
                zf.writestr('__main__.py', script_bytes)
            zip_data = stream.getvalue()
            # launcher exe + shebang + zipped script, all in one file
            script_bytes = launcher + shebang + linesep + zip_data
        for name in names:
            outname = os.path.join(self.target_dir, name)
            if use_launcher:  # pragma: no cover
                n, e = os.path.splitext(outname)
                if e.startswith('.py'):
                    outname = n
                outname = '%s.exe' % outname
                try:
                    self._fileop.write_binary_file(outname, script_bytes)
                except Exception:
                    # Failed writing an executable - it might be in use.
                    logger.warning('Failed to write executable - trying to '
                                   'use .deleteme logic')
                    dfname = '%s.deleteme' % outname
                    if os.path.exists(dfname):
                        os.remove(dfname)  # Not allowed to fail here
                    os.rename(outname, dfname)  # nor here
                    self._fileop.write_binary_file(outname, script_bytes)
                    logger.debug('Able to replace executable using '
                                 '.deleteme logic')
                    try:
                        os.remove(dfname)
                    except Exception:
                        pass  # still in use - ignore error
            else:
                if self._is_nt and not outname.endswith('.' + ext):  # pragma: no cover
                    outname = '%s.%s' % (outname, ext)
                if os.path.exists(outname) and not self.clobber:
                    logger.warning('Skipping existing file %s', outname)
                    continue
                self._fileop.write_binary_file(outname, script_bytes)
                if self.set_mode:
                    self._fileop.set_executable_mode([outname])
            filenames.append(outname)
    def _make_script(self, entry, filenames, options=None):
        # Generate a script from an export entry; one file per enabled
        # variant ('' -> name, 'X' -> nameN, 'X.Y' -> name-N.M).
        post_interp = b''
        if options:
            args = options.get('interpreter_args', [])
            if args:
                args = ' %s' % ' '.join(args)
                post_interp = args.encode('utf-8')
        shebang = self._get_shebang('utf-8', post_interp, options=options)
        script = self._get_script_text(entry).encode('utf-8')
        name = entry.name
        scriptnames = set()
        if '' in self.variants:
            scriptnames.add(name)
        if 'X' in self.variants:
            scriptnames.add('%s%s' % (name, sys.version[0]))
        if 'X.Y' in self.variants:
            scriptnames.add('%s-%s' % (name, sys.version[:3]))
        if options and options.get('gui', False):
            ext = 'pyw'
        else:
            ext = 'py'
        self._write_script(scriptnames, shebang, script, filenames, ext)
    def _copy_script(self, script, filenames):
        """
        Copy a source script to target_dir, rewriting its shebang line when
        the first line invokes python (per FIRST_LINE_RE).
        """
        adjust = False
        script = os.path.join(self.source_dir, convert_path(script))
        outname = os.path.join(self.target_dir, os.path.basename(script))
        if not self.force and not self._fileop.newer(script, outname):
            logger.debug('not copying %s (up-to-date)', script)
            return
        # Always open the file, but ignore failures in dry-run mode --
        # that way, we'll get accurate feedback if we can read the
        # script.
        try:
            f = open(script, 'rb')
        except IOError:  # pragma: no cover
            if not self.dry_run:
                raise
            f = None
        else:
            first_line = f.readline()
            if not first_line:  # pragma: no cover
                # NOTE(review): get_command_name() is not defined on this
                # class -- this branch would raise AttributeError; verify.
                logger.warning('%s: %s is an empty file (skipping)',
                               self.get_command_name(), script)
                return
            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
            if match:
                adjust = True
                post_interp = match.group(1) or b''
        if not adjust:
            # No python shebang found: plain copy, keeping mode bits.
            if f:
                f.close()
            self._fileop.copy_file(script, outname)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
            filenames.append(outname)
        else:
            logger.info('copying and adjusting %s -> %s', script,
                        self.target_dir)
            if not self._fileop.dry_run:
                encoding, lines = detect_encoding(f.readline)
                f.seek(0)
                shebang = self._get_shebang(encoding, post_interp)
                if b'pythonw' in first_line:  # pragma: no cover
                    ext = 'pyw'
                else:
                    ext = 'py'
                n = os.path.basename(outname)
                self._write_script([n], shebang, f.read(), filenames, ext)
            if f:
                f.close()
    @property
    def dry_run(self):
        # Dry-run state is delegated to the underlying FileOperator.
        return self._fileop.dry_run
    @dry_run.setter
    def dry_run(self, value):
        self._fileop.dry_run = value
    if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):  # pragma: no cover
        # Executable launcher support.
        # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
        def _get_launcher(self, kind):
            # kind: 't' for console, 'w' for GUI; returns the launcher exe
            # bytes bundled with this package (t32/t64/w32/w64.exe).
            if struct.calcsize('P') == 8:   # 64-bit
                bits = '64'
            else:
                bits = '32'
            name = '%s%s.exe' % (kind, bits)
            # Issue 31: don't hardcode an absolute package name, but
            # determine it relative to the current package
            distlib_package = __name__.rsplit('.', 1)[0]
            result = finder(distlib_package).find(name).bytes
            return result
    # Public API follows
    def make(self, specification, options=None):
        """
        Make a script.
        :param specification: The specification, which is either a valid export
                              entry specification (to make a script from a
                              callable) or a filename (to make a script by
                              copying from a source location).
        :param options: A dictionary of options controlling script generation.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        entry = get_export_entry(specification)
        if entry is None:
            self._copy_script(specification, filenames)
        else:
            self._make_script(entry, filenames, options=options)
        return filenames
    def make_multiple(self, specifications, options=None):
        """
        Take a list of specifications and make scripts from them.
        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        for specification in specifications:
            filenames.extend(self.make(specification, options))
        return filenames
|
vipul-sharma20/oh-mainline | refs/heads/master | vendor/packages/django-tastypie/tastypie/admin.py | 57 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib import admin
# Expose admin integration only when django.contrib.auth is installed --
# presumably because the ApiKey model references the auth user model.
if 'django.contrib.auth' in settings.INSTALLED_APPS:
    from tastypie.models import ApiKey
    class ApiKeyInline(admin.StackedInline):
        # Inline editor so ApiKeys can be managed from a related admin page.
        model = ApiKey
        extra = 0
    # When TASTYPIE_ABSTRACT_APIKEY is set, ApiKey is not registered with the
    # admin site; the setting must be a strict boolean.
    ABSTRACT_APIKEY = getattr(settings, 'TASTYPIE_ABSTRACT_APIKEY', False)
    if ABSTRACT_APIKEY and not isinstance(ABSTRACT_APIKEY, bool):
        raise TypeError("'TASTYPIE_ABSTRACT_APIKEY' must be either 'True' "
                        "or 'False'.")
    if not ABSTRACT_APIKEY:
        admin.site.register(ApiKey)
|
petrus-v/odoo | refs/heads/8.0 | addons/base_import/__openerp__.py | 317 | {
'name': 'Base import',
'description': """
New extensible file import for OpenERP
======================================
Re-implement openerp's file import system:
* Server side, the previous system forces most of the logic into the
client which duplicates the effort (between clients), makes the
import system much harder to use without a client (direct RPC or
other forms of automation) and makes knowledge about the
import/export system much harder to gather as it is spread over
3+ different projects.
* In a more extensible manner, so users and partners can build their
own front-end to import from other file formats (e.g. OpenDocument
files) which may be simpler to handle in their work flow or from
their data production sources.
* In a module, so that administrators and users of OpenERP who do not
need or want an online import can avoid it being available to users.
""",
'category': 'Uncategorized',
'website': 'https://www.odoo.com',
'author': 'OpenERP SA',
'depends': ['web'],
'installable': True,
'auto_install': True,
'data': [
'security/ir.model.access.csv',
'views/base_import.xml',
],
'qweb': ['static/src/xml/import.xml'],
}
|
akash1808/nova_test_latest | refs/heads/master | nova/tests/unit/virt/libvirt/volume/test_remotefs.py | 6 | # Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_concurrency import processutils
from nova import test
from nova import utils
from nova.virt.libvirt.volume import remotefs
class RemoteFSTestCase(test.NoDBTestCase):
    """Remote filesystem operations test case."""
    @mock.patch.object(utils, 'execute')
    def _test_mount_share(self, mock_execute, already_mounted=False):
        # Shared helper: when already_mounted is set, the second execute call
        # (the mount itself) raises EBUSY -- mount_share is presumably
        # expected to tolerate an already-mounted share; verify in remotefs.
        if already_mounted:
            err_msg = 'Device or resource busy'
            mock_execute.side_effect = [
                None, processutils.ProcessExecutionError(err_msg)]
        remotefs.mount_share(
            mock.sentinel.mount_path, mock.sentinel.export_path,
            mock.sentinel.export_type,
            options=[mock.sentinel.mount_options])
        # The mount point directory must be created...
        mock_execute.assert_any_call('mkdir', '-p',
                                     mock.sentinel.mount_path)
        # ...and the share mounted onto it as root.
        mock_execute.assert_any_call('mount', '-t', mock.sentinel.export_type,
                                     mock.sentinel.mount_options,
                                     mock.sentinel.export_path,
                                     mock.sentinel.mount_path,
                                     run_as_root=True)
    def test_mount_new_share(self):
        self._test_mount_share()
    def test_mount_already_mounted_share(self):
        self._test_mount_share(already_mounted=True)
    @mock.patch.object(utils, 'execute')
    def test_unmount_share(self, mock_execute):
        # Unmount must run as root and retry up to 3 times with delays.
        remotefs.unmount_share(
            mock.sentinel.mount_path, mock.sentinel.export_path)
        mock_execute.assert_any_call('umount', mock.sentinel.mount_path,
                                     run_as_root=True, attempts=3,
                                     delay_on_retry=True)
|
daniestevez/gr-satellites | refs/heads/master | python/ccsds/telemetry_parser.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Athanasios Theocharis <athatheoc@gmail.com>
# This was made under ESA Summer of Code in Space 2019
# by Athanasios Theocharis, mentored by Daniel Estevez
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
import numpy
from gnuradio import gr
import pmt
from . import telemetry
class telemetry_parser(gr.basic_block):
    """
    Message-passing block that parses CCSDS telemetry packets.

    PDUs arriving on the 'in' port are parsed with the construct-based
    definitions from the ``telemetry`` module and pretty-printed to stdout;
    the original PDU is then republished unchanged on the 'out' port.
    """
    def __init__(self):
        gr.basic_block.__init__(self,
            name="telemetry_parser",
            in_sig=[],
            out_sig=[])
        self.message_port_register_in(pmt.intern('in'))
        self.set_msg_handler(pmt.intern('in'), self.handle_msg)
        self.message_port_register_out(pmt.intern('out'))
    def handle_msg(self, msg_pmt):
        """Parse, print and re-publish one telemetry PDU."""
        msg = pmt.cdr(msg_pmt)
        if not pmt.is_u8vector(msg):
            print("[ERROR] Received invalid message type. Expected u8vector")
            return
        packet = bytearray(pmt.u8vector_elements(msg))
        # Payload size: total length minus the 6-byte primary header (and
        # minus 4 more below if the header flags an operational control field).
        size = len(packet) - 6
        try:
            header = telemetry.PrimaryHeader.parse(packet[:])
            if header.ocf_flag == 1:
                size -= 4
            data = telemetry.FullPacket.parse(packet[:], size=size)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only parsing failures should be tolerated.
            print("Could not decode telemetry packet")
            return
        print(data)
        self.message_port_pub(pmt.intern('out'), msg_pmt)
|
analogdevicesinc/gnuradio | refs/heads/master | grc/gui/FlowGraph.py | 5 | """
Copyright 2007-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import random
import functools
from itertools import chain
from operator import methodcaller
from distutils.spawn import find_executable
import gobject
from . import Actions, Colors, Constants, Utils, Messages, Bars, Dialogs
from . Element import Element
from . Constants import SCROLL_PROXIMITY_SENSITIVITY, SCROLL_DISTANCE
from . external_editor import ExternalEditor
class FlowGraph(Element):
"""
FlowGraph is the data structure to store graphical signal blocks,
graphical inputs and outputs,
and the connections between inputs and outputs.
"""
    def __init__(self):
        """
        FlowGraph constructor.
        Create a list for signal blocks and connections. Connect mouse handlers.
        """
        Element.__init__(self)
        #when is the flow graph selected? (used by keyboard event handler)
        self.is_selected = lambda: bool(self.get_selected_elements())
        #important vars dealing with mouse event tracking
        self.element_moved = False
        self.mouse_pressed = False
        self.unselect()
        self.press_coor = (0, 0)
        #selected ports
        self._old_selected_port = None
        self._new_selected_port = None
        # current mouse hover element
        self.element_under_mouse = None
        #context menu
        self._context_menu = Bars.ContextMenu()
        self.get_context_menu = lambda: self._context_menu
        # maps (block_id, param_key) -> ExternalEditor, see install_external_editor()
        self._external_updaters = {}
    def install_external_editor(self, param):
        """
        Open (or re-use) an external editor process for editing a block param.
        One ExternalEditor is kept per (block id, param key) target; edits are
        forwarded back through handle_external_editor_change on the gobject
        main loop.
        """
        target = (param.get_parent().get_id(), param.get_key())
        if target in self._external_updaters:
            editor = self._external_updaters[target]
        else:
            # pick the configured editor, or prompt the user for one
            editor = (find_executable(Constants.EDITOR) or
                      Dialogs.ChooseEditorDialog())
            if not editor:
                return
            updater = functools.partial(
                self.handle_external_editor_change, target=target)
            editor = self._external_updaters[target] = ExternalEditor(
                editor=editor,
                name=target[0], value=param.get_value(),
                callback=functools.partial(gobject.idle_add, updater)
            )
            editor.start()
        try:
            editor.open_editor()
        except Exception as e:
            # Problem launching the editor. Need to select a new editor.
            Messages.send('>>> Error opening an external editor. Please select a different editor.\n')
            # Reset the editor to force the user to select a new one.
            Constants.prefs.set_string('grc', 'editor', '')
            Constants.prefs.save()
            Constants.EDITOR = ""
    def handle_external_editor_change(self, new_value, target):
        """
        Apply an edited value coming back from an external editor.
        If the target block/param no longer exists, stop and drop the editor.
        """
        try:
            block_id, param_key = target
            self.get_block(block_id).get_param(param_key).set_value(new_value)
        except (IndexError, ValueError):  # block no longer exists
            self._external_updaters[target].stop()
            del self._external_updaters[target]
            return
        Actions.EXTERNAL_UPDATE()
###########################################################################
# Access Drawing Area
###########################################################################
def get_drawing_area(self): return self.drawing_area
def queue_draw(self): self.get_drawing_area().queue_draw()
def get_size(self): return self.get_drawing_area().get_size_request()
def set_size(self, *args): self.get_drawing_area().set_size_request(*args)
def get_scroll_pane(self): return self.drawing_area.get_parent()
def get_ctrl_mask(self): return self.drawing_area.ctrl_mask
def get_mod1_mask(self): return self.drawing_area.mod1_mask
def new_pixmap(self, *args): return self.get_drawing_area().new_pixmap(*args)
    def add_new_block(self, key, coor=None):
        """
        Add a block of the given key to this flow graph.
        Args:
            key: the block key
            coor: an optional coordinate or None for random
        Returns:
            the id assigned to the new block
        """
        id = self._get_unique_id(key)
        #calculate the position coordinate
        h_adj = self.get_scroll_pane().get_hadjustment()
        v_adj = self.get_scroll_pane().get_vadjustment()
        # random placement within the middle half of the visible viewport
        if coor is None: coor = (
            int(random.uniform(.25, .75)*h_adj.page_size + h_adj.get_value()),
            int(random.uniform(.25, .75)*v_adj.page_size + v_adj.get_value()),
        )
        #get the new block
        block = self.get_new_block(key)
        block.set_coordinate(coor)
        block.set_rotation(0)
        block.get_param('id').set_value(id)
        Actions.ELEMENT_CREATE()
        return id
###########################################################################
# Copy Paste
###########################################################################
def copy_to_clipboard(self):
"""
Copy the selected blocks and connections into the clipboard.
Returns:
the clipboard
"""
#get selected blocks
blocks = self.get_selected_blocks()
if not blocks: return None
#calc x and y min
x_min, y_min = blocks[0].get_coordinate()
for block in blocks:
x, y = block.get_coordinate()
x_min = min(x, x_min)
y_min = min(y, y_min)
#get connections between selected blocks
connections = filter(
lambda c: c.get_source().get_parent() in blocks and c.get_sink().get_parent() in blocks,
self.get_connections(),
)
clipboard = (
(x_min, y_min),
[block.export_data() for block in blocks],
[connection.export_data() for connection in connections],
)
return clipboard
    def paste_from_clipboard(self, clipboard):
        """
        Paste the blocks and connections from the clipboard.
        Args:
            clipboard: the nested data of blocks, connections
        """
        selected = set()
        (x_min, y_min), blocks_n, connections_n = clipboard
        # maps the id a block had when copied -> the newly created block
        old_id2block = dict()
        #recalc the position
        h_adj = self.get_scroll_pane().get_hadjustment()
        v_adj = self.get_scroll_pane().get_vadjustment()
        x_off = h_adj.get_value() - x_min + h_adj.page_size/4
        y_off = v_adj.get_value() - y_min + v_adj.page_size/4
        #create blocks
        for block_n in blocks_n:
            block_key = block_n.find('key')
            # never paste a second options block
            if block_key == 'options': continue
            block = self.get_new_block(block_key)
            selected.add(block)
            #set params
            params_n = block_n.findall('param')
            for param_n in params_n:
                param_key = param_n.find('key')
                param_value = param_n.find('value')
                #setup id parameter
                if param_key == 'id':
                    old_id2block[param_value] = block
                    #if the block id is not unique, get a new block id
                    if param_value in [bluck.get_id() for bluck in self.get_blocks()]:
                        param_value = self._get_unique_id(param_value)
                #set value to key
                block.get_param(param_key).set_value(param_value)
            #move block to offset coordinate
            block.move((x_off, y_off))
        #update before creating connections
        self.update()
        #create connections
        for connection_n in connections_n:
            source = old_id2block[connection_n.find('source_block_id')].get_source(connection_n.find('source_key'))
            sink = old_id2block[connection_n.find('sink_block_id')].get_sink(connection_n.find('sink_key'))
            self.connect(source, sink)
        #set all pasted elements selected
        for block in selected: selected = selected.union(set(block.get_connections()))
        self._selected_elements = list(selected)
###########################################################################
# Modify Selected
###########################################################################
def type_controller_modify_selected(self, direction):
"""
Change the registered type controller for the selected signal blocks.
Args:
direction: +1 or -1
Returns:
true for change
"""
return any([sb.type_controller_modify(direction) for sb in self.get_selected_blocks()])
def port_controller_modify_selected(self, direction):
"""
Change port controller for the selected signal blocks.
Args:
direction: +1 or -1
Returns:
true for changed
"""
return any([sb.port_controller_modify(direction) for sb in self.get_selected_blocks()])
def enable_selected(self, enable):
"""
Enable/disable the selected blocks.
Args:
enable: true to enable
Returns:
true if changed
"""
changed = False
for selected_block in self.get_selected_blocks():
if selected_block.set_enabled(enable): changed = True
return changed
def bypass_selected(self):
"""
Bypass the selected blocks.
Args:
None
Returns:
true if changed
"""
changed = False
for selected_block in self.get_selected_blocks():
if selected_block.set_bypassed(): changed = True
return changed
def move_selected(self, delta_coordinate):
"""
Move the element and by the change in coordinates.
Args:
delta_coordinate: the change in coordinates
"""
for selected_block in self.get_selected_blocks():
delta_coordinate = selected_block.bound_move_delta(delta_coordinate)
for selected_block in self.get_selected_blocks():
selected_block.move(delta_coordinate)
self.element_moved = True
    def rotate_selected(self, rotation):
        """
        Rotate the selected blocks by multiples of 90 degrees.
        Args:
            rotation: the rotation in degrees
        Returns:
            true if changed, otherwise false.
        """
        if not self.get_selected_blocks(): return False
        #initialize min and max coordinates
        min_x, min_y = self.get_selected_block().get_coordinate()
        max_x, max_y = self.get_selected_block().get_coordinate()
        #rotate each selected block, and find min/max coordinate
        for selected_block in self.get_selected_blocks():
            selected_block.rotate(rotation)
            #update the min/max coordinate
            x, y = selected_block.get_coordinate()
            min_x, min_y = min(min_x, x), min(min_y, y)
            max_x, max_y = max(max_x, x), max(max_y, y)
        #calculate center point of selected blocks
        ctr_x, ctr_y = (max_x + min_x)/2, (max_y + min_y)/2
        #rotate the blocks around the center point
        for selected_block in self.get_selected_blocks():
            x, y = selected_block.get_coordinate()
            x, y = Utils.get_rotated_coordinate((x - ctr_x, y - ctr_y), rotation)
            selected_block.set_coordinate((x + ctr_x, y + ctr_y))
        return True
    def remove_selected(self):
        """
        Remove selected elements
        Returns:
            true if changed.
        """
        changed = False
        # NOTE(review): if remove_element() mutates the list returned by
        # get_selected_elements(), this iterates a list being modified --
        # verify remove_element() leaves the selection list untouched.
        for selected_element in self.get_selected_elements():
            self.remove_element(selected_element)
            changed = True
        return changed
    def draw(self, gc, window):
        """
        Draw the background and grid if enabled.
        Draw all of the elements in this flow graph onto the pixmap.
        Draw the pixmap to the drawable window of this flow graph.
        """
        W,H = self.get_size()
        #draw the background
        gc.set_foreground(Colors.FLOWGRAPH_BACKGROUND_COLOR)
        window.draw_rectangle(gc, True, 0, 0, W, H)
        # draw comments first
        if Actions.TOGGLE_SHOW_BLOCK_COMMENTS.get_active():
            for block in self.iter_blocks():
                if block.get_enabled():
                    block.draw_comment(gc, window)
        #draw multi select rectangle
        if self.mouse_pressed and (not self.get_selected_elements() or self.get_ctrl_mask()):
            #coordinates
            x1, y1 = self.press_coor
            x2, y2 = self.get_coordinate()
            #calculate top-left coordinate and width/height
            x, y = int(min(x1, x2)), int(min(y1, y2))
            w, h = int(abs(x1 - x2)), int(abs(y1 - y2))
            #draw
            gc.set_foreground(Colors.HIGHLIGHT_COLOR)
            window.draw_rectangle(gc, True, x, y, w, h)
            gc.set_foreground(Colors.BORDER_COLOR)
            window.draw_rectangle(gc, False, x, y, w, h)
        #draw blocks on top of connections
        hide_disabled_blocks = Actions.TOGGLE_HIDE_DISABLED_BLOCKS.get_active()
        # sorting by get_enabled() puts disabled blocks first, so enabled
        # blocks are drawn last and appear on top
        blocks = sorted(self.iter_blocks(), key=methodcaller('get_enabled'))
        for element in chain(self.iter_connections(), blocks):
            if hide_disabled_blocks and not element.get_enabled():
                continue # skip hidden disabled blocks and connections
            element.draw(gc, window)
        #draw selected blocks on top of selected connections
        for selected_element in self.get_selected_connections() + self.get_selected_blocks():
            selected_element.draw(gc, window)
def update_selected(self):
"""
Remove deleted elements from the selected elements list.
Update highlighting so only the selected are highlighted.
"""
selected_elements = self.get_selected_elements()
elements = self.get_elements()
#remove deleted elements
for selected in selected_elements:
if selected in elements: continue
selected_elements.remove(selected)
if self._old_selected_port and self._old_selected_port.get_parent() not in elements:
self._old_selected_port = None
if self._new_selected_port and self._new_selected_port.get_parent() not in elements:
self._new_selected_port = None
#update highlighting
for element in elements:
element.set_highlighted(element in selected_elements)
    def update(self):
        """
        Call the top level rewrite and validate.
        Call the top level create labels and shapes.
        """
        # rewrite/validate run first; labels and shapes are then recreated
        # from the refreshed model
        self.rewrite()
        self.validate()
        self.create_labels()
        self.create_shapes()
def reload(self):
"""
Reload flow-graph (with updated blocks)
Args:
page: the page to reload (None means current)
Returns:
False if some error occurred during import
"""
success = False
data = self.export_data()
if data:
self.unselect()
success = self.import_data(data)
self.update()
return success
##########################################################################
## Get Selected
##########################################################################
def unselect(self):
    """Clear the current selection (no elements remain selected)."""
    self._selected_elements = []
def what_is_selected(self, coor, coor_m=None):
    """
    What is selected?

    At the given coordinate, return the elements found to be selected.
    If coor_m is unspecified, return a list of only the first element found to be selected:
    Iterate though the elements backwards since top elements are at the end of the list.
    If an element is selected, place it at the end of the list so that it is drawn last,
    and hence on top. Update the selected port information.

    Args:
        coor: the coordinate of the mouse click
        coor_m: the coordinate for multi select

    Returns:
        the selected blocks and connections or an empty list
    """
    selected_port = None
    selected = set()
    # check the elements (reversed: the last-drawn, i.e. topmost, wins)
    for element in reversed(self.get_elements()):
        selected_element = element.what_is_selected(coor, coor_m)
        if not selected_element: continue
        # hidden disabled connections, blocks and their ports can not be selected
        if Actions.TOGGLE_HIDE_DISABLED_BLOCKS.get_active() and (
            selected_element.is_block() and not selected_element.get_enabled() or
            selected_element.is_connection() and not selected_element.get_enabled() or
            selected_element.is_port() and not selected_element.get_parent().get_enabled()
        ):
            continue
        # update the selected port information;
        # selecting a port counts as selecting its parent block
        if selected_element.is_port():
            if not coor_m: selected_port = selected_element
            selected_element = selected_element.get_parent()
        selected.add(selected_element)
        # place at the end of the list so it is drawn last and hence on top
        # NOTE(review): this mutates the list while iterating it via
        # reversed(); safe in single-select mode because of the break below,
        # but in multi-select mode elements may be revisited -- confirm intended
        self.get_elements().remove(element)
        self.get_elements().append(element)
        # single select mode, break
        if not coor_m: break
    # remember the previously selected port (used for connection creation)
    if selected_port is not self._new_selected_port:
        self._old_selected_port = self._new_selected_port
        self._new_selected_port = selected_port
    return list(selected)
def get_selected_connections(self):
    """
    Get a group of selected connections.

    Returns:
        sub set of connections in this flow graph (order is unspecified)
    """
    connections = {e for e in self.get_selected_elements() if e.is_connection()}
    return list(connections)
def get_selected_blocks(self):
    """
    Get a group of selected blocks.

    Returns:
        sub set of blocks in this flow graph (order is unspecified)
    """
    blocks = {e for e in self.get_selected_elements() if e.is_block()}
    return list(blocks)
def get_selected_block(self):
    """
    Get the selected block when a block or port is selected.

    Returns:
        a block or None
    """
    blocks = self.get_selected_blocks()
    # mirrors the original and/or idiom: an empty list yields None
    return (blocks and blocks[0]) or None
def get_selected_elements(self):
    """
    Get the group of selected elements.

    Note: this returns the internal list itself, not a copy;
    callers mutate it in place (see update_selected).

    Returns:
        sub set of elements in this flow graph
    """
    return self._selected_elements
def get_selected_element(self):
    """
    Get the first selected element, if any.

    Returns:
        a block, port, or connection or None
    """
    current = self.get_selected_elements()
    # mirrors the original and/or idiom: an empty list yields None
    return (current and current[0]) or None
def update_selected_elements(self):
    """
    Update the selected elements.

    The update behavior depends on the state of the mouse button.
    When the mouse button is pressed, the selection changes when
    the control mask is set or the new selection is not in the current group.
    When the mouse button is released, the selection changes when
    the mouse has not moved and either the control mask is set or the
    current group is empty.
    On release, attempt to make a new connection if both the old and new
    selected ports are filled.
    If the control mask is set, merge with the current elements
    (symmetric difference).
    """
    selected_elements = None
    if self.mouse_pressed:
        new_selections = self.what_is_selected(self.get_coordinate())
        # update the selections if the new selection is not in the current
        # selections; this allows moving entire selected groups of elements
        if self.get_ctrl_mask() or not (
            new_selections and new_selections[0] in self.get_selected_elements()
        ): selected_elements = new_selections
        if self._old_selected_port:
            self._old_selected_port.force_label_unhidden(False)
            self.create_shapes()
            self.queue_draw()
        elif self._new_selected_port:
            self._new_selected_port.force_label_unhidden()
    else:  # called from a mouse release
        if not self.element_moved and (not self.get_selected_elements() or self.get_ctrl_mask()):
            selected_elements = self.what_is_selected(self.get_coordinate(), self.press_coor)
        # this selection and the last were ports, try to connect them
        if self._old_selected_port and self._new_selected_port:
            try:
                self.connect(self._old_selected_port, self._new_selected_port)
                Actions.ELEMENT_CREATE()
            except Exception:
                # Bug fix: this was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt; only report real failures.
                Messages.send_fail_connection()
            self._old_selected_port = None
            self._new_selected_port = None
            return
    # update selected elements
    if selected_elements is None: return
    old_elements = set(self.get_selected_elements())
    self._selected_elements = list(set(selected_elements))
    new_elements = set(self.get_selected_elements())
    # if ctrl, set the selected elements to the union minus the
    # intersection of old and new (symmetric difference)
    if self.get_ctrl_mask():
        self._selected_elements = list(
            set.union(old_elements, new_elements) - set.intersection(old_elements, new_elements)
        )
    Actions.ELEMENT_SELECT()
##########################################################################
## Event Handlers
##########################################################################
def handle_mouse_context_press(self, coordinate, event):
    """
    The context mouse button was pressed:
    If no elements were selected, perform re-selection at this coordinate.
    Then, show the context menu at the mouse click location.
    """
    selections = self.what_is_selected(coordinate)
    # nothing under the cursor belongs to the current selection:
    # re-select here by simulating a quick press/release cycle
    if not set(selections).intersection(self.get_selected_elements()):
        self.set_coordinate(coordinate)
        self.mouse_pressed = True
        self.update_selected_elements()
        self.mouse_pressed = False
    # the context menu pops up regardless of whether re-selection happened
    self._context_menu.popup(None, None, None, event.button, event.time)
def handle_mouse_selector_press(self, double_click, coordinate):
    """
    The selector mouse button was pressed:
    Find the selected element. Attempt a new connection if possible.
    Open the block params window on a double click.
    Update the selection state of the flow graph.
    """
    self.press_coor = coordinate
    self.set_coordinate(coordinate)
    self.time = 0
    self.mouse_pressed = True
    # a double click clears the previous selection before re-selecting
    if double_click: self.unselect()
    self.update_selected_elements()
    # double click detected, bring up params dialog if possible
    if double_click and self.get_selected_block():
        self.mouse_pressed = False
        Actions.BLOCK_PARAM_MODIFY()
def handle_mouse_selector_release(self, coordinate):
    """
    The selector mouse button was released:
    Update the state, handle motion (dragging).
    And update the selected flowgraph elements.
    """
    self.set_coordinate(coordinate)
    self.time = 0
    self.mouse_pressed = False
    # a drag just finished: notify that a block move completed
    if self.element_moved:
        Actions.BLOCK_MOVE()
        self.element_moved = False
    self.update_selected_elements()
def handle_mouse_motion(self, coordinate):
    """
    The mouse has moved, respond to mouse dragging or notify elements.
    Move a selected element to the new coordinate.
    Auto-scroll the scroll bars at the boundaries.
    """
    # to perform a movement, the mouse must be pressed
    # (no longer checking pending events via gtk.events_pending() - always true in Windows)
    if not self.mouse_pressed:
        # only continue if mouse-over stuff is enabled (just the auto-hide port label stuff for now)
        if not Actions.TOGGLE_AUTO_HIDE_PORT_LABELS.get_active(): return
        redraw = False
        # find the topmost element under the cursor (list end = topmost)
        for element in reversed(self.get_elements()):
            over_element = element.what_is_selected(coordinate)
            if not over_element: continue
            if over_element != self.element_under_mouse:  # over sth new
                if self.element_under_mouse:
                    # mouse_out()/mouse_over() return truthy when a redraw is needed
                    redraw |= self.element_under_mouse.mouse_out() or False
                self.element_under_mouse = over_element
                redraw |= over_element.mouse_over() or False
            break
        else:
            # for/else: the loop found nothing under the cursor;
            # leave the previously hovered element, if any
            if self.element_under_mouse:
                redraw |= self.element_under_mouse.mouse_out() or False
                self.element_under_mouse = None
        if redraw:
            #self.create_labels()
            self.create_shapes()
            self.queue_draw()
    else:
        # perform auto-scrolling when dragging near the canvas borders
        width, height = self.get_size()
        x, y = coordinate
        h_adj = self.get_scroll_pane().get_hadjustment()
        v_adj = self.get_scroll_pane().get_vadjustment()
        for pos, length, adj, adj_val, adj_len in (
            (x, width, h_adj, h_adj.get_value(), h_adj.page_size),
            (y, height, v_adj, v_adj.get_value(), v_adj.page_size),
        ):
            # scroll if we moved near the border
            if pos-adj_val > adj_len-SCROLL_PROXIMITY_SENSITIVITY and adj_val+SCROLL_DISTANCE < length-adj_len:
                adj.set_value(adj_val+SCROLL_DISTANCE)
                adj.emit('changed')
            elif pos-adj_val < SCROLL_PROXIMITY_SENSITIVITY:
                adj.set_value(adj_val-SCROLL_DISTANCE)
                adj.emit('changed')
        # remove the connection if selected in drag event
        if len(self.get_selected_elements()) == 1 and self.get_selected_element().is_connection():
            Actions.ELEMENT_DELETE()
        # move the selected elements and record the new coordinate;
        # with snap-to-grid active, only move in whole grid steps
        if not self.get_ctrl_mask():
            X, Y = self.get_coordinate()
            dX, dY = int(x - X), int(y - Y)
            active = Actions.TOGGLE_SNAP_TO_GRID.get_active() or self.get_mod1_mask()
            if not active or abs(dX) >= Utils.CANVAS_GRID_SIZE or abs(dY) >= Utils.CANVAS_GRID_SIZE:
                self.move_selected((dX, dY))
                self.set_coordinate((x, y))
        # queue draw for animation
        self.queue_draw()
|
aerwin3/swift | refs/heads/master | test/unit/common/test_exceptions.py | 51 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(creiht): Tests
import unittest
from swift.common import exceptions
class TestExceptions(unittest.TestCase):
    """Unit tests for swift.common.exceptions."""

    def test_replication_exception(self):
        # str() of the exception mirrors the message it was built with
        self.assertEqual(str(exceptions.ReplicationException()), '')
        self.assertEqual(str(exceptions.ReplicationException('test')), 'test')

    def test_replication_lock_timeout(self):
        # ReplicationLockTimeout must be a MessageTimeout; always cancel
        # the timeout so it cannot fire after the test finishes
        exc = exceptions.ReplicationLockTimeout(15, 'test')
        try:
            self.assertTrue(isinstance(exc, exceptions.MessageTimeout))
        finally:
            exc.cancel()

    def test_client_exception(self):
        exc = exceptions.ClientException('test', http_scheme='HTTP',
                                         http_host='random',
                                         http_port=888,
                                         http_path='/randompath',
                                         http_query='foo=1',
                                         http_status=666,
                                         http_reason='reason',
                                         http_device='/sdb1',
                                         http_response_content='content')
        expected = 'test: HTTP://random:888/randompath?foo=1 666 reason: ' \
                   'device /sdb1 content'
        self.assertEqual(str(exc), expected)


if __name__ == '__main__':
    unittest.main()
|
bxlab/HiFive_Paper | refs/heads/master | Scripts/HiCLib/mirnylab-hiclib-460c3fbc0f72/build/lib/hiclib/fragmentHiC.py | 2 | # (c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: Maksim Imakaev (imakaev@mit.edu)
"""
This is a module class for fragment-level Hi-C data analysis.
The base class "HiCdataset" can load, save and merge Hi-C datasets,
perform certain filters, and save binned heatmaps.
Additional class HiCStatistics contains methods to analyze HiC data
on a fragment level.
This includes read statistics, scalings, etc.
Input data
----------
When used together with iterative mapping, this class can load
files from h5dicts created by iterative mapping.
This method can also input any dictionary-like structure, such as a dictionary,
np.savez, etc. The minimal subset of information are positions of two reads,
but providing strand informations is adviced.
If restriction fragment assignment is not provided,
it will be automatically recalculated.
.. warning ::
1-bp difference in positions of restriction sites will force certain
algorithms, such as scaling calculations, to throw an exception. It is
advised to supply restriction site data only if it was generated by
iterative mapping code.
Concepts
--------
All read data is stored in a synchronized h5dict-based dictionary of arrays.
Each variable has a fixed name and type, as specified in the self.vectors
variable. Whenever the variable is accessed from the program, it is loaded from
the h5dict.
Whenever a set of reads needs to be excluded from the dataset, a
:py:func:`maskFilter <HiCdataset.maskFilter>` method is called,
that goes over all datasets and overrides them.
This method automatically rebuilds fragments.
Filtering the data
------------------
This class has many build-in methods for filtering the data.
However, one can easily construct another filter as presented in
multiple one-liner examples below
.. code-block:: python
>>> Dset = HiCdataset(**kwargs)
>>> Dset.fragmentFilter((Dset.ufragmentlen >1000) * \
(Dset.ufragmentlen < 4000))
#Keep reads from fragments between 1kb and 4kb long.
>>> Dset.maskFilter(Dset.chrms1 == Dset.chrms2) #keep only cis reads
>>> Dset.maskFilter((Dset.chrms1 !=14) + (Dset.chrms2 !=14))
#Exclude all reads from chromosome 15 (yes, chromosomes are zero-based!)
>>> Dset.maskFilter(Dset.dist1 + Dset.dist2 > 500)
#Keep only random breaks, if 500 is maximum molecule length
-------------------------------------------------------------------------------
API documentation
-----------------
"""
import warnings
import os
import traceback
from copy import copy
from mirnylib.genome import Genome
import numpy as np
import gc
from hiclib.hicShared import binarySearch, sliceableDataset, mydtype, h5dictBinarySearch, mydtypeSorter, searchsorted
import mirnylib.h5dict
from mirnylib import numutils
from mirnylib.numutils import arrayInArray, \
uniqueIndex, fasterBooleanIndexing, fillDiagonal, arraySearch, \
arraySumByArray, externalMergeSort, chunkedBincount
import time
from textwrap import dedent
USE_NUMEXPR = True
import numexpr
import logging
log = logging.getLogger(__name__)
class HiCdataset(object):
"""Base class to operate on HiC dataset.
This class stores all information about HiC reads on a hard drive.
Whenever a variable corresponding to any record is used,
it is loaded/saved from/to the HDD.
If you apply any filters to a dataset, it will actually modify
the content of the current working file.
Thus, to preserve the data, loading datasets is advised. """
def __init__(self, filename, genome, enzymeName="fromGenome", maximumMoleculeLength=500,
             inMemory=False, mode="a", tmpFolder="/tmp", dictToStoreIDs="dict"):
    """
    __init__ method

    Initializes empty dataset by default.
    If "override" is False, works with existing dataset.

    Parameters
    ----------
    filename : string
        A filename to store HiC dataset in an HDF5 file.
    genome : folder with genome, or Genome object
        A folder with fastq files of the genome
        and gap table from Genome browser.
        Alternatively, mirnylib.genome.Genome object.
    maximumMoleculeLength : int, optional
        Maximum length of molecules in the HiC library,
        used as a cutoff for dangling ends filter
    inMemory : bool, optional
        Create dataset in memory. Filename is ignored then,
        but still needs to be specified.
    mode : str
        'r'  - Readonly, file must exist
        'r+' - Read/write, file must exist
        'w'  - Create file, overwrite if exists
        'w-' - Create file, fail if exists
        'a'  - Read/write if exists, create otherwise (default)
    dictToStoreIDs : dict-like or "dict" or "h5dict"
        A dictionary to store rsite IDs. If "dict", then store them in memory.
        If "h5dict", then creates default h5dict (in /tmp folder)
        If other object, uses it, whether it is an h5dict or a dictionary
    """
    # -->>> Important::: do not define any variables before vectors!!! <<<--
    # (__setattr__ consults self.vectors, so it must exist first)
    # These are fields that will be kept on a hard drive.
    # You can learn what variables mean from here too.
    self.vectors = {
        # strand and chromosome for each side of each read
        "strands1": "bool", "strands2": "bool",
        "chrms1": "int8", "chrms2": "int8",
        # exact cut positions of both read sides
        "cuts1": "int32", "cuts2": "int32",
    }
    # derived vectors: computed on the fly from the base vectors above
    self.vectors2 = {
        "fraglens1": "int32", "fraglens2": "int32",
        # fragment lengths
        "fragids1": "int64", "fragids2": "int64",
        # IDs of fragments: fragIDmult * chromosome + location
        "mids1": "int32", "mids2": "int32",
        # midpoint of a fragment, determined as "(start+end)/2"
        "dists1": "int32", "dists2": "int32",
        # distance from the cut to the restriction site
        "distances": "int32",
        # distance between fragments. If -1, different chromosomes.
        # If -2, different arms.
    }
    # cached per-read restriction-fragment indices
    self.vectors3 = {"rfragAbsIdxs1": "int32", "rfragAbsIdxs2": "int32", }
    if dictToStoreIDs == "dict":
        self.rfragIDDict = {}
    elif dictToStoreIDs == "h5dict":
        self.rfragIDDict = mirnylib.h5dict.h5dict()
    else:
        self.rfragIDDict = dictToStoreIDs
    self.metadata = {}
    self.tmpDir = tmpFolder
    #-------Initialization of the genome and parameters-----
    self.mode = mode
    if type(genome) == str:
        self.genome = Genome(genomePath=genome, readChrms=["#", "X"])
    else:
        self.genome = genome
    assert isinstance(self.genome, Genome)  # must be a mirnylib Genome by now
    if enzymeName == "fromGenome":
        if self.genome.hasEnzyme() == False:
            raise ValueError("Provide the genome with the enzyme or specify enzyme=...")
    else:
        self.genome.setEnzyme(enzymeName)
    self.chromosomeCount = self.genome.chrmCount
    self.fragIDmult = self.genome.fragIDmult  # used for building heatmaps
    # genome-wide restriction-fragment tables, flattened over chromosomes
    self.rFragIDs = self.genome.rfragMidIds
    self.rFragLens = np.concatenate(self.genome.rfragLens)
    self.rFragMids = np.concatenate(self.genome.rfragMids)
    self.rsites = self.genome.rsiteIds
    # to speed up searchsorted we use positive-only numbers
    self.rsitesPositive = self.rsites + 2 * self.fragIDmult
    print "----> New dataset opened, genome %s, filename = %s" % (
        self.genome.folderName, filename)
    self.maximumMoleculeLength = maximumMoleculeLength
    # maximum length of a molecule for SS reads
    self.filename = os.path.abspath(os.path.expanduser(filename))  # File to save the data
    self.chunksize = 10000000
    # Chunk size for h5dict operation, external sorting, etc
    self.inMemory = inMemory
    #------Creating filenames, etc---------
    if os.path.exists(self.filename) and (mode in ['w', 'a']):
        print '----->!!!File already exists! It will be {0}\n'.format(
            {"w": "deleted", "a": "opened in the append mode"}[mode])
    if len(os.path.split(self.filename)[0]) != 0:
        if not os.path.exists(os.path.split(self.filename)[0]):
            # parent directory is missing: warn, then try to create it
            warnings.warn("Folder in which you want to create file"
                          "do not exist: %s" % os.path.split(self.filename)[0])
            try:
                os.mkdir(os.path.split(self.filename)[0])
            except:
                raise IOError("Failed to create directory: %s" %
                              os.path.split(self.filename)[0])
    self.h5dict = mirnylib.h5dict.h5dict(self.filename, mode=mode, in_memory=inMemory)
    # re-opening an existing dataset: restore read count and metadata
    if "chrms1" in self.h5dict.keys():
        self.N = len(self.h5dict.get_dataset("chrms1"))
    if "metadata" in self.h5dict:
        self.metadata = self.h5dict["metadata"]
def _setData(self, name, data):
    """Internal: cast *data* to the declared dtype and store it in the h5dict.

    Only names declared in self.vectors may be written; anything else is a
    programming error and raises ValueError.
    """
    # membership test on the dict directly (no need for .keys())
    if name not in self.vectors:
        # Bug fix: corrected the "Attept" typo in the error message.
        raise ValueError("Attempt to save data not "
                         "specified in self.vectors")
    dtype = np.dtype(self.vectors[name])
    data = np.asarray(data, dtype=dtype)
    self.h5dict[name] = data
def _getData(self, name):
    """Internal: load the named base vector from the h5dict as a numpy array.

    Only names declared in self.vectors may be read; anything else raises
    ValueError.
    """
    if name not in self.vectors:
        # Bug fix: corrected the "Attept" typo in the error message.
        raise ValueError("Attempt to load data not "
                         "specified in self.vectors")
    return self.h5dict[name]
def _isSorted(self):
    """Heuristically check whether reads are sorted by first-side chromosome.

    Only the first (up to) 10000 records are inspected.
    """
    sample = self._getVector("chrms1", 0, min(self.N, 10000))
    return bool((np.sort(sample) == sample).all())
def __getattribute__(self, x):
    """a method that overrides set/get operation for self.vectors
    so that they're always on HDD"""
    # the vector registries themselves must bypass the machinery below,
    # otherwise the self.vectors lookups would recurse infinitely
    if x in ["vectors", "vectors2", "vectors3"]:
        return object.__getattribute__(self, x)
    if x in self.vectors.keys():
        # base vectors are stored verbatim in the h5dict
        a = self._getData(x)
        return a
    elif (x in self.vectors2) or (x in self.vectors3):
        # derived vectors are (re)computed on access
        return self._getVector(x)
    else:
        return object.__getattribute__(self, x)
def _getSliceableVector(self, name):
    """Wrap the named vector in a lazily-sliced view over all N reads."""
    lazy_view = sliceableDataset(self._getVector, name, self.N)
    return lazy_view
def _getVector(self, name, start=None, end=None):
    """Internal: return the slice [start:end] of a read vector.

    Base vectors (self.vectors) are read directly from the h5dict; rfrag
    indices (self.vectors3) are looked up and computed lazily on first use;
    the remaining vectors (self.vectors2) are derived on the fly from the
    genome's restriction-fragment tables.

    Parameters
    ----------
    name : str
        Vector name from self.vectors / self.vectors2 / self.vectors3.
    start, end : int or None
        Slice boundaries; None means from the beginning / to the end.

    Raises
    ------
    ValueError
        If the name is unknown, or a base vector is missing from the h5dict.
    """
    if name in self.vectors:
        if name in self.h5dict:
            return self.h5dict.get_dataset(name)[start:end]
        else:
            raise ValueError("name {0} not in h5dict {1}".format(name, self.h5dict.path))
    if name in self.vectors3:
        datas = self.rfragIDDict
        # rfrag assignments are computed lazily on first access
        if name not in datas:
            self._calculateRgragIDs()
        assert name in datas
        if hasattr(datas, "get_dataset"):
            dset = datas.get_dataset(name)
        else:
            dset = datas[name]
        return dset[start:end]
    if name in self.vectors2:
        if name == "fragids1":
            return self.genome.rfragMidIds[self._getVector("rfragAbsIdxs1", start, end)]
        elif name == "fragids2":
            return self.genome.rfragMidIds[self._getVector("rfragAbsIdxs2", start, end)]
        elif name == "fraglens1":
            fl1 = self.rFragLens[self._getVector("rfragAbsIdxs1", start, end)]
            # unmapped side (chromosome == -1) gets zero length
            fl1[self._getVector("chrms1", start, end) == -1] = 0
            return fl1
        elif name == "fraglens2":
            fl2 = self.rFragLens[self._getVector("rfragAbsIdxs2", start, end)]
            fl2[self._getVector("chrms2", start, end) == -1] = 0
            return fl2
        elif name == "dists1":
            # distance from the cut to the restriction site the read points at
            cutids1 = self._getVector("cuts1", start, end) + np.array(self._getVector("chrms1", start, end), dtype=np.int64) * self.fragIDmult
            d1 = np.abs(cutids1 - self.rsites[self._getVector("rfragAbsIdxs1", start, end) + self._getVector("strands1", start, end) - 1])
            d1[self._getVector("chrms1", start, end) == -1] = 0
            return d1
        elif name == "dists2":
            cutids2 = self._getVector("cuts2", start, end) + np.array(self._getVector("chrms2", start, end), dtype=np.int64) * self.fragIDmult
            d2 = np.abs(cutids2 - self.rsites[self._getVector("rfragAbsIdxs2", start, end) + self._getVector("strands2", start, end) - 1])
            d2[self._getVector("chrms2", start, end) == -1] = 0
            return d2
        elif name == "mids1":
            return self.rFragMids[self._getVector("rfragAbsIdxs1", start, end)]
        elif name == "mids2":
            return self.rFragMids[self._getVector("rfragAbsIdxs2", start, end)]
        elif name == "distances":
            # genomic separation of fragment midpoints; -1 marks trans reads
            dvec = np.abs(self._getVector("mids1", start, end) - self._getVector("mids2", start, end))
            dvec[self.chrms1[start:end] != self.chrms2[start:end]] = -1
            return dvec
    # Bug fix: the original did 'raise "unknown vector..."'.  Raising a
    # string is illegal since Python 2.6 and itself raised a TypeError,
    # masking the actual problem.  Raise a proper ValueError instead.
    raise ValueError("unknown vector: {0}".format(name))
def _calculateRgragIDs(self):
    """Internal: assign each read side to a restriction-fragment index.

    Fills self.rfragIDDict["rfragAbsIdxs1"/"rfragAbsIdxs2"] with int32
    indices obtained by binary search of each cut position (encoded with
    chromosome and strand via fragIDmult) against the rsite ID table.
    Runs in chunks through self.evaluate to bound memory usage.
    """
    log.debug("Started calculating rfrag IDs")
    # clear any stale results first
    for i in self.rfragIDDict.keys():
        del self.rfragIDDict[i]
    if hasattr(self.rfragIDDict, "add_empty_dataset"):
        # h5dict-backed storage: preallocate datasets on disk
        self.rfragIDDict.add_empty_dataset("rfragAbsIdxs1", (self.N,), "int32")
        self.rfragIDDict.add_empty_dataset("rfragAbsIdxs2", (self.N,), "int32")
        d1 = self.rfragIDDict.get_dataset("rfragAbsIdxs1")
        d2 = self.rfragIDDict.get_dataset("rfragAbsIdxs2")
    else:
        # plain dict storage: preallocate numpy arrays in memory
        self.rfragIDDict["rfragAbsIdxs1"] = np.empty(self.N, dtype=np.int32)
        self.rfragIDDict["rfragAbsIdxs2"] = np.empty(self.N, dtype=np.int32)
        d1 = self.rfragIDDict["rfragAbsIdxs1"]
        d2 = self.rfragIDDict["rfragAbsIdxs2"]
    # namespace handed to self.evaluate for chunked execution
    constants = {"np": np, "binarySearch": binarySearch,
                 "rsites": self.rsitesPositive, "fragMult": self.fragIDmult,
                 "numexpr": numexpr}
    code1 = dedent("""
    id1 = numexpr.evaluate("(cuts1 + (chrms1+2) * fragMult + 7 * strands1 - 3)")
    del cuts1
    del chrms1
    res = binarySearch(id1 ,rsites)
    """)
    self.evaluate(code1, ["chrms1", "strands1", "cuts1"], outVariable=("res", d1),
                  constants=constants, chunkSize=150000000)
    code2 = dedent("""
    id2 = numexpr.evaluate("(cuts2 + (chrms2 + 2) * fragMult + 7 * strands2 - 3) * (chrms2 >=0)")
    del cuts2
    del chrms2
    res = binarySearch(id2 ,rsites)
    """)
    self.evaluate(code2, ["chrms2", "strands2", "cuts2"], outVariable=("res", d2),
                  constants=constants, chunkSize=150000000)
    log.debug("Finished calculating rfrag IDs")
def __setattr__(self, x, value):
    """a method that overrides set/get operation for self.vectors
    so that they're always on HDD

    Derived vectors (self.vectors2) are computed on the fly from the base
    vectors and therefore cannot be assigned directly.
    """
    # the registries themselves must be set normally to avoid recursion
    if x in ["vectors", "vectors2"]:
        return object.__setattr__(self, x, value)
    if x in self.vectors.keys():
        # base vectors go straight to the HDD-backed store
        self._setData(x, value)
    elif x in self.vectors2:
        # Bug fix: this was a bare "raise" with no active exception, which
        # itself raises a TypeError with no explanation.  Raise a clear,
        # descriptive error instead.
        raise AttributeError("Derived vector '%s' cannot be assigned; "
                             "it is computed from the base vectors" % x)
    else:
        return object.__setattr__(self, x, value)
def _dumpMetadata(self):
    """Internal: persist self.metadata into the h5dict (best effort).

    In read mode, or when saving fails, the problem is reported as a
    RuntimeWarning rather than raised, so it never aborts the caller.
    """
    if self.mode in ["r"]:
        warnings.warn(RuntimeWarning("Cannot dump metadata in read mode"))
        return
    try:
        self.h5dict["metadata"] = self.metadata
    except Exception, err:
        # print full details for debugging, but only warn the caller
        print "-" * 20 + "Got Exception when saving metadata" + "-" * 20
        traceback.print_exc()
        print Exception, err
        print "-" * 60
        warnings.warn(RuntimeWarning("Got exception when saving metadata"))
def _checkConsistency(self):
    """
    Internal method to automatically check consistency with the genome
    Every time rebuildFragments is getting called
    """
    c1 = self.chrms1
    p1 = self.cuts1
    # subsample large datasets; a 1% sample is enough to detect a mismatch
    if len(c1) > 1000000:
        c1 = c1[::100]
        p1 = p1[::100]
    # chromosome indices must fit the genome (0-based)
    if c1.max() >= self.genome.chrmCount:
        print 'Genome length', self.genome.chrmCount
        print "Maximum chromosome", c1.max()
        print "note that chromosomes are 0-based, so go",
        print "from 0 to {0}".format(self.genome.chrmCount)
        raise ValueError("Chromosomes do not fit in the genome")
    # positions must not run past the end of their chromosome
    maxPos = self.genome.chrmLens[c1]
    dif = p1 - maxPos
    if dif.max() > 0:
        print "Some reads map after chromosome end"
        print 'However, deviation of {0} is not big enough to call an error'.format(dif.max())
        warnings.warn("Reads map {0} bp after the end of chromosome".format(dif.max()))
        if dif.max() > 100:
            # a large overshoot most likely means the wrong genome build
            print "Possible genome mismatch found"
            print 'Maximum deviation is {0}'.format(dif.max())
            for chrom in range(self.genome.chrmCount):
                posmax = (p1[c1 == chrom]).max()
                chrLens = self.genome.chrmLens[chrom]
                if posmax > chrLens:
                    print "Maximum position for chr {0} is {1}".format(chrom, posmax)
                    print "Length of chr {0} is {1}".format(chrom, chrLens)
                    raise ValueError("Wrong chromosome lengths")
def _getChunks(self, chunkSize="default"):
    """Split the read range [0, N) into consecutive (start, end) chunks.

    Uses self.chunksize unless an explicit chunkSize is given; a dataset
    smaller than two chunks is returned as a single chunk.
    """
    if chunkSize == "default":
        chunkSize = self.chunksize
    if chunkSize > 0.5 * self.N:
        return [(0, self.N)]
    # the last chunk absorbs up to half a chunk of extra reads
    edges = list(range(0, self.N - chunkSize // 2, chunkSize)) + [self.N]
    return list(zip(edges[:-1], edges[1:]))
def _sortData(self):
    """
    Orders data such that chrms1 is always more than chrms2, and sorts it by chrms1, cuts1
    """
    log.debug("Starting sorting data: making the file")
    # self.dataSorted flags that sorting already happened; skip if set
    if not hasattr(self, "dataSorted"):
        # scratch h5dict holding the packed records plus merge-sort swap space
        tmpFile = os.path.join(self.tmpDir, str(np.random.randint(0, 100000000)))
        mydict = mirnylib.h5dict.h5dict(tmpFile, 'w')
        data = mydict.add_empty_dataset("sortedData", (self.N,), mydtype)
        tmp = mydict.add_empty_dataset("trash", (self.N,), mydtype)
        # pack the six per-read vectors into one structured array, swapping
        # sides so that chrms1 <= chrms2 holds for every read
        code = dedent("""
        a = np.empty(len(chrms1), dtype = mydtype)
        mask = chrms1 > chrms2
        chrms2[mask],chrms1[mask] = chrms1[mask].copy(), chrms2[mask].copy()
        cuts1[mask],cuts2[mask] = cuts2[mask].copy(), cuts1[mask].copy()
        strands1[mask],strands2[mask] = strands2[mask].copy(),strands1[mask].copy()
        a["chrms1"] = chrms1
        a["pos1"] = cuts1
        a["chrms2"] = chrms2
        a["pos2"] = cuts2
        a["strands1"] = strands1
        a["strands2"] = strands2
        """)
        self.evaluate(expression=code, internalVariables=["chrms1", "chrms2", "cuts1", "cuts2", "strands1", "strands2"],
                      constants={"np": np, "mydtype": mydtype}, outVariable=("a", data))
        log.debug("Invoking sorter")
        # disk-based merge sort; "trash" is its scratch space
        externalMergeSort(data, tmp, sorter=mydtypeSorter, searchsorted=searchsorted)
        log.debug("Getting data back")
        # unpack the sorted structured array back into the per-read vectors
        sdata = mydict.get_dataset("sortedData")
        c1 = self.h5dict.get_dataset("chrms1")
        c2 = self.h5dict.get_dataset("chrms2")
        p1 = self.h5dict.get_dataset("cuts1")
        p2 = self.h5dict.get_dataset("cuts2")
        s1 = self.h5dict.get_dataset("strands1")
        s2 = self.h5dict.get_dataset("strands2")
        for start, end in self._getChunks():
            data = sdata[start:end]
            c1[start:end] = data["chrms1"]
            c2[start:end] = data["chrms2"]
            p1[start:end] = data["pos1"]
            p2[start:end] = data["pos2"]
            s1[start:end] = data["strands1"]
            s2[start:end] = data["strands2"]
        self.dataSorted = True
        # clean up the temporary sort file
        del mydict
        os.remove(tmpFile)
        gc.collect()
    log.debug("Finished")
def evaluate(self, expression, internalVariables, externalVariables={},
             constants={"np": np},
             outVariable="autodetect",
             chunkSize="default"):
    """
    Still experimental class to perform evaluation of
    any expression on hdf5 datasets

    Note that outVariable should be writable by slices.

    ---If one can provide autodetect of values for internal
    variables by parsing an expression, it would be great!---

    .. note ::
        See example of usage of this class in filterRsiteStart,
        parseInputData, etc.

    .. warning ::
        Please avoid passing internal variables
        as "self.cuts1" - use "cuts1"

    .. warning ::
        You have to pass all the modules and functions (e.g. np)
        in a "constants" dictionary.

    Parameters
    ----------
    expression : str
        Mathematical expression, single or multi line
    internalVariables : list of str
        List of variables ("chrms1", etc.), used in the expression
    externalVariables : dict , optional
        Dict of {str:array}, where str indicates name of the variable,
        and array - value of the variable.
    constants : dict, optional
        Dictionary of constants to be used in the evaluation.
        Because evaluation happens without namespace,
        you should include numpy here if you use it (included by default)
    outVariable : str or tuple or None, optional
        Variable to output the data. Either internal variable, or tuple
        (name,value), where value is an array

    NOTE(review): externalVariables/constants are mutable default
    arguments shared across calls; they appear to be only read here
    (constants is copied per chunk), but worth confirming.
    """
    if type(internalVariables) == str:
        internalVariables = [internalVariables]
    # detecting output variable automatically from the last assignment
    if outVariable == "autodetect":
        outVariable = expression.split("\n")[-1].split("=")[0].strip()
        if outVariable not in self.vectors:
            # placeholder; the dtype is detected after the first chunk runs
            outVariable = (outVariable, "ToDefine")
    code = compile(expression, '<string>', 'exec')
    # compile because we're launching it many times
    for start, end in self._getChunks(chunkSize):
        variables = copy(constants)
        variables["start"] = start
        variables["end"] = end
        # dictionary to pass to the evaluator.
        # It's safer than to use the default locals()
        for name in internalVariables:
            variables[name] = self._getVector(name, start, end)
        for name, variable in externalVariables.items():
            variables[name] = variable[start:end]
        # actually execute the code in our own namespace
        exec code in variables
        # autodetecting output dtype on the first run if not specified
        if outVariable[1] == "ToDefine":
            dtype = variables[outVariable[0]].dtype
            outVariable = (outVariable[0], np.zeros(self.N, dtype))
        if type(outVariable) == str:
            # a named base vector: write the chunk directly into the h5dict
            self.h5dict.get_dataset(outVariable)[start:end] = variables[outVariable]
        elif len(outVariable) == 2:
            # (name, array) pair: write the chunk into the supplied array
            outVariable[1][start:end] = variables[outVariable[0]]
        elif outVariable is None:
            pass
        else:
            raise ValueError("Please provide str or (str,value)"
                             " for out variable")
    if type(outVariable) == tuple:
        return outVariable[1]
def merge(self, filenames):
"""combines data from multiple datasets
Parameters
----------
filenames : list of strings
List of folders to merge to current working folder
"""
log.debug("Starting merge; number of datasets = {0}".format(len(filenames)))
if self.filename in filenames:
raise StandardError("----> Cannot merge folder into itself! "
"Create a new folder")
for filename in filenames:
if not os.path.exists(filename):
raise IOError("\nCannot open file: %s" % filename)
log.debug("Getting h5dicts")
h5dicts = [mirnylib.h5dict.h5dict(i, mode='r') for i in filenames]
if all(["metadata" in i for i in h5dicts]):
metadatas = [mydict["metadata"] for mydict in h5dicts]
# print metadatas
newMetadata = metadatas.pop()
for oldData in metadatas:
for key, value in oldData.items():
if (key in newMetadata):
try:
newMetadata[key] += value
except:
print "Values {0} and {1} for key {2} cannot be added".format(metadatas[key], value, key)
warnings.warn("Cannot add metadatas")
else:
warnings.warn("key {0} not found in some files".format(key))
self.metadata = newMetadata
self.h5dict["metadata"] = self.metadata
log.debug("Calculating final length")
self.N = sum([len(i.get_dataset("strands1")) for i in h5dicts])
log.debug("Final length equals: {0}".format(self.N))
for name in self.vectors.keys():
log.debug("Processing vector {0}".format(name))
if name in self.h5dict:
del self.h5dict[name]
self.h5dict.add_empty_dataset(name, (self.N,), self.vectors[name])
dset = self.h5dict.get_dataset(name)
position = 0
for mydict in h5dicts:
cur = mydict[name]
dset[position:position + len(cur)] = cur
position += len(cur)
self.h5dict.flush()
time.sleep(0.2) # allow buffers to flush
log.debug("sorting data")
self._sortData()
log.debug("Finished merge")
def parseInputData(self, dictLike, zeroBaseChrom=True,
**kwargs):
"""
__NOT optimized for large datasets__
(use chunking as suggested in pipeline2015)
Inputs data from a dictionary-like object,
containing coordinates of the reads.
Performs filtering of the reads.
A good example of a dict-like object is a numpy.savez
.. warning::
Restriction fragments MUST be specified
exactly as in the Genome class.
.. warning::
Strand information is needed for proper scaling
calculations, but will be imitated if not provided
Parameters
----------
dictLike : dict or dictLike object, or string with h5dict filename
Input reads
dictLike["chrms1,2"] : array-like
Chromosomes of 2 sides of the read
dictLike["cuts1,2"] : array-like
Exact position of cuts
dictLike["strands1,2"], essential : array-like
Direction of the read
dictLike["rsites1,2"], optional : array-like
Position of rsite to which the read is pointing
dictLike["uprsites1,2"] , optional : array-like
rsite upstream (larger genomic coordinate) of the cut position
dictLike["downrsites1,2"] , optional : array-like
rsite downstream (smaller genomic coordinate) of the cut position
zeroBaseChrom : bool , optional
Use zero-base chromosome counting if True, one-base if False
enzymeToFillRsites : None or str, optional if rsites are specified
Enzyme name to use with Bio.restriction
removeSS : bool, optional
If set to True, removes SS reads from the library
noFiltering : bool, optional
If True then no filters are applied to the data. False by default.
Overrides removeSS. Experimental, do not use if you are not sure.
"""
if type(dictLike) == str:
if not os.path.exists(dictLike):
raise IOError("File not found: %s" % dictLike)
print " loading data from file %s (assuming h5dict)" % dictLike
dictLike = mirnylib.h5dict.h5dict(dictLike, 'r') # attempting to open h5dict
"---Filling in chromosomes and positions - mandatory objects---"
a = dictLike["chrms1"]
self.trackLen = len(a)
if zeroBaseChrom == True:
self.chrms1 = a
self.chrms2 = dictLike["chrms2"]
else:
self.chrms1 = a - 1
self.chrms2 = dictLike["chrms2"] - 1
self.N = len(self.chrms1)
del a
self.cuts1 = dictLike['cuts1']
self.cuts2 = dictLike['cuts2']
if not (("strands1" in dictLike.keys()) and
("strands2" in dictLike.keys())):
warnings.warn("No strand information provided,"
" assigning random strands.")
t = np.random.randint(0, 2, self.trackLen)
self.strands1 = t
self.strands2 = 1 - t
del t
noStrand = True
else:
self.strands1 = dictLike["strands1"]
self.strands2 = dictLike["strands2"]
noStrand = False # strand information filled in
self.metadata["100_TotalReads"] = self.trackLen
try:
dictLike["misc"]["genome"]["idx2label"]
self.updateGenome(self.genome, oldGenome=dictLike["misc"]["genome"]["idx2label"], putMetadata=True)
except KeyError:
assumedGenome = Genome(self.genome.genomePath)
self.updateGenome(self.genome, oldGenome=assumedGenome, putMetadata=True)
warnings.warn("\n Genome not found in mapped data. \n"
"Assuming genome comes from the same folder with all chromosomes")
self.metadata["152_removedUnusedChromosomes"] = self.trackLen - self.N
self.metadata["150_ReadsWithoutUnusedChromosomes"] = self.N
# Discard dangling ends and self-circles
DSmask = (self.chrms1 >= 0) * (self.chrms2 >= 0)
self.metadata["200_totalDSReads"] = DSmask.sum()
self.metadata["201_DS+SS"] = len(DSmask)
self.metadata["202_SSReadsRemoved"] = len(DSmask) - DSmask.sum()
sameFragMask = self.evaluate("a = (fragids1 == fragids2)",
["fragids1", "fragids2"]) * DSmask
cutDifs = self.cuts2[sameFragMask] > self.cuts1[sameFragMask]
s1 = self.strands1[sameFragMask]
s2 = self.strands2[sameFragMask]
SSDE = (s1 != s2)
SS = SSDE * (cutDifs == s2)
SS_N = SS.sum()
SSDE_N = SSDE.sum()
sameFrag_N = sameFragMask.sum()
self.metadata["210_sameFragmentReadsRemoved"] = sameFrag_N
self.metadata["212_Self-Circles"] = SS_N
self.metadata["214_DandlingEnds"] = SSDE_N - SS_N
self.metadata["216_error"] = sameFrag_N - SSDE_N
mask = DSmask * (-sameFragMask)
del DSmask, sameFragMask
noSameFrag = mask.sum()
# Discard unused chromosomes
if noStrand == True:
# Can't tell if reads point to each other.
dist = self.evaluate("a = np.abs(cuts1 - cuts2)",
["cuts1", "cuts2"])
else:
# distance between sites facing each other
dist = self.evaluate("a = numexpr.evaluate('- cuts1 * (2 * strands1 -1) - "
"cuts2 * (2 * strands2 - 1)')",
["cuts1", "cuts2", "strands1", "strands2"],
constants={"numexpr":numexpr})
readsMolecules = self.evaluate(
"a = numexpr.evaluate('(chrms1 == chrms2)&(strands1 != strands2) & (dist >=0) &"
" (dist <= maximumMoleculeLength)')",
internalVariables=["chrms1", "chrms2", "strands1", "strands2"],
externalVariables={"dist": dist},
constants={"maximumMoleculeLength": self.maximumMoleculeLength, "numexpr":numexpr})
mask *= (readsMolecules == False)
extraDE = mask.sum()
self.metadata["220_extraDandlingEndsRemoved"] = -extraDE + noSameFrag
if mask.sum() == 0:
raise Exception(
'No reads left after filtering. Please, check the input data')
del dist
del readsMolecules
if not kwargs.get('noFiltering', False):
self.maskFilter(mask)
self.metadata["300_ValidPairs"] = self.N
del dictLike
    def printMetadata(self, saveTo=None):
        """Pretty-print the metadata counters, optionally also writing them to a file.

        Metadata keys are assumed to begin with a three-digit numeric prefix
        (e.g. "210_..."); the 2nd and 3rd digits of that prefix select the
        indentation depth of the printed line.

        Parameters
        ----------
        saveTo : str or None, optional
            If given, the same listing is also written to this file path.
        """
        self._dumpMetadata()
        for i in sorted(self.metadata):
            # i[1]/i[2] are the 2nd/3rd characters of the key's numeric prefix;
            # a nonzero digit marks a sub-item and gets indented deeper
            if i[2] != "0":
                print "\t\t",
            elif i[1] != "0":
                print "\t",
            print i, self.metadata[i]
        if saveTo != None:
            with open(saveTo, 'w') as myfile:
                # same layout as the console output, with ": " separators
                for i in sorted(self.metadata):
                    if i[2] != "0":
                        myfile.write("\t\t")
                    elif i[1] != "0":
                        myfile.write("\t")
                    myfile.write(str(i))
                    myfile.write(": ")
                    myfile.write(str(self.metadata[i]))
                    myfile.write("\n")
    def updateGenome(self, newGenome, oldGenome="current", putMetadata=False):
        """
        __partially optimized for large datasets__
        Updates dataset to a new genome, with a fewer number of chromosomes.
        Use it to delete chromosomes.
        By default, removes all DS reads with that chromosomes.
        Parameters
        ----------
        newGenome : Genome object
            Genome to replace the old genome, with fewer chromosomes
        removeSSreads : "trans"(default), "all" or "none"
            "trans": remove all reads from deleted chromosomes,
            ignore the rest.
            "all": remove all SS reads from all chromosomes
            "None": mark all trans reads as SS reads
        putMetadata : bool (optional)
            Writes metadata for M and Y reads
        oldGenome : Genome object or idx2label of old genome, optional
        """
        assert isinstance(newGenome, Genome)
        newN = newGenome.chrmCount
        if oldGenome == "current":
            oldGenome = self.genome
        # upgrade maps old chromosome indices to new ones (-1 = dropped)
        upgrade = newGenome.upgradeMatrix(oldGenome)
        if isinstance(oldGenome, Genome):
            # carry the enzyme over so fragment definitions stay consistent
            if oldGenome.hasEnzyme():
                newGenome.setEnzyme(oldGenome.enzymeName)
            # from here on we only need the index -> label mapping
            oldGenome = oldGenome.idx2label
        oldN = len(oldGenome.keys())
        label2idx = dict(zip(oldGenome.values(), oldGenome.keys()))
        chrms1 = np.array(self.chrms1, int)
        chrms2 = np.array(self.chrms2, int)
        # SS marks reads with at least one unmapped (negative-chrom) side
        SS = (chrms1 < 0) + (chrms2 < 0)
        metadata = {}
        if "M" in label2idx:
            # mitochondrial-read bookkeeping
            Midx = label2idx["M"]
            M1 = chrms1 == Midx
            M2 = chrms2 == Midx
            mToM = (M1 * M2).sum()
            mToAny = (M1 + M2).sum()
            mToSS = ((M1 + M2) * SS).sum()
            metadata["102_mappedSide1"] = (chrms1 >= 0).sum()
            metadata["104_mappedSide2"] = (chrms2 >= 0).sum()
            metadata["112_M-to-M_reads"] = mToM
            metadata["114_M-to-Any_reads"] = mToAny
            metadata["116_M-to-SS_reads"] = mToSS
            metadata["118_M-to-DS_reads"] = mToAny - mToSS
        if "Y" in label2idx:
            # Y-chromosome bookkeeping, mirrors the "M" block above
            Yidx = label2idx["Y"]
            Y1 = chrms1 == Yidx
            Y2 = chrms2 == Yidx
            yToY = (Y1 * Y2).sum()
            yToAny = (Y1 + Y2).sum()
            yToSS = ((Y1 + Y2) * SS).sum()
            metadata["122_Y-to-Y_reads"] = yToY
            metadata["124_Y-to-Any_reads"] = yToAny
            metadata["126_Y-to-SS_reads"] = yToSS
            metadata["128_Y-to-DS_reads"] = yToAny - yToSS
        if putMetadata:
            self.metadata.update(metadata)
        if oldN == newN:
            # same chromosome count: nothing to remap or filter
            return None
        if upgrade is not None:
            upgrade[upgrade == -1] = 9999  # to tell old SS reads from new SS reads
            chrms1 = upgrade[chrms1]
            self.chrms1 = chrms1
            del chrms1
            chrms2 = upgrade[chrms2]
            self.chrms2 = chrms2
        "Keeping only DS reads"
        # reads remapped to 9999 (dropped chromosomes) fail this test
        mask = ((self.chrms1 < newN) * (self.chrms2 < newN))
        self.genome = newGenome
        self.maskFilter(mask)
def buildAllHeatmap(self, resolution, countDiagonalReads="Once",
useWeights=False):
"""
__optimized for large datasets__
Creates an all-by-all heatmap in accordance with mapping
provided by 'genome' class
Parameters
----------
resolution : int or str
Resolution of a heatmap. May be an int or 'fragment' for
restriction fragment resolution.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
useWeights : bool
If True, then take weights from 'weights' variable. False by default.
"""
for start,end in self._getChunks(30000000):
if type(resolution) == int:
# 8 bytes per record + heatmap
self.genome.setResolution(resolution)
numBins = self.genome.numBins
label = self.genome.chrmStartsBinCont[self._getVector("chrms1", start, end)]
label = np.asarray(label, dtype="int64")
label += self._getVector("mids1",start,end) / resolution
label *= numBins
label += self.genome.chrmStartsBinCont[self._getVector("chrms2",start,end)]
label += self._getVector("mids2",start,end) / resolution
elif resolution == 'fragment':
numBins = self.genome.numRfrags
label = self._getVector("rfragAbsIdxs1",start,end)
label *= numBins
label += self._getVector("rfragAbsIdxs2",start,end)
else:
raise Exception('Unknown value for resolution: {0}'.format(
resolution))
if useWeights:
if 'weights' not in self.vectors:
raise Exception('Set read weights first!')
counts = np.bincount(label, weights=self.fragmentWeights, minlength=numBins ** 2)
else:
counts = np.bincount(label, minlength=numBins ** 2)
if len(counts) > numBins ** 2:
raise StandardError("\nheatmap exceed length of the genome!!!"
" Check genome")
counts.shape = (numBins, numBins)
try:
heatmap += counts # @UndefinedVariable
except:
heatmap = counts
for i in xrange(len(heatmap)):
heatmap[i, i:] += heatmap[i:, i]
heatmap[i:, i] = heatmap[i, i:]
if countDiagonalReads.lower() == "once":
diag = np.diag(heatmap)
fillDiagonal(heatmap, diag / 2)
elif countDiagonalReads.lower() == "twice":
pass
else:
raise ValueError("Bad value for countDiagonalReads")
return heatmap
    def buildHeatmapWithOverlapCpp(self, resolution, countDiagonalReads="Twice",
                                   maxBinSpawn=10):
        """
        __NOT optimized for large datasets__
        Creates an all-by-all heatmap in accordance with mapping
        provided by 'genome' class
        This method assigns fragments to all bins which
        the fragment overlaps, proportionally
        Parameters
        ----------
        resolution : int or str
            Resolution of a heatmap. May be an int or 'fragment' for
            restriction fragment resolution.
        countDiagonalReads : "once" or "twice"
            How many times to count reads in the diagonal bin
        maxBinSpawn : int, optional, not more than 10
            Discard read if it spawns more than maxBinSpawn bins
        """
        # NOTE(review): relies on scipy.weave, which is Python-2-only and was
        # removed from scipy; this method cannot run on modern stacks as-is.
        if type(resolution) == int:
            # many bytes per record + heatmap
            self.genome.setResolution(resolution)
        N = self.N
        N = int(N)
        # low*/high* are fragment ends in fractional bin coordinates,
        # offset by the chromosome's starting bin.
        low1 = self.genome.chrmStartsBinCont[self.chrms1]
        low1 = np.asarray(low1, dtype="float32")
        low1 += (self.mids1 - self.fraglens1 / 2) / float(resolution)
        high1 = self.genome.chrmStartsBinCont[self.chrms1]
        high1 = np.asarray(high1, dtype="float32")
        high1 += (self.mids1 + self.fraglens1 / 2) / float(resolution)
        low2 = self.genome.chrmStartsBinCont[self.chrms2]
        low2 = np.asarray(low2, dtype="float32")
        low2 += (self.mids2 - self.fraglens2 / 2) / float(resolution)
        high2 = self.genome.chrmStartsBinCont[self.chrms2]
        high2 = np.asarray(high2, dtype="float32")
        high2 += (self.mids2 + self.fraglens2 / 2) / float(resolution)
        heatmap = np.zeros((self.genome.numBins, self.genome.numBins),
                           dtype="float64", order="C")
        heatmapSize = len(heatmap)  # @UnusedVariable
        from scipy import weave
        # The C kernel splits each read's weight across all bins its two
        # fragments overlap: vector1/vector2 hold per-bin overlap fractions
        # (each sums to 1), and their outer product is added to the heatmap.
        code = """
        #line 1045 "fragmentHiC.py"
        double vector1[100];
        double vector2[100];
        for (int readNum = 0;  readNum < N; readNum++)
        {
        for (int i=0; i<10; i++)
        {
        vector1[i] = 0;
        vector2[i] = 0;
        }
        double l1 = low1[readNum];
        double l2 = low2[readNum];
        double h1 = high1[readNum];
        double h2 = high2[readNum];
        if ((h1 - l1) > maxBinSpawn) continue;
        if ((h2 - l2) > maxBinSpawn) continue;
        int binNum1 = ceil(h1) - floor(l1);
        int binNum2 = ceil(h2) - floor(l2);
        double binLen1 = h1 - l1;
        double binLen2 = h2 - l2;
        int b1 = floor(l1);
        int b2 = floor(l2);
        if (binNum1 == 1)
        vector1[0] = 1.;
        else
        {
        vector1[0] = (ceil(l1 + 0.00001) - l1) / binLen1;
        for (int t = 1; t< binNum1 - 1; t++)
        {vector1[t] = 1. / binLen1;}
        vector1[binNum1 - 1] = (h1 - floor(h1)) / binLen1;
        }
        if (binNum2 == 1) vector2[0] = 1.;
        else
        {
        vector2[0] = (ceil(l2 + 0.0001) - l2) / binLen2;
        for (int t = 1; t< binNum2 - 1; t++)
        {vector2[t] = 1. / binLen2;}
        vector2[binNum2 - 1] = (h2 - floor(h2)) / binLen2;
        }
        for (int i = 0; i< binNum1; i++)
        {
        for (int j = 0; j < binNum2; j++)
        {
        heatmap[(b1 + i) * heatmapSize + b2 + j] += vector1[i] * vector2[j];
        }
        }
        }
        """
        weave.inline(code,
                     ['low1', "high1", "low2", "high2",
                      "N", "heatmap", "maxBinSpawn",
                      "heatmapSize",
                      ],
                     extra_compile_args=['-march=native -O3 '],
                     support_code=r"""
                     #include <stdio.h>
                     #include <math.h>""")
        counts = heatmap
        # Symmetrize: contributions were written to one triangle per read.
        for i in xrange(len(counts)):
            counts[i, i:] += counts[i:, i]
            counts[i:, i] = counts[i, i:]
        diag = np.diag(counts)
        if countDiagonalReads.lower() == "once":
            fillDiagonal(counts, diag / 2)
        elif countDiagonalReads.lower() == "twice":
            pass
        else:
            raise ValueError("Bad value for countDiagonalReads")
        return counts
    def getHiResHeatmapWithOverlaps(self, resolution, chromosome, start = 0, end = None, countDiagonalReads="Twice", maxBinSpawn=10):
        """Builds a cis heatmap for one chromosome region, splitting each
        fragment's weight across all bins it overlaps (proportionally).

        Parameters
        ----------
        resolution : int
            Bin size in bp.
        chromosome : int
            Zero-based chromosome index.
        start, end : int, optional
            Region boundaries in bp; `end=None` means the chromosome end.
        countDiagonalReads : "once" or "twice"
            How many times to count reads in the diagonal bin.
        maxBinSpawn : int, optional
            Discard a fragment if it spans more than this many bins.
        """
        # NOTE(review): relies on scipy.weave (Python-2-only, removed from
        # scipy); cannot run on modern stacks as-is.
        c1 = self.h5dict.get_dataset("chrms1")
        p1 = self.h5dict.get_dataset("cuts1")
        print "getting heatmap", chromosome, start, end
        from scipy import weave
        if end == None:
            end = self.genome.chrmLens[chromosome]
        # binary search works because the data is sorted by (chrms1, cuts1)
        low = h5dictBinarySearch(c1,p1, (chromosome, start),"left")
        high = h5dictBinarySearch(c1,p1, (chromosome, end),"right")
        c1 = self._getVector("chrms1", low,high)
        c2 = self._getVector("chrms2", low,high)
        mids1 = self._getVector("mids1", low,high)
        mids2 = self._getVector("mids2", low,high)
        fraglens1 = self._getVector("fraglens1", low,high)
        fraglens2 = self._getVector("fraglens2",low,high)
        # keep only cis pairs whose second side also falls inside [start, end)
        mask = (c1 == c2) * (mids2 >= start) * (mids2 < end)
        mids1 = mids1[mask]
        mids2 = mids2[mask]
        fraglens1 = fraglens1[mask]
        fraglens2 = fraglens2[mask]
        # fragment ends relative to the region start, in fractional bins
        low1 = mids1 - fraglens1 / 2 - start
        high1 = low1 + fraglens1
        low2 = mids2 - fraglens2 / 2 - start
        high2 = low2 + fraglens2
        low1 = low1 / float(resolution)
        high1 = high1 / float(resolution)
        low2 = low2 / float(resolution)
        high2 = high2 / float(resolution)
        N = len(low1)  # @UnusedVariable
        if chromosome == 1:
            pass
            #0/0
        heatmapSize = int(np.ceil((end - start) / float(resolution)))
        heatmap = np.zeros((heatmapSize, heatmapSize),
                           dtype="float64", order="C")
        # C kernel: vector1/vector2 hold per-bin overlap fractions for the two
        # fragments (each sums to 1); their outer product is accumulated.
        # Out-of-range bins are skipped; psum is a sanity check printed when
        # the distributed weight deviates from 1.
        code = r"""
        #line 1045 "fragmentHiC.py"
        double vector1[1000];
        double vector2[1000];
        for (int readNum = 0;  readNum < N; readNum++)
        {
        for (int i=0; i<10; i++)
        {
        vector1[i] = 0;
        vector2[i] = 0;
        }
        double l1 = low1[readNum];
        double l2 = low2[readNum];
        double h1 = high1[readNum];
        double h2 = high2[readNum];
        if ((h1 - l1) > maxBinSpawn) continue;
        if ((h2 - l2) > maxBinSpawn) continue;
        int binNum1 = ceil(h1) - floor(l1);
        int binNum2 = ceil(h2) - floor(l2);
        double binLen1 = h1 - l1;
        double binLen2 = h2 - l2;
        int b1 = floor(l1);
        int b2 = floor(l2);
        if (binNum1 == 1)
        vector1[0] = 1.;
        else
        {
        vector1[0] = (ceil(l1+ 0.00000001) - l1) / binLen1;
        for (int t = 1; t< binNum1 - 1; t++)
        {vector1[t] = 1. / binLen1;}
        vector1[binNum1 - 1] = (h1 - floor(h1)) / binLen1;
        }
        if (binNum2 == 1) vector2[0] = 1.;
        else
        {
        vector2[0] = (ceil(l2 + 0.00000001) - l2) / binLen2;
        for (int t = 1; t< binNum2 - 1; t++)
        {vector2[t] = 1. / binLen2;}
        vector2[binNum2 - 1] = (h2 - floor(h2)) / binLen2;
        }
        if ((b1 + binNum1) >= heatmapSize) { continue;}
        if ((b2 + binNum2) >= heatmapSize) { continue;}
        if ((b1 < 0)) {continue;}
        if ((b2 < 0)) {continue;}
        double psum = 0;
        for (int i = 0; i< binNum1; i++)
        {
        for (int j = 0; j < binNum2; j++)
        {
        heatmap[(b1 + i) * heatmapSize + b2 + j] += vector1[i] * vector2[j];
        psum += vector1[i] * vector2[j];
        }
        }
        if (abs(psum-1) > 0.0000001)
        {
        printf("bins num 1 = %d \n",binNum1);
        printf("bins num 2 = %d \n",binNum2);
        printf("psum = %f \n", psum);
        }
        }
        """
        weave.inline(code,
                     ['low1', "high1", "low2", "high2",
                      "N", "heatmap", "maxBinSpawn",
                      "heatmapSize",
                      ],
                     extra_compile_args=['-march=native -O3 '],
                     support_code=r"""
                     #include <stdio.h>
                     #include <math.h>
                     """)
        # Symmetrize: contributions were written to one triangle per read.
        for i in xrange(len(heatmap)):
            heatmap[i, i:] += heatmap[i:, i]
            heatmap[i:, i] = heatmap[i, i:]
        if countDiagonalReads.lower() == "once":
            diag = np.diag(heatmap).copy()
            fillDiagonal(heatmap, diag / 2)
            del diag
        elif countDiagonalReads.lower() == "twice":
            pass
        else:
            raise ValueError("Bad value for countDiagonalReads")
        weave.inline("")  # to release all buffers of weave.inline
        gc.collect()
        return heatmap
def saveHiResHeatmapWithOverlaps(self, filename, resolution, countDiagonalReads="Twice", maxBinSpawn=10, chromosomes="all"):
"""Creates within-chromosome heatmaps at very high resolution,
assigning each fragment to all the bins it overlaps with,
proportional to the area of overlaps.
Parameters
----------
resolution : int or str
Resolution of a heatmap.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
maxBinSpawn : int, optional, not more than 10
Discard read if it spawns more than maxBinSpawn bins
"""
if not self._isSorted():
print "Data is not sorted!!!"
self._sortData()
tosave = mirnylib.h5dict.h5dict(filename)
if chromosomes == "all":
chromosomes = range(self.genome.chrmCount)
for chrom in chromosomes:
heatmap = self.getHiResHeatmapWithOverlaps(resolution, chrom,
countDiagonalReads = countDiagonalReads, maxBinSpawn = maxBinSpawn)
tosave["{0} {0}".format(chrom)] = heatmap
del heatmap
gc.collect()
print "----> By chromosome Heatmap saved to '{0}' at {1} resolution".format(filename, resolution)
def saveSuperHighResMapWithOverlaps(self, filename, resolution, chunkSize = 20000000, chunkStep = 10000000, countDiagonalReads="Twice", maxBinSpawn=10, chromosomes="all"):
"""Creates within-chromosome heatmaps at very high resolution,
assigning each fragment to all the bins it overlaps with,
proportional to the area of overlaps.
Parameters
----------
resolution : int or str
Resolution of a heatmap.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
maxBinSpawn : int, optional, not more than 10
Discard read if it spawns more than maxBinSpawn bins
"""
tosave = mirnylib.h5dict.h5dict(filename)
if chromosomes == "all":
chromosomes = range(self.genome.chrmCount)
for chrom in chromosomes:
chrLen = self.genome.chrmLens[chrom]
chunks = [(i * chunkStep, min(i * chunkStep + chunkSize, chrLen)) for i in xrange(chrLen / chunkStep + 1)]
for chunk in chunks:
heatmap = self.getHiResHeatmapWithOverlaps(resolution, chrom,
start = chunk[0], end = chunk[1],
countDiagonalReads = countDiagonalReads, maxBinSpawn = maxBinSpawn)
tosave["{0}_{1}_{2}".format(chrom, chunk[0], chunk[1])] = heatmap
print "----> Super-high-resolution heatmap saved to '{0}' at {1} resolution".format(filename, resolution)
def fragmentFilter(self, fragments):
"""
__optimized for large datasets__
keeps only reads that originate from fragments in 'fragments'
variable, for DS - on both sides
Parameters
----------
fragments : numpy.array of fragment IDs or bools
List of fragments to keep, or their indexes in self.rFragIDs
"""
if fragments.dtype == np.bool:
fragments = self.rFragIDs[fragments]
m1 = arrayInArray(self._getSliceableVector("fragids1"), fragments, chunkSize=self.chunksize)
m2 = arrayInArray(self._getSliceableVector("fragids2"), fragments, chunkSize=self.chunksize)
mask = np.logical_and(m1, m2)
self.maskFilter(mask)
def maskFilter(self, mask):
"""
__optimized for large datasets__
keeps only reads designated by mask
Parameters
----------
mask : array of bools
Indexes of reads to keep
"""
# Uses 16 bytes per read
for i in self.rfragIDDict.keys():
del self.rfragIDDict[i]
length = 0
ms = mask.sum()
assert mask.dtype == np.bool
self.N = ms
for name in self.vectors:
data = self._getData(name)
ld = len(data)
if length == 0:
length = ld
else:
if ld != length:
self.delete()
newdata = fasterBooleanIndexing(data, mask, outLen=ms,
bounds=False) # see mirnylib.numutils
del data
self._setData(name, newdata)
del newdata
del mask
def filterExtreme(self, cutH=0.005, cutL=0):
"""
__optimized for large datasets__
removes fragments with most and/or least # counts
Parameters
----------
cutH : float, 0<=cutH < 1, optional
Fraction of the most-counts fragments to be removed
cutL : float, 0<=cutL<1, optional
Fraction of the least-counts fragments to be removed
"""
print "----->Extreme fragments filter: remove top %lf, "\
"bottom %lf fragments" % (cutH, cutL)
s = self.fragmentSum()
ss = np.sort(s)
valueL, valueH = np.percentile(ss[ss > 0], [100. * cutL, 100 * (1. - cutH)])
news = (s >= valueL) * (s <= valueH)
N1 = self.N
self.fragmentFilter(self.rFragIDs[news])
self.metadata["350_removedFromExtremeFragments"] = N1 - self.N
self._dumpMetadata()
print " #Top fragments are: ", ss[-10:]
print " # Cutoff for low # counts is (counts): ", valueL,
print "; cutoff for large # counts is: ", valueH, "\n"
def filterLarge(self, cutlarge=100000, cutsmall=100):
"""
__optimized for large datasets__
removes very large and small fragments
Parameters
----------
cutlarge : int
remove fragments larger than it
cutsmall : int
remove fragments smaller than it
"""
print "----->Small/large fragments filter: keep strictly less"\
"than %d,strictly more than %d bp" % (cutlarge, cutsmall)
p = (self.rFragLens < (cutlarge)) * (self.rFragLens > cutsmall)
N1 = self.N
self.fragmentFilter(self.rFragIDs[p])
N2 = self.N
self.metadata["340_removedLargeSmallFragments"] = N1 - N2
self._dumpMetadata()
    def filterRsiteStart(self, offset=5):
        """
        __optimized for large datasets__
        Removes reads that start within x bp near rsite
        Parameters
        ----------
        offset : int
            Number of bp to exclude next to rsite, not including offset
        """
        # TODO:(MI) fix this so that it agrees with the definition.
        print "----->Semi-dangling end filter: remove guys who start %d"\
            " bp near the rsite" % offset
        # Keep a read only if, on BOTH sides, the distance from the cut to
        # the fragment end differs from the fragment length by >= offset.
        # The expression runs inside evaluate() over numexpr, chunk by chunk.
        expression = 'mask = numexpr.evaluate("(abs(dists1 - fraglens1) >= offset) & '\
            '((abs(dists2 - fraglens2) >= offset))")'
        mask = self.evaluate(expression,
                             internalVariables=["dists1", "fraglens1",
                                                "dists2", "fraglens2"],
                             constants={"offset": offset, "np": np, "numexpr":numexpr},
                             outVariable=("mask", np.zeros(self.N, bool)))
        # removed = reads where at least one side started too close to an rsite
        self.metadata["310_startNearRsiteRemoved"] = len(mask) - mask.sum()
        self.maskFilter(mask)
def filterDuplicates(self, mode="hdd", tmpDir="default", chunkSize = 100000000):
"""
__optimized for large datasets__
removes duplicate molecules"""
# Uses a lot!
print "----->Filtering duplicates in DS reads: "
if tmpDir == "default":
tmpDir = self.tmpDir
# an array to determine unique rows. Eats 1 byte per DS record
if mode == "ram":
log.debug("Filtering duplicates in RAM")
dups = np.zeros((self.N, 2), dtype="int64", order="C")
dups[:, 0] = self.chrms1
dups[:, 0] *= self.fragIDmult
dups[:, 0] += self.cuts1
dups[:, 1] = self.chrms2
dups[:, 1] *= self.fragIDmult
dups[:, 1] += self.cuts2
dups.sort(axis=1)
dups.shape = (self.N * 2)
strings = dups.view("|S16")
# Converting two indices to a single string to run unique
uids = uniqueIndex(strings)
del strings, dups
stay = np.zeros(self.N, bool)
stay[uids] = True # indexes of unique DS elements
del uids
elif mode == "hdd":
tmpFile = os.path.join(tmpDir, str(np.random.randint(0, 100000000)))
a = mirnylib.h5dict.h5dict(tmpFile)
a.add_empty_dataset("duplicates", (self.N,), dtype="|S24")
a.add_empty_dataset("temp", (self.N,), dtype="|S24")
dset = a.get_dataset("duplicates")
tempdset = a.get_dataset("temp")
code = dedent("""
tmp = np.array(chrms1, dtype=np.int64) * fragIDmult + cuts1
tmp2 = np.array(chrms2, dtype=np.int64) * fragIDmult + cuts2
newarray = np.zeros((len(tmp),3), dtype = np.int64)
newarray[:,0] = tmp
newarray[:,1] = tmp2
newarray[:,:2].sort(axis=1)
newarray[:,2] = np.arange(start, end, dtype=np.int64)
newarray.shape = (3*len(tmp))
a = np.array(newarray.view("|S24"))
assert len(a) == len(chrms1)
""")
self.evaluate(code, ["chrms1", "cuts1", "chrms2", "cuts2"],
constants={"np":np, "fragIDmult":self.fragIDmult},
outVariable=("a", dset))
stay = np.zeros(self.N, bool)
numutils.externalMergeSort(dset, tempdset, chunkSize=chunkSize)
bins = range(0, self.N - 1000, self.chunksize) + [self.N - 1]
for start, end in zip(bins[:-1], bins[1:]):
curset = dset[start:end + 1]
curset = curset.view(dtype=np.int64)
curset.shape = (len(curset) / 3, 3)
unique = (curset[:-1, 0] != curset[1:, 0]) + (curset[:-1, 1] != curset[1:, 1])
stay[curset[:, 2][unique]] = True
if end == self.N - 1:
stay[curset[-1, 2]] = True
del a
del tmpFile
self.metadata["320_duplicatesRemoved"] = len(stay) - stay.sum()
self.maskFilter(stay)
def filterByCisToTotal(self, cutH=0.0, cutL=0.01):
"""
__NOT optimized for large datasets__
Remove fragments with too low or too high cis-to-total ratio.
Parameters
----------
cutH : float, 0<=cutH < 1, optional
Fraction of the fragments with largest cis-to-total ratio
to be removed.
cutL : float, 0<=cutL<1, optional
Fraction of the fragments with lowest cis-to-total ratio
to be removed.
"""
concRfragAbsIdxs = np.r_[self.rfragAbsIdxs1, self.rfragAbsIdxs2]
concCis = np.r_[self.chrms1 == self.chrms2, self.chrms1 == self.chrms2]
cis = np.bincount(concRfragAbsIdxs[concCis])
total = np.bincount(concRfragAbsIdxs)
cistototal = np.nan_to_num(cis / total.astype('float'))
numEmptyFrags = (cistototal == 0).sum()
cutLFrags = int(np.ceil((len(cistototal) - numEmptyFrags) * cutL))
cutHFrags = int(np.ceil((len(cistototal) - numEmptyFrags) * cutH))
sortedCistotot = np.sort(cistototal)
lCutoff = sortedCistotot[cutLFrags + numEmptyFrags]
hCutoff = sortedCistotot[len(cistototal) - 1 - cutHFrags]
fragsToFilter = np.where((cistototal < lCutoff) + (cistototal > hCutoff))[0]
print ('Keep fragments with cis-to-total ratio in range ({0},{1}), '
'discard {2} fragments').format(lCutoff, hCutoff, cutLFrags + cutHFrags)
mask = (arrayInArray(self.rfragAbsIdxs1, fragsToFilter) +
arrayInArray(self.rfragAbsIdxs2, fragsToFilter))
self.metadata["330_removedByCisToTotal"] = mask.sum()
self.maskFilter(-mask)
def filterTooClose(self, minRsitesDist=2):
"""
__NOT optimized for large datasets__
Remove fragment pairs separated by less then `minRsitesDist`
restriction sites within the same chromosome.
"""
mask = (np.abs(self.rfragAbsIdxs1 - self.rfragAbsIdxs2) < minRsitesDist) * (self.chrms1 == self.chrms2)
self.metadata["360_closeFragmentsRemoved"] = mask.sum()
print '360_closeFragmentsRemoved: ', mask.sum()
self.maskFilter(-mask)
def filterOrientation(self):
"__NOT optimized for large datasets__"
# Keep only --> --> or <-- <-- pairs, discard --> <-- and <-- -->
mask = (self.strands1 == self.strands2)
self.metadata["370_differentOrientationReadsRemoved"] = mask.sum()
print '370_differentOrientationReadsRemoved: ', mask.sum()
self.maskFilter(-mask)
def writeFilteringStats(self):
self.metadata["400_readsAfterFiltering"] = self.N
sameChrom = self.chrms1 == self.chrms2
self.metadata["401_cisReads"] = sameChrom.sum()
self.metadata["402_transReads"] = self.N - sameChrom.sum()
self._dumpMetadata()
    def fragmentSum(self, fragments=None, strands="both", useWeights=False):
        """
        __optimized for large datasets__
        returns sum of all counts for a set or subset of fragments
        Parameters
        ----------
        fragments : list of fragment IDs, optional
            Use only this fragments. By default all fragments are used
        strands : 1,2 or "both" (default)
            Use only first or second side of the read
            (first has SS, second - doesn't)
        useWeights : bool, optional
            If set to True, will give a fragment sum with weights adjusted for iterative correction.
        """
        # Uses 0 bytes per read
        if fragments is None:
            fragments = self.rFragIDs
        if not useWeights:
            # NOTE(review): in this branch `fragments` is not used — counts
            # are always taken over ALL fragments (length len(self.rFragIDs)).
            # Callers in this file only use the no-argument form, which is
            # consistent with that; confirm before passing a subset here.
            f1 = chunkedBincount(self._getSliceableVector("rfragAbsIdxs1"), minlength = len(self.rFragIDs))
            f2 = chunkedBincount(self._getSliceableVector("rfragAbsIdxs2"), minlength = len(self.rFragIDs))
            if strands == "both":
                return f1 + f2
            if strands == 1:
                return f1
            if strands == 2:
                return f2
            # NOTE(review): any other `strands` value falls through and
            # returns None implicitly.
        else:
            if strands == "both":
                # each read contributes 1/(w1*w2), i.e. the IC-corrected count
                pass1 = 1. / self.fragmentWeights[arraySearch(self.rFragIDs, self.fragids1)]
                pass1 /= self.fragmentWeights[arraySearch(self.rFragIDs, self.fragids2)]
                return arraySumByArray(self.fragids1, fragments, pass1) + arraySumByArray(self.fragids2, fragments, pass1)
            else:
                raise NotImplementedError("Sorry")
    def iterativeCorrectionFromMax(self, minimumCount=50, precision=0.01):
        "TODO: rewrite this to account for a new fragment model"
        # Fragment-level iterative correction: repeatedly rescale per-fragment
        # weights until every fragment's weighted sum is within `precision`
        # (relative max deviation) of the mean. Fragments with fewer than
        # `minimumCount` reads are filtered out first.
        biases = np.ones(len(self.rFragMids), dtype = np.double)
        self.fragmentWeights = 1. * self.fragmentSum()
        self.fragmentFilter(self.fragmentWeights > minimumCount)
        # recompute after filtering so weights match the surviving fragments
        self.fragmentWeights = 1. * self.fragmentSum()
        while True:
            newSum = 1. * self.fragmentSum(useWeights=True)
            biases *= newSum / newSum.mean()
            # convergence criterion: largest relative deviation from the mean
            maxDev = np.max(np.abs(newSum - newSum.mean())) / newSum.mean()
            print maxDev
            self.fragmentWeights *= (newSum / newSum.mean())
            if maxDev < precision:
                return biases
    def printStats(self):
        """Print the metadata counters to stdout (alias for printMetadata)."""
        self.printMetadata()
def save(self, filename):
"Saves dataset to filename, does not change the working file."
if self.filename == filename:
raise StandardError("Cannot save to the working file")
newh5dict = mirnylib.h5dict.h5dict(filename, mode='w')
for name in self.vectors.keys():
newh5dict[name] = self.h5dict[name]
newh5dict["metadata"] = self.metadata
print "----> Data saved to file %s" % (filename,)
    def load(self, filename, buildFragments="deprecated"):
        "Loads dataset from file to working file; check for inconsistency"
        otherh5dict = mirnylib.h5dict.h5dict(filename, 'r')
        if "metadata" in otherh5dict:
            self.metadata = otherh5dict["metadata"]
        else:
            print otherh5dict.keys()
            warnings.warn("Metadata not found!!!")
        # all per-read vectors must have the same length; the first vector
        # seen sets the expected length
        length = 0
        for name in self.vectors:
            data = otherh5dict[name]
            ld = len(data)
            if length == 0:
                length = ld
            else:
                if ld != length:
                    # mismatched vector lengths mean a corrupt input file
                    print("---->!!!!!File %s contains inconsistend data<----" %
                          filename)
                    self.exitProgram("----> Sorry...")
            self._setData(name, data)
        print "---->Loaded data from file %s, contains %d reads" % (
            filename, length)
        self.N = length
        self._checkConsistency()
def saveHeatmap(self, filename, resolution=1000000,
countDiagonalReads="Once",
useWeights=False,
useFragmentOverlap=False, maxBinSpawn=10):
"""
Saves heatmap to filename at given resolution.
For small genomes where number of fragments per bin is small,
please set useFragmentOverlap to True.
This will assign each fragment to all bins over which the fragment
spawns.
Parameters
----------
filename : str
Filename of the output h5dict
resolution : int or str
Resolution of a heatmap. May be an int or 'fragment' for
restriction fragment resolution.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
useWeights : bool
If True, then take weights from 'weights' variable. False by default.
If using iterativeCorrectionFromMax (fragment-level IC), use weights.
useFragmentOverlap : bool (optional)
Set this to true if you have few fragments per bin (bin size <20kb for HindIII)
It will consume more RAM and be slower.
"""
try:
os.remove(filename)
except:
pass
tosave = mirnylib.h5dict.h5dict(path=filename, mode="w")
if not useFragmentOverlap:
heatmap = self.buildAllHeatmap(resolution, countDiagonalReads, useWeights)
else:
heatmap = self.buildHeatmapWithOverlapCpp(resolution, countDiagonalReads, maxBinSpawn)
tosave["heatmap"] = heatmap
del heatmap
if resolution != 'fragment':
chromosomeStarts = np.array(self.genome.chrmStartsBinCont)
numBins = self.genome.numBins
else:
chromosomeStarts = np.array(self.genome.chrmStartsRfragCont)
numBins = self.genome.numRfrags
tosave["resolution"] = resolution
tosave["genomeBinNum"] = numBins
tosave["genomeIdxToLabel"] = self.genome.idx2label
tosave["chromosomeStarts"] = chromosomeStarts
print "----> Heatmap saved to '{0}' at {1} resolution".format(
filename, resolution)
def saveByChromosomeHeatmap(self, filename, resolution=10000,
                            includeTrans=True,
                            countDiagonalReads="Once"):
    """
    Save chromosome-by-chromosome heatmaps to an h5dict.

    This method is not as memory demanding as saving an all-by-all heatmap.

    Keys of the h5dict are of the format ["1 14"], where chromosomes are
    zero-based and there is one space between the numbers.

    .. warning :: Chromosome numbers are always zero-based.
        Only "chr3" labels are one-based in this package.

    Parameters
    ----------
    filename : str
        Filename of the h5dict with the output
    resolution : int
        Resolution to save heatmaps
    includeTrans : bool, optional
        Build inter-chromosomal heatmaps as well (default: True)
    countDiagonalReads : "once" or "twice"
        How many times to count reads in the diagonal bin
    """
    if countDiagonalReads.lower() not in ["once", "twice"]:
        raise ValueError("Bad value for countDiagonalReads")
    self.genome.setResolution(resolution)
    mydict = mirnylib.h5dict.h5dict(filename)

    for chromosome in xrange(self.genome.chrmCount):
        # Data is sorted by (chrms1, cuts1); binary search locates the
        # slice of reads whose first side lies on this chromosome.
        c1 = self.h5dict.get_dataset("chrms1")
        p1 = self.h5dict.get_dataset("cuts1")
        low = h5dictBinarySearch(c1, p1, (chromosome, -1), "left")
        high = h5dictBinarySearch(c1, p1, (chromosome, 999999999), "right")

        chr1 = self._getVector("chrms1", low, high)
        chr2 = self._getVector("chrms2", low, high)
        # midpoints converted to bin indices at the requested resolution
        pos1 = np.array(self._getVector("mids1", low, high) / resolution, dtype=np.int32)
        pos2 = np.array(self._getVector("mids2", low, high) / resolution, dtype=np.int32)

        if includeTrans == True:
            # Keep reads that touch this chromosome on either side.
            mask = ((chr1 == chromosome) + (chr2 == chromosome))
            chr1 = chr1[mask]
            chr2 = chr2[mask]
            pos1 = pos1[mask]
            pos2 = pos2[mask]
        # Located chromosomes and positions of chromosomes
        if includeTrans == True:
            # Moving different chromosomes to side 2, so that side 1
            # (c1) always equals `chromosome`.
            mask = (chr2 == chromosome) * (chr1 != chromosome)
            chr1[mask], chr2[mask], pos1[mask], pos2[mask] = chr2[mask].copy(), chr1[
                mask].copy(), pos2[mask].copy(), pos1[mask].copy()

        # Sort by the partner chromosome so each (chromosome, chrom2)
        # pair occupies a contiguous slice found via searchsorted below.
        args = np.argsort(chr2)
        chr2 = chr2[args]
        pos1 = pos1[args]
        pos2 = pos2[args]

        for chrom2 in xrange(chromosome, self.genome.chrmCount):
            if (includeTrans == False) and (chrom2 != chromosome):
                continue
            start = np.searchsorted(chr2, chrom2, "left")
            end = np.searchsorted(chr2, chrom2, "right")
            cur1 = pos1[start:end]
            cur2 = pos2[start:end]
            # Encode (bin1, bin2) as a single flat int64 index, then count.
            label = np.array(cur1, "int64")
            label *= self.genome.chrmLensBin[chrom2]
            label += cur2
            maxLabel = self.genome.chrmLensBin[chromosome] * \
                self.genome.chrmLensBin[chrom2]
            counts = np.bincount(label, minlength=maxLabel)
            mymap = counts.reshape((self.genome.chrmLensBin[chromosome], -1))
            if chromosome == chrom2:
                # Symmetrize the cis map (reads were counted in one
                # orientation only); diagonal was doubled by the sum.
                mymap = mymap + mymap.T
                if countDiagonalReads.lower() == "once":
                    fillDiagonal(mymap, np.diag(mymap).copy() / 2)
            mydict["%d %d" % (chromosome, chrom2)] = mymap

    print "----> By chromosome Heatmap saved to '{0}' at {1} resolution".format(filename, resolution)
    return
def exitProgram(self, a):
    """Print the message *a*, print a farewell line and terminate."""
    print(a)
    print(" ----> Bye! :) <----")
    exit()
def iterativeCorrection(self, numsteps=10, normToLen=False):
    '''
    Perform fragment-based iterative correction of Hi-C data.

    Computes a per-read weight such that, after `numsteps` passes, the
    total weight attached to each restriction fragment is equalized
    (or, with normToLen=True, proportional to fragment length).
    Results are stored in self.weights and registered in self.vectors.
    '''
    # Lengths of all restriction fragments, concatenated over chromosomes.
    rfragLensConc = np.concatenate(self.genome.rfragLens)
    weights = np.ones(self.N, dtype=np.float32)
    # Each read contributes two fragment occurrences (side 1 and side 2);
    # concatenate them and remember which read each occurrence came from.
    concRfragAbsIdxs = np.r_[self.rfragAbsIdxs1, self.rfragAbsIdxs2]
    concOrigArgs = np.r_[np.arange(0, self.N), np.arange(0, self.N)]
    # Sort occurrences by fragment ID so each fragment is a contiguous run.
    concArgs = np.argsort(concRfragAbsIdxs)
    concRfragAbsIdxs = concRfragAbsIdxs[concArgs]
    concOrigArgs = concOrigArgs[concArgs]
    # Boundaries between runs of equal fragment IDs in the sorted array.
    fragBorders = np.where(concRfragAbsIdxs[:-1] != concRfragAbsIdxs[1:])[0] + 1
    fragBorders = np.r_[0, fragBorders, 2 * self.N]
    # Length of the fragment backing each run (indexed by run, not by ID).
    rfragLensLocal = rfragLensConc[concRfragAbsIdxs[fragBorders[:-1]]]
    for _ in range(numsteps):
        # NOTE(review): this inner loop is pure Python over all fragments
        # per iteration; fine for moderate N, slow for very large datasets.
        for i in range(len(fragBorders) - 1):
            # Reads whose occurrences fall on fragment run i.
            mask = concOrigArgs[fragBorders[i]:fragBorders[i + 1]]
            totWeight = weights[mask].sum()
            if normToLen:
                weights[mask] *= rfragLensLocal[i] / totWeight
            else:
                weights[mask] /= totWeight
    # Register the result so downstream code can find it by name.
    self.vectors['weights'] = 'float32'
    self.weights = weights
def plotScaling(self, fragids1=None, fragids2=None,
                # IDs of fragments for which to plot scaling.
                # One can, for example, limit oneself to fragments
                # shorter than 1000 bp, or to interactions between arms.
                useWeights=False,
                # use weights associated with fragment length
                excludeNeighbors=None, enzyme=None,
                # number of neighboring fragments to exclude;
                # the restriction enzyme is needed for that
                normalize=True, normRange=None,
                # normalize the final plot to sum to one
                withinArms=True,
                # treat chromosomal arms separately
                mindist=1000,
                # scaling was proved to be unreliable
                # under 10000 bp for 6-cutter enzymes
                maxdist=None,
                # ----Calculating scaling within a set of regions only----
                regions=None,
                # array of tuples (chrom, start, end) for which scaling
                # should be calculated; may be extremely long for #>100
                appendReadCount=True, **kwargs
                # append read count to the plot label;
                # remaining kwargs are passed to plt.plot
                ):
    """Plot contact-probability scaling P(s); possibly uses a subset of
    fragments, or weights, and possibly normalizes after plotting.

    Plan of scaling calculation:

    1. Subdivide all genome into regions. \n
       a. Different chromosomes \n
       b. Different arms \n
       c. User defined squares/rectangles on a contact map \n
          -(chromosome, start, end) square around the diagonal \n
          -(chr, st1, end1, st2, end2) rectangle \n
    2. Use either all fragments, or only interactions between
       two groups of fragments \n
       e.g. you can calculate how scaling for small fragments differs
       from that for large ones \n
       It can possibly be used for testing Hi-C protocol issues. \n
       One can see the effect of weights by doing this \n
    3. (optional) Calculate correction associated
       with fragment length dependence
    4. Subdivide all possible genomic separations into log-spaced bins
    5. Calculate expected number of fragment pairs within each bin
       (possibly with weights from step 3).
       If exclusion of neighbors is specified,
       the expected number of fragments knows about this

    Parameters
    ----------
    fragids1, fragids2 : np.array of fragment IDs, optional
        Scaling is calculated only for interactions between
        fragids1 and fragids2.
        If omitted, all fragments are used.
        If a boolean array is supplied, it serves as a mask for fragments.
    useWeights : bool, optional
        Use weights calculated from fragment length
    excludeNeighbors : int or None, optional
        If None, all fragment pairs are considered.
        If integer, only fragment pairs separated
        by at least this number of r-fragments are considered.
    enzyme : string ("HindIII","NcoI")
        If excludeNeighbors is used, you have to specify restriction enzyme
    normalize : bool, optional
        Do an overall normalization of the answer, by default True.
    withinArms : bool, optional
        Set to False to use whole chromosomes instead of arms
    mindist, maxdist : int, optional
        Use lengths from mindist to maxdist
    regions : list of (chrom,start,end) or (ch,st1,end1,st2,end2), optional
        Restrict scaling calculation to only certain squares of the map
    appendReadCount : bool, optional
        Append read count to the plot label
    plot : bool, optional
        If False then do not display the plot. True by default.
    **kwargs : optional
        All other keyword args are passed to plt.plot

    Returns
    -------
    (bins, probabilities) - values to plot on the scaling plot
    """
    # TODO:(MI) write an ab-initio test for scaling calculation
    if not self._isSorted():
        self._sortData()
    import matplotlib.pyplot as plt
    if excludeNeighbors <= 0:
        excludeNeighbors = None  # Not excluding neighbors
    # Use all fragments if they're not specified;
    # parse fragment arrays if they are boolean masks.
    if (fragids1 is None) and (fragids2 is None):
        allFragments = True
    else:
        allFragments = False
    if fragids1 is None:
        fs = self.fragmentSum()
        fragids1 = fs > 0
    if fragids2 is None:
        try:
            fragids2 = fs > 0
        except:
            fragids2 = self.fragmentSum() > 0
        # NOTE(review): if fragids1 was supplied but fragids2 was not,
        # `fs` is never defined and this `del` raises NameError — confirm.
        del fs
    if fragids1.dtype == np.bool:
        fragids1 = self.rFragIDs[fragids1]
    if fragids2.dtype == np.bool:
        fragids2 = self.rFragIDs[fragids2]
    # Calculate regions if not specified: whole chromosomes, or both arms.
    if regions is None:
        if withinArms == False:
            regions = [(i, 0, self.genome.chrmLens[i])
                       for i in xrange(self.genome.chrmCount)]
        else:
            regions = [(i, 0, self.genome.cntrMids[i])
                       for i in xrange(self.genome.chrmCount)] + \
                      [(i, self.genome.cntrMids[i], self.genome.chrmLens[i])
                       for i in xrange(self.genome.chrmCount)]
    if maxdist is None:
        # Largest separation representable in any region (square or
        # rectangular); the +[0] guards max() against empty lists.
        maxdist = max(
            max([i[2] - i[1] for i in regions]),
            # rectangular regions
            max([abs(i[2] - i[3]) for i in regions if
                 len(i) > 3] + [0]),
            max([abs(i[1] - i[4]) for i in regions if
                 len(i) > 3] + [0])  # other side
        )
    # Decompose fragment IDs into (chromosome, position-on-chromosome).
    fragch1 = fragids1 / self.fragIDmult
    fragch2 = fragids2 / self.fragIDmult
    fragpos1 = fragids1 % self.fragIDmult
    fragpos2 = fragids2 % self.fragIDmult
    c1_h5 = self.h5dict.get_dataset("chrms1")
    p1_h5 = self.h5dict.get_dataset("cuts1")
    c2_h5 = self.h5dict.get_dataset("chrms2")
    p2_h5 = self.h5dict.get_dataset("cuts2")
    # Log-spaced bins of genomic separation (+0.1 avoids integer-edge ties).
    bins = np.array(
        numutils.logbins(mindist, maxdist, 1.12), float) + 0.1
    numBins = len(bins) - 1  # number of bins
    args = np.argsort(self.rFragIDs)
    usort = self.rFragIDs[args]
    if useWeights == True:  # calculating weights if needed
        try:
            self.fragmentWeights
        except:
            self.calculateFragmentWeights()
        uweights = self.fragmentWeights[args]  # weights for sorted fragment IDs
        weights1 = uweights[np.searchsorted(usort, fragids1)]
        weights2 = uweights[np.searchsorted(usort, fragids2)
                            ]  # weights for fragment IDs under consideration
    numExpFrags = np.zeros(numBins)  # expected count of pairs in each bin
    values = [0] * (len(bins) - 1)
    rawValues = [0] * (len(bins) - 1)
    binBegs, binEnds = bins[:-1], bins[1:]
    binMids = 0.5 * (binBegs + binEnds).astype(float)
    binLens = binEnds - binBegs
    for region in regions:
        # Locate the slice of reads whose side 1 falls in the region.
        if len(region) == 3:
            chrom, start1, end1 = region
            low = h5dictBinarySearch(c1_h5, p1_h5, (chrom, start1), "left")
            high = h5dictBinarySearch(c1_h5, p1_h5, (chrom, end1), "right")
        if len(region) == 5:
            chrom, start1, end1, start2, end2 = region
            assert start1 < end1
            assert start2 < end2
            low = h5dictBinarySearch(c1_h5, p1_h5, (chrom, min(start1, start2)), "left")
            high = h5dictBinarySearch(c1_h5, p1_h5, (chrom, max(end1, end2)), "right")
        chr2 = c2_h5[low:high]
        pos1 = p1_h5[low:high]
        pos2 = p2_h5[low:high]
        myfragids1 = self._getVector("fragids1", low, high)
        myfragids2 = self._getVector("fragids2", low, high)
        mystrands1 = self._getVector("strands1", low, high)
        mystrands2 = self._getVector("strands2", low, high)
        mydists = self._getVector("distances", low, high)
        print "region", region, "low", low, "high", high
        # Build the read mask and the masks of eligible fragments.
        if len(region) == 3:
            mask = (pos1 > start1) * (pos1 < end1) * \
                (chr2 == chrom) * (pos2 > start1) * (pos2 < end1)
            maskFrag1 = (fragch1 == chrom) * (fragpos1 >
                                              start1) * (fragpos1 < end1)
            maskFrag2 = (fragch2 == chrom) * (fragpos2 >
                                              start1) * (fragpos2 < end1)
        if len(region) == 5:
            chrom, start1, end1, start2, end2 = region
            mask1 = (chr2 == chrom) * (pos1 > start1) * \
                (pos1 < end1) * (pos2 > start2) * (pos2 < end2)
            mask2 = (chr2 == chrom) * (pos1 > start2) * \
                (pos1 < end2) * (pos2 > start1) * (pos2 < end1)
            mask = mask1 + mask2
            maskFrag1 = (fragch1 == chrom) * (
                (fragpos1 > start1) * (fragpos1 < end1)
                + (fragpos1 > start2) * (fragpos1 < end2))
            maskFrag2 = (fragch2 == chrom) * (
                (fragpos2 > start2) * (fragpos2 < end2)
                + (fragpos2 > start1) * (fragpos2 < end1))
        if maskFrag1.sum() == 0 or maskFrag2.sum() == 0:
            print "no fragments for region", region
            continue
        if mask.sum() == 0:
            print "No reads for region", region
            continue
        chr2 = chr2[mask]
        pos1 = pos1[mask]
        pos2 = pos2[mask]
        myfragids1 = myfragids1[mask]
        myfragids2 = myfragids2[mask]
        mystrands1 = mystrands1[mask]
        mystrands2 = mystrands2[mask]
        mydists = mydists[mask]
        validFragPairs = np.ones(len(chr2), dtype=np.bool)
        if allFragments == False:
            # Filter the dataset so it has only the specified fragments.
            p11 = arrayInArray(myfragids1, fragids1)
            p12 = arrayInArray(myfragids1, fragids2)
            p21 = arrayInArray(myfragids2, fragids1)
            p22 = arrayInArray(myfragids2, fragids2)
            validFragPairs *= ((p11 * p22) + (p12 * p21))
        # Consider pairs of fragments from the same region.
        # Keep only --> --> or <-- <-- pairs, discard --> <-- and <-- -->
        validFragPairs *= (mystrands1 == mystrands2)
        # Keep only fragment pairs more than excludeNeighbors fragments apart.
        # NOTE(review): when excludeNeighbors is None this relies on the
        # Py2 behavior of comparing ints with None — confirm intended.
        distsInFrags = self.genome.getFragmentDistance(
            myfragids1, myfragids2, self.genome.enzymeName)
        validFragPairs *= distsInFrags > excludeNeighbors
        distances = np.sort(mydists[validFragPairs])
        "calculating fragments lengths for exclusions to expected # of counts"
        # sorted fragment IDs and lengths
        print region
        # filtering fragments that correspond to the current region
        bp1, bp2 = fragpos1[maskFrag1], fragpos2[maskFrag2]
        # positions of fragments on the chromosome
        p2arg = np.argsort(bp2)
        p2 = bp2[p2arg]  # sorted positions on the second side
        if excludeNeighbors is not None:
            "calculating excluded fragments (neighbors) and their weights"\
                " to subtract them later"
            # NOTE(review): mask1/mask2 are only defined for 5-element
            # (rectangular) regions; for 3-element regions this raises
            # NameError (or reuses stale masks) — looks like it should be
            # maskFrag1/maskFrag2. Confirm before relying on this path.
            excFrag1, excFrag2 = self.genome.getPairsLessThanDistance(
                fragids1[mask1], fragids2[mask2], excludeNeighbors, enzyme)
            excDists = np.abs(excFrag2 - excFrag1)
            # distances between excluded fragment pairs
            if useWeights == True:
                correctionWeights = weights1[numutils.arraySearch(
                    fragids1, excFrag1)]
                # weights for excluded fragment pairs
                correctionWeights = correctionWeights * weights2[
                    numutils.arraySearch(fragids2, excFrag2)]
        if useWeights == True:
            # NOTE(review): same mask1/mask2 concern as above — presumably
            # these should be maskFrag1/maskFrag2; verify.
            w1, w2 = weights1[mask1], weights2[mask2]
            sw2 = np.r_[0, np.cumsum(w2[p2arg])]
            # cumsum of sorted weights on side 2
        for minDist, maxDist, binIndex in zip(binBegs, binEnds, range(numBins)):
            "Now calculating actual number of fragment pairs for a "\
                "length-bin, or weight of all these pairs"
            # For each first fragment in a pair, calculate total # of
            # restriction fragments in the genome lying downstream within
            # the bin.
            val1 = np.searchsorted(p2, bp1 - maxDist)
            val2 = np.searchsorted(p2, bp1 - minDist)
            if useWeights == False:
                curcount = np.sum(np.abs(val1 - val2))  # just # of fragments
            else:
                # (difference in cumsum of weights) * my weight
                curcount = np.sum(w1 * np.abs(sw2[val1] - sw2[val2]))
            # Repeat the procedure for the fragments lying upstream.
            val1 = np.searchsorted(p2, bp1 + maxDist)
            val2 = np.searchsorted(p2, bp1 + minDist)
            if useWeights == False:
                curcount += np.sum(np.abs(val1 - val2))
            else:
                curcount += np.sum(w1 * np.abs(sw2[val1] - sw2[val2]))
            # Now modifying the expected count because of excluded fragments.
            if excludeNeighbors is not None:
                if useWeights == False:
                    ignore = ((excDists > minDist) *
                              (excDists < maxDist)).sum()
                else:
                    ignore = (correctionWeights[((excDists > minDist) *
                                                 (excDists < maxDist))]).sum()
                if (ignore >= curcount) and (ignore != 0):
                    # Tolerate tiny numerical overshoot; report real ones.
                    if ignore < curcount * 1.0001:
                        curcount = ignore = 0
                    else:
                        print "error found", "minDist:", minDist
                        print " curcount:", curcount, " ignore:", ignore
                else:  # Everything is all right
                    curcount -= ignore
            numExpFrags[binIndex] += curcount
        # Observed / expected, accumulated over regions.
        for i in xrange(len(bins) - 1):
            first, last = tuple(np.searchsorted(distances, [binBegs[i], binEnds[i]]))
            mycounts = last - first
            values[i] += (mycounts / float(numExpFrags[i]))
            rawValues[i] += (mycounts)
    values = np.array(values)
    if normalize == True:
        if normRange is None:
            # Normalize so the curve integrates to one over valid bins.
            values /= np.sum(
                1. * (binLens * values)[
                    np.logical_not(
                        np.isnan(binMids * values))])
        else:
            # Normalize only over the requested separation range.
            values /= np.sum(
                1. * (binLens * values)[
                    np.logical_not(
                        np.isnan(binMids * values))
                    * (binMids > normRange[0])
                    * (binMids < normRange[1])])
    do_plot = kwargs.pop('plot', True)
    if do_plot:
        if appendReadCount == True:
            if "label" in kwargs.keys():
                kwargs["label"] = kwargs["label"] + \
                    ", %d reads" % len(distances)
        plt.plot(binMids, values, **kwargs)
    return (binMids, values)
def plotRsiteStartDistribution(self, offset=5, length=200):
    """
    Plot the distribution of read-start positions relative to the
    restriction site, split by read strand and side.

    Run plt.show() after this function.

    Parameters
    ----------
    offset : int
        Shift applied so that small negative distances are plottable;
        automatically clamped to the most negative observed distance.
    length : int
        Number of positions (histogram bins) to display per subplot.
    """
    import matplotlib.pyplot as plt
    # Distance from the restriction site: fragment length minus the
    # distance of the read start from the fragment start.
    dists1 = self.fraglens1 - np.array(self.dists1, dtype="int32")
    dists2 = self.fraglens2 - np.array(self.dists2, dtype="int32")
    m = min(dists1.min(), dists2.min())
    if offset < -m:
        # np.bincount cannot take negative values; enlarge the shift.
        offset = -m
        print "minimum negative distance is %d, larger than offset;"\
            " offset set to %d" % (m, -m)
    dists1 += offset
    dists2 += offset
    myrange = np.arange(-offset, length - offset)
    plt.subplot(141)
    plt.title("strands1, side 1")
    # NOTE(review): only this first subplot adds an extra "5 +" to the
    # distances; the other three do not — confirm whether intentional.
    plt.plot(myrange, np.bincount(
        5 + dists1[self.strands1 == True])[:length])
    plt.subplot(142)
    plt.title("strands1, side 2")
    plt.plot(myrange, np.bincount(
        dists2[self.strands1 == True])[:length])
    plt.subplot(143)
    plt.title("strands2, side 1")
    plt.plot(myrange, np.bincount(
        dists1[self.strands1 == False])[:length])
    plt.subplot(144)
    plt.title("strands2, side 2")
    plt.plot(myrange, np.bincount(
        dists2[self.strands1 == False])[:length])
|
yzl0083/orange | refs/heads/master | Orange/tuning/__init__.py | 6 | import Orange.core
import Orange.classification
import Orange.evaluation.scoring
import Orange.evaluation.testing
import Orange.misc
from Orange.utils import deprecated_class_attribute, deprecated_keywords, \
deprecated_members
@deprecated_members({"returnWhat": "return_what", "object": "learner"})
class TuneParameters(Orange.classification.Learner):
    """Base class for learners that tune parameters of a wrapped learner
    by internal cross-validation.

    .. attribute:: data

        Data table with either discrete or continuous features

    .. attribute:: weight_id

        The id of the weight meta attribute

    .. attribute:: learner

        The learning algorithm whose parameters are to be tuned. This can
        be, for instance, :obj:`Orange.classification.tree.TreeLearner`.

    .. attribute:: evaluate

        The statistics to evaluate. The default is
        :obj:`Orange.evaluation.scoring.CA`, so the learner will be fit for
        the optimal classification accuracy. You can replace it with, for
        instance, :obj:`Orange.evaluation.scoring.AUC` to optimize the AUC.
        Statistics can return either a single value (classification
        accuracy), a list with a single value (this is what
        :obj:`Orange.evaluation.scoring.CA` actually does), or arbitrary
        objects which the compare function below must be able to compare.

    .. attribute:: folds

        The number of folds used in internal cross-validation. Default is 5.

    .. attribute:: compare

        The function used to compare the results. The function should
        accept two arguments (e.g. two classification accuracies, AUCs or
        whatever the result of ``evaluate`` is) and return a positive value
        if the first argument is better, 0 if they are equal and a negative
        value if the first is worse than the second. The default compare
        function is ``cmp``. You don't need to change this if evaluate is
        such that higher values mean a better classifier.

    .. attribute:: return_what

        Decides what should be the result of tuning. Possible values are:

        * ``TuneParameters.RETURN_NONE`` (or 0): tuning will return nothing,
        * ``TuneParameters.RETURN_PARAMETERS`` (or 1): return the optimal value(s) of parameter(s),
        * ``TuneParameters.RETURN_LEARNER`` (or 2): return the learner set to optimal parameters,
        * ``TuneParameters.RETURN_CLASSIFIER`` (or 3): return a classifier trained with the optimal parameters on the entire data set. This is the default setting.

        Regardless of this, the learner (given as parameter ``learner``) is
        left set to the optimal parameters.

    .. attribute:: verbose

        If 0 (default), the class doesn't print anything. If set to 1, it
        will print out the optimal value found, if set to 2, it will print
        out all tried values and the related scores.

    If the tuner returns the classifier, it behaves as a learning
    algorithm: it can be called with data and the result is a "trained"
    classifier, usable e.g. in cross-validation.

    Out of these attributes, the only necessary argument is ``learner``.
    The real tuning classes (subclasses of this class) add two additional -
    the attributes that tell what parameter(s) to optimize and which values
    to use.
    """

    # Symbolic constants for ``return_what``.
    RETURN_NONE = 0
    RETURN_PARAMETERS = 1
    RETURN_LEARNER = 2
    RETURN_CLASSIFIER = 3

    # Backward-compatible aliases for the old camelCase names.
    returnNone = \
        deprecated_class_attribute("returnNone", "RETURN_NONE")
    returnParameters = \
        deprecated_class_attribute("returnParameters", "RETURN_PARAMETERS")
    returnLearner = \
        deprecated_class_attribute("returnLearner", "RETURN_LEARNER")
    returnClassifier = \
        deprecated_class_attribute("returnClassifier", "RETURN_CLASSIFIER")

    @deprecated_keywords({"examples": "data", "weightID": "weight_id"})
    def __new__(cls, data=None, weight_id=0, **argkw):
        self = Orange.classification.Learner.__new__(cls, **argkw)
        if data is not None:
            # Learner convention: constructing with data trains immediately
            # and returns a classifier rather than the learner instance.
            for name, value in argkw.items():
                setattr(self, name, value)
            self.__init__(**argkw)
            return self.__call__(data, weight_id)
        else:
            return self

    def findobj(self, name):
        """Resolve the dotted attribute path *name* relative to
        ``self.learner`` and return ``(owner, attribute_name)`` so the
        caller can get/set the final attribute.
        """
        # str.split replaces the deprecated ``string.split`` helper.
        names = name.split(".")
        lastobj = self.learner
        for i in names[:-1]:
            lastobj = getattr(lastobj, i)
        return lastobj, names[-1]
class Tune1Parameter(TuneParameters):
    """Class :obj:`Orange.optimization.Tune1Parameter` tunes a single parameter.

    .. attribute:: parameter

        The name of the parameter (or a list of names, if the same parameter
        is stored at multiple places - see the examples) to be tuned.

    .. attribute:: values

        A list of parameter's values to be tried.

    To show how it works, we shall fit the minimal number of examples in a
    leaf for a tree classifier.

    part of :download:`optimization-tuning1.py <code/optimization-tuning1.py>`

    .. literalinclude:: code/optimization-tuning1.py
        :lines: 3-11

    Set up like this, when the tuner is called, it sets ``learner.min_subset``
    to 1, 2, 3, 4, 5, 10, 15 and 20, and measures the AUC in 5-fold cross
    validation. It will then reset learner.minSubset to the optimal value
    found and, since we left ``return_what`` at the default
    (``RETURN_CLASSIFIER``), construct and return the classifier from the
    entire data set. So, what we get is a classifier, but if we'd also like
    to know what the optimal value was, we can get it from
    ``learner.min_subset``.

    Tuning is of course not limited to setting numeric parameters. You can,
    for instance, try to find the optimal criteria for assessing the quality
    of attributes by tuning ``parameter="measure"``, trying settings like
    ``values=[Orange.feature.scoring.GainRatio(), Orange.feature.scoring.Gini()]``.

    Since the tuner returns a classifier and thus behaves like a learner, it
    can be used in a cross-validation. Let us see whether a tuned tree indeed
    enhances the AUC or not. We shall reuse the tuner from above, add another
    tree learner, and test them both.

    part of :download:`optimization-tuning1.py <code/optimization-tuning1.py>`

    .. literalinclude:: code/optimization-tuning1.py
        :lines: 13-18

    This can be time consuming: for each of 8 values for ``min_subset`` it
    will perform 5-fold cross validation inside a 10-fold cross validation -
    altogether 400 trees. Plus, it will learn the optimal tree afterwards for
    each fold. Adding a tree without tuning, that makes 420 trees built in
    total.

    Nevertheless, results are good::

        Untuned tree: 0.930
        Tuned tree: 0.986
    """

    def __call__(self, data, weight=None, verbose=0):
        # Configuration attributes are optional; fall back to defaults.
        verbose = verbose or getattr(self, "verbose", 0)
        evaluate = getattr(self, "evaluate", Orange.evaluation.scoring.CA)
        folds = getattr(self, "folds", 5)
        compare = getattr(self, "compare", cmp)
        return_what = getattr(self, "return_what",
                              Tune1Parameter.RETURN_CLASSIFIER)

        # The parameter may live at several dotted paths simultaneously.
        if (type(self.parameter) == list) or (type(self.parameter) == tuple):
            to_set = [self.findobj(ld) for ld in self.parameter]
        else:
            to_set = [self.findobj(self.parameter)]

        # Same CV split for every candidate value, for a fair comparison.
        cvind = Orange.core.MakeRandomIndicesCV(data, folds)
        findBest = Orange.utils.selection.BestOnTheFly(seed=data.checksum(),
                                                       call_compare_on_1st=True)
        tableAndWeight = weight and (data, weight) or data
        for par in self.values:
            for i in to_set:
                setattr(i[0], i[1], par)
            res = evaluate(Orange.evaluation.testing.test_with_indices(
                [self.learner], tableAndWeight, cvind))
            findBest.candidate((res, par))
            if verbose == 2:
                print '*** optimization %s: %s:' % (par, ", ".join("%.8f" % r for r in res))

        # Leave the learner configured with the winning value.
        bestpar = findBest.winner()[1]
        for i in to_set:
            setattr(i[0], i[1], bestpar)

        if verbose:
            print "*** Optimal parameter: %s = %s" % (self.parameter, bestpar)

        if return_what == Tune1Parameter.RETURN_NONE:
            return None
        elif return_what == Tune1Parameter.RETURN_PARAMETERS:
            return bestpar
        elif return_what == Tune1Parameter.RETURN_LEARNER:
            return self.learner
        else:
            # RETURN_CLASSIFIER: train on the whole data set with the
            # optimal parameter and annotate the result with it.
            classifier = self.learner(data)
            if not Orange.utils.environ.orange_no_deprecated_members:
                classifier.setattr("fittedParameter", bestpar)
            classifier.setattr("fitted_parameter", bestpar)
            return classifier
@deprecated_members({"progressCallback": "progress_callback"})
class TuneMParameters(TuneParameters):
"""The use of :obj:`Orange.optimization.TuneMParameters` differs from
:obj:`Orange.optimization.Tune1Parameter` only in specification of tuning
parameters.
.. attribute:: parameters
A list of two-element tuples, each containing the name of a parameter
and its possible values.
For example we can try to tune both the minimal number of instances in
leaves and the splitting criteria by setting the tuner as follows:
:download:`optimization-tuningm.py <code/optimization-tuningm.py>`
.. literalinclude:: code/optimization-tuningm.py
"""
def __call__(self, data, weight=None, verbose=0):
evaluate = getattr(self, "evaluate", Orange.evaluation.scoring.CA)
folds = getattr(self, "folds", 5)
compare = getattr(self, "compare", cmp)
verbose = verbose or getattr(self, "verbose", 0)
return_what = getattr(self, "return_what", Tune1Parameter.RETURN_CLASSIFIER)
progress_callback = getattr(self, "progress_callback", lambda i: None)
to_set = []
parnames = []
for par in self.parameters:
if (type(par[0]) == list) or (type(par[0]) == tuple):
to_set.append([self.findobj(ld) for ld in par[0]])
parnames.append(par[0])
else:
to_set.append([self.findobj(par[0])])
parnames.append([par[0]])
cvind = Orange.core.MakeRandomIndicesCV(data, folds)
findBest = Orange.utils.selection.BestOnTheFly(seed=data.checksum(),
call_compare_on_1st=True)
tableAndWeight = weight and (data, weight) or data
numOfTests = sum([len(x[1]) for x in self.parameters])
milestones = set(range(0, numOfTests, max(numOfTests / 100, 1)))
for itercount, valueindices in enumerate(Orange.utils.counters.LimitedCounter(\
[len(x[1]) for x in self.parameters])):
values = [self.parameters[i][1][x] for i, x \
in enumerate(valueindices)]
for pi, value in enumerate(values):
for i, par in enumerate(to_set[pi]):
setattr(par[0], par[1], value)
if verbose == 2:
print "%s: %s" % (parnames[pi][i], value)
res = evaluate(Orange.evaluation.testing.test_with_indices(
[self.learner], tableAndWeight, cvind))
if itercount in milestones:
progress_callback(100.0 * itercount / numOfTests)
findBest.candidate((res, values))
if verbose == 2:
print "===> Result: %s\n" % res
bestpar = findBest.winner()[1]
if verbose:
print "*** Optimal set of parameters: ",
for pi, value in enumerate(bestpar):
for i, par in enumerate(to_set[pi]):
setattr(par[0], par[1], value)
if verbose:
print "%s: %s" % (parnames[pi][i], value),
if verbose:
print
if return_what == Tune1Parameter.RETURN_NONE:
return None
elif return_what == Tune1Parameter.RETURN_PARAMETERS:
return bestpar
elif return_what == Tune1Parameter.RETURN_LEARNER:
return self.learner
else:
classifier = self.learner(data)
if Orange.utils.environ.orange_no_deprecated_members:
classifier.fittedParameters = bestpar
classifier.fitted_parameters = bestpar
return classifier
@deprecated_members({"storeCurve": "store_curve"}, wrap_methods=["__init__"])
class ThresholdLearner(Orange.classification.Learner):
    """:obj:`Orange.optimization.ThresholdLearner` is a class that wraps
    another learner. When given the data, it calls the wrapped learner to
    build a classifier, then it uses the classifier to predict the class
    probabilities on the training instances. Storing the probabilities, it
    computes the threshold that would give the optimal classification
    accuracy. Then it wraps the classifier and the threshold into an
    instance of :obj:`Orange.optimization.ThresholdClassifier`.

    Note that the learner doesn't perform internal cross-validation. Also,
    the learner doesn't work for multivalued classes.

    :obj:`Orange.optimization.ThresholdLearner` has the same interface as
    any learner: if the constructor is given data, it returns a classifier,
    else it returns a learner. It has two attributes.

    .. attribute:: learner

        The wrapped learner, for example an instance of
        :obj:`Orange.classification.bayes.NaiveLearner`.

    .. attribute:: store_curve

        If `True`, the resulting classifier will contain an attribute curve,
        with a list of tuples containing thresholds and classification
        accuracies at that threshold (default `False`).
    """

    @deprecated_keywords({"examples": "data", "weightID": "weight_id"})
    def __new__(cls, data=None, weight_id=0, **kwds):
        self = Orange.classification.Learner.__new__(cls, **kwds)
        if data is not None:
            # Bug fix: this used to call ``self.__init__(**kwargs)``, a
            # NameError - the parameter pack is named ``kwds``.
            self.__init__(**kwds)
            return self.__call__(data, weight_id)
        else:
            return self

    @deprecated_keywords({"storeCurve": "store_curve"})
    def __init__(self, learner=None, store_curve=False, **kwds):
        self.learner = learner
        self.store_curve = store_curve
        for name, value in kwds.items():
            setattr(self, name, value)

    @deprecated_keywords({"examples": "data", "weightID": "weight_id"})
    def __call__(self, data, weight_id=0):
        """Train the wrapped learner and return a ThresholdClassifier with
        the CA-optimal threshold found on the training data."""
        if self.learner is None:
            raise AttributeError("Learner not set.")

        classifier = self.learner(data, weight_id)
        threshold, optCA, curve = Orange.wrappers.ThresholdCA(classifier,
                                                              data,
                                                              weight_id)
        if self.store_curve:
            return ThresholdClassifier(classifier, threshold, curve=curve)
        else:
            return ThresholdClassifier(classifier, threshold)
class ThresholdClassifier(Orange.classification.Classifier):
    """Wrapper holding a probabilistic classifier together with a decision
    threshold, used by both :obj:`Orange.optimization.ThresholdLearner` and
    :obj:`Orange.optimization.ThresholdLearner_fixed`. When asked to
    classify an instance, it obtains the class probabilities from the
    wrapped classifier and predicts the second class exactly when its
    probability exceeds the threshold.

    .. attribute:: classifier

        The wrapped classifier, normally the one related to the
        ThresholdLearner's learner, e.g. an instance of
        :obj:`Orange.classification.bayes.NaiveLearner`.

    .. attribute:: threshold

        The threshold for classification into the second class.

    The two attributes can be specified set as attributes or given to the
    constructor as ordinary arguments.
    """

    def __init__(self, classifier, threshold, **kwds):
        self.classifier = classifier
        self.threshold = threshold
        for attr_name, attr_value in kwds.items():
            setattr(self, attr_name, attr_value)

    def __call__(self, instance, what=Orange.classification.Classifier.GetValue):
        # Always ask the wrapped classifier for probabilities; the value
        # is derived from them by thresholding the second class.
        probs = self.classifier(instance, self.GetProbabilities)
        if what == self.GetProbabilities:
            return probs
        predicted = Orange.data.Value(self.classifier.classVar,
                                      probs[1] > self.threshold)
        if what == Orange.classification.Classifier.GetValue:
            return predicted
        return (predicted, probs)
class ThresholdLearner_fixed(Orange.classification.Learner):
    """ This is a convenience variant of
    :obj:`Orange.optimization.ThresholdLearner`. Instead of finding the
    optimal threshold it uses a prescribed one. It has the following two
    attributes.

    .. attribute:: learner

        The wrapped learner, for example an instance of
        :obj:`~Orange.classification.bayes.NaiveLearner`.

    .. attribute:: threshold

        Threshold to use in classification.

    This class calls its base learner and puts the resulting classifier
    together with the threshold into an instance of
    :obj:`ThresholdClassifier`.
    """

    @deprecated_keywords({"examples": "data", "weightID": "weight_id"})
    def __new__(cls, data=None, weight_id=0, **kwds):
        self = Orange.classification.Learner.__new__(cls, **kwds)
        if data is not None:
            self.__init__(**kwds)
            return self.__call__(data, weight_id)
        else:
            return self

    def __init__(self, learner=None, threshold=None, **kwds):
        self.learner = learner
        self.threshold = threshold
        for name, value in kwds.items():
            # Bug fix: this used to read ``setattr(name, value)`` (missing
            # ``self``), raising TypeError for any extra keyword argument.
            setattr(self, name, value)

    @deprecated_keywords({"examples": "data", "weightID": "weight_id"})
    def __call__(self, data, weight_id=0):
        """Train the wrapped learner and return a ThresholdClassifier with
        the prescribed threshold. Requires a binary class."""
        if self.learner is None:
            raise AttributeError("Learner not set.")
        if self.threshold is None:
            raise AttributeError("Threshold not set.")
        if len(data.domain.classVar.values) != 2:
            raise ValueError("ThresholdLearner handles binary classes only.")

        return ThresholdClassifier(self.learner(data, weight_id),
                                   self.threshold)
class PreprocessedLearner(object):
    """Chain one or more data preprocessors in front of a learner.

    Constructing with both a preprocessor and a learner immediately returns
    a wrapped learner (see :meth:`wrapLearner`); constructing with only a
    preprocessor returns a reusable PreprocessedLearner instance.
    """

    def __new__(cls, preprocessor=None, learner=None):
        instance = object.__new__(cls)
        if learner is None:
            # Plain construction: Python will call __init__ automatically.
            return instance
        # Shortcut construction: return the wrapped learner directly.
        # (The returned object is not a PreprocessedLearner, so __init__
        # is invoked explicitly here and not again by Python.)
        instance.__init__(preprocessor)
        return instance.wrapLearner(learner)

    def __init__(self, preprocessor=None, learner=None):
        """Normalize *preprocessor* into a list of preprocessing steps."""
        if preprocessor is None:
            self.preprocessors = []
        elif isinstance(preprocessor, list):
            self.preprocessors = preprocessor
        else:
            self.preprocessors = [preprocessor]
        if learner:
            self.wrapLearner(learner)

    def processData(self, data, weightId=None):
        """Run *data* through every preprocessor in order.

        Returns (data, weightId) when a weight id was supplied or produced,
        otherwise just the processed data.
        """
        weighted = weightId is not None
        had_weight = weighted
        for step in self.preprocessors:
            result = step(data, weightId) if weighted else step(data)
            if isinstance(result, tuple):
                # A step may return (data, weightId); track the new weight.
                data, weightId = result
                weighted = True
            else:
                data = result
        return (data, weightId) if had_weight else data

    def wrapLearner(self, learner):
        """Return a learner that preprocesses its data before training."""
        outer = self

        class WrappedLearner(learner.__class__):
            preprocessor = outer
            wrappedLearner = learner
            name = getattr(learner, "name", "")

            def __call__(self, data, weightId=0, getData=False):
                outcome = self.preprocessor.processData(data, weightId or 0)
                if isinstance(outcome, tuple):
                    processed, weight = outcome
                else:
                    processed, weight = outcome, 0
                model = self.wrappedLearner(processed, weight)
                return (model, processed) if getData else model

            def __reduce__(self):
                # Pickle by re-wrapping the original learner with the same
                # preprocessor chain.
                return PreprocessedLearner, (self.preprocessor.preprocessors,
                                             self.wrappedLearner)

            def __getattr__(self, attr):
                # Delegate unknown attributes to the wrapped learner.
                return getattr(learner, attr)

        return WrappedLearner()
|
carlohamalainen/nipype | refs/heads/master | nipype/workflows/__init__.py | 156 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
|
uberamd/NGECore2 | refs/heads/master | scripts/object/tangible/collection/col_ent_instrument_01.py | 85615 | import sys
def setup(core, object):
return |
uberamd/NGECore2 | refs/heads/master | scripts/object/tangible/crafting/station/armor_repair.py | 85615 | import sys
def setup(core, object):
return |
agry/NGECore2 | refs/heads/master | scripts/object/tangible/quest/quest_start/profession_bounty_hunter_30.py | 85615 | import sys
def setup(core, object):
return |
mlperf/training_results_v0.7 | refs/heads/master | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/relay/module.py | 1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, wildcard-import
"""A global module storing everything needed to interpret or compile a Relay program."""
from .base import register_relay_node, RelayNode
from .._ffi import base as _base
from . import _make
from . import _module
from . import expr as _expr
from . import ty as _ty
@register_relay_node
class Module(RelayNode):
    """The global Relay module holding a collection of functions.

    Every global function is identified by a unique tvm.relay.GlobalVar;
    routing references through GlobalVar (instead of direct object links)
    is what enables recursive functions without cyclic references.

    Parameters
    ----------
    functions: Optional[dict].
        Map of global var (or name string) to Function.

    type_definitions: Optional[dict].
        Map of global type var (or name string) to Type.
    """

    def __init__(self, functions=None, type_definitions=None):
        functions = self._normalize_keys(
            functions, _expr.GlobalVar,
            "Expect functions to be Dict[GlobalVar, Function]")
        type_definitions = self._normalize_keys(
            type_definitions, _ty.GlobalTypeVar,
            "Expect type_definitions to be Dict[GlobalTypeVar, Type]")
        self.__init_handle_by_constructor__(_make.Module, functions, type_definitions)

    @staticmethod
    def _normalize_keys(mapping, var_cls, err_msg):
        """Coerce string keys of *mapping* to *var_cls*; None becomes {}.

        Non-dict inputs are passed through untouched (matching the FFI
        constructor's own handling).
        """
        if mapping is None:
            return {}
        if not isinstance(mapping, dict):
            return mapping
        normalized = {}
        for key, value in mapping.items():
            if isinstance(key, _base.string_types):
                key = var_cls(key)
            if not isinstance(key, var_cls):
                raise TypeError(err_msg)
            normalized[key] = value
        return normalized

    def __setitem__(self, var, val):
        """Add a mapping from *var* to *val* (a Function or a Type)."""
        return self._add(var, val)

    def _add(self, var, val, update=False):
        # Functions and type definitions go through different FFI entry
        # points; expressions identify the function case.
        if isinstance(val, _expr.Expr):
            if isinstance(var, _base.string_types):
                # Reuse an existing GlobalVar of the same name if present.
                if _module.Module_ContainGlobalVar(self, var):
                    var = _module.Module_GetGlobalVar(self, var)
                else:
                    var = _expr.GlobalVar(var)
            _module.Module_Add(self, var, val, update)
            return
        assert isinstance(val, _ty.Type)
        if isinstance(var, _base.string_types):
            var = _ty.GlobalTypeVar(var)
        _module.Module_AddDef(self, var, val)

    def __getitem__(self, var):
        """Look up a global definition by name or by variable.

        Parameters
        ----------
        var: Union[String, GlobalVar, GlobalTypeVar]
            The name or global variable.

        Returns
        -------
        val: Union[Function, Type]
            The definition referenced by :code:`var`.
        """
        if isinstance(var, _base.string_types):
            return _module.Module_Lookup_str(self, var)
        if isinstance(var, _expr.GlobalVar):
            return _module.Module_Lookup(self, var)
        return _module.Module_LookupDef(self, var)

    def update(self, other):
        """Merge the functions of *other* (Module or dict) into this module."""
        if isinstance(other, dict):
            other = Module(other)
        return _module.Module_Update(self, other)

    def get_global_var(self, name):
        """Return the GlobalVar registered under *name*.

        Raises tvm.TVMError if no matching global var exists.
        """
        return _module.Module_GetGlobalVar(self, name)

    def get_global_type_var(self, name):
        """Return the GlobalTypeVar registered under *name*.

        Raises tvm.TVMError if no matching global type var exists.
        """
        return _module.Module_GetGlobalTypeVar(self, name)

    def get_constructor(self, tag):
        """Return the ADT constructor associated with integer *tag*.

        Raises tvm.TVMError if the tag is unknown.
        """
        return _module.Module_LookupTag(self, tag)

    @staticmethod
    def from_expr(expr, functions=None, type_defs=None):
        """Construct a module from a standalone expression.

        *expr* becomes the entry point (wrapped in a function if necessary);
        *functions* / *type_defs* optionally seed the module's definitions.
        """
        funcs = {} if functions is None else functions
        defs = {} if type_defs is None else type_defs
        return _module.Module_FromExpr(expr, funcs, defs)
|
ftCommunity/ftcommunity-apps | refs/heads/master | packages/ftDuinIO/ftduinio.py | 1 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
import sys, time, serial
import ftduino_direct as ftd
from PyQt4 import QtCore
from TouchStyle import *
from TouchAuxiliary import *
from PyQt4.QtCore import QTimer
import queue, pty, subprocess, select, os
import urllib.request, urllib.parse, urllib.error
import avrdude_widget
MAX_TEXT_LINES = 50
STORE = "https://raw.githubusercontent.com/harbaum/ftduino/master/bin/"
STD_STYLE = "QPlainTextEdit { font-size: 12px; color: white; background-color: black; font-family: monospace; }"
EXT_STYLE = "QPlainTextEdit { font-size: 8px; color: #c9ff74; background-color: #184d00; font-family: monospace; }"
FTDUINO_VIRGIN_VIDPID = "1c40:0537"
FTDUINO_VIDPID = "1c40:0538"
TST = False

# Read the app version ("version: X.Y") from the bundled manifest file.
# BUG FIX: the original used `while not "version" in r: r = f.readline()`,
# which spins forever at EOF (readline() returns "" there), and kept the
# trailing newline in VSTRING. A bounded `for` loop fixes both.
try:
    with open(os.path.dirname(os.path.realpath(__file__)) + "/manifest", "r", encoding="utf-8") as f:
        VSTRING = ""
        for line in f:
            if "version" in line:
                VSTRING = "v" + line.split(":", 1)[1].strip()
                break
except Exception:
    # Manifest missing or malformed -- show a placeholder version.
    VSTRING = "n/a"
class FtcGuiApplication(TouchApplication):
    def __init__(self, args):
        """Build the main window, menu and device page, then run the Qt loop."""
        TouchApplication.__init__(self, args)
        self.duinos=[]             # display names of detected ftDuinos
        self.act_duino=None        # currently opened ftduino connection
        self.flashBootloader=False # True while the flash page targets the bootloader
        self.window = TouchWindow("ftDuinIO")
        self.window.titlebar.close.clicked.connect(self.end)
        self.setMainWidget()
        self.menu=self.window.addMenu()
        self.menu.setStyleSheet("font-size: 24px;")
        self.m_manage = self.menu.addAction(QCoreApplication.translate("mmain","Sketches"))
        self.m_manage.triggered.connect(self.on_menu_manage)
        #self.menu.addSeparator()
        self.m_bootloader = self.menu.addAction(QCoreApplication.translate("mmain","Flash Bootloader"))
        self.m_bootloader.triggered.connect(self.on_menu_bootloader)
        self.m_about = self.menu.addAction(QCoreApplication.translate("mmain","About"))
        self.m_about.triggered.connect(self.on_menu_about)
        self.window.setCentralWidget(self.mainWidget)
        self.window.show()
        self.ftdscan()
        # Poll the connected device every 250 ms to detect unplugging.
        self.checker=QTimer()
        self.checker.timeout.connect(self.checkFtdComm)
        self.checker.start(250)
        self.app_process = None
        self.flashfile = None
        self.exec_()
    def end(self):
        """Stop any running I/O poll loop and release the serial device."""
        self.out=False
        if self.act_duino!=None:
            try:
                self.act_duino.close()
            except:
                pass

    def on_menu_bootloader(self):
        """Menu entry: open the flash page in bootloader-flashing mode."""
        self.flashBootloader=True
        self.dFlash_clicked()
        self.menu.setDisabled(True)
        # Warning colours -- flashing a bootloader is a risky operation.
        self.fFlash.setStyleSheet("font-size: 20px; color: white; background-color: qlineargradient( x1:0 y1:0, x2:0 y2:1, stop:0 yellow, stop:1 red);")
        self.fFlash.setDisabled(False)

    def on_menu_about(self):
        """Show the About dialog including the manifest version (VSTRING)."""
        t=TouchMessageBox(QCoreApplication.translate("about","About"), self.window)
        t.setCancelButton()
        t.addPixmap(QPixmap("icon.png"))
        text=QCoreApplication.translate("about","<font size='2'>ftDuinIO<br><font size='1'>Version ")
        text=text+VSTRING
        text=text+QCoreApplication.translate("about","<center>(c) 2018 Peter Habermehl<br>for the ft community")
        t.setText(text)
        t.setPosButton(QCoreApplication.translate("about","Okay"))
        t.exec_()

    def get_bootloader(self):
        """Return installed bootloader image names (``*.hex``, suffix stripped)."""
        path = os.path.dirname(os.path.realpath(__file__))
        # remove trailing .hex
        files = [f[:-4] for f in os.listdir(os.path.join(path,"bootloader")) if os.path.isfile(os.path.join(path, "bootloader", f))]
        return files

    def get_binaries(self):
        """Return installed sketch names (``*.ino.hex``, suffix stripped)."""
        path = os.path.dirname(os.path.realpath(__file__))
        # remove trailing .ino.hex
        files = [f[:-8] for f in os.listdir(os.path.join(path,"binaries")) if os.path.isfile(os.path.join(path, "binaries", f))]
        return files
    class FileList(TouchDialog):
        """Dialog listing locally installed sketches.

        Tapping an entry offers to delete it; the "More..." button downloads
        additional sketches from the on-line store (STORE).
        """

        def __init__(self, parent=None):
            TouchDialog.__init__(self,QCoreApplication.translate("manage","Sketches"),parent)
            self.layout=QVBoxLayout()
            # the list
            self.itemlist = QListWidget()
            self.itemlist.setObjectName("smalllabel")
            self.itemlist.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
            self.itemlist.itemClicked.connect(self.on_itemchanged)
            self.scan()
            self.layout.addWidget(self.itemlist)
            self.download_btn = QPushButton(QCoreApplication.translate("manage","More..."))
            self.download_btn.setStyleSheet("font-size: 20px;")
            self.download_btn.clicked.connect(self.on_download)
            self.layout.addWidget(self.download_btn)
            self.centralWidget.setLayout(self.layout)

        def scan(self):
            """Re-populate the list with locally installed sketch binaries."""
            self.itemlist.clear()
            path = os.path.dirname(os.path.realpath(__file__))
            # remove trailing .ino.hex
            files = [f[:-8] for f in os.listdir(os.path.join(path,"binaries")) if os.path.isfile(os.path.join(path, "binaries", f))]
            self.itemlist.addItems(files)

        def on_itemchanged(self):
            """Offer to delete the tapped sketch file."""
            r = self.itemlist.currentItem().text()
            t = TouchMessageBox(QCoreApplication.translate("manage","Sketch"), self)
            t.setText(r)
            t.setPosButton(QCoreApplication.translate("manage","Delete!"))
            t.setNegButton(QCoreApplication.translate("manage","Cancel"))
            (c,v)=t.exec_()
            if v==QCoreApplication.translate("manage","Delete!"):
                path = os.path.dirname(os.path.realpath(__file__))
                os.remove(os.path.join(path,"binaries",r+".ino.hex"))
                self.scan()

        def on_download(self):
            """Fetch the store index, let the user pick a sketch, download it."""
            food=[]
            # NOTE(review): this local name shadows the 'select' module
            # imported at the top of the file (the list itself is unused).
            select=[]
            try:
                file=urllib.request.urlopen(STORE+"00index.txt", timeout=1)
                food=file.read().decode('utf-8').split("\n")
                file.close()
            except:
                t=TouchMessageBox(QCoreApplication.translate("flash","Store"), self)
                t.setCancelButton()
                t.setText(QCoreApplication.translate("flash","Store not accessible."))
                t.setPosButton(QCoreApplication.translate("flash","Okay"))
                t.exec_()
            if food !=[]:
                # Index format: "name: <title>" / "file: <filename>" /
                # "version: <v>" line triples (parsed positionally below).
                menu=[]
                for line in food:
                    if line[0:6]=="name: ": menu.append(line[6:])
                (s,r)=TouchAuxListRequester(QCoreApplication.translate("flash","Store"),QCoreApplication.translate("ecl","Select binary:"),menu,menu[0],"Okay",self).exec_()
                if s:
                    # Walk the index again: once the chosen name is found (a),
                    # the next line holds the filename (b), then the version.
                    a=False
                    b=False
                    for line in food:
                        if b:
                            version=line[9:]
                            break
                        if a and not b:
                            filename=line[6:]
                            b=True
                        if line[6:]==r: a=True
                    v=""
                    if b:
                        t=TouchMessageBox(QCoreApplication.translate("flash","Download"), self)
                        t.setText( QCoreApplication.translate("flash","File:") + "<br>"+ filename + "<br><br>" +
                                   QCoreApplication.translate("flash","Version: v") + version)
                        t.setPosButton(QCoreApplication.translate("flash","Download"))
                        t.setNegButton(QCoreApplication.translate("flash","Cancel"))
                        (c,v)=t.exec_()
                        if v==QCoreApplication.translate("flash","Download"):
                            try:
                                file=urllib.request.urlopen(STORE+filename, timeout=1)
                                food=file.read()
                                file.close()
                                target = os.path.dirname(os.path.realpath(__file__))
                                target = os.path.join(target, "binaries", filename)
                                # Default to writing; only ask when the file
                                # already exists locally.
                                v=QCoreApplication.translate("flash","Replace")
                                if os.path.exists(target):
                                    t=TouchMessageBox(QCoreApplication.translate("flash","Download"), self)
                                    t.setText( QCoreApplication.translate("flash","File:") + "<br>"+ filename + "<br><br>" +
                                               QCoreApplication.translate("flash","already exists!"))
                                    t.setPosButton(QCoreApplication.translate("flash","Replace"))
                                    t.setNegButton(QCoreApplication.translate("flash","Cancel"))
                                    (c,v)=t.exec_()
                                if v==QCoreApplication.translate("flash","Replace"):
                                    with open(target, 'wb') as f:
                                        f.write(food)
                                        f.close()
                                    self.scan()
                            except: # download failed
                                t=TouchMessageBox(QCoreApplication.translate("flash","Store"), self)
                                t.setCancelButton()
                                t.setText(QCoreApplication.translate("flash","Download failed."))
                                t.setPosButton(QCoreApplication.translate("flash","Okay"))
                                t.exec_()
    def on_menu_manage(self):
        """Menu entry: open the sketch manager dialog."""
        self.FileList(self.window).exec_()

    def checkFtdComm(self):
        """Periodic (QTimer) liveness check of the open device; reconnect on loss."""
        if self.act_duino!=None:
            n=self.act_duino.comm("ftduino_id_get")
            if n=="Fail":
                self.act_duino=None
                self.ftdcomm()

    def ftdscan(self):
        """Scan for attached ftDuinos and rebuild the device combo box."""
        duinos=ftd.ftduino_scan()
        self.duinos=[]   # names shown in the combo box
        self.device=[]   # matching port identifiers
        for d in duinos:
            # d is (port, id); fall back to the port when the id is empty.
            if d[1]!="":
                self.duinos.append(d[1])
                self.device.append(d[0])
            else:
                self.duinos.append(d[0])
                self.device.append(d[0])
        self.dList.clear()
        if len(self.duinos)>0:
            self.ftdcomm()
            self.dList.addItems(self.duinos)
        else:
            # No device: disable all device actions.
            self.dFlash.setDisabled(True)
            self.dRename.setDisabled(True)
            self.dIO.setDisabled(True)
            self.dList.addItem(QCoreApplication.translate("comm","none found"))
            self.dComm.setStyleSheet("font-size: 20px;")
            self.dComm.setText(QCoreApplication.translate("comm","none"))
            self.act_duino=None
        self.dComm.repaint()
        self.processEvents()
    def ftdcomm(self):
        """(Re)open the serial connection to the selected device and update
        the status line / action buttons accordingly."""
        if self.act_duino!=None:
            try:
                self.act_duino.close()
            except:
                pass
        if len(self.device)>0:
            duino=self.device[self.dList.currentIndex()]
            self.act_duino=ftd.ftduino(duino)
            # Give the device a moment to come up before talking to it.
            time.sleep(0.25)
        if self.act_duino!=None: n=self.act_duino.comm("ftduino_id_get")
        else: n="Fail"
        if n!="Fail" and n!="":
            # Device answers: green status, all actions available.
            self.dComm.setStyleSheet("font-size: 20px; background-color: darkgreen;")
            self.dComm.setText(QCoreApplication.translate("comm","SW: v")+self.act_duino.comm("ftduino_direct_get_version"))
            self.dRename.setDisabled(False)
            self.dFlash.setDisabled(False)
            self.dIO.setDisabled(False)
        elif len(self.device)>0:
            # Device present but not answering: only flashing stays enabled
            # (it may simply lack the ftduino_direct sketch).
            self.dComm.setStyleSheet("font-size: 20px; background-color: darkred;")
            self.dComm.setText(QCoreApplication.translate("comm","failed"))
            self.dRename.setDisabled(True)
            self.dFlash.setDisabled(False)
            self.dIO.setDisabled(True)
            self.act_duino=None
        else:
            self.dComm.setStyleSheet("font-size: 20px;")
            self.dComm.setText(QCoreApplication.translate("comm","none"))
            self.dRename.setDisabled(True)
            self.dFlash.setDisabled(True)
            self.dIO.setDisabled(True)
            self.act_duino=None

    def rename_clicked(self):
        """Ask for a new ftDuino ID and store it on the device."""
        n=self.act_duino.comm("ftduino_id_get")
        if n!="" and n!="Fail":
            (res,st)=TouchAuxRequestText(QCoreApplication.translate("rename","Rename"),
                        QCoreApplication.translate("rename","Enter new ftDuino ID for device '") + n +"':",
                        n,
                        QCoreApplication.translate("rename","Okay"), self.window
                        ).exec_()
            if ((st!="") and res):
                res=self.act_duino.comm("ftduino_id_set "+st)
            self.rescan_clicked()

    def rescan_trigger(self):
        """Schedule a single device rescan in one second (used after a flash)."""
        self.timer = QTimer()
        self.timer.setSingleShot(True)
        self.timer.timeout.connect(self.rescan_clicked)
        self.timer.start(1000)

    def rescan_clicked(self):
        """Show 'scanning' feedback and rebuild the device list."""
        self.dComm.setStyleSheet("font-size: 20px; background-color: darkorange;")
        self.dComm.setText(QCoreApplication.translate("comm","scanning"))
        self.dComm.repaint()
        self.processEvents()
        self.ftdscan()
    def fFlash_clicked(self):
        """Flash the selected file: a sketch via the device's bootloader, or a
        new bootloader image when flashBootloader mode is active."""
        flasherror=False  # NOTE(review): never read afterwards -- dead local
        self.fLabel.hide()
        self.fBinary.hide()
        self.fFlash.hide()
        self.avrdude.show()
        self.menu.setDisabled(True)
        self.fBack.hide()
        self.processEvents()
        self.fWidget.repaint()
        # get file to flash from combobox
        file = self.fBinary.currentText()
        if not self.flashBootloader: # binary flash
            duino=self.device[self.dList.currentIndex()]
            # activate bootloader
            # NOTE(review): comparing against the string "None" is always
            # true (even for act_duino is None) -- probably meant `!= None`.
            # Harmless here because the except swallows the failed close().
            if self.act_duino!="None":
                try:
                    self.act_duino.close()
                except:
                    pass
                self.act_duino=None
                self.avrdude.setPort(duino)
                self.avrdude.trigger_bootloader()
            # NOTE(review): serial.tools.list_ports is used but only `serial`
            # is imported at the top -- confirm pyserial exposes it here.
            devices = []
            for dev in serial.tools.list_ports.grep("vid:pid="+FTDUINO_VIDPID):
                devices.append(dev[0])
            for dev in serial.tools.list_ports.grep("vid:pid="+FTDUINO_VIRGIN_VIDPID):
                devices.append(dev[0])
            if len(devices)>1:
                t=TouchMessageBox(QCoreApplication.translate("flash","Error"), self.window)
                t.setText(QCoreApplication.translate("flash","More than one ftDuino connected! Please disconnect all but the device to be flashed."))
                t.setPosButton(QCoreApplication.translate("flash","Okay"))
                t.exec_()
                return
            elif len(devices)<1:
                t=TouchMessageBox(QCoreApplication.translate("flash","Error"), self.window)
                t.setText(QCoreApplication.translate("flash","No ftDuino connected! Please connect the device to be flashed."))
                t.setPosButton(QCoreApplication.translate("flash","Okay"))
                t.exec_()
                return
            else:
                # tell avrdude widget which port to use
                self.avrdude.setPort(devices[0])
                self.avrdude.flash(os.path.join("binaries", file)+".ino.hex")
        else: # bootloader flash
            # bootloader
            self.avrdude.flash(os.path.join("bootloader", file)+".hex", True)
            self.flashBootloader = False
        self.fBack.setDisabled(False)
        self.fBack.setText(QCoreApplication.translate("flash","Back"))
        self.processEvents()

    def xBack_clicked(self):
        """Leave the flash or I/O page and return to the device page."""
        self.out=False
        self.menu.setDisabled(False)
        self.avrdude.hide()
        self.fBack.show()
        self.dWidget.show()
        self.fWidget.hide()
        self.ioWidget.hide()
        self.processEvents()
        self.ftdcomm()

    def io_changed(self):
        """Combo-box change handler: restart the I/O test with the new mode."""
        self.doIO()

    def dIO_clicked(self):
        """Open the I/O test page and start the poll loop."""
        self.dWidget.hide()
        self.fWidget.hide()
        self.ioWidget.show()
        self.out=True   # poll loop in doIO() runs while this stays True
        self.doIO()
    def doIO(self):
        """Configure the device for the selected I/O test mode, then poll and
        display live values until self.out is cleared (by xBack_clicked).

        Mode indices (ioFun combo): 0 switch, 1 voltage, 2 resistance,
        3 distance/counters, 4 outputs, 5 motors.
        """
        outType=self.ioFun.currentIndex()
        dist=self.iDCType.currentIndex()
        if outType==0:
            self.iDCType.hide()
            self.iTextField.show()
            self.oOut.hide()
            self.oMot.hide()
            for n in range(1,9):
                i=self.act_duino.comm("input_set_mode I"+str(n)+" Switch")
        elif outType==1:
            self.iDCType.hide()
            self.iTextField.show()
            self.oOut.hide()
            self.oMot.hide()
            for n in range(1,9):
                i=self.act_duino.comm("input_set_mode I"+str(n)+" Voltage")
        elif outType==2:
            self.iDCType.hide()
            self.iTextField.show()
            self.oOut.hide()
            self.oMot.hide()
            for n in range(1,9):
                i=self.act_duino.comm("input_set_mode I"+str(n)+" Resistance")
        elif outType==3:
            self.iDCType.show()
            self.iTextField.show()
            self.oOut.hide()
            self.oMot.hide()
            for n in range(1,5):
                i=self.act_duino.comm("counter_set_mode C"+str(n)+" Any")
                i=self.act_duino.comm("counter_clear C"+str(n))
            if dist==0: #counters Only
                i=self.act_duino.comm("ultrasonic_enable false")
            else: # dist + counters
                i=self.act_duino.comm("ultrasonic_enable true")
        elif outType==4:
            self.iDCType.hide()
            self.iTextField.hide()
            self.oOut.show()
            self.oMot.hide()
        elif outType==5:
            self.iDCType.hide()
            self.iTextField.hide()
            self.oOut.hide()
            self.oMot.show()
        # Busy poll loop; processEvents() keeps the UI (and the button
        # handlers for outputs/motors) responsive while it runs.
        while self.out:
            self.processEvents()
            time.sleep(0.05)
            s=""
            if outType<3:
                for n in range(1,9):
                    s=s+"I"+str(n)+": "
                    i=self.act_duino.comm("input_get I"+str(n))
                    if outType==0:
                        if i=="1": s=s+"True"
                        elif i=="0": s=s+"False"
                        else: s=s+"Fail"
                        s=s+"\n"
                    elif outType==1:
                        # pad and keep last 5 chars to right-align the value
                        a=" "+i
                        s=s+a[-5:]+" mV\n"
                    elif outType==2:
                        a=" "+i
                        s=s+a[-5:]+" Ohm\n"
                self.iTextField.setText(s)
            elif outType==3:
                if dist==0:
                    a=" "+self.act_duino.comm("counter_get C1")
                    s="C1: "+a[-5:]+"\n"
                else:
                    # C1 shares its pins with the ultrasonic sensor, so D1
                    # replaces the first counter in distance mode.
                    a=self.act_duino.comm("ultrasonic_get")
                    if a!="-1":
                        a=" "+a
                        s="D1: "+a[-5:]+" cm\n"
                    else: s="D1: Fail\n"
                for n in range(2,5):
                    s=s+"C"+str(n)+": "
                    a=" "+self.act_duino.comm("counter_get C"+str(n))
                    s=s+a[-5:]+"\n"
                self.iTextField.setText(s)
    def dFlash_clicked(self):
        """Switch to the flash page and populate the file selector (sketches,
        or bootloader images when flashBootloader mode is active)."""
        # get files to fill combobox
        files = None
        if self.flashBootloader:
            files = self.get_bootloader()
            self.fLabel.setText(QCoreApplication.translate("flash","Bootloader:"))
        else:
            files = self.get_binaries()
            self.fLabel.setText(QCoreApplication.translate("flash","Binary sketch:"))
        self.dWidget.hide()
        self.ioWidget.hide()
        # NOTE(review): setPort() is given the ftduino object here but a port
        # name string in fFlash_clicked -- confirm what the widget expects.
        self.avrdude.setPort(self.act_duino)
        self.avrdude.reset()
        # enable flash button if at least one file is present
        self.fFlash.setStyleSheet("font-size: 20px; color: white; background-color: darkred;")
        if len(files):
            self.fFlash.setEnabled(True)
        self.fBinary.clear()
        for f in files:
            self.fBinary.addItem(f)
        self.fLabel.show()
        self.fBinary.show()
        self.fFlash.show()
        self.fWidget.show()
    def setDWidget(self):
        """Build the device page: selector, connection status and the
        I/O-test / rename / flash buttons."""
        # devices widget
        self.dWidget=QWidget()
        devices=QVBoxLayout()
        hbox=QHBoxLayout()
        text=QLabel(QCoreApplication.translate("devices","Device:"))
        text.setStyleSheet("font-size: 20px;")
        hbox.addWidget(text)
        hbox.addStretch()
        self.dRescan=QPushButton(QCoreApplication.translate("devices","Rescan"))
        self.dRescan.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.dRescan)
        devices.addLayout(hbox)
        self.dList=QComboBox()
        self.dList.setStyleSheet("font-size: 20px;")
        self.dList.addItem(" --- none ---")
        devices.addWidget(self.dList)
        hbox=QHBoxLayout()
        text=QLabel(QCoreApplication.translate("devices","Connect:"))
        text.setStyleSheet("font-size: 20px;")
        hbox.addWidget(text)
        # read-only status line, recoloured by ftdscan()/ftdcomm()
        self.dComm=QLineEdit()
        self.dComm.setReadOnly(True)
        self.dComm.setStyleSheet("font-size: 20px; color: white;")
        self.dComm.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        self.dComm.setText(QCoreApplication.translate("flash","no device"))
        hbox.addWidget(self.dComm)
        devices.addLayout(hbox)
        devices.addStretch()
        self.dIO=QPushButton(QCoreApplication.translate("devices","I/O test"))
        self.dIO.setStyleSheet("font-size: 20px;")
        devices.addWidget(self.dIO)
        self.dRename=QPushButton(QCoreApplication.translate("devices","Rename"))
        self.dRename.setStyleSheet("font-size: 20px;")
        devices.addWidget(self.dRename)
        self.dFlash=QPushButton(QCoreApplication.translate("devices","Flash binary"))
        self.dFlash.setStyleSheet("font-size: 20px;")
        devices.addWidget(self.dFlash)
        self.dWidget.setLayout(devices)
        self.dRescan.clicked.connect(self.rescan_clicked)
        self.dRename.clicked.connect(self.rename_clicked)
        self.dIO.clicked.connect(self.dIO_clicked)
        self.dFlash.clicked.connect(self.dFlash_clicked)

    def on_avrdude_done(self, ok):
        """Callback from the avrdude widget when flashing finishes."""
        # close dialog automatically if everything is fine
        if ok:
            self.xBack_clicked()
            self.rescan_trigger()
        else:
            # keep the page open so the user can read the avrdude output
            self.fBack.show()
    def setFWidget(self):
        """Build the flash page: file selector, avrdude output widget,
        flash button and back button."""
        # flash tool widget
        self.fWidget=QWidget()
        flash=QVBoxLayout()
        flash.setContentsMargins(0,0,0,0)
        flash.addStretch()
        hbox=QHBoxLayout()
        self.fLabel=QLabel("")
        self.fLabel.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.fLabel)
        flash.addLayout(hbox)
        self.fBinary = QComboBox()
        self.fBinary.setStyleSheet("font-size: 20px; color: white;")
        flash.addWidget(self.fBinary)
        # add avrdude widget
        self.avrdude = avrdude_widget.AvrdudeWidget(self.window)
        self.avrdude.hide()
        self.avrdude.done.connect(self.on_avrdude_done)
        flash.addWidget(self.avrdude)
        self.fFlash=QPushButton(QCoreApplication.translate("flash","--> Flash <--"))
        self.fFlash.setStyleSheet("font-size: 20px; color: white; background-color: darkred;")
        flash.addWidget(self.fFlash)
        # disabled until dFlash_clicked() finds at least one file
        self.fFlash.setDisabled(True)
        flash.addStretch()
        self.fBack=QPushButton(QCoreApplication.translate("flash","Back"))
        self.fBack.setStyleSheet("font-size: 20px; color: white;")
        flash.addWidget(self.fBack)
        self.fWidget.setLayout(flash)
        self.fFlash.clicked.connect(self.fFlash_clicked)
        self.fBack.clicked.connect(self.xBack_clicked)
    def setIOWidget(self):
        """Build the I/O test page: mode selector plus per-mode sub-widgets
        (text readout for inputs, button grids for outputs and motors)."""
        # I/O test widget
        self.ioWidget=QWidget()
        io=QVBoxLayout()
        self.ioFun=QComboBox()
        self.ioFun.setStyleSheet("font-size: 20px;")
        # Index order matters: doIO() dispatches on currentIndex().
        self.ioFun.addItems( [ QCoreApplication.translate("io","Inp. Switch"),
                               QCoreApplication.translate("io","Inp. Voltage"),
                               QCoreApplication.translate("io","Inp. Resistance"),
                               QCoreApplication.translate("io","Inp. Dist.&Count."),
                               QCoreApplication.translate("io","Outputs"),
                               QCoreApplication.translate("io","Motors")
                             ] )
        io.addWidget(self.ioFun)
        # the different I/O sub-widgets:
        # distance vs. counter selector (mode 3 only)
        self.iDCType=QComboBox()
        self.iDCType.setStyleSheet("font-size: 20px;")
        self.iDCType.addItems( [ QCoreApplication.translate("io","Counters"),
                                 QCoreApplication.translate("io","Distance")
                               ] )
        io.addWidget(self.iDCType)
        self.iDCType.hide()
        # digital & analog input readout
        self.iTextField=QTextEdit()
        self.iTextField.setReadOnly(True)
        self.iTextField.setWordWrapMode(QTextOption.WrapAnywhere)
        self.iTextField.setStyleSheet("font-size: 15px; color: white; background-color: black; font-family: monospace;")
        self.iTextField.setText("I1: ____0\nI2: ____0\nI3: ____0\nI4: ____0\nI5: ____0\nI6: ____0\nI7: ____0\nI8: ____0")
        io.addWidget(self.iTextField)
        #self.iTextField.hide()
        # outputs:
        self.oOut=QWidget()
        oOut=QVBoxLayout()
        hbox=QHBoxLayout()
        # output power slider, 0..512 (PWM range of the ftduino_direct sketch)
        self.oPower=QSlider()
        self.oPower.setMinimum(0)
        self.oPower.setMaximum(512)
        self.oPower.setOrientation(1)   # 1 == Qt.Horizontal
        self.oPower.setValue(512)
        hbox.addWidget(self.oPower)
        self.oPVal=QLabel()
        self.oPVal.setStyleSheet("font-size: 20px; color: white;")
        self.oPVal.setText("512")
        hbox.addWidget(self.oPVal)
        oOut.addLayout(hbox)
        # one push button per output O1..O8, two per row
        hbox=QHBoxLayout()
        self.oB1=QPushButton("O1")
        self.oB1.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB1)
        self.oB2=QPushButton("O2")
        self.oB2.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB2)
        oOut.addLayout(hbox)
        hbox=QHBoxLayout()
        self.oB3=QPushButton("O3")
        self.oB3.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB3)
        self.oB4=QPushButton("O4")
        self.oB4.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB4)
        oOut.addLayout(hbox)
        hbox=QHBoxLayout()
        self.oB5=QPushButton("O5")
        self.oB5.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB5)
        self.oB6=QPushButton("O6")
        self.oB6.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB6)
        oOut.addLayout(hbox)
        hbox=QHBoxLayout()
        self.oB7=QPushButton("O7")
        self.oB7.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB7)
        self.oB8=QPushButton("O8")
        self.oB8.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.oB8)
        oOut.addLayout(hbox)
        self.oOut.setLayout(oOut)
        io.addWidget(self.oOut)
        self.oOut.hide()
        # press = drive the output, release = switch it off again
        self.oB1.pressed.connect(self.oB1_pressed)
        self.oB1.released.connect(self.oB1_released)
        self.oB2.pressed.connect(self.oB2_pressed)
        self.oB2.released.connect(self.oB2_released)
        self.oB3.pressed.connect(self.oB3_pressed)
        self.oB3.released.connect(self.oB3_released)
        self.oB4.pressed.connect(self.oB4_pressed)
        self.oB4.released.connect(self.oB4_released)
        self.oB5.pressed.connect(self.oB5_pressed)
        self.oB5.released.connect(self.oB5_released)
        self.oB6.pressed.connect(self.oB6_pressed)
        self.oB6.released.connect(self.oB6_released)
        self.oB7.pressed.connect(self.oB7_pressed)
        self.oB7.released.connect(self.oB7_released)
        self.oB8.pressed.connect(self.oB8_pressed)
        self.oB8.released.connect(self.oB8_released)
        self.oPower.valueChanged.connect(self.oPower_changed)
        # motor outputs:
        self.oMot=QWidget()
        oMot=QVBoxLayout()
        hbox=QHBoxLayout()
        # motor power slider, 0..512
        self.mPower=QSlider()
        self.mPower.setMinimum(0)
        self.mPower.setMaximum(512)
        self.mPower.setOrientation(1)   # 1 == Qt.Horizontal
        self.mPower.setValue(512)
        hbox.addWidget(self.mPower)
        self.mPVal=QLabel()
        self.mPVal.setStyleSheet("font-size: 20px; color: white;")
        self.mPVal.setText("512")
        hbox.addWidget(self.mPVal)
        oMot.addLayout(hbox)
        # one left/right button pair per motor M1..M4
        hbox=QHBoxLayout()
        self.mB1=QPushButton(QCoreApplication.translate("mout"," left "))
        self.mB1.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB1)
        txt=QLabel("M1")
        txt.setStyleSheet("font-size: 20px;")
        txt.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        hbox.addWidget(txt)
        self.mB2=QPushButton(QCoreApplication.translate("mout","right"))
        self.mB2.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB2)
        oMot.addLayout(hbox)
        hbox=QHBoxLayout()
        self.mB3=QPushButton(QCoreApplication.translate("mout","left"))
        self.mB3.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB3)
        txt=QLabel("M2")
        txt.setStyleSheet("font-size: 20px;")
        txt.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        hbox.addWidget(txt)
        self.mB4=QPushButton(QCoreApplication.translate("mout","right"))
        self.mB4.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB4)
        oMot.addLayout(hbox)
        hbox=QHBoxLayout()
        self.mB5=QPushButton(QCoreApplication.translate("mout","left"))
        self.mB5.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB5)
        txt=QLabel("M3")
        txt.setStyleSheet("font-size: 20px;")
        txt.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        hbox.addWidget(txt)
        self.mB6=QPushButton(QCoreApplication.translate("mout","right"))
        self.mB6.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB6)
        oMot.addLayout(hbox)
        hbox=QHBoxLayout()
        self.mB7=QPushButton(QCoreApplication.translate("mout","left"))
        self.mB7.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB7)
        txt=QLabel("M4")
        txt.setStyleSheet("font-size: 20px;")
        txt.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        hbox.addWidget(txt)
        self.mB8=QPushButton(QCoreApplication.translate("mout","right"))
        self.mB8.setStyleSheet("font-size: 20px;")
        hbox.addWidget(self.mB8)
        oMot.addLayout(hbox)
        self.oMot.setLayout(oMot)
        io.addWidget(self.oMot)
        self.oMot.hide()
        # press = run the motor, release = brake
        self.mB1.pressed.connect(self.mB1_pressed)
        self.mB1.released.connect(self.mB1_released)
        self.mB2.pressed.connect(self.mB2_pressed)
        self.mB2.released.connect(self.mB2_released)
        self.mB3.pressed.connect(self.mB3_pressed)
        self.mB3.released.connect(self.mB3_released)
        self.mB4.pressed.connect(self.mB4_pressed)
        self.mB4.released.connect(self.mB4_released)
        self.mB5.pressed.connect(self.mB5_pressed)
        self.mB5.released.connect(self.mB5_released)
        self.mB6.pressed.connect(self.mB6_pressed)
        self.mB6.released.connect(self.mB6_released)
        self.mB7.pressed.connect(self.mB7_pressed)
        self.mB7.released.connect(self.mB7_released)
        self.mB8.pressed.connect(self.mB8_pressed)
        self.mB8.released.connect(self.mB8_released)
        self.mPower.valueChanged.connect(self.mPower_changed)
        # back button
        io.addStretch()
        self.ioBack=QPushButton(QCoreApplication.translate("io","Back"))
        self.ioBack.setStyleSheet("font-size: 20px;")
        io.addWidget(self.ioBack)
        self.ioWidget.setLayout(io)
        self.ioFun.currentIndexChanged.connect(self.io_changed)
        self.iDCType.currentIndexChanged.connect(self.io_changed)
        self.ioBack.clicked.connect(self.xBack_clicked)
def setMainWidget(self):
    """Assemble the main widget from the three sub-pages.

    Builds the display, function and I/O pages via their setters, stacks
    them in a vertical layout, and shows only the display page initially
    (the other two are toggled elsewhere).
    """
    # Construct the three sub-pages first; each setter creates its widget.
    self.setDWidget()
    self.setFWidget()
    self.setIOWidget()
    self.mainWidget=QWidget()
    mL=QVBoxLayout()
    mL.addWidget(self.dWidget)
    mL.addWidget(self.fWidget)
    mL.addWidget(self.ioWidget)
    # Only the display page is visible at start-up.
    self.dWidget.show()
    self.fWidget.hide()
    self.ioWidget.hide()
    self.mainWidget.setLayout(mL)
# --- Output test buttons O1-O8 -------------------------------------------
# Pressing a button drives the corresponding output at the power selected
# by the oPower slider; releasing it sets the output back to 0.
def oB1_pressed(self):
    self.act_duino.comm("output_set O1 1 "+str(self.oPower.value()))
def oB1_released(self):
    self.act_duino.comm("output_set O1 1 0")
def oB2_pressed(self):
    self.act_duino.comm("output_set O2 1 "+str(self.oPower.value()))
def oB2_released(self):
    self.act_duino.comm("output_set O2 1 0")
def oB3_pressed(self):
    self.act_duino.comm("output_set O3 1 "+str(self.oPower.value()))
def oB3_released(self):
    self.act_duino.comm("output_set O3 1 0")
def oB4_pressed(self):
    self.act_duino.comm("output_set O4 1 "+str(self.oPower.value()))
def oB4_released(self):
    self.act_duino.comm("output_set O4 1 0")
def oB5_pressed(self):
    self.act_duino.comm("output_set O5 1 "+str(self.oPower.value()))
def oB5_released(self):
    self.act_duino.comm("output_set O5 1 0")
def oB6_pressed(self):
    self.act_duino.comm("output_set O6 1 "+str(self.oPower.value()))
def oB6_released(self):
    self.act_duino.comm("output_set O6 1 0")
def oB7_pressed(self):
    self.act_duino.comm("output_set O7 1 "+str(self.oPower.value()))
def oB7_released(self):
    self.act_duino.comm("output_set O7 1 0")
def oB8_pressed(self):
    self.act_duino.comm("output_set O8 1 "+str(self.oPower.value()))
def oB8_released(self):
    self.act_duino.comm("output_set O8 1 0")
def oPower_changed(self):
    # Mirror the output-power slider value into its label.
    self.oPVal.setText(str(self.oPower.value()))
# --- Motor test buttons M1-M4 --------------------------------------------
# Each motor has a left and a right button (odd = left, even = right).
# Pressing runs the motor at the mPower slider value; releasing brakes it.
def mB1_pressed(self):
    self.act_duino.comm("motor_set M1 left "+str(self.mPower.value()))
def mB1_released(self):
    self.act_duino.comm("motor_set M1 brake 0")
def mB2_pressed(self):
    self.act_duino.comm("motor_set M1 right "+str(self.mPower.value()))
def mB2_released(self):
    self.act_duino.comm("motor_set M1 brake 0")
def mB3_pressed(self):
    self.act_duino.comm("motor_set M2 left "+str(self.mPower.value()))
def mB3_released(self):
    self.act_duino.comm("motor_set M2 brake 0")
def mB4_pressed(self):
    self.act_duino.comm("motor_set M2 right "+str(self.mPower.value()))
def mB4_released(self):
    self.act_duino.comm("motor_set M2 brake 0")
def mB5_pressed(self):
    self.act_duino.comm("motor_set M3 left "+str(self.mPower.value()))
def mB5_released(self):
    self.act_duino.comm("motor_set M3 brake 0")
def mB6_pressed(self):
    self.act_duino.comm("motor_set M3 right "+str(self.mPower.value()))
def mB6_released(self):
    self.act_duino.comm("motor_set M3 brake 0")
def mB7_pressed(self):
    self.act_duino.comm("motor_set M4 left "+str(self.mPower.value()))
def mB7_released(self):
    self.act_duino.comm("motor_set M4 brake 0")
def mB8_pressed(self):
    self.act_duino.comm("motor_set M4 right "+str(self.mPower.value()))
def mB8_released(self):
    self.act_duino.comm("motor_set M4 brake 0")
def mPower_changed(self):
    # Mirror the motor-power slider value into its label.
    self.mPVal.setText(str(self.mPower.value()))
if __name__ == "__main__":
FtcGuiApplication(sys.argv)
|
jeffkistler/django-request-utils | refs/heads/master | docs/conf.py | 1 | # -*- coding: utf-8 -*-
#
# Django Request Utils documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 15 20:58:37 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Request Utils'
copyright = u'2010, Jeff Kistler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoRequestUtilsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoRequestUtils.tex', u'Django Request Utils Documentation',
u'Jeff Kistler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangorequestutils', u'Django Request Utils Documentation',
[u'Jeff Kistler'], 1)
]
|
PhiInnovations/mdp28-linux-bsp | refs/heads/master | meta/lib/oe/qa.py | 2 | class ELFFile:
EI_NIDENT = 16
EI_CLASS = 4
EI_DATA = 5
EI_VERSION = 6
EI_OSABI = 7
EI_ABIVERSION = 8
# possible values for EI_CLASS
ELFCLASSNONE = 0
ELFCLASS32 = 1
ELFCLASS64 = 2
# possible value for EI_VERSION
EV_CURRENT = 1
# possible values for EI_DATA
ELFDATANONE = 0
ELFDATA2LSB = 1
ELFDATA2MSB = 2
def my_assert(self, expectation, result):
if not expectation == result:
#print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
raise Exception("This does not work as expected")
def __init__(self, name, bits = 0):
self.name = name
self.bits = bits
self.objdump_output = {}
def open(self):
self.file = file(self.name, "r")
self.data = self.file.read(ELFFile.EI_NIDENT+4)
self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
self.my_assert(self.data[0], chr(0x7f) )
self.my_assert(self.data[1], 'E')
self.my_assert(self.data[2], 'L')
self.my_assert(self.data[3], 'F')
if self.bits == 0:
if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
self.bits = 32
elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
self.bits = 64
else:
# Not 32-bit or 64.. lets assert
raise Exception("ELF but not 32 or 64 bit.")
elif self.bits == 32:
self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
elif self.bits == 64:
self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
else:
raise Exception("Must specify unknown, 32 or 64 bit size.")
self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )
self.sex = self.data[ELFFile.EI_DATA]
if self.sex == chr(ELFFile.ELFDATANONE):
raise Exception("self.sex == ELFDATANONE")
elif self.sex == chr(ELFFile.ELFDATA2LSB):
self.sex = "<"
elif self.sex == chr(ELFFile.ELFDATA2MSB):
self.sex = ">"
else:
raise Exception("Unknown self.sex")
def osAbi(self):
return ord(self.data[ELFFile.EI_OSABI])
def abiVersion(self):
return ord(self.data[ELFFile.EI_ABIVERSION])
def abiSize(self):
return self.bits
def isLittleEndian(self):
return self.sex == "<"
def isBigEngian(self):
return self.sex == ">"
def machine(self):
"""
We know the sex stored in self.sex and we
know the position
"""
import struct
(a,) = struct.unpack(self.sex+"H", self.data[18:20])
return a
def run_objdump(self, cmd, d):
import bb.process
import sys
if self.objdump_output.has_key(cmd):
return self.objdump_output[cmd]
objdump = d.getVar('OBJDUMP', True)
env = os.environ.copy()
env["LC_ALL"] = "C"
env["PATH"] = d.getVar('PATH', True)
try:
bb.note("%s %s %s" % (objdump, cmd, self.name))
self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
return self.objdump_output[cmd]
except Exception, e:
bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
return ""
|
chdecultot/erpnext | refs/heads/develop | erpnext/patches/v5_4/notify_system_managers_regarding_wrong_tax_calculation.py | 30 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.email import sendmail_to_system_managers
from frappe.utils import get_link_to_form
def execute():
    """Notify System Managers of documents whose taxes may be miscalculated.

    Scans submitted selling/buying documents modified since 2015-02-17 that
    apply a discount on Net Total with a non-zero discount amount, then
    emails System Managers a list of links to the suspect records (and also
    prints the list to stdout).  Nothing is fixed automatically, since an
    automatic correction could corrupt the entries.
    """
    wrong_records = []
    for dt in ("Quotation", "Sales Order", "Delivery Note", "Sales Invoice",
            "Purchase Order", "Purchase Receipt", "Purchase Invoice"):
        records = frappe.db.sql_list("""select name from `tab{0}`
            where apply_discount_on = 'Net Total' and ifnull(discount_amount, 0) != 0
            and modified >= '2015-02-17' and docstatus=1""".format(dt))
        if records:
            # Turn each document name into a clickable link for the email.
            records = [get_link_to_form(dt, d) for d in records]
            wrong_records.append([dt, records])
    if wrong_records:
        content = """Dear System Manager,
Due to an error related to Discount Amount on Net Total, tax calculation might be wrong in the following records. We did not fix the tax amount automatically because it can corrupt the entries, so we request you to check these records and amend if you found the calculation wrong.
Please check following Entries:
%s
Regards,
Administrator""" % "\n".join([(d[0] + ": " + ", ".join(d[1])) for d in wrong_records])
        try:
            sendmail_to_system_managers("[Important] [ERPNext] Tax calculation might be wrong, please check.", content)
        except:
            # Best-effort email (e.g. outgoing mail may not be configured);
            # the content is still printed below either way.
            pass
        print("="*50)
        print(content)
        print("="*50)
saintdragon2/python-3-lecture-2015 | refs/heads/master | civil_mid_mid/pymunk-4.0.0/tests/test_space.py | 1 | import pymunk as p
from pymunk import *
from pymunk.vec2d import Vec2d
import unittest
####################################################################
class UnitTestSpace(unittest.TestCase):
    """Unit tests for pymunk.Space: properties, add/remove, point/segment/
    shape queries, spatial reindexing, collision handlers and post-step
    callbacks."""
    def setUp(self):
        # Fresh space holding two dynamic bodies with one circle each:
        # b1/s1 (radius 5) at x=10 and b2/s2 (radius 10) at x=20, so the
        # two circles overlap along the x axis.
        p.reset_shapeid_counter()
        self.s = p.Space()
        self.b1, self.b2 = p.Body(1,3),p.Body(10,100)
        self.s.add(self.b1,self.b2)
        self.b1.position = 10,0
        self.b2.position = 20,0
        self.s1,self.s2 = p.Circle(self.b1,5), p.Circle(self.b2,10)
        self.s.add(self.s1,self.s2)
    def tearDown(self):
        del self.s
        del self.b1, self.b2
        del self.s1, self.s2
    def testProperties(self):
        # Each settable Space property should round-trip its value.
        s = p.Space(15)
        self.assertEqual(s.iterations, 15)
        s.gravity = 10,2
        self.assertEqual(s.gravity.x, 10)
        s.damping = 3
        self.assertEqual(s.damping, 3)
        s.idle_speed_threshold = 4
        self.assertEqual(s.idle_speed_threshold, 4)
        s.sleep_time_threshold = 5
        self.assertEqual(s.sleep_time_threshold, 5)
        s.collision_slop = 6
        self.assertEqual(s.collision_slop, 6)
        s.collision_bias = 8
        self.assertEqual(s.collision_bias, 8)
        s.collision_persistence = 9
        self.assertEqual(s.collision_persistence, 9)
        self.assertEqual(s.enable_contact_graph, False)
        s.enable_contact_graph = True
        self.assertEqual(s.enable_contact_graph, True)
    def testAddRemove(self):
        # Bodies and shapes counts should track add/remove calls.
        s = self.s
        s.remove(self.b1)
        s.add(self.b1)
        b = p.Body()
        s3 = p.Circle(b,2)
        s.add(s3)
        b3 = p.Body(1,1)
        s.add(b3)
        self.assertEqual(len(s.bodies), 3)
        self.assertEqual(len(s.shapes), 3)
        s.remove(self.s2,self.b1,self.s1)
        s.remove(s3)
        self.assertEqual(len(s.bodies), 2)
        self.assertEqual(len(s.shapes), 0)
    def testAddInStep(self):
        # Adding objects from inside a collision callback must be deferred
        # safely and still take effect after the step.
        s = self.s
        b = p.Body(1,2)
        c = p.Circle(b,2)
        def pre_solve(space, arbiter):
            space.add(b,c)
            return True
        s.add_collision_handler(0, 0, pre_solve = pre_solve)
        s.step(.1)
        self.assert_(b in s.bodies)
        self.assert_(c in s.shapes)
    def testRemoveInStep(self):
        # Removing the colliding shapes from inside a callback must work.
        s = self.s
        def pre_solve(space, arbiter):
            space.remove(arbiter.shapes)
            return True
        s.add_collision_handler(0, 0, pre_solve = pre_solve)
        s.step(.1)
        self.assert_(self.s1 not in s.bodies)
        self.assert_(self.s2 not in s.shapes)
    def testPointQueryFirst(self):
        self.assertEqual(self.s.point_query_first((31,0)), None)
        self.assertEqual(self.s.point_query_first((10,0)), self.s1)
        # A shape in a group should be filtered out when that group is given.
        self.s1.group = 1
        self.assertEqual(self.s.point_query_first((10,0)), self.s1)
        self.assertEqual(self.s.point_query_first((10,0), group=1), None)
    def testPointQuery(self):
        b3 = p.Body(1,1)
        b3.position = 19,1
        s3 = p.Circle(b3, 10)
        self.s.add(s3)
        hits = self.s.point_query((23,0))
        self.assert_(self.s1 not in hits)
        self.assert_(self.s2 in hits)
        self.assert_(s3 in hits)
    def testNearestPointQuery(self):
        res = self.s.nearest_point_query((-10,0), 20)
        self.assertEqual(len(res), 1)
        # s1 is centered at (10,0) with radius 5, so its nearest point to
        # (-10,0) is (5,0) at distance 15.
        self.assertEqual(res[0]['distance'], 15)
        self.assertEqual(res[0]['point'], Vec2d(5,0))
        self.assertEqual(res[0]['shape'], self.s1)
        res = self.s.nearest_point_query((-10,0), 15)
        self.assertEqual(len(res), 0)
    def testNearestPointQueryNearest(self):
        res = self.s.nearest_point_query_nearest((-10,0), 200)
        self.assertEqual(res['distance'], 15)
        self.assertEqual(res['point'], Vec2d(5,0))
        self.assertEqual(res['shape'], self.s1)
        res = self.s.nearest_point_query_nearest((-10,0), 15)
        self.assertEqual(res, None)
    def testBBQuery(self):
        bb = p.BB(-7,-7,7,7)
        hits = self.s.bb_query(bb)
        self.assert_(self.s1 in hits)
        self.assert_(self.s2 not in hits)
    def testShapeQuery(self):
        b = p.Body()
        s = p.Circle(b, 2)
        b.position = 20,1
        hits = self.s.shape_query(s)
        self.assert_(self.s1 not in hits)
        self.assert_(self.s2 in hits)
    def testStaticPointQueries(self):
        # Shapes attached to a static (massless) body must also be queryable.
        b = p.Body()
        c = p.Circle(b, 10)
        b.position = -50,-50
        self.s.add(c)
        hit = self.s.point_query_first( (-50,-55) )
        self.assertEqual(hit, c)
        hits = self.s.point_query( (-50,-55) )
        self.assertEqual(hits[0], c)
    def testReindexStatic(self):
        # Moving a static body only shows up in queries after reindexing.
        b = p.Body()
        c = p.Circle(b, 10)
        self.s.add(c)
        b.position = -50,-50
        hit = self.s.point_query_first( (-50,-55) )
        self.assertEqual(hit, None)
        self.s.reindex_static()
        hit = self.s.point_query_first( (-50,-55) )
        self.assertEqual(hit, c)
        b.position = 50,50
        self.s.reindex_shape(c)
        hit = self.s.point_query_first( (50,50) )
        self.assertEqual(hit, c)
    def testReindexStaticCollision(self):
        # Falling body b1 should pass the short segment, but collide with it
        # after the segment is extended and the static index is rebuilt.
        b1 = p.Body(10, p.inf)
        c1 = p.Circle(b1, 10)
        b1.position = 20, 20
        b2 = p.Body()
        s2 = p.Segment(b2, (-10,0), (10,0),1)
        self.s.add(b1,c1)
        self.s.add(s2)
        s2.unsafe_set_b((100,0))
        self.s.gravity = 0, -100
        for x in range(10):
            self.s.step(.1)
        self.assert_(b1.position.y < 0)
        b1.position = 20,20
        b1.velocity = 0,0
        self.s.reindex_static()
        for x in range(10):
            self.s.step(.1)
        self.assert_(b1.position.y > 10)
    def testReindexShape(self):
        b = p.Body()
        c = p.Circle(b, 10)
        self.s.add(c)
        b.position = -50,-50
        hit = self.s.point_query_first( (-50,-55) )
        self.assertEqual(hit, None)
        self.s.reindex_shape(c)
        hit = self.s.point_query_first( (-50,-55) )
        self.assertEqual(hit, c)
    def testSegmentQueries(self):
        self.assertEqual(self.s.segment_query_first( (13,11), (131.01,12) ), None)
        self.assertEqual(self.s.segment_query_first( (13,-11),(131.01,-11) ), None)
        r = self.s.segment_query_first( (10,-100), (10,100) )
        self.assertEqual(r.shape, self.s1)
        self.assertEqual(r.t, 0.475)
        self.assertEqual(r.n, Vec2d(0,-1))
        b3 = p.Body(1,1)
        b3.position = 19,1
        s3 = p.Circle(b3, 10)
        self.s.add(s3)
        hits = self.s.segment_query((16,-100), (16,100))
        hit_shapes = [hit.shape for hit in hits]
        self.assert_(self.s1 not in hit_shapes)
        self.assert_(self.s2 in hit_shapes)
        self.assert_(s3 in hit_shapes)
    def testStaticSegmentQueries(self):
        b = p.Body()
        c = p.Circle(b, 10)
        b.position = -50,-50
        self.s.add(c)
        hit = self.s.segment_query_first( (-70,-50), (-30, -50) )
        self.assertEqual(hit.shape, c)
        hits = self.s.segment_query( (-70,-50), (-30, -50) )
        self.assertEqual(hits[0].shape, c)
    def testCollisionHandlerBegin(self):
        # begin() must fire only once for a persisting contact.
        self.num_of_begins = 0
        def begin(space, arb, data):
            self.num_of_begins += 1
            return True
        self.b1.position = self.b2.position
        self.s.add_collision_handler(0,0, begin, None, None, None, None)
        self.s.step(0.1)
        self.s.step(0.1)
        self.assertEqual(self.num_of_begins, 1)
    def testCollisionHandlerPreSolve(self):
        # pre_solve() should receive the shapes (in collision-type order),
        # the contacts and the space; extra data is passed through.
        self.begin_shapes = None
        self.begin_contacts = None
        self.begin_space = None
        self.s1.collision_type = 1
        self.s2.collision_type = 2
        def pre_solve(space, arb, test_self):
            test_self.begin_shapes = arb.shapes
            test_self.begin_contacts = arb.contacts
            test_self.begin_space = space
            return True
        for x in range(100):
            self.s.step(0.1)
        self.s.add_collision_handler(1,2, None, pre_solve, None, None, self)
        self.s.step(0.1)
        self.assertEqual(self.s1, self.begin_shapes[0])
        self.assertEqual(self.s2, self.begin_shapes[1])
        self.assertEqual(self.begin_space, self.s)
    def testCollisionHandlerPostSolve(self):
        # is_first_contact is True only on the first step of a contact.
        self.first_contact = None
        def post_solve(space, arb, test_self):
            self.first_contact = arb.is_first_contact
            return True
        self.s.add_collision_handler(0,0, None, None, post_solve, None, self)
        self.s.step(0.1)
        self.assert_(self.first_contact)
        self.s.step(0.1)
        self.assertFalse(self.first_contact)
    def testPostStepCallback(self):
        # Post-step callbacks registered with the same key (arb.shapes[0])
        # should be deduplicated within a step; removing shapes is deferred.
        self.number_of_calls = 0
        def f(obj, shapes, test_self):
            for shape in shapes:
                self.s.remove(shape)
            test_self.number_of_calls += 1
        def pre_solve(space, arb):
            space.add_post_step_callback(f, arb.shapes[0], arb.shapes, test_self = self)
            return True
        self.s.add_collision_handler(0, 0, None, pre_solve, None, None)
        self.s.step(0.1)
        self.assertEqual(self.s.shapes, [])
        self.s.add(self.s1, self.s2)
        self.s.step(0.1)
        self.assertEqual(self.s.shapes, [])
        self.s.add(self.s1, self.s2)
        self.s.add_collision_handler(0, 0, None, None, None, None)
        self.s.step(0.1)
        self.assertEqual(self.number_of_calls, 2)
####################################################################
if __name__ == "__main__":
print ("testing pymunk version " + p.version)
unittest.main() |
felixrieseberg/lets-encrypt-preview | refs/heads/iis | letsencrypt/tests/continuity_auth_test.py | 1 | """Test the ContinuityAuthenticator dispatcher."""
import unittest
import mock
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
class PerformTest(unittest.TestCase):
    """Test client perform function."""
    def setUp(self):
        from letsencrypt.continuity_auth import ContinuityAuthenticator
        self.auth = ContinuityAuthenticator(
            mock.MagicMock(server="demo_server.org"), None)
        # Stub the two sub-authenticators so perform() dispatch can be
        # observed without any real client interaction; gen_client_resp
        # produces a distinguishable dummy string per challenge.
        self.auth.rec_token.perform = mock.MagicMock(
            name="rec_token_perform", side_effect=gen_client_resp)
        self.auth.proof_of_pos.perform = mock.MagicMock(
            name="proof_of_pos_perform", side_effect=gen_client_resp)
    def test_rec_token1(self):
        # A single recovery token is routed to rec_token.perform.
        token = achallenges.RecoveryToken(challb=None, domain="0")
        responses = self.auth.perform([token])
        self.assertEqual(responses, ["RecoveryToken0"])
    def test_rec_token5(self):
        # Multiple tokens keep their order in the response list.
        tokens = []
        for i in xrange(5):
            tokens.append(achallenges.RecoveryToken(challb=None, domain=str(i)))
        responses = self.auth.perform(tokens)
        self.assertEqual(len(responses), 5)
        for i in xrange(5):
            self.assertEqual(responses[i], "RecoveryToken%d" % i)
    def test_pop_and_rec_token(self):
        # Mixed challenge types are each dispatched to the right handler
        # while response order matches input order.
        achalls = []
        for i in xrange(4):
            if i % 2 == 0:
                achalls.append(achallenges.RecoveryToken(challb=None,
                                                         domain=str(i)))
            else:
                achalls.append(achallenges.ProofOfPossession(challb=None,
                                                             domain=str(i)))
        responses = self.auth.perform(achalls)
        self.assertEqual(len(responses), 4)
        for i in xrange(4):
            if i % 2 == 0:
                self.assertEqual(responses[i], "RecoveryToken%d" % i)
            else:
                self.assertEqual(responses[i], "ProofOfPossession%d" % i)
    def test_unexpected(self):
        # Non-continuity challenges (e.g. DVSNI) must be rejected.
        self.assertRaises(
            errors.LetsEncryptContAuthError, self.auth.perform, [
                achallenges.DVSNI(challb=None, domain="0", key="invalid_key")])
    def test_chall_pref(self):
        self.assertEqual(
            self.auth.get_chall_pref("example.com"),
            [challenges.ProofOfPossession, challenges.RecoveryToken])
class CleanupTest(unittest.TestCase):
    """Test the Authenticator cleanup function."""
    def setUp(self):
        from letsencrypt.continuity_auth import ContinuityAuthenticator
        self.auth = ContinuityAuthenticator(
            mock.MagicMock(server="demo_server.org"), None)
        # Replace rec_token.cleanup with a mock so calls can be inspected.
        self.mock_cleanup = mock.MagicMock(name="rec_token_cleanup")
        self.auth.rec_token.cleanup = self.mock_cleanup
    def test_rec_token2(self):
        # Each recovery token is passed to rec_token.cleanup, in order.
        token1 = achallenges.RecoveryToken(challb=None, domain="0")
        token2 = achallenges.RecoveryToken(challb=None, domain="1")
        self.auth.cleanup([token1, token2])
        self.assertEqual(self.mock_cleanup.call_args_list,
                         [mock.call(token1), mock.call(token2)])
    def test_unexpected(self):
        # A non-continuity challenge mixed in must raise.
        token = achallenges.RecoveryToken(challb=None, domain="0")
        unexpected = achallenges.DVSNI(challb=None, domain="0", key="dummy_key")
        self.assertRaises(errors.LetsEncryptContAuthError,
                          self.auth.cleanup, [token, unexpected])
def gen_client_resp(chall):
    """Build a dummy response string: the challenge's class name followed
    by its domain."""
    kind = type(chall).__name__
    return "{0}{1}".format(kind, chall.domain)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
KamranMackey/CloudBot | refs/heads/gonzobot | plugins/minecraft_ping.py | 4 | import socket
from mcstatus import MinecraftServer
from cloudbot import hook
mc_colors = [('\xa7f', '\x0300'), ('\xa70', '\x0301'), ('\xa71', '\x0302'), ('\xa72', '\x0303'),
('\xa7c', '\x0304'), ('\xa74', '\x0305'), ('\xa75', '\x0306'), ('\xa76', '\x0307'),
('\xa7e', '\x0308'), ('\xa7a', '\x0309'), ('\xa73', '\x0310'), ('\xa7b', '\x0311'),
('\xa71', '\x0312'), ('\xa7d', '\x0313'), ('\xa78', '\x0314'), ('\xa77', '\x0315'),
('\xa7l', '\x02'), ('\xa79', '\x0310'), ('\xa7o', ''), ('\xa7m', '\x13'),
('\xa7r', '\x0f'), ('\xa7n', '\x15')]
def format_colors(description):
    """Translate Minecraft formatting codes in *description* into their IRC
    control-code equivalents, using the mc_colors mapping table."""
    text = description
    for mc_code, irc_code in mc_colors:
        text = text.replace(mc_code, irc_code)
    # The "obfuscated" code (\xa7k) has no IRC counterpart; strip it.
    return text.replace("\xa7k", "")
@hook.command("mcping", "mcp")
def mcping(text):
    """<server[:port]> - gets info about the Minecraft server at <server[:port]>"""
    # Resolve the server address (may consult SRV records via mcstatus).
    try:
        server = MinecraftServer.lookup(text)
    except (IOError, ValueError) as e:
        return e
    # Query the server status, translating the various network failures
    # into short user-facing messages.
    try:
        s = server.status()
    except socket.gaierror:
        return "Invalid hostname"
    except socket.timeout:
        return "Request timed out"
    except ConnectionRefusedError:
        return "Connection refused"
    except ConnectionError:
        return "Connection error"
    except (IOError, ValueError) as e:
        return "Error pinging server: {}".format(e)
    # The MOTD may arrive as a dict ({"text": ...}) or a plain string
    # depending on the server; normalize whitespace and convert the
    # Minecraft color codes to IRC codes either way.
    if isinstance(s.description, dict):
        description = format_colors(" ".join(s.description["text"].split()))
    else:
        description = format_colors(" ".join(s.description.split()))
    # \x02/\x0f are IRC bold/reset control codes; embedded newlines in the
    # MOTD are flattened into " - " separators.
    if s.latency:
        return "{}\x0f - \x02{}\x0f - \x02{:.1f}ms\x02" \
            " - \x02{}/{}\x02 players".format(description, s.version.name, s.latency,
                                              s.players.online, s.players.max).replace("\n", "\x0f - ")
    else:
        return "{}\x0f - \x02{}\x0f" \
            " - \x02{}/{}\x02 players".format(description, s.version.name,
                                              s.players.online, s.players.max).replace("\n", "\x0f - ")
|
matthaywardwebdesign/rethinkdb | refs/heads/next | external/v8_3.30.33.16/test/mozilla/testcfg.py | 73 | # Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import subprocess
import tarfile
from testrunner.local import testsuite
from testrunner.objects import testcase
MOZILLA_VERSION = "2010-06-29"
EXCLUDED = ["CVS"]
FRAMEWORK = """
browser.js
shell.js
jsref.js
template.js
""".split()
TEST_DIRS = """
ecma
ecma_2
ecma_3
js1_1
js1_2
js1_3
js1_4
js1_5
""".split()
class MozillaTestSuite(testsuite.TestSuite):
    """Test suite adapter for the Mozilla JS test collection.

    Discovers tests under data/<TEST_DIRS>, builds the per-test d8 command
    line (shell emulation plus any shell.js framework files on the path to
    the test) and knows how to download/cache the Mozilla test data.
    Python 2 code (xrange, reduce, print-era idioms).
    """
    def __init__(self, name, root):
        super(MozillaTestSuite, self).__init__(name, root)
        self.testroot = os.path.join(root, "data")
    def ListTests(self, context):
        """Walk the test directories and return a TestCase per *.js file,
        skipping framework files and excluded/hidden directories."""
        tests = []
        for testdir in TEST_DIRS:
            current_root = os.path.join(self.testroot, testdir)
            for dirname, dirs, files in os.walk(current_root):
                for dotted in [x for x in dirs if x.startswith(".")]:
                    dirs.remove(dotted)
                for excluded in EXCLUDED:
                    if excluded in dirs:
                        dirs.remove(excluded)
                # Sort for a deterministic test order across platforms.
                dirs.sort()
                files.sort()
                for filename in files:
                    if filename.endswith(".js") and not filename in FRAMEWORK:
                        testname = os.path.join(dirname[len(self.testroot) + 1:],
                                                filename[:-3])
                        case = testcase.TestCase(self, testname)
                        tests.append(case)
        return tests
    def GetFlagsForTestCase(self, testcase, context):
        """Build the d8 argument list: mode flags, --expose-gc, the shell
        emulation script, every shell.js found on the path from the test
        root down to the test's directory, then the test file itself."""
        result = []
        result += context.mode_flags
        result += ["--expose-gc"]
        result += [os.path.join(self.root, "mozilla-shell-emulation.js")]
        testfilename = testcase.path + ".js"
        testfilepath = testfilename.split(os.path.sep)
        for i in xrange(len(testfilepath)):
            script = os.path.join(self.testroot,
                                  reduce(os.path.join, testfilepath[:i], ""),
                                  "shell.js")
            if os.path.exists(script):
                result.append(script)
        result.append(os.path.join(self.testroot, testfilename))
        return testcase.flags + result
    def GetSourceForTest(self, testcase):
        filename = os.path.join(self.testroot, testcase.path + ".js")
        with open(filename) as f:
            return f.read()
    def IsNegativeTest(self, testcase):
        # By convention, tests expected to fail have a "-n" suffix.
        return testcase.path.endswith("-n")
    def IsFailureOutput(self, output, testpath):
        if output.exit_code != 0:
            return True
        return "FAILED!" in output.stdout
    def DownloadData(self):
        """Fetch (or reuse a cached copy of) the Mozilla test data at
        MOZILLA_VERSION into ./data, recording the checked-out version."""
        old_cwd = os.getcwd()
        os.chdir(os.path.abspath(self.root))
        # Maybe we're still up to date?
        versionfile = "CHECKED_OUT_VERSION"
        checked_out_version = None
        if os.path.exists(versionfile):
            with open(versionfile) as f:
                checked_out_version = f.read()
        if checked_out_version == MOZILLA_VERSION:
            os.chdir(old_cwd)
            return
        # If we have a local archive file with the test data, extract it.
        directory_name = "data"
        directory_name_old = "data.old"
        if os.path.exists(directory_name):
            # Keep the previous checkout around as data.old.
            if os.path.exists(directory_name_old):
                shutil.rmtree(directory_name_old)
            os.rename(directory_name, directory_name_old)
        archive_file = "downloaded_%s.tar.gz" % MOZILLA_VERSION
        if os.path.exists(archive_file):
            with tarfile.open(archive_file, "r:gz") as tar:
                tar.extractall()
            with open(versionfile, "w") as f:
                f.write(MOZILLA_VERSION)
            os.chdir(old_cwd)
            return
        # No cached copy. Check out via CVS, and pack as .tar.gz for later use.
        command = ("cvs -d :pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot"
                   " co -D %s mozilla/js/tests" % MOZILLA_VERSION)
        code = subprocess.call(command, shell=True)
        if code != 0:
            os.chdir(old_cwd)
            raise Exception("Error checking out Mozilla test suite!")
        os.rename(os.path.join("mozilla", "js", "tests"), directory_name)
        shutil.rmtree("mozilla")
        with tarfile.open(archive_file, "w:gz") as tar:
            tar.add("data")
        with open(versionfile, "w") as f:
            f.write(MOZILLA_VERSION)
        os.chdir(old_cwd)
def GetSuite(name, root):
    """Factory hook used by the test runner to instantiate this suite."""
    return MozillaTestSuite(name, root)
|
roadmapper/ansible | refs/heads/devel | lib/ansible/modules/network/exos/exos_vlans.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for exos_vlans
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata block describing the maturity and support
# level of this module (consumed by ansible-doc and the test sanity suite).
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = """
---
module: exos_vlans
version_added: "2.10"
short_description: Manage VLANs on Extreme Networks EXOS devices.
description: This module provides declarative management of VLANs on Extreme Networks EXOS network devices.
author: Jayalakshmi Viswanathan (@jayalakshmiV)
notes:
- Tested against EXOS 30.2.1.8
- This module works with connection C(httpapi).
See L(EXOS Platform Options,../network/user_guide/platform_exos.html)
options:
config:
description: A dictionary of VLANs options
type: list
elements: dict
suboptions:
name:
description:
- Ascii name of the VLAN.
type: str
vlan_id:
description:
- ID of the VLAN. Range 1-4094
type: int
required: True
state:
description:
- Operational state of the VLAN
type: str
choices:
- active
- suspend
default: active
state:
description:
- The state the configuration should be left in
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
"""
EXAMPLES = """
# Using deleted
# Before state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# },
# {
# "config": {
# "name": "vlan_10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# },
# },
# {
# "config": {
# "name": "vlan_20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
# },
# },
# {
# "config": {
# "name": "vlan_30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# },
# }
# ]
# }
# }
- name: Delete attributes of given VLANs
exos_vlans:
config:
- vlan_id: 10
- vlan_id: 20
- vlan_id: 30
state: deleted
# Module Execution Results:
# -------------------------
#
# "after": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# }
# ],
#
# "before": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# },
# {
# "name": "vlan_10",
# "state": "active",
# "vlan_id": 10
# },
# {
# "name": "vlan_20",
# "state": "active",
# "vlan_id": 20
#    },
# {
# "name": "vlan_30",
# "state": "active",
# "vlan_id": 30
# }
# ],
#
# "requests": [
# {
# "data": null,
# "method": "DELETE",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=10"
# },
# {
# "data": null,
# "method": "DELETE",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=20"
# },
# {
# "data": null,
# "method": "DELETE",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=30"
# }
# ]
#
#
# After state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# }
# ]
# }
# }
# Using merged
# Before state:
# -------------
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# }
# ]
# }
# }
- name: Merge provided configuration with device configuration
exos_vlans:
config:
- name: vlan_10
vlan_id: 10
state: active
- name: vlan_20
vlan_id: 20
state: active
- name: vlan_30
vlan_id: 30
state: active
state: merged
# Module Execution Results:
# -------------------------
#
# "after": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# },
# {
# "name": "vlan_10",
# "state": "active",
# "vlan_id": 10
# },
# {
# "name": "vlan_20",
# "state": "active",
# "vlan_id": 20
# },
# {
# "name": "vlan_30",
# "state": "active",
# "vlan_id": 30
# }
# ],
#
# "before": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# }
# ],
#
# "requests": [
# {
# "data": {
# "openconfig-vlan:vlan": [
# {
# "config": {
# "name": "vlan_10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# }
# }
# ]
# },
# "method": "POST",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/"
# },
# {
# "data": {
# "openconfig-vlan:vlan": [
# {
# "config": {
# "name": "vlan_20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
# }
# }
# ]
# },
# "method": "POST",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/"
#     },
#     {
#         "data": {
# "openconfig-vlan:vlan": [
# {
# "config": {
# "name": "vlan_30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# }
# }
# ]
# },
# "method": "POST",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/"
# }
# ]
#
#
# After state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# },
# {
# "config": {
# "name": "vlan_10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# },
# },
# {
# "config": {
# "name": "vlan_20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
# },
# },
# {
# "config": {
# "name": "vlan_30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# },
# }
# ]
# }
# }
# Using overridden
# Before state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# },
# {
# "config": {
# "name": "vlan_10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# },
# },
# {
# "config": {
# "name": "vlan_20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
# },
# },
# {
# "config": {
# "name": "vlan_30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# },
# }
# ]
# }
# }
- name: Override device configuration of all VLANs with provided configuration
exos_vlans:
config:
- name: TEST_VLAN10
vlan_id: 10
state: overridden
# Module Execution Results:
# -------------------------
#
# "after": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# },
# {
# "name": "TEST_VLAN10",
# "state": "active",
# "vlan_id": 10
# },
# ],
#
# "before": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# },
# {
# "name": "vlan_10",
# "state": "active",
# "vlan_id": 10
# },
# {
# "name": "vlan_20",
# "state": "active",
# "vlan_id": 20
# },
# {
# "name": "vlan_30",
# "state": "active",
# "vlan_id": 30
# }
# ],
#
# "requests": [
# {
# "data": {
# "openconfig-vlan:vlan": {
# "vlan": [
# {
# "config": {
# "name": "TEST_VLAN10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# }
# }
# ]
# }
# }
# },
# "method": "PATCH",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/"
# },
# {
# "data": null,
# "method": "DELETE",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=20"
# },
# {
# "data": null,
# "method": "DELETE",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/vlan=30"
# }
# ]
#
#
# After state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# },
# {
# "config": {
# "name": "TEST_VLAN10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# },
# }
# ]
# }
# }
# Using replaced
# Before state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# },
# {
# "config": {
# "name": "vlan_10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# },
# },
# {
# "config": {
# "name": "vlan_20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
# },
# },
# {
# "config": {
# "name": "vlan_30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# },
# }
# ]
# }
# }
- name: Replaces device configuration of listed VLANs with provided configuration
exos_vlans:
config:
- name: Test_VLAN20
vlan_id: 20
- name: Test_VLAN30
vlan_id: 30
state: replaced
# Module Execution Results:
# -------------------------
#
# "after": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# },
# {
# "name": "vlan_10",
# "state": "active",
# "vlan_id": 10
# },
# {
# "name": "TEST_VLAN20",
# "state": "active",
# "vlan_id": 20
# },
# {
# "name": "TEST_VLAN30",
# "state": "active",
# "vlan_id": 30
# }
# ],
#
# "before": [
# {
# "name": "Default",
# "state": "active",
# "vlan_id": 1
# },
# {
# "name": "vlan_10",
# "state": "active",
# "vlan_id": 10
# },
# {
# "name": "vlan_20",
# "state": "active",
# "vlan_id": 20
# },
# {
# "name": "vlan_30",
# "state": "active",
# "vlan_id": 30
# }
# ],
#
# "requests": [
# {
# "data": {
# "openconfig-vlan:vlan": {
# "vlan": [
# {
# "config": {
# "name": "TEST_VLAN20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
#                 }
#             },
#             {
#                 "config": {
# "name": "TEST_VLAN30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# }
# }
# ]
# },
# "method": "PATCH",
# "path": "/rest/restconf/data/openconfig-vlan:vlans/"
# }
# ]
#
# After state:
# -------------
#
# path: /rest/restconf/data/openconfig-vlan:vlans/
# method: GET
# data:
# {
# "openconfig-vlan:vlans": {
# "vlan": [
# {
# "config": {
# "name": "Default",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 1
# },
# },
# {
# "config": {
# "name": "vlan_10",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 10
# },
# },
# {
# "config": {
# "name": "TEST_VLAN20",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 20
# },
# },
# {
# "config": {
# "name": "TEST_VLAN30",
# "status": "ACTIVE",
# "tpid": "oc-vlan-types:TPID_0x8100",
# "vlan-id": 30
# },
# }
# ]
# }
# }
"""
RETURN = """
before:
  description: The configuration prior to the module invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
after:
  description: The resulting configuration after the module invocation.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
requests:
description: The set of requests pushed to the remote device.
returned: always
type: list
sample: [{"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}, {"data": "...", "method": "...", "path": "..."}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.exos.argspec.vlans.vlans import VlansArgs
from ansible.module_utils.network.exos.config.vlans.vlans import Vlans
def main():
    """
    Main entry point for module execution

    :returns: the result from module invocation
    """
    # 'config' is mandatory for the states that push configuration to the device.
    required_if = [('state', 'merged', ('config',)),
                   ('state', 'replaced', ('config',))]
    module = AnsibleModule(argument_spec=VlansArgs.argument_spec, required_if=required_if,
                           supports_check_mode=True)
    result = Vlans(module).execute_module()
    module.exit_json(**result)
# Ansible executes modules as standalone scripts, so guard the entry point.
if __name__ == '__main__':
    main()
|
Betaboxguugi/P6 | refs/heads/master | code/test/manual_vs_framework/tests/test_fd.py | 1 | import os
from test.manual_vs_framework import framework_fd_test
from test.manual_vs_framework.dw import setup
__author__ = 'Arash Michael Sami Kjær'
__maintainer__ = 'Arash Michael Sami Kjær'
number = 100
dbname = 'fd.db'

# Build the test database only once; later runs reuse the existing file.
db_file = './' + dbname
if not os.path.isfile(db_file):
    setup(dbname, number)

framework_fd_test(dbname)
|
djnugent/ardupilot-solo | refs/heads/master | Tools/LogAnalyzer/tests/TestDupeLogData.py | 273 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
    '''test for duplicated data in log, which has been happening on PX4/Pixhawk'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Dupe Log Data"

    def __matchSample(self, sample, sampleStartIndex, logdata):
        '''return the line number where a match is found, otherwise return False'''
        # ignore if all data in sample is the same value
        nSame = 0
        for s in sample:
            if s[1] == sample[0][1]:
                nSame += 1
        if nSame == 20:
            return False
        # scan the whole ATT Pitch series for a 20-sample run identical to `sample`
        data = logdata.channels["ATT"]["Pitch"].listData
        for i in range(sampleStartIndex, len(data)):
            if i == sampleStartIndex:
                continue # skip matching against ourselves
            # compare up to 20 consecutive values starting at i
            j = 0
            while j<20 and (i+j)<len(data) and data[i+j][1] == sample[j][1]:
                j += 1
            if j == 20: # all samples match
                # return the log line number of the duplicated chunk
                return data[i][0]
        return False

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD
        # this could be made more flexible by not hard-coding to use ATT data, could make it dynamic based on whatever is available as long as it is highly variable
        if "ATT" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No ATT log data"
            return
        # pick 10 sample points within the range of ATT data we have
        sampleStartIndices = []
        attStartIndex = 0
        attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData)-1
        # NOTE(review): integer division under Python 2 — step is truncated
        step = attEndIndex / 11
        for i in range(step,attEndIndex-step,step):
            sampleStartIndices.append(i)
        # get 20 datapoints of pitch from each sample location and check for a match elsewhere
        sampleIndex = 0
        for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
            if i == sampleStartIndices[sampleIndex]:
                sample = logdata.channels["ATT"]["Pitch"].listData[i:i+20]
                matchedLine = self.__matchSample(sample, i, logdata)
                if matchedLine:
                    # a duplicate chunk fails the whole test immediately
                    self.result.status = TestResult.StatusType.FAIL
                    self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0],matchedLine)
                    return
                sampleIndex += 1
                if sampleIndex >= len(sampleStartIndices):
                    break
|
dongguangming/django-angular | refs/heads/master | djangular/styling/__init__.py | 14224 | # -*- coding: utf-8 -*-
|
18praveenb/toil-rnaseq-sc | refs/heads/master | src/toil_rnaseq_sc/rnaseq_sc_cgl_plot_functions.py | 1 | #!/usr/bin/env python2.7
# This is a modified version of a source file from the repository "scRNA-Seq-tcc-prep" by the Pachter Lab which can be found here: https://github.com/pachterlab/scRNA-Seq-TCC-prep/blob/201469940e138c2f09bcd058a6291b17794f7c88/notebooks/10xResults.ipynb
# The citation for the paper with which this repository is associated is Ntranos, V., Kamath, G. M., Zhang, J. M., Pachter, L. & Tse, D. N. Fast and accurate single-cell RNA-seq analysis by clustering of transcript-compatibility counts. Genome Biology 17, 112 (2016).
# The entire source of "scRNA-Seq-tcc prep" is also used in Dockerized form in this pipeline.
# The original "scRNA-Seq-TCC-prep" repository was released under GPLv3, as is this repository (and thus this source file). For more details, see the 'README.md' of this repository which contains the full text of the GPL.
from __future__ import print_function
import os
import pickle
import sys
from subprocess import CalledProcessError
from urlparse import urlparse
import numpy as np
from bd2k.util.files import mkdir_p
from sklearn import cluster,manifold
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from toil.lib.docker import dockerCall
from toil_lib.files import tarball_files, copy_files
from toil_lib.urls import s3am_upload
from string import lstrip
# Matplotlib backend nonsense
import matplotlib
if sys.platform == 'darwin':
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Sub-directory (inside the Toil work dir) where the SC3 R script writes its output.
SC3_OUTPUT_DIRECTORY = "SC3"
# File names of the kallisto expression-matrix pieces consumed by SC3.
MATRIX_TSV_FILENAME = "matrix.tsv"
MATRIX_CELLS_FILENAME = "matrix.cells"
# Mount point of the work dir inside the docker container.
DOCKER_WORK_DIR = "/data"
# TODO: Refactor to use ids
def run_data_analysis(job, config, tcc_matrix_id, pwise_dist_l1_id, nonzero_ec_id, kallisto_matrix_id, matrix_tsv_id, matrix_cells_id):
    """
    Generates graphs and plots of results. Uploads images to savedir location.
    :param job: toil job
    :param config: toil job configuration
    :param tcc_matrix_id: jobstore location of TCC matrix (.dat)
    :param pwise_dist_l1_id: jobstore location of L1 pairwise distance (.dat)
    :param nonzero_ec_id: jobstore loation of nonzero ec (.dat)
    :param kallisto_matrix_id: id of kallisto output matrix (.ec)
    :param matrix_tsv_id: id of kallisto output matrix (.tsv)
    :param matrix_cells_id: id of kallisto output matrix (.cells)
    :return: jobstore file id of a tarball with every generated plot (and SC3 output if it ran)
    """
    # source: https://github.com/pachterlab/scRNA-Seq-TCC-prep (/blob/master/notebooks/10xResults.ipynb)
    job.fileStore.logToMaster('Performing data analysis')
    # Fetch every input from the job store into a scratch directory. The
    # returned paths are re-bound below once the pickles are loaded; the
    # .tsv/.cells files only need to exist on disk for the SC3 docker call.
    work_dir = job.fileStore.getLocalTempDir()
    tcc_matrix = job.fileStore.readGlobalFile(tcc_matrix_id, os.path.join(work_dir, "TCC_matrix.dat"))
    pwise_dist_l1 = job.fileStore.readGlobalFile(pwise_dist_l1_id, os.path.join(work_dir, "pwise_dist_L1.dat"))
    nonzero_ec = job.fileStore.readGlobalFile(nonzero_ec_id, os.path.join(work_dir, "nonzero_ec.dat"))
    kallisto_matrix = job.fileStore.readGlobalFile(kallisto_matrix_id, os.path.join(work_dir, 'kallisto_matrix.ec'))
    matrix_tsv = job.fileStore.readGlobalFile(matrix_tsv_id, os.path.join(work_dir, MATRIX_TSV_FILENAME))
    matrix_cells = job.fileStore.readGlobalFile(matrix_cells_id, os.path.join(work_dir, MATRIX_CELLS_FILENAME))
    ##############################################################
    # load dataset
    with open(os.path.join(work_dir, "TCC_matrix.dat"), 'rb') as f:
        tcc_matrix = pickle.load(f)
    with open(os.path.join(work_dir, "pwise_dist_L1.dat"), 'rb') as f:
        pwise_dist_l1 = pickle.load(f)
    with open(os.path.join(work_dir, "nonzero_ec.dat"), 'rb') as f:
        nonzero_ec = pickle.load(f)
    ecfile_dir = os.path.join(work_dir, 'kallisto_matrix.ec')
    eclist = np.loadtxt(ecfile_dir, dtype=str)
    tcc = tcc_matrix.T
    # Normalize each cell (column) to unit L1 norm.
    T_norm = normalize(tcc_matrix, norm='l1', axis=0)
    t_normt = T_norm.transpose()
    num_of_cells = np.shape(tcc_matrix)[1]
    print("NUM_OF_CELLS =", num_of_cells)
    print("NUM_OF_nonzero_EC =", np.shape(tcc_matrix)[0])
    #################################
    # Map each equivalence class index to its list of transcript ids.
    EC_dict = {}
    for i in range(np.shape(eclist)[0]):
        EC_dict[i] = [int(x) for x in eclist[i, 1].split(',')]
    union = set()
    for i in nonzero_ec:
        new = [tx for tx in EC_dict[i] if tx not in union]  # filter out previously seen transcripts
        union.update(new)
    NUM_OF_TX_inTCC = len(union)
    print("NUM_OF_Transcripts =", NUM_OF_TX_inTCC)  # number of distinct transcripts in nonzero eq. classes
    ##############################################################
    # inspect
    # sort eq. classes based on size
    size_of_ec = [len(EC_dict[i]) for i in nonzero_ec]
    ec_idx = [i[0] for i in sorted(enumerate(size_of_ec), key=lambda x: x[1])]
    index_ec = np.array(ec_idx)
    ec_sort_map = {}
    nonzero_ec_srt = []  # init
    for i in range(len(nonzero_ec)):
        nonzero_ec_srt += [nonzero_ec[index_ec[i]]]
        ec_sort_map[nonzero_ec[index_ec[i]]] = i
    sumi = np.array(tcc_matrix.sum(axis=1))
    sumi_sorted = sumi[index_ec]
    total_num_of_umis = int(sumi_sorted.sum())
    total_num_of_umis_per_cell = np.array(tcc_matrix.sum(axis=0))[0, :]
    print("Total number of UMIs =", total_num_of_umis)
    #################################
    # UMI counts per cell, sorted descending.
    fig, ax1 = plt.subplots()
    ax1.plot(sorted(total_num_of_umis_per_cell)[::-1], 'b-', linewidth=2.0)
    ax1.set_title('UMI counts per cell')
    ax1.set_xlabel('cells (sorted by UMI counts)')
    ax1.set_ylabel('UMI counts')
    ax1.set_yscale("log", nonposy='clip')
    ax1.grid(True)
    ax1.grid(True, 'minor')
    umi_counts_per_cell = os.path.join(work_dir, "UMI_counts_per_cell.png")
    plt.savefig(umi_counts_per_cell, format='png')
    # UMI counts per equivalence class, sorted descending.
    fig, ax1 = plt.subplots()
    ax1.plot(sorted(sumi.reshape(np.shape(sumi)[0]))[::-1], 'r-', linewidth=2.0)
    ax1.set_title('UMI counts per eq. class')
    ax1.set_xlabel('ECs (sorted by UMI counts)')
    ax1.set_ylabel('UMI counts')
    ax1.set_yscale("log", nonposy='clip')
    ax1.grid(True)
    ax1.grid(True, 'minor')
    umi_counts_per_class = os.path.join(work_dir, "UMI_counts_per_class.png")
    plt.savefig(umi_counts_per_class, format='png')
    # UMI counts vs number of nonzero ECs per cell (log-log scatter).
    cell_nonzeros = np.array(((T_norm != 0)).sum(axis=0))[0]
    fig, ax1 = plt.subplots()
    ax1.plot(total_num_of_umis_per_cell, cell_nonzeros, '.g', linewidth=2.0)
    ax1.set_title('UMI counts vs nonzero ECs')
    ax1.set_xlabel('total num of umis per cell')
    ax1.set_ylabel('total num of nonzero ecs per cell')
    ax1.set_yscale("log", nonposy='clip')
    # BUG FIX: the x-axis log scale takes `nonposx`, not `nonposy` — the
    # original call passed the y-axis keyword to set_xscale.
    ax1.set_xscale("log", nonposx='clip')
    ax1.grid(True)
    ax1.grid(True, 'minor')
    umi_counts_vs_nonzero_ecs = os.path.join(work_dir, "UMI_counts_vs_nonzero_ECs.png")
    plt.savefig(umi_counts_vs_nonzero_ecs, format='png')
    # TCC MEAN-VARIANCE
    TCC_var = np.var(tcc.todense(), axis=0)
    TCC_mean = np.mean(tcc.todense(), axis=0)
    TCC_mean = np.array(TCC_mean)[0]
    TCC_var = np.array(TCC_var)[0]
    fig = plt.figure()
    N = tcc.sum()
    C = tcc.shape[0]
    ax = plt.gca()
    ax.plot(TCC_mean, TCC_var, '.', c='blue', alpha=0.5, markeredgecolor='none')
    xlims = [0.0001, 10 * TCC_mean.max()]
    ax.set_xlim(xlims)
    ax.set_ylim([0.0001, 10 * TCC_var.max()])
    ax.set_yscale('symlog')
    ax.set_xscale('symlog')
    # Reference curves: var = (C-1)*mean^2 (green) and var = mean (dashed black).
    ax.plot(xlims, [(C - 1) * (xlims[0]) ** 2, (C - 1) * (xlims[1]) ** 2], color='g', linestyle='-', linewidth=2)
    ax.plot(xlims, [(xlims[0]), (xlims[1])], color='k', linestyle='--', linewidth=1)
    ax.set_title("TCC Mean-Variance [" + str(tcc.shape[1]) + " TCCs in " + str(C) + " Cells]")
    ax.set_xlabel("mean(TCC)")
    ax.set_ylabel("var(TCC)")
    tcc_mean_variance = os.path.join(work_dir, "TCC_mean_variance.png")
    plt.savefig(tcc_mean_variance, format='png')
    ##############################################################
    # clustering
    #################################
    # t-SNE
    x_tsne = tSNE_pairwise(2, pwise_dist_l1)
    #################################
    # spectral clustering
    n_clusters = config.n_clusters
    # Spectral clustering needs similarities; invert the distance matrix.
    similarity_mat = pwise_dist_l1.max() - pwise_dist_l1
    labels_spectral = spectral(n_clusters, similarity_mat)
    spectral_clustering = stain_plot(x_tsne, labels_spectral, [], "TCC -- tSNE, spectral clustering with " + str(n_clusters) + " n_clusters", work_dir=work_dir,
                                     filename="spectral_clustering_tSNE")
    #################################
    # affinity propagation
    pref = -np.median(pwise_dist_l1) * np.ones(num_of_cells)
    labels_aff = AffinityProp(-pwise_dist_l1, pref, 0.5)
    np.unique(labels_aff)
    affinity_propagation_tsne = stain_plot(x_tsne, labels_aff, [], "TCC -- tSNE, affinity propagation", work_dir,
                                           "affinity_propagation_tSNE")
    #################################
    # pca
    pca = PCA(n_components=2)
    x_pca = pca.fit_transform(t_normt.todense())
    affinity_propagation_pca = stain_plot(x_pca, labels_aff, [], "TCC -- PCA, affinity propagation", work_dir,
                                          "affinity_propagation_PCA")
    # SC3: run the R-based consensus clustering in docker; tolerate failure
    # and simply drop its output from the tarball if the container errors out.
    outfilePath = job.fileStore.getLocalTempFile()
    SC3OutputPath = os.path.join(work_dir, SC3_OUTPUT_DIRECTORY)
    os.mkdir(SC3OutputPath)
    shouldUseSC3Output = True
    with open(outfilePath, "r+") as outfile:
        def dockerPathTo(resource): return os.path.join(DOCKER_WORK_DIR, resource)
        def boolForR(aBool): return "TRUE" if aBool else "FALSE"
        try:
            dockerCall(job, tool='rscript', workDir=work_dir, parameters=map(str, [config.min_k, config.max_k, dockerPathTo(MATRIX_TSV_FILENAME), dockerPathTo(MATRIX_CELLS_FILENAME), dockerPathTo(SC3_OUTPUT_DIRECTORY), boolForR(config.use_estimated_k), boolForR(config.debug)]), outfile=outfile)
        except CalledProcessError:
            # Surface the container log for debugging, then continue without SC3.
            outfile.seek(0, 0)
            job.fileStore.logToMaster("Docker failed with the following log: " + str(outfile.read()))
            shouldUseSC3Output = False
    # build tarfile of output plots
    output_files = [umi_counts_per_cell, umi_counts_per_class, umi_counts_vs_nonzero_ecs, tcc_mean_variance,
                    spectral_clustering, affinity_propagation_tsne, affinity_propagation_pca, outfilePath] + ([os.path.join(work_dir, SC3_OUTPUT_DIRECTORY, x) for x in os.listdir(SC3OutputPath)] if shouldUseSC3Output else [])
    tarball_files(tar_name='single_cell_plots.tar.gz', file_paths=output_files, output_dir=work_dir)
    # return file id for consolidation
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'single_cell_plots.tar.gz'))
def AffinityProp(D, pref, damp):
    """
    Perform SKLearn affinity propagation (clustering) on D, returning labels.
    :param D: precomputed affinity matrix
    :param pref: preference parameter for the affinity propagation
    :param damp: damping parameter for the affinity propagation
    :return: cluster labels
    """
    model = cluster.AffinityPropagation(affinity='precomputed',
                                        preference=pref, damping=damp, verbose=True)
    return model.fit_predict(D)
def spectral(n, D):
    """
    Perform spectral clustering on a precomputed similarity matrix.
    :param n: number of clusters
    :param D: similarity matrix to analyze
    :return: labels from the spectral clustering
    """
    model = cluster.SpectralClustering(n_clusters=n, affinity='precomputed')
    # fit() returns the estimator itself, so the labels can be read directly.
    return model.fit(D).labels_
def tSNE_pairwise(n, D):
    """
    Reduce the precomputed distance matrix D to n dimensions with t-SNE.
    :param n: number of output components
    :param D: precomputed pairwise distance matrix
    :return: t-SNE embedding of D
    """
    reducer = manifold.TSNE(n_components=n, random_state=213,
                            metric='precomputed', n_iter=2000, verbose=1)
    return reducer.fit_transform(D)
def stain_plot(X, labels, stain, title, work_dir, filename, filetype='png', nc=2, ax_lim=0, marksize=46):
    """Scatter-plot a 2-D embedding, coloring points by cluster label.

    Labels listed in *stain* are drawn as large red stars; every other
    cluster gets its own color sampled evenly from the 24-bit RGB cube.
    :param X: 2-D point matrix from a dimensionality reduction (tSNE/PCA)
    :param labels: per-point cluster labels
    :param stain: labels to highlight ("stain")
    :param title: plot title
    :param work_dir: directory in which the image file is created
    :param filename: name of the file saved to work_dir (without extension)
    :param filetype: extension/format of the created file
    :param nc: number of columns in the legend
    :param ax_lim: axis limit (ax_lim = 3 -> [-3, 3] x [-3, 3] bounding box)
    :param marksize: marker size for points that are NOT stained (stained are always 146)
    :return: full path of the saved image
    """
    file_location = os.path.join(work_dir, filename + "." + filetype)
    unique_labels = np.unique(labels)
    N = len(unique_labels)
    max_value = 16581375  # 255**3: size of the 24-bit RGB cube
    interval = int(max_value / N)
    # Sample N evenly spaced 24-bit color codes and unpack them into
    # (r, g, b) triples scaled to [0, 1].
    color = [((code >> 16 & 255) / float(255),
              (code >> 8 & 255) / float(255),
              (code & 255) / float(255))
             for code in range(0, max_value, interval)]
    plt.figure(figsize=(15, 10))
    for idx, label in enumerate(unique_labels):
        ind = np.squeeze(labels == label)
        if label in stain:
            plt.scatter(X[ind, 0], X[ind, 1], c='red', s=146, edgecolor='black',
                        lw=0.5, alpha=1, marker='*', label=label)
        else:
            plt.scatter(X[ind, 0], X[ind, 1], c=color[idx], s=marksize, edgecolor='lightgray',
                        lw=0.5, label=label)
    plt.title(title)
    plt.gray()
    plt.legend(loc='upper right', bbox_to_anchor=(1.18, 1.01), ncol=nc)
    if ax_lim > 0:
        plt.xlim([-ax_lim, ax_lim])
        plt.ylim([-ax_lim, ax_lim])
    plt.axis('off')
    plt.savefig(file_location, format=filetype)
    return file_location
|
NewBeeStudio/xichao-new | refs/heads/master | xichao/packages/route/special.py | 1 | # -*- coding: utf-8 -*-
from imports import *
################################## Special-column pages ##################################
# Special-columns index page
@app.route('/special_all', methods=['GET'])
def special_all():
    """List every special column in either grid ('all') or list view."""
    try:
        view = request.args.get('view')
        sort = request.args.get('sort')
        page_id = int(request.args.get('page'))
    except Exception:
        abort(404)
    # Only 'time' and 'favor' orderings exist; the toggle link points at the other one.
    if sort == 'favor':
        sort_change_url = '/special_all?view=%s&sort=time&page=1' % (view)
    else:
        sort = 'time'
        sort_change_url = '/special_all?view=%s&sort=favor&page=1' % (view)
    # Likewise the view toggle flips between 'list' and 'all'.
    if view == 'list':
        view_change_url = '/special_all?view=all&sort=%s&page=1' % (sort)
    else:
        view = 'all'
        view_change_url = '/special_all?view=list&sort=%s&page=1' % (sort)
    if view == 'list':
        # List view paginates 5 specials per page.
        specials_pagination = get_all_specials(sort, page_id, 5)
        return render_template('special_all_listView.html', sort=sort, view=view,
                               specials_pagination_list=specials_pagination,
                               author=get_special_author,
                               articles=get_special_article,
                               sort_change_url=sort_change_url,
                               view_change_url=view_change_url)
    # Grid ('all') view paginates 12 specials per page.
    specials_pagination = get_all_specials(sort, page_id, 12)
    return render_template('special_all_allView.html', sort=sort, view=view,
                           specials_pagination_all=specials_pagination,
                           author=get_special_author,
                           articles=get_special_article,
                           sort_change_url=sort_change_url,
                           view_change_url=view_change_url)
# Search within the special-columns list
@app.route('/special_search', methods=['GET'])
def special_search():
    """Search specials by keyword; 404 on a missing or empty query."""
    try:
        search = request.args.get('search')
        if search == '':
            abort(404)
    except Exception:
        abort(404)
    results = get_search_specials(search)
    return render_template('special_search.html',
                           specials_pagination=results,
                           author=get_special_author)
# Special-column detail page
@app.route('/special', methods=['GET'])
def special():
    """Render the detail page of one special column.

    URL pattern: /special?id=2&page=1&sort=time
    Mobile clients get a reduced template; desktop gets the full one.
    """
    try:
        special_id = int(request.args.get('id'))
        page_id = int(request.args.get('page'))
        sort = request.args.get('sort')
    except Exception:
        abort(404)
    # Only two sort orders exist ('favor' and 'time'); the toggle URL flips to the other.
    if (sort != 'favor'):
        sort = 'time'
        sort_change_url = "/special?id=%d&page=1&sort=favor" % (special_id)
    else:
        sort_change_url = "/special?id=%d&page=1&sort=time" % (special_id)
    special = get_special_information(special_id)
    if (special == None):
        abort(404)
    author = get_special_author(special.special_id)
    # articles_pagination is a pagination object; .items holds this page's articles as a list.
    login_user = get_userid_from_session()
    articles_pagination = get_special_article(special_id, page_id, sort, 5)
    related_other_special = get_related_special(special.special_id)
    is_mobile = is_small_mobile_device(request)
    if is_mobile:
        return render_template('mobile_special_detail.html',
                               login_user_id = login_user,
                               is_mobile = is_mobile,
                               root_authorized = root_authorized(),
                               #author_itself = (special.user_id == login_user),
                               has_collected_special = get_special_collect_info(login_user, special_id),
                               has_collected_author = has_collected,
                               sort_change_url = sort_change_url,
                               special_id = special_id,
                               sort = sort,
                               special_favor = special.favor,
                               special_title = special.name,
                               special_author = author,
                               #special_author_slogon = author.slogon,
                               special_introduction = special.introduction,
                               special_style = special.style,
                               special_total_issue = special.total_issue,
                               special_update_frequency = special.update_frequency,
                               special_coin = special.coin,
                               special_image = special.picture,
                               #special_author_avatar = author.photo,
                               articles_pagination = articles_pagination,
                               get_nick_by_userid = get_nick_by_userid)
    else:
        return render_template('special_detail.html',
                               len = len,
                               author = get_special_author,
                               login_user_id = login_user,
                               is_mobile = is_mobile,
                               root_authorized = root_authorized(),
                               #author_itself = (special.user_id == login_user),
                               has_collected_special = get_special_collect_info(login_user, special_id),
                               has_collected_author = has_collected,
                               sort_change_url = sort_change_url,
                               special_id = special_id,
                               sort = sort,
                               other = get_special_author_other,
                               special_favor = special.favor,
                               special_title = special.name,
                               special_author = author,
                               #special_author_slogon = author.slogon,
                               special_introduction = special.introduction,
                               special_style = special.style,
                               special_total_issue = special.total_issue,
                               special_update_frequency = special.update_frequency,
                               special_coin = special.coin,
                               special_image = special.picture,
                               #special_author_avatar = author.photo,
                               articles_pagination = articles_pagination,
                               related_other_special = related_other_special,
                               get_nick_by_userid = get_nick_by_userid)
## 创建专栏界面
@app.route('/create_special')
@login_required
def create_special():
    """Render the special-column creation page (root users only)."""
    # Non-privileged users must not even learn that this page exists.
    if not root_authorized():
        abort(404)
    return render_template('create_special.html')
## 修改专栏界面
@app.route('/modify_special')
@login_required
def modify_special():
    """Render the special-column modification page (root users only)."""
    # Non-privileged users must not even learn that this page exists.
    if not root_authorized():
        abort(404)
    return render_template('modify_special.html')
## 上传专栏题图文件
@app.route('/upload_special_title_image', methods=['GET', 'POST'])
def save_special_title_image():
    """Persist an uploaded column title image and return its public URL."""
    uploaded = request.files['upload_file']
    # Default to the stock placeholder when no acceptable file was sent.
    image_name = 'special_upload_pic.jpg'
    if uploaded and allowed_file(uploaded.filename):
        image_name = get_secure_photoname(uploaded.filename)
        uploaded.save(os.path.join(app.config['SPECIAL_DEST'], image_name))
    return app.config['HOST_NAME'] + '/upload/special/' + image_name
# 调用美图秀秀
@app.route('/upload/tailor/special_title_image')
def upload_special_title_image():
    """Render the image-tailoring (Meitu Xiuxiu) page for the title image."""
    return render_template('upload_special_title_image_tailor.html')
## 完成创建专栏的上传
@app.route('/create_special_finish', methods=['GET'])
@login_required
def create_special_finish():
    """Finish creating a special column from the submitted form fields.

    Returns the new special's id as a string on success, "nick_error" when
    an author nick cannot be resolved to a user, or "failed" on any other
    error.  Only root users may call this endpoint.
    """
    import ast  # local import: safe literal parsing of the author list
    if not root_authorized():
        abort(404)
    try:
        title = request.args.get('title')
        content = request.args.get('content')
        title_image = request.args.get('title_image')
        style = request.args.get('style')
        total_issue = request.args.get('total_issue')
        update_frequency = request.args.get('update_frequency')
    except Exception:
        return "failed"
    authors = []
    try:
        # SECURITY FIX: the author list arrives as a Python-literal string.
        # Parse it with ast.literal_eval instead of eval() so request data
        # can never execute arbitrary code on the server.
        author_list = ast.literal_eval(request.args.get('author_list'))
        for nick in author_list:
            author = get_userid_by_nick(nick)
            if len(author) == 0:
                return "nick_error"
            authors.append(author[0][0])
    except Exception:
        return "failed"
    special_id = create_new_special(name = title,
                                    #user_id = author[0][0],
                                    picture = title_image,
                                    introduction = content,
                                    style = style,
                                    total_issue = total_issue,
                                    update_frequency = update_frequency)
    # Register every resolved author against the new column.
    for author in authors:
        create_new_special_author(special_id, author)
    return str(special_id)
## 完成修改专栏
@app.route('/modify_special_finish', methods=['GET'])
@login_required
def modify_special_finish():
    """Finish modifying a special column from the submitted form fields.

    Returns the special's id as a string on success, "nick_error" when an
    author nick cannot be resolved to a user, or "failed" on any other
    error.  Only root users may call this endpoint.
    """
    import ast  # local import: safe literal parsing of the author list
    if not root_authorized():
        abort(404)
    try:
        title = request.args.get('title')
        content = request.args.get('content')
        title_image = request.args.get('title_image')
        style = request.args.get('style')
        total_issue = request.args.get('total_issue')
        update_frequency = request.args.get('update_frequency')
    except Exception:
        return "failed"
    authors = []
    try:
        # SECURITY FIX: the author list arrives as a Python-literal string.
        # Parse it with ast.literal_eval instead of eval() so request data
        # can never execute arbitrary code on the server.
        author_list = ast.literal_eval(request.args.get('author_list'))
        for nick in author_list:
            author = get_userid_by_nick(nick)
            if len(author) == 0:
                return "nick_error"
            authors.append(author[0][0])
    except Exception:
        return "failed"
    try:
        special_id = modify_special_func(name = title,
                                         #user_id = author[0][0],
                                         authors = authors,
                                         picture = title_image,
                                         introduction = content,
                                         style = style,
                                         total_issue = total_issue,
                                         update_frequency = update_frequency)
        return str(special_id)
    except Exception:
        return "failed"
## 编辑专栏文章
@app.route('/special_article_upload', methods=['GET'])
@login_required
def special_article_upload():
    """Open the editor for writing a new article inside a special column."""
    try:
        special_id = int(request.args.get('id'))
    except Exception:
        abort(404)
    ####TODO
    #author = get_special_information(special_id).user_id
    #login_user = get_userid_from_session()
    if not root_authorized():
        abort(404)
    # Allocate an upload session and remember it (plus the target column)
    # so the later "finish"/"draft" request can tie assets to this article.
    article_session_id = get_article_session_id()
    session['special_article_session_id'] = str(article_session_id)
    session['special_id'] = str(special_id)
    os.makedirs(os.path.join(app.config['ARTICLE_CONTENT_DEST'], str(article_session_id)))
    return render_template('special_article_upload.html')
# 修改专栏文章
@app.route('/special_article_modify/article/<int:article_id>')
@login_required
def special_article_modify(article_id):
    """Open the editor pre-filled with an existing special-column article."""
    article = get_article_information(article_id)
    # Validate that the article belongs to a special column; a missing or
    # non-numeric special_id means the id is bogus.
    try:
        special_id = int(article[0].special_id)
    except Exception:
        abort(404)
    if not root_authorized():
        abort(404)
    # Re-use the article's original upload session for the edited version.
    session['special_id'] = str(article[0].special_id)
    session['special_article_session_id'] = str(article[0].article_session_id)
    return render_template('special_article_modify.html',
                           article=article[0], book=article[2],
                           get_author=get_nick_by_userid)
# 删除专栏文章
@app.route('/special_article_remove', methods=['GET'])
def special_article_remove():
    """Delete one special-column article; returns "success" or "failed"."""
    try:
        article_id = request.args.get('id')
    except Exception:
        return "failed"
    user_id = get_userid_from_session()
    outcome = delete_article_by_article_id(article_id, user_id)
    return "failed" if outcome == 'fail' else "success"
## 上传专栏文章
##TODO:可能是存在数据库中的草稿提交过来的,这时候只需要把is_draft字段更改就行
@app.route('/special_article_finish', methods=['POST'])
def special_article_finish():
    """Publish a special-column article posted from the editor.

    Creates the associated book record, stores the article (is_draft='0'),
    bumps the column's article counter, clears the upload-session keys and
    returns the new article id as a string.  Returns "nick" when the
    submitted author nick is not one of the column's registered authors.
    """
    content = request.form['content']
    title = request.form['title']
    ##TODO: safety filtering of the article title
    title_image=request.form['title_image']
    abstract_abstract_with_img=request.form['abstract']
    # Metadata of the book the article reviews.
    book_picture=request.form['book_picture']
    book_author=request.form['book_author']
    book_press=request.form['book_press']
    book_page_num=request.form['book_page_num']
    book_price=request.form['book_price']
    book_press_time=request.form['book_press_time']
    book_title=request.form['book_title']
    book_ISBN=request.form['book_ISBN']
    book_binding=request.form['book_binding']
    special_author = request.form['special_author']
    # The article is credited to the nick supplied by the editor; it must
    # resolve to a user that is an author of this column.
    try:
        user_id = get_userid_by_nick(special_author)[0][0]
        if not has_special_author(int(session['special_id']), user_id):
            raise Exception
    except Exception:
        return "nick"
    # Build a plain-text abstract capped at ~190 chars plus an ellipsis.
    # NOTE(review): the short branch drops the final character (presumably
    # a trailing newline) before appending '......' — confirm intent.
    abstract_plain_text=get_abstract_plain_text(abstract_abstract_with_img)
    if len(abstract_plain_text)<191:
        abstract=abstract_plain_text[0:len(abstract_plain_text)-1]+'......'
    else:
        abstract=abstract_plain_text[0:190]+'......'
    book_id = create_book(book_picture = book_picture,
                          book_author = book_author,
                          book_press = book_press,
                          book_page_num = book_page_num,
                          book_price = book_price,
                          book_press_time = book_press_time,
                          book_title = book_title,
                          book_ISBN = book_ISBN,
                          book_binding = book_binding)
    article_id=create_article(title = title, content = content,
                              title_image = title_image, user_id = user_id,
                              article_session_id = session['special_article_session_id'],
                              is_draft ='0', special_id = int(session['special_id']),
                              group_id = '3', category_id = '0',
                              abstract = abstract,
                              book_id = book_id)
    # Published (non-draft) articles count toward the column's total.
    update_article_num_for_special(int(session['special_id']),True)
    session.pop('special_id', None)
    session.pop('special_article_session_id', None)
    return str(article_id)
# 上传专栏草稿
@app.route('/special_article_draft',methods=['POST'])
def special_article_draft():
    """Save a special-column article as a draft (is_draft='1').

    Same payload and validation as special_article_finish, but the session
    keys are kept so editing can continue, and the column's article counter
    is not incremented.  Returns the draft article id as a string, or
    "nick" when the author nick is invalid for this column.
    """
    content=request.form['content']
    ##TODO: safety filtering of the article title
    title=request.form['title']
    title_image=request.form['title_image']
    abstract_abstract_with_img=request.form['abstract']
    # Metadata of the book the article reviews.
    book_picture=request.form['book_picture']
    book_author=request.form['book_author']
    book_press=request.form['book_press']
    book_page_num=request.form['book_page_num']
    book_price=request.form['book_price']
    book_press_time=request.form['book_press_time']
    book_title=request.form['book_title']
    book_ISBN=request.form['book_ISBN']
    book_binding=request.form['book_binding']
    special_author = request.form['special_author']
    # The draft is credited to the supplied nick; it must resolve to a
    # registered author of this column.
    try:
        user_id = get_userid_by_nick(special_author)[0][0]
        if not has_special_author(int(session['special_id']), user_id):
            raise Exception
    except Exception:
        return "nick"
    # Build a plain-text abstract capped at ~190 chars plus an ellipsis.
    abstract_plain_text=get_abstract_plain_text(abstract_abstract_with_img)
    if len(abstract_plain_text)<191:
        abstract=abstract_plain_text[0:len(abstract_plain_text)-1]+'......'
    else:
        abstract=abstract_plain_text[0:190]+'......'
    #create_article(title=title,content=content,title_image=title_image,user_id=user_id,article_session_id=session['article_session_id'],is_draft='1',group_id=group_id,category_id=category_id,abstract=abstract)
    book_id=create_book(book_picture=book_picture,book_author=book_author,book_press=book_press,book_page_num=book_page_num,book_price=book_price,book_press_time=book_press_time,book_title=book_title,book_ISBN=book_ISBN,book_binding=book_binding)
    article_id=create_article(title = title, content = content,
                              title_image = title_image, user_id = user_id,
                              article_session_id = session['special_article_session_id'],
                              is_draft ='1', special_id = int(session['special_id']),
                              group_id = '3', category_id = '0',
                              abstract = abstract,
                              book_id = book_id)
    return str(article_id)
################################## 专栏详情页面 ##################################
@app.route('/upload/special/<filename>')
def uploaded_special_image(filename):
    """Serve a previously uploaded special-column image from disk."""
    return send_from_directory(app.config['SPECIAL_DEST'], filename)
|
GaryKriebel/osf.io | refs/heads/develop | website/profile/utils.py | 12 | # -*- coding: utf-8 -*-
import framework
from website.util.permissions import reduce_permissions
from website.filters import gravatar
from website import settings
from modularodm import Q
def get_projects(user):
    """Return the user's contributed nodes that are plain, live projects.

    Registrations, deleted nodes and folders are filtered out.
    """
    criteria = (
        Q('category', 'eq', 'project') &
        Q('is_registration', 'eq', False) &
        Q('is_deleted', 'eq', False) &
        Q('is_folder', 'eq', False)
    )
    return list(user.node__contributed.find(criteria))
def get_public_projects(user):
    """Return only the public subset of the user's projects."""
    return [project for project in get_projects(user) if project.is_public]
def get_gravatar(user, size=None):
    """Return the user's gravatar URL, defaulting to the profile size."""
    return gravatar(
        user, use_ssl=True,
        size=size if size is not None else settings.GRAVATAR_SIZE_PROFILE
    )
def serialize_user(user, node=None, admin=False, full=False):
    """Return a dictionary representation of a registered user.

    :param User user: A User object
    :param Node node: Optional node; adds visibility/permission flags
        for the user's role on that node
    :param bool admin: When True (with node), report fixed read-only,
        non-visible flags instead of the user's real ones
    :param bool full: Include complete user properties (emails, merge
        info, project counts, activity points)
    """
    fullname = user.display_full_name(node=node)
    ret = {
        'id': str(user._primary_key),
        'registered': user.is_registered,
        'surname': user.family_name,
        'fullname': fullname,
        # Long names are elided in the middle to keep UI lists compact.
        'shortname': fullname if len(fullname) < 50 else fullname[:23] + "..." + fullname[-23:],
        'gravatar_url': gravatar(
            user, use_ssl=True,
            size=settings.GRAVATAR_SIZE_ADD_CONTRIBUTOR
        ),
        'active': user.is_active,
    }
    if node is not None:
        # Per-node contributor flags: admins get fixed defaults, otherwise
        # the user's actual visibility and reduced permission on the node.
        if admin:
            flags = {
                'visible': False,
                'permission': 'read',
            }
        else:
            flags = {
                'visible': user._id in node.visible_contributor_ids,
                'permission': reduce_permissions(node.get_permissions(user)),
            }
        ret.update(flags)
    if user.is_registered:
        ret.update({
            'url': user.url,
            'absolute_url': user.absolute_url,
            'display_absolute_url': user.display_absolute_url,
            'date_registered': user.date_registered.strftime("%Y-%m-%d"),
        })
    if full:
        # Add emails: confirmed addresses first, then unconfirmed ones.
        ret['emails'] = [
            {
                'address': each,
                'primary': each == user.username,
                'confirmed': True,
            } for each in user.emails
        ] + [
            {
                'address': each,
                'primary': each == user.username,
                'confirmed': False
            }
            for each in user.unconfirmed_emails
        ]
        # When this account was merged into another, expose the absorbing
        # account so the profile can link to it.
        if user.is_merged:
            merger = user.merged_by
            merged_by = {
                'id': str(merger._primary_key),
                'url': merger.url,
                'absolute_url': merger.absolute_url
            }
        else:
            merged_by = None
        ret.update({
            'number_projects': len(get_projects(user)),
            'number_public_projects': len(get_public_projects(user)),
            'activity_points': user.get_activity_points(),
            'gravatar_url': gravatar(
                user, use_ssl=True,
                size=settings.GRAVATAR_SIZE_PROFILE
            ),
            'is_merged': user.is_merged,
            'merged_by': merged_by,
        })
    return ret
def serialize_contributors(contribs, node, **kwargs):
    """Serialize each contributor of *node* via serialize_user."""
    serialized = []
    for contributor in contribs:
        serialized.append(serialize_user(contributor, node, **kwargs))
    return serialized
def add_contributor_json(user, current_user=None):
    """Return the JSON payload used by the add-contributor widget."""
    # Number of projects shared with the viewing user (0 when anonymous).
    n_projects_in_common = (
        current_user.n_projects_in_common(user) if current_user else 0
    )
    # Most recent employment / education entries, when the profile has any.
    current_employment = user.jobs[0]['institution'] if user.jobs else None
    education = user.schools[0]['institution'] if user.schools else None
    return {
        'fullname': user.fullname,
        'email': user.username,
        'id': user._primary_key,
        'employment': current_employment,
        'education': education,
        'n_projects_in_common': n_projects_in_common,
        'registered': user.is_registered,
        'active': user.is_active,
        'gravatar_url': gravatar(
            user, use_ssl=True,
            size=settings.GRAVATAR_SIZE_ADD_CONTRIBUTOR
        ),
        'profile_url': user.profile_url
    }
def serialize_unregistered(fullname, email):
    """Serialize an unregistered (or not-yet-found) user.

    When an account exists for *email*, reuse the contributor payload but
    override name and email; otherwise build a minimal stub record.
    """
    user = framework.auth.get_user(email=email)
    if user is not None:
        serialized = add_contributor_json(user)
        serialized['fullname'] = fullname
        serialized['email'] = email
        return serialized
    return {
        'fullname': fullname,
        'id': None,
        'registered': False,
        'active': False,
        'gravatar': gravatar(email, use_ssl=True,
                             size=settings.GRAVATAR_SIZE_ADD_CONTRIBUTOR),
        'email': email,
    }
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.3/tests/modeltests/m2m_intermediary/tests.py | 92 | from datetime import datetime
from django.test import TestCase
from models import Reporter, Article, Writer
class M2MIntermediaryTests(TestCase):
    """Exercise a many-to-many relation modelled through an explicit
    intermediary: ``Writer`` rows link ``Reporter`` and ``Article``."""

    def test_intermeiary(self):
        # NOTE(review): method name has a typo ("intermeiary"); renaming
        # would change the test id, so it is left as-is.
        r1 = Reporter.objects.create(first_name="John", last_name="Smith")
        r2 = Reporter.objects.create(first_name="Jane", last_name="Doe")
        a = Article.objects.create(
            headline="This is a test", pub_date=datetime(2005, 7, 27)
        )
        w1 = Writer.objects.create(reporter=r1, article=a, position="Main writer")
        w2 = Writer.objects.create(reporter=r2, article=a, position="Contributor")
        # Writers attached to the article, ordered by position descending.
        self.assertQuerysetEqual(
            a.writer_set.select_related().order_by("-position"), [
                ("John Smith", "Main writer"),
                ("Jane Doe", "Contributor"),
            ],
            lambda w: (unicode(w.reporter), w.position)
        )
        # Each Writer row resolves back to its reporter and article.
        self.assertEqual(w1.reporter, r1)
        self.assertEqual(w2.reporter, r2)
        self.assertEqual(w1.article, a)
        self.assertEqual(w2.article, a)
        # The reverse relation from a reporter yields only that reporter's rows.
        self.assertQuerysetEqual(
            r1.writer_set.all(), [
                ("John Smith", "Main writer")
            ],
            lambda w: (unicode(w.reporter), w.position)
        )
|
awemulya/fieldsight-kobocat | refs/heads/master | onadata/apps/viewer/south_migrations/0010_auto__add_field_export_filepath.py | 13 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the nullable ``filepath`` column to the
    ``odk_viewer_export`` table."""

    def forwards(self, orm):
        """Apply the migration: create ``Export.filepath``."""
        # Adding field 'Export.filepath'
        db.add_column('odk_viewer_export', 'filepath', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop ``Export.filepath``."""
        # Deleting field 'Export.filepath'
        db.delete_column('odk_viewer_export', 'filepath')

    # Frozen ORM snapshot used by South while running this migration.
    # Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'logger.instance': {
            'Meta': {'object_name': 'Instance'},
            'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
            'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.SurveyType']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['logger.XForm']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'logger.surveytype': {
            'Meta': {'object_name': 'SurveyType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'logger.xform': {
            'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
            'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
            'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
            'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_viewer.columnrename': {
            'Meta': {'object_name': 'ColumnRename'},
            'column_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'xpath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'odk_viewer.export': {
            'Meta': {'object_name': 'Export'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'export_type': ('django.db.models.fields.CharField', [], {'default': "'xls'", 'max_length': '10'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'filepath': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.XForm']"})
        },
        'odk_viewer.instancemodification': {
            'Meta': {'object_name': 'InstanceModification'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'modifications'", 'to': "orm['logger.Instance']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'xpath': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'odk_viewer.parsedinstance': {
            'Meta': {'object_name': 'ParsedInstance'},
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'parsed_instance'", 'unique': 'True', 'to': "orm['logger.Instance']"}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        }
    }

    complete_apps = ['viewer']
|
dsajkl/123 | refs/heads/master | cms/djangoapps/contentstore/views/xblock.py | 64 | """
Views dedicated to rendering xblocks.
"""
from __future__ import absolute_import
import logging
import mimetypes
from xblock.core import XBlock
from django.conf import settings
from django.http import Http404, HttpResponse
log = logging.getLogger(__name__)
def xblock_resource(request, block_type, uri): # pylint: disable=unused-argument
    """
    Return a package resource for the specified XBlock.

    Looks up the XBlock class registered under ``block_type`` and streams
    the packaged file named by ``uri``, with a MIME type guessed from the
    file extension.  Any failure is reported to the client as a 404.
    """
    try:
        xblock_class = XBlock.load_class(block_type, settings.XBLOCK_SELECT_FUNCTION)
        content = xblock_class.open_local_resource(uri)
    except IOError:
        # A missing resource is routine; log quietly at info level.
        log.info('Failed to load xblock resource', exc_info=True)
        raise Http404
    except Exception: # pylint: disable-msg=broad-except
        # Unexpected errors are logged loudly but still surfaced as 404 so
        # no stack trace leaks to the client.
        log.error('Failed to load xblock resource', exc_info=True)
        raise Http404
    mimetype, _ = mimetypes.guess_type(uri)
    # NOTE(review): 'mimetype' is the legacy HttpResponse kwarg (older
    # Django spelling of content_type) — matches this codebase's Django
    # version; confirm before upgrading.
    return HttpResponse(content, mimetype=mimetype)
|
jugonzalez/topology_lib_scapy | refs/heads/master | lib/topology_lib_scapy/library.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
topology_lib_scapy communication library implementation.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
from __future__ import with_statement
class Shell:
    """
    Context manager that drives a remote interactive scapy session.

    Usage:
    ::
        with Shell(enode) as ctx:
            ctx.cmd(command)

    Entering the context launches ``scapy`` on the node's bash shell and
    extends its import path; leaving it terminates the interpreter.
    """

    def __init__(self, enode):
        self.enode = enode
        self.scapy_prompt = '>>> '

    def __enter__(self):
        """Start scapy remotely and make "." and "/tmp" importable."""
        shell = self.enode.get_shell('bash')
        shell.send_command('scapy', matches=self.scapy_prompt)
        shell.send_command('import sys', matches=self.scapy_prompt)
        shell.send_command('sys.path.append(".")', matches=self.scapy_prompt)
        shell.send_command('sys.path.append("/tmp")', matches=self.scapy_prompt)
        return self

    def __exit__(self, type, value, traceback):
        """Terminate the remote scapy interpreter."""
        self.enode.get_shell('bash').send_command('exit()')

    def cmd(self, command):
        """
        Run an instruction in the remote scapy interpreter.

        :param command: instruction to execute remotely
        :type command: string"
        """
        shell = self.enode.get_shell('bash')
        shell.send_command(command, matches=self.scapy_prompt)
        return shell.get_response()
__all__ = [
    # FIX: this module defines ``Shell`` and no ``CLI`` symbol; exporting
    # 'CLI' made ``from topology_lib_scapy.library import *`` fail with
    # AttributeError.
    'Shell'
]
|
megaumi/django | refs/heads/master | tests/contenttypes_tests/tests.py | 81 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.apps.registry import Apps, apps
from django.contrib.contenttypes import management
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import checks
from django.db import connections, models
from django.test import TestCase, override_settings
from django.test.utils import captured_stdout
from django.utils.encoding import force_str, force_text
from .models import Article, Author, SchemeIncludedURL
@override_settings(ROOT_URLCONF='contenttypes_tests.urls')
class ContentTypesViewsTests(TestCase):
    """Tests for the contenttypes shortcut view (/shortcut/<ct_id>/<pk>/)."""

    @classmethod
    def setUpTestData(cls):
        # don't use the manager because we want to ensure the site exists
        # with pk=1, regardless of whether or not it already exists.
        cls.site1 = Site(pk=1, domain='testserver', name='testserver')
        cls.site1.save()
        cls.author1 = Author.objects.create(name='Boris')
        cls.article1 = Article.objects.create(
            title='Old Article', slug='old_article', author=cls.author1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        cls.article2 = Article.objects.create(
            title='Current Article', slug='current_article', author=cls.author1,
            date_created=datetime.datetime(2007, 9, 17, 21, 22, 23)
        )
        cls.article3 = Article.objects.create(
            title='Future Article', slug='future_article', author=cls.author1,
            date_created=datetime.datetime(3000, 1, 1, 21, 22, 23)
        )
        # Objects whose get_absolute_url returns a complete URL, covering
        # explicit http/https and the scheme-relative "//" form.
        cls.scheme1 = SchemeIncludedURL.objects.create(url='http://test_scheme_included_http/')
        cls.scheme2 = SchemeIncludedURL.objects.create(url='https://test_scheme_included_https/')
        cls.scheme3 = SchemeIncludedURL.objects.create(url='//test_default_scheme_kept/')

    def test_shortcut_with_absolute_url(self):
        "Can view a shortcut for an Author object that has a get_absolute_url method"
        for obj in Author.objects.all():
            short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
            response = self.client.get(short_url)
            self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
                                 status_code=302, target_status_code=404)

    def test_shortcut_with_absolute_url_including_scheme(self):
        """
        Can view a shortcut when object's get_absolute_url returns a full URL
        the tested URLs are: "http://...", "https://..." and "//..."
        """
        for obj in SchemeIncludedURL.objects.all():
            short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk)
            response = self.client.get(short_url)
            self.assertRedirects(response, obj.get_absolute_url(),
                                 status_code=302,
                                 fetch_redirect_response=False)

    def test_shortcut_no_absolute_url(self):
        "Shortcuts for an object that has no get_absolute_url method raises 404"
        for obj in Article.objects.all():
            short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
            response = self.client.get(short_url)
            self.assertEqual(response.status_code, 404)

    def test_wrong_type_pk(self):
        # A pk that cannot match the model's pk type must 404, not crash.
        short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_shortcut_bad_pk(self):
        # A well-formed pk with no matching row must 404.
        short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_nonint_content_type(self):
        # A non-numeric content-type id must 404.
        an_author = Author.objects.all()[0]
        short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_bad_content_type(self):
        # A numeric but nonexistent content-type id must 404.
        an_author = Author.objects.all()[0]
        short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)

    def test_create_contenttype_on_the_spot(self):
        """
        Make sure ContentTypeManager.get_for_model creates the corresponding
        content type if it doesn't exist in the database (for some reason).
        """
        class ModelCreatedOnTheFly(models.Model):
            # NOTE(review): CharField() without max_length — tolerated here
            # because the model is never migrated; confirm if reused.
            name = models.CharField()

            class Meta:
                verbose_name = 'a model created on the fly'
                app_label = 'my_great_app'
                apps = Apps()

        ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly)
        self.assertEqual(ct.app_label, 'my_great_app')
        self.assertEqual(ct.model, 'modelcreatedonthefly')
        self.assertEqual(force_text(ct), 'modelcreatedonthefly')
class IsolatedModelsTestCase(TestCase):
    """Base class that snapshots the app registry so models declared inside
    individual tests do not leak into other tests."""

    def setUp(self):
        # The unmanaged models need to be removed after the test in order to
        # prevent bad interactions with the flush operation in other tests.
        self._old_models = apps.app_configs['contenttypes_tests'].models.copy()

    def tearDown(self):
        # Restore the registry snapshot and drop any cached lookups.
        apps.app_configs['contenttypes_tests'].models = self._old_models
        apps.all_models['contenttypes_tests'] = self._old_models
        apps.clear_cache()
@override_settings(SILENCED_SYSTEM_CHECKS=['fields.W342']) # ForeignKey(unique=True)
class GenericForeignKeyTests(IsolatedModelsTestCase):
    """System-check tests for GenericForeignKey field configuration."""

    def test_str(self):
        # The string form of a GFK is "<app>.<Model>.<field>".
        class Model(models.Model):
            field = GenericForeignKey()
        expected = "contenttypes_tests.Model.field"
        actual = force_str(Model.field)
        self.assertEqual(expected, actual)

    def test_missing_content_type_field(self):
        # A GFK without its content_type FK must raise contenttypes.E002.
        class TaggedItem(models.Model):
            # no content_type field
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()

        errors = TaggedItem.content_object.check()
        expected = [
            checks.Error(
                "The GenericForeignKey content type references the non-existent field 'TaggedItem.content_type'.",
                hint=None,
                obj=TaggedItem.content_object,
                id='contenttypes.E002',
            )
        ]
        self.assertEqual(errors, expected)

    def test_invalid_content_type_field(self):
        # content_type must be a ForeignKey, not an arbitrary field (E003).
        class Model(models.Model):
            content_type = models.IntegerField() # should be ForeignKey
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'content_type', 'object_id')

        errors = Model.content_object.check()
        expected = [
            checks.Error(
                "'Model.content_type' is not a ForeignKey.",
                hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
                obj=Model.content_object,
                id='contenttypes.E003',
            )
        ]
        self.assertEqual(errors, expected)

    def test_content_type_field_pointing_to_wrong_model(self):
        # content_type must point at contenttypes.ContentType (E004).
        class Model(models.Model):
            content_type = models.ForeignKey('self', models.CASCADE) # should point to ContentType
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'content_type', 'object_id')

        errors = Model.content_object.check()
        expected = [
            checks.Error(
                "'Model.content_type' is not a ForeignKey to 'contenttypes.ContentType'.",
                hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
                obj=Model.content_object,
                id='contenttypes.E004',
            )
        ]
        self.assertEqual(errors, expected)

    def test_missing_object_id_field(self):
        # A GFK without its object_id field must raise contenttypes.E001.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            # missing object_id field
            content_object = GenericForeignKey()

        errors = TaggedItem.content_object.check()
        expected = [
            checks.Error(
                "The GenericForeignKey object ID references the non-existent field 'object_id'.",
                hint=None,
                obj=TaggedItem.content_object,
                id='contenttypes.E001',
            )
        ]
        self.assertEqual(errors, expected)

    def test_field_name_ending_with_underscore(self):
        # GFK names obey the general "no trailing underscore" rule (E001).
        class Model(models.Model):
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            object_id = models.PositiveIntegerField()
            content_object_ = GenericForeignKey(
                'content_type', 'object_id')

        errors = Model.content_object_.check()
        expected = [
            checks.Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=Model.content_object_,
                id='fields.E001',
            )
        ]
        self.assertEqual(errors, expected)

    @override_settings(INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'contenttypes_tests'])
    def test_generic_foreign_key_checks_are_performed(self):
        # The system-check framework must invoke a GFK subclass's check().
        class MyGenericForeignKey(GenericForeignKey):
            def check(self, **kwargs):
                return ['performed!']

        class Model(models.Model):
            content_object = MyGenericForeignKey()

        errors = checks.run_checks()
        self.assertEqual(errors, ['performed!'])
class GenericRelationshipTests(IsolatedModelsTestCase):
    """System-check tests for GenericRelation field configuration."""
    def test_valid_generic_relationship(self):
        # Default field names on the target model -> no check errors.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
        class Bookmark(models.Model):
            tags = GenericRelation('TaggedItem')
        errors = Bookmark.tags.field.check()
        self.assertEqual(errors, [])
    def test_valid_generic_relationship_with_explicit_fields(self):
        # Non-default field names work when passed explicitly on both sides.
        class TaggedItem(models.Model):
            custom_content_type = models.ForeignKey(ContentType, models.CASCADE)
            custom_object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'custom_content_type', 'custom_object_id')
        class Bookmark(models.Model):
            tags = GenericRelation('TaggedItem',
                content_type_field='custom_content_type',
                object_id_field='custom_object_id')
        errors = Bookmark.tags.field.check()
        self.assertEqual(errors, [])
    def test_pointing_to_missing_model(self):
        # Lazy reference to an unknown model -> fields.E300.
        class Model(models.Model):
            rel = GenericRelation('MissingModel')
        errors = Model.rel.field.check()
        expected = [
            checks.Error(
                ("Field defines a relation with model 'MissingModel', "
                 "which is either not installed, or is abstract."),
                hint=None,
                obj=Model.rel.field,
                id='fields.E300',
            )
        ]
        self.assertEqual(errors, expected)
    def test_valid_self_referential_generic_relationship(self):
        # A model may hold a GenericRelation pointing back at itself.
        class Model(models.Model):
            rel = GenericRelation('Model')
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'content_type', 'object_id')
        errors = Model.rel.field.check()
        self.assertEqual(errors, [])
    def test_missing_generic_foreign_key(self):
        # Target has the plumbing fields but no GenericForeignKey -> E004.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            object_id = models.PositiveIntegerField()
        class Bookmark(models.Model):
            tags = GenericRelation('TaggedItem')
        errors = Bookmark.tags.field.check()
        expected = [
            checks.Error(
                ("The GenericRelation defines a relation with the model "
                 "'contenttypes_tests.TaggedItem', but that model does not have a "
                 "GenericForeignKey."),
                hint=None,
                obj=Bookmark.tags.field,
                id='contenttypes.E004',
            )
        ]
        self.assertEqual(errors, expected)
    @override_settings(TEST_SWAPPED_MODEL='contenttypes_tests.Replacement')
    def test_pointing_to_swapped_model(self):
        # Relations must target the replacement, not the swapped-out model.
        class Replacement(models.Model):
            pass
        class SwappedModel(models.Model):
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL'
        class Model(models.Model):
            rel = GenericRelation('SwappedModel')
        errors = Model.rel.field.check()
        expected = [
            checks.Error(
                ("Field defines a relation with the model "
                 "'contenttypes_tests.SwappedModel', "
                 "which has been swapped out."),
                hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
                obj=Model.rel.field,
                id='fields.E301',
            )
        ]
        self.assertEqual(errors, expected)
    def test_field_name_ending_with_underscore(self):
        # GenericRelation names ending in '_' are rejected like any field.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType, models.CASCADE)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
        class InvalidBookmark(models.Model):
            tags_ = GenericRelation('TaggedItem')
        errors = InvalidBookmark.tags_.field.check()
        expected = [
            checks.Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=InvalidBookmark.tags_.field,
                id='fields.E001',
            )
        ]
        self.assertEqual(errors, expected)
class UpdateContentTypesTests(TestCase):
    """Tests for update_contenttypes() handling of stale ContentType rows."""
    def setUp(self):
        self.before_count = ContentType.objects.count()
        # A content type with no corresponding model class: a stale entry.
        ContentType.objects.create(app_label='contenttypes_tests', model='Fake')
        self.app_config = apps.get_app_config('contenttypes_tests')
    def test_interactive_true(self):
        """
        interactive mode of update_contenttypes() (the default) should delete
        stale contenttypes.
        """
        # Patch the interactive prompt to auto-answer "yes". The original
        # code never undid this patch, leaking it into every later test;
        # addCleanup restores the previous input function unconditionally.
        original_input = getattr(management, 'input', input)
        management.input = lambda x: force_str("yes")
        self.addCleanup(setattr, management, 'input', original_input)
        with captured_stdout() as stdout:
            management.update_contenttypes(self.app_config)
        self.assertIn("Deleting stale content type", stdout.getvalue())
        self.assertEqual(ContentType.objects.count(), self.before_count)
    def test_interactive_false(self):
        """
        non-interactive mode of update_contenttypes() shouldn't delete stale
        content types.
        """
        with captured_stdout() as stdout:
            management.update_contenttypes(self.app_config, interactive=False)
        self.assertIn("Stale content types remain.", stdout.getvalue())
        self.assertEqual(ContentType.objects.count(), self.before_count + 1)
class TestRouter(object):
    """Database router sending reads to 'other' and writes to 'default'."""
    # Aliases kept as class attributes so the routing policy is stated once.
    read_alias = 'other'
    write_alias = 'default'
    def db_for_read(self, model, **hints):
        """Route every read query to the secondary database."""
        return self.read_alias
    def db_for_write(self, model, **hints):
        """Route every write query to the primary database."""
        return self.write_alias
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class ContentTypesMultidbTestCase(TestCase):
    """ContentType lookups must honor the database router (refs #20401)."""
    def setUp(self):
        # Whenever a test starts executing, only the "default" database is
        # connected. We explicitly connect to the "other" database here. If we
        # don't do it, then it will be implicitly connected later when we query
        # it, but in that case some database backends may automatically perform
        # extra queries upon connecting (notably mysql executes
        # "SET SQL_AUTO_IS_NULL = 0"), which will affect assertNumQueries().
        connections['other'].ensure_connection()
    def test_multidb(self):
        """
        Test that, when using multiple databases, we use the db_for_read (see
        #20401).
        """
        # Clear the cache so get_for_model() must actually hit a database;
        # the router should send that single query to 'other', not 'default'.
        ContentType.objects.clear_cache()
        with self.assertNumQueries(0, using='default'), \
                self.assertNumQueries(1, using='other'):
            ContentType.objects.get_for_model(Author)
|
biln/airflow | refs/heads/master | airflow/contrib/hooks/fs_hook.py | 63 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.hooks.base_hook import BaseHook
class FSHook(BaseHook):
    """
    Allows for interaction with a file server.

    The connection must have a name and a path specified under extra.

    Example:
    Conn Id: fs_test
    Conn Type: File (path)
    Host, Schema, Login, Password, Port: empty
    Extra: {"path": "/tmp"}
    """
    def __init__(self, conn_id='fs_default'):
        connection = self.get_connection(conn_id)
        # Fall back to an empty base path when extra has no "path" key.
        self.basepath = connection.extra_dejson.get('path', '')
        self.conn = connection
    def get_conn(self):
        # Nothing to open: the "connection" is only a filesystem path.
        pass
    def get_path(self):
        """Return the base path configured in the connection extras."""
        return self.basepath
|
dunn/gammu | refs/heads/master | tests/gen_sms_tests.py | 1 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# vim: expandtab sw=4 ts=4 sts=4:
'''
Gammu SMS backup generator.
'''
__author__ = 'Michal Čihař'
__email__ = 'michal@cihar.com'
__license__ = '''
Copyright © 2003 - 2015 Michal Čihař
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License version 2 as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import os
# Work in both common location when this can be executed:
try:
    os.chdir('tests/at-sms-encode/')
except OSError:
    os.chdir('at-sms-encode/')
# Numbers we're going to test
NUMBERS = [
    '1234',
    '800123456',
    '+420800123456',
    '+41761234567',
]
# Text parts we're going to test
TEXTS = [
    '123456',
    'Zkouška sirén',
    'This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.',
]
# Skeleton of one SMS backup file; filled in by generate_message().
TEMPLATE = '''
[SMSBackup000]
SMSC = "%s"
State = %s
Number = "%s"
Coding = %s
Folder = %d
'''
# Message state looked up as STATES[folder]; index 0 is unused because
# generate() only produces folders 1 and 2.
STATES = [
    'Read',
    'Read',
    'Sent',
]
# Character codings exercised for every message combination.
CODINGS = [
    'Default',
    'Unicode',
]
def write_text(f, text):
    '''
    Writes text splitted and encoded in same way as Gammu does it for SMS backups.

    The text is encoded as UTF-16 big endian, hex encoded and written as
    TextNN lines of at most 200 hex digits (100 bytes) each.
    '''
    # binascii.hexlify works on Python 2 and 3 alike; the original
    # text.encode('UTF-16-BE').encode('HEX') chain was Python 2 only
    # ('HEX' is not a codec in Python 3).
    import binascii
    encoded = binascii.hexlify(text.encode('UTF-16-BE')).decode('ascii')
    line = 0
    while len(encoded) > 0:
        f.write('Text%02d = %s\n' % (line, encoded[:200]))
        encoded = encoded[200:]
        line = line + 1
def generate_message(index, folder, coding, smscnum, num, text):
    '''
    Generates single message file.

    Writes NN.backup in the current directory, where NN is the message
    index; smscnum/num/text/coding index into the module-level NUMBERS,
    TEXTS and CODINGS tables.
    '''
    # open() replaces the file() builtin, which was removed in Python 3;
    # the with block also guarantees the file is closed on error.
    with open('%02d.backup' % index, 'w') as f:
        f.write(TEMPLATE % (
            NUMBERS[smscnum],
            STATES[folder],
            NUMBERS[num],
            CODINGS[coding],
            folder
        ))
        if folder > 1:
            # Sent-folder messages carry a fixed sent timestamp.
            f.write('Sent = 20070605T135630\n')
        write_text(f, TEXTS[text])
def generate():
    '''
    Generates test data based on NUMBERS and TEXTS variables.

    Produces one backup file per combination of SMSC number, destination
    number, text, coding and folder, numbered sequentially from 1.
    '''
    index = 0
    for smscnum in range(len(NUMBERS)):
        for num in range(len(NUMBERS)):
            for text in range(len(TEXTS)):
                for coding in range(len(CODINGS)):
                    for folder in (1, 2):
                        # Pre-increment so numbering starts at 1.
                        index += 1
                        generate_message(index, folder, coding,
                                         smscnum, num, text)
if __name__ == '__main__':
generate()
|
jcftang/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/rds.py | 4 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
- mariadb was added in version 2.2
required: false
default: null
choices: [ 'mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
- Prior to 2.0 it always defaults to null and the API would use 3306, it had to be set to other DB default values when not using MySql.
Starting at 2.0 it automatically defaults to what is expected for each c(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug:
msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
import sys
import time
try:
    import boto.rds
    HAS_BOTO = True
except ImportError:
    # boto is an optional dependency; its absence is reported at runtime.
    HAS_BOTO = False
try:
    import boto.rds2
    has_rds2 = True
except ImportError:
    # boto.rds2 (boto >= 2.26.0) enables extra features such as tags.
    has_rds2 = False
# Default TCP port per database engine, applied when the user does not
# pass an explicit 'port' option.
DEFAULT_PORTS= {
    'aurora': 3306,
    'mariadb': 3306,
    'mysql': 3306,
    'oracle': 1521,
    'sqlserver': 1433,
    'postgres': 5432,
}
class RDSException(Exception):
    """Normalized wrapper for boto errors.

    Extracts a human-readable message and an error code from whichever
    shape the underlying exception has (boto.rds attributes, boto.rds2
    response body, or a plain exception).
    """
    def __init__(self, exc):
        if hasattr(exc, 'error_message') and exc.error_message:
            self.message = exc.error_message
            self.code = exc.error_code
        elif hasattr(exc, 'body') and 'Error' in exc.body:
            self.message = exc.body['Error']['Message']
            self.code = exc.body['Error']['Code']
        else:
            self.message = str(exc)
            self.code = 'Unknown Error'
        # Initialize the base Exception so str(exc) and exc.args carry the
        # message; previously str() of this exception was always empty.
        super(RDSException, self).__init__(self.message)
class RDSConnection:
    """Wrapper around the legacy boto.rds connection.

    Every method delegates to the matching boto.rds call, wraps results in
    RDSDBInstance / RDSSnapshot, and re-raises boto server errors as
    RDSException so callers only handle one error type.
    """
    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg=e.error_message)
    def get_db_instance(self, instancename):
        """Return the named instance as RDSDBInstance, or None if absent."""
        try:
            return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
        except boto.exception.BotoServerError:
            return None
    def get_db_snapshot(self, snapshotid):
        """Return the named snapshot as RDSSnapshot, or None if absent."""
        try:
            return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
        except boto.exception.BotoServerError:
            return None
    def create_db_instance(self, instance_name, size, instance_class, db_engine,
                           username, password, **params):
        """Create a new DB instance of the given engine."""
        params['engine'] = db_engine
        try:
            result = self.connection.create_dbinstance(instance_name, size, instance_class,
                                                       username, password, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
        """Create a read replica of source_instance."""
        try:
            # BUGFIX: boto.rds exposes create_dbinstance_read_replica; the
            # previous call to createdb_instance_read_replica raised
            # AttributeError and broke command=replicate.
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def delete_db_instance(self, instance_name, **params):
        """Delete a DB instance (optionally snapshotting first via params)."""
        try:
            result = self.connection.delete_dbinstance(instance_name, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def delete_db_snapshot(self, snapshot):
        """Delete a DB snapshot by identifier."""
        try:
            result = self.connection.delete_dbsnapshot(snapshot)
            return RDSSnapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def modify_db_instance(self, instance_name, **params):
        """Apply modifications to an existing DB instance."""
        try:
            result = self.connection.modify_dbinstance(instance_name, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def reboot_db_instance(self, instance_name, **params):
        """Reboot a DB instance. Extra params are accepted but unused here."""
        try:
            result = self.connection.reboot_dbinstance(instance_name)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
        """Restore a new DB instance from an existing snapshot."""
        try:
            result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def create_db_snapshot(self, snapshot, instance_name, **params):
        """Snapshot an existing DB instance."""
        try:
            result = self.connection.create_dbsnapshot(snapshot, instance_name)
            return RDSSnapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def promote_read_replica(self, instance_name, **params):
        """Promote a read replica to a standalone instance."""
        try:
            result = self.connection.promote_read_replica(instance_name, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
class RDS2Connection:
    """Wrapper around the boto.rds2 connection.

    Mirrors RDSConnection, but unwraps the nested JSON-style response
    dictionaries that rds2 returns before wrapping results in
    RDS2DBInstance / RDS2Snapshot; boto server errors are re-raised as
    RDSException.
    """
    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg=e.error_message)
    def get_db_instance(self, instancename):
        # Returns an RDS2DBInstance, or None when the instance does not exist.
        try:
            dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
            result = RDS2DBInstance(dbinstances[0])
            return result
        except boto.rds2.exceptions.DBInstanceNotFound as e:
            return None
        except Exception as e:
            raise e
    def get_db_snapshot(self, snapshotid):
        # Returns an RDS2Snapshot, or None when the snapshot does not exist.
        try:
            snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
            result = RDS2Snapshot(snapshots[0])
            return result
        except boto.rds2.exceptions.DBSnapshotNotFound as e:
            return None
    def create_db_instance(self, instance_name, size, instance_class, db_engine,
                           username, password, **params):
        # Create a new DB instance of the given engine.
        try:
            result = self.connection.create_db_instance(instance_name, size, instance_class,
                    db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
        # Create a read replica of source_instance.
        try:
            result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def delete_db_instance(self, instance_name, **params):
        # Delete a DB instance (optionally snapshotting first via params).
        try:
            result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def delete_db_snapshot(self, snapshot):
        # Delete a DB snapshot by identifier.
        try:
            result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
            return RDS2Snapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def modify_db_instance(self, instance_name, **params):
        # Apply modifications to an existing DB instance.
        try:
            result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def reboot_db_instance(self, instance_name, **params):
        # Reboot a DB instance.
        try:
            result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
        # Restore a new DB instance from a snapshot. Note: instance_type is
        # accepted for interface parity but not forwarded here.
        try:
            result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def create_db_snapshot(self, snapshot, instance_name, **params):
        # Snapshot an existing DB instance.
        try:
            result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
            return RDS2Snapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def promote_read_replica(self, instance_name, **params):
        # Promote a read replica to a standalone instance.
        try:
            result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
class RDSDBInstance:
    """Presents a boto.rds DB instance object as a plain dict of facts."""
    def __init__(self, dbinstance):
        self.instance = dbinstance
        self.name = dbinstance.id
        self.status = dbinstance.status
    def get_data(self):
        """Return the interesting instance attributes as a dict."""
        d = {
            'id': self.name,
            'create_time': self.instance.create_time,
            'status': self.status,
            'availability_zone': self.instance.availability_zone,
            'backup_retention': self.instance.backup_retention_period,
            'backup_window': self.instance.preferred_backup_window,
            'maintenance_window': self.instance.preferred_maintenance_window,
            'multi_zone': self.instance.multi_az,
            'instance_type': self.instance.instance_class,
            'username': self.instance.master_username,
            'iops': self.instance.iops,
        }
        # Only assign an Endpoint if one is available
        if hasattr(self.instance, 'endpoint'):
            d["endpoint"] = self.instance.endpoint[0]
            d["port"] = self.instance.endpoint[1]
            if self.instance.vpc_security_groups is not None:
                d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
            else:
                d["vpc_security_groups"] = None
        else:
            d["endpoint"] = None
            d["port"] = None
            d["vpc_security_groups"] = None
        # ReadReplicaSourceDBInstanceIdentifier may or may not exist;
        # getattr with a default replaces the old blanket try/except.
        d["replication_source"] = getattr(
            self.instance, "ReadReplicaSourceDBInstanceIdentifier", None)
        return d
class RDS2DBInstance:
    """Presents a boto.rds2 DB instance response dict as a plain dict."""
    def __init__(self, dbinstance):
        self.instance = dbinstance
        # Unparsed rds2 responses may lack the identifier; .get() covers
        # both the missing-key and present-key cases in one expression.
        self.name = dbinstance.get('DBInstanceIdentifier')
        self.status = self.instance.get('DBInstanceStatus')
    def get_data(self):
        """Return the interesting instance attributes as a dict."""
        d = {
            'id': self.name,
            'create_time': self.instance['InstanceCreateTime'],
            'status': self.status,
            'availability_zone': self.instance['AvailabilityZone'],
            'backup_retention': self.instance['BackupRetentionPeriod'],
            'maintenance_window': self.instance['PreferredMaintenanceWindow'],
            'multi_zone': self.instance['MultiAZ'],
            'instance_type': self.instance['DBInstanceClass'],
            'username': self.instance['MasterUsername'],
            'iops': self.instance['Iops'],
            'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
        }
        # Mirror RDSDBInstance: always expose the key, None when unset
        # (previously the key was silently omitted when the value was None).
        if self.instance["VpcSecurityGroups"] is not None:
            d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
        else:
            d['vpc_security_groups'] = None
        if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
            d['endpoint'] = self.instance["Endpoint"].get('Address', None)
            d['port'] = self.instance["Endpoint"].get('Port', None)
        else:
            d['endpoint'] = None
            d['port'] = None
        return d
class RDSSnapshot:
    """Presents a boto.rds DB snapshot object as a plain dict of facts."""
    def __init__(self, snapshot):
        self.snapshot = snapshot
        self.name = snapshot.id
        self.status = snapshot.status
    def get_data(self):
        """Return the interesting snapshot attributes as a dict."""
        snap = self.snapshot
        data = {
            'id': self.name,
            'create_time': snap.snapshot_create_time,
            'status': self.status,
            'availability_zone': snap.availability_zone,
            'instance_id': snap.instance_id,
            'instance_created': snap.instance_create_time,
        }
        # These attributes only exist with boto >= 2.21.0, so copy them
        # conditionally rather than unconditionally.
        for attr in ('snapshot_type', 'iops'):
            if hasattr(snap, attr):
                data[attr] = getattr(snap, attr)
        return data
class RDS2Snapshot:
    """Wrapper around a boto.rds2 snapshot description (a plain dict)."""
    def __init__(self, snapshot):
        # delete_db_snapshot responses wrap the snapshot dict two levels
        # deep; unwrap it so get_data() treats every response uniformly.
        if 'DeleteDBSnapshotResponse' in snapshot:
            self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
        else:
            self.snapshot = snapshot
        self.name = self.snapshot.get('DBSnapshotIdentifier')
        self.status = self.snapshot.get('Status')

    def get_data(self):
        """Return a flat dict of snapshot facts for module output.

        'Iops' is only present for snapshots of provisioned-IOPS
        instances, so it is read with .get() to avoid a KeyError.
        """
        d = {
            'id' : self.name,
            'create_time' : self.snapshot['SnapshotCreateTime'],
            'status' : self.status,
            'availability_zone' : self.snapshot['AvailabilityZone'],
            'instance_id' : self.snapshot['DBInstanceIdentifier'],
            'instance_created' : self.snapshot['InstanceCreateTime'],
            'snapshot_type' : self.snapshot['SnapshotType'],
            'iops' : self.snapshot.get('Iops'),
        }
        return d
def await_resource(conn, resource, status, module):
    """Poll ``resource`` until it reaches ``status`` or wait_timeout expires.

    :param conn: RDS connection wrapper (get_db_instance/get_db_snapshot)
    :param resource: wrapped instance or snapshot to poll
    :param status: target status string (e.g. 'available', 'deleted')
    :param module: AnsibleModule (supplies wait_timeout/command, fail_json)
    :returns: the refreshed resource, or None if it disappeared (deletes)
    """
    wait_timeout = module.params.get('wait_timeout') + time.time()
    while wait_timeout > time.time() and resource.status != status:
        time.sleep(5)
        if wait_timeout <= time.time():
            module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
        if module.params.get('command') == 'snapshot':
            # Temporary until all the rds2 commands have their responses parsed
            if resource.name is None:
                module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
            resource = conn.get_db_snapshot(resource.name)
        else:
            # Temporary until all the rds2 commands have their responses parsed
            if resource.name is None:
                module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
            resource = conn.get_db_instance(resource.name)
            if resource is None:
                break
    # The timeout can also expire at the while-condition itself (or have
    # already expired on entry); previously this silently returned a
    # resource that never reached the desired state.  Fail explicitly
    # instead.  A resource that vanished (None) is a successful delete.
    if resource is not None and resource.status != status:
        module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
    return resource
def create_db_instance(module, conn):
    """Create a new RDS instance unless one with that name already exists,
    then report facts for it (waiting for 'available' when requested)."""
    subnet = module.params.get('subnet')
    required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
    valid_vars = ['backup_retention', 'backup_window',
                  'character_set_name', 'db_name', 'engine_version',
                  'instance_type', 'iops', 'license_model', 'maint_window',
                  'multi_zone', 'option_group', 'parameter_group', 'port',
                  'subnet', 'upgrade', 'zone']
    # VPC deployments take VPC security groups; classic ones take EC2 ones.
    valid_vars.append('vpc_security_groups' if subnet else 'security_groups')
    if has_rds2:
        valid_vars += ['publicly_accessible', 'tags']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')

    changed = False
    result = conn.get_db_instance(instance_name)
    if not result:
        try:
            result = conn.create_db_instance(instance_name, module.params.get('size'),
                                             module.params.get('instance_type'), module.params.get('db_engine'),
                                             module.params.get('username'), module.params.get('password'), **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg="Failed to create instance: %s" % e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)
    module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
    """Create a read replica of an existing instance unless the replica
    name already exists, then report facts for it."""
    required_vars = ['instance_name', 'source_instance']
    valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
    if has_rds2:
        valid_vars += ['iops', 'option_group', 'publicly_accessible', 'tags']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    source_instance = module.params.get('source_instance')

    changed = False
    result = conn.get_db_instance(instance_name)
    if not result:
        try:
            result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg="Failed to create replica instance: %s " % e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)
    module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
    """Delete a DB instance (optionally taking a final snapshot first) or
    delete a snapshot, then optionally wait for the delete to finish."""
    required_vars = []
    valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    snapshot = module.params.get('snapshot')
    # No instance_name means we are deleting a snapshot; otherwise an instance.
    if not instance_name:
        result = conn.get_db_snapshot(snapshot)
    else:
        result = conn.get_db_instance(instance_name)
    # Already gone, or already being deleted: nothing to do.
    if not result:
        module.exit_json(changed=False)
    if result.status == 'deleting':
        module.exit_json(changed=False)
    try:
        if instance_name:
            # A snapshot name alongside instance_name requests a final
            # snapshot before deletion; the kwarg name differs per backend.
            if snapshot:
                params["skip_final_snapshot"] = False
                if has_rds2:
                    params["final_db_snapshot_identifier"] = snapshot
                else:
                    params["final_snapshot_id"] = snapshot
            else:
                params["skip_final_snapshot"] = True
            result = conn.delete_db_instance(instance_name, **params)
        else:
            result = conn.delete_db_snapshot(snapshot)
    except RDSException as e:
        module.fail_json(msg="Failed to delete instance: %s" % e.message)
    # If we're not waiting for a delete to complete then we're all done
    # so just return
    if not module.params.get('wait'):
        module.exit_json(changed=True)
    try:
        resource = await_resource(conn, result, 'deleted', module)
        module.exit_json(changed=True)
    except RDSException as e:
        # The resource disappearing while we poll still counts as success.
        if e.code == 'DBInstanceNotFound':
            module.exit_json(changed=True)
        else:
            module.fail_json(msg=e.message)
    except Exception as e:
        module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
    """Gather facts for exactly one DB instance or one snapshot.

    Exactly one of instance_name/snapshot must be supplied; previously a
    call with neither fell through to an unbound ``resource`` (NameError).
    """
    required_vars = []
    valid_vars = ['instance_name', 'snapshot']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    snapshot = module.params.get('snapshot')
    if instance_name and snapshot:
        module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
    if not instance_name and not snapshot:
        module.fail_json(msg="Facts must be called with either instance_name or snapshot")
    if instance_name:
        resource = conn.get_db_instance(instance_name)
        if not resource:
            module.fail_json(msg="DB instance %s does not exist" % instance_name)
    if snapshot:
        resource = conn.get_db_snapshot(snapshot)
        if not resource:
            module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
    module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
    """Modify attributes of an existing DB instance, optionally renaming it
    and optionally waiting for the change to be applied."""
    required_vars = ['instance_name']
    valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
                  'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
                  'maint_window', 'multi_zone', 'new_instance_name',
                  'option_group', 'parameter_group', 'password', 'size', 'upgrade']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    new_instance_name = module.params.get('new_instance_name')
    try:
        result = conn.modify_db_instance(instance_name, **params)
    except RDSException as e:
        module.fail_json(msg=e.message)
    if params.get('apply_immediately'):
        if new_instance_name:
            # Wait until the new instance name is valid
            # NOTE(review): this loop has no timeout of its own; it relies on
            # the rename eventually becoming visible — confirm upstream.
            new_instance = None
            while not new_instance:
                new_instance = conn.get_db_instance(new_instance_name)
                time.sleep(5)
            # Found instance but it briefly flicks to available
            # before rebooting so let's wait until we see it rebooting
            # before we check whether to 'wait'
            result = await_resource(conn, new_instance, 'rebooting', module)
    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)
    # guess that this changed the DB, need a way to check
    module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
    """Promote a read replica to a standalone instance; a no-op (changed
    False) when the instance has no replication source."""
    required_vars = ['instance_name']
    valid_vars = ['backup_retention', 'backup_window']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')

    result = conn.get_db_instance(instance_name)
    if not result:
        module.fail_json(msg="DB Instance %s does not exist" % instance_name)

    changed = False
    # Only a replica (one with a replication source) can be promoted.
    if result.get_data().get('replication_source'):
        try:
            result = conn.promote_read_replica(instance_name, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)
    module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
    """Create a DB snapshot unless one with the requested name exists."""
    params = validate_parameters(['instance_name', 'snapshot'], ['tags'], module)
    instance_name = module.params.get('instance_name')
    snapshot = module.params.get('snapshot')

    result = conn.get_db_snapshot(snapshot)
    if result:
        changed = False
    else:
        try:
            result = conn.create_db_snapshot(snapshot, instance_name, **params)
        except RDSException as e:
            module.fail_json(msg=e.message)
        changed = True

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_snapshot(snapshot)
    module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
    """Reboot a DB instance; 'force_failover' is only valid under rds2."""
    valid_vars = ['force_failover'] if has_rds2 else []
    params = validate_parameters(['instance_name'], valid_vars, module)
    instance_name = module.params.get('instance_name')

    result = conn.get_db_instance(instance_name)
    changed = False
    try:
        result = conn.reboot_db_instance(instance_name, **params)
        changed = True
    except RDSException as e:
        module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)
    module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
    """Restore a new DB instance from an existing snapshot (idempotent)."""
    required_vars = ['instance_name', 'snapshot']
    valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
                  'option_group', 'port', 'publicly_accessible',
                  'subnet', 'tags', 'upgrade', 'zone']
    # boto.rds needs instance_type as a mandatory positional argument,
    # while rds2 accepts it as an optional keyword.
    (valid_vars if has_rds2 else required_vars).append('instance_type')
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    instance_type = module.params.get('instance_type')
    snapshot = module.params.get('snapshot')

    changed = False
    result = conn.get_db_instance(instance_name)
    if not result:
        try:
            result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)
    module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
    """Check module params against a command's required/valid sets and
    translate them into the keyword arguments the boto backend expects.

    :param required_vars: param names that must be present for this command
    :param valid_vars: optional param names accepted by this command
    :param module: the AnsibleModule (supplies params and fail_json)
    :returns: dict of boto.rds or boto.rds2 keyword arguments
    """
    command = module.params.get('command')
    for v in required_vars:
        if not module.params.get(v):
            module.fail_json(msg="Parameter %s required for %s command" % (v, command))
    # map to convert rds module options to boto rds and rds2 options
    optional_params = {
        'port': 'port',
        'db_name': 'db_name',
        'zone': 'availability_zone',
        'maint_window': 'preferred_maintenance_window',
        'backup_window': 'preferred_backup_window',
        'backup_retention': 'backup_retention_period',
        'multi_zone': 'multi_az',
        'engine_version': 'engine_version',
        'upgrade': 'auto_minor_version_upgrade',
        'subnet': 'db_subnet_group_name',
        'license_model': 'license_model',
        'option_group': 'option_group_name',
        'size': 'allocated_storage',
        'iops': 'iops',
        'new_instance_name': 'new_instance_id',
        'apply_immediately': 'apply_immediately',
    }
    # map to convert rds module options to boto rds options
    optional_params_rds = {
        'db_engine': 'engine',
        'password': 'master_password',
        'parameter_group': 'param_group',
        'instance_type': 'instance_class',
    }
    # map to convert rds module options to boto rds2 options
    optional_params_rds2 = {
        'tags': 'tags',
        'publicly_accessible': 'publicly_accessible',
        'parameter_group': 'db_parameter_group_name',
        'character_set_name': 'character_set_name',
        'instance_type': 'db_instance_class',
        'password': 'master_user_password',
        'new_instance_name': 'new_db_instance_identifier',
        'force_failover': 'force_failover',
    }
    if has_rds2:
        optional_params.update(optional_params_rds2)
        sec_group = 'db_security_groups'
    else:
        optional_params.update(optional_params_rds)
        sec_group = 'security_groups'
        # Reject options that only boto.rds2 supports.  This check must run
        # only when rds2 is unavailable; unguarded, it rejected legitimate
        # rds2-only options such as 'tags' even when boto.rds2 was present.
        for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
            if module.params.get(k):
                module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
    params = {}
    for (k, v) in optional_params.items():
        if module.params.get(k) and k not in required_vars:
            if k in valid_vars:
                params[v] = module.params[k]
            else:
                module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
    if module.params.get('security_groups'):
        params[sec_group] = module.params.get('security_groups').split(',')
    vpc_groups = module.params.get('vpc_security_groups')
    if vpc_groups:
        if has_rds2:
            params['vpc_security_group_ids'] = vpc_groups
        else:
            groups_list = []
            for x in vpc_groups:
                groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
            params['vpc_security_groups'] = groups_list
    # Convert tags dict to list of tuples that rds2 expects
    if 'tags' in params:
        params['tags'] = module.params['tags'].items()
    return params
def main():
    """Ansible entry point: build the argument spec, select the boto
    backend (rds vs rds2) and dispatch to the requested command handler."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
            instance_name = dict(required=False),
            source_instance = dict(required=False),
            db_engine = dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'], required=False),
            size = dict(required=False),
            instance_type = dict(aliases=['type'], required=False),
            username = dict(required=False),
            password = dict(no_log=True, required=False),
            db_name = dict(required=False),
            engine_version = dict(required=False),
            parameter_group = dict(required=False),
            license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
            multi_zone = dict(type='bool', default=False),
            iops = dict(required=False),
            security_groups = dict(required=False),
            vpc_security_groups = dict(type='list', required=False),
            port = dict(required=False),
            upgrade = dict(type='bool', default=False),
            option_group = dict(required=False),
            maint_window = dict(required=False),
            backup_window = dict(required=False),
            backup_retention = dict(required=False),
            zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
            subnet = dict(required=False),
            wait = dict(type='bool', default=False),
            wait_timeout = dict(type='int', default=300),
            snapshot = dict(required=False),
            apply_immediately = dict(type='bool', default=False),
            new_instance_name = dict(required=False),
            tags = dict(type='dict', required=False),
            publicly_accessible = dict(required=False),
            character_set_name = dict(required=False),
            force_failover = dict(type='bool', required=False, default=False)
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    # command name -> handler function; each handler calls exit_json itself.
    invocations = {
            'create': create_db_instance,
            'replicate': replicate_db_instance,
            'delete': delete_db_instance_or_snapshot,
            'facts': facts_db_instance_or_snapshot,
            'modify': modify_db_instance,
            'promote': promote_db_instance,
            'snapshot': snapshot_db_instance,
            'reboot': reboot_db_instance,
            'restore': restore_db_instance,
    }
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
    # set port to per db defaults if not specified
    if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
        # engines like 'oracle-se1' key their default port on the base name
        if '-' in module.params['db_engine']:
            engine = module.params['db_engine'].split('-')[0]
        else:
            engine = module.params['db_engine']
        module.params['port'] = DEFAULT_PORTS[engine.lower()]
    # connect to the rds endpoint
    if has_rds2:
        conn = RDS2Connection(module, region, **aws_connect_params)
    else:
        conn = RDSConnection(module, region, **aws_connect_params)
    invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Entry point when executed as an Ansible module.
if __name__ == '__main__':
    main()
|
xstaticxgpx/pylw | refs/heads/master | pylw/response.py | 2 | #!/usr/bin/env python
# Copyright 2015 Michael Gugino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''pylw.response. Contains Response object definition. Holds body, header,
and parses/signs cookies.'''
import Cookie
class Response(object):
    '''Response object for sending back to WSGI server. It holds the headers
    and body, and parses/signs cookies on behalf of the application.'''
    def __init__(self, http_cookies=None, signer=None):
        '''Initialize our object.

        :param http_cookies: raw HTTP_COOKIE header string (or None)
        :param signer: object providing dumps/loads to sign/verify cookies
        '''
        self.__header_dict = {}
        self.body = None
        self.status = None
        self.s = signer
        self.__cookies = self.parse_http_cookies(http_cookies)
    def parse_http_cookies(self, req_cookies):
        '''Parse http cookies into a SimpleCookie object'''
        C = Cookie.SimpleCookie()
        if req_cookies:
            C.load(req_cookies)
        return C
    def HTTP404Error(self):
        '''Set status and body for a standard 404 response.'''
        self.status = '404 Not Found'
        self.body = 'The requested URL was not found.'
    def get_headers(self):
        '''Returns headers in a list of tuples that is usable by WSGI'''
        # Content-Length must reflect the final body, so compute it here.
        if not self.body:
            self.body = ''
        self.__header_dict['Content-Length'] = str(len(self.body))
        cookies = []
        if self.__cookies:
            cookies += [("set-cookie", c.OutputString())
                        for c in self.__cookies.values()]
        return self.__header_dict.items() + cookies
    def get_cookies(self):
        '''Render all cookies as Set-Cookie header text.'''
        return self.__cookies.output()
    def get_cookie(self, cookie):
        '''Return the value of a cookie, or None when it is not set.'''
        try:
            return self.__cookies[cookie].value
        except KeyError:
            # Was a bare except, which also swallowed SystemExit and
            # KeyboardInterrupt; a missing cookie is only ever a KeyError.
            return None
    def get_signed_cookie(self, cookie):
        '''Return the verified value of a signed cookie, or None when the
        cookie is missing or its signature does not validate.'''
        try:
            return self.s.loads(self.__cookies[cookie].value)
        except Exception:
            # Narrowed from a bare except: signature verification may raise
            # signer-specific errors, but never trap SystemExit et al.
            return None
    def add_signed_cookie(self, k, v):
        '''Add a signed cookie'''
        v = self.s.dumps(v)
        self.__cookies[k] = v
    def add_cookie(self, k, v):
        '''Add an unsigned cookie'''
        self.__cookies[k] = v
    def add_header(self, k, v):
        '''Add a response header'''
        self.__header_dict[k] = v
|
IBM-Security/ibmsecurity | refs/heads/master | ibmsecurity/isam/base/network/felb/services/servers.py | 1 | import ibmsecurity.utilities.tools
import logging
from ibmsecurity.utilities import tools
# Module-level logger for this FELB servers helper.
logger = logging.getLogger(__name__)
# Base URI of the FELB services REST API on the appliance.
module_uri = "/isam/felb/configuration/services"
# Version/module/model preconditions forwarded to every appliance call.
requires_modules = None
requires_version = None
requires_model = "Appliance"
def add(isamAppliance, service_name, address, active, port, weight, secure, ssllabel, check_mode=False, force=False):
    """
    Creating a server

    Idempotent: skipped when the server already exists, unless force is set.
    """
    check_exist, warnings = _check_exist(isamAppliance, service_name, address, port)
    if not (force is True or check_exist is False):
        # Server already present and no force requested: nothing to change.
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    server_json = {
        "active": active,
        "address": address,
        "port": port,
        "weight": weight,
        "secure": secure,
        "ssllabel": ssllabel
    }
    return isamAppliance.invoke_post("Creating a server",
                                     "{0}/{1}/servers".format(module_uri, service_name),
                                     server_json,
                                     requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
def delete(isamAppliance, service_name, address, port, check_mode=False, force=False):
    """
    deletes a server from specified service name

    Idempotent: skipped when the server does not exist, unless force is set.
    """
    check_exist, warnings = _check_exist(isamAppliance, service_name, address, port)
    if not (force is True or check_exist is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    # Servers are addressed by the composite "<address>:<port>" id.
    server_id = address + ":" + str(port)
    return isamAppliance.invoke_delete("Deleting a server",
                                       "{0}/{1}/servers/{2}".format(module_uri, service_name, server_id),
                                       requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
def get(isamAppliance, service_name, address, port, check_mode=False, force=False):
    """
    Retrieves server from specified service name
    """
    # Servers are addressed by the composite "<address>:<port>" id.
    server_id = address + ":" + str(port)
    uri = "{0}/{1}/servers/{2}".format(module_uri, service_name, server_id)
    return isamAppliance.invoke_get("Retrieving a server", uri,
                                    requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
def get_all(isamAppliance, service_name, check_mode=False, force=False):
    """
    Retrieves a list of servers under a specified service
    """
    uri = "{0}/{1}/servers".format(module_uri, service_name)
    return isamAppliance.invoke_get("Retrieving servers for a service", uri,
                                    requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
def update(isamAppliance, service_name, address, active, port, weight, secure=False, ssllabel=None, new_address=None, new_port=None, check_mode=False, force=False):
    """
    Updating server

    new_address/new_port rename the server; otherwise the current address
    and port are resent unchanged.
    """
    # The PUT targets the server's current "<address>:<port>" id even when
    # renaming; the payload carries the (possibly new) values.
    server_id = address + ":" + str(port)
    json_data = {
        'active': active,
        'secure': secure,
        'ssllabel': ssllabel,
        'weight': weight,
        'address': address if new_address is None else new_address,
        'port': port if new_port is None else new_port,
    }
    change_required, warnings = _check_update(isamAppliance, service_name, address, port, json_data)
    if not (force is True or change_required is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_put("Updating a server",
                                    "{0}/{1}/servers/{2}".format(module_uri, service_name, server_id),
                                    json_data,
                                    requires_modules=requires_modules,
                                    requires_version=requires_version,
                                    requires_model=requires_model)
def _check_update(isamAppliance, service_name, address, port, json_data):
    """
    Idempotency check: does the server's current configuration differ from
    the desired json_data?  Returns (change_required, warnings).
    """
    ret_obj = get(isamAppliance, service_name, address, port)
    warnings = ret_obj['warnings']
    current = ret_obj['data']
    # The returned record carries an 'id' field that the desired payload
    # never has; drop it before comparing.  No 'id' means no record.
    if 'id' not in current:
        return False, warnings
    del current['id']
    current_sorted = tools.json_sort(current)
    desired_sorted = tools.json_sort(json_data)
    logger.debug("Sorted Existing Data:{0}".format(current_sorted))
    logger.debug("Sorted Desired Data:{0}".format(desired_sorted))
    return current_sorted != desired_sorted, warnings
def _check_exist(isamAppliance, service_name, address, port):
    """
    Idempotency check used by add/delete: does "<address>:<port>" exist
    under the service?  Returns (exists, warnings).
    """
    server_id = address + ":" + str(port)
    ret_obj = get_all(isamAppliance, service_name)
    warnings = ret_obj['warnings']
    exists = any(obj['id'] == server_id for obj in ret_obj['data'])
    return exists, warnings
def compare(isamAppliance1, service_name1, isamAppliance2, service_name2):
    """
    Compare cluster configuration between two appliances
    """
    servers1 = get_all(isamAppliance1, service_name1)
    servers2 = get_all(isamAppliance2, service_name2)
    return ibmsecurity.utilities.tools.json_compare(servers1, servers2, deleted_keys=[])
|
yjxtogo/horizon | refs/heads/master | openstack_dashboard/test/helpers.py | 19 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from functools import wraps # noqa
import json
import os
from ceilometerclient.v2 import client as ceilometer_client
from cinderclient import client as cinder_client
from django.conf import settings
from django.contrib.messages.storage import default_storage # noqa
from django.core.handlers import wsgi
from django.core import urlresolvers
from django.test.client import RequestFactory # noqa
from django.test import utils as django_test_utils
from django.utils.importlib import import_module # noqa
from django.utils import unittest
import glanceclient
from heatclient import client as heat_client
import httplib2
from keystoneclient.v2_0 import client as keystone_client
import mock
import mox
from neutronclient.v2_0 import client as neutron_client
from novaclient.v2 import client as nova_client
from openstack_auth import user
from openstack_auth import utils
from saharaclient import client as sahara_client
from swiftclient import client as swift_client
from troveclient import client as trove_client
from horizon import base
from horizon import conf
from horizon.test import helpers as horizon_helpers
from openstack_dashboard import api
from openstack_dashboard import context_processors
from openstack_dashboard.test.test_data import utils as test_utils
# Makes output of failing mox tests much easier to read.
# (Monkey-patch: WSGIRequest's default repr dumps the entire environ dict.)
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
def create_stubs(stubs_to_create=None):
    """decorator to simplify setting up multiple stubs at once via mox

    :param stubs_to_create: methods to stub in one or more modules
    :type stubs_to_create: dict

    The keys are python paths to the module containing the methods to mock.

    To mock a method in openstack_dashboard/api/nova.py, the key is::

        api.nova

    The values are either a tuple or list of methods to mock in the module
    indicated by the key.

    For example::

        ('server_list',)
            -or-
        ('flavor_list', 'server_list',)
            -or-
        ['flavor_list', 'server_list']

    Additionally, multiple modules can be mocked at once::

        {
            api.nova: ('flavor_list', 'server_list'),
            api.glance: ('image_list_detailed',),
        }

    """
    # Default to None instead of a mutable ``{}`` default argument, which
    # is a single shared object across every call of this function.
    if stubs_to_create is None:
        stubs_to_create = {}
    if not isinstance(stubs_to_create, dict):
        raise TypeError("create_stub must be passed a dict, but a %s was "
                        "given." % type(stubs_to_create).__name__)

    def inner_stub_out(fn):
        @wraps(fn)
        def instance_stub_out(self, *args, **kwargs):
            for key in stubs_to_create:
                if not (isinstance(stubs_to_create[key], tuple) or
                        isinstance(stubs_to_create[key], list)):
                    raise TypeError("The values of the create_stub "
                                    "dict must be lists or tuples, but "
                                    "is a %s."
                                    % type(stubs_to_create[key]).__name__)

                for value in stubs_to_create[key]:
                    self.mox.StubOutWithMock(key, value)
            return fn(self, *args, **kwargs)
        return instance_stub_out
    return inner_stub_out
class RequestFactoryWithMessages(RequestFactory):
    """RequestFactory whose requests support contrib.messages and auth."""

    def _augment(self, req):
        # Attach the authenticated user, a bare session, and message
        # storage so views using django.contrib.messages work under test.
        req.user = utils.get_user(req)
        req.session = []
        req._messages = default_storage(req)
        return req

    def get(self, *args, **kwargs):
        return self._augment(
            super(RequestFactoryWithMessages, self).get(*args, **kwargs))

    def post(self, *args, **kwargs):
        return self._augment(
            super(RequestFactoryWithMessages, self).post(*args, **kwargs))
@unittest.skipIf(os.environ.get('SKIP_UNITTESTS', False),
"The SKIP_UNITTESTS env variable is set.")
class TestCase(horizon_helpers.TestCase):
"""Specialized base test case class for Horizon.
It gives access to numerous additional features:
* A full suite of test data through various attached objects and
managers (e.g. ``self.servers``, ``self.user``, etc.). See the
docs for
:class:`~openstack_dashboard.test.test_data.utils.TestData`
for more information.
* The ``mox`` mocking framework via ``self.mox``.
* A set of request context data via ``self.context``.
* A ``RequestFactory`` class which supports Django's ``contrib.messages``
framework via ``self.factory``.
* A ready-to-go request object via ``self.request``.
* The ability to override specific time data controls for easier testing.
* Several handy additional assertion methods.
"""
def setUp(self):
def fake_conn_request(*args, **kwargs):
raise Exception("An external URI request tried to escape through "
"an httplib2 client. Args: %s, kwargs: %s"
% (args, kwargs))
self._real_conn_request = httplib2.Http._conn_request
httplib2.Http._conn_request = fake_conn_request
self._real_context_processor = context_processors.openstack
context_processors.openstack = lambda request: self.context
self.patchers = {}
self.add_panel_mocks()
super(TestCase, self).setUp()
def _setup_test_data(self):
super(TestCase, self)._setup_test_data()
test_utils.load_test_data(self)
self.context = {'authorized_tenants': self.tenants.list()}
def _setup_factory(self):
# For some magical reason we need a copy of this here.
self.factory = RequestFactoryWithMessages()
def _setup_user(self):
self._real_get_user = utils.get_user
tenants = self.context['authorized_tenants']
self.setActiveUser(id=self.user.id,
token=self.token,
username=self.user.name,
domain_id=self.domain.id,
tenant_id=self.tenant.id,
service_catalog=self.service_catalog,
authorized_tenants=tenants)
def _setup_request(self):
super(TestCase, self)._setup_request()
self.request.session['token'] = self.token.id
def add_panel_mocks(self):
"""Global mocks on panels that get called on all views."""
self.patchers['aggregates'] = mock.patch(
'openstack_dashboard.dashboards.admin'
'.aggregates.panel.Aggregates.can_access',
mock.Mock(return_value=True))
self.patchers['aggregates'].start()
def tearDown(self):
httplib2.Http._conn_request = self._real_conn_request
context_processors.openstack = self._real_context_processor
utils.get_user = self._real_get_user
mock.patch.stopall()
super(TestCase, self).tearDown()
def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
service_catalog=None, tenant_name=None, roles=None,
authorized_tenants=None, enabled=True, domain_id=None):
def get_user(request):
return user.User(id=id,
token=token,
user=username,
domain_id=domain_id,
tenant_id=tenant_id,
service_catalog=service_catalog,
roles=roles,
enabled=enabled,
authorized_tenants=authorized_tenants,
endpoint=settings.OPENSTACK_KEYSTONE_URL)
utils.get_user = get_user
def assertRedirectsNoFollow(self, response, expected_url):
"""Check for redirect.
Asserts that the given response issued a 302 redirect without
processing the view which is redirected to.
"""
assert (response.status_code / 100 == 3), \
"The response did not return a redirect."
self.assertEqual(response._headers.get('location', None),
('Location', settings.TESTSERVER + expected_url))
self.assertEqual(response.status_code, 302)
def assertNoFormErrors(self, response, context_name="form"):
"""Checks for no form errors.
Asserts that the response either does not contain a form in its
context, or that if it does, that form has no errors.
"""
context = getattr(response, "context", {})
if not context or context_name not in context:
return True
errors = response.context[context_name]._errors
assert len(errors) == 0, \
"Unexpected errors were found on the form: %s" % errors
def assertFormErrors(self, response, count=0, message=None,
context_name="form"):
"""Check for form errors.
Asserts that the response does contain a form in its
context, and that form has errors, if count were given,
it must match the exact numbers of errors
"""
context = getattr(response, "context", {})
assert (context and context_name in context), \
"The response did not contain a form."
errors = response.context[context_name]._errors
if count:
assert len(errors) == count, \
"%d errors were found on the form, %d expected" % \
(len(errors), count)
if message and message not in unicode(errors):
self.fail("Expected message not found, instead found: %s"
% ["%s: %s" % (key, [e for e in field_errors]) for
(key, field_errors) in errors.items()])
else:
assert len(errors) > 0, "No errors were found on the form"
def assertStatusCode(self, response, expected_code):
    """Validates an expected status code.

    Matches camel case of other assert functions
    """
    actual = response.status_code
    if actual != expected_code:
        self.fail('status code %r != %r: %s' % (actual,
                                                expected_code,
                                                response.content))
def assertItemsCollectionEqual(self, response, items_list):
    """Assert the response body is exactly the JSON items collection."""
    expected_body = '{"items": ' + json.dumps(items_list) + "}"
    self.assertEqual(response.content, expected_body)
@staticmethod
def mock_rest_request(**args):
    """Build a Mock standing in for an authenticated AJAX request.

    Keyword arguments override or extend the default configuration.
    """
    # dict(defaults, **args) lets callers override any default key.
    combined = dict({
        'user.is_authenticated.return_value': True,
        'is_ajax.return_value': True,
        'policy.check.return_value': True,
        'body': ''
    }, **args)
    return mock.Mock(**combined)
class BaseAdminViewTests(TestCase):
    """Sets an active user with the "admin" role.

    For testing admin-only views and functionality.
    """
    def setActiveUser(self, *args, **kwargs):
        # Inject the admin role unless the caller supplied roles explicitly.
        if 'roles' not in kwargs:
            kwargs['roles'] = [self.roles.admin._info]
        super(BaseAdminViewTests, self).setActiveUser(*args, **kwargs)

    def setSessionValues(self, **kwargs):
        """Persist the given values into a file-backed session store and
        point the test client's session cookie at it."""
        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        for key, value in kwargs.items():
            store[key] = value
            self.request.session[key] = value
        store.save()
        self.session = store
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
class APITestCase(TestCase):
    """Testing APIs.

    For use with tests which deal with the underlying clients rather than
    stubbing out the openstack_dashboard.api.* methods.

    ``setUp`` swaps every ``api.<service>.<service>client`` factory for one
    that returns a mox mock; ``tearDown`` restores the originals.
    """
    def setUp(self):
        super(APITestCase, self).setUp()
        utils.patch_middleware_get_user()

        def fake_keystoneclient(request, admin=False):
            """Returns the stub keystoneclient.

            Only necessary because the function takes too many arguments to
            conveniently be a lambda.
            """
            return self.stub_keystoneclient()

        # Store the original clients
        self._original_glanceclient = api.glance.glanceclient
        self._original_keystoneclient = api.keystone.keystoneclient
        self._original_novaclient = api.nova.novaclient
        self._original_neutronclient = api.neutron.neutronclient
        self._original_cinderclient = api.cinder.cinderclient
        self._original_heatclient = api.heat.heatclient
        self._original_ceilometerclient = api.ceilometer.ceilometerclient
        self._original_troveclient = api.trove.troveclient
        self._original_saharaclient = api.sahara.client

        # Replace the clients with our stubs.
        api.glance.glanceclient = lambda request: self.stub_glanceclient()
        api.keystone.keystoneclient = fake_keystoneclient
        api.nova.novaclient = lambda request: self.stub_novaclient()
        api.neutron.neutronclient = lambda request: self.stub_neutronclient()
        api.cinder.cinderclient = lambda request: self.stub_cinderclient()
        api.heat.heatclient = (lambda request, password=None:
                               self.stub_heatclient())
        api.ceilometer.ceilometerclient = (lambda request:
                                           self.stub_ceilometerclient())
        api.trove.troveclient = lambda request: self.stub_troveclient()
        api.sahara.client = lambda request: self.stub_saharaclient()

    def tearDown(self):
        # Restore the real factories so later tests see unstubbed APIs.
        super(APITestCase, self).tearDown()
        api.glance.glanceclient = self._original_glanceclient
        api.nova.novaclient = self._original_novaclient
        api.keystone.keystoneclient = self._original_keystoneclient
        api.neutron.neutronclient = self._original_neutronclient
        api.cinder.cinderclient = self._original_cinderclient
        api.heat.heatclient = self._original_heatclient
        api.ceilometer.ceilometerclient = self._original_ceilometerclient
        api.trove.troveclient = self._original_troveclient
        api.sahara.client = self._original_saharaclient

    # Each stub_*client() below lazily creates, memoises on ``self`` and
    # returns a mox mock of the corresponding service client class.
    def stub_novaclient(self):
        if not hasattr(self, "novaclient"):
            self.mox.StubOutWithMock(nova_client, 'Client')
            self.novaclient = self.mox.CreateMock(nova_client.Client)
        return self.novaclient

    def stub_cinderclient(self):
        if not hasattr(self, "cinderclient"):
            self.mox.StubOutWithMock(cinder_client, 'Client')
            self.cinderclient = self.mox.CreateMock(cinder_client.Client)
        return self.cinderclient

    def stub_keystoneclient(self):
        if not hasattr(self, "keystoneclient"):
            self.mox.StubOutWithMock(keystone_client, 'Client')
            # NOTE(saschpe): Mock properties, MockObject.__init__ ignores them:
            keystone_client.Client.auth_token = 'foo'
            keystone_client.Client.service_catalog = None
            keystone_client.Client.tenant_id = '1'
            keystone_client.Client.tenant_name = 'tenant_1'
            keystone_client.Client.management_url = ""
            keystone_client.Client.__dir__ = lambda: []
            self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
        return self.keystoneclient

    def stub_glanceclient(self):
        if not hasattr(self, "glanceclient"):
            self.mox.StubOutWithMock(glanceclient, 'Client')
            self.glanceclient = self.mox.CreateMock(glanceclient.Client)
        return self.glanceclient

    def stub_neutronclient(self):
        if not hasattr(self, "neutronclient"):
            self.mox.StubOutWithMock(neutron_client, 'Client')
            self.neutronclient = self.mox.CreateMock(neutron_client.Client)
        return self.neutronclient

    def stub_swiftclient(self, expected_calls=1):
        # Swift connections are created per request, so register one
        # Connection(...) expectation for each call the test will make.
        if not hasattr(self, "swiftclient"):
            self.mox.StubOutWithMock(swift_client, 'Connection')
            self.swiftclient = self.mox.CreateMock(swift_client.Connection)
            while expected_calls:
                swift_client.Connection(None,
                                        mox.IgnoreArg(),
                                        None,
                                        preauthtoken=mox.IgnoreArg(),
                                        preauthurl=mox.IgnoreArg(),
                                        cacert=None,
                                        insecure=False,
                                        auth_version="2.0") \
                    .AndReturn(self.swiftclient)
                expected_calls -= 1
        return self.swiftclient

    def stub_heatclient(self):
        if not hasattr(self, "heatclient"):
            self.mox.StubOutWithMock(heat_client, 'Client')
            self.heatclient = self.mox.CreateMock(heat_client.Client)
        return self.heatclient

    def stub_ceilometerclient(self):
        if not hasattr(self, "ceilometerclient"):
            self.mox.StubOutWithMock(ceilometer_client, 'Client')
            self.ceilometerclient = self.mox.\
                CreateMock(ceilometer_client.Client)
        return self.ceilometerclient

    def stub_troveclient(self):
        if not hasattr(self, "troveclient"):
            self.mox.StubOutWithMock(trove_client, 'Client')
            self.troveclient = self.mox.CreateMock(trove_client.Client)
        return self.troveclient

    def stub_saharaclient(self):
        if not hasattr(self, "saharaclient"):
            self.mox.StubOutWithMock(sahara_client, 'Client')
            self.saharaclient = self.mox.CreateMock(sahara_client.Client)
        return self.saharaclient
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTestCase(horizon_helpers.SeleniumTestCase):
    """Selenium-backed test case pre-loaded with stub OpenStack test data.

    Skipped entirely unless the WITH_SELENIUM environment variable is set.
    """
    def setUp(self):
        super(SeleniumTestCase, self).setUp()

        test_utils.load_test_data(self)
        self.mox = mox.Mox()

        # Remember the real get_user so tearDown can restore it after
        # setActiveUser() replaces it below.
        self._real_get_user = utils.get_user
        self.setActiveUser(id=self.user.id,
                           token=self.token,
                           username=self.user.name,
                           tenant_id=self.tenant.id,
                           service_catalog=self.service_catalog,
                           authorized_tenants=self.tenants.list())
        self.patchers = {}
        # Always allow access to the admin aggregates panel during tests.
        self.patchers['aggregates'] = mock.patch(
            'openstack_dashboard.dashboards.admin'
            '.aggregates.panel.Aggregates.can_access',
            mock.Mock(return_value=True))
        self.patchers['aggregates'].start()
        os.environ["HORIZON_TEST_RUN"] = "True"

    def tearDown(self):
        self.mox.UnsetStubs()
        utils.get_user = self._real_get_user
        mock.patch.stopall()
        self.mox.VerifyAll()
        del os.environ["HORIZON_TEST_RUN"]

    def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
                      service_catalog=None, tenant_name=None, roles=None,
                      authorized_tenants=None, enabled=True):
        # Replace utils.get_user with a closure returning a canned User
        # built from the supplied attributes.
        def get_user(request):
            return user.User(id=id,
                             token=token,
                             user=username,
                             tenant_id=tenant_id,
                             service_catalog=service_catalog,
                             roles=roles,
                             enabled=enabled,
                             authorized_tenants=authorized_tenants,
                             endpoint=settings.OPENSTACK_KEYSTONE_URL)
        utils.get_user = get_user
class SeleniumAdminTestCase(SeleniumTestCase):
    """Version of AdminTestCase for Selenium.

    Sets an active user with the "admin" role for testing admin-only views and
    functionality.
    """
    def setActiveUser(self, *args, **kwargs):
        # Fall back to the admin role when no roles were supplied.
        if 'roles' not in kwargs:
            kwargs['roles'] = [self.roles.admin._info]
        super(SeleniumAdminTestCase, self).setActiveUser(*args, **kwargs)
def my_custom_sort(flavor):
    """Sort key for test flavors: position in a fixed, hand-picked order."""
    ranking = {name: index for index, name in enumerate(
        ('m1.secret', 'm1.tiny', 'm1.massive', 'm1.metadata'))}
    return ranking[flavor.name]
class PluginTestCase(TestCase):
    """Test case for testing plugin system of Horizon.

    For use with tests which deal with the pluggable dashboard and panel
    configuration, it takes care of backing up and restoring the Horizon
    configuration.
    """
    def setUp(self):
        super(PluginTestCase, self).setUp()
        self.old_horizon_config = conf.HORIZON_CONFIG
        conf.HORIZON_CONFIG = conf.LazySettings()
        base.Horizon._urls()
        # Store our original dashboards
        self._discovered_dashboards = base.Horizon._registry.keys()
        # Gather up and store our original panels for each dashboard
        self._discovered_panels = {}
        for dash in self._discovered_dashboards:
            panels = base.Horizon._registry[dash]._registry.keys()
            self._discovered_panels[dash] = panels

    def tearDown(self):
        super(PluginTestCase, self).tearDown()
        conf.HORIZON_CONFIG = self.old_horizon_config
        # Destroy our singleton and re-create it.
        base.HorizonSite._instance = None
        del base.Horizon
        base.Horizon = base.HorizonSite()
        # Reload the convenience references to Horizon stored in __init__
        reload(import_module("horizon"))
        # Re-register our original dashboards and panels.
        # This is necessary because autodiscovery only works on the first
        # import, and calling reload introduces innumerable additional
        # problems. Manual re-registration is the only good way for testing.
        for dash in self._discovered_dashboards:
            base.Horizon.register(dash)
            for panel in self._discovered_panels[dash]:
                dash.register(panel)
        self._reload_urls()

    def _reload_urls(self):
        """Cleans up URLs.

        Clears out the URL caches, reloads the root urls module, and
        re-triggers the autodiscovery mechanism for Horizon. Allows URLs
        to be re-calculated after registering new dashboards. Useful
        only for testing and should never be used on a live site.
        """
        urlresolvers.clear_url_caches()
        reload(import_module(settings.ROOT_URLCONF))
        base.Horizon._urls()
class update_settings(django_test_utils.override_settings):
    """override_settings which allows override an item in dict.

    django original override_settings replaces a dict completely,
    however OpenStack dashboard setting has many dictionary configuration
    and there are test case where we want to override only one item in
    a dictionary and keep other items in the dictionary.

    This version of override_settings allows this if keep_dict is True.
    If keep_dict False is specified, the original behavior of
    Django override_settings is used.
    """

    def __init__(self, keep_dict=True, **kwargs):
        if keep_dict:
            # Merge each mapping override into a shallow copy of the
            # current setting instead of replacing it wholesale.
            for key, new_value in kwargs.items():
                current = getattr(settings, key, None)
                both_mappings = (isinstance(new_value, collections.Mapping)
                                 and isinstance(current, collections.Mapping))
                if both_mappings:
                    merged = copy.copy(current)
                    merged.update(new_value)
                    kwargs[key] = merged
        super(update_settings, self).__init__(**kwargs)
|
zacko-belsch/pazookle | refs/heads/master | tests/self_test.py | 1 | #!/usr/bin/env python
# see http://docs.python.org/2/library/unittest.html
# or http://docs.python.org/2/library/test.html
import unittest
from StringIO import StringIO
from pazookle.ugen import UGen,Mixer,Pan
from pazookle.generate import Periodic
class TestUGen(unittest.TestCase):
    """Exercise the UGen patching DSL.

    Covers the ``>>`` / ``//`` connect/disconnect operators, the ``+`` /
    ``*`` / ``%`` control-input sugar (bias, gain, freq), named-port
    lookup and assignment, and stereo left/right routing.  Each test
    builds a small graph and compares the combined ``transcript()`` of
    its nodes against an expected flush-left string.

    Dead commented-out duplicates of the ``+=`` port tests (the former
    ``*_7c`` / ``*_7d`` variants) have been removed.
    """

    def setUp(self):
        # Deterministic transcripts: suppress per-instance id numbering.
        UGen.set_debug("stifle ids")

    def test_inline_connect(self):
        # a >> b >> c          a is input to b
        #                      b is input to c
        expected = \
"""
a
b in[a]
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b >> c

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_list_connect(self):
        # a >> [b,c]           a is input to b
        #                      a is input to c
        expected = \
"""
a
b in[a]
c in[a]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> [b,c]

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_disallowed_list_connect(self):
        self.assertRaises(TypeError,self._test_disallowed_list_connect)

    def _test_disallowed_list_connect(self):
        # a >> [b,c] >> d      not allowed
        #                      (only allowed at right end of chain)
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")
        d = UGen(name="d")

        a >> [b,c] >> d

    def test_added_connect(self):
        # b += a               a is input to b
        expected = \
"""
a
b in[a]
"""
        a = UGen(name="a")
        b = UGen(name="b")

        b += a

        self.assertEqual(self.transcript([a,b]),expected)

    def test_added_list_connect(self):
        # a += [b,c]           b is input to a
        #                      c is input to a
        expected = \
"""
a in[b,c]
b
c
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a += [b,c]

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_inline_bias_connect(self):
        # a + b >> c           a is input to b.bias
        #                      b (not b.bias) is input to c
        expected = \
"""
a
b bias[b~bias]
b~bias in[a] drives[b.bias]
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a + b >> c

        self.assertEqual(self.transcript([a,b,b._bias,c]),expected)

    def test_lookup_bias_connect(self):
        # a >> b["bias"] >> c  a is input to b.bias
        #                      b (not b.bias) is input to c
        expected = \
"""
a
b bias[b~bias]
b~bias in[a] drives[b.bias]
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b["bias"] >> c

        self.assertEqual(self.transcript([a,b,b._bias,c]),expected)

    def test_assigned_bias_connect(self):
        # b.bias = a           a is input to b.bias
        expected = \
"""
a
b bias[b~bias]
b~bias in[a] drives[b.bias]
"""
        a = UGen(name="a")
        b = UGen(name="b")

        b.bias = a

        self.assertEqual(self.transcript([a,b,b._bias]),expected)

    def test_assigned_bias_lookup_connect(self):
        # b["bias"] = a        a is input to b.bias
        expected = \
"""
a
b bias[b~bias]
b~bias in[a] drives[b.bias]
"""
        a = UGen(name="a")
        b = UGen(name="b")

        b["bias"] = a

        self.assertEqual(self.transcript([a,b,b._bias]),expected)

    def test_inline_gain_connect(self):
        # a * b >> c           a is input to b.gain
        #                      b (not b.gain) is input to c
        expected = \
"""
a
b gain[b~gain]
b~gain in[a] drives[b.gain]
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a * b >> c

        self.assertEqual(self.transcript([a,b,b._gain,c]),expected)

    def test_lookup_gain_connect(self):
        # a >> b["gain"] >> c  a is input to b.gain
        #                      b (not b.gain) is input to c
        expected = \
"""
a
b gain[b~gain]
b~gain in[a] drives[b.gain]
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b["gain"] >> c

        self.assertEqual(self.transcript([a,b,b._gain,c]),expected)

    def test_assigned_gain_connect(self):
        # b.gain = a           a is input to b.gain
        expected = \
"""
a
b gain[b~gain]
b~gain in[a] drives[b.gain]
"""
        a = UGen(name="a")
        b = UGen(name="b")

        b.gain = a

        self.assertEqual(self.transcript([a,b,b._gain]),expected)

    def test_assigned_gain_lookup_connect(self):
        # b["gain"] = a        a is input to b.gain
        expected = \
"""
a
b gain[b~gain]
b~gain in[a] drives[b.gain]
"""
        a = UGen(name="a")
        b = UGen(name="b")

        b["gain"] = a

        self.assertEqual(self.transcript([a,b,b._gain]),expected)

    def test_lookup_dry_connect(self):
        # a >> b["dry"] >> c   a is input to b.dry
        #                      b (not b.dry) is input to c
        expected = \
"""
a
b dry[b~dry]
b~dry in[a] drives[b.dry]
c in[b]
"""
        a = UGen(name="a")
        b = Mixer(name="b")
        c = UGen(name="c")

        a >> b["dry"] >> c

        self.assertEqual(self.transcript([a,b,b._dry,c]),expected)

    def test_assigned_dry_connect(self):
        # b.dry = a            a is input to b.dry
        expected = \
"""
a
b dry[b~dry]
b~dry in[a] drives[b.dry]
"""
        a = UGen(name="a")
        b = Mixer(name="b")

        b.dry = a

        self.assertEqual(self.transcript([a,b,b._dry]),expected)

    def test_assigned_dry_lookup_connect(self):
        # b["dry"] = a         a is input to b.dry
        expected = \
"""
a
b dry[b~dry]
b~dry in[a] drives[b.dry]
"""
        a = UGen(name="a")
        b = Mixer(name="b")

        b["dry"] = a

        self.assertEqual(self.transcript([a,b,b._dry]),expected)

    def test_lookup_wet_connect(self):
        # a >> b["wet"] >> c   a is input to b.wet
        #                      b (not b.wet) is input to c
        expected = \
"""
a
b wet[b~wet]
b~wet in[a] drives[b.wet]
c in[b]
"""
        a = UGen(name="a")
        b = Mixer(name="b")
        c = UGen(name="c")

        a >> b["wet"] >> c

        self.assertEqual(self.transcript([a,b,b._wet,c]),expected)

    def test_assigned_wet_connect(self):
        # b.wet = a            a is input to b.wet
        expected = \
"""
a
b wet[b~wet]
b~wet in[a] drives[b.wet]
"""
        a = UGen(name="a")
        b = Mixer(name="b")

        b.wet = a

        self.assertEqual(self.transcript([a,b,b._wet]),expected)

    def test_assigned_wet_lookup_connect(self):
        # b["wet"] = a         a is input to b.wet
        expected = \
"""
a
b wet[b~wet]
b~wet in[a] drives[b.wet]
"""
        a = UGen(name="a")
        b = Mixer(name="b")

        b["wet"] = a

        self.assertEqual(self.transcript([a,b,b._wet]),expected)

    def test_lookup_pan_connect(self):
        # a >> b["pan"] >> c   a is input to b.pan
        #                      b (not b.pan) is input to c
        expected = \
"""
a
b pan[b~pan]
b~pan in[a] drives[b.pan]
c in[b]
"""
        a = UGen(name="a")
        b = Pan(name="b")
        c = UGen(name="c")

        a >> b["pan"] >> c

        self.assertEqual(self.transcript([a,b,b._pan,c]),expected)

    def test_assigned_pan_connect(self):
        # b.pan = a            a is input to b.pan
        expected = \
"""
a
b pan[b~pan]
b~pan in[a] drives[b.pan]
"""
        a = UGen(name="a")
        b = Pan(name="b")

        b.pan = a

        self.assertEqual(self.transcript([a,b,b._pan]),expected)

    def test_assigned_pan_lookup_connect(self):
        # b["pan"] = a         a is input to b.pan
        expected = \
"""
a
b pan[b~pan]
b~pan in[a] drives[b.pan]
"""
        a = UGen(name="a")
        b = Pan(name="b")

        b["pan"] = a

        self.assertEqual(self.transcript([a,b,b._pan]),expected)

    def test_inline_freq_connect(self):
        # a % b >> c           a is input to b.freq
        #                      b (not b.freq) is input to c
        expected = \
"""
a
b freq[b~freq]
b~freq in[a] drives[b.freq]
c in[b]
"""
        a = UGen(name="a")
        b = Periodic(name="b")
        c = UGen(name="c")

        a % b >> c

        self.assertEqual(self.transcript([a,b,b._freq,c]),expected)

    def test_lookup_freq_connect(self):
        # a >> b["freq"] >> c  a is input to b.freq
        #                      b (not b.freq) is input to c
        expected = \
"""
a
b freq[b~freq]
b~freq in[a] drives[b.freq]
c in[b]
"""
        a = UGen(name="a")
        b = Periodic(name="b")
        c = UGen(name="c")

        a >> b["freq"] >> c

        self.assertEqual(self.transcript([a,b,b._freq,c]),expected)

    def test_assigned_freq_connect(self):
        # b.freq = a           a is input to b.freq
        expected = \
"""
a
b freq[b~freq]
b~freq in[a] drives[b.freq]
"""
        a = UGen(name="a")
        b = Periodic(name="b")

        b.freq = a

        self.assertEqual(self.transcript([a,b,b._freq]),expected)

    def test_assigned_freq_lookup_connect(self):
        # b["freq"] = a        a is input to b.freq
        expected = \
"""
a
b freq[b~freq]
b~freq in[a] drives[b.freq]
"""
        a = UGen(name="a")
        b = Periodic(name="b")

        b["freq"] = a

        self.assertEqual(self.transcript([a,b,b._freq]),expected)

    def test_lookup_phase_connect(self):
        # a >> b["phase"] >> c a is input to b.phase
        #                      b (not b.phase) is input to c
        expected = \
"""
a
b phase[b~phase]
b~phase in[a] drives[b.phase]
c in[b]
"""
        a = UGen(name="a")
        b = Periodic(name="b")
        c = UGen(name="c")

        a >> b["phase"] >> c

        self.assertEqual(self.transcript([a,b,b._phase,c]),expected)

    def test_assigned_phase_connect(self):
        # b.phase = a          a is input to b.phase
        expected = \
"""
a
b phase[b~phase]
b~phase in[a] drives[b.phase]
"""
        a = UGen(name="a")
        b = Periodic(name="b")

        b.phase = a

        self.assertEqual(self.transcript([a,b,b._phase]),expected)

    def test_assigned_phase_lookup_connect(self):
        # b["phase"] = a       a is input to b.phase
        expected = \
"""
a
b phase[b~phase]
b~phase in[a] drives[b.phase]
"""
        a = UGen(name="a")
        b = Periodic(name="b")

        b["phase"] = a

        self.assertEqual(self.transcript([a,b,b._phase]),expected)

    def test_disconnect(self):
        # a >> b >> c
        # a // b
        expected = \
"""
a
b
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b >> c
        a // b

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_inline_disconnect(self):
        # a >> b >> c
        # a // b // c
        expected = \
"""
a
b
c
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b >> c
        a // b // c

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_all_inputs_disconnect(self):
        # a >> b["bias"] >> c
        # a >> b
        # a // b
        expected = \
"""
a
b
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b["bias"] >> c
        a >> b
        a // b

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_bias_disconnect(self):
        # a >> b["bias"] >> c
        # a >> b
        # a // b["bias"]
        expected = \
"""
a
b in[a]
c in[b]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> b["bias"] >> c
        a >> b
        a // b["bias"]

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_LR_connect_to(self):
        # a >> c["left"]
        # b >> c["right"]
        expected = \
"""
a
b
c in[(a,>L),(b,>R)]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        a >> c["left"]
        b >> c["right"]

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_lookup_LR_connect(self):
        # a >> c["left"] >> d   a is input to c.left; c.left is input to d
        # b >> c["right"] >> e  b is input to c.right; c.right is input to e
        expected = \
"""
a
b
c in[(a,>L),(b,>R)]
d in[(c,L>)]
e in[(c,R>)]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")
        d = UGen(name="d")
        e = UGen(name="e")

        a >> c["left"] >> d
        b >> c["right"] >> e

        self.assertEqual(self.transcript([a,b,c,d,e]),expected)

    def test_assigned_LR_connect(self):
        # c.left = a
        # c.right = b
        expected = \
"""
a
b
c in[(a,>L),(b,>R)]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        c.left = a
        c.right = b

        self.assertEqual(self.transcript([a,b,c]),expected)

    def test_assigned_LR_lookup_connect(self):
        # c["left"] = a
        # c["right"] = b
        expected = \
"""
a
b
c in[(a,>L),(b,>R)]
"""
        a = UGen(name="a")
        b = UGen(name="b")
        c = UGen(name="c")

        c["left"] = a
        c["right"] = b

        self.assertEqual(self.transcript([a,b,c]),expected)

    def transcript(self, elements):
        """Render a leading blank line followed by each element's transcript."""
        f = StringIO()
        print >>f
        for x in elements:
            print >>f, x.transcript()
        return f.getvalue()
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
|
boghison/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/etree.py | 658 | from __future__ import absolute_import, division, unicode_literals
# Prefer an ordering-preserving dict for attributes; fall back to the
# third-party ``ordereddict`` backport, then to a plain dict (which loses
# attribute ordering but keeps the walker functional).
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        OrderedDict = dict
import gettext
_ = gettext.gettext

import re

from six import text_type

from . import _base
from ..utils import moduleFactoryFactory

# Splits an ElementTree "{namespace}tag" qualified name into its parts.
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
    """Build a treewalker namespace bound to one ElementTree implementation."""
    ElementTree = ElementTreeImplementation
    # The "tag" of a comment node is the Comment factory function itself;
    # capture it once so comments can be recognised by tag identity.
    ElementTreeCommentType = ElementTree.Comment("asd").tag

    class TreeWalker(_base.NonRecursiveTreeWalker):
        """Given the particular ElementTree representation, this implementation,
        to avoid using recursion, returns "nodes" as tuples with the following
        content:

        1. The current element

        2. The index of the element relative to its parent

        3. A stack of ancestor elements

        4. A flag "text", "tail" or None to indicate if the current node is a
           text node; either the text or tail of the current element (1)
        """
        def getNodeDetails(self, node):
            # Classify ``node`` and return the (type, ...) tuple that
            # NonRecursiveTreeWalker expects.
            if isinstance(node, tuple):  # It might be the root Element
                elt, key, parents, flag = node
                if flag in ("text", "tail"):
                    return _base.TEXT, getattr(elt, flag)
                else:
                    node = elt

            if not(hasattr(node, "tag")):
                # An ElementTree wrapper rather than an Element: use its root.
                node = node.getroot()

            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
                return (_base.DOCUMENT,)

            elif node.tag == "<!DOCTYPE>":
                return (_base.DOCTYPE, node.text,
                        node.get("publicId"), node.get("systemId"))

            elif node.tag == ElementTreeCommentType:
                return _base.COMMENT, node.text

            else:
                assert type(node.tag) == text_type, type(node.tag)
                # This is assumed to be an ordinary element
                match = tag_regexp.match(node.tag)
                if match:
                    namespace, tag = match.groups()
                else:
                    namespace = None
                    tag = node.tag
                attrs = OrderedDict()
                for name, value in list(node.attrib.items()):
                    match = tag_regexp.match(name)
                    if match:
                        attrs[(match.group(1), match.group(2))] = value
                    else:
                        attrs[(None, name)] = value
                # Final item is truthy when the element has children or text.
                return (_base.ELEMENT, namespace, tag,
                        attrs, len(node) or node.text)

        def getFirstChild(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                element, key, parents, flag = node, None, [], None

            if flag in ("text", "tail"):
                # Text pseudo-nodes never have children.
                return None
            else:
                if element.text:
                    # The element's leading text acts as its first child.
                    return element, key, parents, "text"
                elif len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None

        def getNextSibling(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                # After the leading text comes the first child element.
                if len(element):
                    parents.append(element)
                    return element[0], 0, parents, None
                else:
                    return None
            else:
                # After an element comes its tail text, then the next
                # sibling under the shared parent, if any.
                if element.tail and flag != "tail":
                    return element, key, parents, "tail"
                elif key < len(parents[-1]) - 1:
                    return parents[-1][key + 1], key + 1, parents, None
                else:
                    return None

        def getParentNode(self, node):
            if isinstance(node, tuple):
                element, key, parents, flag = node
            else:
                return None

            if flag == "text":
                # The owner element of a text pseudo-node is its parent.
                if not parents:
                    return element
                else:
                    return element, key, parents, None
            else:
                parent = parents.pop()
                if not parents:
                    return parent
                else:
                    return parent, list(parents[-1]).index(parent), parents, None

    # Expose ElementTree, TreeWalker, etc. as the generated module namespace.
    return locals()
# Memoising factory: returns (and caches) a treewalker module bound to a
# particular ElementTree implementation.
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
TRESCLOUD/odoo | refs/heads/Integracion&ControlDeCalidad | openerp/addons/base/ir/ir_sequence.py | 15 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
import openerp
from openerp.osv import osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class ir_sequence_type(openerp.osv.osv.osv):
    # Registry of sequence "types": a unique code plus a human-readable
    # name that ir.sequence records reference through their ``code`` field.
    _name = 'ir.sequence.type'
    _order = 'name'
    _columns = {
        'name': openerp.osv.fields.char('Name', size=64, required=True),
        'code': openerp.osv.fields.char('Code', size=32, required=True),
    }
    _sql_constraints = [
        ('code_unique', 'unique(code)', '`code` must be unique.'),
    ]
def _code_get(self, cr, uid, context=None):
cr.execute('select code, name from ir_sequence_type')
return cr.fetchall()
class ir_sequence(openerp.osv.osv.osv):
""" Sequence model.
The sequence model allows to define and use so-called sequence objects.
Such objects are used to generate unique identifiers in a transaction-safe
way.
"""
_name = 'ir.sequence'
_order = 'name'
def _get_number_next_actual(self, cr, user, ids, field_name, arg, context=None):
'''Return number from ir_sequence row when no_gap implementation,
and number from postgres sequence when standard implementation.'''
res = dict.fromkeys(ids)
for element in self.browse(cr, user, ids, context=context):
if element.implementation != 'standard':
res[element.id] = element.number_next
else:
# get number from postgres sequence. Cannot use
# currval, because that might give an error when
# not having used nextval before.
statement = (
"SELECT last_value, increment_by, is_called"
" FROM ir_sequence_%03d"
% element.id)
cr.execute(statement)
(last_value, increment_by, is_called) = cr.fetchone()
if is_called:
res[element.id] = last_value + increment_by
else:
res[element.id] = last_value
return res
def _set_number_next_actual(self, cr, uid, id, name, value, args=None, context=None):
return self.write(cr, uid, id, {'number_next': value or 0}, context=context)
# Field declarations of the ir.sequence model (class body continues here).
_columns = {
    'name': openerp.osv.fields.char('Name', size=64, required=True),
    # ``code`` ties the sequence to an ir.sequence.type record.
    'code': openerp.osv.fields.selection(_code_get, 'Code', size=64),
    'implementation': openerp.osv.fields.selection( # TODO update the view
        [('standard', 'Standard'), ('no_gap', 'No gap')],
        'Implementation', required=True,
        help="Two sequence object implementations are offered: Standard "
        "and 'No gap'. The later is slower than the former but forbids any"
        " gap in the sequence (while they are possible in the former)."),
    'active': openerp.osv.fields.boolean('Active'),
    'prefix': openerp.osv.fields.char('Prefix', size=64, help="Prefix value of the record for the sequence"),
    'suffix': openerp.osv.fields.char('Suffix', size=64, help="Suffix value of the record for the sequence"),
    'number_next': openerp.osv.fields.integer('Next Number', required=True, help="Next number of this sequence"),
    # Function field backed by _get/_set_number_next_actual above.
    'number_next_actual': openerp.osv.fields.function(_get_number_next_actual, fnct_inv=_set_number_next_actual, type='integer', required=True, string='Next Number', help='Next number that will be used. This number can be incremented frequently so the displayed value might already be obsolete'),
    'number_increment': openerp.osv.fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
    'padding' : openerp.osv.fields.integer('Number Padding', required=True, help="OpenERP will automatically adds some '0' on the left of the 'Next Number' to get the required padding size."),
    'company_id': openerp.osv.fields.many2one('res.company', 'Company'),
}
# Defaults: an active standard sequence counting 1, 2, 3, ... with no padding.
_defaults = {
    'implementation': 'standard',
    'active': True,
    'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
    'number_increment': 1,
    'number_next': 1,
    'number_next_actual': 1,
    'padding' : 0,
}
def init(self, cr):
    """Database-initialization hook called when the model is set up.

    Deliberately a no-op for now: the unique-index creation below is
    kept (unreachable) as documentation of the intended constraint.
    """
    return # Don't do the following index yet.
    # CONSTRAINT/UNIQUE INDEX on (code, company_id)
    # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
    # only support field names in constraint definitions, and we need a function here:
    # we need to special-case company_id to treat all NULL company_id as equal, otherwise
    # we would allow duplicate (code, NULL) ir_sequences.
    cr.execute("""
        SELECT indexname FROM pg_indexes WHERE indexname =
        'ir_sequence_unique_code_company_id_idx'""")
    if not cr.fetchone():
        cr.execute("""
            CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx
            ON ir_sequence (code, (COALESCE(company_id,-1)))""")
def _create_sequence(self, cr, id, number_increment, number_next):
    """Create the backing PostgreSQL sequence for record *id*.

    There is no access rights check.
    """
    if number_increment == 0:
        raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
    assert isinstance(id, (int, long))
    # The sequence name embeds only the asserted-integer id, so the
    # string interpolation cannot inject SQL; increment/start go through
    # query parameters.
    statement = "CREATE SEQUENCE ir_sequence_%03d INCREMENT BY %%s START WITH %%s" % id
    cr.execute(statement, (number_increment, number_next))
def _drop_sequence(self, cr, ids):
    """Drop the backing PostgreSQL sequence(s) if they exist.

    There is no access rights check.
    """
    if not isinstance(ids, (list, tuple)):
        ids = [ids]
    assert all(isinstance(i, (int, long)) for i in ids), \
        "Only ids in (int, long) allowed."
    names = ','.join('ir_sequence_%03d' % i for i in ids)
    # RESTRICT is the default drop behaviour; it refuses the drop while
    # any other object still depends on the sequence.
    cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names)
def _alter_sequence(self, cr, id, number_increment, number_next=None):
    """Alter the backing PostgreSQL sequence of record *id*.

    There is no access rights check.
    """
    if number_increment == 0:
        raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
    assert isinstance(id, (int, long))
    seq_name = 'ir_sequence_%03d' % (id,)
    cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name))
    if cr.fetchone() is None:
        # sequence is not created yet, we're inside create() so ignore it, will be set later
        return
    parts = ["ALTER SEQUENCE %s INCREMENT BY %d" % (seq_name, number_increment)]
    if number_next is not None:
        parts.append("RESTART WITH %d" % (number_next, ))
    cr.execute(" ".join(parts))
def create(self, cr, uid, values, context=None):
    """ Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
    """
    # Fill in defaults first so 'implementation', 'number_increment'
    # and 'number_next' are guaranteed to be present below.
    values = self._add_missing_default_values(cr, uid, values, context)
    values['id'] = super(ir_sequence, self).create(cr, uid, values, context)
    if values['implementation'] == 'standard':
        # Back the record with a native PostgreSQL sequence (fast, may
        # leave gaps on rollback); 'no_gap' rows keep state in the table.
        self._create_sequence(cr, values['id'], values['number_increment'], values['number_next'])
    return values['id']
def unlink(self, cr, uid, ids, context=None):
    """Delete the records, then drop their backing PostgreSQL sequences.

    Safe for 'no_gap' rows too: _drop_sequence uses DROP ... IF EXISTS.
    """
    super(ir_sequence, self).unlink(cr, uid, ids, context)
    self._drop_sequence(cr, ids)
    return True
def write(self, cr, uid, ids, values, context=None):
    """Update the records and keep the backing PostgreSQL sequences in
    sync with any implementation / increment / number_next change."""
    if not isinstance(ids, (list, tuple)):
        ids = [ids]
    new_implementation = values.get('implementation')
    # Snapshot the PRE-write state: the branching below compares each
    # row's old implementation against the requested one.
    rows = self.read(cr, uid, ids, ['implementation', 'number_increment', 'number_next'], context)
    super(ir_sequence, self).write(cr, uid, ids, values, context)
    for row in rows:
        # 4 cases: we test the previous impl. against the new one.
        i = values.get('number_increment', row['number_increment'])
        n = values.get('number_next', row['number_next'])
        if row['implementation'] == 'standard':
            if new_implementation in ('standard', None):
                # Implementation has NOT changed.
                # Only change sequence if really requested.
                if values.get('number_next'):
                    self._alter_sequence(cr, row['id'], i, n)
                else:
                    # Just in case only increment changed
                    self._alter_sequence(cr, row['id'], i)
            else:
                # standard -> no_gap: the PG sequence is no longer needed.
                self._drop_sequence(cr, row['id'])
        else:
            if new_implementation in ('no_gap', None):
                # no_gap -> no_gap: nothing to do, the table row holds state.
                pass
            else:
                # no_gap -> standard: create the PG sequence from scratch.
                self._create_sequence(cr, row['id'], i, n)
    return True
def _interpolate(self, s, d):
if s:
return s % d
return ''
def _interpolation_dict(self):
t = time.localtime() # Actually, the server is always in UTC.
return {
'year': time.strftime('%Y', t),
'month': time.strftime('%m', t),
'day': time.strftime('%d', t),
'y': time.strftime('%y', t),
'doy': time.strftime('%j', t),
'woy': time.strftime('%W', t),
'weekday': time.strftime('%w', t),
'h24': time.strftime('%H', t),
'h12': time.strftime('%I', t),
'min': time.strftime('%M', t),
'sec': time.strftime('%S', t),
}
def _next(self, cr, uid, seq_ids, context=None):
    """Draw the next value of one of *seq_ids* and return the rendered
    string ``prefix + zero-padded number + suffix``.

    When several candidate sequences exist (multi-company), one whose
    company matches ``context['force_company']`` (or, failing that, the
    user's own company) is preferred; otherwise the first one is used.
    """
    if not seq_ids:
        return False
    if context is None:
        context = {}
    force_company = context.get('force_company')
    if not force_company:
        force_company = self.pool.get('res.users').browse(cr, uid, uid).company_id.id
    sequences = self.read(cr, uid, seq_ids, ['name','company_id','implementation','number_next','prefix','suffix','padding'])
    preferred_sequences = [s for s in sequences if s['company_id'] and s['company_id'][0] == force_company ]
    seq = preferred_sequences[0] if preferred_sequences else sequences[0]
    if seq['implementation'] == 'standard':
        # Native PostgreSQL sequence: fast, but values may gap on rollback.
        cr.execute("SELECT nextval('ir_sequence_%03d')" % seq['id'])
        # NOTE(review): fetchone() yields a 1-tuple; the '%d' interpolation
        # at the bottom accepts a 1-tuple ('%d' % (n,)), so no [0] is
        # needed here — confirm this is intended rather than accidental.
        seq['number_next'] = cr.fetchone()
    else:
        # 'no_gap': lock the row first (NOWAIT raises instead of blocking)
        # then bump number_next transactionally so no value is skipped.
        cr.execute("SELECT number_next FROM ir_sequence WHERE id=%s FOR UPDATE NOWAIT", (seq['id'],))
        cr.execute("UPDATE ir_sequence SET number_next=number_next+number_increment WHERE id=%s ", (seq['id'],))
    d = self._interpolation_dict()
    try:
        interpolated_prefix = self._interpolate(seq['prefix'], d)
        interpolated_suffix = self._interpolate(seq['suffix'], d)
    except ValueError:
        raise osv.except_osv(_('Warning'), _('Invalid prefix or suffix for sequence \'%s\'') % (seq.get('name')))
    # '%%0%sd' % padding builds e.g. '%05d', which then formats the number.
    return interpolated_prefix + '%%0%sd' % seq['padding'] % seq['number_next'] + interpolated_suffix
def next_by_id(self, cr, uid, sequence_id, context=None):
    """ Draw an interpolated string using the specified sequence."""
    self.check_access_rights(cr, uid, 'read')
    # Restrict to sequences visible to the user: any existing company
    # plus company-less records (company_id False).
    company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
    ids = self.search(cr, uid, ['&',('id','=', sequence_id),('company_id','in',company_ids)])
    return self._next(cr, uid, ids, context)
def next_by_code(self, cr, uid, sequence_code, context=None):
    """ Draw an interpolated string using a sequence with the requested code.
    If several sequences with the correct code are available to the user
    (multi-company cases), the one from the user's current company will
    be used.

    :param dict context: context dictionary may contain a
        ``force_company`` key with the ID of the company to
        use instead of the user's current company for the
        sequence selection. A matching sequence for that
        specific company will get higher priority.
    """
    self.check_access_rights(cr, uid, 'read')
    # Same visibility rule as next_by_id: any company, or no company.
    company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
    ids = self.search(cr, uid, ['&', ('code', '=', sequence_code), ('company_id', 'in', company_ids)])
    # _next applies the per-company preference documented above.
    return self._next(cr, uid, ids, context)
def get_id(self, cr, uid, sequence_code_or_id, code_or_id='id', context=None):
    """Deprecated dispatcher: draw the next value of a sequence given
    either its database id (``code_or_id='id'``, the default) or its
    code. Prefer next_by_id() / next_by_code().
    """
    # TODO: bump up to warning after 6.1 release
    _logger.debug("ir_sequence.get() and ir_sequence.get_id() are deprecated. "
        "Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().")
    draw = self.next_by_id if code_or_id == 'id' else self.next_by_code
    return draw(cr, uid, sequence_code_or_id, context)
def get(self, cr, uid, code, context=None):
    """ Draw an interpolated string using the specified sequence.
    The sequence to use is specified by its code. This method is
    deprecated.
    """
    # Thin deprecated wrapper: delegate to get_id() in 'code' mode.
    return self.get_id(cr, uid, code, 'code', context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Sing-Li/go-buildpack | refs/heads/master | builds/runtimes/python-2.7.6/lib/python2.7/test/test_univnewlines2k.py | 137 | # Tests universal newline support for both reading and parsing files.
import unittest
import os
import sys
from test import test_support
# Skip the whole module when the interpreter lacks universal-newline
# support. The original used the Python-2-only ``raise E, "msg"`` form;
# calling the exception class works identically on Python 2 and 3.
if not hasattr(sys.stdin, 'newlines'):
    raise unittest.SkipTest(
        "This Python does not have universal newline support")
# A long filler line, big enough to defeat fileobject.c's fast path.
FATX = 'x' * (2**14)

DATA_TEMPLATE = [
    "line1=1",
    "line2='this is a very long line designed to go past the magic " +
    "hundred character limit that is inside fileobject.c and which " +
    "is meant to speed up the common case, but we also want to test " +
    "the uncommon case, naturally.'",
    "def line3():pass",
    "line4 = '%s'" % FATX,
]

DATA_LF = "\n".join(DATA_TEMPLATE) + "\n"
DATA_CR = "\r".join(DATA_TEMPLATE) + "\r"
DATA_CRLF = "\r\n".join(DATA_TEMPLATE) + "\r\n"

# Note that DATA_MIXED also tests the ability to recognize a lone \r
# before end-of-file.
DATA_MIXED = "\n".join(DATA_TEMPLATE) + "\r"

# The original read ``[x + "\n" for x in DATA_TEMPLATE]`` followed by
# ``del x``: under Python 2 the comprehension leaked its loop variable.
# Under Python 3 it does not, so ``del x`` raises NameError. Dropping the
# ``del`` leaves the module namespace identical on both versions.
DATA_SPLIT = [line + "\n" for line in DATA_TEMPLATE]
class TestGenericUnivNewlines(unittest.TestCase):
    # use a class variable DATA to define the data to write to the file
    # and a class variable NEWLINE to set the expected newlines value
    READMODE = 'U'    # universal-newline read mode (Python 2 'U' flag)
    WRITEMODE = 'wb'  # write raw bytes so the newline style is preserved

    def setUp(self):
        # Write the subclass-specific raw data verbatim to the test file.
        with open(test_support.TESTFN, self.WRITEMODE) as fp:
            fp.write(self.DATA)

    def tearDown(self):
        try:
            os.unlink(test_support.TESTFN)
        except:
            # Best-effort cleanup; the file may already be gone.
            pass

    def test_read(self):
        # A full read() must translate every newline style to '\n'.
        with open(test_support.TESTFN, self.READMODE) as fp:
            data = fp.read()
        self.assertEqual(data, DATA_LF)
        self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))

    def test_readlines(self):
        # readlines() must yield '\n'-terminated lines regardless of style.
        with open(test_support.TESTFN, self.READMODE) as fp:
            data = fp.readlines()
        self.assertEqual(data, DATA_SPLIT)
        self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))

    def test_readline(self):
        # Repeated readline() calls must agree with readlines().
        with open(test_support.TESTFN, self.READMODE) as fp:
            data = []
            d = fp.readline()
            while d:
                data.append(d)
                d = fp.readline()
        self.assertEqual(data, DATA_SPLIT)
        self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))

    def test_seek(self):
        # tell()/seek() round-trip must land on the same line boundary
        # even though newline translation changes apparent offsets.
        with open(test_support.TESTFN, self.READMODE) as fp:
            fp.readline()
            pos = fp.tell()
            data = fp.readlines()
            self.assertEqual(data, DATA_SPLIT[1:])
            fp.seek(pos)
            data = fp.readlines()
            self.assertEqual(data, DATA_SPLIT[1:])

    def test_execfile(self):
        # The data doubles as executable Python source (see DATA_TEMPLATE);
        # execfile must parse it whatever the on-disk newline style.
        # (Python 2 only: execfile was removed in Python 3.)
        namespace = {}
        with test_support.check_py3k_warnings():
            execfile(test_support.TESTFN, namespace)
        func = namespace['line3']
        self.assertEqual(func.func_code.co_firstlineno, 3)
        self.assertEqual(namespace['line4'], FATX)
class TestNativeNewlines(TestGenericUnivNewlines):
    # Control case: text mode read/write of native '\n' data; the file
    # object never reports a newlines value.
    NEWLINE = None
    DATA = DATA_LF
    READMODE = 'r'
    WRITEMODE = 'w'
class TestCRNewlines(TestGenericUnivNewlines):
    # Old-Mac-style line endings.
    NEWLINE = '\r'
    DATA = DATA_CR
class TestLFNewlines(TestGenericUnivNewlines):
    # Unix-style line endings read in universal mode.
    NEWLINE = '\n'
    DATA = DATA_LF
class TestCRLFNewlines(TestGenericUnivNewlines):
    # Windows-style line endings.
    NEWLINE = '\r\n'
    DATA = DATA_CRLF

    def test_tell(self):
        # ``newlines`` must stay None until a newline has actually been
        # seen, and report '\r\n' once the first line is consumed.
        with open(test_support.TESTFN, self.READMODE) as fp:
            self.assertEqual(repr(fp.newlines), repr(None))
            data = fp.readline()  # NOTE(review): value unused; the read itself is the point
            pos = fp.tell()       # NOTE(review): unused; exercises tell() on a 'U'-mode file
            self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
class TestMixedNewlines(TestGenericUnivNewlines):
    # Mixed '\n' bodies with a lone trailing '\r'; newlines reports the
    # tuple of every style encountered.
    NEWLINE = ('\r', '\n')
    DATA = DATA_MIXED
def test_main():
    # Run every concrete newline-test class; the generic base class is
    # excluded because it defines no DATA/NEWLINE of its own.
    test_support.run_unittest(
        TestNativeNewlines,
        TestCRNewlines,
        TestLFNewlines,
        TestCRLFNewlines,
        TestMixedNewlines
    )
if __name__ == '__main__':
test_main()
|
googlearchive/pywebsocket | refs/heads/master | test/set_sys_path.py | 496 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Configuration for testing.
Test files should import this module before mod_pywebsocket.
"""
import os
import sys

# Make the package root (the parent of this test directory) importable so
# that ``import mod_pywebsocket`` works regardless of the current working
# directory. os.path.dirname(p) is by definition os.path.split(p)[0].
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
|
smourph/PGo-TrainerTools | refs/heads/master | pgoapi/protos/POGOProtos/Networking/Requests/Messages/DiskEncounterMessage_pb2.py | 16 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/DiskEncounterMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Generated protobuf module body (protoc output — do not hand-edit the
# descriptor data; regenerate from the .proto instead). Comments below
# only annotate the generated structure.
DESCRIPTOR = _descriptor.FileDescriptor(
    name='POGOProtos/Networking/Requests/Messages/DiskEncounterMessage.proto',
    package='POGOProtos.Networking.Requests.Messages',
    syntax='proto3',
    # serialized_pb is the byte-serialized FileDescriptorProto emitted by protoc.
    serialized_pb=_b('\nBPOGOProtos/Networking/Requests/Messages/DiskEncounterMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"p\n\x14\x44iskEncounterMessage\x12\x14\n\x0c\x65ncounter_id\x18\x01 \x01(\x04\x12\x0f\n\x07\x66ort_id\x18\x02 \x01(\t\x12\x17\n\x0fplayer_latitude\x18\x03 \x01(\x01\x12\x18\n\x10player_longitude\x18\x04 \x01(\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Descriptor for the single message type defined by the .proto file:
# DiskEncounterMessage { uint64 encounter_id; string fort_id;
#                        double player_latitude; double player_longitude; }
_DISKENCOUNTERMESSAGE = _descriptor.Descriptor(
    name='DiskEncounterMessage',
    full_name='POGOProtos.Networking.Requests.Messages.DiskEncounterMessage',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name='encounter_id', full_name='POGOProtos.Networking.Requests.Messages.DiskEncounterMessage.encounter_id', index=0,
            number=1, type=4, cpp_type=4, label=1,
            has_default_value=False, default_value=0,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            options=None),
        _descriptor.FieldDescriptor(
            name='fort_id', full_name='POGOProtos.Networking.Requests.Messages.DiskEncounterMessage.fort_id', index=1,
            number=2, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=_b("").decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            options=None),
        _descriptor.FieldDescriptor(
            name='player_latitude', full_name='POGOProtos.Networking.Requests.Messages.DiskEncounterMessage.player_latitude', index=2,
            number=3, type=1, cpp_type=5, label=1,
            has_default_value=False, default_value=float(0),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            options=None),
        _descriptor.FieldDescriptor(
            name='player_longitude', full_name='POGOProtos.Networking.Requests.Messages.DiskEncounterMessage.player_longitude', index=3,
            number=4, type=1, cpp_type=5, label=1,
            has_default_value=False, default_value=float(0),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            options=None),
    ],
    extensions=[
    ],
    nested_types=[],
    enum_types=[
    ],
    options=None,
    is_extendable=False,
    syntax='proto3',
    extension_ranges=[],
    oneofs=[
    ],
    serialized_start=111,
    serialized_end=223,
)

DESCRIPTOR.message_types_by_name['DiskEncounterMessage'] = _DISKENCOUNTERMESSAGE

# Concrete message class built at import time from the descriptor above.
DiskEncounterMessage = _reflection.GeneratedProtocolMessageType('DiskEncounterMessage', (_message.Message,), dict(
    DESCRIPTOR = _DISKENCOUNTERMESSAGE,
    __module__ = 'POGOProtos.Networking.Requests.Messages.DiskEncounterMessage_pb2'
    # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.DiskEncounterMessage)
))
_sym_db.RegisterMessage(DiskEncounterMessage)

# @@protoc_insertion_point(module_scope)
|
khalibartan/Antidote-DM | refs/heads/master | Antidotes DM/youtube_dl/extractor/howstuffworks.py | 106 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
int_or_none,
js_to_json,
unescapeHTML,
)
class HowStuffWorksIE(InfoExtractor):
    """youtube-dl extractor for howstuffworks.com video pages.

    Parses the page's embedded ``var clip = {...}`` JS object for
    metadata/formats, falling back to the site's SMIL service when the
    clip object lists no usable formats.
    """
    _VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*(?:\d+-)?(?P<id>.+?)-video\.htm'
    _TESTS = [
        {
            'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
            'info_dict': {
                'id': '450221',
                'ext': 'flv',
                'title': 'Cool Jobs - Iditarod Musher',
                'description': 'Cold sleds, freezing temps and warm dog breath... an Iditarod musher\'s dream. Kasey-Dee Gardner jumps on a sled to find out what the big deal is.',
                'display_id': 'cool-jobs-iditarod-musher',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 161,
            },
        },
        {
            'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
            'info_dict': {
                'id': '453464',
                'ext': 'mp4',
                'title': 'Survival Zone: Food and Water In the Savanna',
                'description': 'Learn how to find both food and water while trekking in the African savannah. In this video from the Discovery Channel.',
                'display_id': 'survival-zone-food-and-water-in-the-savanna',
                'thumbnail': 're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
            'info_dict': {
                'id': '440011',
                'ext': 'flv',
                'title': 'Sword Swallowing #1 by Dan Meyer',
                'description': 'Video footage (1 of 3) used by permission of the owner Dan Meyer through Sword Swallowers Association International <www.swordswallow.org>',
                'display_id': 'sword-swallowing-1-by-dan-meyer',
                'thumbnail': 're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://shows.howstuffworks.com/stuff-to-blow-your-mind/optical-illusions-video.htm',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds clip metadata as a JS object literal; convert it
        # to JSON before parsing.
        clip_js = self._search_regex(
            r'(?s)var clip = ({.*?});', webpage, 'clip info')
        clip_info = self._parse_json(
            clip_js, display_id, transform_source=js_to_json)

        video_id = clip_info['content_id']
        formats = []
        m3u8_url = clip_info.get('m3u8')
        if m3u8_url:
            formats += self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
        for video in clip_info.get('mp4', []):
            formats.append({
                'url': video['src'],
                # 'bitrate' is a string like '650k'; strip the unit for vbr.
                'format_id': video['bitrate'],
                'vbr': int(video['bitrate'].rstrip('k')),
            })

        # Fallback: no direct formats in the clip object — query the SMIL
        # service for RTMP/HTTP variants instead.
        if not formats:
            smil = self._download_xml(
                'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id,
                video_id, 'Downloading video SMIL')

            http_base = find_xpath_attr(
                smil,
                './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
                'name',
                'httpBase').get('content')

            # Query string the CDN expects on each media URL.
            URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=A&g=A'

            for video in smil.findall(
                    './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
                vbr = int_or_none(video.attrib['system-bitrate'], scale=1000)
                formats.append({
                    'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
                    'format_id': '%dk' % vbr,
                    'vbr': vbr,
                })

        self._sort_formats(formats)

        return {
            'id': '%s' % video_id,
            'display_id': display_id,
            'title': unescapeHTML(clip_info['clip_title']),
            'description': unescapeHTML(clip_info.get('caption')),
            'thumbnail': clip_info.get('video_still_url'),
            'duration': clip_info.get('duration'),
            'formats': formats,
        }
|
WillisXChen/django-oscar | refs/heads/master | oscar/lib/python2.7/site-packages/requests/packages/chardet/euckrprober.py | 2930 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    # Charset prober for EUC-KR (Korean): pairs the EUC-KR coding state
    # machine with an EUC-KR character-distribution analyser on top of
    # the shared multi-byte prober framework.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        # reset() must run after both collaborators exist; it primes the
        # state machine and the analyser (behaviour inherited from base).
        self.reset()

    def get_charset_name(self):
        # Canonical IANA name reported when this prober wins detection.
        return "EUC-KR"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.