| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
vprusso/youtube_tutorials
|
natural_language_processing/nlp_1.py
|
1
|
3973
|
# LucidProgramming -- Natural Language Processing in Python: Part 1
# YouTube Video: https://www.youtube.com/watch?v=tP783g97C5o
# Prior to running this script, you will need Python installed on your
# machine. With Python set up, install NLTK via the following pip command:
# pip install nltk
# Once installed, you should be able to follow along with the
# remainder of this script.
import nltk
# Run this command to download all collections to be used for the NLP tutorials:
nltk.download()
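# (Optional sketch: nltk.download() opens an interactive downloader and can be
# slow if you fetch everything. If you only want the resources used below, the
# targeted calls nltk.download('gutenberg') and nltk.download('stopwords')
# should be enough for this script; these are the standard NLTK resource IDs.)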
# Now that we've downloaded all the NLTK corpus content, let us go ahead and
# load in the text from Lewis Carroll's "Alice in Wonderland" via Gutenberg:
from nltk.text import Text
alice = Text(nltk.corpus.gutenberg.words('carroll-alice.txt'))
# NLTK also provides other texts from Gutenberg. We can view those by
# running the following command:
print(nltk.corpus.gutenberg.fileids())
# There are many more text data sets provided by NLTK. For now, we will
# just focus on what types of analysis tools NLTK provides to us on the
# text "Alice in Wonderland" by Lewis Carroll:
# Word Count: How many words are contained in "Alice in Wonderland"?
# Note that this includes punctuation as well as traditional words.
print(type(alice))
print(len(alice))
# Unique Word Count: How many unique words are present in
# "Alice in Wonderland"? For instance, the above line would
# count the word "the" on each occurrence.
print(len(set(alice)))
# Specific Word Count: How many times does a specific word occur
# in a text?
print(alice.count("Alice"))
# Concordance: Shows each occurrence of a word in its context of use.
# We can check where the term "Alice" appears in "Alice in Wonderland".
alice.concordance("Alice")
# Dispersion Plot: Shows the locations at which a word appears in the text.
# Example:
# Give a visual representation of where the words "Alice", "Rabbit",
# "Hatter", and "Queen" appear in "Alice in Wonderland".
alice.dispersion_plot(["Alice", "Rabbit", "Hatter", "Queen"])
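# (Note: dispersion_plot, and the frequency plots further below, render with
# matplotlib, so matplotlib must also be installed: pip install matplotlib)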
# The word "Alice" is consistently used throughout the entire text, while
# the word "Queen" is found closer to the end of the text. This makes sense,
# since Alice does not encounter the Red Queen until later in the book.
# Frequency Distributions: What are the most frequent words (specifically,
# tokens) used in a given text?
# Example:
# Generate the most frequent tokens in "Alice in Wonderland":
# First, use NLTK to generate a frequency distribution dictionary-like object.
fdist = nltk.FreqDist(alice)
# What are the top 50 most common words in "Alice in Wonderland"?
fdist.plot(50, cumulative=True, title="50 most common tokens in Alice in Wonderland")
# Observe that the x-axis consists of punctuation, which may not
# be precisely what we are going for. It is possible to remove this
# from the words that we plot by filtering out the punctuation.
fdist_no_punc = nltk.FreqDist(
dict((word, freq) for word, freq in fdist.items() if word.isalpha()))
fdist_no_punc.plot(50,
cumulative=True,
title="50 most common tokens (no punctuation)")
# This plot gives us a bit more useful information, but it is still dominated
# by very common English words like "and", "the", "in", etc., which do not
# tell us much about the text itself. These common words are referred to as
# stopwords. NLTK provides a corpus of such words, and we may filter them out
# in a similar fashion to the punctuation above.
stopwords = nltk.corpus.stopwords.words('english')
fdist_no_punc_no_stopwords = nltk.FreqDist(
dict((word, freq) for word, freq in fdist.items() if word not in stopwords and word.isalpha()))
# Replot fdist after stopwords filtered out.
fdist_no_punc_no_stopwords.plot(50,
cumulative=True,
title="50 most common tokens (no stopwords or punctuation)")
|
gpl-3.0
| -2,300,163,772,194,960,600
| 41.265957
| 103
| 0.726655
| false
| 3.515929
| false
| false
| false
|
cpitclaudel/dBoost
|
graphics/scalability.pdf.py
|
1
|
2629
|
#!/usr/bin/env python3
from utils import filename, save2pdf, setup, rcparams, to_inches
from utils.plots_helper import sensors
import matplotlib
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
import itertools
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{siunitx}"]
make,fname = filename("scalability.pdf")
INTEL_TOTAL = 2313153
# labels: vary train size + algo type
# x: vary test size
# y: runtime in s
trs = [1000,100000]#,2313153]
tes = [INTEL_TOTAL]
tes_h = 2000000
_trs = ["1K","100K"]#,2313153]
_tes = [5000000,1000000,15000000,20000000]
#_tes = [0.100,1.000,10.000,100.000,1000.000,2313.153]
#es = ["1_gaussian1.5","0.7_mixture1_0.075","0.7_mixture2_0.075"]
es = [
[1,"gaussian",1.5],
[0.7,"mixture1",0.1],
[0.7,"mixture2",0.05],
[0.7,"histogram"]
]
# build data
results = {}
vals = {}
for (tr,te,e) in itertools.product(trs,tes,es):
if (e[1],tr) not in results:
results[(e[1],tr)] = []
vals[(e[1],tr)] = []
if e[1] == "gaussian":
ofile = "../results/sensors_{}_stat{}_{}{}.out".format(tr,*e)
elif e[1] == "histogram":
ofile = "../results/csail/csail-timings-{}-{}.txt".format(tr,tes_h)
else:
ofile = "../results/sensors_{}_stat{}_{}_{}.out".format(tr,*e)
with open(ofile,'r') as f:
for line in f:
line = line.strip().split()
if line[0] == "Time":
#print("{} {} {}: {}".format(tr,e[1],float(line[1]),float(line[2])))
vals[(e[1],tr)].append(float(line[1]))
results[(e[1],tr)].append(float(line[2]))
if line[0] == "Runtime":
#print("{} {} {}: {}".format(tr,te,e[1],float(line[1])))
vals[(e[1],tr)].append(te)
results[(e[1],tr)].append(float(line[1]))
continue
#print(results)
pdf = PdfPages(fname)
setup()
rcparams()
pyplot.gcf().set_size_inches(to_inches(240), to_inches(240)) # full column size is 240pt
ax = pyplot.gca()
ax.set_title("Scalability")
ax.set_xlabel("Test set size")
ax.set_ylabel("Runtime (s)")
lines = ["-","--"]
linecycler = itertools.cycle(lines)
ax.set_color_cycle(['g','g','r','r','b','b','m','m'])
ax.set_xlim([0,2000000])
for (e,(tr,_tr)) in itertools.product(es,zip(trs,_trs)):
#vals[(e[1],tr)] = [val/1000 for val in vals[(e[1],tr)]]
ax.plot(vals[(e[1],tr)],results[(e[1],tr)],next(linecycler),label = "{}, {}".format(e[1].capitalize(),_tr))#,marker='x',markersize=2.0)
ax.set_xticklabels(['0','0.5M','1M','1.5M','2.0M'])
ax.legend(loc=2,handlelength=3,prop={'size':6})
save2pdf(pdf)
pdf.close()
|
gpl-3.0
| 3,273,065,536,939,600,400
| 32.705128
| 139
| 0.58197
| false
| 2.715909
| false
| false
| false
|
jaygoswami2303/course_dashboard_api
|
v2/DiscussionAPI/permissions.py
|
1
|
1727
|
"""
Permissions classes for Discussion-API views.
"""
from rest_framework import permissions
from django.http import HttpResponse
import MySQLdb
from course_dashboard_api.v2.dbv import *
sql_user = MYSQL_USER
sql_pswd = MYSQL_PSWD
mysql_db = MYSQL_DB
class IsStudent(permissions.BasePermission):
"""
Grants access if the requested id is of the requesting user or if the requesting user is a superuser.
"""
def has_permission(self, request, view):
list = request.META['PATH_INFO'].split("/")
id = list[len(list)-2]
return request.user.is_superuser or request.user.id == int(id)
class IsFaculty(permissions.BasePermission):
"""
Grants access if the requesting user is the faculty of the requested course or if the requesting user is a superuser.
"""
def has_permission(self, request, view):
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
return HttpResponse("MySQL connection not established") # MySQL could not be connected
query = "select * from student_courseaccessrole where binary course_id = %s and role = 'instructor' and user_id=%s"
list = request.META['PATH_INFO'].split("/")
id = list[len(list) - 2]
course_id = "course-v1:" + id
user_id = request.user.id
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(query, (str(course_id), str(user_id), ))
entry = mysql_cursor.fetchone()
permission = True
if entry is None:
permission = False
return request.user.is_superuser or permission
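# (Usage sketch, not part of this module: in Django REST Framework these
# permission classes are attached to a view through its `permission_classes`
# attribute, e.g.
#   class StudentDetailView(APIView):
#       permission_classes = (IsStudent,)
# The view name here is hypothetical; `permission_classes` is the standard
# DRF hook that calls has_permission() on each request.)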
|
mit
| 3,944,675,056,809,785,300
| 32.211538
| 124
| 0.658367
| false
| 3.88964
| false
| false
| false
|
JaneliaSciComp/Ax
|
ax1.py
|
1
|
6102
|
#!/home/arthurb/bin/anaconda/bin/python
# python ax1.py params_file FILEIN FILEOUT
# python ax1.py params_file FILEIN FILEOUT START STOP
# python ax1.py FS NFFT NW K PVAL FILEIN FILEOUT
# python ax1.py FS NFFT NW K PVAL FILEIN FILEOUT START STOP
#
# analyze a set of time series with multi-taper spectral analysis and
# create a sparse matrix of just the time-frequency pixels whose F-test
# passes PVAL.
#
# typical usage consists of one or more input files being analyzed by one
# or more parameter sets. for example, four microphone recordings of the
# same vocalizing mouse analyzed with three different NFFTs and the same
# NW, K, and PVAL. <filename>.ch[1-4] yield <filename>-[1-3].ax
#
# FS: sampling rate in Hertz
# NFFT: FFT window size in seconds, rounds up to the next power of 2 tics
# NW: multi-taper time-bandwidth product
# K: number of tapers
# PVAL: F-test p-val threshold
# FILEIN: the base filename and path of [0-9].wav files with a single channel each,
# or .ch[0-9] files containing float32s
# FILEOUT: an integer to append to FILEIN to differentiate parameter sets used
# START,STOP: optional time range, in seconds
#
# output is a binary file with a time x frequency x amplitude x channel
# array of hot pixels
#
# python ax1.py 'ultrasonic_params.txt' 'urine' '1'
# python ax1.py 200e3 0.001 15 29 0.01 'urine' '1'
# python ax1.py 450450 0.001 15 29 0.01 'groundtruth' '1' 0 30
# /home/arthurb/bin/anaconda/bin/kernprof.py -l -v ax1.py 450450 0.00025 22 43 0.01 /groups/egnor/egnorlab/ben/Test_D_1 7 0 4
from ax1b import do_it, nextpow2
import struct
import time
import numpy as np
import glob
import sys
import os
from multiprocessing import Pool, cpu_count
#import pdb
import math
from scipy import stats
import pyfftw
from dpss import dpss
import wave
if __name__ == "__main__":
if (len(sys.argv)!=4) and (len(sys.argv)!=6) and (len(sys.argv)!=8) and (len(sys.argv)!=10):
print('invalid args')
sys.exit()
tstart=time.time()
if (len(sys.argv)<8):
execfile(sys.argv[1])
FILEIN=sys.argv[2]
FILEOUT=sys.argv[3]
else:
FS=sys.argv[1]
NFFT=sys.argv[2]
NW=sys.argv[3]
K=sys.argv[4]
PVAL=sys.argv[5]
FILEIN=sys.argv[6]
FILEOUT=sys.argv[7]
if ((len(sys.argv)==6) or (len(sys.argv)==10)):
START=sys.argv[-2]
STOP=sys.argv[-1]
if (isinstance(FS,str)):
FS = int(FS)
if (isinstance(NFFT,str)):
NFFT = float(NFFT)
if (isinstance(NW,str)):
NW = int(NW)
if (isinstance(K,str)):
K = int(K)
if (isinstance(PVAL,str)):
PVAL = float(PVAL)
if ((len(sys.argv)==6) or (len(sys.argv)==10)):
if (isinstance(START,str)):
START = float(START)
if (isinstance(STOP,str)):
STOP = float(STOP)
VERSION=1
SUBSAMPLE=1
NWORKERS=cpu_count()
FS=int(FS/SUBSAMPLE);
NFFT=int(nextpow2(NFFT*FS)) # convert to ticks
NWINDOWS_PER_WORKER=int(12*256*1000/NFFT) # NFFT/2 ticks
FIRST_MT=float('nan')
LAST_MT=float('nan')
FRACTION_MT=float('nan')
tapers,eig = dpss(NFFT, NW, K)
tapers = np.array(tapers, dtype=np.float32)
#tapers = tapers * np.sqrt(FS)
f=np.array(range(0,NFFT//2+1))*FS/NFFT
df=f[1]-f[0];
DIROUT=os.path.dirname(FILEIN);
FILEINs=sorted(glob.glob(FILEIN+'.ch*'));
FILE_TYPE=1
if (len(FILEINs)==0):
FILEINs=sorted(glob.glob(FILEIN+'*.wav'));
FILE_TYPE=2
if (len(FILEINs)==0):
print(["can't find any .wav or .ch files with basename '"+FILEIN]);
sys.exit()
NCHANNELS=len(FILEINs);
REMAP=list();
for i in range(0,NCHANNELS):
filei=os.path.join(DIROUT,FILEINs[i])
if FILE_TYPE==1:
try:
fid=open(filei,'rb')
except:
print(["can't open file '"+filei+"'"])
sys.exit()
fid.seek(0,2);
FILE_LEN=fid.tell()/4/FS;
fid.close()
REMAP.append(FILEINs[i][-1]);
if FILE_TYPE==2:
try:
fid=wave.open(filei,'rb')
except:
print(["can't open file '"+filei+"'"])
sys.exit()
FILE_LEN=fid.getnframes()/FS
fid.close();
REMAP.append(FILEINs[i][-5]);
if 'START' not in locals():
tmp=FILE_LEN*FS/(NFFT//2)-1
print('Processing {:.3g} min = {:.3g} windows = {:3g} chunks of data in {:s}'.format(FILE_LEN/60, tmp, tmp/NWINDOWS_PER_WORKER, FILEINs[i]));
t_offset_tic=0;
t_now_sec=0;
else:
tmp=(STOP-START)*FS/(NFFT//2)-1
print('Processing {:.3g} min = {:.3g} windows = {:3g} chunks of data in {:s}'.format((STOP-START)/60, tmp, tmp/NWINDOWS_PER_WORKER, FILEINs[i]));
t_offset_tic=round(START*FS);
t_now_sec=START;
fid_out=open(FILEIN+'-'+FILEOUT+'.ax','wb')
# L=8 bytes on 64-bit systems
fid_out.write(struct.pack('B',VERSION))
fid_out.write(struct.pack('B',SUBSAMPLE))
fid_out.write(struct.pack('B',0))
fid_out.write(struct.pack('I',FS))
fid_out.write(struct.pack('I',NFFT))
fid_out.write(struct.pack('H',NW))
fid_out.write(struct.pack('H',K))
fid_out.write(struct.pack('d',PVAL))
fid_out.write(struct.pack('d',df))
t_now=0
tloop=time.time()
pool=Pool()
while ((t_now_sec<FILE_LEN) and (('STOP' not in locals()) or (t_now_sec<STOP))):
if ((time.time()-tloop)>10):
tmp=t_now_sec
tmp2=0
if 'START' in locals():
tmp=tmp-START
tmp2=START
if 'STOP' in locals():
tmp=tmp/(STOP-tmp2)
else:
tmp=tmp/(FILE_LEN-tmp2)
print('{:d} sec processed; {:d}% done'.format(int(round(t_now_sec-tmp2)),int(round(100*tmp))))
tloop=time.time()
#idx=map(do_it, \
idx=pool.map(do_it, \
[(DIROUT, FILEINs, t_now, NW,K,PVAL,FS,NFFT, NWINDOWS_PER_WORKER, tapers, x, t_offset_tic, FILE_TYPE, round(FILE_LEN*FS)) for x in range(0,NWORKERS)])
for i in idx:
for j in i:
fid_out.write(struct.pack('dddd', \
float(t_now)+j[0], j[1], j[2], float(REMAP[j[3]])))
t_now_sec = t_now_sec+float(NFFT//2)/FS*NWORKERS*NWINDOWS_PER_WORKER
t_now = t_now+NWORKERS*NWINDOWS_PER_WORKER
fid_out.write('Z'.encode('ascii'))
fid_out.close()
tstop = time.time() - tstart
print('Run time was {:.3g} minutes.'.format(tstop/60))
pool.close()
|
bsd-3-clause
| 4,388,245,357,869,162,500
| 28.765854
| 157
| 0.632416
| false
| 2.71683
| false
| false
| false
|
ashoksekar/python
|
CodeJam/2011/Round2/AIWar/main.py
|
1
|
5722
|
#!/usr/bin/python
import networkx as nx
import matplotlib.pyplot as plt
num_of_test = 0 # number of test case (N)
debug = 0
num_P = 0
num_W = 0
gown = 0
gthtn = 0
th = []
class node(object):
""" data = n
child = child nodes """
def __init__(self, data = 0, child = [], parent = [], level = 0):
self.data = data
self.child = child
self.parent = parent
self.level = level
def __str__(self):
return '%d' % self.data
def find_node(root, data):
items = [root]
while len(items):
pt = items.pop()
if pt.data == data:
return pt
for x in pt.child:
if not(x in items):
items.append(x)
return None
def print_node(root):
items = [root]
while len(items):
pt = items.pop(0)
print "pt:", pt, "child:",
for x in pt.child:
print x,
if not(x in items):
items.append(x)
print
return
def open_read_file():
#file_name="D-small-practice.in"
file_name="D-large-practice.in"
#file_name="sample_input.txt"
fin=open(file_name, 'r')
return fin
def find_gthtn(node, parent, ttn):
global gthtn
global graph
global num_P
global i
if 1 in ttn:
l = ttn
l = list(set(l))
if 0 in l:
l.remove(0)
assert(1 in l)
if gthtn < len(l):
gthtn = len(l)
items = node.child[:]
ln1 = []
big = 0
for x in items:
t = ttn+graph[x.data]+graph[parent.data]
if len(prev[x.data]) != 0:
for y in prev[x.data]:
t1 = t + graph[y]
t2 = list(set(t1))
ln1.append(len(t2))
if big < len(t2):
big = len(t2)
else:
t1 = list(set(t))
ln1.append(len(t1))
if big < len(t1):
big = len(t1)
ii = 0
items1 = []
out_break = 0
for x in items:
if len(prev[x.data]) != 0:
for y in prev[x.data]:
if ln1[ii] == big:
items1.append(x)
#out_break = True
break
ii += 1
if out_break:
break
else:
if ln1[ii] == big:
items1.append(x)
break
ii += 1
for pt in items1:
find_gthtn(pt, node, list(set(ttn + graph[pt.data])))
def find_thtn(ptv):
global graph, prev
global gown, gthtn
global i
nodeg = []
for x in range(400):
nodeg.append(None)
root = node(data = ptv, child = [], parent = [], level = 1)
G = nx.Graph()
items = [root]
while len(items):
pt = items.pop(0)
for x in graph[pt.data]:
if not ((pt.data, x) in G.edges()):
G.add_edge(pt.data, x, color = 'blue')
for z in prev[pt.data]:
n = nodeg[z] #node.find_node(root, z)
if (n == None):
n = node(data = z, child = [], parent = [])
nodeg[z] = n
n.level = pt.level + 1
items.append(n)
G.add_edge(pt.data, n.data, color = 'red')
pt.child.append(n)
assert (n.level == (pt.level + 1))
if (debug):
print 'pt:' ,pt, 'ptprev:', prev[pt.data], 'n:', n, "nprev:", prev[n.data]
#print 'pt:', pt, 'n:', n, 'prev:', prev[pt.data], 'parent:',
#for x in n.parent:
# print x,
#print
if (debug):
color = nx.get_edge_attributes(G,'color')
colors = []
for x in color:
colors.append(color[x])
print colors
nx.draw_networkx(G, pos=nx.shell_layout(G), edge_color=colors)
plt.axis('on')
plt.show()
find_gthtn(root, root, graph[root.data])
def find_own(pt):
global graph, prev
global gown, gthtn
global i
own = 0
while True:
if (pt != 1) and (pt != 0):
own += 1
pt = prev[pt][-1]
if pt == 1:
break
gown = own
fin = open_read_file()
num_of_test = int(fin.readline())
i = 0
while i < num_of_test:
string = fin.readline().split()
num_P = int(string[0])
num_W = int(string[1])
graph = dict()
attr = []
prev = []
gown = 0
gthtn = 0
string = fin.readline().split()
#if i == 5:
# debug = 1
#else:
# debug = 0
for x in range(num_P):
graph[x] = []
attr.append([0,0xffffffff])
prev.append([])
attr[1][1] = 0
for x in range(num_W):
s = string[x].split(',')
m = int(s[0])
n = int(s[1])
graph[m].append(n)
graph[n].append(m)
for x in range(num_P):
graph[x].sort()
lst = [1]
while len(lst) > 0:
m = lst.pop(0)
if attr[m][0] != 0:
continue
for x in range(len(graph[m])):
dest = graph[m][x]
if (attr[dest][0] == 0):
lst.append(dest)
if ((attr[m][1]+1) < attr[dest][1]):
attr[dest][1] = attr[m][1]+1
prev[dest] = [m]
elif ((attr[m][1]+1) == attr[dest][1]):
if not (m in prev[dest]):
prev[dest].append(m)
else:
continue
attr[m][0] = 1
find_own(0)
find_thtn(0)
if (debug):
print gown,gthtn
i += 1
gthtn -= gown
print 'Case #%d: %d %d' % (i, gown, gthtn)
|
gpl-2.0
| -8,738,169,697,906,892,000
| 24.207048
| 90
| 0.436386
| false
| 3.348157
| false
| false
| false
|
PuzzleboxIO/synapse-python
|
Puzzlebox/Synapse/Device.py
|
1
|
12424
|
# -*- coding: utf-8 -*-
# Copyright Puzzlebox Productions, LLC (2010-2012)
#
# This code is released under the GNU Public License (GPL) version 2
# For more information please refer to http://www.gnu.org/copyleft/gpl.html
__changelog__ = """\
Last Update: 2012.04.23
"""
__todo__ = """
"""
### IMPORTS ###
import os, sys
import Puzzlebox.Synapse.Configuration as configuration
if configuration.ENABLE_PYSIDE:
try:
import PySide as PyQt4
from PySide import QtCore, QtGui
except Exception, e:
print "ERROR: [Synapse:Device] Exception importing PySide:",
print e
configuration.ENABLE_PYSIDE = False
else:
print "INFO: [Synapse:Device] Using PySide module"
if not configuration.ENABLE_PYSIDE:
print "INFO: [Synapse:Device] Using PyQt4 module"
from PyQt4 import QtCore, QtGui
if (sys.platform == 'win32'):
import _winreg as winreg
import itertools
import re
import serial
DEFAULT_IMAGE_PATH = 'images'
elif (sys.platform == 'darwin'):
DEFAULT_IMAGE_PATH = 'images'
else:
import bluetooth
DEFAULT_IMAGE_PATH = '/usr/share/puzzlebox_synapse/images'
#####################################################################
# Globals
#####################################################################
DEBUG = configuration.DEBUG
PATH_TO_HCITOOL = '/usr/bin/hcitool'
#####################################################################
# Classes
#####################################################################
class puzzlebox_synapse_device(QtGui.QWidget):
def __init__(self, log, \
DEBUG=DEBUG, \
parent=None, \
):
self.log = log
self.DEBUG = DEBUG
self.parent=parent
if self.parent == None:
QtGui.QWidget.__init__(self, parent)
#self.setupUi(self)
self.configureSettings()
self.connectWidgets()
self.name = "Synapse:Device"
##################################################################
def configureSettings(self):
pass
##################################################################
def connectWidgets(self):
pass
##################################################################
def enumerateSerialPorts(self):
""" Uses the Win32 registry to return an
iterator of serial (COM) ports
existing on this computer.
from http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/
"""
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError:
#raise IterationError
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break
##################################################################
def fullPortName(self, portname):
""" Given a port-name (of the form COM7,
COM12, CNCA0, etc.) returns a full
name suitable for opening with the
Serial class.
"""
m = re.match('^COM(\d+)$', portname)
if m and int(m.group(1)) < 10:
return portname
return '\\\\.\\' + portname
##################################################################
def searchForSerialDevices(self, devices=[]):
if (sys.platform == 'win32'):
for portname in self.enumerateSerialPorts():
if portname not in devices:
#portname = self.fullPortName(portname)
devices.append(portname)
elif (sys.platform == 'darwin'):
# Handle Telekinesis first so it shows up at top of listings
for device in os.listdir('/dev'):
if (device.startswith('tty.Telekinesis')):
devices.append( os.path.join('/dev', device))
for device in os.listdir('/dev'):
if (device.startswith('tty.MindWaveMobile') or \
device.startswith('tty.MindWave')):
devices.append( os.path.join('/dev', device))
# Handle MindSet separately so it shows up second in listings
for device in os.listdir('/dev'):
if (device.startswith('tty.MindSet')):
devices.append( os.path.join('/dev', device))
else:
#if os.path.exists('/dev/tty.MindWaveMobile-SPPDev'):
#devices.append('/dev/tty.MindWaveMobile-SPPDev')
#if os.path.exists('/dev/tty.MindWaveMobile-DevA'):
#devices.append('/dev/tty.MindWaveMobile-DevA')
#if os.path.exists('/dev/tty.MindWaveMobile-DevB'):
#devices.append('/dev/tty.MindWaveMobile-DevB')
#if os.path.exists('/dev/tty.MindWave'):
#devices.append('/dev/tty.MindWave')
#if os.path.exists('/dev/tty.MindWave1'):
#devices.append('/dev/tty.MindWave1')
#if os.path.exists('/dev/tty.MindWave2'):
#devices.append('/dev/tty.MindWave2')
#if os.path.exists('/dev/tty.MindWave3'):
#devices.append('/dev/tty.MindWave3')
#if os.path.exists('/dev/tty.MindWave4'):
#devices.append('/dev/tty.MindWave4')
#if os.path.exists('/dev/tty.MindWave5'):
#devices.append('/dev/tty.MindWave5')
#if os.path.exists('/dev/tty.MindSet-DevB'):
#devices.append('/dev/tty.MindSet-DevB')
for device in os.listdir('/dev'):
if (device.startswith('ttyUSB') or \
device.startswith('ttyACM') or \
device.startswith('tty.usbserial') or \
device.startswith('rfcomm')):
devices.append( os.path.join('/dev', device))
#if os.path.exists('/dev/ttyUSB0'):
#devices.append('/dev/ttyUSB0')
#if os.path.exists('/dev/ttyUSB1'):
#devices.append('/dev/ttyUSB1')
#if os.path.exists('/dev/ttyUSB2'):
#devices.append('/dev/ttyUSB2')
#if os.path.exists('/dev/ttyUSB3'):
#devices.append('/dev/ttyUSB3')
#if os.path.exists('/dev/ttyUSB4'):
#devices.append('/dev/ttyUSB4')
#if os.path.exists('/dev/ttyUSB5'):
#devices.append('/dev/ttyUSB5')
#if os.path.exists('/dev/ttyUSB6'):
#devices.append('/dev/ttyUSB6')
#if os.path.exists('/dev/ttyUSB7'):
#devices.append('/dev/ttyUSB7')
#if os.path.exists('/dev/ttyUSB8'):
#devices.append('/dev/ttyUSB8')
#if os.path.exists('/dev/ttyUSB9'):
#devices.append('/dev/ttyUSB9')
#if os.path.exists('/dev/rfcomm0'):
#devices.append('/dev/rfcomm0')
#if os.path.exists('/dev/rfcomm1'):
#devices.append('/dev/rfcomm1')
#if os.path.exists('/dev/rfcomm2'):
#devices.append('/dev/rfcomm2')
#if os.path.exists('/dev/rfcomm3'):
#devices.append('/dev/rfcomm3')
#if os.path.exists('/dev/rfcomm4'):
#devices.append('/dev/rfcomm4')
#if os.path.exists('/dev/ttyACM0'):
#devices.append('/dev/ttyACM0')
#if os.path.exists('/dev/ttyACM1'):
#devices.append('/dev/ttyACM1')
#if os.path.exists('/dev/ttyACM2'):
#devices.append('/dev/ttyACM2')
#if os.path.exists('/dev/ttyACM3'):
#devices.append('/dev/ttyACM3')
#if os.path.exists('/dev/ttyACM4'):
#devices.append('/dev/ttyACM4')
return(devices)
##################################################################
def hcitoolScanForRemoteDevices(self, devices=[]):
bluetooth_devices = []
#command = '%s scan 2> /dev/null' % PATH_TO_HCITOOL
command = '%s scan' % PATH_TO_HCITOOL
if self.DEBUG > 1:
print 'INFO: Calling "%s"' % command
output = os.popen(command, 'r')
try:
result = output.readlines()
except Exception, e:
if self.DEBUG:
print "ERROR [Synapse-Interface]: Failed reading result from call to hcitool:",
print e
result = ''
if result == '':
return([]) # Under OS X hcitool doesn't exist so we don't see any devices
for line in result:
line = line.strip()
if line == '' or line == 'Scanning ...':
continue
elif self.DEBUG > 1:
print line
try:
address = line.split('\t')[0]
except:
pass
else:
bluetooth_devices.append(address)
for address in bluetooth_devices:
command = '%s name %s' % (PATH_TO_HCITOOL, address)
if self.DEBUG:
print 'INFO: Calling "%s"' % command
output = os.popen(command, 'r')
for line in output.readlines():
line = line.strip()
if line == '':
continue
elif self.DEBUG:
print '\t',
print line
device_name = line.strip()
if ((device_name == 'MindSet' or device_name == 'MindWave Mobile') and \
(address not in devices)):
devices.append(address)
else:
if self.DEBUG:
print 'INFO: Found but not recognized: [%s] %s' % \
(address, device_name)
return (devices)
##################################################################
def hcitoolGetActiveConnections(self, devices=[]):
bluetooth_devices = []
#command = '%s con 2> /dev/null' % PATH_TO_HCITOOL
command = '%s con' % PATH_TO_HCITOOL
if self.DEBUG > 1:
print 'INFO: Calling "%s"' % command
output = os.popen(command, 'r')
try:
result = output.readlines()
except Exception, e:
if self.DEBUG:
print "ERROR [Synapse:Interface]: Failed reading result from call to hcitool:",
print e
result = ''
if result == '':
return([]) # Under OS X hcitool doesn't exist so we don't see any devices
for line in result:
line = line.strip()
if line == '' or line == 'Connections:':
continue
elif self.DEBUG > 1:
print line
try:
address = line.split(' ')[2]
except:
pass
else:
bluetooth_devices.append(address)
for address in bluetooth_devices:
command = '%s name %s' % (PATH_TO_HCITOOL, address)
if self.DEBUG:
print 'INFO: Calling "%s":' % command
output = os.popen(command, 'r')
for line in output.readlines():
line = line.strip()
if line == '':
continue
elif self.DEBUG:
print '\t',
print line
device_name = line.strip()
if ((device_name == 'MindSet' or device_name == 'MindWave Mobile') and \
(address not in devices)):
devices.append(address)
return (devices)
##################################################################
def searchForDevices(self):
enable_hcitool = configuration.ENABLE_HCITOOL
devices = []
#self.pushButtonBluetoothSearch.setText('Searching')
if ((sys.platform != 'win32' and sys.platform != 'darwin') and \
configuration.THINKGEAR_BLUETOOTH_SEARCH):
# Bluetooth module doesn't compile properly under Windows
# and doesn't exist under OS X
# PyBluez API Documentation
# http://pybluez.googlecode.com/svn/www/docs-0.7/index.html
bluetooth_devices = []
if not enable_hcitool:
try:
if self.DEBUG:
print "INFO: Searching for Bluetooth devices using PyBluez module"
bluetooth_devices = bluetooth.discover_devices( \
duration=configuration.THINKGEAR_BLUETOOTH_DISCOVER_DEVICES_TIMEOUT, \
flush_cache=True, \
lookup_names=False)
for address in bluetooth_devices:
if self.DEBUG:
print "INFO: Device discovered",
print address
device_name = bluetooth.lookup_name(address, \
configuration.THINKGEAR_BLUETOOTH_LOOKUP_NAME_TIMEOUT)
if ((device_name == 'MindSet' or device_name == 'MindWave Mobile') and \
(address not in devices)):
devices.append(address)
# There is an issue under recent released of Linux
# in which already-connected Bluetooth ThinkGear devices
# are not appearing in a bluetooth device scan. However,
# using "hcitool" connected devices can be listed correctly.
# There does not appear to be an equivalent PyBluez feature.
# (http://pybluez.googlecode.com/svn/www/docs-0.7/index.html)
if devices == []:
if self.DEBUG:
print "INFO: No devices found through PyBluez module. Falling back to hcitool."
devices = self.hcitoolGetActiveConnections(devices)
except Exception, e:
if self.DEBUG:
print "ERROR: Exception calling Python Bluetooth module. (Is PyBluez installed?):"
print e
#if (sys.platform != 'darwin'):
enable_hcitool = True
if enable_hcitool:
devices = self.hcitoolScanForRemoteDevices(devices)
devices = self.hcitoolGetActiveConnections(devices)
if self.DEBUG > 2:
print "Bluetooth Devices found:",
print devices
devices = self.searchForSerialDevices(devices)
if self.DEBUG:
print "Devices found:",
print devices
return(devices)
|
agpl-3.0
| 6,322,157,607,286,303,000
| 24.883333
| 95
| 0.58717
| false
| 3.324592
| true
| false
| false
|
jamesjarlathlong/resourceful
|
two_agents_presleep.py
|
1
|
8165
|
import os
from agent import *
import asyncio
from qlearn import QLearn
from sarsa import Sarsa
import itertools
import functools
import json
import random
import sklearn
import collections
import websockets
import json
import copy
import time
import random
###Helper functions###
def merge(dicts):
super_dict = collections.defaultdict(list)
for d in dicts:
for k, v in d.items():
super_dict[k]+=v
return super_dict
def tuple_namer(name,tupl):
"""convert an unnamed state tuple
to a namedtuple object"""
tupl_templ = collections.namedtuple(name, 'battery status neighbour')
named = tupl_templ(battery = tupl[0], status = tupl[1], neighbour = tupl[2])
return named
def dictionary_saver(d, filename):
"""d is a dictionary whose keys are of the form (namedtuple, 'string')"""
json_friendly_d = {json.dumps(k):v for k,v in d.items()}
sklearn.externals.joblib.dump(json_friendly_d, filename)
def tracker_saver(d, filename):
"""d is a dictionary whose keys are of the form (namedtuple, 'string')"""
json_friendly_d = {json.dumps(k):json.dumps(v) for k,v in d.items()}
sklearn.externals.joblib.dump(json_friendly_d, filename)
#======actions========#
def go_to_sleep(old):
new = old._replace(status = 'sleeping')
return new
def prepare_sleep(old):
new = old._replace(status = 'pending')
return new
def wakeup(old):
new = old._replace(status = 'running')
return new
def noop(old):
print('noop')
return copy.deepcopy(old)
def create_action_states(states):
actions_states_sleeping = {i:[noop, wakeup] for i in states if i.status=='sleeping'}
actions_states_running = {i:[prepare_sleep, noop] for i in states if i.status == 'running'}
actions_states_pending = {i:[go_to_sleep] for i in states if i.status == 'pending'}
return merge([actions_states_sleeping, actions_states_running, actions_states_pending])
#####rewards###########
def state_rewards(state1, state2):
initial_reward = 0
if (state2.status == 'sleeping' and state2.neighbour=='sleeping'):
initial_reward -=50
if state2.status =='running' or state2.neighbour=='running':
initial_reward += 50
if state1.status !=state2.status:
initial_reward -= 2.5
if state2.battery == 0:
initial_reward = -50
return initial_reward
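# (Worked example, using state_rewards above: a transition into a state where
# both this sensor and its neighbour are 'sleeping', with a status change,
# scores 0 - 50 - 2.5 = -52.5; if the new state's battery is 0 the reward is
# overridden to -50 regardless of the other terms.)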
###message passing
def find_lead(qs,recruiter):
"""for recruiter, find potential helper"""
all_candidates = [k for k in qs if k!=recruiter]
return all_candidates[0]
def broadcast_change(old_state, new_state):
"""gets called when a sensor changes
from sleeping to awake, notifies the other
sensors of this change"""
def neighbor_changed(old_other, new_other,old_self):
new_self = old_self._replace(neighbour=new_other.status)
return new_self
update_from = type(new_state).__name__
update_to = find_lead(qs, update_from)
print('updating from: ', update_from, ' to: ', update_to)
neighbor_change_func = functools.partial(neighbor_changed,old_state, new_state)
qs[update_to].update((1,neighbor_change_func))
"""environments"""
#=====autonomous actions=======#
@asyncio.coroutine
def battery_action(q):
sunny = True
def adjust_battery(is_sunny, sensor):
if sensor.status =='sleeping':
new_battery = sensor.battery + (1 + is_sunny*1)#increase by 1 if not sunny, by 2 if sunny
sensor = sensor._replace(battery=new_battery)
else:
new_battery = sensor.battery - (2 - is_sunny*1)
sensor = sensor._replace(battery=new_battery)
if sensor.battery<=0:
sensor = sensor._replace(battery=0)
if sensor.battery>=20:
sensor = sensor._replace(battery=20)
return sensor
while True:
#if random.random()<0.1:
# sunny = not sunny
adjust_battery_sunny = functools.partial(adjust_battery, sunny)
yield from asyncio.sleep(0.15)
print('putting battery action on the q: ',q.qsize(), q._queue)
priority = random.uniform(2.01, 2.99) #we don't care about the order of adjust battery actions
#just want to make sure they don't collide
q.put_nowait((priority,adjust_battery_sunny))
#======reactions to agent actions==========#
def reaction_default(state1,state2, action):
if state1.status!=state2.status:
print('broadcasting change')
broadcast_change(state1, state2)
return state2
"""speak to outside world"""
def writer(self, state):
t = self.loop.time()
name_id_map = {'Sensor1':0, 'Sensor2':1}
idee = name_id_map[type(state).__name__]
update = {'_id':idee, 'battery':state.battery, 'status':state.status, 'neighbour': state.neighbour}
print('update: ', update)
writerq.append((t,update))
#print('put it on the writerq')
@asyncio.coroutine
def socketwriter(websocket, path):
while True:
msg = yield from writerq.get()
print('msg: ', msg)
yield from websocket.send(json.dumps(msg[1]))
"""special update function to ensure only latest event
with info about neighbour is kept on the queue"""
def update(self, new_item):
priority_level = new_item[0]
def matching_level(element, priority_level):
return element[0]==priority_level
try:
match_generator = (index for index,element in enumerate(self._queue)
if matching_level(element, priority_level))
matching_index = next(match_generator)
self._queue[matching_index] = new_item
except StopIteration:
self.put_nowait(new_item)
asyncio.PriorityQueue.update = update
if __name__ == '__main__':
loop = asyncio.get_event_loop()
"""States"""
battery = range(21)
status = ['sleeping','pending', 'running']
neighbour = ['sleeping','pending', 'running']
all_vars = [battery,status, neighbour]
state_combinations = list(itertools.product(*all_vars))
"""websocket comm"""
Agent.writer = writer
"""agent 1"""
states1 = [tuple_namer('Sensor1', i) for i in state_combinations]
initial_state1 = tuple_namer('Sensor1', (3,'running', 'running'))
actions_states1 = create_action_states(states1)
agent1 = Agent(actions_states1, state_rewards, initial_state1, wakeup, Sarsa, 1011, loop)
"""agent 2"""
states2 = [tuple_namer('Sensor2', i) for i in state_combinations]
initial_state2 = tuple_namer('Sensor2', (16,'running', 'sleeping'))
actions_states2 = create_action_states(states2)
agent2 = Agent(actions_states2, state_rewards, initial_state2, wakeup, Sarsa, 1022, loop)
"""message passing between agents"""
qs = {'Sensor1':agent1.sensing_q, 'Sensor2':agent2.sensing_q}
"""message passing to websocket"""
writerq = []#asyncio.PriorityQueue(maxsize = 2048)
start_server = websockets.serve(socketwriter, '127.0.0.1', 8080)
"""now define our environments"""
env_reactions = {'go_to_sleep':reaction_default,'prepare_sleep':reaction_default, 'wakeup':reaction_default,
'noop':reaction_default}
env1 = Environment(env_reactions,[copy.deepcopy(battery_action)], agent1.sensing_q, agent1.action_q)
env2 = Environment(env_reactions,[copy.deepcopy(battery_action)], agent2.sensing_q, agent2.action_q)
"""now run the simulation"""
tasks = [agent1.experience_environment(), env1.react_to_action(),
agent2.experience_environment(), env2.react_to_action()]#,start_server]
for i in env1.env_actions:
tasks.append(i(agent1.sensing_q))
for j in env2.env_actions:
tasks.append(j(agent2.sensing_q))
def loop_stopper():
print('loop stopper')
loop.stop()
print('saving')
dictionary_saver(agent1.learner.q, 'agent1_consolidate')
tracker_saver(agent1.learner.updatecount, 'agent1_hist')
dictionary_saver(agent2.learner.q, 'agent2_consolidate')
tracker_saver(agent2.learner.updatecount, 'agent2_hist')
sklearn.externals.joblib.dump(writerq, 'pend_writer')
print('saved')
loop.call_later(600, loop_stopper)
loop.run_until_complete(asyncio.wait(tasks))
|
mit
| -7,540,607,257,386,654,000
| 40.446701
| 112
| 0.658298
| false
| 3.489316
| false
| false
| false
|
Freso/listenbrainz-server
|
listenbrainz/domain/tests/test_spotify.py
|
1
|
9004
|
import time
import requests_mock
from flask import current_app
from listenbrainz.domain import spotify
from listenbrainz.webserver.testing import ServerTestCase
from unittest import mock
class SpotifyDomainTestCase(ServerTestCase):
def setUp(self):
super(SpotifyDomainTestCase, self).setUp()
self.spotify_user = spotify.Spotify(
user_id=1,
musicbrainz_id='spotify_user',
musicbrainz_row_id=312,
user_token='old-token',
token_expires=int(time.time()),
refresh_token='old-refresh-token',
last_updated=None,
record_listens=True,
error_message=None,
latest_listened_at=None,
permission='user-read-recently-played',
)
def test_none_values_for_last_updated_and_latest_listened_at(self):
self.assertIsNone(self.spotify_user.last_updated_iso)
self.assertIsNone(self.spotify_user.latest_listened_at_iso)
# apparently, requests_mocker does not follow the usual order in which decorators are applied. :-(
@requests_mock.Mocker()
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_user')
@mock.patch('listenbrainz.domain.spotify.db_spotify.update_token')
def test_refresh_user_token(self, mock_requests, mock_update_token, mock_get_user):
expires_at = int(time.time()) + 3600
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=200, json={
'access_token': 'tokentoken',
'refresh_token': 'refreshtokentoken',
'expires_in': 3600,
'scope': '',
})
spotify.refresh_user_token(self.spotify_user)
mock_update_token.assert_called_with(
self.spotify_user.user_id,
'tokentoken',
'refreshtokentoken',
expires_at,
)
mock_get_user.assert_called_with(self.spotify_user.user_id)
@requests_mock.Mocker()
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_user')
@mock.patch('listenbrainz.domain.spotify.db_spotify.update_token')
def test_refresh_user_token_only_access(self, mock_requests, mock_update_token, mock_get_user):
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=200, json={
'access_token': 'tokentoken',
'expires_in': 3600,
'scope': '',
})
spotify.refresh_user_token(self.spotify_user)
mock_update_token.assert_called_with(
self.spotify_user.user_id,
'tokentoken',
'old-refresh-token',
mock.ANY # expires_at cannot be accurately calculated hence using mock.ANY
# another option is using a range for expires_at and a Matcher but that seems far more work
)
mock_get_user.assert_called_with(self.spotify_user.user_id)
@requests_mock.Mocker()
def test_refresh_user_token_bad(self, mock_requests):
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=400, json={
'error': 'invalid request',
'error_description': 'invalid refresh token',
})
with self.assertRaises(spotify.SpotifyAPIError):
spotify.refresh_user_token(self.spotify_user)
# apparently, requests_mocker does not follow the usual order in which decorators are applied. :-(
@requests_mock.Mocker()
def test_refresh_user_token_revoked(self, mock_requests):
mock_requests.post(spotify.OAUTH_TOKEN_URL, status_code=400, json={
'error': 'invalid_grant',
'error_description': 'Refresh token revoked',
})
with self.assertRaises(spotify.SpotifyInvalidGrantError):
spotify.refresh_user_token(self.spotify_user)
def test_get_spotify_oauth(self):
func_oauth = spotify.get_spotify_oauth()
self.assertEqual(func_oauth.client_id, current_app.config['SPOTIFY_CLIENT_ID'])
self.assertEqual(func_oauth.client_secret, current_app.config['SPOTIFY_CLIENT_SECRET'])
self.assertEqual(func_oauth.redirect_uri, 'http://localhost/profile/connect-spotify/callback')
self.assertIsNone(func_oauth.scope)
func_oauth = spotify.get_spotify_oauth(spotify.SPOTIFY_LISTEN_PERMISSIONS)
self.assertIn('streaming', func_oauth.scope)
self.assertIn('user-read-email', func_oauth.scope)
self.assertIn('user-read-private', func_oauth.scope)
self.assertIn('playlist-modify-public', func_oauth.scope)
self.assertIn('playlist-modify-private', func_oauth.scope)
self.assertNotIn('user-read-recently-played', func_oauth.scope)
self.assertNotIn('user-read-currently-playing', func_oauth.scope)
func_oauth = spotify.get_spotify_oauth(spotify.SPOTIFY_IMPORT_PERMISSIONS)
self.assertIn('user-read-currently-playing', func_oauth.scope)
self.assertIn('user-read-recently-played', func_oauth.scope)
self.assertNotIn('streaming', func_oauth.scope)
self.assertNotIn('user-read-email', func_oauth.scope)
self.assertNotIn('user-read-private', func_oauth.scope)
self.assertNotIn('playlist-modify-public', func_oauth.scope)
self.assertNotIn('playlist-modify-private', func_oauth.scope)
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_user')
def test_get_user(self, mock_db_get_user):
t = int(time.time())
mock_db_get_user.return_value = {
'user_id': 1,
'musicbrainz_id': 'spotify_user',
'musicbrainz_row_id': 312,
'user_token': 'token-token-token',
'token_expires': t,
'refresh_token': 'refresh-refresh-refresh',
'last_updated': None,
'record_listens': True,
'error_message': 'oops',
'latest_listened_at': None,
'permission': 'user-read-recently-played',
}
user = spotify.get_user(1)
self.assertIsInstance(user, spotify.Spotify)
self.assertEqual(user.user_id, 1)
self.assertEqual(user.musicbrainz_id, 'spotify_user')
self.assertEqual(user.user_token, 'token-token-token')
self.assertEqual(user.token_expires, t)
self.assertEqual(user.last_updated, None)
self.assertEqual(user.record_listens, True)
self.assertEqual(user.error_message, 'oops')
@mock.patch('listenbrainz.domain.spotify.db_spotify.delete_spotify')
def test_remove_user(self, mock_delete):
spotify.remove_user(1)
mock_delete.assert_called_with(1)
@mock.patch('listenbrainz.domain.spotify.db_spotify.create_spotify')
@mock.patch('listenbrainz.domain.spotify.time.time')
def test_add_new_user(self, mock_time, mock_create):
mock_time.return_value = 0
spotify.add_new_user(1, {
'access_token': 'access-token',
'refresh_token': 'refresh-token',
'expires_in': 3600,
'scope': '',
})
mock_create.assert_called_with(1, 'access-token', 'refresh-token', 3600, False, '')
@mock.patch('listenbrainz.domain.spotify.db_spotify.get_active_users_to_process')
def test_get_active_users(self, mock_get_active_users):
t = int(time.time())
mock_get_active_users.return_value = [
{
'user_id': 1,
'musicbrainz_id': 'spotify_user',
'musicbrainz_row_id': 312,
'user_token': 'token-token-token',
'token_expires': t,
'refresh_token': 'refresh-refresh-refresh',
'last_updated': None,
'record_listens': True,
'error_message': 'oops',
'latest_listened_at': None,
'permission': 'user-read-recently-played',
},
{
'user_id': 2,
'musicbrainz_id': 'spotify_user_2',
'musicbrainz_row_id': 321,
'user_token': 'token-token-token321',
'token_expires': t + 31,
'refresh_token': 'refresh-refresh-refresh321',
'last_updated': None,
'record_listens': True,
'error_message': 'oops2',
'latest_listened_at': None,
'permission': 'user-read-recently-played',
},
]
lst = spotify.get_active_users_to_process()
mock_get_active_users.assert_called_once()
self.assertEqual(len(lst), 2)
self.assertIsInstance(lst[0], spotify.Spotify)
self.assertIsInstance(lst[1], spotify.Spotify)
self.assertEqual(lst[0].user_id, 1)
self.assertEqual(lst[1].user_id, 2)
@mock.patch('listenbrainz.domain.spotify.db_spotify.update_latest_listened_at')
def test_update_latest_listened_at(self, mock_update_listened_at):
t = int(time.time())
spotify.update_latest_listened_at(1, t)
mock_update_listened_at.assert_called_once_with(1, t)
|
gpl-2.0
| 3,376,187,134,347,358,700
| 42.708738
| 103
| 0.615504
| false
| 3.550473
| true
| false
| false
|
dopuskh3/confluence-publisher
|
conf_publisher/confluence.py
|
1
|
7678
|
import os
import copy
from operator import attrgetter
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
class Content(object):
type = None
def __init__(self):
self.id = None
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Attachement(Content):
type = 'attachment'
def __init__(self):
self.title = ''
self.media_type = ''
super(Attachement, self).__init__()
class ImageAttachement(Attachement):
pass
class DownloadAttachement(Attachement):
pass
class Page(Content):
type = 'page'
def __init__(self):
self.version_number = 0
self.space_key = None
self.ancestors = list()
self.body = None
self.title = None
self.unused_title = None
super(Page, self).__init__()
def __eq__(self, other):
first = copy.copy(self.__dict__)
second = copy.copy(other.__dict__)
del first['ancestors']
del second['ancestors']
if len(self.ancestors) != len(other.ancestors):
return False
for first_ancestor, second_ancestor in zip(self.ancestors, other.ancestors):
if not (first_ancestor == second_ancestor):
return False
del first['body']
del second['body']
if not PageBodyComparator.is_equal(self.body, other.body):
return False
return first == second
class Ancestor(Content):
type = 'page'
class ConfluenceManager(object):
def __init__(self, api):
self._api = api
class ConfluencePageManager(ConfluenceManager):
def load(self, content_id):
data = self._api.get_content(content_id, 'ancestors,version,space,body.storage')
p = Page()
p.id = data['id']
p.type = data['type']
p.version_number = data['version']['number']
p.space_key = data['space']['key']
p.title = data['title']
p.body = data['body']['storage']['value']
for ancestor_data in data['ancestors']:
ancestor = Ancestor()
ancestor.id = ancestor_data['id']
ancestor.type = ancestor_data['type']
p.ancestors.append(ancestor)
return p
def create(self, page):
ancestor = page.ancestors[-1]
data = self._page_payload(page.space_key, page.body, page.title,
ancestor_id=ancestor.id, ancestor_type=ancestor.type,)
ret = self._api.create_content(data)
page.id = ret['id']
return page.id
def update(self, page, bump_version=True):
if bump_version:
page.version_number += 1
ancestor = page.ancestors[-1]
data = self._page_payload(page.space_key, page.body, page.title,
ancestor_id=ancestor.id, ancestor_type=ancestor.type,
content_id=page.id, version=page.version_number)
ret = self._api.update_content(page.id, data)
page.id = ret['id']
return page.id
@staticmethod
def _page_payload(space_key, body=None, title=None,
ancestor_id=None, ancestor_type='page',
content_id=None, version=None, content_type='page'):
payload = {
'type': content_type,
'space': {
'key': space_key
},
}
if body:
payload['body'] = {
'storage': {
'value': body,
'representation': 'storage'
}
}
if ancestor_id:
payload['ancestors'] = [
{
'type': ancestor_type,
'id': ancestor_id,
}
]
if content_id:
payload['id'] = content_id
if title:
payload['title'] = title
if version:
payload['version'] = {
'number': version
}
return payload
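# (Illustrative only, with hypothetical values: _page_payload('DOC', body='<p>hi</p>',
# title='T', ancestor_id='123', content_id='42', version=7) returns
# {'type': 'page', 'space': {'key': 'DOC'},
#  'body': {'storage': {'value': '<p>hi</p>', 'representation': 'storage'}},
#  'ancestors': [{'type': 'page', 'id': '123'}], 'id': '42', 'title': 'T',
#  'version': {'number': 7}}.)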
class AttachmentPublisher(ConfluenceManager):
def publish(self, content_id, filepath):
attachments = self._get_page_metadata(content_id)
filename = os.path.basename(filepath)
if filename in map(attrgetter('title'), attachments):
# TODO: fixme. skipping if file already exists. its ugly hack
return
with open(filepath, 'rb') as f:
self._api.create_attachment(content_id, f)
@staticmethod
def _parse_attachments(data):
attachments = []
for attachment_data in data['children']['attachment']['results']:
media_type = attachment_data['metadata']['mediaType']
attachment_class = ImageAttachement if 'image' in media_type else DownloadAttachement
attachment = attachment_class()
attachment.id = attachment_data['id']
attachment.title = attachment_data['title']
attachment.media_type = media_type
attachments.append(attachment)
return attachments
def _get_page_metadata(self, content_id):
data = self._api.get_content(content_id, 'children.attachment')
page_attachments = self._parse_attachments(data)
return page_attachments
class PageBodyComparator(object):
@classmethod
def is_equal(cls, first, second):
if first == '' and second == '':
return True
if first == '' and second != '' or first != '' and second == '':
return False
# 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# 'xsi:schemaLocation="http://www.atlassian.com/schema/confluence/4/ac/ confluence.xsd" '
wrapper = u'<?xml version="1.0" encoding="UTF-8"?>' \
u'<!DOCTYPE ac:confluence SYSTEM "confluence.dtd">' \
u'<ac:confluence xmlns:ac="http://www.atlassian.com/schema/confluence/4/ac/" ' \
u'xmlns:ri="http://www.atlassian.com/schema/confluence/4/ri/">{}</ac:confluence>'
first_xml = etree.XML(wrapper.format(first).encode(encoding='utf-8'), parser=cls._parser())
second_xml = etree.XML(wrapper.format(second).encode(encoding='utf-8'), parser=cls._parser())
return cls._elements_equal(first_xml, second_xml)
@staticmethod
def _parser():
# use lxml HTMLParser if it exists
if hasattr(etree, 'HTMLParser'):
return etree.HTMLParser()
# or xml.etree.ElementTree.XMLParser
# fix unknown entity
# http://stackoverflow.com/questions/7237466/python-elementtree-support-for-parsing-unknown-xml-entities
parser = etree.XMLParser()
parser.entity['nbsp'] = 'nbsp'
return parser
@classmethod
def _elements_equal(cls, e1, e2):
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if not cls._attributes_equals(e1, e2):
return False
if len(e1) != len(e2):
return False
return all(cls._elements_equal(c1, c2) for c1, c2 in zip(e1, e2))
@staticmethod
def _attributes_equals(e1, e2):
# confluence create additional attributes for structured macros
if 'structured-macro' == e1.tag:
return e1.attrib.get('name') == e2.attrib.get('name')
elif 'structured-macro' in e1.tag:
confluence_ac_attribute_name = '{http://www.atlassian.com/schema/confluence/4/ac/}name'
return e1.attrib.get(confluence_ac_attribute_name) == e2.attrib.get(confluence_ac_attribute_name)
return e1.attrib == e2.attrib
|
mit
| 1,458,645,083,216,607,500
| 29.347826
| 112
| 0.565642
| false
| 3.99272
| false
| false
| false
|
ampotty/uip-pc3
|
Ejemplos/ejemplo16.py
|
1
|
1153
|
def isPalindromicNumber(num: int) -> bool:
"""
Determina sin un numero es palindromico
:param num: Numbero entero a evaluar
:type num: int
:return: Verdadero si es numero palindromico; Falso si no es numero palindromico
:rtype: bool
"""
try:
if type(num) != int:
raise TypeError("(Tipo incorrecto) Tipo <int> esperado.")
source = [int(n) for n in str(num)]
clone = source[:]
clone.reverse()
return source == clone
except TypeError as error:
print(error.with_traceback())
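# (Worked example, assuming the function above: for num = 121 the digit list
# is [1, 2, 1] and its reverse is also [1, 2, 1], so the function returns True;
# for num = 123 the reverse is [3, 2, 1], which differs, so it returns False.)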
if __name__ == '__main__':
"""
-- Determinar numero palindromico --
Leer un numero entero e imprimir si es numero palindromico.
Un numero es palindromico, si sus digitos se mantiene lo mismo si es invertido.
En otras palabras es simetrico [https://es.wikipedia.org/wiki/Capic%C3%BAa]
NOTA: Ejemplo utiliza interpretador de Python 3.x.x
"""
try:
number = int(input("Digite un numero: "))
except:
raise
truthiness = "es" if isPalindromicNumber(number) else "no"
print("%d %s numero Palindromo." % (number, truthiness))
|
mit
| 7,433,402,708,684,758,000
| 27.825
| 84
| 0.62706
| false
| 3.211699
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-storage-blob/samples/blob_samples_service_async.py
|
1
|
8293
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: blob_samples_service_async.py
DESCRIPTION:
This sample demos basic operations of the blob service client.
USAGE: python blob_samples_service_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
import os
import asyncio
from azure.core.exceptions import ResourceNotFoundError, ResourceExistsError
class BlobServiceSamplesAsync(object):
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
async def get_storage_account_information_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START get_blob_service_account_info]
account_info = await blob_service_client.get_account_information()
print('Using Storage SKU: {}'.format(account_info['sku_name']))
# [END get_blob_service_account_info]
async def blob_service_properties_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START set_blob_service_properties]
# Create service properties
from azure.storage.blob import BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy
# Create logging settings
logging = BlobAnalyticsLogging(read=True, write=True, delete=True, retention_policy=RetentionPolicy(enabled=True, days=5))
# Create metrics for requests statistics
hour_metrics = Metrics(enabled=True, include_apis=True, retention_policy=RetentionPolicy(enabled=True, days=5))
minute_metrics = Metrics(enabled=True, include_apis=True,
retention_policy=RetentionPolicy(enabled=True, days=5))
# Create CORS rules
cors_rule = CorsRule(['www.xyz.com'], ['GET'])
cors = [cors_rule]
# Set the service properties
await blob_service_client.set_service_properties(logging, hour_metrics, minute_metrics, cors)
# [END set_blob_service_properties]
# [START get_blob_service_properties]
properties = await blob_service_client.get_service_properties()
# [END get_blob_service_properties]
async def blob_service_stats_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START get_blob_service_stats]
stats = await blob_service_client.get_service_stats()
# [END get_blob_service_stats]
async def container_operations_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
try:
# [START bsc_create_container]
try:
new_container = await blob_service_client.create_container("containerfromblobserviceasync")
properties = await new_container.get_container_properties()
except ResourceExistsError:
print("Container already exists.")
# [END bsc_create_container]
# [START bsc_list_containers]
# List all containers
all_containers = []
async for container in blob_service_client.list_containers(include_metadata=True):
all_containers.append(container)
for container in all_containers:
print(container['name'], container['metadata'])
# Filter results with name prefix
test_containers = []
async for name in blob_service_client.list_containers(name_starts_with='test-'):
test_containers.append(name)
for container in test_containers:
print(container['name'], container['metadata'])
# [END bsc_list_containers]
finally:
# [START bsc_delete_container]
# Delete container if it exists
try:
await blob_service_client.delete_container("containerfromblobserviceasync")
except ResourceNotFoundError:
print("Container already deleted.")
# [END bsc_delete_container]
async def get_blob_and_container_clients_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
async with blob_service_client:
# [START bsc_get_container_client]
# Get a client to interact with a specific container - though it may not yet exist
container_client = blob_service_client.get_container_client("containertestasync")
try:
blobs_list = []
async for blob in container_client.list_blobs():
blobs_list.append(blob)
for blob in blobs_list:
print("Found blob: ", blob.name)
except ResourceNotFoundError:
print("Container not found.")
# [END bsc_get_container_client]
try:
# Create new Container in the service
await container_client.create_container()
# [START bsc_get_blob_client]
blob_client = blob_service_client.get_blob_client(container="containertestasync", blob="my_blob")
try:
stream = await blob_client.download_blob()
except ResourceNotFoundError:
print("No blob found.")
# [END bsc_get_blob_client]
finally:
# Delete the container
await blob_service_client.delete_container("containertestasync")
async def get_blob_service_client_from_container_client_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import ContainerClient
container_client1 = ContainerClient.from_connection_string(self.connection_string, "container")
await container_client1.create_container()
# [START get_blob_service_client_from_container_client]
blob_service_client = container_client1._get_blob_service_client()
print(await blob_service_client.get_service_properties())
container_client2 = blob_service_client.get_container_client("container")
print(await container_client2.get_container_properties())
await container_client2.delete_container()
await container_client1.close()
# [END get_blob_service_client_from_container_client]
async def main():
sample = BlobServiceSamplesAsync()
await sample.get_storage_account_information_async()
await sample.get_blob_and_container_clients_async()
await sample.container_operations_async()
await sample.blob_service_properties_async()
await sample.blob_service_stats_async()
await sample.get_blob_service_client_from_container_client_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
mit
| -7,345,336,612,967,402,000
| 42.878307
| 134
| 0.633064
| false
| 4.625209
| true
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/setup.py
|
1
|
2961
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-cognitiveservices-search-visualsearch"
PACKAGE_PPRINT_NAME = "Cognitive Services Visual Search"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.cognitiveservices',
'azure.cognitiveservices.search',
]),
install_requires=[
'msrest>=0.5.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-cognitiveservices-search-nspkg'],
}
)
|
mit
| 5,800,291,530,648,143,000
| 32.647727
| 91
| 0.602499
| false
| 3.896053
| false
| false
| false
|
beeftornado/sentry
|
src/sentry/migrations/0023_hide_environment_none_20191126.py
|
2
|
1733
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def hide_environment_none(apps, schema_editor):
"""
    Hide environments that are named 'none', since they're blacklisted and can no longer be created.
    We iterate over each environment row individually in Python (rather than doing a bulk UPDATE)
    so that we don't lock the DB up. This is far slower but much safer.
"""
EnvironmentProject = apps.get_model("sentry", "EnvironmentProject")
for project in EnvironmentProject.objects.filter(environment__name='none'):
project.is_hidden = True
project.save()
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Adding indexes to large tables. These indexes should be created concurrently,
# unfortunately we can't run migrations outside of a transaction until Django
# 1.10. So until then these should be run manually.
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
dependencies = [
("sentry", "0022_merge"),
]
operations = [
migrations.RunPython(hide_environment_none, migrations.RunPython.noop)
]
|
bsd-3-clause
| -3,321,665,843,704,771,600
| 41.268293
| 119
| 0.709175
| false
| 4.420918
| false
| false
| false
|
denz/swarm
|
swarm/helpers.py
|
1
|
11212
|
# -*- coding: utf-8 -*-
import sys
import os
import pkgutil
from multiprocessing import RLock
from types import StringTypes
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
filepath = sys.modules[import_name].__file__
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
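# Illustrative calls (results depend on the local Python installation; the names here
# are only examples):
#   get_root_path('json')         # -> the directory of the json package itself
#   get_root_path('genericpath')  # -> the stdlib folder containing genericpath.py
#   get_root_path('__main__')     # -> os.getcwd() in an interactive session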
class _PackageBoundObject(object):
def __init__(self, import_name):
#: The name of the package or module. Do not change this once
#: it was set by the constructor.
self.import_name = import_name
#: Where is the app root located?
self.root_path = get_root_path(self.import_name)
# sentinel
_missing = object()
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Flask(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
.. versionchanged:: 0.6
the `writeable` attribute and parameter was deprecated. If a
cached property is writeable or not has to be documented now.
For performance reasons the implementation does not honor the
writeable setting and will always make the property writeable.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one choses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None, writeable=False):
if writeable:
from warnings import warn
warn(DeprecationWarning('the writeable argument to the '
'cached property is a noop since 0.6 '
'because the property is writeable '
'by default for performance reasons'))
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
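# Minimal usage sketch for cached_property (the Config class and load_settings call are
# hypothetical, not part of this module):
#
#   class Config(object):
#       @cached_property
#       def settings(self):
#           return load_settings()   # some expensive computation
#
#   cfg = Config()
#   cfg.settings   # first access runs load_settings() and stores the result in cfg.__dict__
#   cfg.settings   # later accesses hit the instance __dict__ directly, bypassing the descriptor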
class locked_cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
For better debugging we recommend the new :func:`import_module`
function to be used instead.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
if isinstance(import_name, unicode):
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError, e:
if not silent:
raise ImportStringError(import_name, e), None, sys.exc_info()[2]
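# Illustrative calls showing the two accepted notations (and the silent mode):
#   import_string('os.path.join')                  # dotted notation
#   import_string('os.path:join')                  # colon as object delimiter, same result
#   import_string('no.such.module', silent=True)   # -> None instead of raising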
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# package_path ends with __init__.py for a package
if loader.is_package(root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
def caller_locals(decorators=0):
'''
    Goes x frames back up the call stack and returns the local namespace for that frame
'''
f = sys._getframe(1 + decorators)
return f.f_locals
obj_converter = lambda obj_path: import_string(obj_path) \
if isinstance(obj_path, basestring)\
else obj_path
def obj_list_converter(string_or_stringlist):
if string_or_stringlist is None:
return []
if isinstance(string_or_stringlist, StringTypes):
return [obj_converter(string_or_stringlist),]
else:
lst = []
for obj_path in string_or_stringlist:
lst.append(obj_converter(obj_path))
return lst
|
bsd-3-clause
| 4,586,606,665,246,934,000
| 38.206294
| 78
| 0.604085
| false
| 4.287572
| false
| false
| false
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/numarray/numerictypes.py
|
1
|
15882
|
"""numerictypes: Define the numeric type objects
This module is designed so 'from numerictypes import *' is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Numeric type objects:
Bool
Int8 Int16 Int32 Int64
UInt8 UInt16 UInt32 UInt64
    Float32 Float64
Complex32 Complex64
Numeric type classes:
NumericType
BooleanType
SignedType
UnsignedType
IntegralType
SignedIntegralType
UnsignedIntegralType
FloatingType
ComplexType
$Id: numerictypes.py,v 1.55 2005/12/01 16:22:03 jaytmiller Exp $
"""
__all__ = ['NumericType','HasUInt64','typeDict','IsType',
'BooleanType', 'SignedType', 'UnsignedType', 'IntegralType',
'SignedIntegralType', 'UnsignedIntegralType', 'FloatingType',
'ComplexType', 'AnyType', 'ObjectType', 'Any', 'Object',
'Bool', 'Int8', 'Int16', 'Int32', 'Int64', 'Float32',
'Float64', 'UInt8', 'UInt16', 'UInt32', 'UInt64',
'Complex32', 'Complex64', 'Byte', 'Short', 'Int','Long',
'Float', 'Complex', 'genericTypeRank', 'pythonTypeRank',
'pythonTypeMap', 'scalarTypeMap', 'genericCoercions',
'typecodes', 'genericPromotionExclusions','MaximumType',
'getType','scalarTypes', 'typefrom']
MAX_ALIGN = 8
MAX_INT_SIZE = 8
import numpy
LP64 = numpy.intp(0).itemsize == 8
HasUInt64 = 1
try:
numpy.int64(0)
except:
HasUInt64 = 0
#from typeconv import typeConverters as _typeConverters
#import numinclude
#from _numerictype import _numerictype, typeDict
# Enumeration of numarray type codes
typeDict = {}
_tAny = 0
_tBool = 1
_tInt8 = 2
_tUInt8 = 3
_tInt16 = 4
_tUInt16 = 5
_tInt32 = 6
_tUInt32 = 7
_tInt64 = 8
_tUInt64 = 9
_tFloat32 = 10
_tFloat64 = 11
_tComplex32 = 12
_tComplex64 = 13
_tObject = 14
def IsType(rep):
"""Determines whether the given object or string, 'rep', represents
a numarray type."""
return isinstance(rep, NumericType) or rep in typeDict
def _register(name, type, force=0):
"""Register the type object. Raise an exception if it is already registered
unless force is true.
"""
if name in typeDict and not force:
raise ValueError("Type %s has already been registered" % name)
typeDict[name] = type
return type
class NumericType(object):
"""Numeric type class
Used both as a type identification and the repository of
characteristics and conversion functions.
"""
    def __new__(type, name, bytes, default, typeno):
        """__new__() implements a 'quasi-singleton' pattern because attempts
to create duplicate types return the first created instance of that
particular type parameterization, i.e. the second time you try to
create "Int32", you get the original Int32, not a new one.
"""
if name in typeDict:
self = typeDict[name]
if self.bytes != bytes or self.default != default or \
self.typeno != typeno:
raise ValueError("Redeclaration of existing NumericType "\
"with different parameters.")
return self
else:
self = object.__new__(type)
self.name = "no name"
self.bytes = None
self.default = None
self.typeno = -1
return self
def __init__(self, name, bytes, default, typeno):
if not isinstance(name, str):
raise TypeError("name must be a string")
self.name = name
self.bytes = bytes
self.default = default
self.typeno = typeno
self._conv = None
_register(self.name, self)
def __getnewargs__(self):
"""support the pickling protocol."""
return (self.name, self.bytes, self.default, self.typeno)
def __getstate__(self):
"""support pickling protocol... no __setstate__ required."""
        return False
class BooleanType(NumericType):
pass
class SignedType(object):
"""Marker class used for signed type check"""
pass
class UnsignedType(object):
"""Marker class used for unsigned type check"""
pass
class IntegralType(NumericType):
pass
class SignedIntegralType(IntegralType, SignedType):
pass
class UnsignedIntegralType(IntegralType, UnsignedType):
pass
class FloatingType(NumericType):
pass
class ComplexType(NumericType):
pass
class AnyType(NumericType):
pass
class ObjectType(NumericType):
pass
# C-API Type Any
Any = AnyType("Any", None, None, _tAny)
Object = ObjectType("Object", None, None, _tObject)
# Numeric Types:
Bool = BooleanType("Bool", 1, 0, _tBool)
Int8 = SignedIntegralType( "Int8", 1, 0, _tInt8)
Int16 = SignedIntegralType("Int16", 2, 0, _tInt16)
Int32 = SignedIntegralType("Int32", 4, 0, _tInt32)
Int64 = SignedIntegralType("Int64", 8, 0, _tInt64)
Float32 = FloatingType("Float32", 4, 0.0, _tFloat32)
Float64 = FloatingType("Float64", 8, 0.0, _tFloat64)
UInt8 = UnsignedIntegralType( "UInt8", 1, 0, _tUInt8)
UInt16 = UnsignedIntegralType("UInt16", 2, 0, _tUInt16)
UInt32 = UnsignedIntegralType("UInt32", 4, 0, _tUInt32)
UInt64 = UnsignedIntegralType("UInt64", 8, 0, _tUInt64)
Complex32 = ComplexType("Complex32", 8, complex(0.0), _tComplex32)
Complex64 = ComplexType("Complex64", 16, complex(0.0), _tComplex64)
Object.dtype = 'O'
Bool.dtype = '?'
Int8.dtype = 'i1'
Int16.dtype = 'i2'
Int32.dtype = 'i4'
Int64.dtype = 'i8'
UInt8.dtype = 'u1'
UInt16.dtype = 'u2'
UInt32.dtype = 'u4'
UInt64.dtype = 'u8'
Float32.dtype = 'f4'
Float64.dtype = 'f8'
Complex32.dtype = 'c8'
Complex64.dtype = 'c16'
# Aliases
Byte = _register("Byte", Int8)
Short = _register("Short", Int16)
Int = _register("Int", Int32)
if LP64:
Long = _register("Long", Int64)
if HasUInt64:
_register("ULong", UInt64)
MaybeLong = _register("MaybeLong", Int64)
__all__.append('MaybeLong')
else:
Long = _register("Long", Int32)
_register("ULong", UInt32)
MaybeLong = _register("MaybeLong", Int32)
__all__.append('MaybeLong')
_register("UByte", UInt8)
_register("UShort", UInt16)
_register("UInt", UInt32)
Float = _register("Float", Float64)
Complex = _register("Complex", Complex64)
# short forms
_register("b1", Bool)
_register("u1", UInt8)
_register("u2", UInt16)
_register("u4", UInt32)
_register("i1", Int8)
_register("i2", Int16)
_register("i4", Int32)
_register("i8", Int64)
if HasUInt64:
_register("u8", UInt64)
_register("f4", Float32)
_register("f8", Float64)
_register("c8", Complex32)
_register("c16", Complex64)
# NumPy forms
_register("1", Int8)
_register("B", Bool)
_register("c", Int8)
_register("b", UInt8)
_register("s", Int16)
_register("w", UInt16)
_register("i", Int32)
_register("N", Int64)
_register("u", UInt32)
_register("U", UInt64)
if LP64:
_register("l", Int64)
else:
_register("l", Int32)
_register("d", Float64)
_register("f", Float32)
_register("D", Complex64)
_register("F", Complex32)
# scipy.base forms
def _scipy_alias(scipy_type, numarray_type):
_register(scipy_type, eval(numarray_type))
globals()[scipy_type] = globals()[numarray_type]
_scipy_alias("bool_", "Bool")
_scipy_alias("bool8", "Bool")
_scipy_alias("int8", "Int8")
_scipy_alias("uint8", "UInt8")
_scipy_alias("int16", "Int16")
_scipy_alias("uint16", "UInt16")
_scipy_alias("int32", "Int32")
_scipy_alias("uint32", "UInt32")
_scipy_alias("int64", "Int64")
_scipy_alias("uint64", "UInt64")
_scipy_alias("float64", "Float64")
_scipy_alias("float32", "Float32")
_scipy_alias("complex128", "Complex64")
_scipy_alias("complex64", "Complex32")
# The rest is used by numeric modules to determine conversions
# Ranking of types from lowest to highest (sorta)
if not HasUInt64:
genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',
'Int32', 'UInt32', 'Int64',
'Float32','Float64', 'Complex32', 'Complex64', 'Object']
else:
genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',
'Int32', 'UInt32', 'Int64', 'UInt64',
'Float32','Float64', 'Complex32', 'Complex64', 'Object']
pythonTypeRank = [ bool, int, long, float, complex ]
# The next line is not platform independent XXX Needs to be generalized
if not LP64:
pythonTypeMap = {
int:("Int32","int"),
long:("Int64","int"),
float:("Float64","float"),
complex:("Complex64","complex")}
scalarTypeMap = {
int:"Int32",
long:"Int64",
float:"Float64",
complex:"Complex64"}
else:
pythonTypeMap = {
int:("Int64","int"),
long:("Int64","int"),
float:("Float64","float"),
complex:("Complex64","complex")}
scalarTypeMap = {
int:"Int64",
long:"Int64",
float:"Float64",
complex:"Complex64"}
pythonTypeMap.update({bool:("Bool","bool") })
scalarTypeMap.update({bool:"Bool"})
# Generate coercion matrix
def _initGenericCoercions():
global genericCoercions
genericCoercions = {}
# vector with ...
for ntype1 in genericTypeRank:
nt1 = typeDict[ntype1]
rank1 = genericTypeRank.index(ntype1)
ntypesize1, inttype1, signedtype1 = nt1.bytes, \
isinstance(nt1, IntegralType), isinstance(nt1, SignedIntegralType)
for ntype2 in genericTypeRank:
# vector
nt2 = typeDict[ntype2]
ntypesize2, inttype2, signedtype2 = nt2.bytes, \
isinstance(nt2, IntegralType), isinstance(nt2, SignedIntegralType)
rank2 = genericTypeRank.index(ntype2)
if (signedtype1 != signedtype2) and inttype1 and inttype2:
# mixing of signed and unsigned ints is a special case
# If unsigned same size or larger, final size needs to be bigger
# if possible
if signedtype1:
if ntypesize2 >= ntypesize1:
size = min(2*ntypesize2, MAX_INT_SIZE)
else:
size = ntypesize1
else:
if ntypesize1 >= ntypesize2:
size = min(2*ntypesize1, MAX_INT_SIZE)
else:
size = ntypesize2
outtype = "Int"+str(8*size)
else:
if rank1 >= rank2:
outtype = ntype1
else:
outtype = ntype2
genericCoercions[(ntype1, ntype2)] = outtype
for ntype2 in pythonTypeRank:
# scalar
mapto, kind = pythonTypeMap[ntype2]
if ((inttype1 and kind=="int") or (not inttype1 and kind=="float")):
# both are of the same "kind" thus vector type dominates
outtype = ntype1
else:
rank2 = genericTypeRank.index(mapto)
if rank1 >= rank2:
outtype = ntype1
else:
outtype = mapto
genericCoercions[(ntype1, ntype2)] = outtype
genericCoercions[(ntype2, ntype1)] = outtype
# scalar-scalar
for ntype1 in pythonTypeRank:
maptype1 = scalarTypeMap[ntype1]
genericCoercions[(ntype1,)] = maptype1
for ntype2 in pythonTypeRank:
maptype2 = scalarTypeMap[ntype2]
genericCoercions[(ntype1, ntype2)] = genericCoercions[(maptype1, maptype2)]
# Special cases more easily dealt with outside of the loop
genericCoercions[("Complex32", "Float64")] = "Complex64"
genericCoercions[("Float64", "Complex32")] = "Complex64"
genericCoercions[("Complex32", "Int64")] = "Complex64"
genericCoercions[("Int64", "Complex32")] = "Complex64"
genericCoercions[("Complex32", "UInt64")] = "Complex64"
genericCoercions[("UInt64", "Complex32")] = "Complex64"
genericCoercions[("Int64","Float32")] = "Float64"
genericCoercions[("Float32", "Int64")] = "Float64"
genericCoercions[("UInt64","Float32")] = "Float64"
genericCoercions[("Float32", "UInt64")] = "Float64"
genericCoercions[(float, "Bool")] = "Float64"
genericCoercions[("Bool", float)] = "Float64"
genericCoercions[(float,float,float)] = "Float64" # for scipy.special
genericCoercions[(int,int,float)] = "Float64" # for scipy.special
_initGenericCoercions()
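# A few entries of the coercion table built above, derived by hand from the rules in
# _initGenericCoercions (illustrative, not exhaustive):
#   genericCoercions[("Int8", "UInt8")]    == "Int16"    # mixed sign, same size -> next bigger signed int
#   genericCoercions[("Int32", "UInt32")]  == "Int64"    # doubled size, capped at MAX_INT_SIZE bytes
#   genericCoercions[("Float32", "Int16")] == "Float32"  # otherwise the higher-ranked type wins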
# If complex is subclassed, the following may not be necessary
genericPromotionExclusions = {
'Bool': (),
'Int8': (),
'Int16': (),
'Int32': ('Float32','Complex32'),
'UInt8': (),
'UInt16': (),
'UInt32': ('Float32','Complex32'),
'Int64' : ('Float32','Complex32'),
'UInt64' : ('Float32','Complex32'),
'Float32': (),
'Float64': ('Complex32',),
'Complex32':(),
'Complex64':()
} # e.g., don't allow promotion from Float64 to Complex32 or Int64 to Float32
# Numeric typecodes
typecodes = {'Integer': '1silN',
'UnsignedInteger': 'bBwuU',
'Float': 'fd',
'Character': 'c',
'Complex': 'FD' }
if HasUInt64:
_MaximumType = {
Bool : UInt64,
Int8 : Int64,
Int16 : Int64,
Int32 : Int64,
Int64 : Int64,
UInt8 : UInt64,
UInt16 : UInt64,
UInt32 : UInt64,
        UInt64 : UInt64,
Float32 : Float64,
Float64 : Float64,
Complex32 : Complex64,
Complex64 : Complex64
}
else:
_MaximumType = {
Bool : Int64,
Int8 : Int64,
Int16 : Int64,
Int32 : Int64,
Int64 : Int64,
UInt8 : Int64,
UInt16 : Int64,
UInt32 : Int64,
        UInt64 : Int64,
Float32 : Float64,
Float64 : Float64,
Complex32 : Complex64,
Complex64 : Complex64
}
def MaximumType(t):
"""returns the type of highest precision of the same general kind as 't'"""
return _MaximumType[t]
def getType(type):
"""Return the numeric type object for type
type may be the name of a type object or the actual object
"""
if isinstance(type, NumericType):
return type
try:
return typeDict[type]
except KeyError:
raise TypeError("Not a numeric type")
scalarTypes = (bool,int,long,float,complex)
_scipy_dtypechar = {
Int8 : 'b',
UInt8 : 'B',
Int16 : 'h',
UInt16 : 'H',
Int32 : 'i',
UInt32 : 'I',
Int64 : 'q',
UInt64 : 'Q',
Float32 : 'f',
Float64 : 'd',
Complex32 : 'F', # Note the switchup here:
Complex64 : 'D' # numarray.Complex32 == scipy.complex64, etc.
}
_scipy_dtypechar_inverse = {}
for key,value in _scipy_dtypechar.items():
_scipy_dtypechar_inverse[value] = key
_val = numpy.int_(0).itemsize
if _val == 8:
_scipy_dtypechar_inverse['l'] = Int64
_scipy_dtypechar_inverse['L'] = UInt64
elif _val == 4:
_scipy_dtypechar_inverse['l'] = Int32
_scipy_dtypechar_inverse['L'] = UInt32
del _val
if LP64:
_scipy_dtypechar_inverse['p'] = Int64
_scipy_dtypechar_inverse['P'] = UInt64
else:
_scipy_dtypechar_inverse['p'] = Int32
_scipy_dtypechar_inverse['P'] = UInt32
def typefrom(obj):
return _scipy_dtypechar_inverse[obj.dtype.char]
|
gpl-3.0
| -3,608,736,657,460,471,300
| 26.981752
| 87
| 0.581287
| false
| 3.405232
| false
| false
| false
|
ryota-sugimoto/hackerrank
|
vmware/logical_hub.py
|
1
|
1266
|
#!/usr/bin/env python
def wire_port2port(d):
for host in d.keys():
hubs = d[host]
for i in range(len(hubs)):
for j in range(i+1,len(hubs)):
if hubs[i] == hubs[j]:
print "PORT_TO_PORT %s %i %i" % (host,i,j)
print "PORT_TO_PORT %s %i %i" % (host,j,i)
def make_hub_map(d):
hub_map = {}
for host in d.keys():
hubs = d[host]
for hub in hubs:
if hub_map.has_key(hub):
hub_map[hub].add(host)
else:
hub_map[hub] = set([host])
return hub_map
def wire_port2tunnel(d,hub_map):
for host in d.keys():
hubs = d[host]
for i,hub in enumerate(hubs):
for dst_host in hub_map[hub]:
if dst_host != host:
print "PORT_TO_TUNNEL %s %i %s %s" % (host,i,dst_host,hub)
def wire_tunnel2port(d,hub_map):
if len(d.keys()) > 1:
for host in d.keys():
hubs = d[host]
for i,hub in enumerate(hubs):
if len(hub_map[hub]) > 1:
print "TUNNEL_TO_PORT %s %s %i" % (host,hub,i)
import sys
hosts = {}
for s in sys.stdin:
l = s.strip().split()
host = l[0]
hosts[host] = []
for hub in l[1:]:
hosts[host].append(hub)
hub_map = make_hub_map(hosts)
wire_port2port(hosts)
wire_port2tunnel(hosts,hub_map)
wire_tunnel2port(hosts,hub_map)
|
gpl-2.0
| -9,089,723,739,468,877,000
| 23.346154
| 68
| 0.561611
| false
| 2.567951
| false
| false
| false
|
wbsavage/shinken
|
shinken/modules/glances_ui/plugins/cv_memory/cv_memory.py
|
1
|
2398
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.webui.bottle import redirect, abort
from pprint import pprint
import xmlrpclib
import socket
import json
### Will be populated by the UI with its own value
app = None
def fancy_units(num):
for x in ['','KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
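# Quick illustration of fancy_units (example values only):
#   fancy_units(512)          -> "512.0"
#   fancy_units(2048)         -> "2.0KB"
#   fancy_units(3 * 1024**3)  -> "3.0GB"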
def get_processes(h):
addr = h.address
gs = xmlrpclib.ServerProxy('http://%s:%d' % (addr, 61209))
    # 10s max to answer
gs.sock.timeout = 10
ps = json.loads(gs.getProcessList())
return ps
def get_page(hname):
print "MEMORY??"
# First we look for the user sid
# so we bail out if it's a false one
user = app.get_user_auth()
if not user:
redirect("/user/login")
# Ok, we can lookup it
h = app.datamgr.get_host(hname)
error = ''
ps = []
try:
ps = get_processes(h)
except (xmlrpclib.Error, socket.error), exp:
error = str(exp)
    return {'app': app, 'elt': h, 'ps':ps, 'fancy_units':fancy_units, 'error' : error}
def get_page_proc(hname):
return get_page(hname)
# Void plugin
pages = {get_page: {'routes': ['/cv/memory/:hname'], 'view': 'cv_memory', 'static': True},
get_page_proc: {'routes': ['/cv/processes/:hname'], 'view': 'cv_processes', 'static': True}
}
|
agpl-3.0
| -5,770,006,532,879,468,000
| 25.94382
| 100
| 0.634696
| false
| 3.223118
| false
| false
| false
|
trustpilot/python-lambdarest
|
lambdarest/__init__.py
|
1
|
13223
|
# -*- coding: utf-8 -*-
import json
import logging
from string import Template
from jsonschema import validate, ValidationError, FormatChecker
from werkzeug.routing import Map, Rule, NotFound
from werkzeug.http import HTTP_STATUS_CODES
from functools import wraps
__validate_kwargs = {"format_checker": FormatChecker()}
__required_keys = ["httpMethod"]
__either_keys = ["path", "resource"]
class Response(object):
"""Class to conceptualize a response with default attributes
    if no body is specified, the body key is omitted from the returned dict
if no status_code is specified, 200 is returned
if no headers are specified, empty dict is returned
"""
def __init__(
self, body=None, status_code=None, headers=None, multiValueHeaders=None
):
self.body = body
self.status_code = status_code
self.headers = headers
self.multiValueHeaders = multiValueHeaders
self.status_code_description = None
self.isBase64_encoded = False
def to_json(self, encoder=json.JSONEncoder, application_load_balancer=False):
"""Generates and returns an object with the expected field names.
Note: method name is slightly misleading, should be populate_response or with_defaults etc
"""
status_code = self.status_code or 200
# if it's already a str, we don't need json.dumps
do_json_dumps = self.body is not None and not isinstance(self.body, str)
response = {
"body": json.dumps(self.body, cls=encoder, sort_keys=True)
if do_json_dumps
else self.body,
"statusCode": status_code,
}
## handle multiValueHeaders if defined, default to headers
        if self.multiValueHeaders is None:
response["headers"] = self.headers or {}
else:
response["multiValueHeaders"] = self.multiValueHeaders
# if body is None, remove the key
        if response.get("body") is None:
response.pop("body")
if application_load_balancer:
response.update(
{
# note must be HTTP [description] as per:
# https://docs.aws.amazon.com/lambda/latest/dg/services-alb.html
# the value of 200 OK fails:
# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html#respond-to-load-balancer
"statusDescription": self.status_code_description
or "HTTP " + HTTP_STATUS_CODES[status_code],
"isBase64Encoded": self.isBase64_encoded,
}
)
return response
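# Illustrative result of to_json() with the defaults described above (example call only):
#   Response({"ok": True}).to_json()
#   # -> {"body": '{"ok": true}', "statusCode": 200, "headers": {}}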
class ScopeMissing(Exception):
pass
def __float_cast(value):
try:
return float(value)
except Exception:
pass
return value
def __marshall_query_params(value):
try:
value = json.loads(value)
except Exception:
value_cand = value.split(",")
if len(value_cand) > 1:
value = list(map(__float_cast, value_cand))
return value
def __json_load_query(query):
query = query or {}
return {key: __marshall_query_params(value) for key, value in query.items()}
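# Illustrative behaviour (example query string parameters):
#   __json_load_query({"limit": "10", "ids": "1,2,3", "raw": "abc"})
#   # -> {"limit": 10, "ids": [1.0, 2.0, 3.0], "raw": "abc"}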
def default_error_handler(error, method):
logging_message = "[%s][{status_code}]: {message}" % method
logging.exception(logging_message.format(status_code=500, message=str(error)))
def check_update_and_fill_resource_placeholders(resource, path_parameters):
"""
Prepare resource parameters before routing.
    When a resource is defined as /path/to/{placeholder}/resource,
    the router can't find the correct handler on its own.
This method inserts path parameters
instead of placeholders and returns the result.
:param resource: Resource path definition
:param path_parameters: Path parameters dict
:return: resource definition with inserted path parameters
"""
base_resource = resource
# prepare resource.
# evaluate from /foo/{key1}/bar/{key2}/{proxy+}
# to /foo/${key1}/bar/${key2}/{proxy+}
if path_parameters is not None:
for path_key in path_parameters:
resource = resource.replace("{%s}" % path_key, "${%s}" % path_key)
else:
return base_resource
# insert path_parameteres by template
# /foo/${key1}/bar/${key2}/{proxy+} -> /foo/value1/bar/value2/{proxy+}
template = Template(resource)
try:
resource = template.substitute(**(path_parameters))
return resource
except KeyError:
return base_resource
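# Example of the substitution described in the docstring (illustrative values):
#   check_update_and_fill_resource_placeholders("/foo/{key1}/bar/{proxy+}", {"key1": "value1"})
#   # -> "/foo/value1/bar/{proxy+}"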
def create_lambda_handler(
error_handler=default_error_handler,
json_encoder=json.JSONEncoder,
application_load_balancer=False,
):
"""Create a lambda handler function with `handle` decorator as attribute
example:
lambda_handler = create_lambda_handler()
        @lambda_handler.handle("get")
def my_get_func(event):
pass
Inner_lambda_handler:
is the one you will receive when calling this function. It acts like a
dispatcher calling the registered http handler functions on the basis of the
incoming httpMethod.
All responses are formatted using the lambdarest.Response class.
Inner_handler:
    Is the decorator function used to register functions as handlers of
different http methods.
The inner_handler is also able to validate incoming data using a specified
JSON schema, please see http://json-schema.org for info.
"""
url_maps = Map()
def inner_lambda_handler(event, context=None):
# check if running as "aws lambda proxy"
if (
not isinstance(event, dict)
or not all(key in event for key in __required_keys)
or not any(key in event for key in __either_keys)
):
message = "Bad request, maybe not using Lambda Proxy?"
logging.error(message)
return Response(message, 500).to_json(
application_load_balancer=application_load_balancer
)
# Save context within event for easy access
event["context"] = context
# for application load balancers, no api definition is used hence no resource is set so just use path
if "resource" not in event:
resource = event["path"]
else:
resource = event["resource"]
# Fill placeholders in resource path
if "pathParameters" in event:
resource = check_update_and_fill_resource_placeholders(
resource, event["pathParameters"]
)
path = resource
# Check if a path is set, if so, check if the base path is the same as
# the resource. If not, this is an api with a custom domainname.
# if so, the path will contain the actual request, but it will be
# prefixed with the basepath, which needs to be removed. Api Gateway
# only supports single level basepaths
# eg:
# path: /v2/foo/foobar
# resource: /foo/{name}
# the /v2 needs to be removed
if "path" in event and event["path"].split("/")[1] != resource.split("/")[1]:
path = "/%s" % "/".join(event["path"].split("/")[2:])
# proxy is a bit weird. We just replace the value in the uri with the
# actual value provided by apigw, and use that
if "{proxy+}" in resource:
path = resource.replace("{proxy+}", event["pathParameters"]["proxy"])
method_name = event["httpMethod"].lower()
func = None
kwargs = {}
error_tuple = ("Internal server error", 500)
logging_message = "[%s][{status_code}]: {message}" % method_name
try:
# bind the mapping to an empty server name
mapping = url_maps.bind("")
rule, kwargs = mapping.match(path, method=method_name, return_rule=True)
func = rule.endpoint
# if this is a catch-all rule, don't send any kwargs
if rule.rule == "/<path:path>":
kwargs = {}
except NotFound as e:
logging.warning(logging_message.format(status_code=404, message=str(e)))
error_tuple = (str(e), 404)
if func:
try:
response = func(event, **kwargs)
if not isinstance(response, Response):
# Set defaults
status_code = headers = multiValueHeaders = None
if isinstance(response, tuple):
response_len = len(response)
if response_len > 3:
raise ValueError("Response tuple has more than 3 items")
# Unpack the tuple, missing items will be defaulted
body, status_code, headers, multiValueHeaders = response + (
None,
) * (4 - response_len)
elif isinstance(response, dict) and all(
key in ["body", "statusCode", "headers", "multiValueHeaders"]
for key in response.keys()
):
body = response.get("body")
status_code = response.get("statusCode") or status_code
headers = response.get("headers") or headers
multiValueHeaders = (
response.get("multiValueHeaders") or multiValueHeaders
)
else: # if response is string, int, etc.
body = response
response = Response(body, status_code, headers, multiValueHeaders)
return response.to_json(
encoder=json_encoder,
application_load_balancer=application_load_balancer,
)
except ValidationError as error:
error_description = "Schema[{}] with value {}".format(
"][".join(str(error.absolute_schema_path)), error.message
)
logging.warning(
logging_message.format(status_code=400, message=error_description)
)
error_tuple = ("Validation Error", 400)
except ScopeMissing as error:
error_description = "Permission denied"
logging.warning(
logging_message.format(status_code=403, message=error_description)
)
error_tuple = (error_description, 403)
except Exception as error:
if error_handler:
error_handler(error, method_name)
else:
raise
body, status_code = error_tuple
return Response(body, status_code).to_json(
application_load_balancer=application_load_balancer
)
def inner_handler(method_name, path="/", schema=None, load_json=True, scopes=None):
if schema and not load_json:
raise ValueError("if schema is supplied, load_json needs to be true")
def wrapper(func):
@wraps(func)
def inner(event, *args, **kwargs):
if load_json:
json_data = {
"body": json.loads(event.get("body") or "{}"),
"query": __json_load_query(event.get("queryStringParameters")),
}
event["json"] = json_data
if schema:
# jsonschema.validate using given schema
validate(json_data, schema, **__validate_kwargs)
try:
provided_scopes = json.loads(
event["requestContext"]["authorizer"]["scopes"]
)
except KeyError:
provided_scopes = []
except json.decoder.JSONDecodeError:
# Ignore passed scopes if it isn't properly json encoded
provided_scopes = []
for scope in scopes or []:
if scope not in provided_scopes:
raise ScopeMissing("Scope: '{}' is missing".format(scope))
return func(event, *args, **kwargs)
# if this is a catch all url, make sure that it's setup correctly
if path == "*":
target_path = "/*"
else:
target_path = path
# replace the * with the werkzeug catch all path
if "*" in target_path:
target_path = target_path.replace("*", "<path:path>")
# make sure the path starts with /
if not target_path.startswith("/"):
raise ValueError("Please configure path with starting slash")
# register http handler function
rule = Rule(target_path, endpoint=inner, methods=[method_name.lower()])
url_maps.add(rule)
return inner
return wrapper
lambda_handler = inner_lambda_handler
lambda_handler.handle = inner_handler
return lambda_handler
# singleton
lambda_handler = create_lambda_handler()
|
mit
| -7,489,941,035,686,895,000
| 36.247887
| 138
| 0.571807
| false
| 4.629902
| false
| false
| false
|
zayamatias/retrotool
|
retroclasses.py
|
1
|
6684
|
class sprite:
# Sprite class, to make it easier to manipulate afterwards
spriteCount = 0
def __init__ (self,pattern,colors,ored,x,y):
self.pattern=pattern #binary pattern of the sprite
self.colors=colors #colors of the sprite
self.ored = ored #does this sprite come from an ored sprite (for palette purposes)
self.number = sprite.spriteCount #Sprite index
self.x = x #X location of the sprite according to the original image
self.y = y #y location of the sprite according to the original image
sprite.spriteCount = sprite.spriteCount+1 #add one to the index for next sprite
def displayPattern (self):
#for testing purposes, show the pattern on console
rows = self.pattern
for row in rows:
print (row)
def displayColors (self):
#for testing purposes, show the color on console
rows = self.colors
for row in rows:
print (row)
def getPattern (self):
        #returns the pattern of a sprite
line = ""
rows = self.pattern
for row in rows:
line = line + str(row) + "\n"
return line
def getColors (self,ysize):
#returns the colors of a sprite
line = ""
count = 1
rows = self.colors
for row in rows:
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
return line
def getAsmPattern (self,width):
#get the pattern of a sprite in ASM mode (db %xxxxxxxxxxxxxxxx)
        #attention: for 16-bit sprites, MSX splits into two 8x16 patterns
line = ""
rows = self.pattern
pat1 =""
pat2 =""
for row in rows:
pat1=pat1+"\tdb %"+str(row)[:8]+"\n"
pat2=pat2+"\tdb %"+str(row)[8:]+"\n"
line = pat1
if width > 8:
line = line + pat2
return line
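    # Illustrative output for a single 16-pixel row '1111111100000000' on a width-16
    # sprite (assumed pattern data): the left 8x16 block gets "\tdb %11111111" and the
    # right block gets "\tdb %00000000"; for width <= 8 only the left block is emitted.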
def getBasicPattern (self,width):
        #get the pattern of a sprite in BASIC mode (DATA xxxxxxxx)
        #attention: for 16-bit sprites, MSX splits into two 8x16 patterns
linel = []
liner = []
rows = self.pattern
for row in rows:
linel.append(" DATA "+str(row)[:8]+"\n")
liner.append(" DATA "+str(row)[8:]+"\n")
return linel+liner
def getAsmColors (self,ysize):
#get the colors of a sprite in ASM mode (db 1,2,3....) each byte represents the # of the color in the palette
#for ored colors, bit #7 should be set, thus the +64
line = "\tdb "
rows = self.colors
count = 1
for row in rows:
if self.ored :
if (row!=0):
row = row + 64
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
line = line + "\n"
return line
def getBASICColors (self,ysize):
        #get the colors of a sprite in BASIC mode (DATA 1,2,3....) each byte represents the # of the color in the palette
#for ored colors, bit #7 should be set, thus the +64
line = ""
rows = self.colors
count = 1
for row in rows:
if self.ored :
if (row!=0):
row = row + 64
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
line = line + "\n"
return line
class character:
    # defines a character that will contain a matrix of sprites
def __init__ (self,rows,cols):
self.rows = rows
self.cols = cols
self.sprites = [[0 for x in range(cols)] for y in range(rows)]
def insertSprite (self,sprite,row,col):
self.sprites[row][col]=sprite
class animation:
    # defines an animation, which is a list of characters to be shown one after the other
def __init__ (self):
self.characters = []
def addCharacter(self,character):
self.characters.append(character)
def numFrames(self):
return (len(self.characters))
class tile:
# Tile class, to make it easier to manipulate afterwards
tileCount = 0
def __init__ (self,pattern,colors):
self.pattern=pattern #binary pattern of the sprite
self.number = tile.tileCount #Sprite index
self.colors=colors #colors of the sprite
tile.tileCount = tile.tileCount+1 #add one to the index for next sprite
def displayPattern (self):
#for testing purposes, show the pattern on console
rows = self.pattern
for row in rows:
print (row)
def displayColors (self):
#for testing purposes, show the color on console
rows = self.colors
for row in rows:
print (row)
def getPattern (self):
        #returns the pattern of a tile
line = ""
rows = self.pattern
for row in rows:
line = line + str(row) + "\n"
return line
def getColors (self,ysize):
#returns the colors of a sprite
line = ""
count = 1
rows = self.colors
for row in rows:
line = line + str(row)
if count < ysize :
count = count + 1
line = line + ","
return line
def getAsmPattern (self,width):
#get the pattern of a tile in ASM mode (db %xxxxxxxxxxxxxxxx)
#Normally width is always 8, but let's keep it system agnostic
line = ""
rows = self.pattern
line =""
for row in rows:
line = line + "\tdb %"+str(row)+"\n"
return line
def getAsmColors (self,ysize):
#get the colors of a tile in ASM mode
#things get tricky, 2 colors are saved on a single byte
        #bg color is stored in the 4 least significant bits (0000XXXX)
#fg color is stored in 4 most significant bits (XXXX0000)
#so final byte is $fgbg
#I could have done it by simply sticking the values together, but shifting is fun!
rows = self.colors
line = "\tdb "
count = 0
for row in rows:
line = line + "$"+"{0:02x}".format(row)
count = count +1
if count < len(rows):
line = line + ","
return line
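    # Worked example of the $fgbg packing described above (hypothetical row value):
    # a row byte of 0x41 means foreground color 4 and background color 1 and is
    # emitted as "$41".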
|
gpl-3.0
| -4,547,728,878,825,318,000
| 31.939086
| 117
| 0.524087
| false
| 4.254615
| false
| false
| false
|
akash1808/cachetools
|
cachetools/func.py
|
1
|
3382
|
import collections
import functools
import random
import time
from .lfu import LFUCache
from .lru import LRUCache
from .rr import RRCache
from .ttl import TTLCache
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
_CacheInfo = collections.namedtuple('CacheInfo', [
'hits', 'misses', 'maxsize', 'currsize'
])
class _NullContext:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
_nullcontext = _NullContext()
def _makekey_untyped(args, kwargs):
return (args, tuple(sorted(kwargs.items())))
def _makekey_typed(args, kwargs):
key = _makekey_untyped(args, kwargs)
key += tuple(type(v) for v in args)
key += tuple(type(v) for _, v in sorted(kwargs.items()))
return key
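# Illustration of the difference between the two key builders (example arguments):
#   _makekey_untyped((1,), {})  == ((1,), ())
#   _makekey_typed((1,), {})    == ((1,), (), int)
#   _makekey_typed((1.0,), {})  == ((1.0,), (), float)   # cached separately from the int call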
def _cachedfunc(cache, typed=False, lock=None):
makekey = _makekey_typed if typed else _makekey_untyped
context = lock() if lock else _nullcontext
def decorator(func):
stats = [0, 0]
def wrapper(*args, **kwargs):
key = makekey(args, kwargs)
with context:
try:
result = cache[key]
stats[0] += 1
return result
except KeyError:
stats[1] += 1
result = func(*args, **kwargs)
with context:
try:
cache[key] = result
except ValueError:
pass # value too large
return result
def cache_info():
with context:
hits, misses = stats
maxsize = cache.maxsize
currsize = cache.currsize
return _CacheInfo(hits, misses, maxsize, currsize)
def cache_clear():
with context:
stats[:] = [0, 0]
cache.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return functools.update_wrapper(wrapper, func)
return decorator
def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Frequently Used (LFU)
algorithm.
"""
return _cachedfunc(LFUCache(maxsize, getsizeof), typed, lock)
def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm.
"""
return _cachedfunc(LRUCache(maxsize, getsizeof), typed, lock)
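# Minimal usage sketch (the fib function is only an example):
#   @lru_cache(maxsize=32)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(20)
#   fib.cache_info()    # -> CacheInfo(hits=..., misses=..., maxsize=32, currsize=...)
#   fib.cache_clear()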
def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Random Replacement (RR)
algorithm.
"""
return _cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock)
def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
getsizeof=None, lock=RLock):
"""Decorator to wrap a function with a memoizing callable that saves
up to `maxsize` results based on a Least Recently Used (LRU)
algorithm with a per-item time-to-live (TTL) value.
"""
return _cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock)
|
mit
| -298,878,277,565,286,600
| 26.950413
| 77
| 0.611473
| false
| 3.955556
| false
| false
| false
|
cernops/cloudbase-init
|
cloudbaseinit/tests/plugins/windows/test_localscripts.py
|
1
|
2254
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import unittest
from cloudbaseinit.plugins import base
from cloudbaseinit.plugins.windows import localscripts
from oslo.config import cfg
CONF = cfg.CONF
class LocalScriptsPluginTests(unittest.TestCase):
def setUp(self):
self._localscripts = localscripts.LocalScriptsPlugin()
@mock.patch('os.listdir')
@mock.patch('os.path.isfile')
def test_get_files_in_dir(self, mock_isfile, mock_listdir):
fake_path = os.path.join('fake', 'path')
fake_file_list = ['second', 'first', 'third', 'last']
mock_isfile.return_value = True
mock_listdir.return_value = fake_file_list
response = self._localscripts._get_files_in_dir(fake_path)
mock_listdir.assert_called_once_with(fake_path)
self.assertEqual(
sorted(os.path.join(fake_path, f) for f in fake_file_list),
response)
@mock.patch('cloudbaseinit.plugins.windows.localscripts'
'.LocalScriptsPlugin._get_files_in_dir')
@mock.patch('cloudbaseinit.plugins.windows.fileexecutils.exec_file')
def test_execute(self, mock_exec_file, mock_get_files_in_dir):
mock_service = mock.MagicMock()
fake_path = os.path.join('fake', 'path')
CONF.set_override('local_scripts_path', True)
mock_get_files_in_dir.return_value = [fake_path]
response = self._localscripts.execute(mock_service, shared_data=None)
mock_get_files_in_dir.assert_called_once_with(CONF.local_scripts_path)
mock_exec_file.assert_called_once_with(fake_path)
self.assertEqual((base.PLUGIN_EXECUTION_DONE, False), response)
|
apache-2.0
| -5,743,922,069,370,638,000
| 37.862069
| 78
| 0.692103
| false
| 3.612179
| true
| false
| false
|
MalkmusT/coala-quickstart
|
coala_quickstart/interaction/Logo.py
|
2
|
2373
|
import textwrap
from coala_quickstart.Strings import COALA_BEAR_LOGO, WELCOME_MESSAGES
def print_side_by_side(printer,
left=[],
right=[],
left_color='white',
right_color='blue',
limit=80):
"""
    Prints the given lines side by side. Example usage:
>>> from pyprint.ConsolePrinter import ConsolePrinter
>>> printer = ConsolePrinter()
>>> print_side_by_side(
... printer,
... ["Text content on the left",
... "side of the text."],
... ["Right side should contain",
... "this."],
... left_color=None,
... right_color=None,
... limit=80)
    Text content on the left Right side should contain
    side of the text.        this.
If either side is longer than the other, empty lines will
be added to the shorter side.
:param printer:
A ``ConsolePrinter`` object used for console interaction.
:param left:
The lines for the left portion of the text.
:param right:
The lines for the right portion of the text.
:param left_color:
The color to use for the left side of the text.
    :param right_color:
The color to use for the right side of the text.
:param limit:
The maximum line length limit.
"""
max_left_length = len(max(left, key=len))
for line in range(len(left) - len(right)):
right.append('')
for line in range(len(right) - len(left)):
left.append('')
for left_line, right_line in zip(left, right):
printer.print(left_line, color=left_color, end='')
printer.print(
' ' * (max_left_length - len(left_line) + 1),
end='')
printer.print(right_line, color=right_color)
def print_welcome_message(printer):
"""
Prints the coala bear logo with a welcome message side by side.
:param printer:
A ``ConsolePrinter`` object used for console interaction.
"""
max_length = 80 - len(max(COALA_BEAR_LOGO, key=len))
text_lines = ['']
for welcome_message in WELCOME_MESSAGES:
text_lines += ['']
text_lines += textwrap.wrap(welcome_message, max_length)
print_side_by_side(
printer,
left=COALA_BEAR_LOGO,
right=text_lines,
limit=80)
|
agpl-3.0
| 982,964,152,342,786,000
| 30.223684
| 70
| 0.574378
| false
| 3.928808
| false
| false
| false
|
jimfenton/notif-notifier
|
clockwatcherd.py
|
1
|
5149
|
#!/usr/bin/python
# clockwatcherd.py - Daemon to generate test notifs once a minute
#
# Copyright (c) 2015 Jim Fenton
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
__version__="0.1.0"
import sys
import traceback
from datetime import timedelta, datetime, date, time
import time
import daemon
import syslog
import signal
import lockfile
from notify import Signer, Notification
def clockwatcher_main():
syslog.syslog("clockwatcherd: starting clockwatcher_main")
lifetime = timedelta(days=1) #Notif expiration delta
s = Signer("/etc/clockwatcher/shiny.private", "shiny")
addrlist=[]
updates={}
with open("/etc/clockwatcher/clockwatcherd.cfg","r") as cfg:
for line in cfg:
addrlist.append(line[:-1]) #remembering to remove trailing \n
while 1:
# Synchronize to next whole minute
starttime = time.localtime()
time.sleep(60-starttime.tm_sec)
currtime = datetime.now()+ timedelta(seconds=30) # Force rounding in case we're early
timemsg = currtime.strftime("It is now %H:%M")
notif = Notification(4, lifetime, timemsg, timemsg + " and all is well") # Need to add expiration here
notif.prepare(s)
# For now, minimizing the possibility of a collision between this daemon and new authorizations coming in
# by reading the additional authorizations from a separate file and adding them on here. Only the daemon
# touches the main clockwatcherd.cfg file.
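# A sketch of the assumed file layout (an assumption, not taken from the original
# sources): both clockwatcherd.cfg and newwatchers.cfg hold one notif address per
# line, e.g.:
#   https://notif.example.org/agent/abc123
#   https://notif.example.org/agent/def456
# Whatever grants new authorizations appends to newwatchers.cfg; this loop then
# folds those addresses into the in-memory addrlist and rewrites clockwatcherd.cfg.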
rewrite = False
try:
with open("/etc/clockwatcher/newwatchers.cfg","r") as cfg:
for line in cfg:
newaddr = line[:-1] #remove trailing \n so it matches entries already in addrlist
if newaddr not in addrlist: #Handle unlikely duplicates
addrlist.append(newaddr)
rewrite = True
except IOError:
pass
except:
syslog.syslog("clockwatcherd: Unknown error opening newwatchers file")
quit()
if rewrite:
cfg=open("/etc/clockwatcher/newwatchers.cfg","w") #Clobber newwatchers file
cfg.close()
with open("/etc/clockwatcher/clockwatcherd.cfg","w") as cfg: #Update config with new watchers
for idx in range(len(addrlist)):
if addrlist[idx] != "":
cfg.write(addrlist[idx])
cfg.write("\n")
rewrite = False
for idx in range(len(addrlist)):
notaddr = addrlist[idx]
if notaddr == "":
continue
if notaddr in updates: #update an existing notif if possible
notid = updates[notaddr]
status = notif.update(notid)
if status == 404: #if 404 delete notid from updates
del updates[notaddr]
if notaddr not in updates: #not an else because it could have just been removed
# TODO: Handle exceptions (can't connect, etc.) here
(notid, status) = notif.send(notaddr) #Need to get feedback on send failures, delete notaddr
if status == 404:
addrlist[idx]="" #Don't delete entry from addrlist inside loop, just blank it
rewrite = True #Disk copy of list needs updating
elif status == 200:
updates[notaddr] = notid
if rewrite: #Update disk copy of list, removing any blank addresses
with open("/etc/clockwatcher/clockwatcherd.cfg","w") as cfg:
for idx in range(len(addrlist)):
if addrlist[idx] != "":
cfg.write(addrlist[idx])
cfg.write("\n")
def program_cleanup():
# This daemon opens no persistent connection, so there is nothing else to close here.
syslog.syslog("clockwatcherd: exiting on signal")
quit()
# Uncomment next 2 lines for non-daemon testing
#clockwatcher_main()
#quit()
context = daemon.DaemonContext(
pidfile=lockfile.FileLock('/var/run/clockwatcherd.pid'),
)
context.signal_map = {
signal.SIGHUP: program_cleanup,
}
with context:
clockwatcher_main()
|
mit
| 2,108,621,448,660,416,500
| 36.043165
| 110
| 0.629637
| false
| 4.345148
| false
| false
| false
|
anandka/SEWA
|
project/user/forms.py
|
1
|
2007
|
# project/user/forms.py
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from project.models import User
class LoginForm(Form):
email = TextField('email', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
class RegisterForm(Form):
email = TextField(
'email',
validators=[DataRequired(), Email(message=None), Length(min=6, max=40)])
password = PasswordField(
'password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Repeat password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match.')
]
)
def validate(self):
initial_validation = super(RegisterForm, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append("Email already registered")
return False
return True
class ChangePasswordForm(Form):
password = PasswordField(
'password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Repeat password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match.')
]
)
class InputForm(Form):
region = TextField('region', validators=[DataRequired()])
name = TextField('nm', validators=[DataRequired()])
class ProviderForm(Form):
Snm = TextField(
'Snm',
validators=[DataRequired(), Length(min=2, max=40)])
Rnm = TextField(
'Rnm',
validators=[DataRequired(), Length(min=6, max=40)])
Anm = TextField(
'Anm',
validators=[DataRequired(), Length(min=6, max=40)])
Cnm = TextField(
'Cnm',
validators=[DataRequired(), Length(min=3, max=40)])
|
mit
| -8,264,770,392,331,245,000
| 26.121622
| 80
| 0.623817
| false
| 4.087576
| false
| false
| false
|
Abhino/GamifiedTodoList
|
app.py
|
1
|
1583
|
import logging
import logging.config
import sys
from flask import Flask,render_template
from werkzeug.contrib.fixers import ProxyFix
from datetime import datetime
from apis import api, db
import os
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
DATE_FORMAT="%Y-%m-%d %H:%M:%S"
FORMAT = '%(asctime)s - %(filename)s - %(levelname)s:%(lineno)d: %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,format=FORMAT,datefmt=DATE_FORMAT)
LOG = logging.getLogger('app')
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
database_url = str(os.environ['DATABASE_URL'])
# str.replace returns a new string, so the result must be assigned back; SQLAlchemy
# expects the "postgresql://" scheme rather than the "postgres://" scheme Heroku provides.
database_url = database_url.replace("postgres://", "postgresql://", 1)
app.config['SQLALCHEMY_DATABASE_URI'] = database_url
app.logger_name = "flask.app"
api.init_app(app)
db.init_app(app)
@app.after_request
def after_request(response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
response.headers['Access-Control-Allow-Methods'] = 'GET,PUT,POST,DELETE'
response.headers['Last-Modified'] = datetime.now()
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
app.wsgi_app = ProxyFix(app.wsgi_app)
@app.route('/todo')
def index():
LOG.info("Rendering Template")
return render_template('index.html')
#Create schema for database
with app.app_context():
db.create_all()
if __name__ == '__main__':
app.run(debug=True)
|
mit
| -6,565,310,337,275,284,000
| 30.68
| 113
| 0.725205
| false
| 3.166
| false
| false
| false
|
python-xlib/python-xlib
|
Xlib/xobject/drawable.py
|
1
|
34416
|
# Xlib.xobject.drawable -- drawable objects (window and pixmap)
#
# Copyright (C) 2000 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from Xlib import X, Xatom, Xutil
from Xlib.protocol import request, rq
# Other X resource objects
from . import resource
from . import colormap
from . import cursor
from . import fontable
# Inter-client communication conventions
from . import icccm
class Drawable(resource.Resource):
__drawable__ = resource.Resource.__resource__
def get_geometry(self):
return request.GetGeometry(display = self.display,
drawable = self)
def create_pixmap(self, width, height, depth):
pid = self.display.allocate_resource_id()
request.CreatePixmap(display = self.display,
depth = depth,
pid = pid,
drawable = self.id,
width = width,
height = height)
cls = self.display.get_resource_class('pixmap', Pixmap)
return cls(self.display, pid, owner = 1)
def create_gc(self, **keys):
cid = self.display.allocate_resource_id()
request.CreateGC(display = self.display,
cid = cid,
drawable = self.id,
attrs = keys)
cls = self.display.get_resource_class('gc', fontable.GC)
return cls(self.display, cid, owner = 1)
def copy_area(self, gc, src_drawable, src_x, src_y, width, height, dst_x, dst_y, onerror = None):
request.CopyArea(display = self.display,
onerror = onerror,
src_drawable = src_drawable,
dst_drawable = self.id,
gc = gc,
src_x = src_x,
src_y = src_y,
dst_x = dst_x,
dst_y = dst_y,
width = width,
height = height)
def copy_plane(self, gc, src_drawable, src_x, src_y, width, height,
dst_x, dst_y, bit_plane, onerror = None):
request.CopyPlane(display = self.display,
onerror = onerror,
src_drawable = src_drawable,
dst_drawable = self.id,
gc = gc,
src_x = src_x,
src_y = src_y,
dst_x = dst_x,
dst_y = dst_y,
width = width,
height = height,
bit_plane = bit_plane)
def poly_point(self, gc, coord_mode, points, onerror = None):
request.PolyPoint(display = self.display,
onerror = onerror,
coord_mode = coord_mode,
drawable = self.id,
gc = gc,
points = points)
def point(self, gc, x, y, onerror = None):
request.PolyPoint(display = self.display,
onerror = onerror,
coord_mode = X.CoordModeOrigin,
drawable = self.id,
gc = gc,
points = [(x, y)])
def poly_line(self, gc, coord_mode, points, onerror = None):
request.PolyLine(display = self.display,
onerror = onerror,
coord_mode = coord_mode,
drawable = self.id,
gc = gc,
points = points)
def line(self, gc, x1, y1, x2, y2, onerror = None):
request.PolySegment(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
segments = [(x1, y1, x2, y2)])
def poly_segment(self, gc, segments, onerror = None):
request.PolySegment(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
segments = segments)
def poly_rectangle(self, gc, rectangles, onerror = None):
request.PolyRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = rectangles)
def rectangle(self, gc, x, y, width, height, onerror = None):
request.PolyRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = [(x, y, width, height)])
def poly_arc(self, gc, arcs, onerror = None):
request.PolyArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = arcs)
def arc(self, gc, x, y, width, height, angle1, angle2, onerror = None):
request.PolyArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = [(x, y, width, height, angle1, angle2)])
def fill_poly(self, gc, shape, coord_mode, points, onerror = None):
request.FillPoly(display = self.display,
onerror = onerror,
shape = shape,
coord_mode = coord_mode,
drawable = self.id,
gc = gc,
points = points)
def poly_fill_rectangle(self, gc, rectangles, onerror = None):
request.PolyFillRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = rectangles)
def fill_rectangle(self, gc, x, y, width, height, onerror = None):
request.PolyFillRectangle(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
rectangles = [(x, y, width, height)])
def poly_fill_arc(self, gc, arcs, onerror = None):
request.PolyFillArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = arcs)
def fill_arc(self, gc, x, y, width, height, angle1, angle2, onerror = None):
request.PolyFillArc(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
arcs = [(x, y, width, height, angle1, angle2)])
def put_image(self, gc, x, y, width, height, format,
depth, left_pad, data, onerror = None):
request.PutImage(display = self.display,
onerror = onerror,
format = format,
drawable = self.id,
gc = gc,
width = width,
height = height,
dst_x = x,
dst_y = y,
left_pad = left_pad,
depth = depth,
data = data)
# Trivial little method for putting PIL images. Will break on anything
# but depth 1 or 24...
def put_pil_image(self, gc, x, y, image, onerror = None):
width, height = image.size
if image.mode == '1':
format = X.XYBitmap
depth = 1
if self.display.info.bitmap_format_bit_order == 0:
rawmode = '1;R'
else:
rawmode = '1'
pad = self.display.info.bitmap_format_scanline_pad
stride = roundup(width, pad) >> 3
elif image.mode == 'RGB':
format = X.ZPixmap
depth = 24
if self.display.info.image_byte_order == 0:
rawmode = 'BGRX'
else:
rawmode = 'RGBX'
pad = self.display.info.bitmap_format_scanline_pad
unit = self.display.info.bitmap_format_scanline_unit
stride = roundup(width * unit, pad) >> 3
else:
raise ValueError('Unknown data format')
maxlen = (self.display.info.max_request_length << 2) \
- request.PutImage._request.static_size
split = maxlen // stride
x1 = 0
x2 = width
y1 = 0
while y1 < height:
h = min(height, split)
if h < height:
subimage = image.crop((x1, y1, x2, y1 + h))
else:
subimage = image
w, h = subimage.size
data = subimage.tobytes("raw", rawmode, stride, 0)
self.put_image(gc, x, y, w, h, format, depth, 0, data)
y1 = y1 + h
y = y + h
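# A hedged usage sketch (not part of the original module): pushing a Pillow image
# onto a freshly created window. Assumes Pillow is installed; only mode '1' and
# mode 'RGB' images are handled by put_pil_image above.
#
#   from Xlib import display
#   from PIL import Image
#   d = display.Display()
#   root = d.screen().root
#   win = root.create_window(0, 0, 64, 64, 0, d.screen().root_depth)
#   gc = win.create_gc()
#   win.put_pil_image(gc, 0, 0, Image.open("photo.png").convert("RGB"))
#   d.flush()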
def get_image(self, x, y, width, height, format, plane_mask):
return request.GetImage(display = self.display,
format = format,
drawable = self.id,
x = x,
y = y,
width = width,
height = height,
plane_mask = plane_mask)
def draw_text(self, gc, x, y, text, onerror = None):
request.PolyText8(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
items = [text])
def poly_text(self, gc, x, y, items, onerror = None):
request.PolyText8(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
items = items)
def poly_text_16(self, gc, x, y, items, onerror = None):
request.PolyText16(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
items = items)
def image_text(self, gc, x, y, string, onerror = None):
request.ImageText8(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
string = string)
def image_text_16(self, gc, x, y, string, onerror = None):
request.ImageText16(display = self.display,
onerror = onerror,
drawable = self.id,
gc = gc,
x = x,
y = y,
string = string)
def query_best_size(self, item_class, width, height):
return request.QueryBestSize(display = self.display,
item_class = item_class,
drawable = self.id,
width = width,
height = height)
class Window(Drawable):
__window__ = resource.Resource.__resource__
_STRING_ENCODING = 'ISO-8859-1'
_UTF8_STRING_ENCODING = 'UTF-8'
def create_window(self, x, y, width, height, border_width, depth,
window_class = X.CopyFromParent,
visual = X.CopyFromParent,
onerror = None,
**keys):
wid = self.display.allocate_resource_id()
request.CreateWindow(display = self.display,
onerror = onerror,
depth = depth,
wid = wid,
parent = self.id,
x = x,
y = y,
width = width,
height = height,
border_width = border_width,
window_class = window_class,
visual = visual,
attrs = keys)
cls = self.display.get_resource_class('window', Window)
return cls(self.display, wid, owner = 1)
def change_attributes(self, onerror = None, **keys):
request.ChangeWindowAttributes(display = self.display,
onerror = onerror,
window = self.id,
attrs = keys)
def get_attributes(self):
return request.GetWindowAttributes(display = self.display,
window = self.id)
def destroy(self, onerror = None):
request.DestroyWindow(display = self.display,
onerror = onerror,
window = self.id)
self.display.free_resource_id(self.id)
def destroy_sub_windows(self, onerror = None):
request.DestroySubWindows(display = self.display,
onerror = onerror,
window = self.id)
def change_save_set(self, mode, onerror = None):
request.ChangeSaveSet(display = self.display,
onerror = onerror,
mode = mode,
window = self.id)
def reparent(self, parent, x, y, onerror = None):
request.ReparentWindow(display = self.display,
onerror = onerror,
window = self.id,
parent = parent,
x = x,
y = y)
def map(self, onerror = None):
request.MapWindow(display = self.display,
onerror = onerror,
window = self.id)
def map_sub_windows(self, onerror = None):
request.MapSubwindows(display = self.display,
onerror = onerror,
window = self.id)
def unmap(self, onerror = None):
request.UnmapWindow(display = self.display,
onerror = onerror,
window = self.id)
def unmap_sub_windows(self, onerror = None):
request.UnmapSubwindows(display = self.display,
onerror = onerror,
window = self.id)
def configure(self, onerror = None, **keys):
request.ConfigureWindow(display = self.display,
onerror = onerror,
window = self.id,
attrs = keys)
def circulate(self, direction, onerror = None):
request.CirculateWindow(display = self.display,
onerror = onerror,
direction = direction,
window = self.id)
def raise_window(self, onerror = None):
"""alias for raising the window to the top - as in XRaiseWindow"""
self.configure(onerror, stack_mode = X.Above)
def query_tree(self):
return request.QueryTree(display = self.display,
window = self.id)
def change_property(self, property, property_type, format, data,
mode = X.PropModeReplace, onerror = None):
request.ChangeProperty(display = self.display,
onerror = onerror,
mode = mode,
window = self.id,
property = property,
type = property_type,
data = (format, data))
def change_text_property(self, property, property_type, data,
mode = X.PropModeReplace, onerror = None):
if not isinstance(data, bytes):
if property_type == Xatom.STRING:
data = data.encode(self._STRING_ENCODING)
elif property_type == self.display.get_atom('UTF8_STRING'):
data = data.encode(self._UTF8_STRING_ENCODING)
self.change_property(property, property_type, 8, data,
mode=mode, onerror=onerror)
def delete_property(self, property, onerror = None):
request.DeleteProperty(display = self.display,
onerror = onerror,
window = self.id,
property = property)
def get_property(self, property, property_type, offset, length, delete = 0):
r = request.GetProperty(display = self.display,
delete = delete,
window = self.id,
property = property,
type = property_type,
long_offset = offset,
long_length = length)
if r.property_type:
fmt, value = r.value
r.format = fmt
r.value = value
return r
else:
return None
def get_full_property(self, property, property_type, sizehint = 10):
prop = self.get_property(property, property_type, 0, sizehint)
if prop:
val = prop.value
if prop.bytes_after:
prop = self.get_property(property, property_type, sizehint,
prop.bytes_after // 4 + 1)
val = val + prop.value
prop.value = val
return prop
else:
return None
def get_full_text_property(self, property, property_type=X.AnyPropertyType, sizehint = 10):
prop = self.get_full_property(property, property_type,
sizehint=sizehint)
if prop is None or prop.format != 8:
return None
if prop.property_type == Xatom.STRING:
prop.value = prop.value.decode(self._STRING_ENCODING)
elif prop.property_type == self.display.get_atom('UTF8_STRING'):
prop.value = prop.value.decode(self._UTF8_STRING_ENCODING)
# FIXME: at least basic support for compound text would be nice.
# elif prop.property_type == self.display.get_atom('COMPOUND_TEXT'):
return prop.value
def list_properties(self):
r = request.ListProperties(display = self.display,
window = self.id)
return r.atoms
def set_selection_owner(self, selection, time, onerror = None):
request.SetSelectionOwner(display = self.display,
onerror = onerror,
window = self.id,
selection = selection,
time = time)
def convert_selection(self, selection, target, property, time, onerror = None):
request.ConvertSelection(display = self.display,
onerror = onerror,
requestor = self.id,
selection = selection,
target = target,
property = property,
time = time)
def send_event(self, event, event_mask = 0, propagate = 0, onerror = None):
request.SendEvent(display = self.display,
onerror = onerror,
propagate = propagate,
destination = self.id,
event_mask = event_mask,
event = event)
def grab_pointer(self, owner_events, event_mask,
pointer_mode, keyboard_mode,
confine_to, cursor, time):
r = request.GrabPointer(display = self.display,
owner_events = owner_events,
grab_window = self.id,
event_mask = event_mask,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode,
confine_to = confine_to,
cursor = cursor,
time = time)
return r.status
def grab_button(self, button, modifiers, owner_events, event_mask,
pointer_mode, keyboard_mode,
confine_to, cursor, onerror = None):
request.GrabButton(display = self.display,
onerror = onerror,
owner_events = owner_events,
grab_window = self.id,
event_mask = event_mask,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode,
confine_to = confine_to,
cursor = cursor,
button = button,
modifiers = modifiers)
def ungrab_button(self, button, modifiers, onerror = None):
request.UngrabButton(display = self.display,
onerror = onerror,
button = button,
grab_window = self.id,
modifiers = modifiers)
def grab_keyboard(self, owner_events, pointer_mode, keyboard_mode, time):
r = request.GrabKeyboard(display = self.display,
owner_events = owner_events,
grab_window = self.id,
time = time,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode)
return r.status
def grab_key(self, key, modifiers, owner_events, pointer_mode, keyboard_mode, onerror = None):
request.GrabKey(display = self.display,
onerror = onerror,
owner_events = owner_events,
grab_window = self.id,
modifiers = modifiers,
key = key,
pointer_mode = pointer_mode,
keyboard_mode = keyboard_mode)
def ungrab_key(self, key, modifiers, onerror = None):
request.UngrabKey(display = self.display,
onerror = onerror,
key = key,
grab_window = self.id,
modifiers = modifiers)
def query_pointer(self):
return request.QueryPointer(display = self.display,
window = self.id)
def get_motion_events(self, start, stop):
r = request.GetMotionEvents(display = self.display,
window = self.id,
start = start,
stop = stop)
return r.events
def translate_coords(self, src_window, src_x, src_y):
return request.TranslateCoords(display = self.display,
src_wid = src_window,
dst_wid = self.id,
src_x = src_x,
src_y = src_y)
def warp_pointer(self, x, y, src_window = 0, src_x = 0, src_y = 0,
src_width = 0, src_height = 0, onerror = None):
request.WarpPointer(display = self.display,
onerror = onerror,
src_window = src_window,
dst_window = self.id,
src_x = src_x,
src_y = src_y,
src_width = src_width,
src_height = src_height,
dst_x = x,
dst_y = y)
def set_input_focus(self, revert_to, time, onerror = None):
request.SetInputFocus(display = self.display,
onerror = onerror,
revert_to = revert_to,
focus = self.id,
time = time)
def clear_area(self, x = 0, y = 0, width = 0, height = 0, exposures = 0, onerror = None):
request.ClearArea(display = self.display,
onerror = onerror,
exposures = exposures,
window = self.id,
x = x,
y = y,
width = width,
height = height)
def create_colormap(self, visual, alloc):
mid = self.display.allocate_resource_id()
request.CreateColormap(display = self.display,
alloc = alloc,
mid = mid,
window = self.id,
visual = visual)
cls = self.display.get_resource_class('colormap', colormap.Colormap)
return cls(self.display, mid, owner = 1)
def list_installed_colormaps(self):
r = request.ListInstalledColormaps(display = self.display,
window = self.id)
return r.cmaps
def rotate_properties(self, properties, delta, onerror = None):
request.RotateProperties(display = self.display,
onerror = onerror,
window = self.id,
delta = delta,
properties = properties)
def set_wm_name(self, name, onerror = None):
self.change_text_property(Xatom.WM_NAME, Xatom.STRING, name,
onerror = onerror)
def get_wm_name(self):
return self.get_full_text_property(Xatom.WM_NAME, Xatom.STRING)
def set_wm_icon_name(self, name, onerror = None):
self.change_text_property(Xatom.WM_ICON_NAME, Xatom.STRING, name,
onerror = onerror)
def get_wm_icon_name(self):
return self.get_full_text_property(Xatom.WM_ICON_NAME, Xatom.STRING)
def set_wm_class(self, inst, cls, onerror = None):
self.change_text_property(Xatom.WM_CLASS, Xatom.STRING,
'%s\0%s\0' % (inst, cls),
onerror = onerror)
def get_wm_class(self):
value = self.get_full_text_property(Xatom.WM_CLASS, Xatom.STRING)
if value is None:
return None
parts = value.split('\0')
if len(parts) < 2:
return None
else:
return parts[0], parts[1]
def set_wm_transient_for(self, window, onerror = None):
self.change_property(Xatom.WM_TRANSIENT_FOR, Xatom.WINDOW,
32, [window.id],
onerror = onerror)
def get_wm_transient_for(self):
d = self.get_property(Xatom.WM_TRANSIENT_FOR, Xatom.WINDOW, 0, 1)
if d is None or d.format != 32 or len(d.value) < 1:
return None
else:
cls = self.display.get_resource_class('window', Window)
return cls(self.display, d.value[0])
def set_wm_protocols(self, protocols, onerror = None):
self.change_property(self.display.get_atom('WM_PROTOCOLS'),
Xatom.ATOM, 32, protocols,
onerror = onerror)
def get_wm_protocols(self):
d = self.get_full_property(self.display.get_atom('WM_PROTOCOLS'), Xatom.ATOM)
if d is None or d.format != 32:
return []
else:
return d.value
def set_wm_colormap_windows(self, windows, onerror = None):
self.change_property(self.display.get_atom('WM_COLORMAP_WINDOWS'),
Xatom.WINDOW, 32,
map(lambda w: w.id, windows),
onerror = onerror)
def get_wm_colormap_windows(self):
d = self.get_full_property(self.display.get_atom('WM_COLORMAP_WINDOWS'),
Xatom.WINDOW)
if d is None or d.format != 32:
return []
else:
cls = self.display.get_resource_class('window', Window)
return map(lambda i, d = self.display, c = cls: c(d, i),
d.value)
def set_wm_client_machine(self, name, onerror = None):
self.change_text_property(Xatom.WM_CLIENT_MACHINE, Xatom.STRING, name,
onerror = onerror)
def get_wm_client_machine(self):
return self.get_full_text_property(Xatom.WM_CLIENT_MACHINE, Xatom.STRING)
def set_wm_normal_hints(self, hints = {}, onerror = None, **keys):
self._set_struct_prop(Xatom.WM_NORMAL_HINTS, Xatom.WM_SIZE_HINTS,
icccm.WMNormalHints, hints, keys, onerror)
def get_wm_normal_hints(self):
return self._get_struct_prop(Xatom.WM_NORMAL_HINTS, Xatom.WM_SIZE_HINTS,
icccm.WMNormalHints)
def set_wm_hints(self, hints = {}, onerror = None, **keys):
self._set_struct_prop(Xatom.WM_HINTS, Xatom.WM_HINTS,
icccm.WMHints, hints, keys, onerror)
def get_wm_hints(self):
return self._get_struct_prop(Xatom.WM_HINTS, Xatom.WM_HINTS,
icccm.WMHints)
def set_wm_state(self, hints = {}, onerror = None, **keys):
atom = self.display.get_atom('WM_STATE')
self._set_struct_prop(atom, atom, icccm.WMState, hints, keys, onerror)
def get_wm_state(self):
atom = self.display.get_atom('WM_STATE')
return self._get_struct_prop(atom, atom, icccm.WMState)
def set_wm_icon_size(self, hints = {}, onerror = None, **keys):
self._set_struct_prop(Xatom.WM_ICON_SIZE, Xatom.WM_ICON_SIZE,
icccm.WMIconSize, hints, keys, onerror)
def get_wm_icon_size(self):
return self._get_struct_prop(Xatom.WM_ICON_SIZE, Xatom.WM_ICON_SIZE,
icccm.WMIconSize)
# Helper function for getting structured properties.
# pname and ptype are atoms, and pstruct is a Struct object.
# Returns a DictWrapper, or None
def _get_struct_prop(self, pname, ptype, pstruct):
r = self.get_property(pname, ptype, 0, pstruct.static_size // 4)
if r and r.format == 32:
value = rq.encode_array(r.value)
if len(value) == pstruct.static_size:
return pstruct.parse_binary(value, self.display)[0]
return None
# Helper function for setting structured properties.
# pname and ptype are atoms, and pstruct is a Struct object.
# hints is a mapping or a DictWrapper, keys is a mapping. keys
# will be modified. onerror is the error handler.
def _set_struct_prop(self, pname, ptype, pstruct, hints, keys, onerror):
if isinstance(hints, rq.DictWrapper):
keys.update(hints._data)
else:
keys.update(hints)
value = pstruct.to_binary(*(), **keys)
self.change_property(pname, ptype, 32, value, onerror = onerror)
class Pixmap(Drawable):
__pixmap__ = resource.Resource.__resource__
def free(self, onerror = None):
request.FreePixmap(display = self.display,
onerror = onerror,
pixmap = self.id)
self.display.free_resource_id(self.id)
def create_cursor(self, mask, foreground, background, x, y):
fore_red, fore_green, fore_blue = foreground
back_red, back_green, back_blue = background
cid = self.display.allocate_resource_id()
request.CreateCursor(display = self.display,
cid = cid,
source = self.id,
mask = mask,
fore_red = fore_red,
fore_green = fore_green,
fore_blue = fore_blue,
back_red = back_red,
back_green = back_green,
back_blue = back_blue,
x = x,
y = y)
cls = self.display.get_resource_class('cursor', cursor.Cursor)
return cls(self.display, cid, owner = 1)
def roundup(value, unit):
return (value + (unit - 1)) & ~(unit - 1)
|
lgpl-2.1
| -8,560,134,836,506,674,000
| 40.216766
| 101
| 0.463389
| false
| 4.683086
| false
| false
| false
|
kiyoad/twimg2rss
|
url_db.py
|
1
|
1450
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import os
import sqlite3
from common import conf
class UrlDb:
def __init__(self):
self.conn = None
self.c = None
def _open_sqlite3(self, url_db_file):
self.conn = sqlite3.connect(
url_db_file,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
self.c = self.conn.cursor()
def _clear_old_db_data(self):
reference = datetime.datetime.now() - datetime.timedelta(
seconds=conf.url_db_period())
self.c.execute('DELETE FROM urls WHERE created < ?', (reference,))
def open(self):
url_db_file = conf.url_db_file()
db_exist = os.path.isfile(url_db_file)
self._open_sqlite3(url_db_file)
if not db_exist:
self.c.execute('CREATE TABLE urls (url TEXT, created TIMESTAMP)')
self.c.execute('CREATE INDEX url_index ON urls(url, created)')
def close(self):
self._clear_old_db_data()
self.conn.commit()
self.c.close()
self.c = None
self.conn.close()
self.conn = None
def url_in_db(self, url):
self.c.execute('SELECT * FROM urls WHERE url == ?', (url,))
return self.c.fetchone() is not None
def add_url(self, url, created):
self.c.execute('INSERT INTO urls(url, created) VALUES (?, ?)',
(url, created))
url_db = UrlDb()
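# A minimal usage sketch (an illustration, not part of the original module); it
# assumes conf.url_db_file() points at a writable SQLite path and conf.url_db_period()
# returns the retention window in seconds:
#
#   url_db.open()
#   if not url_db.url_in_db("https://example.com/item"):
#       url_db.add_url("https://example.com/item", datetime.datetime.now())
#   url_db.close()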
|
mit
| 1,711,929,013,310,290,000
| 27.431373
| 77
| 0.576552
| false
| 3.502415
| false
| false
| false
|
SCUEvals/scuevals-api
|
db/alembic/versions/20170927093446_fix_update_courses.py
|
1
|
4925
|
"""Fix update_courses
Revision ID: 7004250e3ef5
Revises: 8a786f9bf241
Create Date: 2017-09-27 09:34:46.069174
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7004250e3ef5'
down_revision = '8a786f9bf241'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(sa.text("""create or replace function update_courses(_university_id numeric, _json jsonb)
returns numeric as $func$
declare
_d_id numeric;
_c_id numeric;
_p_id numeric;
_quarter numeric;
_latest_quarter numeric;
_s_id numeric;
_department varchar;
_number varchar;
_title varchar;
_professor1 varchar[];
_professor2 varchar[];
_professor3 varchar[];
_professors varchar[][];
_professor varchar[];
_count numeric := 0;
_new_course boolean := false;
begin
for
_quarter,
_department,
_number,
_title,
_professor1,
_professor2,
_professor3
in
select
(course ->> 'term')::int as _quarter,
course ->> 'subject' as _department,
course ->> 'catalog_nbr' as _number,
course ->> 'class_descr' as _title,
-- prof #1
case
when (course ->> 'instr_1') like '%, %' then
array[
split_part(course ->> 'instr_1', ', ', 1),
split_part(course ->> 'instr_1', ', ', 2)
]
when (course ->> 'instr_1') = '' then
null
end as _professor1,
-- prof #2
case
when (course ->> 'instr_2') like '%, %' then
array[
split_part(course ->> 'instr_2', ', ', 1),
split_part(course ->> 'instr_2', ', ', 2)
]
when (course ->> 'instr_2') = '' then
null
end as _professor2,
-- prof #3
case
when (course ->> 'instr_3') like '%, %' then
array[
split_part(course ->> 'instr_3', ', ', 1),
split_part(course ->> 'instr_3', ', ', 2)
]
when (course ->> 'instr_3') = '' then
null
end as _professor3
from jsonb_array_elements(_json -> 'courses') course
loop
if _professor1 is null then continue; end if;
-- get the department id (assume it exists)
select departments.id into _d_id
from departments
where abbreviation = _department
order by school_id limit 1;
-- get the course id if it exists
select id into _c_id
from courses
where department_id = _d_id and number = _number;
-- if the course does not exist, create it
if _c_id is null then
insert into courses (department_id, number, title) values (_d_id, _number, _title)
returning id into _c_id;
_new_course = true;
end if;
-- get the section id if it exists
select id into _s_id
from sections
where quarter_id = _quarter and course_id = _c_id;
-- if the section does not exist, create it
if _s_id is null then
insert into sections (quarter_id, course_id) values (_quarter, _c_id)
returning id into _s_id;
end if;
_professors = array[_professor1];
if _professor2 is not null then _professors = array_cat(_professors, _professor2); end if;
if _professor3 is not null then _professors = array_cat(_professors, _professor3); end if;
foreach _professor slice 1 in array _professors
loop
if _professor[1] is null then continue; end if;
-- get the professor id if it exists
select id into _p_id
from professors
where last_name = _professor[2] and first_name = _professor[1];
-- if the professor does not exist, create it
if _p_id is null then
insert into professors (first_name, last_name, university_id)
values (_professor[1], _professor[2], _university_id)
returning id into _p_id;
end if;
-- check if the professor is listed under this section
if not exists(select 1
from section_professor sp
where sp.section_id = _s_id and sp.professor_id = _p_id)
then
insert into section_professor (section_id, professor_id) values (_s_id, _p_id);
end if;
end loop;
-- if the course existed, make sure the title is up to date
if not _new_course then
-- get the latest quarter which the course was offered in
select q.id into _latest_quarter
from quarters q
join sections s on q.id = s.quarter_id
join courses c on s.course_id = c.id
where c.id = _c_id and q.university_id = _university_id
order by lower(period) desc
limit 1;
-- if this course info is for the latest quarter, update the title
if _quarter = _latest_quarter then
update courses
set title = _title
where id = _c_id;
end if;
end if;
_count = _count + 1;
end loop;
return _count;
end;
$func$ language plpgsql;"""))
def downgrade():
pass
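# Illustrative call of the function defined above (a sketch, not part of the
# migration): update_courses expects a jsonb payload with a "courses" array, and
# each "instr_*" value is split on ", " into two parts stored as
# (first_name, last_name) in that order. All names and numbers below are made up.
#
# select update_courses(1, '{
# "courses": [{
# "term": 3900,
# "subject": "MATH",
# "catalog_nbr": "12",
# "class_descr": "Calculus I",
# "instr_1": "Ada, Lovelace",
# "instr_2": "",
# "instr_3": ""
# }]
# }'::jsonb);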
|
agpl-3.0
| -5,163,401,731,827,070,000
| 25.196809
| 106
| 0.591269
| false
| 3.278961
| false
| false
| false
|
leonardr/botfriend
|
bots.sample/podcast/__init__.py
|
1
|
2324
|
from dateutil import parser
from pdb import set_trace
import random
from olipy.ia import Audio
from botfriend.bot import BasicBot
from botfriend.publish.podcast import PodcastPublisher
class PodcastBot(BasicBot):
COLLECTION = "podcasts"
def update_state(self):
# Grab the 100 most recently posted podcasts.
query = Audio.recent("collection:%s" % self.COLLECTION)
max_count = 100
choices = []
a = 0
for audio in query:
choices.append(audio.identifier)
a += 1
if a >= max_count:
break
self.model.json_state = choices
def file(self, item, format_name):
"""Find a file in a specific format."""
for f in item.files:
if f.format == format_name:
return f
return None
def make_post(self, podcast):
"""Convert an Audio object into a post compatible with
the PodcastPublisher.
"""
meta = podcast.metadata
mp3 = self.file(podcast, "VBR MP3")
if not mp3:
# This isn't really a podcast.
return None
title = meta.get('title')
date = parser.parse(
meta.get('date') or meta.get('publicdate')
).strftime("%d %b %Y")
description = meta.get('description', '')
creator = meta.get('creator')
if creator:
byline = " by %s" % creator
else:
byline = ""
detail_url = 'https://archive.org/details/%s' % meta['identifier']
detail_link='<p>Archived at <a href="%s">%s</a>' % (detail_url, detail_url)
template = '<p>Originally published%(byline)s on %(date)s.</p>\n\n%(description)s\n\n%(details)s'
description = template % dict(
details=detail_link,
title=title,
description=description,
date=date,
byline=byline
)
# Create a post compatible with the PodcastPublisher.
return PodcastPublisher.make_post(
self.model, title, mp3.url, description,
media_size=mp3.size, guid=detail_url
)
def new_post(self):
podcast = random.choice(self.model.json_state)
post, is_new = self.make_post(Audio(podcast))
return post
Bot = PodcastBot
|
mit
| -460,678,980,916,038,300
| 29.986667
| 105
| 0.567986
| false
| 3.899329
| false
| false
| false
|
jbteixeir/Openflow-DC-Framework
|
pox/host_tracker/host_tracker.old.py
|
1
|
12063
|
# Copyright 2011 Dorgival Guedes
#
# This file is part of POX.
# Some of the arp/openflow-related code was borrowed from dumb_l3_switch.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Keep track of hosts in the network, where they are and how they are
configured (at least MAC/IP addresses)
For the time being, it keeps tables with the information; later, it should
transfer that information to Topology and handle just the actual
discovery/update of host information.
Timer configuration can be changed when needed (e.g., for debugging) using
the launch facility (check timeoutSec dict and PingCtrl.pingLim).
"""
from pox.core import core
import pox
log = core.getLogger()
#import logging
#log.setLevel(logging.WARN)
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco.recoco import Timer
from pox.lib.revent.revent import EventMixin
from ext.Structures.ercs_host import HostJoin, HostTimeout, HostMove
import time
import string
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
# Times (in seconds) to use for differente timouts:
timeoutSec = dict(
arpAware=60*2, # Quiet ARP-responding entries are pinged after this
arpSilent=60*20, # This is for quiet entries not known to answer ARP
arpReply=4, # Time to wait for an ARP reply before retrial
timerInterval=5, # Seconds between timer routine activations
entryMove=60 # Minimum expected time to move a physical entry
)
# Good values for testing:
# --arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4
# Another parameter that may be used:
# --pingLim=2
class Alive (object):
""" Holds liveliness information for MAC and IP entries
"""
def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
self.lastTimeSeen = time.time()
self.interval=livelinessInterval
def expired (self):
return time.time() > self.lastTimeSeen + self.interval
def refresh (self):
self.lastTimeSeen = time.time()
class PingCtrl (Alive):
""" Holds information for handling ARP pings for hosts
"""
# Number of ARP ping attempts before deciding it failed
pingLim=3
def __init__ (self):
Alive.__init__(self, timeoutSec['arpReply'])
self.pending = 0
def sent (self):
self.refresh()
self.pending += 1
def failed (self):
return self.pending > PingCtrl.pingLim
def received (self):
# Clear any pending timeouts related to ARP pings
self.pending = 0
class IpEntry (Alive):
"""
This entry keeps track of IP addresses seen from each MAC entry and will
be kept in the macEntry object's ipAddrs dictionary. At least for now,
there is no need to refer to the original macEntry as the code is organized.
"""
def __init__ (self, hasARP):
if hasARP:
Alive.__init__(self,timeoutSec['arpAware'])
else:
Alive.__init__(self,timeoutSec['arpSilent'])
self.hasARP = hasARP
self.pings = PingCtrl()
def setHasARP (self):
if not self.hasARP:
self.hasARP = True
self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
"""
Not strictly an ARP entry.
When it gets moved to Topology, may include other host info, like
services, and it may replace dpid by a general switch object reference
We use the port to determine which port to forward traffic out of.
"""
def __init__ (self, dpid, port, macaddr):
Alive.__init__(self)
self.dpid = dpid
self.port = port
self.macaddr = macaddr
self.ipAddrs = {}
def __str__(self):
return string.join([str(self.dpid), str(self.port), str(self.macaddr)],' ')
def __eq__ (self, other):
if type(other) == type(None):
return type(self) == type(None)
elif type(other) == tuple:
return (self.dpid,self.port,self.macaddr)==other
else:
return (self.dpid,self.port,self.macaddr) \
==(other.dpid,other.port,other.macaddr)
def __ne__ (self, other):
return not self.__eq__(other)
class host_tracker (EventMixin):
_eventMixin_events = set([
HostJoin,
HostTimeout,
HostMove,
])
_core_name = "host_tracker" # we want to be core.host_tracker
def __init__ (self):
# The following tables should go to Topology later
self.entryByMAC = {}
self._t = Timer(timeoutSec['timerInterval'],
self._check_timeouts, recurring=True)
self.listenTo(core)
log.info("host_tracker ready")
# The following two functions should go to Topology also
def getMacEntry(self, macaddr):
try:
result = self.entryByMAC[macaddr]
except KeyError as e:
result = None
return result
def sendPing(self, macEntry, ipAddr):
r = arp() # Builds an "ETH/IP any-to-any" ARP packet
r.opcode = arp.REQUEST
r.hwdst = macEntry.macaddr
r.protodst = ipAddr
# src is ETHER_ANY, IP_ANY
e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
e.set_payload(r)
log.debug("%i %i sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
msg = of.ofp_packet_out(data = e.pack(),
action = of.ofp_action_output(port = macEntry.port))
if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
ipEntry = macEntry.ipAddrs[ipAddr]
ipEntry.pings.sent()
else:
# macEntry is stale, remove it.
log.debug("%i %i ERROR sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
del macEntry.ipAddrs[ipAddr]
return
def getSrcIPandARP(self, packet):
"""
This auxiliary function returns the source IPv4 address for packets that
have one (IPv4, ARPv4). Returns None otherwise.
"""
if isinstance(packet, ipv4):
log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
return ( packet.srcip, False )
elif isinstance(packet, arp):
log.debug("ARP %s %s => %s",
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
'op:%i' % (packet.opcode,)),
str(packet.protosrc), str(packet.protodst))
if packet.hwtype == arp.HW_TYPE_ETHERNET and \
packet.prototype == arp.PROTO_TYPE_IP and \
packet.protosrc != 0:
return ( packet.protosrc, True )
return ( None, False )
def updateIPInfo(self, pckt_srcip, macEntry, hasARP):
""" If there is IP info in the incoming packet, update the macEntry
accordingly. In the past we assumed a 1:1 mapping between MAC and IP
addresses, but removed that restriction later to accommodate cases
like virtual interfaces (1:n) and distributed packet rewriting (n:1)
"""
if pckt_srcip in macEntry.ipAddrs:
# that entry already has that IP
ipEntry = macEntry.ipAddrs[pckt_srcip]
ipEntry.refresh()
log.debug("%s already has IP %s, refreshing",
str(macEntry), str(pckt_srcip) )
else:
# new mapping
ipEntry = IpEntry(hasARP)
macEntry.ipAddrs[pckt_srcip] = ipEntry
log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
if hasARP:
ipEntry.pings.received()
def _handle_GoingUpEvent (self, event):
self.listenTo(core.openflow)
log.debug("Up...")
def _handle_PacketIn (self, event):
"""
Populate MAC and IP tables based on incoming packets.
Handles only packets from ports identified as not switch-only.
If a MAC was not seen before, insert it in the MAC table;
otherwise, update table and entry.
If packet has a source IP, update that info for the macEntry (may require
removing the info from another entry previously with that IP address).
It does not forward any packets, just extract info from them.
"""
dpid = event.connection.dpid
inport = event.port
packet = event.parse()
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
return
if packet.type == 34525:
return
# This should use Topology later
if core.openflow_discovery.isSwitchOnlyPort(dpid, inport):
# No host should be right behind a switch-only port
log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
return
log.debug("PacketIn: %i %i ETH %s => %s",
dpid, inport, str(packet.src), str(packet.dst))
# Learn or update dpid/port/MAC info
macEntry = self.getMacEntry(packet.src)
if macEntry == None:
# there is no known host by that MAC
# should we raise a NewHostFound event (at the end)?
macEntry = MacEntry(dpid,inport,packet.src)
self.entryByMAC[packet.src] = macEntry
log.info("Learned %s", str(macEntry))
#/begin FOR ERCS Purpose
(srcip, hasARP) = self.getSrcIPandARP(packet.next)
self.raiseEvent(HostJoin, packet.src, srcip, dpid, inport)
#/end FOR ERCS Purpose
elif macEntry != (dpid, inport, packet.src):
# there is already an entry of host with that MAC, but host has moved
# should we raise a HostMoved event (at the end)?
log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
try:
# if there has not been long since heard from it...
if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
str(macEntry), macEntry.lastTimeSeen, # lastTimeSeen is a timestamp attribute, not a method
dpid, inport, time.time())
# should we create a whole new entry, or keep the previous host info?
# for now, we keep it: IP info, answers pings, etc.
macEntry.dpid = dpid
macEntry.port = inport # MacEntry stores the switch port as .port
except Exception:
#TODO: Fix this later
pass
#/begin FOR ERCS Purpose
#TODO: Should we check for duplicates?
self.raiseEvent(HostMove, packet.src, dpid, inport)
#/end FOR ERCS Purpose
macEntry.refresh()
(pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
if pckt_srcip != None:
self.updateIPInfo(pckt_srcip,macEntry,hasARP)
return
def _check_timeouts(self):
for macEntry in self.entryByMAC.values():
entryPinged = False
for ip_addr, ipEntry in macEntry.ipAddrs.items():
ipa = ip_addr
if ipEntry.expired():
if ipEntry.pings.failed():
del macEntry.ipAddrs[ip_addr]
log.info("Entry %s: IP address %s expired",
str(macEntry), str(ip_addr) )
#/begin FOR ERCS Purpose
self.raiseEvent(HostTimeout, macEntry.macaddr, ipa)
#/end FOR ERCS Purpose
else:
self.sendPing(macEntry,ip_addr)
ipEntry.pings.sent()
entryPinged = True
if macEntry.expired() and not entryPinged:
log.info("Entry %s expired", str(macEntry))
#/begin FOR ERCS Purpose
self.raiseEvent(HostTimeout, macEntry.macaddr, ip_addr)
#/end FOR ERCS Purpose
# sanity check: there should be no IP addresses left
if len(macEntry.ipAddrs) > 0:
for ip in macEntry.ipAddrs.keys():
log.warning("Entry %s expired but still had IP address %s",
str(macEntry), str(ip_addr) )
del macEntry.ipAddrs[ip_addr]
del self.entryByMAC[macEntry.macaddr]
def launch():
core.registerNew(host_tracker)
|
gpl-3.0
| -1,858,211,703,470,429,200
| 33.663793
| 80
| 0.655558
| false
| 3.505667
| false
| false
| false
|
jonaslu/thatswhatsup
|
python/bytecode/bytecode.py
|
1
|
2290
|
method_add = {
"code": [
# func add(x,y):
# return x + y
# STORE_NAME 0
# STORE_NAME 1
# LOAD_NAME 0
# LOAD_NAME 1
# ADD_TWO_VALUES
# RET
("STORE_NAME", 0),
("STORE_NAME", 1),
("LOAD_NAME", 0),
("LOAD_NAME", 1),
("ADD_TWO_VALUES", None),
("RET", None)
],
"constants": [],
"names": ["x", "y"],
"args": 2
}
method_main = {
"code": [
# a = 3
# b = 4
# print(add(a, b))
("LOAD_VALUE", 0),
("STORE_NAME", 0),
("LOAD_VALUE", 1),
("STORE_NAME", 1),
("LOAD_NAME", 0),
("LOAD_NAME", 1),
("CALL", 2),
("PRINT", None)
],
"constants": [3, 4, method_add],
"names": ["a", "b"],
"args": 0
}
class Frame:
def __init__(self, code_block):
self.code_block = code_block
self.stack = []
self.environment = {}
def run(self):
for step in self.code_block["code"]:
instruction, value = step
if instruction == "LOAD_VALUE":
num = self.code_block["constants"][value]
self.stack.append(num)
elif instruction == "LOAD_NAME":
var_name = self.code_block["names"][value]
var_value = self.environment[var_name]
self.stack.append(var_value)
elif instruction == "STORE_NAME":
var_name = self.code_block["names"][value]
self.environment[var_name] = self.stack.pop(0)
elif instruction == "ADD_TWO_VALUES":
op1, op2 = self.stack.pop(0), self.stack.pop(0)
self.stack.append(op1 + op2)
elif instruction == "PRINT":
print(self.stack.pop(0))
elif instruction == "CALL":
code_block = self.code_block["constants"][value]
next_frame = Frame(code_block)
next_frame.stack = self.stack[-2:]
self.stack = self.stack[:-2]
next_frame.run()
if len(next_frame.stack) > 0:
self.stack.append(next_frame.stack[0])
elif instruction == "RET":
break
main_frame = Frame(method_main)
main_frame.run()
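# Traced run (an illustration, not part of the original file): method_main loads 3
# and 4, binds them to a and b, pushes both, and CALL 2 hands the top two stack
# values to a new Frame running method_add. There STORE_NAME binds x=3 and y=4,
# ADD_TWO_VALUES pushes 7, RET returns control, the caller copies 7 back onto its
# stack, and PRINT writes it out. Expected output when this module is executed:
#   7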
|
mit
| -5,269,336,586,440,504,000
| 26.590361
| 64
| 0.460262
| false
| 3.62916
| false
| false
| false
|
googleinterns/e2e-convrec
|
trainer/constants.py
|
1
|
1779
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Constants."""
import os
INPUT_LENGTH = 512 #1033 - longest training input for redial
TARGET_LENGTH = 128 #159 - longest training target for redial
BASE_DIR = "gs://e2e_central"
DATA_DIR = os.path.join(BASE_DIR, "data")
MODELS_DIR = os.path.join(BASE_DIR, "models")
BASE_PRETRAINED_DIR = "gs://t5-data/pretrained_models"
RD_JSONL_DIR = "gs://e2e_central/data/redial/"
RD_JSONL_PATH = {
"train": os.path.join(RD_JSONL_DIR, "rd-train-formatted.jsonl"),
"validation": os.path.join(RD_JSONL_DIR, "rd-test-formatted.jsonl")
}
RD_COUNTS_PATH = os.path.join(DATA_DIR, "rd-counts.json")
RD_TSV_PATH = {
"train": os.path.join(DATA_DIR, "rd-train.tsv"),
"validation": os.path.join(DATA_DIR, "rd-validation.tsv")
}
ML_SEQ_TSV_PATH = {
"train": os.path.join(DATA_DIR, "ml-sequences-train.tsv"),
"validation": os.path.join(DATA_DIR, "ml-sequences-validation.tsv")
}
ML_TAGS_TSV_PATH = {
"train": os.path.join(DATA_DIR, "ml-tags-train.tsv"),
"validation": os.path.join(DATA_DIR, "ml-tags-validation.tsv")
}
ML_TAGS_MASKED_TSV_PATH = {
"train": os.path.join(DATA_DIR, "ml-tags-train-masked-3.tsv"),
"validation": os.path.join(DATA_DIR, "ml-tags-validation-masked-3.tsv")
}
|
apache-2.0
| 3,666,765,548,474,841,000
| 39.431818
| 75
| 0.704328
| false
| 2.940496
| false
| false
| false
|
MWisBest/PyBot
|
Commands/xbox/xbox.py
|
1
|
2126
|
###########################################################################
## PyBot ##
## Copyright (C) 2015, Kyle Repinski ##
## Copyright (C) 2015, Andres Preciado (Glitch) ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
###########################################################################
import __main__, requests
from pybotutils import fixHTMLChars, strbetween
info = { "names" : [ "xbox", "xb" ], "access" : 0, "version" : 1 }
def command( message, user, recvfrom ):
txt = requests.get( "https://live.xbox.com/en-US/Profile?gamertag=" + message ).text
gamerscore = fixHTMLChars( strbetween( txt, "<div class=\"gamerscore\">", "</div>" ) )
lastseen = fixHTMLChars( strbetween( txt, "<div class=\"presence\">", "</div>" ) )
gamertag = fixHTMLChars( strbetween( txt, "<title>", "'s Profile" ) ) #get proper case of gamertag
if gamerscore != "":
__main__.sendMessage( gamertag + " :: Status: " + lastseen + " :: Gamerscore: " + gamerscore, recvfrom )
else:
__main__.sendMessage( message + " was not found.", recvfrom )
return True
|
gpl-3.0
| -3,655,683,782,739,206,700
| 63.424242
| 106
| 0.489652
| false
| 4.438413
| false
| false
| false
|
pneff/wsgiservice
|
tests/test_application.py
|
1
|
20489
|
import io
from datetime import timedelta
from mox3 import mox
import wsgiservice
import wsgiservice.application
import wsgiservice.exceptions
from webob import Request
def test_getapp():
"""get_app returns a list of resources from the dictionary."""
app = wsgiservice.get_app(globals())
print(app)
print(app._resources)
assert isinstance(app, wsgiservice.application.Application)
assert len(app._resources) == 7
resources = (Resource1, Resource2, Resource3, Resource4, Resource5,
Resource6)
assert app._resources[0] in resources
assert app._resources[1] in resources
assert app._resources[2] in resources
assert app._resources[3] in resources
assert app._resources[4] in resources
assert app._resources[5] in resources
def test_app_handle_404():
"""Application returns a 404 status code if no resource is found."""
app = wsgiservice.get_app(globals())
req = Request.blank('/foo', {'HTTP_ACCEPT': 'text/xml'})
res = app._handle_request(req)
print(res)
assert res.status == '404 Not Found'
assert res.body == b'<response><error>' \
b'The requested resource does not exist.</error></response>'
assert res.headers['Content-Type'] == 'text/xml; charset=UTF-8'
def test_app_handle_method_not_allowed():
"""Application returns 405 for known but unimplemented methods."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'GET'})
res = app._handle_request(req)
print(res)
assert res.status == '405 Method Not Allowed'
assert res.body == b''
assert res._headers['Allow'] == 'OPTIONS, POST, PUT'
def test_app_handle_method_not_known():
"""Application returns 501 for unknown and unimplemented methods."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'PATCH'})
res = app._handle_request(req)
print(res)
assert res.status == '501 Not Implemented'
assert res.body == b''
assert res._headers['Allow'] == 'OPTIONS, POST, PUT'
def test_app_handle_response_201_abs():
"""raise_201 used the location header directly if it is absolute."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
assert res.status == '201 Created'
assert res.body == b''
assert res.location == '/res2/test'
def test_app_handle_response_201_rel():
"""raise_201 adds relative location header to the current request path."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'PUT'})
res = app._handle_request(req)
print(res)
assert res.status == '201 Created'
assert res.body == b''
assert res.location == '/res2/foo'
def test_app_handle_response_201_ext():
"""raise_201 ignores extension in the current path."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2.json', {'REQUEST_METHOD': 'PUT'})
res = app._handle_request(req)
print(res)
assert res.status == '201 Created'
assert res.body == b''
assert res.location == '/res2/foo'
def test_app_handle_options():
"""Resource provides a good default for the OPTIONS method."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res2', {'REQUEST_METHOD': 'OPTIONS'})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res._headers['Allow'] == 'OPTIONS, POST, PUT'
def test_app_get_simple():
"""Application handles GET request and ignored POST data in that case."""
app = wsgiservice.get_app(globals())
body = b'foo=42&baz=foobar'
req = Request.blank('/res1/theid', {
'CONTENT_LENGTH': str(len(body)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': io.BytesIO(body)})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res._headers['Content-MD5'] == '8d5a8ef21b4afff94c937faabfdf11fa'
assert res.body == b"<response>GET was called with id theid, " \
b"foo None</response>"
def test_app_head_revert_to_get_simple():
"""Application converts a HEAD to a GET request but doesn't send body."""
app = wsgiservice.get_app(globals())
body = b'foo=42&baz=foobar'
req = Request.blank('/res1/theid', {
'REQUEST_METHOD': 'HEAD',
'CONTENT_LENGTH': str(len(body)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': io.BytesIO(body)})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res.body == b''
def test_app_post_simple():
"""Application handles normal POST request."""
app = wsgiservice.get_app(globals())
body = b'foo=42&baz=foobar'
req = Request.blank('/res1/theid', {
'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': str(len(body)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': io.BytesIO(body)})
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res.body == b"<response>POST was called with id theid, " \
b"foo 42</response>"
def test_app_wsgi():
"""Application instance works as a WSGI application."""
app = wsgiservice.get_app(globals())
env = Request.blank('/res1/theid.json').environ
start_response = mox.MockAnything()
start_response('200 OK', [('Content-Length', '40'),
('Content-Type', 'application/json; charset=UTF-8'),
('Content-MD5', 'd6fe631718727b542d2ecb70dfd41e4b')])
mox.Replay(start_response)
res = app(env, start_response)
print(res)
mox.Verify(start_response)
assert res == [b'"GET was called with id theid, foo None"']
def test_validation_method():
"""Resource validates a method parameter which was set on the method."""
inst = Resource1(None, None, None)
inst.validate_param(inst.POST, 'foo', '9')
def test_validation_class():
"""Resource validates a method parameter which was set on the class."""
inst = Resource1(None, None, None)
inst.validate_param(inst.GET, 'id', 'anyid')
def test_validation_with_re_none_value():
"""Resource rejects empty values if a validation is defined."""
inst = Resource1(None, None, None)
try:
inst.validate_param(inst.GET, 'id', None)
except wsgiservice.exceptions.ValidationException as e:
print(e)
assert str(e) == 'Value for id must not be empty.'
else:
assert False, "Expected an exception!"
def test_validation_with_re_mismatch():
"""Resource rejects invalid values by regular expression."""
inst = Resource1(None, None, None)
try:
inst.validate_param(inst.GET, 'id', 'fo')
except wsgiservice.exceptions.ValidationException as e:
print(e)
assert str(e) == 'id value fo does not validate.'
else:
assert False, "Expected an exception!"
def test_validation_with_re_mismatch_toolong():
"""Resource rejects invalid values by regular expression."""
inst = Resource1(None, None, None)
try:
inst.validate_param(inst.GET, 'id', 'fooobarrr')
except wsgiservice.exceptions.ValidationException as e:
print(e)
assert str(e) == 'id value fooobarrr does not validate.'
else:
assert False, "Expected an exception!"
def test_with_expires():
"""expires decorator correctly sets the Cache-Control header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res3')
res = app._handle_request(req)
print(str(res))
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=86400'
def test_with_expires_vary():
"""expires decorator can set the Vary header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res6/uid')
res = app._handle_request(req)
print(str(res))
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=86400'
vary = res._headers['Vary'].split(', ')
assert len(vary) == 2
assert 'Authorization' in vary
assert 'Accept' in vary
def test_with_expires_calculations():
"""expires decorator correctly sets the Expires header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4')
res = app._handle_request(req)
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=138'
assert res._headers['Expires'] == 'Mon, 20 Apr 2009 17:55:45 GMT'
def test_with_expires_calculations_double_wrapped():
"""Wrapped expires decorators work by just using the last one."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4', {'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(str(res))
print(res._headers)
assert res._headers['Cache-Control'] == 'max-age=138'
assert res._headers['Expires'] == 'Mon, 20 Apr 2009 17:55:45 GMT'
def test_etag_generate():
"""ETags are calculated by adding the extension to the custom etag."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid')
res = app._handle_request(req)
print(res._headers)
assert res._headers['ETag'] == '"myid_xml"'
def test_etag_generate_json():
"""ETags are calculated by adding the extension to the custom etag."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid', {'HTTP_ACCEPT': 'application/json'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_json"'
def test_etag_generate_json_ext():
"""ETags are calculated by adding the extension to the custom etag."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4.json?id=myid')
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_json"'
def test_etag_if_match_false():
"""A GET request with a non-matching If-Match returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_MATCH': '"otherid"'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '412 Precondition Failed'
def test_etag_if_match_true():
"""A GET request with a matching If-Match passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid', {'HTTP_IF_MATCH': '"myid_xml"'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '200 OK'
def test_etag_if_match_not_set():
"""A GET request without an If-Match header passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid')
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '200 OK'
def test_etag_if_none_match_get_true():
"""A GET request with a matching If-None-Match returns 304."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid', {'HTTP_IF_NONE_MATCH': '"myid_xml"'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '304 Not Modified'
assert 'Content-Type' not in res.headers
def test_etag_if_none_match_head_true():
"""A HEAD request with a matching If-None-Match returns 304."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_NONE_MATCH': '"myid_xml"', 'REQUEST_METHOD': 'HEAD'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '304 Not Modified'
def test_etag_if_none_match_post_true():
"""A POST request with a matching If-None-Match returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_NONE_MATCH': '"myid_xml"', 'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '412 Precondition Failed'
def test_etag_if_none_match_false():
"""A GET request with a non-matching If-None-Match executes normally."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_NONE_MATCH': '"otherid"'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '200 OK'
def test_modified_generate():
"""Resource generates a good Last-Modified response header."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid')
res = app._handle_request(req)
print(res._headers)
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
def test_if_modified_since_false():
"""A GET request with a matching If-Modified-Since returns 304."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_MODIFIED_SINCE': 'Fri, 01 May 2009 14:30:00 GMT'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res._headers['ETag'] == '"myid_xml"'
assert res.status == '304 Not Modified'
def test_if_modified_since_true():
"""A GET request with an outdated If-Modified-Since passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_MODIFIED_SINCE': 'Fri, 01 May 2009 14:18:10 GMT'})
res = app._handle_request(req)
print(res)
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '200 OK'
def test_if_unmodified_since_false():
"""A GET request with an outdated If-Unmodified-Since returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Fri, 01 May 2009 12:30:00 GMT'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '412 Precondition Failed'
def test_if_unmodified_since_false_head():
"""A HEAD request with an outdated If-Unmodified-Since returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Thu, 30 Apr 2009 19:30:00 GMT',
'REQUEST_METHOD': 'HEAD'})
res = app._handle_request(req)
print(res)
assert res.body == b''
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '412 Precondition Failed'
def test_if_unmodified_since_false_post():
"""A POST request with an outdated If-Unmodified-Since returns 412."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Thu, 30 Apr 2009 19:30:00 GMT',
'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
print(res.status)
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '412 Precondition Failed'
def test_if_unmodified_since_true():
"""A GET request with a current If-Unmodified-Since returns 200."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res4?id=myid',
{'HTTP_IF_UNMODIFIED_SINCE': 'Fri, 01 May 2009 14:30:00 GMT',
'REQUEST_METHOD': 'POST'})
res = app._handle_request(req)
print(res)
assert res._headers['ETag'] == '"myid_xml"'
assert res._headers['Last-Modified'] == 'Fri, 01 May 2009 14:30:00 GMT'
assert res.status == '200 OK'
def test_verify_content_md5_invalid():
"""A request with a body that does not match Content-MD5 returns 400."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res1/theid', {
'HTTP_CONTENT_MD5': '89d5739baabbbe65be35cbe61c88e06d',
'wsgi.input': io.BytesIO(b'foobar')})
res = app._handle_request(req)
print(res)
print(res.status)
print(res._headers)
assert 'ETag' not in res._headers
assert 'Last-Modified' not in res._headers
assert res.status == '400 Bad Request'
assert res.body == b'<response><error>Invalid Content-MD5 request ' \
b'header.</error></response>'
def test_verify_content_md5_valid():
"""A request with a body that matches Content-MD5 passes."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res1/theid', {
'HTTP_CONTENT_MD5': '89d5739baabbbe65be35cbe61c88e06d',
})
req.body_file = io.BytesIO(b'Foobar')
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
def test_exception_json():
"""An exception is serialized as a dictionary in JSON."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res5?throw=1', {'HTTP_ACCEPT': 'application/json'})
res = app._handle_request(req)
print(res)
assert res.status == '500 Internal Server Error'
assert res.body == b'{"error": "Some random exception."}'
def test_exception_xml():
"""An exception is serialized as an error response in XML."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res5?throw=1')
res = app._handle_request(req)
print(res)
assert res.status == '500 Internal Server Error'
assert res.body == b'<response><error>Some random exception.' \
b'</error></response>'
def test_res6_default():
"""Resource6 works normally for keys which exist on the resource."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res6/uid')
res = app._handle_request(req)
print(res)
assert res.status == '200 OK'
assert res.body == b'<response>works</response>'
def test_notfound_xml():
"""Requests that cause a NOT_FOUND exception return 404."""
app = wsgiservice.get_app(globals())
req = Request.blank('/res6/foo')
res = app._handle_request(req)
print(res)
assert res.status == '404 Not Found'
assert res.body == b'<response><error>Not Found</error></response>'
class AbstractResource(wsgiservice.Resource):
"""This resource should not be added to the application as it doesn't
have a path. (Verified by test_getapp)
"""
class Resource1(wsgiservice.Resource):
_path = '/res1/{id}'
_validations = {'id': {'re': '[a-z]{5}'}}
def GET(self, id, foo):
return 'GET was called with id {0}, foo {1}'.format(id, foo)
def POST(self, id, foo):
return 'POST was called with id {0}, foo {1}'.format(id, foo)
POST._validations = {'foo': {'re': '[0-9]+'}}
class Resource2(wsgiservice.Resource):
_path = '/res2'
def POST(self):
wsgiservice.raise_201(self, '/res2/test')
def PUT(self):
wsgiservice.raise_201(self, 'foo')
class Resource3(AbstractResource):
_path = '/res3'
@wsgiservice.expires(timedelta(days=1))
def GET(self, id):
return "Called with id: {0}".format(id)
class Resource4(wsgiservice.Resource):
_path = '/res4'
@wsgiservice.expires(138, currtime=lambda: 1240250007)
def GET(self, id):
return "Called with id: {0}".format(id)
@wsgiservice.expires(139, currtime=lambda: 1240250007)
@wsgiservice.expires(138, currtime=lambda: 1240250007)
def POST(self, id):
return "POST Called with id: {0}".format(id)
def get_etag(self, id):
if id:
return id[0] + '"' + id[1:]
def get_last_modified(self, id):
from webob import UTC
from datetime import datetime
return datetime(2009, 5, 1, 14, 30, tzinfo=UTC)
class Resource5(wsgiservice.Resource):
_path = '/res5'
def GET(self, throw):
if throw == '1':
raise Exception("Some random exception.")
else:
return 'Throwing nothing'
class Resource6(wsgiservice.Resource):
class DummyException(Exception):
pass
NOT_FOUND = (KeyError, DummyException)
_path = '/res6/{id}'
items = {'uid': 'works'}
@wsgiservice.expires(timedelta(days=1), vary=['Authorization'])
def GET(self, id):
return self.items[id]
class NotAResource():
def __getattr__(self, name):
return name
not_a_class = NotAResource()
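# Hedged usage sketch (not part of the original test module): the resource
# classes above form a small but complete wsgiservice application, so they can
# also be served directly for manual inspection. The host and port below are
# arbitrary; only wsgiservice.get_app() (already used throughout the tests) and
# the standard-library wsgiref server are assumed.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('localhost', 8080, wsgiservice.get_app(globals())).serve_forever()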
|
bsd-2-clause
| 3,326,394,759,695,108,600
| 32.699013
| 78
| 0.639123
| false
| 3.437175
| true
| false
| false
|
emailgregn/djtempl
|
djtempl/cli.py
|
1
|
1278
|
from djtempl import render_files
import argparse
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--template",
metavar='file',
default='Dockerfile.tmpl',
type=argparse.FileType(mode='r'), # 2.7 argparse.FileType() doesn't support encoding=
help="The dockerfile template to render")
parser.add_argument("-p", "--pip",
metavar='file',
default='requirements.txt',
type=argparse.FileType(mode='r'),
help="The pip requirements file")
parser.add_argument("-d", "--dockerfile",
metavar='file',
default=sys.stdout,
type=argparse.FileType(mode='w'),
help="The output dockerfile. Default is STDOUT")
parser.add_argument("-q", "--quiet",
action="store_true",
help="Silently overwrite if Dockerfile already exists")
args = parser.parse_args()
dfile = args.dockerfile
pfile = args.pip
tfile = args.template
quiet = args.quiet
render_files(pfile, tfile, dfile, quiet)
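# Hedged addition (not in the original module): a guard so the CLI can also be
# run directly, e.g. `python cli.py -t Dockerfile.tmpl -p requirements.txt -d Dockerfile`.
# The example file names simply mirror the argparse defaults above.
if __name__ == '__main__':
    main()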
|
gpl-3.0
| 3,263,640,041,976,481,000
| 33.540541
| 109
| 0.516432
| false
| 4.822642
| false
| false
| false
|
ama-jharrison/agdc
|
agdc/agdc/abstract_ingester/dataset_record.py
|
1
|
27323
|
#!/usr/bin/env python
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
"""
DatasetRecord: database interface class.
These classes provide an interface between the database and the top-level
ingest algorithm (AbstractIngester and its subclasses). They also provide
the implementation of the database and tile store side of the ingest
process. They are expected to be independent of the structure of any
particular dataset, but will change if the database schema or tile store
format changes.
"""
from __future__ import absolute_import
import logging
import os
import re
from math import floor
from osgeo import osr
from agdc.cube_util import DatasetError, DatasetSkipError
from .ingest_db_wrapper import IngestDBWrapper
from .ingest_db_wrapper import TC_PENDING, TC_SINGLE_SCENE, TC_SUPERSEDED
from .ingest_db_wrapper import TC_MOSAIC
from .mosaic_contents import MosaicContents
from .tile_record import TileRecord, TileRepository
# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class DatasetRecord(object):
"""DatasetRecord database interface class."""
DATASET_METADATA_FIELDS = ['dataset_path',
'datetime_processed',
'dataset_size',
'll_x',
'll_y',
'lr_x',
'lr_y',
'ul_x',
'ul_y',
'ur_x',
'ur_y',
'x_pixels',
'y_pixels',
'xml_text'
]
def __init__(self, collection, acquisition, dataset):
self.collection = collection
self.datacube = collection.datacube
self.db = IngestDBWrapper(self.datacube.db_connection)
dataset_key = collection.get_dataset_key(dataset)
self.dataset_bands = collection.new_bands[dataset_key]
self.dataset = dataset
self.mdd = dataset.metadata_dict
self.dataset_dict = {}
for field in self.DATASET_METADATA_FIELDS:
self.dataset_dict[field] = self.mdd[field]
self.dataset_dict['acquisition_id'] = acquisition.acquisition_id
self.dataset_dict['crs'] = self.mdd['projection']
self.dataset_dict['level_name'] = self.mdd['processing_level']
self.dataset_dict['level_id'] = \
self.db.get_level_id(self.dataset_dict['level_name'])
self.dataset_dict['dataset_id'] = \
self.db.get_dataset_id(self.dataset_dict)
if self.dataset_dict['dataset_id'] is None:
# create a new dataset record in the database
self.dataset_dict['dataset_id'] = \
self.db.insert_dataset_record(self.dataset_dict)
self.needs_update = False
else:
# check that the old dataset record can be updated
self.__check_update_ok()
self.needs_update = True
self.dataset_id = self.dataset_dict['dataset_id']
def remove_mosaics(self, dataset_filter):
"""Remove mosaics associated with the dataset.
This will mark mosaic files for removal, delete mosaic database
records if they exist, and update the tile class of overlapping
tiles (from other datasets) to reflect the lack of a mosaic. The
'dataset_filter' is a list of dataset_ids to filter on. It should
be the list of dataset_ids that have been locked (including this
dataset). It is used to avoid operating on the tiles of an
unlocked dataset.
"""
# remove new mosaics (those with database records)
overlap_dict = self.db.get_overlapping_tiles_for_dataset(
self.dataset_id,
input_tile_class_filter=(TC_SINGLE_SCENE,
TC_SUPERSEDED,
TC_MOSAIC),
output_tile_class_filter=(TC_MOSAIC,),
dataset_filter=dataset_filter
)
for tile_record_list in overlap_dict.values():
for tr in tile_record_list:
self.db.remove_tile_record(tr['tile_id'])
self.collection.mark_tile_for_removal(tr['tile_pathname'])
# build a dictionary of overlaps (ignoring mosaics)
overlap_dict = self.db.get_overlapping_tiles_for_dataset(
self.dataset_id,
input_tile_class_filter=(TC_SINGLE_SCENE,
TC_SUPERSEDED),
output_tile_class_filter=(TC_SINGLE_SCENE,
TC_SUPERSEDED),
dataset_filter=dataset_filter
)
# update tile classes for overlap tiles from other datasets
for tile_record_list in overlap_dict.values():
if len(tile_record_list) > 2:
raise DatasetError("Attempt to update a mosaic of three or " +
"more datasets. Handling for this case " +
"is not yet implemented.")
for tr in tile_record_list:
if tr['dataset_id'] != self.dataset_id:
self.db.update_tile_class(tr['tile_id'], TC_SINGLE_SCENE)
# remove old mosaics (those without database records)
for tile_record_list in overlap_dict.values():
if len(tile_record_list) > 1:
# tile_record_list is sorted by acquisition start time, so
# the first record should be the one the mosaic filename is
# based on.
tr = tile_record_list[0]
mosaic_pathname = \
self.__make_mosaic_pathname(tr['tile_pathname'])
if os.path.isfile(mosaic_pathname):
self.collection.mark_tile_for_removal(mosaic_pathname)
def remove_tiles(self):
"""Remove the tiles associated with the dataset.
This will remove ALL the tiles belonging to this dataset, deleting
database records and marking tile files for removal on commit. Mosaics
should be removed BEFORE calling this (as it will delete the tiles
needed to figure out the overlaps, but may not delete all the mosaics).
"""
tile_list = self.db.get_dataset_tile_ids(self.dataset_id)
for tile_id in tile_list:
tile_pathname = self.db.get_tile_pathname(tile_id)
self.db.remove_tile_record(tile_id)
self.collection.mark_tile_for_removal(tile_pathname)
def update(self):
"""Update the dataset record in the database.
This first checks that the new dataset is more recent than
the record in the database. If not it raises a dataset error.
"""
self.__check_update_ok()
self.db.update_dataset_record(self.dataset_dict)
def make_tiles(self, tile_type_id, band_stack):
"""Tile the dataset, returning a list of tile_content objects.
:rtype list of TileContents
"""
tile_list = []
tile_footprint_list = sorted(self.get_coverage(tile_type_id))
LOGGER.info('%d tile footprints cover dataset', len(tile_footprint_list))
for tile_footprint in tile_footprint_list:
tile_contents = self.collection.create_tile_contents(
tile_type_id,
tile_footprint,
band_stack
)
tile_contents.reproject()
if tile_contents.has_data():
tile_list.append(tile_contents)
else:
tile_contents.remove()
LOGGER.info('%d non-empty tiles created', len(tile_list))
return tile_list
def store_tiles(self, tile_list):
"""Store tiles in the database and file store.
'tile_list' is a list of tile_contents objects. This
method will create the corresponding database records and
mark tiles for creation when the transaction commits.
:type tile_list: list of TileContents
"""
return [self.create_tile_record(tile_contents) for tile_contents in tile_list]
def create_mosaics(self, dataset_filter):
"""Create mosaics associated with the dataset.
'dataset_filter' is a list of dataset_ids to filter on. It should
be the list of dataset_ids that have been locked (including this
dataset). It is used to avoid operating on the tiles of an
unlocked dataset.
"""
# Build a dictionary of overlaps (ignoring mosaics, including pending).
overlap_dict = self.db.get_overlapping_tiles_for_dataset(
self.dataset_id,
input_tile_class_filter=(TC_PENDING,
TC_SINGLE_SCENE,
TC_SUPERSEDED),
output_tile_class_filter=(TC_PENDING,
TC_SINGLE_SCENE,
TC_SUPERSEDED),
dataset_filter=dataset_filter
)
# Make mosaics and update tile classes as needed.
for tile_record_list in overlap_dict.values():
if len(tile_record_list) > 2:
raise DatasetError("Attempt to create a mosaic of three or " +
"more datasets. Handling for this case " +
"is not yet implemented.")
elif len(tile_record_list) == 2:
self.__make_one_mosaic(tile_record_list)
for tr in tile_record_list:
self.db.update_tile_class(tr['tile_id'], TC_SUPERSEDED)
else:
for tr in tile_record_list:
self.db.update_tile_class(tr['tile_id'], TC_SINGLE_SCENE)
def get_removal_overlaps(self):
"""Returns a list of overlapping dataset ids for mosaic removal."""
tile_class_filter = (TC_SINGLE_SCENE,
TC_SUPERSEDED,
TC_MOSAIC)
return self.get_overlaps(tile_class_filter)
def get_creation_overlaps(self):
"""Returns a list of overlapping dataset_ids for mosaic creation."""
tile_class_filter = (TC_PENDING,
TC_SINGLE_SCENE,
TC_SUPERSEDED)
return self.get_overlaps(tile_class_filter)
def get_overlaps(self, tile_class_filter):
"""Returns a list of overlapping dataset ids, including this dataset.
A dataset is overlapping if it contains tiles that overlap with
tiles belonging to this dataset. Only tiles in the tile_class_filter
are considered.
"""
dataset_list = self.db.get_overlapping_dataset_ids(
self.dataset_id,
tile_class_filter=tile_class_filter
)
if not dataset_list:
dataset_list = [self.dataset_id]
return dataset_list
def create_tile_record(self, tile_contents):
"""Factory method to create an instance of the TileRecord class.
The created object will be responsible for inserting tile table records
into the database for reprojected or mosaiced tiles."""
self.collection.mark_tile_for_creation(tile_contents)
tile = TileRecord(
self.dataset_id,
tile_footprint=tile_contents.tile_footprint,
tile_type_id=tile_contents.tile_type_id,
path=tile_contents.get_output_path(),
size_mb=tile_contents.get_output_size_mb(),
tile_extents=tile_contents.tile_extents
)
TileRepository(self.collection).persist_tile(tile)
return tile
def mark_as_tiled(self):
"""Flag the dataset record as tiled in the database.
This flag does not exist in the current database schema,
so this method does nothing at the moment."""
pass
def list_tile_types(self):
"""Returns a list of the tile type ids for this dataset."""
return self.dataset_bands.keys()
def get_tile_bands(self, tile_type_id):
"""Returns a dictionary containing the band info for one tile type.
The tile_type_id must valid for this dataset, available from
list_tile_types above.
"""
return self.dataset_bands[tile_type_id]
def get_coverage(self, tile_type_id):
"""Given the coordinate reference system of the dataset and that of the
tile_type_id, return a list of tiles within the dataset footprint"""
tile_type_info = self.collection.datacube.tile_type_dict[tile_type_id]
#Get geospatial information from the dataset.
dataset_crs = self.mdd['projection']
dataset_geotransform = self.mdd['geo_transform']
pixels = self.mdd['x_pixels']
lines = self.mdd['y_pixels']
#Look up the datacube's projection information for this tile_type
tile_crs = tile_type_info['crs']
#Get the transformation between the two projections
transformation = self.define_transformation(dataset_crs, tile_crs)
#Determine the bounding quadrilateral of the dataset extent
#in tile coordinates
dataset_bbox = self.get_bbox(transformation, dataset_geotransform,
pixels, lines)
#Determine maximum inner rectangle, which is guaranteed to need tiling
#and the minimum outer rectangle outside which no tiles will exist.
cube_origin = (tile_type_info['x_origin'], tile_type_info['y_origin'])
cube_tile_size = (tile_type_info['x_size'], tile_type_info['y_size'])
coverage = self.get_touched_tiles(dataset_bbox,
cube_origin, cube_tile_size)
return coverage
#
# worker methods
#
def __check_update_ok(self):
"""Checks if an update is possible, raises a DatasetError otherwise.
Note that dataset_older_than_database returns a tuple
(disk_datetime_processed, database_datetime_processed, tile_ingested_datetime)
if no ingestion required"""
tile_class_filter = (TC_SINGLE_SCENE,
TC_SUPERSEDED)
time_tuple = self.db.dataset_older_than_database(
self.dataset_dict['dataset_id'],
self.dataset_dict['datetime_processed'],
tile_class_filter)
if time_tuple is not None:
disk_datetime_processed, database_datetime_processed, tile_ingested_datetime = time_tuple
if (disk_datetime_processed == database_datetime_processed):
skip_message = 'Dataset has already been ingested'
elif disk_datetime_processed < database_datetime_processed:
skip_message = 'Dataset on disk is older than dataset in DB'
else:
skip_message = 'Dataset on disk was created after currently ingested contents'
skip_message += ' (Disk = %s, DB = %s, Ingested = %s)' % time_tuple
raise DatasetSkipError(skip_message)
def __make_one_mosaic(self, tile_record_list):
"""Create a single mosaic.
This create the mosaic contents, creates the database record,
and marks the mosaic contents for creation on transaction commit.
"""
mosaic = MosaicContents(
tile_record_list,
self.datacube.tile_type_dict,
self.dataset_dict['level_name'],
self.collection.get_temp_tile_directory()
)
mosaic.create_record(self.db)
self.collection.mark_tile_for_creation(mosaic)
def __make_mosaic_pathname(self, tile_pathname):
"""Return the pathname of the mosaic corresponding to a tile."""
(tile_dir, tile_basename) = os.path.split(tile_pathname)
mosaic_dir = os.path.join(tile_dir, 'mosaic_cache')
if self.dataset_dict['level_name'] == 'PQA':
mosaic_basename = tile_basename
else:
mosaic_basename = re.sub(r'\.\w+$', '.vrt', tile_basename)
return os.path.join(mosaic_dir, mosaic_basename)
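    # Illustrative example (hypothetical paths, not from the original source):
    # for a non-PQA level, a tile at /tiles/LS7_NBAR_150_-034_2009.tif would map
    # to /tiles/mosaic_cache/LS7_NBAR_150_-034_2009.vrt, i.e. the extension is
    # swapped for '.vrt' and the file is placed in a 'mosaic_cache' subdirectory.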
#
# Worker methods for coverage.
#
# These are public so that they can be called by test_dataset_record.
#
def define_transformation(self, dataset_crs, tile_crs):
"""Return the transformation between dataset_crs
and tile_crs projections"""
osr.UseExceptions()
try:
dataset_spatial_reference = self.create_spatial_ref(dataset_crs)
tile_spatial_reference = self.create_spatial_ref(tile_crs)
if dataset_spatial_reference is None:
                raise DatasetError('Unknown projection %s'
                                   % str(dataset_crs))
            if tile_spatial_reference is None:
                raise DatasetError('Unknown projection %s'
                                   % str(tile_crs))
return osr.CoordinateTransformation(dataset_spatial_reference,
tile_spatial_reference)
except Exception:
raise DatasetError('Coordinate transformation error ' +
'for transforming %s to %s' %
(str(dataset_crs), str(tile_crs)))
@staticmethod
def create_spatial_ref(crs):
"""Create a spatial reference system for projecton crs.
Called by define_transformation()"""
# pylint: disable=broad-except
osr.UseExceptions()
try:
spatial_ref = osr.SpatialReference()
except Exception:
raise DatasetError('No spatial reference done for %s' % str(crs))
try:
spatial_ref.ImportFromWkt(crs)
return spatial_ref
except Exception:
pass
try:
matchobj = re.match(r'EPSG:(\d+)', crs)
epsg_code = int(matchobj.group(1))
spatial_ref.ImportFromEPSG(epsg_code)
return spatial_ref
except Exception:
return None
@staticmethod
def get_bbox(transform, geotrans, pixels, lines):
"""Return the coordinates of the dataset footprint in clockwise order
from upper-left"""
xul, yul, dummy_z = \
transform.TransformPoint(geotrans[0], geotrans[3], 0)
xur, yur, dummy_z = \
transform.TransformPoint(geotrans[0] + geotrans[1] * pixels,
geotrans[3] + geotrans[4] * pixels, 0)
xll, yll, dummy_z = \
transform.TransformPoint(geotrans[0] + geotrans[2] * lines,
geotrans[3] + geotrans[5] * lines, 0)
xlr, ylr, dummy_z = \
transform.TransformPoint(
geotrans[0] + geotrans[1] * pixels + geotrans[2] * lines,
geotrans[3] + geotrans[4] * pixels + geotrans[5] * lines, 0)
return [(xul, yul), (xur, yur), (xlr, ylr), (xll, yll)]
def get_touched_tiles(self, dataset_bbox, cube_origin, cube_tile_size):
"""Return a list of tuples (itile, jtile) comprising all tiles
footprints that intersect the dataset bounding box"""
definite_tiles, possible_tiles = \
self.get_definite_and_possible_tiles(dataset_bbox,
cube_origin, cube_tile_size)
coverage_set = definite_tiles
#Check possible tiles:
#Check if the tile perimeter intersects the dataset bbox perimeter:
intersected_tiles = \
self.get_intersected_tiles(possible_tiles, dataset_bbox,
cube_origin, cube_tile_size)
coverage_set = coverage_set.union(intersected_tiles)
possible_tiles = possible_tiles.difference(intersected_tiles)
#Otherwise the tile might be wholly contained in the dataset bbox
contained_tiles = \
self.get_contained_tiles(possible_tiles, dataset_bbox,
cube_origin, cube_tile_size)
coverage_set = coverage_set.union(contained_tiles)
return coverage_set
@staticmethod
def get_definite_and_possible_tiles(bbox, cube_origin, cube_tile_size):
"""Return two lists of tile footprints: from the largest rectangle
wholly contained within the dataset bbox and the smallest rectangle
containing the bbox."""
#pylint: disable=too-many-locals
#unpack the bbox vertices in clockwise order from upper-left
xyul, xyur, xylr, xyll = bbox
xul, yul = xyul
xur, yur = xyur
xlr, ylr = xylr
xll, yll = xyll
#unpack the origin of the tiled datacube (e.g. lat=0, lon=0) and the
#datacube tile size
xorigin, yorigin = cube_origin
xsize, ysize = cube_tile_size
#Define the largest rectangle wholly contained within footprint
xmin = max(xll, xul)
xmax = min(xlr, xur)
ymin = max(yll, ylr)
ymax = min(yul, yur)
xmin_index = int(floor((xmin - xorigin) / xsize))
xmax_index = int(floor((xmax - xorigin) / xsize))
ymin_index = int(floor((ymin - yorigin) / ysize))
ymax_index = int(floor((ymax - yorigin) / ysize))
definite_tiles = set([(itile, jtile)
for itile in range(xmin_index, xmax_index + 1)
for jtile in range(ymin_index, ymax_index + 1)])
#Define the smallest rectangle which is guaranteed to include all tiles
        #in the footprint.
xmin = min(xll, xul)
xmax = max(xlr, xur)
ymin = min(yll, ylr)
ymax = max(yul, yur)
xmin_index = int(floor((xmin - xorigin) / xsize))
xmax_index = int(floor((xmax - xorigin) / xsize))
ymin_index = int(floor((ymin - yorigin) / ysize))
ymax_index = int(floor((ymax - yorigin) / ysize))
possible_tiles = set([(itile, jtile)
for itile in range(xmin_index, xmax_index + 1)
for jtile in range(ymin_index, ymax_index + 1)
]).difference(definite_tiles)
return (definite_tiles, possible_tiles)
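    # Illustrative arithmetic (made-up numbers): with cube_origin=(110.0, -45.0)
    # and cube_tile_size=(1.0, 1.0), an inner-rectangle corner at x=112.3 gives
    # xmin_index = floor((112.3 - 110.0) / 1.0) = 2, i.e. the tile whose left
    # edge sits at longitude 112.0. The outer rectangle is computed the same way
    # with min/max swapped, and the 'possible' set is the ring of tiles lying
    # between the two rectangles.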
def get_intersected_tiles(self, candidate_tiles, dset_bbox,
cube_origin, cube_tile_size):
"""Return the subset of candidate_tiles that have an intersection with
the dataset bounding box"""
#pylint: disable=too-many-locals
xorigin, yorigin = cube_origin
xsize, ysize = cube_tile_size
keep_list = []
for itile, jtile in candidate_tiles:
intersection_exists = False
(x0, y0) = (xorigin + itile * xsize,
yorigin + (jtile + 1) * ysize)
tile_bbox = [(x0, y0), (x0 + xsize, y0),
(x0 + xsize, y0 - ysize), (x0, y0 - ysize)]
tile_vtx_number = len(tile_bbox)
dset_vtx_number = len(dset_bbox)
for tile_vtx in range(tile_vtx_number):
x1, y1 = tile_bbox[tile_vtx]
x2, y2 = tile_bbox[(tile_vtx + 1) % tile_vtx_number]
for dset_vtx in range(dset_vtx_number):
x3, y3 = dset_bbox[dset_vtx]
x4, y4 = dset_bbox[(dset_vtx + 1) % dset_vtx_number]
xcoords = [x1, x2, x3, x4]
ycoords = [y1, y2, y3, y4]
intersection_exists = \
self.check_intersection(xcoords, ycoords)
if intersection_exists:
keep_list.append((itile, jtile))
break
if intersection_exists:
break
return set(keep_list)
@staticmethod
def get_contained_tiles(candidate_tiles, dset_bbox,
cube_origin, cube_tile_size):
"""Return the subset of candidate tiles that lie wholly within the
dataset bounding box"""
#pylint: disable=too-many-locals
xorigin, yorigin = cube_origin
xsize, ysize = cube_tile_size
keep_list = []
for itile, jtile in candidate_tiles:
tile_vtx_inside = []
(x0, y0) = (xorigin + itile * xsize,
yorigin + (jtile + 1) * ysize)
tile_bbox = [(x0, y0), (x0 + xsize, y0),
(x0 + xsize, y0 - ysize), (x0, y0 - ysize)]
dset_vtx_number = len(dset_bbox)
for x, y in tile_bbox:
#Check if this vertex lies within the dataset bounding box:
winding_number = 0
for dset_vtx in range(dset_vtx_number):
x1, y1 = dset_bbox[dset_vtx]
x2, y2 = dset_bbox[(dset_vtx + 1) % dset_vtx_number]
if y >= y1 and y < y2:
if (x - x1) * (y2 - y1) > (x2 - x1) * (y - y1):
winding_number += 1
elif y <= y1 and y > y2:
if (x - x1) * (y2 - y1) < (x2 - x1) * (y - y1):
winding_number += 1
tile_vtx_inside.append(winding_number % 2 == 1)
if tile_vtx_inside.count(True) == len(tile_bbox):
keep_list.append((itile, jtile))
assert tile_vtx_inside.count(True) == 4 or \
tile_vtx_inside.count(True) == 0, \
"Tile partially inside dataset bounding box but has" \
"no intersection"
return set(keep_list)
@staticmethod
def check_intersection(xpts, ypts):
"""Determines if the line segments
(xpts[0], ypts[0]) to (xpts[1], ypts[1]) and
(xpts[2], ypts[2]) to (xpts[3], ypts[3]) intersect"""
pvec = (xpts[0], ypts[0])
qvec = (xpts[2], ypts[2])
rvec = (xpts[1] - xpts[0], ypts[1] - ypts[0])
svec = (xpts[3] - xpts[2], ypts[3] - ypts[2])
rvec_cross_svec = rvec[0] * svec[1] - rvec[1] * svec[0]
if rvec_cross_svec == 0:
return False
qminusp_cross_svec = \
(qvec[0] - pvec[0]) * svec[1] - (qvec[1] - pvec[1]) * svec[0]
qminusp_cross_rvec = \
(qvec[0] - pvec[0]) * rvec[1] - (qvec[1] - pvec[1]) * rvec[0]
tparameter = qminusp_cross_svec / rvec_cross_svec
uparameter = qminusp_cross_rvec / rvec_cross_svec
        if tparameter > 0 and tparameter < 1 and \
           uparameter > 0 and uparameter < 1:
            return True
        return False
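# Hedged usage sketch (not part of the original module): the geometry helpers
# above are plain static methods, so they can be exercised without a database
# connection. The coordinates below are invented purely for illustration.
if __name__ == '__main__':
    # Segment (0,0)-(2,2) crosses segment (0,2)-(2,0) at (1,1): expect True.
    print(DatasetRecord.check_intersection([0.0, 2.0, 0.0, 2.0],
                                           [0.0, 2.0, 2.0, 0.0]))
    # Parallel segments have a zero cross product of directions: expect False.
    print(DatasetRecord.check_intersection([0.0, 1.0, 0.0, 1.0],
                                           [0.0, 1.0, 1.0, 2.0]))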
|
apache-2.0
| 7,368,330,845,389,171,000
| 41.493002
| 101
| 0.569044
| false
| 4.016907
| false
| false
| false
|
chris-klinger/Goat
|
databases/database_config.py
|
1
|
7681
|
"""
This module contains code for creating the underlying database structures
and also general code to perform basic actions: add, remove, update, list
"""
import os
import goat
from settings import settings_config
from databases import database_records,database_util,database_dirfiles
from util.inputs import prompts
#print('from database config')
#print(dir(goat))
#record_db = goat.get_record_db()
def get_goat_db():
"""Returns path to DB file"""
# deferred call to method in goat module due to import issues?!
return goat.get_goat_db()
def get_record_db(db_obj):
"""Gets the records database"""
# deferred call to method in goat module due to import issues?!
return goat.get_record_db(db_obj)
def get_query_db(db_obj):
return goat.get_query_db(db_obj)
def get_search_db(db_obj):
return goat.get_search_db(db_obj)
def get_result_db(db_obj):
return goat.get_result_db(db_obj)
def get_summary_db(db_obj):
return goat.get_summary_db(db_obj)
def get_db_dir_path(goat_dir):
"""Returns full pathname to db directory"""
return os.path.join(goat_dir, 'DB')
def check_for_dbs(goat_dir):
"""Checks whether a database folder already exists"""
if os.path.exists(get_db_dir_path(goat_dir)):
return True
return False
def create_dbs(goat_dir):
"""Creates the initial database structure"""
db_dir = get_db_dir_path(goat_dir)
os.mkdir(db_dir)
settings_config.add_setting(goat_dir, database_directory=db_dir)
def add_by_dir(goat_dir, target_dir=None):
"""
Adds records for each file in a directory. If one or more
extensions are specified, will only add files ending in those
extensions and ignore others
"""
exts = []
select_files = False
recurse = False
if target_dir is None:
target_dir = prompts.DirPrompt(
message = 'Please choose a directory to add files from',
errormsg = 'Unrecognized directory').prompt()
recurse = prompts.YesNoPrompt(
message = 'Would you like to add from subdirs too?').prompt()
if recurse.lower() in {'yes','y'}:
recurse = True
limit = prompts.YesNoPrompt(
message = 'Would you like to limit files?').prompt()
if limit.lower() in {'yes','y'}:
valids = ['file','extension','both']
choice = prompts.LimitedPrompt(
message = 'Limit by file, extension, or both?',
errormsg = 'Please choose "file", "extension", or "both"',
valids = valids).prompt()
if choice == 'file':
select_files = True
elif choice == 'extension':
exts = database_util.get_exts()
else:
select_files = True
exts = database_util.get_exts()
database_util.add_files_by_dir(goat_dir, target_dir,
select_files, recurse, *exts)
def add_by_file(goat_dir, addfile=None):
"""Adds a record for the specified file"""
if addfile is None:
addfile = database_util.get_file()
add_record(goat_dir, addfile=addfile)
def add_record(goat_dir, record=None, addfile=None, rdir=None, subdir=None):
"""
Adds a record to the database. The user is requested to provide
values for missing information.
"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
if records_db.check_record(record):
print('Goat has detected an existing record for {}'.format(record))
        modify = prompts.YesNoPrompt(
            message = 'Do you want to modify {}?'.format(record)).prompt()
        if modify.lower() in {'no','n'}:
            print('Did not modify {}'.format(record))
        elif modify.lower() in {'yes','y'}:
update_record(goat_dir,record)
else:
print('No such record exists yet, adding record')
records_db.add_record_obj(record)
if addfile is None:
        print('Warning, no file for record {}. '
              'Goat requires files for all functionality'.format(record))
add_now = prompts.YesNoPrompt(
message = 'Would you like to add a file now?').prompt()
if add_now.lower() in {'yes','y'}:
addfile = database_util.get_file()
elif add_now.lower() in {'no','n'}:
pass # Might change later
try:
print('File to be added is {}'.format(addfile))
database_dirfiles.add_record_from_file(goat_dir, record, addfile)
except Exception:
pass # Could not add file
more_info = prompts.YesNoPrompt(
message = 'Do you wish to add more info for record {}?'.format(record)).prompt()
if more_info.lower() in {'no', 'n'}:
pass # nothing more to do
elif more_info.lower() in {'yes', 'y'}:
records_db.extend_record(record,
**database_util.add_attribute_loop())
def remove_record(goat_dir, record=None):
"""Removes a record from the database"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
user_conf = prompts.YesNoPrompt(
message = 'Do you wish to delete all data for {}?'.format(record)).prompt()
if user_conf.lower() in {'no', 'n'}:
pass # nothing more to do
elif user_conf.lower() in {'yes', 'y'}:
records_db.remove_record_obj(record)
database_dirfiles.remove_record_dir(goat_dir,record)
def update_record(goat_dir, record=None):
"""
Combines user input with other functions to update records
already present in the database
"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
choices = {'add', 'change', 'remove', 'quit'}
cont = True
while cont is True:
user_choice = prompts.LimitedPrompt(
message = 'Please choose an option: add, change, remove, quit',
errormsg = 'Unrecognized option',
valids = choices).prompt()
if user_choice.lower() == 'quit':
cont = False
elif user_choice.lower() == 'add':
records_db.extend_record(record,
**database_util.add_attribute_loop(
goat_dir,record))
elif user_choice.lower() == 'remove':
records_db.reduce_record(record,
*database_util.remove_attribute_loop(
goat_dir,record))
elif user_choice.lower() =='change':
to_change = database_util.change_attribute_loop(
goat_dir,record)
for k,v in to_change.items():
records_db.change_record_attr(record,k,v)
def check_record(goat_dir, record=None):
"""Checks whether a record is already present"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
if records_db.check_record(record):
print('Record for {} exists in database'.format(record))
else:
print('Could not find record for {} in database'.format(record))
def get_record_attr(goat_dir, attr, record=None):
"""Returns a requested attribute for a record"""
records_db = get_record_db(goat_dir)
if record is None:
record = database_util.get_record()
if records_db.check_record(record):
return records_db.check_record_attr(record, attr)
else:
print('Could not find record for {} in database'.format(record))
def list_records(goat_dir, record_type=None):
"""
Lists records in the database, either by their attributes or by
the included files
"""
records_db = get_record_db(goat_dir)
for record in records_db.list_records():
print(record)
records_db.list_record_info(record)
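# Hedged usage sketch (not part of the original module; the directory and file
# names are invented). A typical interactive session might look like:
#
#     goat_dir = '/path/to/goat'
#     if not check_for_dbs(goat_dir):
#         create_dbs(goat_dir)
#     add_by_file(goat_dir, addfile='proteins.fasta')
#     list_records(goat_dir)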
|
gpl-3.0
| 8,086,966,148,088,573,000
| 35.751196
| 88
| 0.623096
| false
| 3.664599
| false
| false
| false
|
dragoon/kilogram
|
kilogram/dataset/edit_histories/wikipedia/libs/dewikify.py
|
1
|
1518
|
import re
class Parser(object):
"""
Parser to remove all kinds of wiki markup tags from an object
"""
def __init__(self):
"""
Constructor
"""
self.string = ''
# all the following regex remove all tags that cannot be rendered
# in text
self.wiki_re = re.compile(r"""\[{2}(File|Category):[\s\S]+\]{2}|
[\s\w#():-]+\||
(\[{2}|\]{2})|
\'{2,5}|
(<s>|<!--)[\s\S]+(</s>|-->)|
{{[\s\S]+}}|
^={1,6}|={1,6}$""", re.X)
def __list(self, listmatch):
return ' ' * (len(listmatch.group()) - 1) + '*'
def __parse(self, string=''):
"""
Parse a string to remove and replace all wiki markup tags
"""
self.string = string
self.string = self.wiki_re.sub('', self.string)
# search for lists
        self.listmatch = re.search(r'^(\*+)', self.string)
        if self.listmatch:
            self.string = self.__list(self.listmatch) + re.sub(r'^(\*+)', '', self.string)
return self.string
def parse_string(self, string=''):
"""
Parse a string object to de-wikified text
"""
self.strings = string.splitlines(True)
self.strings = [self.__parse(line) for line in self.strings]
return ''.join(self.strings)
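# Hedged usage sketch (not part of the original module); the sample markup
# below is invented and should come out roughly as "Bold text with a Link".
if __name__ == '__main__':
    print(Parser().parse_string("'''Bold''' text with a [[Link]]"))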
|
apache-2.0
| 5,212,644,074,540,540,000
| 32.755556
| 89
| 0.431489
| false
| 4.158904
| false
| false
| false
|
rocktavious/DevToolsLib
|
DTL/maya/vertexColorUtils.py
|
1
|
8982
|
import os, sys, traceback
import maya.cmds as cmds
from functools import partial
#Needs refactoring
from ..utils.funcs import selection
from DTL.api import Safe
"""
#------------------------------------------------------------
def buildChannelMatrixFromUI():
'''Helper Function to build the channel matrix from the UI'''
channelMatrix = []
redMix = (cmds.floatField('CM_red_red',q=1,v=1),cmds.floatField('CM_red_green',q=1,v=1),cmds.floatField('CM_red_blue',q=1,v=1),cmds.floatField('CM_red_alpha',q=1,v=1))
greenMix = (cmds.floatField('CM_green_red',q=1,v=1),cmds.floatField('CM_green_green',q=1,v=1),cmds.floatField('CM_green_blue',q=1,v=1),cmds.floatField('CM_green_alpha',q=1,v=1))
blueMix = (cmds.floatField('CM_blue_red',q=1,v=1),cmds.floatField('CM_blue_green',q=1,v=1),cmds.floatField('CM_blue_blue',q=1,v=1),cmds.floatField('CM_blue_alpha',q=1,v=1))
alphaMix = (cmds.floatField('CM_alpha_red',q=1,v=1),cmds.floatField('CM_alpha_green',q=1,v=1),cmds.floatField('CM_alpha_blue',q=1,v=1),cmds.floatField('CM_alpha_alpha',q=1,v=1))
channelMatrix = [redMix,greenMix,blueMix,alphaMix]
return channelMatrix
"""
#------------------------------------------------------------
def vertColorAction(action='apply',rgba=[1,1,1,1],channelMatrix=[],blendMix=None,sel=None):
'''Wrapper Function to aid in vertex color actions - handles selection data for you to get around memory leak'''
#cmds.progressWindow( title='Coloring Verts',progress=0, status='Processing:',isInterruptable=False )
cmds.undoInfo(openChunk=True)
if sel == None :
sel = selection()
try:
for obj in sel.selection.keys():
vertDict = sel.selection[obj][5]
cmds.polyOptions(obj, cs=1, cm='none')
progressCount = 1
#Added the plus one so the dialogue to the user never reaches full - its a perception thing
#cmds.progressWindow(edit=True,max=len(vertDict.keys())+1)
for colorKey, vertFaceList in vertDict.items():
#cmds.progressWindow( edit=True, progress=progressCount, status=('Processing - ' + str(len(vertFaceList)) + ' - Vertex Faces'))
if action == 'apply':
vertexColorApply(vertFaceList,rgba[0],rgba[1],rgba[2],rgba[3])
if action == 'add':
vertexColorAdd(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2])
if action == 'tint':
vertexColorTint(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2],rgba[3])
if action == 'gamma':
vertexColorGamma(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2])
if action == 'blend':
if blendMix == None:
blendMix = cmds.floatSliderGrp('blendMixSlider',q=1,v=1)
vertexColorBlend(vertFaceList,colorKey,rgba[0],rgba[1],rgba[2],blendMix)
if action == 'average':
vertexColorAvg(vertFaceList,colorKey,vertDict.keys())
if action == 'channel':
vertexColorChannelMix(vertFaceList,colorKey,channelMatrix)
if action == 'channelAlpha':
vertexColorChannelMixAlpha(vertFaceList,colorKey,channelMatrix)
progressCount = progressCount + 1
cmds.delete(obj,ch=1)
except Exception:
traceback.print_exc()
finally:
cmds.undoInfo(closeChunk=True)
#cmds.progressWindow(endProgress=1)
#------------------------------------------------------------
@Safe
def vertexColorApply(vertList=None, red=1, green=1, blue=1, alpha=1 ):
'''Straight Color/Alpha Apply'''
if vertList == None or vertList == []:
return
bufferSize = 2000
for begin in xrange(0, len(vertList), bufferSize):
vertBatch = vertList[begin: begin+bufferSize]
cmds.polyColorPerVertex(vertBatch, r=red, g=green, b=blue, a=alpha)
#------------------------------------------------------------
def vertexColorAdd(vertList=None, currentRGBA=None, red=0, green=0, blue=0 ):
'''Add New Color to Current Color - Alpha Excluded'''
if currentRGBA == None:
return
newR = currentRGBA[0] + red
newG = currentRGBA[1] + green
newB = currentRGBA[2] + blue
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
#------------------------------------------------------------
def vertexColorTint(vertList=None, currentRGBA=None, red=1, green=1, blue=1, alpha=1 ):
'''Multiply New Color to Current Color - Alpha Included'''
if currentRGBA == None:
return
newR = currentRGBA[0]*red
newG = currentRGBA[1]*green
newB = currentRGBA[2]*blue
newA = currentRGBA[3]*alpha
vertexColorApply(vertList,newR,newG,newB,newA)
#------------------------------------------------------------
def vertexColorGamma(vertList=None, currentRGBA=None, red=2, green=2, blue=2 ):
'''Multiply New Color Exponetionally to Current Color - Alpha Excluded'''
if currentRGBA == None:
return
newR = currentRGBA[0] ** red
newG = currentRGBA[1] ** green
newB = currentRGBA[2] ** blue
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
#------------------------------------------------------------
def vertexColorBlend(vertList=None, currentRGBA=None, red=1, green=1, blue=1, mix=0.5 ):
'''Blend New Color with Current Color - Alpha Excluded'''
if currentRGBA == None:
return
newR = currentRGBA[0]*(1-mix) + red*mix
newG = currentRGBA[1]*(1-mix) + green*mix
newB = currentRGBA[2]*(1-mix) + blue*mix
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
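#   Illustrative example (made-up values): blending currentRGBA=(1, 0, 0, 1)
#   toward (red=0, green=0, blue=1) with mix=0.5 yields (0.5, 0.0, 0.5) while
#   the existing alpha of 1 is passed through unchanged.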
#------------------------------------------------------------
def vertexColorAvg(vertList=None, currentRGBA=None, colorKeyList=None):
'''Average the Color of the vert list based on the entire obj - Alpha Excluded'''
if currentRGBA == None:
return
if colorKeyList == None:
return
vertColorAvg = [0,0,0]
for colorKey in colorKeyList:
vertColorAvg[0] += colorKey[0]
vertColorAvg[1] += colorKey[1]
vertColorAvg[2] += colorKey[2]
colorKeyCount = len(colorKeyList)
newR = vertColorAvg[0]/colorKeyCount
newG = vertColorAvg[1]/colorKeyCount
newB = vertColorAvg[2]/colorKeyCount
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
#------------------------------------------------------------
def vertexColorChannelMix(vertList=None, currentRGBA=None, channelMatrix=[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]):
'''Channel Mixes Current Color - Alpha Excluded'''
if currentRGBA == None:
return
try:
redMix, greenMix, blueMix, alphaMix = channelMatrix
except:
raise Exception("Unable to unpack channelMatrix")
if len(redMix) != 4:
raise Exception("Must pass a 4-tuple as redMix")
if len(greenMix) != 4:
raise Exception("Must pass a 4-tuple as greenMix")
if len(blueMix) != 4:
raise Exception("Must pass a 4-tuple as blueMix")
newR = currentRGBA[0]*redMix[0] + currentRGBA[1]*redMix[1] + currentRGBA[2]*redMix[2]
newG = currentRGBA[0]*greenMix[0] + currentRGBA[1]*greenMix[1] + currentRGBA[2]*greenMix[2]
newB = currentRGBA[0]*blueMix[0] + currentRGBA[1]*blueMix[1] + currentRGBA[2]*blueMix[2]
vertexColorApply(vertList,newR,newG,newB,currentRGBA[3])
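#   Illustrative example (made-up values): with redMix=(0,1,0,0), greenMix=(1,0,0,0)
#   and blueMix=(0,0,1,0), the red and green channels of currentRGBA are swapped
#   while blue and alpha pass through unchanged; the default identity matrix
#   leaves the colour untouched.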
#------------------------------------------------------------
def vertexColorChannelMixAlpha(vertList=None, currentRGBA=None, channelMatrix=[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]] ):
'''Channel Mixes Current Color - Alpha Included'''
if currentRGBA == None:
return
try:
redMix, greenMix, blueMix, alphaMix = channelMatrix
except:
raise Exception("Unable to unpack channelMatrix")
if len(redMix) != 4:
raise Exception("Must pass a 4-tuple as redMix")
if len(greenMix) != 4:
raise Exception("Must pass a 4-tuple as greenMix")
if len(blueMix) != 4:
raise Exception("Must pass a 4-tuple as blueMix")
if len(alphaMix) != 4:
raise Exception("Must pass a 4-tuple as alphaMix")
newR = currentRGBA[0]*redMix[0] + currentRGBA[1]*redMix[1] + currentRGBA[2]*redMix[2] + currentRGBA[3]*redMix[3]
newG = currentRGBA[0]*greenMix[0] + currentRGBA[1]*greenMix[1] + currentRGBA[2]*greenMix[2] + currentRGBA[3]*greenMix[3]
newB = currentRGBA[0]*blueMix[0] + currentRGBA[1]*blueMix[1] + currentRGBA[2]*blueMix[2] + currentRGBA[3]*blueMix[3]
newA = currentRGBA[0]*alphaMix[0] + currentRGBA[1]*alphaMix[1] + currentRGBA[2]*alphaMix[2] + currentRGBA[3]*alphaMix[3]
vertexColorApply(vertList,newR,newG,newB,newA)
#------------------------------------------------------------
def toggleVertColor():
'''Util for toggling the vertex color per obj selected'''
sel = selection()
for obj in sel.selection.keys():
cmds.polyOptions(obj,cs=1-cmds.polyOptions(obj,q=1,cs=1)[0],cm='none')
|
mit
| 5,149,193,529,652,118,000
| 42.606796
| 181
| 0.599978
| false
| 3.441379
| false
| false
| false
|
MapofLife/MOL
|
earthengine/google-api-python-client/samples/oauth2/dailymotion/main.py
|
1
|
3069
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
FLOW = OAuth2WebServerFlow(
client_id='2ad565600216d25d9cde',
client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
scope='read',
user_agent='oauth2client-sample/1.0',
auth_uri='https://api.dailymotion.com/oauth/authorize',
token_uri='https://api.dailymotion.com/oauth/token'
)
class Credentials(db.Model):
credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
credentials = StorageByKeyName(
Credentials, user.user_id(), 'credentials').get()
if credentials is None or credentials.invalid == True:
callback = self.request.relative_url('/auth_return')
authorize_url = FLOW.step1_get_authorize_url(callback)
memcache.set(user.user_id(), pickle.dumps(FLOW))
self.redirect(authorize_url)
else:
http = httplib2.Http()
http = credentials.authorize(http)
resp, content = http.request('https://api.dailymotion.com/me')
path = os.path.join(os.path.dirname(__file__), 'welcome.html')
logout = users.create_logout_url('/')
variables = {
'content': content,
'logout': logout
}
self.response.out.write(template.render(path, variables))
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id()))
if flow:
credentials = flow.step2_exchange(self.request.params)
StorageByKeyName(
Credentials, user.user_id(), 'credentials').put(credentials)
self.redirect("/")
else:
pass
def main():
application = webapp.WSGIApplication(
[
('/', MainHandler),
('/auth_return', OAuthHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
bsd-3-clause
| -1,209,884,518,423,326,000
| 27.95283
| 74
| 0.706419
| false
| 3.756426
| false
| false
| false
|
fmetzger/videostreaming-bufferemulation
|
ytdl.py
|
1
|
15813
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
ytdl_refactored.py
Required python packages:
python-gdata
python-matplotlib
python-numpy
"""
import urllib2
import urllib
import os
import subprocess
import sys
import string
import re
import socket
import datetime
from datetime import datetime
import gdata.youtube
import gdata.youtube.service
print "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO"
print "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO"
print "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO"
class Tools:
"""
The class Tools contains several @classmethod functions.
You can call these functions without initializing a Tools object, just as
simple as Tools.write("filename","Hello Tools").
The private helper function works like a wrapper for subprocess.Popen().
    It returns the process's standard output.
"""
def __init__(self):
# ?
pass
@classmethod
def chdir(self, directory):
if os.access(directory, os.F_OK) is False:
os.mkdir(directory)
os.chdir(directory)
@classmethod
def pwd(self):
return os.getcwd()
@classmethod
def __helper(self, pstring):
run = subprocess.Popen(pstring, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return run.stdout.read()
@classmethod
def traceroute(self, ip, interface=None):
if interface is not None:
return self.__helper("traceroute -i " + interface + " " + ip)
else:
return self.__helper("traceroute " + ip)
@classmethod
def lft(self, ip, opt=""):
return self.__helper("lft " + opt + " " + ip)
@classmethod
def ping(self, ip, interface=None):
if interface is not None:
return self.__helper("ping -c 10 -I " + interface + " " + ip)
else:
return self.__helper("ping -c 10 " + ip)
@classmethod
def whob(self, ip, opt=""):
return self.__helper("whob " + opt + " " + ip)
@classmethod
def mediainfo(self, mfile):
return self.__helper("mediainfo " + mfile)
@classmethod
def mplayer(self, mfile):
# remove all vstats file beforehand
filelist = os.listdir(".")
for vfile in filelist:
if "vstats_" in vfile:
os.remove(vfile)
os.system("mplayer " + mfile)
@classmethod
def curl(self, url, user_agent, interface=None):
download_start = datetime.now()
url = str(url)
user_agent = str(user_agent)
print "url is " + url
if interface is not None:
os.system("curl \"" + url + "\" " + "--interface " + interface + " --location --retry 10 --retry-delay 1 --user-agent \"" + user_agent + "\" --trace-time --trace-ascii curltrace > curlout")
else:
print "foo"
os.system("curl \"" + url + "\" --location --retry 10 --retry-delay 1 --user-agent \"" + user_agent + "\" --trace-time --trace-ascii curltrace > curlout")
print "bar"
download_end = datetime.now()
return download_end - download_start
@classmethod
def tcpdump(self, hostname, interface=None):
if interface is not None:
args = ["tcpdump", "-i", interface, "-w", "capture.log", "host", hostname]
else:
#args = ["tcpdump", "-w", "capture.log", "host", hostname]args = ["tcpdump", "-w", "capture.log", "host", hostname]
#dont filter for hostname at the moment
args = ["tcpdump", "-w", "capture.log"]
return subprocess.Popen(args)
@classmethod
def getIPAddrsList(self, hostname):
(hostnamelist, aliaslist, ipaddrslist) = socket.gethostbyname_ex(hostname)
return ipaddrslist
@classmethod
def ytBrowser(self, video_id):
write = ""
# gather data from gdata API
yt_service = gdata.youtube.service.YouTubeService()
yt_service.ssl = False
entry = yt_service.GetYouTubeVideoEntry(video_id=video_id)
write += "video_title: " + entry.media.title.text + "\n"
vc = '0'
if hasattr(entry.statistics, 'view_count'):
vc = entry.statistics.view_count
write += "video_viewcount: " + vc + "\n"
vr = 'N/A'
if hasattr(entry, 'rating') and hasattr(entry.rating, 'average'):
vr = entry.rating.average
write += "video_rating: " + vr + "\n"
write += "video_url: " + entry.media.player.url + "\n"
return write
@classmethod
def write(self, fname, write):
f_out = open(fname, 'w')
f_out.write(write)
f_out.close()
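# Illustrative usage sketch (not part of the original module; the host and
# file names below are placeholders). The @classmethod helpers can be used
# without instantiating Tools:
def _tools_usage_example():
    Tools.chdir("measurement_run")      # creates the directory if necessary
    rtt_report = Tools.ping("8.8.8.8")  # stdout of "ping -c 10 8.8.8.8"
    Tools.write("ping.log", rtt_report)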
class Video:
"""
The video class represents a YouTube video.
It is created using a YouTube video ID and an user agent string.
On initialization, the Video object loads the HTML source and
saves all found URLs and hostnames in private fields.
You can call several functions on a video object,
i.e. get it's YouTube URL, it's YouTube ID, etc.
The agent argument stands for a user agent string used to request the
HTML source code of the webpage containing the YouTube video.
Some example user agent strings:
"""
def __init__(self, video_id, agent='Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27'):
self.user_agent = agent
self.vid = video_id
self.siteString = self.__getHTMLString()
self.urls = self.__getURLs()
self.hostnames = self.__getHostnames()
def getVideoID(self):
return self.vid
def getHTMLString(self):
return self.siteString
def __getHTMLString(self):
headers = { 'User-Agent':self.user_agent }
request = urllib2.Request(self.getURL(), None, headers)
response = urllib2.urlopen(request)
return response.read()
def getURL(self):
# get the full url of the video
# returns http://www.youtube.com/watch?v=<video_id>
return "http://www.youtube.com/watch?v=" + self.vid
def getURLs(self):
return self.urls
"""
def __getURLs(self):
# find all video urls
u = []
# format changed Q2/Q3 2011
strings = string.split(self.siteString, "flashvars=\"")[1]
strings = string.split(strings,"\"")[0]
# strings = string.split(strings,'\"};')[0]
strings = string.split(strings,"&")
for s in strings:
if "url_encoded_fmt_stream_map" in s: # previously was fmt_stream_map
# s = re.split(": \"\d\d|", s)[1]
s = string.split(s, "url%3D")
for rawurl in s:
if "http" in rawurl:
url = urllib.unquote(rawurl)
url = urllib2.unquote(url).replace("%3A",":").replace("%2F","/").replace("%3D","=").replace("%26","&").replace("%3F", "?").replace("%2C", ",")
url = url.rstrip(",")
print url
u.append(url)
return u
"""
def __getURLs(self):
# find all video urls
u = []
strings = string.split(self.siteString,"PLAYER_CONFIG")[1] #extract the swfConfig first
strings = string.split(strings,"});")[0]
a_strings = string.split(strings,'url=')
for i in range(len(a_strings)):
if i != 0:
index = a_strings[i].index('fallback_host')
strings = a_strings[i][0:index-6] # index-6: strip the trailing \u0026 (ampersand)
url = urllib.url2pathname(strings).replace('\/','/').replace("\u0026","&")
#print i,url
u.append(url)
return u
def getHostnames(self):
return self.hostnames
def __getHostnames(self):
hostnames = []
for s in self.urls:
hostname = s.split("//")[1].split("/")[0]
hostnames.append(hostname)
return hostnames
def saveHTML(self):
Tools.write("video.html", self.siteString)
class ExperimentManager:
"""
The ExperimentManager manages everything for you.
Just give it your preferred video id and your running network interfaces,
and the ExperimentManager will perform a series of measurements
covering downloading, tracerouting, media analysis and more.
Example user agent strings (captured and/or from en.wikipedia.org/ ):
user_agent = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)'
user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.0) Gecko/20110214 Firefox/4.0.0'
user_agent = 'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)'
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10'
user_agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27'
user_agent = 'Mozilla/5.0 (Ubuntu; X11; Linux x86_64; rv:8.0) Gecko/20100101 Firefox/8.0'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:8.0) Gecko/20100101 Firefox/8.0'
"""
def __init__(self, video_id, user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:8.0) Gecko/20100101 Firefox/8.0', interface=None):
self.user_agent = user_agent
self.video = Video(video_id, self.user_agent)
self.interface = interface
self.__run()
def __curlOK(self):
"""
Filter the curl trace after a successful download: only the
timestamped lines are kept, since we don't need every detail.
"""
write = ""
pattern = re.compile('^\d\d:\d\d:')
f_in = open("curltrace", 'r')
for line in f_in:
if pattern.match(line):
write += line
return write
def __run(self):
"""
The »run the experiment« function
Here happens the magic:
- get urls and hostnames from video
- measure download time
- get additional info using mediaplayer, mediainfo
- measurements using ping, traceroute, whob
perform all steps on each given networking interface
"""
urls = self.video.getURLs()
hostnames = self.video.getHostnames()
Tools.chdir(self.video.getVideoID())
path = Tools.pwd()
Tools.chdir(path)
if self.interface is not None:
print "---> using interface " + self.interface + "\n"
Tools.chdir(self.interface)
# do this for every URL
for u in urls:
print "---> using URL " + u + "\n"
#host = re.search('http\:\/\/([a-zA-Z0-9\.]*)\/',u).group(1)
host = re.search('http\:\/\/([a-zA-Z0-9\.-]*)\/',u)
if host is not None:
host = host.group(1)
print host
prefix = str(urls.index(u))
# run tcpdump
#tcpdump = Tools.tcpdump(i, host)
tcpdump = Tools.tcpdump(host, self.interface)
# download using curl
download_duration = Tools.curl(u, self.user_agent, self.interface)
# stop tcpdump again
tcpdump.terminate()
# generic log file with additional data from gdata API
write = "url : " + u + "\n"
write += "file_size: " + str(os.path.getsize("curlout")) + "\n"
write += "download_duration: " + str(float(download_duration.seconds) + float(download_duration.microseconds) / 1000000) + "\n"
write += Tools.ytBrowser(self.video.getVideoID())
Tools.write(prefix + ".log", write)
# fs = os.path.getsize("curlout")
if os.path.getsize("curlout") is not 0:
# print "---> Logfile saved"
# write downloadlog
Tools.write(prefix + ".downloadlog", self.__curlOK())
# print "---> Downloadlog saved"
# generate framelog
Tools.mplayer("curlout -lavdopts vstats -vo null -ao null -speed 10")
# assume that the vstats_* file is the one we want
filelist = os.listdir(".")
for vfile in filelist:
if "vstats_" in vfile:
os.rename(vfile, prefix + ".framelog")
if "capture.log" in vfile:
os.rename(vfile, prefix + ".dump")
# print "---> mediaplayer logfile saved"
# ask mediainfo for extended information
Tools.write(prefix + ".mediainfo", Tools.mediainfo("-f curlout"))
# print "---> mediainfo logfile saved"
# check for 302's (redirects)
# store all additional URLs and hostnames from 302's
f_302check = open(prefix + ".downloadlog",'r')
for line in f_302check:
if "== Info: Issue another request to this URL:" in line:
url302 = line.split(": \'")[1].rstrip("\'")
urls.append(url302)
hostname302 = url302.split("//")[1].split("/")[0]
hostnames.append(hostname302)
# self.video.hostnames.append(hostname302)
print "Got redirected to " + url302
print "Redirection hostname " + hostname302
# TODO: delete remnant files (curlout, curltrace)
else:
os.rename("curltrace",prefix+".downloaderrorlog")
print "Download resulted in a zero size file, check the error log for details.\n\n"
# check every hostname in hostnamelist
# run traceroute, ping, whob for every ip we find
# save results in files
for hn in hostnames:
str_traceroute = ""
str_ping = ""
str_whob = ""
prefix = str(hostnames.index(hn))
for ip in Tools.getIPAddrsList(hn):
# traceroute
str_traceroute += Tools.traceroute(ip, self.interface) + "\n\n"
# ping
str_ping += Tools.ping(ip, self.interface) + "\n\n"
# whob
str_whob += Tools.whob(ip) + "\n\n"
# lft
# Tools.lft(ip, "-D " + i))
Tools.write(prefix + ".traceroute", str_traceroute)
print str_traceroute
Tools.write(prefix + ".ping", str_ping)
print str_ping
Tools.write(prefix + ".whob", str_whob)
print str_whob
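# Example invocation (illustrative; the video id and interface are placeholders):
#
#   python ytdl.py dQw4w9WgXcQ eth0
#
# sys.argv[1] is the YouTube video id; any remaining arguments are collected
# into iList below and passed on to ExperimentManager.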
video_id = sys.argv[1]
iList = sys.argv[2:]
em = ExperimentManager(video_id, iList)
# TODO: command line options for tcpdump, modules for other streamers, default option with no given interface
|
unlicense
| 7,069,916,604,199,555,000
| 34.450673
| 201
| 0.537411
| false
| 3.954727
| false
| false
| false
|
quisas/albus
|
cli_tools/openpyxl/reader/comments.py
|
1
|
3069
|
# Copyright (c) 2010-2014 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
from os import path
from openpyxl.comments import Comment
from openpyxl.shared.ooxml import PACKAGE_WORKSHEET_RELS, PACKAGE_WORKSHEETS, \
SHEET_MAIN_NS, COMMENTS_NS
from openpyxl.shared.xmltools import fromstring
def _get_author_list(root):
author_subtree = root.find('{%s}authors' % SHEET_MAIN_NS)
return [author.text for author in author_subtree]
def read_comments(ws, xml_source):
"""Given a worksheet and the XML of its comments file, assigns comments to cells"""
root = fromstring(xml_source)
authors = _get_author_list(root)
comment_nodes = root.iter('{%s}comment' % SHEET_MAIN_NS)
for node in comment_nodes:
author = authors[int(node.attrib['authorId'])]
cell = node.attrib['ref']
text_node = node.find('{%s}text' % SHEET_MAIN_NS)
text = ''
substrs = []
for run in text_node.findall('{%s}r' % SHEET_MAIN_NS):
runtext = ''.join([t.text for t in run.findall('{%s}t' % SHEET_MAIN_NS)])
substrs.append(runtext)
comment_text = ''.join(substrs)
comment = Comment(comment_text, author)
ws.cell(coordinate=cell).comment = comment
def get_comments_file(sheet_codename, archive, valid_files):
"""Returns the XML filename in the archive which contains the comments for
the spreadsheet with codename sheet_codename. Returns None if there is no
such file"""
rels_file = PACKAGE_WORKSHEET_RELS + '/' + sheet_codename + '.rels'
if rels_file not in valid_files:
return None
rels_source = archive.read(rels_file)
root = fromstring(rels_source)
for i in root:
if i.attrib['Type'] == COMMENTS_NS:
comments_file = path.normpath(PACKAGE_WORKSHEETS + '/' + i.attrib['Target'])
if comments_file in valid_files:
return comments_file
return None
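# Illustrative usage sketch (not part of openpyxl itself): `archive` is assumed
# to be a ZipFile over the .xlsx package, `valid_files` its namelist() and `ws`
# a Worksheet, as set up by the workbook reader elsewhere.
def _read_sheet_comments_example(ws, archive, valid_files):
    comments_file = get_comments_file('sheet1.xml', archive, valid_files)
    if comments_file is not None:
        read_comments(ws, archive.read(comments_file))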
|
agpl-3.0
| 6,218,087,853,442,427,000
| 44.132353
| 88
| 0.69306
| false
| 3.807692
| false
| false
| false
|
wimglenn/argboss
|
test_override_kwargs.py
|
1
|
1470
|
from override_kwargs import override_kwargs
from other_module import delegating_function, function
from datetime import datetime
from unittest import TestCase
def function_in_this_module(x=123):
"""hello I'm a docstring"""
return x
class MyClass(object):
def method_in_this_module(x=123):
return x
with override_kwargs('__main__', 'function_in_this_module', {'x': 69}) as f:
assert function_in_this_module() == 69
assert function_in_this_module.__doc__ == f.__doc__
assert function_in_this_module.__name__ == f.__name__
assert function_in_this_module() == 123
# with override_kwargs('__main__', 'MyClass.method_in_this_module', {'x': 69}) as f:
# assert method_in_this_module() == 69 == f()
# assert method_in_this_module.__doc__ == f.__doc__
# assert method_in_this_module.__name__ == f.__name__
# assert method_in_this_module() == 123
with override_kwargs('__main__', 'function', {'x': 69}):
assert function() == 69
assert function() == 123
with override_kwargs('other_module', 'ClassInOtherModule.method', {'x': 69}):
assert delegating_function() == 69
assert delegating_function() == 123
with override_kwargs('other_module', 'another_module.another_function', {0: 69}):
assert delegating_function() == 69
assert delegating_function() == 123
then = datetime(year=1982, month=3, day=19)
with override_kwargs('__main__', 'datetime', {'year': 1982}):
assert datetime(year=2014, month=3, day=19) == then
|
mit
| 6,979,320,661,274,152,000
| 34.853659
| 84
| 0.662585
| false
| 3.394919
| false
| false
| false
|
Fluent-networks/floranet
|
floranet/models/application.py
|
1
|
1851
|
from twisted.internet.defer import inlineCallbacks, returnValue
from floranet.models.model import Model
from floranet.models.appinterface import AppInterface
class Application(Model):
"""LoRa application class
Model representing a LoRa application.
Attributes:
name (str): a user friendly name for the application
domain (str): optional customer domain string
appeui (int): global application ID (IEEE EUI64)
appnonce (int): A unique ID provided by the network server
appkey (int): AES-128 application secret key
fport (int): Port field used for this application
"""
TABLENAME = 'applications'
BELONGSTO = [{'name': 'appinterface', 'class_name': 'AppInterface'}]
HASMANY = [{'name': 'properties', 'class_name': 'AppProperty'}]
@inlineCallbacks
def valid(self):
"""Validate an application object.
Returns:
valid (bool), message(dict): (True, empty) on success,
(False, error message dict) otherwise.
"""
messages = {}
# Check for unique appkeys
duplicate = yield Application.exists(where=['appkey = ? AND appeui != ?',
self.appkey, self.appeui])
if duplicate:
messages['appkey'] = "Duplicate application key exists: appkey " \
"must be unique."
# Check the app interface exists
if self.appinterface_id:
exists = yield AppInterface.exists(where=['id = ?', self.appinterface_id])
if not exists:
messages['appinterface_id'] = "Application interface {} does not " \
"exist.".format(self.appinterface_id)
valid = not any(messages)
returnValue((valid, messages))
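# Illustrative usage sketch (assumes the Twisted/twistar environment that
# floranet configures elsewhere):
@inlineCallbacks
def _validate_application_example(app):
    valid, messages = yield app.valid()
    if not valid:
        # e.g. {'appkey': 'Duplicate application key exists: ...'}
        returnValue(messages)
    returnValue(None)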
|
mit
| -2,307,838,893,648,743,000
| 35.313725
| 86
| 0.593193
| false
| 4.674242
| false
| false
| false
|
collective/collective.anonfeedback
|
src/collective/anonfeedback/tests/test_views.py
|
1
|
2732
|
import unittest2 as unittest
from plone.testing.z2 import Browser
from Products.CMFCore.utils import getToolByName
from plone.app.testing import SITE_OWNER_NAME
from plone.app.testing import SITE_OWNER_PASSWORD
from plone.app.testing import login
from collective.anonfeedback.testing import\
COLLECTIVE_ANONFEEDBACK_FUNCTIONAL_TESTING
class TestInstalled(unittest.TestCase):
layer = COLLECTIVE_ANONFEEDBACK_FUNCTIONAL_TESTING
def setUp(self):
self.app = self.layer['app']
self.portal = self.layer['portal']
self.qi_tool = getToolByName(self.portal, 'portal_quickinstaller')
def get_browser(self, username=None, password=None):
browser = Browser(self.app)
browser.handleErrors = False
portalURL = self.portal.absolute_url()
if username:
browser.open(portalURL + '/login_form')
browser.getControl(name='__ac_name').value = username
browser.getControl(name='__ac_password').value = password
browser.getControl(name='submit').click()
return browser
def test_views(self):
""" Validate that our products GS profile has been run and the product
installed
"""
browser = self.get_browser()
portalURL = self.portal.absolute_url()
browser.open(portalURL)
browser.getLink('Give Feedback').click()
form = browser.getForm(name='feedback')
# Submit an incomplete form
form.getControl('Subject').value = 'Test subject'
form.getControl('Submit').click()
self.assertIn('You must enter a subject and some feedback text.', browser.contents)
# The filled in value remains
form = browser.getForm(name='feedback')
self.assertEqual(form.getControl('Subject').value, 'Test subject')
# Complete the form
form.getControl('Feedback').value = 'Test\nmessage.'
form.getControl('Submit').click()
# It worked.
self.assertIn('Your feedback has been submitted.', browser.contents)
# Fields should now be empty.
form = browser.getForm(name='feedback')
self.assertEqual(form.getControl('Subject').value, '')
# Anonymous people can't view the feedback.
self.assertNotIn('View Feedback', browser.contents)
# Login
browser = self.get_browser(SITE_OWNER_NAME, SITE_OWNER_PASSWORD)
browser.open(portalURL)
# Admin *can* see the feedback.
self.assertIn('View Feedback', browser.contents)
browser.getLink('View Feedback').click()
self.assertIn('<h3>Test subject</h3>', browser.contents)
|
mit
| -8,225,146,587,051,691,000
| 36.438356
| 91
| 0.639092
| false
| 4.302362
| true
| false
| false
|
kylef/goji
|
goji/client.py
|
1
|
7210
|
import datetime
import mimetypes
import os
import pickle
from typing import Any, List, Optional
import click
import requests
from requests.auth import AuthBase, HTTPBasicAuth
from requests.compat import urljoin
from goji.models import Attachment, Comment, Issue, Sprint, Transition, User
class JIRAException(click.ClickException):
def __init__(self, error_messages: List[str], errors):
self.error_messages = error_messages
self.errors = errors
def show(self):
for error in self.error_messages:
click.echo(error)
for (key, error) in self.errors.items():
click.echo('- {}: {}'.format(key, error))
class NoneAuth(AuthBase):
"""
Creates a "None" auth type as if actual None is set as auth and a netrc
credentials are found, python-requests will use them instead.
"""
def __call__(self, request):
return request
class JIRAAuth(HTTPBasicAuth):
def __call__(self, request):
if 'Cookie' in request.headers:
# Prevent authorization headers when cookies are present as it
# causes silent authentication errors on the JIRA instance if
# cookies are used and invalid authorization headers are sent
# (although request succeeds)
if (
'atlassian.xsrf.token' in request.headers['Cookie']
and len(request.headers['Cookie'].split('=')) == 2
):
# continue if the cookie is ONLY the xsrf token
# check is very naive as to not get into cookie parsing
# ensure that we check only for key=value (once) being xsrf
return super(JIRAAuth, self).__call__(request)
return request
return super(JIRAAuth, self).__call__(request)
class JIRAClient(object):
def __init__(self, base_url: str, auth=None):
self.session = requests.Session()
self.base_url = base_url
self.rest_base_url = urljoin(self.base_url, 'rest/api/2/')
if auth:
self.session.auth = JIRAAuth(auth[0], auth[1])
else:
self.session.auth = NoneAuth()
self.load_cookies()
# Persistent Cookie
@property
def cookie_path(self) -> str:
return os.path.expanduser('~/.goji/cookies')
def load_cookies(self) -> None:
if os.path.exists(self.cookie_path):
try:
with open(self.cookie_path, 'rb') as fp:
self.session.cookies = pickle.load(fp)
except Exception as e:
print('warning: Could not load cookies from disk: {}'.format(e))
def save_cookies(self) -> None:
cookies = self.session.cookies.keys()
cookies.remove('atlassian.xsrf.token')
if len(cookies) > 0:
os.makedirs(os.path.expanduser('~/.goji'), exist_ok=True)
with open(self.cookie_path, 'wb') as fp:
pickle.dump(self.session.cookies, fp)
elif os.path.exists(self.cookie_path):
os.remove(self.cookie_path)
# Methods
def validate_response(self, response: requests.Response) -> None:
if response.status_code >= 400 and 'application/json' in response.headers.get(
'Content-Type', ''
):
error = response.json()
raise JIRAException(error.get('errorMessages', []), error.get('errors', {}))
def get(self, path: str, **kwargs) -> requests.Response:
url = urljoin(self.rest_base_url, path)
response = self.session.get(url, **kwargs)
self.validate_response(response)
return response
def post(self, path: str, json) -> requests.Response:
url = urljoin(self.rest_base_url, path)
response = self.session.post(url, json=json)
self.validate_response(response)
return response
def put(self, path: str, json) -> requests.Response:
url = urljoin(self.rest_base_url, path)
response = self.session.put(url, json=json)
self.validate_response(response)
return response
@property
def username(self) -> Optional[str]:
if self.session.auth and isinstance(self.session.auth, JIRAAuth):
return self.session.auth.username
return None
def get_user(self) -> Optional[User]:
response = self.get('myself', allow_redirects=False)
response.raise_for_status()
return User.from_json(response.json())
def get_issue(self, issue_key: str) -> Issue:
response = self.get('issue/%s' % issue_key)
response.raise_for_status()
return Issue.from_json(response.json())
def get_issue_transitions(self, issue_key: str) -> List[Transition]:
response = self.get('issue/%s/transitions' % issue_key)
response.raise_for_status()
return list(map(Transition.from_json, response.json()['transitions']))
def change_status(self, issue_key: str, transition_id: str) -> None:
data = {'transition': {'id': transition_id}}
self.post('issue/%s/transitions' % issue_key, data)
def edit_issue(self, issue_key: str, updated_fields) -> None:
data = {'fields': updated_fields}
self.put('issue/%s' % issue_key, data)
def attach(self, issue_key: str, attachment) -> List[Attachment]:
media_type = mimetypes.guess_type(attachment.name)
files = {
'file': (attachment.name, attachment, media_type[0]),
}
url = urljoin(self.rest_base_url, f'issue/{issue_key}/attachments')
response = self.session.post(
url,
headers={'X-Atlassian-Token': 'no-check'},
files=files,
)
self.validate_response(response)
return list(map(Attachment.from_json, response.json()))
def create_issue(self, fields) -> Issue:
response = self.post('issue', {'fields': fields})
return Issue.from_json(response.json())
def assign(self, issue_key: str, name: Optional[str]) -> None:
response = self.put('issue/%s/assignee' % issue_key, {'name': name})
response.raise_for_status()
def comment(self, issue_key: str, comment: str) -> Comment:
response = self.post('issue/%s/comment' % issue_key, {'body': comment})
return Comment.from_json(response.json())
def search(self, query: str) -> List[Issue]:
response = self.post('search', {'jql': query})
response.raise_for_status()
return list(map(Issue.from_json, response.json()['issues']))
def create_sprint(
self,
board_id: int,
name: str,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
) -> Sprint:
payload = {
'originBoardId': board_id,
'name': name,
}
if start_date:
payload['startDate'] = start_date.isoformat()
if end_date:
payload['endDate'] = end_date.isoformat()
url = urljoin(self.base_url, 'rest/agile/1.0/sprint')
response = self.session.post(url, json=payload)
self.validate_response(response)
return Sprint.from_json(response.json())
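# Illustrative usage sketch (URL, credentials and issue key are placeholders):
def _jira_client_example() -> None:
    client = JIRAClient('https://jira.example.com/', auth=('user', 'secret'))
    issue = client.get_issue('GOJI-1')          # -> goji.models.Issue
    client.comment('GOJI-1', 'Triaged; scheduling for the next sprint.')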
|
bsd-2-clause
| 8,375,053,474,429,902,000
| 33.663462
| 88
| 0.603745
| false
| 3.882606
| false
| false
| false
|
tomazc/orange-bio
|
orangecontrib/bio/widgets/OWGeneNetwork.py
|
1
|
16713
|
from collections import namedtuple
from PyQt4.QtCore import QTimer, QThread, pyqtSlot as Slot
import Orange.data
import Orange.feature
import Orange.network
from Orange.orng.orngDataCaching import data_hints
from Orange.OrangeWidgets import OWWidget
from Orange.OrangeWidgets import OWGUI
from Orange.OrangeWidgets import OWItemModels
from Orange.OrangeWidgets.OWConcurrent import ThreadExecutor, Task, methodinvoke
from .. import ppi, taxonomy, gene
NAME = "Gene Network"
DESCRIPTION = "Extract a gene network for a set of genes."
ICON = "icons/GeneNetwork.svg"
INPUTS = [("Data", Orange.data.Table, "set_data")]
OUTPUTS = [("Network", Orange.network.Graph)]
Source = namedtuple(
"Source",
["name", "constructor", "tax_mapping", "sf_domain", "sf_filename",
"score_filter"]
)
SOURCES = [
Source("BioGRID", ppi.BioGRID, ppi.BioGRID.TAXID_MAP,
"PPI", ppi.BioGRID.SERVER_FILE, False),
Source("STRING", ppi.STRING, ppi.STRING.TAXID_MAP,
"PPI", ppi.STRING.FILENAME, True)
]
class OWGeneNetwork(OWWidget.OWWidget):
settingsList = ["taxid", "use_attr_names", "network_source",
"include_neighborhood", "min_score"]
contextHandlers = {
"": OWWidget.DomainContextHandler(
"", ["taxid", "gene_var_index", "use_attr_names"]
)
}
def __init__(self, parent=None, signalManager=None, title="Gene Network"):
super(OWGeneNetwork, self).__init__(
parent, signalManager, title, wantMainArea=False,
resizingEnabled=False
)
self.taxid = "9606"
self.gene_var_index = -1
self.use_attr_names = False
self.network_source = 1
self.include_neighborhood = True
self.autocommit = False
self.min_score = 0.9
self.loadSettings()
self.taxids = taxonomy.common_taxids()
self.current_taxid_index = self.taxids.index(self.taxid)
self.data = None
self.geneinfo = None
self.nettask = None
self._invalidated = False
box = OWGUI.widgetBox(self.controlArea, "Info")
self.info = OWGUI.widgetLabel(box, "No data on input\n")
box = OWGUI.widgetBox(self.controlArea, "Organism")
self.organism_cb = OWGUI.comboBox(
box, self, "current_taxid_index",
items=map(taxonomy.name, self.taxids),
callback=self._update_organism
)
box = OWGUI.widgetBox(self.controlArea, "Genes")
self.genes_cb = OWGUI.comboBox(
box, self, "gene_var_index", callback=self._update_query_genes
)
self.varmodel = OWItemModels.VariableListModel()
self.genes_cb.setModel(self.varmodel)
OWGUI.checkBox(
box, self, "use_attr_names",
"Use attribute names",
callback=self._update_query_genes
)
box = OWGUI.widgetBox(self.controlArea, "Network")
OWGUI.comboBox(
box, self, "network_source",
items=[s.name for s in SOURCES],
callback=self._on_source_db_changed
)
OWGUI.checkBox(
box, self, "include_neighborhood",
"Include immediate gene neighbors",
callback=self.invalidate
)
self.score_spin = OWGUI.doubleSpin(
box, self, "min_score", 0.0, 1.0, step=0.001,
label="Minimal edge score",
callback=self.invalidate
)
self.score_spin.setEnabled(SOURCES[self.network_source].score_filter)
box = OWGUI.widgetBox(self.controlArea, "Commit")
OWGUI.button(box, self, "Commit", callback=self.commit, default=True)
self.executor = ThreadExecutor()
def set_data(self, data):
self.closeContext()
self.data = data
if data is not None:
self.varmodel[:] = string_variables(data.domain)
taxid = data_hints.get_hint(data, "taxid", default=self.taxid)
if taxid in self.taxids:
self.set_organism(self.taxids.index(taxid))
self.use_attr_names = data_hints.get_hint(
data, "genesinrows", default=self.use_attr_names
)
if not (0 <= self.gene_var_index < len(self.varmodel)):
self.gene_var_index = len(self.varmodel) - 1
self.openContext("", data)
self.invalidate()
self.commit()
else:
self.varmodel[:] = []
self.send("Network", None)
def set_source_db(self, dbindex):
self.network_source = dbindex
self.invalidate()
def set_organism(self, index):
self.current_taxid_index = index
self.taxid = self.taxids[index]
self.invalidate()
def set_gene_var(self, index):
self.gene_var_index = index
self.invalidate()
def query_genes(self):
if self.use_attr_names:
if self.data is not None:
return [var.name for var in self.data.domain.attributes]
else:
return []
elif self.gene_var_index >= 0:
var = self.varmodel[self.gene_var_index]
genes = [str(inst[var]) for inst in self.data
if not inst[var].isSpecial()]
return list(unique(genes))
else:
return []
def invalidate(self):
self._invalidated = True
if self.nettask is not None:
self.nettask.finished.disconnect(self._on_result_ready)
self.nettask.future().cancel()
self.nettask = None
if self.autocommit:
QTimer.singleShot(10, self._maybe_commit)
@Slot()
def _maybe_commit(self):
if self._invalidated:
self.commit()
@Slot()
def advance(self):
self.progressBarValue = (self.progressBarValue + 1) % 100
@Slot(float)
def set_progress(self, value):
self.progressBarValue = value
def commit(self):
include_neighborhood = self.include_neighborhood
query_genes = self.query_genes()
source = SOURCES[self.network_source]
if source.score_filter:
min_score = self.min_score
assert source.name == "STRING"
min_score = min_score * 1000
else:
min_score = None
taxid = self.taxid
progress = methodinvoke(self, "advance")
if self.geneinfo is None:
self.geneinfo = self.executor.submit(
fetch_ncbi_geneinfo, taxid, progress
)
geneinfo_f = self.geneinfo
taxmap = source.tax_mapping
db_taxid = taxmap.get(taxid, taxid)
if db_taxid is None:
raise ValueError("invalid taxid for this network")
def fetch_network():
geneinfo = geneinfo_f.result()
ppidb = fetch_ppidb(source, db_taxid, progress)
return get_gene_network(ppidb, geneinfo, db_taxid, query_genes,
include_neighborhood=include_neighborhood,
min_score=min_score,
progress=methodinvoke(self, "set_progress", (float,)))
self.nettask = Task(function=fetch_network)
self.nettask.finished.connect(self._on_result_ready)
self.executor.submit(self.nettask)
self.setBlocking(True)
self.setEnabled(False)
self.progressBarInit()
self._invalidated = False
self._update_info()
@Slot(object)
def _on_result_ready(self,):
self.progressBarFinished()
self.setBlocking(False)
self.setEnabled(True)
net = self.nettask.result()
self._update_info()
self.send("Network", net)
def _on_source_db_changed(self):
source = SOURCES[self.network_source]
self.score_spin.setEnabled(source.score_filter)
self.invalidate()
def _update_organism(self):
self.taxid = self.taxids[self.current_taxid_index]
if self.geneinfo is not None:
self.geneinfo.cancel()
self.geneinfo = None
self.invalidate()
def _update_query_genes(self):
self.invalidate()
def _update_info(self):
if self.data is None:
self.info.setText("No data on input\n")
else:
names = self.query_genes()
lines = ["%i unique genes on input" % len(set(names))]
if self.nettask is not None:
if not self.nettask.future().done():
lines.append("Retrieving ...")
else:
net = self.nettask.result()
lines.append("%i nodes %i edges" %
(len(net.nodes()), len(net.edges())))
else:
lines.append("")
self.info.setText("\n".join(lines))
def unique(seq):
seen = set()
for el in seq:
if el not in seen:
seen.add(el)
yield el
def string_variables(domain):
variables = domain.variables + domain.getmetas().values()
return [v for v in variables if isinstance(v, Orange.feature.String)]
def multimap_inverse(multimap):
"""
Return a multimap inverse relation.
"""
d = {}
for key, values in multimap.iteritems():
for v in values:
d.setdefault(v, []).append(key)
return d
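# Illustrative example of the inverse relation (key order may vary):
#   multimap_inverse({"GeneA": ["id1", "id2"], "GeneB": ["id2"]})
#   => {"id1": ["GeneA"], "id2": ["GeneA", "GeneB"]}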
def ppidb_synonym_mapping(ppidb, taxid):
keys = ppidb.ids(taxid)
mapping = {key: ppidb.synonyms(key) for key in keys}
return multimap_inverse(mapping)
def taxonomy_match(query_taxids, target_taxids):
taxid_mapping = {}
target_taxids = set(target_taxids)
for taxid in query_taxids:
mapped = taxid_map(taxid, target_taxids)
taxid_mapping[taxid] = mapped
return taxid_mapping
def taxid_map(query, targets):
if query in targets:
return query
lineage = taxonomy.lineage(query)
if any(tid in targets for tid in lineage):
return set(lineage).intersection(targets).pop()
else:
return None
from Orange.utils import serverfiles as sf
def fetch_ppidb(ppisource, taxid, progress=None):
fname = ppisource.sf_filename
if "{taxid}" in fname:
if taxid in ppisource.tax_mapping:
taxid_m = ppisource.tax_mapping[taxid]
if taxid_m is None:
raise ValueError(taxid)
taxid = taxid_m
fname = fname.format(taxid=taxid)
constructor = lambda: ppisource.constructor(taxid)
else:
constructor = ppisource.constructor
sf.localpath_download(
ppisource.sf_domain, fname, callback=progress, verbose=True
)
return constructor()
def fetch_ncbi_geneinfo(taxid, progress=None):
taxid = gene.NCBIGeneInfo.TAX_MAP.get(taxid, taxid)
sf.localpath_download(
"NCBI_geneinfo", "gene_info.{taxid}.db".format(taxid=taxid),
callback=progress, verbose=True,
)
return gene.NCBIGeneInfo(taxid)
def get_gene_network(ppidb, geneinfo, taxid, query_genes,
include_neighborhood=True, min_score=None,
progress=None):
if progress is not None:
progress(1.0)
# Normalize the names to ppidb primary keys
matcher = geneinfo.matcher
query_genes = zip(query_genes, map(matcher.umatch, query_genes))
synonyms = ppidb_synonym_mapping(ppidb, taxid)
query_genes = [(query_gene, geneid,
synonyms.get(query_gene, synonyms.get(geneid)))
for query_gene, geneid in query_genes]
query = [(syn[0], query_gene)
for query_gene, _, syn in query_genes if syn]
net = extract_network(ppidb, dict(query), geneinfo, include_neighborhood,
min_score, progress=progress)
return net
def extract_network(ppidb, query, geneinfo, include_neighborhood=True,
min_score=None, progress=None):
"""
include neighborhood
"""
from functools import partial
from collections import defaultdict
from itertools import count
if not isinstance(query, dict):
query = {name: name for name in query}
report_weights = True
if isinstance(ppidb, ppi.BioGRID):
# BioGRID scores are not comparable (they can be p values,
# confidence scores, ..., i.e. whatever was reported in the source
# publication)
report_weights = False
if min_score is not None:
raise ValueError("min_score used with BioGrid")
graph = Orange.network.Graph()
# node ids in Orange.network.Graph need to be in [0 .. n-1]
nodeids = defaultdict(partial(next, count()))
def gi_info(names):
mapping = [(name, geneinfo.matcher.umatch(name)) for name in names]
mapping = [(name, match) for name, match in mapping if match]
entries = [(name, geneinfo[match]) for name, match in mapping]
if len(entries) > 1:
# try to resolve conflicts by prioritizing entries whose
# symbol/gene_id/locus_tag exactly matches the synonym name.
entries_ = [(name, entry) for name, entry in entries
if name in [entry.gene_id, entry.symbol, entry.locus_tag]]
if len(entries_) == 1:
entries = entries_
if len(entries) == 0:
return None
elif len(entries) >= 1:
# Need to report multiple mappings
return entries[0][1]
# Add query nodes.
for key, query_name in query.items():
nodeid = nodeids[key]
synonyms = ppidb.synonyms(key)
entry = gi_info(synonyms)
graph.add_node(
nodeid,
key=key,
synonyms=synonyms,
query_name=query_name,
symbol=entry.symbol if entry is not None else ""
)
if include_neighborhood:
# extend the set of nodes in the network with immediate neighbors
edges_iter = (edge for key in query for edge in ppidb.edges(key))
for id1, id2, score in edges_iter:
if min_score is None or score >= min_score:
nodeid1 = nodeids[id1]
nodeid2 = nodeids[id2]
if nodeid1 not in graph:
synonyms1 = ppidb.synonyms(id1)
entry1 = gi_info(synonyms1)
symbol1 = entry1.symbol if entry1 is not None else ""
graph.add_node(
nodeid1, key=id1, synonyms=synonyms1,
symbol=symbol1
)
if nodeid2 not in graph:
synonyms2 = ppidb.synonyms(id2)
entry2 = gi_info(synonyms2)
symbol2 = entry2.symbol if entry2 is not None else ""
graph.add_node(
nodeid2, key=id2, synonyms=synonyms2,
symbol=symbol2
)
# add edges between nodes
for i, id1 in enumerate(nodeids.keys()):
if progress is not None:
progress(100.0 * i / len(nodeids))
for _, id2, score in ppidb.edges(id1):
if id2 in nodeids and (min_score is None or score >= min_score):
nodeid1 = nodeids[id1]
nodeid2 = nodeids[id2]
assert nodeid1 in graph and nodeid2 in graph
if score is not None and report_weights:
graph.add_edge(nodeid1, nodeid2, weight=score)
else:
graph.add_edge(nodeid1, nodeid2)
nodedomain = Orange.data.Domain(
[Orange.feature.String("Query name"), # if applicable
Orange.feature.String("id"), # ppidb primary key
Orange.feature.String("Synonyms"), # ppidb synonyms
Orange.feature.String("Symbol"), # ncbi gene name ??
Orange.feature.Discrete("source", values=["false", "true"])],
None
)
node_items = sorted(graph.node.items(), key=lambda t: nodeids[t[0]])
nodeitems = Orange.data.Table(
nodedomain,
[[str(node.get("query_name", "")),
str(node.get("key", "")),
str(", ".join(node.get("synonyms", []))),
str(node.get("symbol", nodeid)),
"true" if "query_name" in node else "false"]
for nodeid, node in node_items]
)
graph.set_items(nodeitems)
return graph
def main():
from PyQt4.QtGui import QApplication
app = QApplication([])
w = OWGeneNetwork()
brown = Orange.data.Table("brown-selected")
w.set_data(Orange.data.Table(brown[:5]))
w.show()
app.exec_()
w.saveSettings()
return 0
if __name__ == "__main__":
main()
|
gpl-3.0
| 4,020,125,862,851,062,000
| 31.389535
| 90
| 0.579609
| false
| 3.739763
| false
| false
| false
|
SkyTruth/pelagos-data
|
setup.py
|
1
|
2925
|
#!/usr/bin/env python
# This document is part of FS_Nav
# https://github.com/geowurster/FS_Nav
# =================================================================================== #
#
# New BSD License
#
# Copyright (c) 2014, Kevin D. Wurster
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =================================================================================== #
"""
Setup script for PelagosProcessing
"""
from glob import glob
from setuptools import setup, find_packages
import pelagos_processing
setup(
name='PelagosProcessing',
version=pelagos_processing.__version__,
author=pelagos_processing.__author__,
author_email=pelagos_processing.__author_email__,
description=pelagos_processing.__doc__,
long_description=pelagos_processing.__doc__,
license=pelagos_processing.__license__,
url=pelagos_processing.__source__,
packages=find_packages(),
scripts=glob('bin/*.py'),
include_package_data=True,
classifiers=[
'Topic :: Terminals',
'Topic :: Utilities',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy'
]
)
|
mit
| -8,811,666,424,475,370,000
| 35.5625
| 87
| 0.674188
| false
| 4.479326
| false
| false
| false
|
KRHS-GameProgramming-2016/Memefinity
|
Wall.py
|
1
|
1721
|
import pygame, sys, math
class Wall(pygame.sprite.Sprite):
def __init__(self, pos=[0,0], size=None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/wall/wall.png")
if size:
self.image = pygame.transform.scale(self.image, [size,size])
self.rect = self.image.get_rect(center = pos)
def shiftX(self, amount):
self.rect.x += amount
class Wall_5x5(pygame.sprite.Sprite):
def __init__(self, pos=[0,0], size=None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/wall/wall.png")
if size:
self.image = pygame.transform.scale(self.image, [size*5,size*5])
self.rect = self.image.get_rect(center = pos)
def shiftX(self, amount):
self.rect.x += amount
class Ground(pygame.sprite.Sprite):
def __init__(self, pos=[0,0], size=None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/wall/ground.png")
if size:
self.image = pygame.transform.scale(self.image, [size,size])
self.rect = self.image.get_rect(center = pos)
def shiftX(self, amount):
self.rect.x += amount
class Background(pygame.sprite.Sprite):
def __init__(self, image, size = None):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/ball/"+image)
if size:
self.image = pygame.transform.scale(self.image, size)
self.rect = self.image.get_rect()
def shiftX(self, amount):
self.rect.x += amount
print "shifting"
|
mit
| 727,751,280,407,669,500
| 35.617021
| 76
| 0.595003
| false
| 3.462777
| false
| false
| false
|
project-generator/project_generator
|
project_generator/commands/build.py
|
1
|
3506
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from ..tools_supported import ToolsSupported
from ..generate import Generator
from ..settings import ProjectSettings
from . import argparse_filestring_type, argparse_string_type, split_options
help = 'Build a project'
def run(args):
# Export if we know how, otherwise return
combined_projects = args.projects + args.project or ['']
kwargs = split_options(args.options)
generator = Generator(args.file)
any_build_failed = False
any_export_failed = False
for project_name in combined_projects:
for project in generator.generate(project_name):
clean_failed = False
if args.clean and project.clean(args.tool) == -1:
clean_failed = True # So we don't attempt to generate or build this project.
any_build_failed = True
if not clean_failed:
if project.generate(args.tool, args.copy) == -1:
any_export_failed = True
if project.build(args.tool, jobs=args.jobs, **kwargs) == -1:
any_build_failed = True
if args.stop_on_failure and (any_build_failed or any_export_failed):
break
if any_build_failed or any_export_failed:
return -1
else:
return 0
def setup(subparser):
subparser.add_argument('-v', dest='verbosity', action='count', default=0,
help='Increase the verbosity of the output (repeat for more verbose output)')
subparser.add_argument('-q', dest='quietness', action='count', default=0,
help='Decrease the verbosity of the output (repeat for less verbose output)')
subparser.add_argument(
"-f", "--file", help="YAML projects file", default='projects.yaml',
type=argparse_filestring_type)
subparser.add_argument(
"-p", "--project", dest="projects", action='append', default=[], help="Name of the project to build")
subparser.add_argument(
"-t", "--tool", help="Build a project files for provided tool",
type=argparse_string_type(str.lower, False), choices=list(ToolsSupported.TOOLS_DICT.keys()) + list(ToolsSupported.TOOLS_ALIAS.keys()))
subparser.add_argument(
"-c", "--copy", action="store_true", help="Copy all files to the exported directory")
subparser.add_argument(
"-k", "--clean", action="store_true", help="Clean project before building")
subparser.add_argument(
"-o", "--options", action="append", help="Toolchain options")
subparser.add_argument(
"-x", "--stop-on-failure", action="store_true", help="Stop on first failure")
subparser.add_argument(
"-j", "--jobs", action="store", type=int, default=1,
help="Number of concurrent build jobs (not supported by all tools)")
subparser.add_argument("project", nargs='*',
help="Specify projects to be generated and built")
|
apache-2.0
| -4,970,498,958,962,731,000
| 45.746667
| 142
| 0.653451
| false
| 4.095794
| false
| false
| false
|
kfarr2/django-local-settings
|
local_settings/__init__.py
|
1
|
3356
|
import json
import os
import sys
from .color_printer import ColorPrinter
from .checker import Checker
from .exc import LocalSettingsError, SettingsFileNotFoundError
from .loader import Loader
from .types import LocalSetting, SecretSetting
from .util import NO_DEFAULT, get_file_name
from .__main__ import make_local_settings
def load_and_check_settings(base_settings, file_name=None, section=None, base_path=None,
quiet=NO_DEFAULT):
"""Merge local settings from file with base settings, then check.
Returns a new OrderedDict containing the base settings and the
loaded settings. Ordering is:
- base settings
- settings from extended file(s), if any
- settings from file
When a setting is overridden, it gets moved to the end.
Settings loaded from the specified file will override base settings,
then the settings will be checked to ensure that all required local
settings have been set.
If a file name is passed: if the file exists, local settings will be
loaded from it and any missing settings will be appended to it; if
the file does not exist, it will be created and all settings will be
added to it.
If a file name isn't passed: if the ``LOCAL_SETTINGS_FILE_NAME``
environment variable is set, the specified file will be used;
otherwise ``{base_path}/local.cfg`` will be used.
``base_path`` is used when ``file_name`` is relative; if it's not
passed, it will be set to the current working directory.
When ``quiet`` is ``True``, informational messages will not be
printed. The ``LOCAL_SETTINGS_CONFIG_QUIET`` can be used to set
``quiet`` (use a JSON value like 'true', '1', 'false', or '0').
See :meth:`.Loader.load` and :meth:`.Checker.check` for more info.
"""
if quiet is NO_DEFAULT:
quiet = json.loads(os.environ.get('LOCAL_SETTINGS_CONFIG_QUIET', 'false'))
if not quiet:
printer = ColorPrinter()
key = 'LOCAL_SETTINGS_DISABLE'
disable_local_settings = os.environ.get(key, base_settings.get(key, False))
if disable_local_settings:
if not quiet:
printer.print_warning('Loading of local settings disabled')
return
else:
if file_name is None:
file_name = get_file_name()
if not os.path.isabs(file_name):
base_path = base_path or os.getcwd()
file_name = os.path.normpath(os.path.join(base_path, file_name))
try:
try:
loader = Loader(file_name, section)
settings = loader.load(base_settings)
registry = loader.registry
except SettingsFileNotFoundError:
registry = None
checker = Checker(file_name, section, registry=registry)
success = checker.check(settings)
except KeyboardInterrupt:
# Loading/checking of local settings was aborted with Ctrl-C.
# This isn't an error, but we don't want to continue.
if not quiet:
printer.print_warning('\nAborted loading/checking of local settings')
sys.exit(0)
if not success:
raise LocalSettingsError(
'Could not load local settings from {0}'.format(file_name))
if not quiet:
printer.print_success('Settings loaded successfully from {0}'.format(file_name))
return settings
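# Illustrative usage sketch of the typical settings-module pattern (names and
# defaults are examples only):
#
#   # settings.py
#   from local_settings import LocalSetting, SecretSetting, load_and_check_settings
#   DEBUG = LocalSetting(default=False)
#   SECRET_KEY = SecretSetting()
#   globals().update(load_and_check_settings(globals()))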
|
mit
| -7,191,827,870,245,881,000
| 38.023256
| 89
| 0.666865
| false
| 4.163772
| false
| false
| false
|
line/line-bot-sdk-python
|
tests/models/test_base.py
|
1
|
2959
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals, absolute_import
import json
import unittest
from linebot.models import Base
class Hoge(Base):
def __init__(self, title=None, content=None, hoge_bar=None, **kwargs):
super(Hoge, self).__init__(**kwargs)
self.title = title
self.content = content
self.hoge_bar = hoge_bar
class TestBase(unittest.TestCase):
def test_as_json_string(self):
self.assertEqual(
Hoge().as_json_string(),
'{}')
self.assertEqual(
Hoge(title='title').as_json_string(),
'{"title": "title"}')
self.assertEqual(
Hoge(title='title', content='content').as_json_string(),
'{"content": "content", "title": "title"}')
self.assertEqual(
Hoge(title='title', content={"hoge": "hoge"}).as_json_string(),
'{"content": {"hoge": "hoge"}, "title": "title"}')
self.assertEqual(
Hoge(title=[1, 2]).as_json_string(),
'{"title": [1, 2]}')
self.assertEqual(
Hoge(hoge_bar='hoge_bar').as_json_string(),
'{"hogeBar": "hoge_bar"}')
def test_as_json_dict(self):
self.assertEqual(
Hoge().as_json_dict(),
{})
self.assertEqual(
Hoge(title='title').as_json_dict(),
{'title': 'title'})
self.assertEqual(
Hoge(title='title', content='content').as_json_dict(),
{'content': 'content', 'title': 'title'})
self.assertEqual(
Hoge(title='title', content={"hoge": "hoge"}).as_json_dict(),
{'content': {'hoge': 'hoge'}, 'title': 'title'})
self.assertEqual(
Hoge(title=[1, 2]).as_json_dict(),
{'title': [1, 2]})
def test_new_from_json_dict(self):
self.assertEqual(
Hoge.new_from_json_dict({"title": "title"}),
Hoge(title='title'))
self.assertEqual(
Hoge.new_from_json_dict(json.loads('{"title": "title"}')),
Hoge(title='title'))
self.assertEqual(
Hoge.new_from_json_dict({"hoge_bar": "hoge_bar"}),
Hoge(hoge_bar='hoge_bar'))
self.assertEqual(
Hoge.new_from_json_dict({"hogeBar": "hoge_bar"}),
Hoge(hoge_bar='hoge_bar'))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 4,919,719,617,394,605,000
| 33.406977
| 76
| 0.560324
| false
| 3.639606
| true
| false
| false
|
brianlions/python-nebula
|
nebula/log.py
|
1
|
13537
|
#!/usr/bin/env python3
#
# Copyright (c) 2012 Brian Yi ZHANG <brianlions at gmail dot com>
#
# This file is part of pynebula.
#
# pynebula is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynebula is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynebula. If not, see <http://www.gnu.org/licenses/>.
#
import time
import traceback
import os
import sys
class Logger(object):
'''
'''
EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG = range(0, 8)
LOG_LEVELS = frozenset((EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, DEBUG))
__level_names = {
EMERG: ('eme', 'emerg'),
ALERT: ('ale', 'alert'),
CRIT: ('cri', 'crit'),
ERR: ('err', 'err'),
WARNING: ('war', 'warning'),
NOTICE: ('not', 'notice'),
INFO: ('inf', 'info'),
DEBUG: ('deb', 'debug'),
}
@classmethod
def log_mask(cls, level):
'''Returns log mask for the specified log level.
Args:
level: one of the constants in Logger.LOG_LEVELS.
Returns:
An integer which can be passed to set_log_mask() etc.
'''
if level not in cls.__level_names:
raise ValueError("invalid log level: {:d}".format(level))
return (1 << level)
@classmethod
def mask_upto(cls, level):
'''Returns log mask for all levels through level.
Args:
level: one of the constants in Logger.LOG_LEVELS.
Returns:
An integer which can be passed to set_log_mask() etc.
'''
if level not in cls.__level_names:
raise ValueError("invalid log level: {:d}".format(level))
return (1 << (level + 1)) - 1
@classmethod
def level_name(cls, level, abbr = False):
'''Returns name of the specified log level.
Args:
level: one of the constants in Logger.LOG_LEVELS.
abbr: whether to use the abbreviated name or not.
Returns:
Human-readable string representation of the log level.'''
if level not in cls.__level_names:
raise ValueError("invalid log level: {:d}".format(level))
return cls.__level_names[level][(not abbr) and 1 or 0]
@classmethod
def timestamp_str(cls, now = None, use_gmtime = False, show_timezone = False):
'''Format and return current date and time.
Args:
now: seconds (as float) since the unix epoch, use current
time stamp if value is false.
use_gmtime: whether to use GMT time or not.
show_timezone: whether to display the time zone or not.
Returns:
String representation of date & time, the format of the returned
value is "YYYY.mm.dd-HH:MM:SS.ssssss-ZZZ".
'''
if not now:
now = time.time()
if show_timezone:
tz_format = use_gmtime and '-GMT' or '-%Z'
else:
tz_format = ''
return time.strftime('%Y.%m.%d-%H:%M:%S' + ('.%06d' % ((now - int(now)) * 1000000)) + tz_format,
use_gmtime and time.gmtime(now) or time.localtime(now))
def __init__(self, log_mask = None, use_gmtime = False, show_timezone = True):
self.__log_mask = log_mask and log_mask or self.mask_upto(self.INFO)
self.__use_gmtime = use_gmtime and True or False
self.__show_timezone = show_timezone and True or False
def set_log_mask(self, new_mask):
'''Set log mask, and return previous log mask.
Args:
new_mask: the new log mask to be set to.
Returns:
Previous log mask (as integer).
'''
if new_mask < self.mask_upto(self.EMERG) or new_mask > self.mask_upto(self.DEBUG):
raise ValueError("invalid log mask: {:d}".format(new_mask))
old_mask = self.__log_mask
self.__log_mask = new_mask
return old_mask
def set_max_level(self, max_level):
'''Log all messages through max_level.
Args:
max_level: one of the constants in Logger.LOG_LEVELS.
Returns:
Previous log mask (as integer).
'''
return self.set_log_mask(Logger.mask_upto(max_level))
def is_use_gmtime(self):
'''Whether we are using GMT time representation of not.
Returns:
True if using GMT, False otherwise.
'''
return self.__use_gmtime
def is_show_timezone(self):
'''Whether we are printing the time zone of not.
Returns:
True if printing time zone, False otherwise.
'''
return self.__show_timezone
def log(self, level, msg, use_gmtime = None, show_timezone = None,
stack_limit = 2):
'''Generate one log message.
Args:
level: level of the message
msg: string message to be logged
use_gmtime: whether to use GMT or not, if value is None, use the
value passed to __init__()
show_timezone: whether to log time zone or not, if value is None, use
the value passed to __init__()
stack_limit: passed to traceback.extract_stack(), in order to get
the correct file name, line number, and method name.
Returns:
True if the message was logged, False otherwise.
'''
if self.log_mask(level) & self.__log_mask:
file_name, line_num, func_name = traceback.extract_stack(limit = stack_limit)[0][:3]
# remove current working directory if it is prefix of the file name
cwd = os.getcwd() + os.path.sep
if file_name.startswith(cwd):
file_name = '.' + os.path.sep + file_name[len(cwd):]
if use_gmtime is None:
use_gmtime = self.is_use_gmtime()
if show_timezone is None:
show_timezone = self.is_show_timezone()
self.output_message(level, msg, file_name, line_num, func_name,
use_gmtime = use_gmtime,
show_timezone = show_timezone)
return True
else:
return False
def debug(self, msg, stack_limit = 3):
return self.log(self.DEBUG, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def info(self, msg, stack_limit = 3):
return self.log(self.INFO, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def notice(self, msg, stack_limit = 3):
return self.log(self.NOTICE, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def warning(self, msg, stack_limit = 3):
return self.log(self.WARNING, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def err(self, msg, stack_limit = 3):
return self.log(self.ERR, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def crit(self, msg, stack_limit = 3):
return self.log(self.CRIT, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def alert(self, msg, stack_limit = 3):
return self.log(self.ALERT, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def emerg(self, msg, stack_limit = 3):
return self.log(self.EMERG, msg, use_gmtime = self.__use_gmtime,
show_timezone = self.__show_timezone,
stack_limit = stack_limit)
def output_message(self, level, msg, file_name, line_num, func_name,
use_gmtime = None, show_timezone = None):
'''Method that subclasses MUST implement.
Args:
level: (int) level of the message
msg: (str) message to be logged
file_name: (str) in which file the message was generated
line_num: (int) at which line the message was generated
func_name: (str) in which method (or function) the message was
generated
use_gmtime: (bool) whether to use GMT or not
show_timezone: (bool) whether to log the time zone or not
Returns:
(not required)
'''
raise NotImplementedError("{:s}.{:s}: output_message() not implemented".format(self.__class__.__module__,
self.__class__.__name__))
#-------------------------------------------------------------------------------
class ConsoleLogger(Logger):
'''Logger which logs messages to the console (stdout).'''
def __init__(self, *args, **kwargs):
super(ConsoleLogger, self).__init__(*args, **kwargs)
def output_message(self, level, msg, file_name, line_num, func_name,
use_gmtime = None, show_timezone = None):
'''Implements the abstract method defined in parent class.'''
if use_gmtime is None:
use_gmtime = self.is_use_gmtime()
if show_timezone is None:
show_timezone = self.is_show_timezone()
# time, log level, file name, line number, method name, log message
print("[{:s} {:s} {:s}:{:d}:{:s}] {:s}".format(self.timestamp_str(use_gmtime, show_timezone),
self.level_name(level, abbr = True),
file_name, line_num, func_name, msg))
sys.stdout.flush()
#-------------------------------------------------------------------------------
class WrappedLogger(object):
def __init__(self, log_handle = None):
self.__log_handle = None
self.set_log_handle(log_handle)
def set_log_handle(self, log_handle):
'''Set new log handle to be used.
Args:
log_handle: new log handle to be used
Returns:
Previous log handle, value might be None.
'''
if (log_handle is not None) and (not isinstance(log_handle, Logger)):
raise TypeError("log_handle {:s} is not an instance of {:s}.Logger".format(repr(log_handle),
self.__class__.__module__))
prev_handle = self.__log_handle
self.__log_handle = log_handle
return prev_handle
def get_log_handle(self):
'''Get the log handle currently in use.
Returns:
Current log handle in use, value might be None.
'''
return self.__log_handle
def log_debug(self, msg):
if self.__log_handle:
self.__log_handle.debug(msg, stack_limit = 4)
def log_info(self, msg):
if self.__log_handle:
self.__log_handle.info(msg, stack_limit = 4)
def log_notice(self, msg):
if self.__log_handle:
self.__log_handle.notice(msg, stack_limit = 4)
def log_warning(self, msg):
if self.__log_handle:
self.__log_handle.warning(msg, stack_limit = 4)
def log_err(self, msg):
if self.__log_handle:
self.__log_handle.err(msg, stack_limit = 4)
def log_crit(self, msg):
if self.__log_handle:
self.__log_handle.crit(msg, stack_limit = 4)
def log_alert(self, msg):
if self.__log_handle:
self.__log_handle.alert(msg, stack_limit = 4)
def log_emerg(self, msg):
if self.__log_handle:
self.__log_handle.emerg(msg, stack_limit = 4)
#-------------------------------------------------------------------------------
def demo():
logger = ConsoleLogger(show_timezone = True)
for max_level in (Logger.DEBUG, Logger.INFO, Logger.NOTICE, Logger.WARNING, Logger.ERR):
print("max log level: %s" % Logger.level_name(max_level))
logger.set_log_mask(Logger.mask_upto(max_level))
for level in (Logger.DEBUG, Logger.INFO, Logger.NOTICE, Logger.WARNING, Logger.ERR):
logger.log(level, "message level %s" % Logger.level_name(level, abbr = False))
print()
print("max log level: %s" % Logger.level_name(Logger.DEBUG))
logger.set_log_mask(Logger.mask_upto(logger.DEBUG))
logger.debug("debug()")
logger.info("info()")
logger.notice("notice()")
logger.warning("wanring()")
logger.err("err()")
if __name__ == '__main__':
demo()
|
gpl-3.0
| -4,693,924,603,498,929,000
| 35.292225
| 114
| 0.540149
| false
| 4.093438
| false
| false
| false
|
tommo/gii
|
lib/mock/asset/AnimatorAsset.py
|
1
|
1686
|
import os.path
import json
from gii.core import *
from gii.qt.dialogs import requestString, alertMessage
from mock import _MOCK
##----------------------------------------------------------------##
class AnimatorDataCreator(AssetCreator):
def getAssetType( self ):
return 'animator_data'
def getLabel( self ):
return 'Animator Data'
def createAsset( self, name, contextNode, assetType ):
ext = '.animator_data'
filename = name + ext
if contextNode.isType('folder'):
nodepath = contextNode.getChildPath( filename )
else:
nodepath = contextNode.getSiblingPath( filename )
fullpath = AssetLibrary.get().getAbsPath( nodepath )
modelName = _MOCK.Model.findName( 'AnimatorData' )
assert( modelName )
_MOCK.createEmptySerialization( fullpath, modelName )
return nodepath
##----------------------------------------------------------------##
class AnimatorDataAssetManager( AssetManager ):
def getName( self ):
return 'asset_manager.animator_data'
def acceptAssetFile(self, filepath):
if not os.path.isfile(filepath): return False
if not filepath.endswith( '.animator_data' ): return False
return True
def importAsset(self, node, reload = False ):
node.assetType = 'animator_data'
node.setObjectFile( 'data', node.getFilePath() )
# def editAsset(self, node):
# editor = app.getModule( 'animator' )
# if not editor:
# return alertMessage( 'Designer not load', 'AnimatorData Designer not found!' )
# editor.openAsset( node )
##----------------------------------------------------------------##
AnimatorDataAssetManager().register()
AnimatorDataCreator().register()
AssetLibrary.get().setAssetIcon( 'animator_data', 'clip' )
|
mit
| -5,049,870,976,723,200,000
| 29.107143
| 83
| 0.641756
| false
| 3.572034
| false
| false
| false
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/Properties/from_thesis/HMM8_then1_ConnectedLHS.py
|
1
|
2650
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM8_then1_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM8_then1_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM8_then1_ConnectedLHS, self).__init__(name='HMM8_then1_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM8_then1')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
mit
| 4,038,207,856,086,871,600
| 42.442623
| 125
| 0.47434
| false
| 5.206287
| false
| false
| false
|
weka511/bioinformatics
|
phylogeny.py
|
1
|
34566
|
# Copyright (C) 2020-2021 Greenweaves Software Limited
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Phylogeny -- http://rosalind.info/problems/topics/phylogeny/
import re
from rosalind import LabelledTree
from random import randrange
from newick import newick_to_adjacency_list
import numpy as np
from numpy import argmin, argmax
from fasta import FastaContent
from helpers import flatten
# tree -- Completing a Tree
#
# Given: A positive integer n (n<=1000) and an adjacency list corresponding to a graph on n nodes that contains no cycles.
#
# Return: The minimum number of edges that can be added to the graph to produce a tree.
# This is the number of independent components - 1
def CompleteTree(n,adj):
# create_twigs
#
# Build a dictionary to show which nodes each node is linked to
def create_twigs():
twigs = {i:set() for i in range(1,n+1)}
for a,b in adj:
twigs[a].add(b)
twigs[b].add(a)
return twigs
# find_component
#
# Find one component of graph
def find_component(start):
component = [] # The component being built
todo = set() # Nodes being considered for inclusion
todo.add(start)
while len(todo)>0:
current = todo.pop()
component.append(current)
for node in twigs[current]:
if node not in component:
todo.add(node)
for c in component:
del twigs[c]
return component
twigs = create_twigs()
components = []
while len(twigs)>0:
components.append(find_component(list(twigs.keys())[0]))
return len(components)-1
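# Illustrative example (added for clarity, not from the original module): with
# n = 5 nodes and edges (1,2) and (3,4) there are three connected components
# ({1,2}, {3,4}, {5}), so two more edges are needed to join them into a tree.
def _example_complete_tree():
    assert CompleteTree(5, [(1, 2), (3, 4)]) == 2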
def chbp(species,character_table):
pass
# cstr
#
# Creating a Character Table from Genetic Strings http://rosalind.info/problems/cstr/
def cstr(strings):
def trivial(split):
if len(split)<2: return True
for k,v in split.items():
if v<2: return True
return False
choices = [[] for s in strings[0]]
counts = [{} for s in strings[0]]
for i in range(len(strings[0])):
for s in strings:
if not s[i] in choices[i]:
choices[i].append(s[i])
if s[i] in counts[i]:
counts[i][s[i]] += 1
else:
counts[i][s[i]] = 1
splits=[]
for i in range(len(strings[0])):
split = {}
for c in choices[i]:
split[c] = 0
for s in strings:
for c in choices[i]:
if s[i]==c:
split[c]+=1
splits.append(split)
result=[]
for i in range(len(strings[0])):
character = []
split = splits[i]
if not trivial(split):
chs = list(split.keys())
for s in strings:
character.append('0' if s[i]==chs[0] else '1')
result.append(''.join(character))
return result
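# Illustrative example (added for clarity, hypothetical strings; assumes the
# insertion-ordered dicts of Python 3.7+): only positions where both symbols
# occur at least twice give a non-trivial split, so these four strings yield
# the single character "0011".
def _example_cstr():
    assert cstr(['ATC', 'ATC', 'TTC', 'TTC']) == ['0011']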
# ctbl Creating a Character Table http://rosalind.info/problems/ctbl/
def CharacterTable(tree):
def create_character(split_species):
character=[]
for s in species:
character.append(1 if s in split_species else 0)
return ''.join([str(c) for c in character])
species=[spec.name for spec in tree.find_elements(terminal=True)]
species.sort()
clades=[clade for clade in tree.find_clades(terminal=False)]
# we iterate over all Clades except the root
return [create_character([spec.name for spec in split.find_elements(terminal=True)]) for split in clades[1:]]
# NumberBinaryTrees
#
# cunr Counting Unrooted Binary Trees
# root Counting Rooted Binary Trees
# See http://carrot.mcb.uconn.edu/~olgazh/bioinf2010/class16.html
def NumberBinaryTrees(n,rooted=True):
N = 1
m = 2*n-3 if rooted else 2*n-5
while m>1:
N *=m
m -= 2
return N
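# Quick sanity check (added as an example): the counts are the double factorials
# (2n-3)!! for rooted and (2n-5)!! for unrooted trees, so for n = 4 taxa there
# are 15 rooted and 3 unrooted binary trees.
def _example_number_binary_trees():
    assert NumberBinaryTrees(4) == 15
    assert NumberBinaryTrees(4, rooted=False) == 3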
class UnrootedBinaryTree:
@classmethod
# EnumerateUnrootedBinaryTrees
#
# Given: A collection of species names representing n taxa.
#
# Return: A list containing all unrooted binary trees whose leaves are these n
# taxa. Trees should be given in Newick format, with one tree on each line;
# the order of the trees is unimportant.
#
# Idea: all rooted trees with a given number of leaves are isomorphic if we
# ignore the labels of the leaves and nodes. Therefore it is enough to
# build a tree with 3 leaves, and keep adding one leaf at a time in all available positions.
def Enumerate(cls,species):
def enumerate(n):
if n==3:
return [cls({0:[species[0], species[1], species[2]]})]
else:
return [cls.insert(species[n-1],edge,graph) for graph in enumerate(n-1) for edge in graph.Edges()]
return enumerate(len(species))
# insert
#
# Create a tree by adding one new internal node and a leaf to a specified edge
@classmethod
def insert(cls,species,edge,graph):
nextNode = max(list(graph.adj.keys())) + 1
n1,n2 = edge
adj = {nextNode: [species,n2]}
for node,links in graph.adj.items():
adj[node] = [nextNode if ll==n2 else ll for ll in links] if node==n1 else links
return cls(adj)
def __init__(self,adj):
self.adj = adj
def __str__(self):
return self.bfs_newick()
# bfs_newick
#
# Create Newick representation by traversing the tree from the given node
def bfs_newick(self,node=0):
newick = []
for child in self.adj[node]:
if type(child)==int:
newick.append(self.bfs_newick(node=child))
else:
newick.append(child)
representation = ','.join(newick)
return f'({representation})'
def Edges(self):
for a,b in self.adj.items():
for c in b:
yield a,c
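# Illustrative example (added for clarity, hypothetical taxa): enumerating the
# unrooted binary trees on four taxa should produce (2*4-5)!! = 3 trees,
# agreeing with NumberBinaryTrees above.
def _example_enumerate_unrooted():
    trees = UnrootedBinaryTree.Enumerate(['a', 'b', 'c', 'd'])
    assert len(trees) == NumberBinaryTrees(4, rooted=False)
    return [str(tree) for tree in trees]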
# qrt Incomplete Characters
#
# Given: A partial character table C
#
# Return: The collection of all quartets that can be inferred from the splits corresponding to the underlying characters of C
def qrt(taxa,characters):
def tuples(n):
for i in range(n):
for j in range(n):
if i==j: continue
for k in range(n):
if k in [i,j]: continue
for l in range(n):
if l in [i,j,k]: continue
if i<j and k<l and i<k:
yield i,j,k,l
def isConsistent(selector):
for char in characters:
character = [char[i] for i in selector]
if any(c is None for c in character): continue
if character[0]==character[1] and character[2]==character[3] and character[0]!=character[2]: return True
return False
for (i,j,k,l) in tuples(len(taxa)):
selector = [i,j,k,l]
if isConsistent(selector):
yield [taxa[m] for m in selector]
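# Illustrative example (added for clarity, hypothetical character table): the
# single character 0011 splits {a,b} from {c,d}, which is exactly the quartet
# that can be inferred from it.
def _example_qrt():
    assert list(qrt(['a', 'b', 'c', 'd'], [[0, 0, 1, 1]])) == [['a', 'b', 'c', 'd']]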
# snarfed from https://stackoverflow.com/questions/51373300/how-to-convert-newick-tree-format-to-a-tree-like-hierarchical-object
def parse(newick,start=0):
tokens = re.findall(r"([^:;,()\s]*)(?:\s*:\s*([\d.]+)\s*)?([,);])|(\S)", newick+";")
def recurse(nextid = start, parentid = -1): # one node
thisid = nextid;
children = []
name, length, delim, ch = tokens.pop(0)
if ch == "(":
while ch in "(,":
node, ch, nextid = recurse(nextid+1, thisid)
children.append(node)
name, length, delim, ch = tokens.pop(0)
return {"id": thisid, "name": name, "length": float(length) if length else None,
"parentid": parentid, "children": children}, delim, nextid
return recurse()[0]
def create_adj(tree):
adj = {}
def dfs(tree):
id = tree['id']
name = tree['name']
children = tree['children']
parentid = tree['parentid']
if len(name)==0:
adj[id]=[]
if parentid>-1:
adj[parentid].append(id if len(name)==0 else name)
for child in children:
dfs(child)
dfs(tree)
return adj
# SPTD Phylogeny Comparison with Split Distance
def sptd(species,newick1,newick2):
def replace_leaves(adj):
return {parent:sorted([seiceps[child] if child in seiceps else child for child in children]) for parent,children in adj.items() }
def edges(adj):
for parent,children in adj.items():
for child in children:
if child >= n:
yield parent,child
def splits(adj,min_size=2):
def find_leaves(node,path=[]):
for child in adj[node]:
if child<n:
path.append(child)
else:
find_leaves(child,path=path)
for parent,child in edges(adj):
s1 = []
find_leaves(child,s1)#[leaf for leaf in find_leaves(child)]
if len(s1)<min_size: continue
s2 = [leaf for leaf in range(n) if not leaf in s1]
yield sorted(s1),sorted(s2)
def ds(adj1,adj2):
shared = 0
splits1 = sorted([s for s,_ in splits(adj1)])
splits2 = sorted([s for s,_ in splits(adj2)])
k1 = 0
k2 = 0
i1 = splits1[k1]
i2 = splits2[k2]
while k1<len(splits1) and k2<len(splits2):
if i1==i2:
shared += 1
k1 += 1
k2 += 1
if k1<len(splits1) and k2<len(splits2):
i1 = splits1[k1]
i2 = splits2[k2]
elif i1<i2:
k1+=1
if k1<len(splits1):
i1 = splits1[k1]
else:
k2+=1
if k2<len(splits2):
i2 = splits2[k2]
return 2*(n-3)- 2* shared
n = len(species)
seiceps = {species[i]:i for i in range(n)}
return ds(replace_leaves(create_adj(parse(newick1,start=n))),
replace_leaves(create_adj(parse(newick2,start=n))))
# MEND Inferring Genotype from a Pedigree
#
# Given: A rooted binary tree T in Newick format encoding an individual's pedigree
# for a Mendelian factor whose alleles are A (dominant) and a (recessive).
#
# Return: Three numbers between 0 and 1, corresponding to the respective probabilities
# that the individual at the root of T will exhibit the "AA", "Aa" and "aa" genotypes.
def mend(node):
# combine
#
# Combine two genomes with known probabilities - work out the probabilities in the next generation
#
# NB: the tree is a pedigree, not a phylogeny: the root is the descendant!
def combine(f1,f2):
return np.sum([[f*f1[i]*f2[j] for f in factors[i][j]] for i in range(n) for j in range(n)],
axis=0)
# Probability of each combination in the initial generation, when we know the genome
frequencies = {
'aa': (0,0,1),
'Aa': (0,1,0),
'AA': (1,0,0)
}
# Probability of each combination when we combine two genomes
factors=[# AA Aa/aA aa
[ [1.0, 0.0, 0.0], [0.50, 0.50, 0.00], [0.0, 1.0, 0.0] ], #AA
[ [0.5, 0.5, 0.0], [0.25, 0.50, 0.25], [0.0, 0.5, 0.5] ], #Aa/aA
[ [0.0, 1.0, 0.0], [0.00, 0.50, 0.50], [0.0, 0.0, 1.0] ] #aa
]
n = len(frequencies) # Number of combinations
# If we are at a leaf, we have a known ancestor
if len(node.nodes)==0:
try:
return frequencies['Aa' if node.name=='aA' else node.name]
except KeyError:
return (0,0)
parent_freqs = [mend(parent) for parent in node.nodes]
parent_freqs = [pp for pp in parent_freqs if len(pp)==n]
return combine(parent_freqs[0],parent_freqs[1])
# SmallParsimony
#
# Find the most parsimonious labeling of the internal nodes of a rooted tree.
#
# Given: An integer n followed by an adjacency list for a rooted binary tree with n leaves labeled by DNA strings.
#
# Return: The minimum parsimony score of this tree, followed by the adjacency list of the tree
# corresponding to labeling internal nodes by DNA strings in order to minimize the parsimony score of the tree.
def SmallParsimony(T,alphabet='ATGC'):
# SmallParsimonyC Solve small parsimony for one character
def SmallParsimonyC(Character):
# get_ripe
#
# Returns: a node that is ready for processing
def get_ripe():
for v in T.get_nodes():
if not processed[v] and v in T.edges:
for e,_ in T.edges[v]:
if e>v: continue
if not processed[e]: break
return v
return None
# calculate_s
# Calculate score if node v is set to a specified symbol
# Parameters:
# symbol The symbol, e.g. 'A', not the index in alphabet
# v The node
def calculate_s(symbol,v):
# delta
#
# Complement of Kronecker delta
def delta(i):
return 0 if symbol==alphabet[i] else 1
def get_min(e):
return min(s[e][i]+delta(i) for i in range(len(alphabet)))
return sum([get_min(e) for e,_ in T.edges[v]])
# update_assignments
#
# Parameters:
# v
# s
def update_assignments(v,s):
if not v in assignments.labels:
assignments.labels[v]=''
index = 0
min_s = float('inf')
for i in range(len(s)):
if s[i] < min_s:
min_s = s[i]
index = i
assignments.set_label(v,assignments.labels[v]+alphabet[index])
return alphabet[index]
# backtrack
#
# Process internal node of tree top down, starting from root
def backtrack(v, current_assignment):
for v_next,_ in T.edges[v]:
if T.is_leaf(v_next): continue
if not v_next in assignments.labels:
assignments.labels[v_next]=''
min_score = min([s[v_next][i] for i in range(len(alphabet))])
indices = [i for i in range(len(alphabet)) if s[v_next][i]==min_score ]
matched = False
for i in indices:
if alphabet[i]==current_assignment:
matched = True
assignments.set_label(v_next,assignments.labels[v_next]+current_assignment)
backtrack(v_next,current_assignment)
if not matched:
# Black magic alert: I am not clear why the introduction of random numbers
# helps here. Maybe it stops the tree being biased towards the first strings
# in the alphabet.
next_assignment = alphabet[indices[randrange(0,(len(indices)))]]
assignments.set_label(v_next,assignments.labels[v_next]+next_assignment)
backtrack(v_next,next_assignment)
processed = {}
s = {}
# Compute scores for all leaves, and mark internal nodes unprocessed
for v in T.get_nodes():
if T.is_leaf(v):
processed[v]=True
s[v] = [0 if symbol==Character[v] else float('inf') for symbol in alphabet]
else:
processed[v]=False
# Process ripe (unprocessed, but whose children have been processed)
# until there are none left
# Keep track of last node as we will use it to start backtracking
v = get_ripe()
while not v == None:
processed[v] = True
s[v] = [calculate_s(symbol,v) for symbol in alphabet ]
v_last = v
v = get_ripe()
backtrack(v_last,update_assignments(v_last,s[v_last]))
return min([s[v_last][c] for c in range(len(alphabet))])
assignments = LabelledTree(T.N)
assignments.initialize_from(T)
return sum([SmallParsimonyC([v[i] for l,v in T.labels.items()]) for i in range(len(T.labels[0]))]),assignments
# alph
#
# Given: A rooted binary tree T on n species, given in Newick format, followed by a multiple alignment of m
# augmented DNA strings having the same length (at most 300 bp) corresponding to the species
# and given in FASTA format.
#
# Return: The minimum possible value of dH(T), followed by a collection of DNA strings to be
# assigned to the internal nodes of T that will minimize dH(T).
def alph(T,Alignment,Alphabet=['A','T','C','G','-']):
# create_fixed_alignments
#
# Extract dictionary of leaves from Alignment
#
# Returns: length of any string in alignment, plus dictionary of leaves
def create_fixed_alignments():
Leaves = {}
k = None
for i in range(0,len(Alignment),2):
Leaves[Alignment[i]] = Alignment[i+1]
if k==None:
k = len(Alignment[i+1])
else:
assert k==len(Alignment[i+1]),f'Alignments should all have same length.'
return k,Leaves
# SmallParsimony
#
# This is the Small Parsimony algorithm from Pevzner and Compeau, which
# processes a single character
#
# Parameters:
# l Index of character in Alignment
# Returns: Score of best assignment, plus an assignment of character that provides this score
def SmallParsimony(l):
# is_ripe
#
# Determine whether node v is ready for processing
# A ripe node is one that hasn't been processed,
# but its children have
def is_ripe(v):
for child in Adj[v]:
if not Tag[child]: return False
return True
# find_ripe
#
# Find list of nodes that are ready to be processed
#
# Input: A list of nodes
# Returns: Two lists, those ready for processing, and those which are not
def find_ripe(Nodes):
Ripe = []
Unripe = []
for v in Nodes:
if is_ripe(v):
Ripe.append(v)
else:
Unripe.append(v)
return Ripe,Unripe
# delta
#
# The delta function from Pevzner and Compeau: not the Kronecker delta
def delta(i,j):
return 0 if i==j else 1
# get_distance
#
# Get total distance of node from its children assuming one trial assignment
#
# Parameters:
# v Current node
# k Index of character for trial
def get_distance(v,k):
# best_alignment
#
# Find best alignment with child (measured by varying child's index) given
# the current choice of character in this node
#
# Parameters:
# k Trial alignment for this node
def best_alignment(child):
return min([s[child][i] + delta(i,k) for i in range(len(Alphabet))])
return sum([best_alignment(child) for child in Adj[v]])
# backtrack
#
# Perform a depth first search through all nodes to determine the alignment.
# Parameters:
# root Root node
# s Scores for all possible best assignments to all nodes
# Returns:
# score Score of best assignment,
# ks For each node the assignment of character that provides this score
# represented as an index into the alphabet
#
#
# Comment by Robert Goldberg-Alberts.
# The Backtrack portion of the code consists of a breadth first tracking through the tree from
# the root in a left to right fashion through the nodes (sons and daughters)
# row after row until you finally reach the leaves. The leaves already have values assigned to them from the data
# At the root, determine whether the value of the node is A, C, T, G by taking the minimum value of the
# four numbers created for the root set. Break ties by selecting from the ties at random.
# After that, for subsequent nodes take the minimum of each value at a node and determine if
# there are ties at the minimum. Check to see if the ancestor parent of that node has a value
# that is contained in the eligible nucleotides from the node. If it IS contained there force the
# ancestor value for that node.
# Continue in that fashion until all the internal nodes have values assigned to them.
def backtrack(root,s):
def dfs(node,k,parent_score):
def match(i,j,child_scores):
return parent_score == child_scores[0][i] + child_scores[1][j]
if len(Adj[node])==0: return
children = Adj[node]
child_scores_delta = [[s[child][i] + delta(i,k) for i in range(len(Alphabet))] for child in children]
child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children]
candidates = [(i,j,child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) \
if match(i,j,child_scores_delta)]
selection = candidates[randrange(len(candidates))]
scores_children = [selection[2][i][selection[i]] for i in range(len(children))]
for i in range(len(children)):
ks[children[i]] = selection[i]
for i in range(len(children)):
dfs(children[i],ks[children[i]],scores_children[i])
ks = {}
index = argmin(s[root])
score = s[root][index]
ks[root] = index
dfs(root,index,score)
return score, ks
s = {} # Scores for nodes
Tag = {} # Nodes that have been processed
ToBeProcessed = [] # Nodes that have yet to be processed
# Partition nodes into two groups: leaves are easily processed,
# the others are all marked as unprocessed
for v in Adj.keys():
if v in Leaves:
char = Leaves[v][l]
s[v] = [0 if Alphabet[k]==char else float('inf') for k in range(len(Alphabet))]
Tag[v] = True
else:
Tag[v] = False
ToBeProcessed.append(v)
Ripe,ToBeProcessed = find_ripe(ToBeProcessed)
while len(Ripe)>0:
for v in Ripe:
s[v] = [get_distance(v,k) for k in range(len(Alphabet))]
Tag[v] = True
Ripe,ToBeProcessed = find_ripe(ToBeProcessed)
assert len(ToBeProcessed)==0,'If there are no ripe nodes, ToBeProcessed should be exhausted'
return backtrack(v,s)
Adj = newick_to_adjacency_list(T)
L,Leaves = create_fixed_alignments()
Assignment = {a:[] for a in Adj.keys()}
d = 0
assert len([node for node,value in Adj.items() if len(value)==0 and node not in Leaves])==0,\
f'Some nodes are leaves, but have no strings in alignment'
for l in range(L):
score,ks = SmallParsimony(l)
d += score
for v,index in ks.items():
Assignment[v].append(Alphabet[index])
return d,[(f'{a}',''.join(b)) for a,b in Assignment.items() if len(Adj[a])!=0]
# chbp Character-Based Phylogeny
#
# Strategy: sort character table on entropy, then use each character to divide clades into two.
def chbp(species,character_table):
# Clade
#
# This class represents one clade or taxon
class Clade:
def __init__(self,taxa):
self.taxa = [s for s in taxa]
def is_trivial(self):
return len(self.taxa)==0
def is_singleton(self):
return len(self.taxa)==1
# newick
#
# Convert to string in Newick format
def newick(self):
def conv(taxon):
if type(taxon)==int:
return species[taxon]
else:
return taxon.newick()
if self.is_singleton():
return conv(self.taxa[0])
else:
return '(' + ','.join(conv(taxon) for taxon in self.taxa) +')'
# split
#
# Split clade in two using character: list of taxa is replaced by two clades
#
# Returns True if clade has been split into two non-trivial clades
# False if at least one clade would be trivial--in which case clade is unchanged
#
def split(self,character):
left = []
right = []
for i in self.taxa:
if character[i]==0:
left.append(i)
else:
right.append(i)
leftTaxon = Clade(left)
rightTaxon = Clade(right)
if leftTaxon.is_trivial(): return False
if rightTaxon.is_trivial(): return False
self.taxa = [leftTaxon,rightTaxon]
return True
# splitAll
#
# Split clade using character table
def splitAll(self,characters,depth=0):
if depth<len(characters):
if self.split(characters[depth]):
for taxon in self.taxa:
taxon.splitAll(characters,depth+1)
else:
self.splitAll(characters,depth+1)
# Calculate entropy of a single character
def get_entropy(freq):
if freq==0 or freq==n: return 0
p1 = freq/n
p2 = 1-p1
return - p1 *np.log(p1) - p2 * np.log(p2)
n = len(species)
entropies = [get_entropy(sum(char)) for char in character_table]
entropy_indices = np.argsort(entropies)
characters = [character_table[i] for i in entropy_indices[::-1]]
indices = list(range(len(species)))
root = Clade(indices)
root.splitAll(characters)
return f'{root.newick()};'
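# Illustrative example (added for clarity, hypothetical taxa): a single
# character 0011 separates {a,b} from {c,d}, so the inferred topology is
# ((a,b),(c,d));.
def _example_chbp():
    assert chbp(['a', 'b', 'c', 'd'], [[0, 0, 1, 1]]) == '((a,b),(c,d));'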
# RSUB Identifying Reversing Substitutions
#
# Given: A rooted binary tree T with labeled nodes in Newick format, followed by a collection of at most
# 100 DNA strings in FASTA format whose labels correspond to the labels of T.
#
# We will assume that the DNA strings have the same length, which does not exceed 400 bp.
#
# Return: A list of all reversing substitutions in T (in any order), with each substitution encoded by the following three items:
#
# the name of the species in which the symbol is first changed, followed by the name of the species in which it changes back to its original state
# the position in the string at which the reversing substitution occurs; and
# the reversing substitution in the form original_symbol->substituted_symbol->reverted_symbol.
def rsub(T,Assignments):
# find_path
#
# Find path from the root down to a specified leaf
def find_path(leaf):
Path = [leaf]
parent = Parents[leaf]
while len(parent)>0:
Path.append(parent)
if parent in Parents:
parent = Parents[parent]
else:
break
return Path[::-1]
# FindReversingSubstitutions
#
# Find reversing substitutions in one specified path through the tree,
# affecting a specified position in the strings
#
# Parameters: Path Path to be searched
# pos position in tree
# Strategy: build up history of changes, and search back whenever a change is detected.
def FindReversingSubstitutions(Path,pos):
History = [Characters[Path[0]][pos]]
Names = Path[0:1]
Reverses = []
for taxon in Path[1:]:
current = Characters[taxon][pos]
if current==History[-1]: continue
History.append(current)
Names.append(taxon)
if len(History)>2 and History[-3]==History[-1]: # we have a reverse
Reverses.append((Names[-2],Names[-1],pos+1,History[-3],History[-2],History[-1]))
return Reverses
# create_parents
# Invert the adjacency list so we have the parent of each child
def create_parents(Adj):
Product = {node:[] for node in flatten(Adj.values())}
for parent,children in Adj.items():
for child in children:
Product[child] = parent
return Product
# get_unique
#
# Convert list of lists into a single list and remove duplicate elements
def get_unique(list_of_lists):
return list(set(flatten(list_of_lists)))
Adj,root = newick_to_adjacency_list(T,return_root=True)
fc = FastaContent(Assignments)
Characters = fc.to_dict() # So we can find character for each species
_,string = fc[0]
m = len(string)
Parents = create_parents(Adj)
Paths = [find_path(node) for node in flatten(Adj.values()) if len(Adj[node])==0]
# Build list of unique reversals.
return get_unique([subst for subst in [FindReversingSubstitutions(path,pos) for path in Paths for pos in range(m)] if len(subst)>0])
# cset A submatrix of a matrix M is a matrix formed by selecting rows and columns from M and
# taking only those entries found at the intersections of the selected rows and columns.
# We may also think of a submatrix as formed by deleting the remaining rows and columns from M
#
# Given: An inconsistent character table C on at most 100 taxa.
#
# Return: A submatrix of C representing a consistent character table on the same taxa
# and formed by deleting a single row of C.
def cset(table):
# get_split
#
# Used to split indices of character (row) into two groups, one for each allele
# First we yield all indices corresponding to 0, then those to 1
def get_splits(character):
for allele in [0,1]:
yield set(i for i, c in enumerate(character) if c == allele)
# conflicts_with
#
# Determine whether two characters are in conflict
# We iterate through all the splits of each character.
# If any pair of splits consists of two disjoint subsets,
# the characters are compatible.
def conflicts_with(c1, c2):
for part1 in get_splits(c1):
for part2 in get_splits(c2):
if len(part1.intersection(part2)) == 0: return False
return True
n = len(table)
Conflicts = [0 for _ in range(n)] # Count number of times each row conflicts with another
for i in range(n):
for j in range(i+1,n):
if conflicts_with(table[i],table[j]):
Conflicts[i] += 1
Conflicts[j] += 1
return [table[row] for row in range(n) if row!=argmax(Conflicts)]
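# Illustrative example (added for clarity, hypothetical table): rows 0 and 2
# encode the same split while row 1 conflicts with both, so deleting row 1
# leaves a consistent character table.
def _example_cset():
    assert cset([[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 1, 1]]) == [[0, 0, 1, 1], [0, 0, 1, 1]]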
# cntq Counting Quartets
def cntq(n,newick):
def create_adj(tree):
adj = {}
def bfs(tree):
id = tree['id']
name = tree['name']
children = tree['children']
parentid = tree['parentid']
if len(name)==0:
adj[id]=[]
if parentid>-1:
adj[parentid].append(id if len(name)==0 else name)
for child in children:
bfs(child)
bfs(tree)
return adj
def bfs(subtree,leaves):
for node in adj[subtree]:
if type(node)==str:
leaves.append(node)
else:
bfs(node,leaves)
def pair(leaves):
for i in range(len(leaves)):
for j in range(i+1,len(leaves)):
yield [leaves[i],leaves[j]] if leaves[i]<leaves[j] else [leaves[j],leaves[i]]
adj = create_adj(parse(newick))
taxa = [leaf for children in adj.values() for leaf in children if type(leaf)==str]
splitting_edges = [(key,child) for key,value in adj.items() for child in value if type(child)==int]
Quartets = []
for _,node in splitting_edges:
leaves = []
bfs(node,leaves)
other_leaves = [leaf for leaf in taxa if leaf not in leaves]
for pair1 in pair(leaves):
for pair2 in pair(other_leaves):
quartet = pair1 + pair2 if pair1[0]<pair2[0] else pair2 + pair1
Quartets.append(quartet)
Quartets.sort()
Unique =[Quartets[0]]
for i in range(1,len(Quartets)):
if Quartets[i]!=Unique[-1]:
Unique.append(Quartets[i])
return len(Unique),Unique
|
gpl-3.0
| -8,262,652,753,592,696,000
| 35.424658
| 149
| 0.549615
| false
| 3.889064
| false
| false
| false
|
lebek/reversible-raytracer
|
util.py
|
1
|
1864
|
import numpy as np
import theano
import theano.tensor as T
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from scipy.misc import imsave
def initialize_weight(n_vis, n_hid, W_name, numpy_rng, rng_dist):
if 'uniform' in rng_dist:
W = numpy_rng.uniform(low=-np.sqrt(6. / (n_vis + n_hid)),\
high=np.sqrt(6. / (n_vis + n_hid)),
size=(n_vis, n_hid)).astype(theano.config.floatX)
elif rng_dist == 'normal':
W = 0.01 * numpy_rng.normal(size=(n_vis, n_hid)).astype(theano.config.floatX)
return theano.shared(value = W, name=W_name, borrow=True)
'''decaying learning rate'''
def get_epsilon(epsilon, n, i):
return float(epsilon / ( 1 + i/float(n)))
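# Example (added for clarity): the rate decays hyperbolically with iteration i,
# e.g. with epsilon = 0.1 and n = 10 it has halved after 10 iterations.
def _example_get_epsilon():
    assert abs(get_epsilon(0.1, 10, 10) - 0.05) < 1e-12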
def broadcasted_switch(a, b, c):
return T.switch(a.dimshuffle(0, 1, 'x'), b, c)
def transNorm(transM, vec):
transN = T.zeros_like(vec)
transN = T.set_subtensor(transN[:,:,0], vec[:,:,0] * transM[0][0] \
+ vec[:,:,1] * transM[1][0] + vec[:,:,2] * transM[2][0])
transN = T.set_subtensor(transN[:,:,1], vec[:,:,0] * transM[0][1] \
+ vec[:,:,1] * transM[1][1] + vec[:,:,2] * transM[2][1])
transN = T.set_subtensor(transN[:,:,2], vec[:,:,0] * transM[0][2] \
+ vec[:,:,1] * transM[1][2] + vec[:,:,2] * transM[2][2])
return transN
def drawWithMarkers(fname, im):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im, interpolation='nearest')
ax.add_patch(plt.Rectangle((85-3, 90-3), 6, 6, color='red',
linewidth=2, fill=False))
ax.add_patch(plt.Rectangle((90-3, 50-3), 6, 6, color='red',
linewidth=2, fill=False))
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
def draw(fname, im):
imsave(fname, im)
|
mit
| -7,149,291,079,279,311,000
| 32.285714
| 88
| 0.551502
| false
| 2.977636
| false
| false
| false
|
tensorflow/lucid
|
lucid/modelzoo/caffe_models/others.py
|
1
|
5766
|
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts, IMAGENET_MEAN_BGR
class CaffeNet_caffe(Model):
"""CaffeNet (AlexNet variant included in Caffe)
CaffeNet is a slight variant on AlexNet, described here:
https://github.com/BVLC/caffe/tree/master/models/bvlc_reference_caffenet
"""
model_path = 'gs://modelzoo/vision/caffe_models/CaffeNet.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_standard.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_standard_synsets.txt'
dataset = 'ImageNet'
image_shape = [227, 227, 3]
is_BGR = True
image_value_range = (-IMAGENET_MEAN_BGR, 255-IMAGENET_MEAN_BGR)
input_name = 'data'
CaffeNet_caffe.layers = _layers_from_list_of_dicts(CaffeNet_caffe(), [
{'tags': ['conv'], 'name': 'conv5/concat', 'depth': 256} ,
{'tags': ['conv'], 'name': 'conv5/conv5', 'depth': 256} ,
{'tags': ['dense'], 'name': 'fc6/fc6', 'depth': 4096} ,
{'tags': ['dense'], 'name': 'fc7/fc7', 'depth': 4096} ,
{'tags': ['dense'], 'name': 'prob', 'depth': 1000} ,
])
class VGG16_caffe(Model):
"""VGG16 model used in ImageNet ILSVRC-2014, ported from caffe.
VGG16 was introduced by Simonyan & Zisserman (2014):
https://arxiv.org/pdf/1409.1556.pdf
http://www.robots.ox.ac.uk/~vgg/research/very_deep/
as the Oxford Visual Geometry Group's submission for the ImageNet ILSVRC-2014
contest. We download their caffe trained model from
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
and convert it with caffe-tensorflow.
"""
model_path = 'gs://modelzoo/vision/caffe_models/VGG16.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_standard.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_standard_synsets.txt'
dataset = 'ImageNet'
image_shape = [224, 224, 3]
is_BGR = True
image_value_range = (-IMAGENET_MEAN_BGR, 255-IMAGENET_MEAN_BGR)
input_name = 'input'
VGG16_caffe.layers = _layers_from_list_of_dicts(VGG16_caffe(), [
{'tags': ['conv'], 'name': 'conv1_1/conv1_1', 'depth': 64},
{'tags': ['conv'], 'name': 'conv1_2/conv1_2', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2_1/conv2_1', 'depth': 128},
{'tags': ['conv'], 'name': 'conv2_2/conv2_2', 'depth': 128},
{'tags': ['conv'], 'name': 'conv3_1/conv3_1', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_2/conv3_2', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_3/conv3_3', 'depth': 256},
{'tags': ['conv'], 'name': 'conv4_1/conv4_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_2/conv4_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_3/conv4_3', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_1/conv5_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_2/conv5_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_3/conv5_3', 'depth': 512},
{'tags': ['dense'], 'name': 'fc6/fc6', 'depth': 4096},
{'tags': ['dense'], 'name': 'fc7/fc7', 'depth': 4096},
{'tags': ['dense'], 'name': 'prob', 'depth': 1000},
])
class VGG19_caffe(Model):
"""VGG16 model used in ImageNet ILSVRC-2014, ported from caffe.
VGG19 was introduced by Simonyan & Zisserman (2014):
https://arxiv.org/pdf/1409.1556.pdf
http://www.robots.ox.ac.uk/~vgg/research/very_deep/
as the Oxford Visual Geometry Group's submission for the ImageNet ILSVRC-2014
contest. We download their caffe trained model from
https://gist.github.com/ksimonyan/3785162f95cd2d5fee77#file-readme-md
and convert it with caffe-tensorflow.
"""
model_path = 'gs://modelzoo/vision/caffe_models/VGG19.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_standard.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_standard_synsets.txt'
dataset = 'ImageNet'
image_shape = [224, 224, 3]
is_BGR = True
image_value_range = (-IMAGENET_MEAN_BGR, 255-IMAGENET_MEAN_BGR)
input_name = 'input'
VGG19_caffe.layers = _layers_from_list_of_dicts(VGG19_caffe(), [
{'tags': ['conv'], 'name': 'conv1_1/conv1_1', 'depth': 64},
{'tags': ['conv'], 'name': 'conv1_2/conv1_2', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2_1/conv2_1', 'depth': 128},
{'tags': ['conv'], 'name': 'conv2_2/conv2_2', 'depth': 128},
{'tags': ['conv'], 'name': 'conv3_1/conv3_1', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_2/conv3_2', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_3/conv3_3', 'depth': 256},
{'tags': ['conv'], 'name': 'conv3_4/conv3_4', 'depth': 256},
{'tags': ['conv'], 'name': 'conv4_1/conv4_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_2/conv4_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_3/conv4_3', 'depth': 512},
{'tags': ['conv'], 'name': 'conv4_4/conv4_4', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_1/conv5_1', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_2/conv5_2', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_3/conv5_3', 'depth': 512},
{'tags': ['conv'], 'name': 'conv5_4/conv5_4', 'depth': 512},
{'tags': ['dense'], 'name': 'fc6/fc6', 'depth': 4096},
{'tags': ['dense'], 'name': 'fc7/fc7', 'depth': 4096},
{'tags': ['dense'], 'name': 'prob', 'depth': 1000},
])
|
apache-2.0
| 5,251,851,677,240,020,000
| 45.5
| 91
| 0.626084
| false
| 2.760172
| false
| false
| false
|
seoweon/narajangteo
|
narajangteo_crawling.py
|
1
|
9596
|
# coding: utf-8
# # <center> Narajangteo (나라장터) bid announcement crawling with Python3</center>
#
# A simple program for monitoring bid announcements posted on Narajangteo, the Korean government e-procurement site. Given a list of search keywords, it fetches the bid announcements published over the last 7 days and organizes them into an Excel file. It is a crawling program, but it does not use BeautifulSoup.
# In[18]:
import pandas as pd
import numpy as np
import requests
import os
import datetime, time
import string
from time import localtime, strftime
from datetime import timedelta
from tqdm import tqdm
from xlsxwriter.utility import xl_col_to_name, xl_range
from lxml import html
# In[6]:
class KoreaPageScraper(object):
def __init__(self):
pass
def request_url(self,cat):
'''returns url for a category'''
d = datetime.date.today()
fromtd = d - timedelta(days=7)
start_date = str(fromtd.strftime("%Y/%m/%d"))
end_date =str(d.strftime("%Y/%m/%d"))
fromBidDt = requests.utils.quote(start_date, safe='')
toBidDt = requests.utils.quote(end_date, safe='')
bidNm = requests.utils.quote(cat.encode('euc-kr'))
url = "http://www.g2b.go.kr:8101/ep/tbid/tbidList.do?taskClCds=&bidNm=" + bidNm + "&searchDtType=1&fromBidDt=" + fromBidDt + "&toBidDt=" + toBidDt + "&fromOpenBidDt=&toOpenBidDt=&radOrgan=1&instNm=&exceptEnd=Y&area=®Yn=Y&bidSearchType=1&searchType=1&recordCountPerPage=1000"
return url
def scrape_cat(self,cat):
'''searches for each category'''
cat_url = self.request_url(cat)
df = pd.read_html(cat_url)[0]
df['search_term']=cat
return df
def get_bidurl(self,bidnum):
'''gets the bid url based on the bid registration number
(ones that do not have a proper bid registration number usually do not have a corresponding link and instead ask the user to go to the organization website for more information)'''
num_split = str(bidnum).split(sep='-')
bidno = num_split[0]
if len(bidno) == 11:
bidseq = num_split[-1]
bidurl = "http://www.g2b.go.kr:8081/ep/invitation/publish/bidInfoDtl.do?bidno="+bidno+"&bidseq="+bidseq
return bidurl
else:
return "Check organization website (공고기관) for details"
def scrape_categories(self, categories):
'''scrapes each keyword and compiles it into a list.
There is a 1 second delay between each search term to prevent getting blocked out of the site'''
appended_df = []
for category in tqdm(categories):
one_df = self.scrape_cat(category)
appended_df.append(one_df)
time.sleep(1)
appended_df = pd.concat(appended_df, axis = 0)
urlist=[]
for index,row in appended_df.iterrows():
urlist.append(self.get_bidurl(row['공고번호-차수']))
appended_df['url']=urlist
return appended_df
# In[7]:
#function to read txt files and parse the list
def txt_reader(name):
with open(name+".txt",'rb') as f:
line = f.readline()
return line.decode('utf-8').split('/')
# In[8]:
#load the categories with the txt_reader function
category_list = txt_reader('category')
print("Getting the list of given keywords: " +str(category_list).replace('[','').replace(']','').replace("'",""))
#scrape with the "KoreaPageScraper" class
myscraper = KoreaPageScraper()
df = myscraper.scrape_categories(category_list)
# In[42]:
print(str(len(df))+" results have been found. ")
# In[11]:
#Load the excluding keywords
with open('exclude.txt','rb') as f:
line = f.readline()
contains_excluding = line.decode('utf-8').replace('/','|')
# In[40]:
print("Excluding the list of given keywords: "+str(txt_reader('exclude')).replace('[','').replace(']','').replace("'",""))
# In[43]:
#Deleting the excluding keywords and informing how many lines were deleted.
og = len(df)
df = df[-df.공고명.str.contains(contains_excluding).fillna(True)]
print("Deleted "+str(og-len(df))+" entries with keywords to exclude. (Currently at "+str(len(df))+" entries)")
# In[53]:
def clean_up(df):
#Delete duplicates (more than two keywords together)
og2 = len(df)
df = df[~df.duplicated(['공고명'])].copy()
print(str(og2-len(df))+" duplicates were found and deleted (Currently at "+str(len(df))+" entries)")
#Divide the register date and due date
df['register_date'],df['duedate'] = df['입력일시(입찰마감일시)'].str.split('(', 1).str
df['duedate']=df['duedate'].str.replace(')','').replace('-','')
df = df.drop('입력일시(입찰마감일시)',axis=1)
#Sort the values by duedate. To sort by a different value, replace 'duedate' in the following line with the column name you want to sort by.
column_sort = 'duedate'
df = df.sort_values(by=column_sort,ascending=False)
print("Values are sorted by the column '"+column_sort+"'. To change this, please talk to the tool owner. ")
return df
# In[45]:
def filter_prioritize(df,filter_list,column):
new_df = df[df[column].isin(filter_list)].copy()
new_df[str(column+"_sorted")] = pd.Categorical(new_df[column],categories=filter_list,ordered=True)
new_df = new_df.sort_values(column+"_sorted")
return new_df
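# Illustrative example (added for clarity, hypothetical data): keep only the
# rows whose organization is in the given list, ordered to match that list.
def _example_filter_prioritize():
    sample = pd.DataFrame({'공고기관': ['A', 'B', 'C'], '공고명': ['x', 'y', 'z']})
    return filter_prioritize(sample, ['B', 'A'], '공고기관')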
# In[54]:
#Cleaning up the df to make more sense
clean_df = clean_up(df)
# In[55]:
#Get the target organization list
org_list = txt_reader('orgs')
print("Getting the entries from target organization list: "+str(org_list).replace('[','').replace(']','').replace("'",""))
org_df = filter_prioritize(clean_df,org_list,'공고기관')
# In[56]:
class create_excel(object):
def get_length(self,column):
##
##This line is the problem!!
##
valueex = column[~column.isnull()].reset_index(drop=True)[0]
if type(valueex) == str:
if valueex.startswith('=HYPERLINK'):
return len('Click link')
else:
len_list = list(column.dropna().apply(lambda x: len(str(x))))
maxlen = max(len_list)
medlen = np.median(len_list)
meanlen = np.mean(len_list)
diff = maxlen-medlen
stdlen = np.std(len_list)
#min(A,B+C*numchars)
if maxlen < 10:
return maxlen+5
elif diff > 50:
if medlen == 0:
return min(55,meanlen+5)
return medlen
elif maxlen < 50:
return meanlen+15
else:
return 50
else:
return 5
def to_excel(self,df,name):
#Next step, format the excel file
print("saving the "+name+" list...")
docname = "나라장터_입찰공고-"+name+"-"+str(strftime("%y%m%d(%H%M%S)", localtime()))+".xlsx"
#make the destination directory, but guard against race condition
if not os.path.exists(name):
try:
os.makedirs(name)
except OSError as exc:
print(exc)
raise Exception('something failed')
writer = pd.ExcelWriter("%s/%s"%(name,docname), engine='xlsxwriter')
df.to_excel(writer,index=False,sheet_name='Sheet1')
workbook = writer.book
worksheet = writer.sheets['Sheet1']
tablerange = xl_range(0,0,len(df),len(df.columns)-1)
headerrange = xl_range(0,0,0,len(df.columns)-1)
contentrange = xl_range(1,0,len(df),len(df.columns)-1)
#Formatting headers
header_format = workbook.add_format({'bg_color':'black'})
column_format = workbook.add_format({'bottom':True,'bg_color':'white'})
link_format = workbook.add_format({'font_color':'#157993','underline':True})
# Set the column width and format.
columns = []
widths = []
for i in range(0,len(df.columns)):
a = xl_col_to_name(i)+":"+xl_col_to_name(i)
columns.append(a)
widths.append(self.get_length(df[df.columns[i]]))
for c,w in zip(columns,widths):
worksheet.set_column(c, w)
worksheet.conditional_format(contentrange,{'type':'no_errors',
'format':column_format})
worksheet.conditional_format(headerrange,{'type':'no_errors',
'format':header_format})
worksheet.conditional_format(tablerange,{'type':'text',
'criteria':'containing',
'value':'Click link',
'format':link_format})
#Formatting for putting in the header titles
table_headers = [{'header':c} for c in df.columns]
#Create a table with the data
worksheet.add_table(tablerange,{'columns' : table_headers})
writer.save()
return
# In[57]:
go_to_excel = create_excel()
# In[58]:
go_to_excel.to_excel(clean_df,'full')
# In[59]:
go_to_excel.to_excel(org_df,'orgs')
# In[60]:
print ('All done! Please hit Enter to exit this command prompt. ')
input()
# In[ ]:
|
mit
| 7,930,596,444,500,421,000
| 32.516245
| 285
| 0.588216
| false
| 3.084385
| false
| false
| false
|
lahosken/pants
|
contrib/go/src/python/pants/contrib/go/tasks/go_checkstyle.py
|
1
|
1900
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from pants.base.exceptions import TaskError
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoFmt(GoWorkspaceTask):
"""Checks Go code matches gofmt style."""
@classmethod
def register_options(cls, register):
super(GoFmt, cls).register_options(register)
register('--skip', type=bool, fingerprint=True, help='Skip checkstyle.')
_GO_SOURCE_EXTENSION = '.go'
def _is_checked(self, target):
return target.has_sources(self._GO_SOURCE_EXTENSION) and not target.is_synthetic
def execute(self):
if self.get_options().skip:
return
targets = self.context.targets(self._is_checked)
with self.invalidated(targets) as invalidation_check:
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
sources = self.calculate_sources(invalid_targets)
if sources:
args = [os.path.join(self.go_dist.goroot, 'bin', 'gofmt'), '-l'] + list(sources)
try:
output = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
raise TaskError('{} failed with exit code {}'.format(' '.join(args), e.returncode),
exit_code=e.returncode)
if output:
raise TaskError('gofmt command {} failed with output {}'.format(' '.join(args), output))
def calculate_sources(self, targets):
sources = set()
for target in targets:
sources.update(source for source in target.sources_relative_to_buildroot()
if source.endswith(self._GO_SOURCE_EXTENSION))
return sources
|
apache-2.0
| 7,952,592,629,862,693,000
| 36.254902
| 98
| 0.678947
| false
| 3.941909
| false
| false
| false
|
lucidfrontier45/RethinkPool
|
tests/test_pool.py
|
1
|
1063
|
import rethinkdb as r
from future.moves.queue import Empty
from nose.tools import assert_raises
from rethinkpool import RethinkPool
def test_pool_create():
max_conns = 50
initial_conns = 10
rp = RethinkPool(max_conns=max_conns, initial_conns=initial_conns)
assert rp.current_conns == initial_conns
def test_create_connection():
initial_conns = 0
rp = RethinkPool(max_conns=10, initial_conns=initial_conns)
res = rp.get_resource()
assert rp.current_conns == (initial_conns + 1)
assert rp._queue.empty()
res.release()
assert not rp._queue.empty()
rp.get_resource()
assert not rp._queue.empty()
def test_pool_full():
n_conns = 10
rp = RethinkPool(max_conns=n_conns, initial_conns=n_conns, get_timeout=0.5)
assert rp._queue.full()
bussy_resources = [rp.get_resource() for _ in range(n_conns)]
assert rp._queue.empty()
with assert_raises(Empty):
res = rp.get_resource()
bussy_resources[0].release()
rp.get_resource()
[res.release() for res in bussy_resources]
|
apache-2.0
| 2,208,437,086,947,595,500
| 24.309524
| 79
| 0.672625
| false
| 3.301242
| false
| false
| false
|
dmanatunga/uAMP-sim
|
trace_reader.py
|
1
|
4325
|
import dateutil.parser
import events
from sim_interface import TraceReader
import json
import pickle
import gzip
class JsonTraceReader(TraceReader):
def __init__(self, filename):
self.trace_filename = filename
self.trace_logs = None
self.trace_pos = 0
self.start_time = None
self.end_time = None
def build(self):
if self.trace_filename.endswith('.json'):
with open(self.trace_filename, 'r') as fp:
trace_data = json.load(fp, object_hook=events.json_decode_event)
elif self.trace_filename.endswith('.json.gz'):
with gzip.open(self.trace_filename, 'rt') as fp:
trace_data = json.load(fp, object_hook=events.json_decode_event)
else:
raise Exception('Invalid JSON file type. Expected .json or .json.gz')
# Identify start and end time of trace
self.start_time = dateutil.parser.parse(trace_data['start_time'])
self.end_time = dateutil.parser.parse(trace_data['end_time'])
# Get the list of logs in the trace
self.trace_logs = trace_data['logs']
def finish(self):
pass
def get_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
self.trace_pos += 1
return event
def peek_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
return event
def end_of_trace(self):
return self.trace_pos >= len(self.trace_logs)
def get_events(self, count):
events_list = []
for i in range(count):
event = self.get_event()
if event:
events_list.append(event)
else:
break
return events_list
def get_start_time(self):
return self.start_time
def get_end_time(self):
return self.end_time
class PickleTraceReader(TraceReader):
def __init__(self, filename):
self.trace_filename = filename
self.trace_logs = None
self.trace_pos = 0
self.start_time = None
self.end_time = None
def build(self):
if self.trace_filename.endswith('.pkl'):
with open(self.trace_filename, 'rb') as fp:
trace_data = pickle.load(fp)
elif self.trace_filename.endswith('.pkl.gz'):
with gzip.open(self.trace_filename, 'rb') as fp:
trace_data = pickle.load(fp)
else:
raise Exception('Invalid pickle file type. Expected .pkl or .pkl.gz')
# Identify start and end time of trace
self.start_time = trace_data['start_time']
self.end_time = trace_data['end_time']
# Get the list of logs in the trace
self.trace_logs = trace_data['logs']
def finish(self):
pass
def get_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
self.trace_pos += 1
return event
def peek_event(self):
if self.end_of_trace():
return None
event = self.trace_logs[self.trace_pos]
return event
def end_of_trace(self):
return self.trace_pos >= len(self.trace_logs)
def get_events(self, count):
events_list = []
for i in range(count):
event = self.get_event()
if event:
events_list.append(event)
else:
break
return events_list
def get_start_time(self):
return self.start_time
def get_end_time(self):
return self.end_time
def get_trace_reader(filename, trace_type=None):
if trace_type:
if trace_type == 'json':
return JsonTraceReader(filename=filename)
elif trace_type == 'pickle':
return PickleTraceReader(filename=filename)
else:
raise Exception("Invalid Trace File Type")
else:
if filename.endswith('.json') or filename.endswith('.json.gz'):
return JsonTraceReader(filename=filename)
elif filename.endswith('.pkl') or filename.endswith('.pkl.gz'):
return PickleTraceReader(filename=filename)
else:
raise Exception("Invalid Trace File Type")
|
mit
| -3,759,522,340,739,681,000
| 27.833333
| 81
| 0.578497
| false
| 3.982505
| false
| false
| false
|
bt3gl/Numerical-Methods-for-Physics
|
homework3_linear_algebra_FFT/condition_number/gaussElimination.py
|
1
|
2113
|
"""
This module calculates a linear system by Gaussian elimination with pivoting.
Almost a copy of Mike Zingale's code, spring 2013.
"""
import numpy as npy
def gaussElim(A, b):
""" perform gaussian elimination with pivoting, solving A x = b A
is an NxN matrix, x and b are an N-element vectors. Note: A
and b are changed upon exit to be in upper triangular (row
echelon) form """
# b is a vector
if not b.ndim == 1:
print "ERROR: b should be a vector"
return None
N = len(b)
# A is square, with each dimension of length N
if not (A.shape[0] == N and A.shape[1] == N):
print "ERROR: A should be square with each dim of same length as b"
return None
# allocation the solution array
x = npy.zeros((N), dtype=A.dtype)
# find the scale factors for each row -- this is used when pivoting
scales = npy.max(npy.abs(A), 1)
# keep track of the number of times we swapped rows
numRowSwap = 0
# main loop over rows
for k in range(N):
# find the pivot row based on the size of column k -- only consider
# the rows beyond the current row
rowMax = npy.argmax(A[k:, k]/scales[k:])
if (k > 0): rowMax += k # we sliced A from k:, correct for total rows
# swap the row with the largest scaled element in the current column
# with the current row (pivot) -- do this with b too!
if not rowMax == k:
A[[k, rowMax],:] = A[[rowMax, k],:]
b[[k, rowMax]] = b[[rowMax, k]]
numRowSwap += 1
# do the forward-elimination for all rows below the current
for i in range(k+1, N):
coeff = A[i,k]/A[k,k]
for j in range(k+1, N):
A[i,j] += -A[k,j]*coeff
A[i,k] = 0.0
b[i] += -coeff*b[k]
# last solution is easy
x[N-1] = b[N-1]/A[N-1,N-1]
for i in reversed(range(N-1)):
isum = b[i]
for j in range(i+1,N):
isum += -A[i,j]*x[j]
x[i] = isum/A[i,i]
return x
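# Hedged usage sketch (added, not in the original module): solve a small 3x3
# system and print the result. The numbers are illustrative only.
if __name__ == "__main__":
    A = npy.array([[ 2.0,  1.0, -1.0],
                   [-3.0, -1.0,  2.0],
                   [-2.0,  1.0,  2.0]])
    b = npy.array([8.0, -11.0, -3.0])
    x = gaussElim(A, b)   # note: gaussElim modifies A and b in place
    print(x)              # expected to be close to [2, 3, -1]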
|
apache-2.0
| -48,295,339,072,040,350
| 26.802632
| 81
| 0.550402
| false
| 3.338073
| false
| false
| false
|
seem-sky/newspaper
|
newspaper/nlp.py
|
1
|
5105
|
# -*- coding: utf-8 -*-
"""
Anything natural language related should be abstracted into this file.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
import re
import math
import operator
from collections import Counter
from . import settings
with open(settings.NLP_STOPWORDS_EN, 'r') as f:
stopwords = set([w.strip() for w in f.readlines()])
ideal = 20.0
def summarize(url='', title='', text=''):
if (text == '' or title == ''):
return []
if isinstance(title, unicode):
title = title.encode('utf-8', 'ignore')
if isinstance(text, unicode):
text = text.encode('utf-8', 'ignore')
summaries = []
sentences = split_sentences(text)
keys = keywords(text)
titleWords = split_words(title)
    # Score sentences, and use the top 5 sentences
ranks = score(sentences, titleWords, keys).most_common(5)
for rank in ranks:
summaries.append(rank[0])
return summaries
def score(sentences, titleWords, keywords):
"""Score sentences based on different features
"""
senSize = len(sentences)
ranks = Counter()
for i, s in enumerate(sentences):
sentence = split_words(s)
titleFeature = title_score(titleWords, sentence)
sentenceLength = length_score(len(sentence))
sentencePosition = sentence_position(i+1, senSize)
sbsFeature = sbs(sentence, keywords)
dbsFeature = dbs(sentence, keywords)
frequency = (sbsFeature + dbsFeature) / 2.0 * 10.0
# Weighted average of scores from four categories
totalScore = (titleFeature*1.5 + frequency*2.0 +
sentenceLength*1.0 + sentencePosition*1.0)/4.0
ranks[s] = totalScore
return ranks
def sbs(words, keywords):
score = 0.0
if (len(words) == 0):
return 0
for word in words:
if word in keywords:
score += keywords[word]
return (1.0 / math.fabs(len(words)) * score)/10.0
def dbs(words, keywords):
if (len(words) == 0):
return 0
summ = 0
first = []
second = []
for i, word in enumerate(words):
if word in keywords:
score = keywords[word]
if first == []:
first = [i, score]
else:
second = first
first = [i, score]
dif = first[0] - second[0]
summ += (first[1]*second[1]) / (dif ** 2)
# Number of intersections
k = len(set(keywords.keys()).intersection(set(words)))+1
return (1/(k*(k+1.0))*summ)
def split_words(text):
"""Split a string into array of words
"""
try:
text = re.sub(r'[^\w ]', '', text) # strip special chars
return [x.strip('.').lower() for x in text.split()]
except TypeError:
return None
def keywords(text):
"""Get the top 10 keywords and their frequency scores ignores blacklisted
words in stopwords, counts the number of occurrences of each word, and
sorts them in reverse natural order (so descending) by number of
occurrences.
"""
text = split_words(text)
# of words before removing blacklist words
num_words = len(text)
text = [x for x in text if x not in stopwords]
freq = Counter()
for word in text:
freq[word] += 1
min_size = min(10, len(freq))
keywords = tuple(freq.most_common(min_size))
keywords = dict((x, y) for x, y in keywords)
for k in keywords:
articleScore = keywords[k]*1.0 / max(num_words, 1)
keywords[k] = articleScore * 1.5 + 1
keywords = sorted(keywords.iteritems(), key=operator.itemgetter(1))
keywords.reverse()
return dict(keywords)
def split_sentences(text):
"""Split a large string into sentences
"""
import nltk.data
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = tokenizer.tokenize(text)
sentences = [x.replace('\n', '') for x in sentences if len(x) > 10]
return sentences
def length_score(sentence_len):
return 1 - math.fabs(ideal - sentence_len) / ideal
def title_score(title, sentence):
title = [x for x in title if x not in stopwords]
count = 0.0
for word in sentence:
if (word not in stopwords and word in title):
count += 1.0
return count / max(len(title), 1)
def sentence_position(i, size):
"""Different sentence positions indicate different
probability of being an important sentence.
"""
normalized = i * 1.0 / size
if (normalized > 1.0):
return 0
elif (normalized > 0.9):
return 0.15
elif (normalized > 0.8):
return 0.04
elif (normalized > 0.7):
return 0.04
elif (normalized > 0.6):
return 0.06
elif (normalized > 0.5):
return 0.04
elif (normalized > 0.4):
return 0.05
elif (normalized > 0.3):
return 0.08
elif (normalized > 0.2):
return 0.14
elif (normalized > 0.1):
return 0.23
elif (normalized > 0):
return 0.17
else:
return 0
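# Hedged usage sketch (added, not part of the original module). It requires
# the NLTK punkt tokenizer data and the project's stopword list to be
# available; the title and text below are made-up placeholders.
if __name__ == '__main__':
    sample_title = 'Example article title'
    sample_text = ('The first sentence of a pretend article about an example topic. '
                   'A second sentence that adds a few more words to the body. '
                   'A third sentence that repeats the example article title words.')
    for sentence in summarize(title=sample_title, text=sample_text):
        print(sentence)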
|
mit
| 8,296,216,312,425,050,000
| 26.446237
| 77
| 0.59667
| false
| 3.677954
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/idlelib/MultiCall.py
|
1
|
14654
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: MultiCall.py
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import string
import re
import Tkinter
from idlelib import macosxSupport
MC_KEYPRESS = 0
MC_KEYRELEASE = 1
MC_BUTTONPRESS = 2
MC_BUTTONRELEASE = 3
MC_ACTIVATE = 4
MC_CIRCULATE = 5
MC_COLORMAP = 6
MC_CONFIGURE = 7
MC_DEACTIVATE = 8
MC_DESTROY = 9
MC_ENTER = 10
MC_EXPOSE = 11
MC_FOCUSIN = 12
MC_FOCUSOUT = 13
MC_GRAVITY = 14
MC_LEAVE = 15
MC_MAP = 16
MC_MOTION = 17
MC_MOUSEWHEEL = 18
MC_PROPERTY = 19
MC_REPARENT = 20
MC_UNMAP = 21
MC_VISIBILITY = 22
MC_SHIFT = 1
MC_CONTROL = 4
MC_ALT = 8
MC_META = 32
MC_OPTION = 64
MC_COMMAND = 128
if macosxSupport.runningAsOSXApp():
_modifiers = (
('Shift', ), ('Control', ), ('Option', ), ('Command', ))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
_modifiers = (
('Control', ), ('Alt', ), ('Shift', ), ('Meta', 'M'))
_modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
_modifier_names = dict([ (name, number) for number in range(len(_modifiers)) for name in _modifiers[number]
])
class _SimpleBinder:
def __init__(self, type, widget, widgetinst):
self.type = type
self.sequence = '<' + _types[type][0] + '>'
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = []
self.handlerid = None
return
def bind(self, triplet, func):
if not self.handlerid:
def handler(event, l=self.bindedfuncs, mc_type=self.type):
event.mc_type = mc_type
wascalled = {}
for i in range(len(l) - 1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = func(event)
if r:
return r
self.handlerid = self.widget.bind(self.widgetinst, self.sequence, handler)
self.bindedfuncs.append(func)
def unbind(self, triplet, func):
self.bindedfuncs.remove(func)
if not self.bindedfuncs:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
self.handlerid = None
return
def __del__(self):
if self.handlerid:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
_states = range(1 << len(_modifiers))
_state_names = [ ''.join((m[0] + '-' for i, m in enumerate(_modifiers) if 1 << i & s))
for s in _states
]
def expand_substates(states):
"""For each item of states return a list containing all combinations of
that item with individual bits reset, sorted by the number of set bits.
"""
def nbits(n):
"""number of bits set in n base 2"""
nb = 0
while n:
n, rem = divmod(n, 2)
nb += rem
return nb
statelist = []
for state in states:
substates = list(set((state & x for x in states)))
substates.sort(key=nbits, reverse=True)
statelist.append(substates)
return statelist
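# Illustrative note (added): for a state made of the Shift and Control bits,
# expand_substates lists its sub-states most specific first, i.e. roughly
# [Shift+Control, Shift, Control, no-modifier]; the ordering among states with
# the same number of set bits is not significant.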
_state_subsets = expand_substates(_states)
_state_codes = []
for s in _states:
r = 0
for i in range(len(_modifiers)):
if 1 << i & s:
r |= _modifier_masks[i]
_state_codes.append(r)
class _ComplexBinder:
def __create_handler(self, lists, mc_type, mc_state):
def handler(event, lists=lists, mc_type=mc_type, mc_state=mc_state, ishandlerrunning=self.ishandlerrunning, doafterhandler=self.doafterhandler):
ishandlerrunning[:] = [
True]
event.mc_type = mc_type
event.mc_state = mc_state
wascalled = {}
r = None
for l in lists:
for i in range(len(l) - 1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = l[i](event)
if r:
break
if r:
break
ishandlerrunning[:] = []
while doafterhandler:
doafterhandler.pop()()
if r:
return r
else:
return
return handler
def __init__(self, type, widget, widgetinst):
self.type = type
self.typename = _types[type][0]
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = {None: [ [] for s in _states ]}
self.handlerids = []
self.ishandlerrunning = []
self.doafterhandler = []
for s in _states:
lists = [ self.bindedfuncs[None][i] for i in _state_subsets[s] ]
handler = self.__create_handler(lists, type, _state_codes[s])
seq = '<' + _state_names[s] + self.typename + '>'
self.handlerids.append((seq,
self.widget.bind(self.widgetinst, seq, handler)))
return
def bind(self, triplet, func):
if triplet[2] not in self.bindedfuncs:
self.bindedfuncs[triplet[2]] = [ [] for s in _states ]
for s in _states:
lists = [ self.bindedfuncs[detail][i] for detail in (
triplet[2], None) for i in _state_subsets[s] ]
handler = self.__create_handler(lists, self.type, _state_codes[s])
seq = '<%s%s-%s>' % (_state_names[s], self.typename, triplet[2])
self.handlerids.append((seq,
self.widget.bind(self.widgetinst, seq, handler)))
doit = lambda : self.bindedfuncs[triplet[2]][triplet[0]].append(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
return
def unbind(self, triplet, func):
doit = lambda : self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def __del__(self):
for seq, id in self.handlerids:
self.widget.unbind(self.widgetinst, seq, id)
_types = (
('KeyPress', 'Key'), ('KeyRelease', ), ('ButtonPress', 'Button'),
('ButtonRelease', ), ('Activate', ), ('Circulate', ), ('Colormap', ),
('Configure', ), ('Deactivate', ), ('Destroy', ), ('Enter', ), ('Expose', ),
('FocusIn', ), ('FocusOut', ), ('Gravity', ), ('Leave', ), ('Map', ),
('Motion', ), ('MouseWheel', ), ('Property', ), ('Reparent', ), ('Unmap', ),
('Visibility', ))
_binder_classes = (
_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types) - 4)
_type_names = dict([ (name, number) for number in range(len(_types)) for name in _types[number]
])
_keysym_re = re.compile('^\\w+$')
_button_re = re.compile('^[1-5]$')
def _parse_sequence(sequence):
"""Get a string which should describe an event sequence. If it is
successfully parsed as one, return a tuple containing the state (as an int),
the event type (as an index of _types), and the detail - None if none, or a
string if there is one. If the parsing is unsuccessful, return None.
"""
if not sequence or sequence[0] != '<' or sequence[-1] != '>':
return
else:
words = string.split(sequence[1:-1], '-')
modifiers = 0
while words and words[0] in _modifier_names:
modifiers |= 1 << _modifier_names[words[0]]
del words[0]
if words and words[0] in _type_names:
type = _type_names[words[0]]
del words[0]
else:
return
if _binder_classes[type] is _SimpleBinder:
if modifiers or words:
return
detail = None
else:
if type in [ _type_names[s] for s in ('KeyPress', 'KeyRelease') ]:
type_re = _keysym_re
else:
type_re = _button_re
if not words:
detail = None
elif len(words) == 1 and type_re.match(words[0]):
detail = words[0]
else:
return
return (
modifiers, type, detail)
def _triplet_to_sequence(triplet):
if triplet[2]:
return '<' + _state_names[triplet[0]] + _types[triplet[1]][0] + '-' + triplet[2] + '>'
else:
return '<' + _state_names[triplet[0]] + _types[triplet[1]][0] + '>'
_multicall_dict = {}
def MultiCallCreator(widget):
"""Return a MultiCall class which inherits its methods from the
given widget class (for example, Tkinter.Text). This is used
instead of a templating mechanism.
"""
if widget in _multicall_dict:
return _multicall_dict[widget]
class MultiCall(widget):
def __init__(self, *args, **kwargs):
widget.__init__(self, *args, **kwargs)
self.__eventinfo = {}
self.__binders = [ _binder_classes[i](i, widget, self) for i in range(len(_types))
]
def bind(self, sequence=None, func=None, add=None):
if type(sequence) is str and len(sequence) > 2 and sequence[:2] == '<<' and sequence[-2:] == '>>':
if sequence in self.__eventinfo:
ei = self.__eventinfo[sequence]
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].unbind(triplet, ei[0])
ei[0] = func
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].bind(triplet, func)
else:
self.__eventinfo[sequence] = [
func, []]
return widget.bind(self, sequence, func, add)
def unbind(self, sequence, funcid=None):
if type(sequence) is str and len(sequence) > 2 and sequence[:2] == '<<' and sequence[-2:] == '>>' and sequence in self.__eventinfo:
func, triplets = self.__eventinfo[sequence]
if func is not None:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
self.__eventinfo[sequence][0] = None
return widget.unbind(self, sequence, funcid)
def event_add(self, virtual, *sequences):
if virtual not in self.__eventinfo:
self.__eventinfo[virtual] = [
None, []]
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
widget.event_add(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].bind(triplet, func)
triplets.append(triplet)
return
def event_delete(self, virtual, *sequences):
if virtual not in self.__eventinfo:
return
else:
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
widget.event_delete(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].unbind(triplet, func)
triplets.remove(triplet)
return
def event_info(self, virtual=None):
if virtual is None or virtual not in self.__eventinfo:
return widget.event_info(self, virtual)
else:
return tuple(map(_triplet_to_sequence, self.__eventinfo[virtual][1])) + widget.event_info(self, virtual)
return
def __del__(self):
for virtual in self.__eventinfo:
func, triplets = self.__eventinfo[virtual]
if func:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
_multicall_dict[widget] = MultiCall
return MultiCall
if __name__ == '__main__':
root = Tkinter.Tk()
text = MultiCallCreator(Tkinter.Text)(root)
text.pack()
def bindseq(seq, n=[0]):
def handler(event):
print seq
text.bind('<<handler%d>>' % n[0], handler)
text.event_add('<<handler%d>>' % n[0], seq)
n[0] += 1
bindseq('<Key>')
bindseq('<Control-Key>')
bindseq('<Alt-Key-a>')
bindseq('<Control-Key-a>')
bindseq('<Alt-Control-Key-a>')
bindseq('<Key-b>')
bindseq('<Control-Button-1>')
bindseq('<Alt-Button-1>')
bindseq('<FocusOut>')
bindseq('<Enter>')
bindseq('<Leave>')
root.mainloop()
|
unlicense
| 1,765,385,453,857,526,300
| 33.64539
| 152
| 0.556435
| false
| 3.825111
| false
| false
| false
|
ilveroluca/seal
|
seal/dist_bcl2qseq.py
|
1
|
9909
|
# Copyright (C) 2011-2012 CRS4.
#
# This file is part of Seal.
#
# Seal is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Seal is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with Seal. If not, see <http://www.gnu.org/licenses/>.
"""
pydoop script to drive Illumina's bclToQseq program and
convert BCL files to Qseq.
Works in tandem with automator.bcl2qseq_mr. This program *needs direct
access to sequencer's run directory*. It will generate a file listing all the
tiles to be converted, with relative file paths. In turn, this file will be
processed by the distributed component that runs on Hadoop.
"""
import argparse
import logging
import os
import subprocess
import sys
import tempfile
import urlparse
import seal.lib.illumina_run_dir as ill
import seal.bcl2qseq_mr as bcl2qseq_mr
import pydoop.hdfs as hdfs
def serialize_cmd_data(cmd_dict):
def serialize_item(k,v):
# replace None values with empty strings
k = k or ''
v = v or ''
# if there are "illegal" characters raise an exception
        if ':' in k or ';' in k or ':' in v or ';' in v:
            raise RuntimeError("datum '%s' contains ':' or ';'. Can't serialize!" % (k + ' ' + v))
return "%s:%s;" % (k,v)
return ''.join(serialize_item(k,v) for k,v in cmd_dict.iteritems())
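# Illustrative note (added, not in the original): serialize_cmd_data turns a
# dict such as {'--lane': '1', '--tile': '1101'} into a "key:value;" string,
# e.g. "--lane:1;--tile:1101;" (the key order follows dict iteration order).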
class DistBcl2QseqDriver(object):
def __init__(self, options):
self.log = logging.getLogger('DistBcl2Qseq')
self.log.setLevel(logging.DEBUG)
executable = options.bclToQseq_path or self.find_exec('bclToQseq')
if not executable:
self.log.warning("Can't find bclToQseq in PATH. Will try to run anyways...")
executable = 'bclToQseq'
self.options = {
'bclToQseq': executable,
'append_ld_library_path': options.append_ld_library_path or '',
'ignore_missing_bcl': options.ignore_missing_bcl,
'ignore_missing_control': options.ignore_missing_control,
'exclude_controls': options.exclude_controls,
'no_eamss': options.no_eamss
}
u = urlparse.urlparse(options.run_dir)
if u.scheme and u.scheme != 'file':
raise RuntimeError("Sorry! Current implementation requires that " +
"the run directory be on a mounted file system (scheme %s not supported)" % u.scheme)
self.run_dir = ill.RunDir(u.path)
# collect necessary info
self.run_params = self.run_dir.get_run_parameters()
if hdfs.path.exists(options.output_dir):
raise RuntimeError("output path %s already exists." % options.output_dir)
self.output_path = options.output_dir
def __write_mr_input(self, fd):
"""
Write parameters for all the file conversions to be done in a format
suitable for our map-reduce helper script.
Returns the number of records written.
"""
# commands are written one per line, in a form suitable for execution via sh. If module loading
# is required, it is inserted at the start of the command line, followed by && and finally the bclToQseq call.
conversion_params = {
'bclToQseq': self.options['bclToQseq'],
'ld_library_path': self.options['append_ld_library_path'],
'--exclude-controls': '',
'--repeat': '1',
'--instrument': self.run_params.setup['ComputerName'],
'--run-id': self.run_params.setup['ScanNumber'],
'--input-directory': self.run_dir.get_base_calls_dir(),
}
# For the following arguments, we don't want them to be in the conversion_params
# dictionary unless they're set
if self.options['ignore_missing_bcl']:
conversion_params['--ignore-missing-bcl'] = None
if self.options['ignore_missing_control']:
conversion_params['--ignore-missing-control'] = None
if self.options['exclude_controls']:
conversion_params['--exclude-controls'] = None
if self.options['no_eamss']:
conversion_params['--no-eamss'] = None
count = 0
for lane in self.run_params.get_lanes():
conversion_params['--lane'] = str(lane)
for read in self.run_params.get_reads():
conversion_params['--read'] = str(read.num)
conversion_params['--first-cycle'] = str(read.first_cycle)
conversion_params['--number-of-cycles'] = str(read.last_cycle - read.first_cycle + 1)
for tile in self.run_params.iget_simple_tile_codes():
conversion_params['--tile'] = str(tile)
                    # set filter, control, positions files
conversion_params['--filter-file'] = self.run_dir.make_filter_path(lane, tile)
conversion_params['--control-file'] = self.run_dir.make_control_path(lane, tile)
conversion_params['--positions-file'] = self.run_dir.make_clocs_path(lane, tile)
# we put the standard qseq name here. The slave implementation may decide not to use it....
conversion_params['--qseq-file'] = os.path.join(self.output_path, self.run_dir.make_qseq_name(lane, tile, read.num))
fd.write(serialize_cmd_data(conversion_params))
fd.write("\n")
count += 1
return count
@staticmethod
def find_exec(exec_name):
"""
Find an executable in the current PATH.
Returns the full path to the executable, if found.
Returns None if not found.
"""
for p in os.environ.get('PATH', '').split(os.pathsep):
exec_path = os.path.join(p, exec_name)
if os.access(exec_path, os.X_OK):
return exec_path
return None
def run(self):
pydoop_exec = self.find_exec('pydoop')
if pydoop_exec is None:
raise RuntimeError("Can't find pydoop executable in PATH")
with tempfile.NamedTemporaryFile() as f:
num_records = self.__write_mr_input(f)
f.flush()
self.log.debug("Wrote temp input file %s", f.name)
input_filename = tempfile.mktemp(dir=os.path.dirname(self.output_path), prefix="dist_bcl2qseq_input")
tmpfile_uri = "file://%s" % f.name
try:
self.log.debug("copying input from %s to %s", tmpfile_uri, input_filename)
hdfs.cp(tmpfile_uri, input_filename)
self.log.info("Run analyzed. Launching distributed job")
# launch mr task
cmd = [ 'pydoop', 'script', '--num-reducers', '0', '--kv-separator', '',
'-Dmapred.map.tasks=%d' % num_records,
'-Dmapred.input.format.class=org.apache.hadoop.mapred.lib.NLineInputFormat',
'-Dmapred.line.input.format.linespermap=1',
bcl2qseq_mr.__file__,
input_filename,
self.output_path]
self.log.debug(str(cmd))
subprocess.check_call(cmd)
self.log.info("Distributed job complete")
except subprocess.CalledProcessError as e:
self.log.exception(e)
self.log.error("Error running pydoop script component")
raise
finally:
try:
hdfs.rmr(input_filename)
except IOError as e:
self.log.debug("Problem cleaning up. Error deleting temporary input file %s", input_filename)
self.log.debug(str(e))
def main(args=None):
from seal import logformat
parser = argparse.ArgumentParser(description="Distributed bcl2qseq.")
parser.add_argument("-l", "--logfile", metavar="FILE", help="Write log output to a file")
parser.add_argument("--bclToQseq-path", metavar="PATH",
help="Full path to the bclToQseq binary. Needed only if it's not in the PATH")
parser.add_argument("--append-ld-library-path", metavar="PATHLIST",
help="If you need to append to the value of LD_LIBRARY_PATH to run the Illumina executable, use this argument")
parser.add_argument("--ignore-missing-bcl", action='store_true',
help="interprets missing *.bcl files as a base calling of '.'")
parser.add_argument("--ignore-missing-control", action='store_true',
help="don't throw an error when *.control files are missing")
parser.add_argument("--exclude-controls", action='store_true',
help="do not include clusters that are used as controls")
parser.add_argument("--no-eamss", action='store_true',
help="do not apply the EAMSS masking on the quality values")
parser.add_argument('run_dir', help="Illumina run directory to process")
parser.add_argument('output_dir', help="Path where the output qseq files should be created")
options = parser.parse_args(args)
if options.logfile:
logging.basicConfig(format=logformat, filename=options.logfile)
else:
logging.basicConfig(format=logformat)
try:
driver = DistBcl2QseqDriver(options)
except StandardError as e:
logging.critical("Error initializing")
if e.message:
logging.exception(e)
return 1
try:
driver.run()
return 0
except RuntimeError as e:
return 2
|
gpl-3.0
| -2,729,236,264,081,831,000
| 43.635135
| 136
| 0.610455
| false
| 3.933704
| false
| false
| false
|
vessemer/concept-to-clinic
|
interface/config/settings/base.py
|
1
|
3697
|
"""
Adapted from pydanny/django-cookiecutter
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import environ
BASE_DIR = environ.Path(__file__) - 3
APPS_DIR = BASE_DIR.path('backend')
# Datasource from where the images will be loaded initially
DATASOURCE_DIR = '/images'
IMAGE_EXTENSIONS = [
'.dcm',
]
env = environ.Env()
env.read_env(str(BASE_DIR.path('.env')))
DEBUG = env.bool('DEBUG', default=False)
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env('SECRET_KEY')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party
'rest_framework',
# Project specific
'backend.images',
'backend.cases',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(BASE_DIR.path('frontend/templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# CSRF Token
CSRF_COOKIE_NAME = 'csrftoken'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = str(BASE_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(BASE_DIR.path('frontend/static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Project specific
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny'
]
}
try:
with open('/HEAD') as f:
APP_VERSION_NUMBER = f.readlines()[-1].split(' ')[1][:7]
except (IOError, IndexError):
APP_VERSION_NUMBER = '(unknown)'
|
mit
| 3,250,317,145,646,073,000
| 25.219858
| 98
| 0.675412
| false
| 3.585839
| false
| false
| false
|
nicoboss/Floatmotion
|
OpenGLLibrary/glLibOBJLoad.py
|
1
|
3861
|
import pygame, os
from OpenGL.GL import *
def MTL(filename):
contents = {}
mtl = None
for line in open(filename, "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'newmtl':
mtl = contents[values[1]] = {}
elif mtl is None:
raise ValueError, "mtl file doesn't start with newmtl stmt"
elif values[0] == 'map_Kd':
# load the texture referred to by this declaration
mtl[values[0]] = values[1]
surf = pygame.image.load(mtl['map_Kd'])
image = pygame.image.tostring(surf, 'RGBA', 1)
ix, iy = surf.get_rect().size
texid = mtl['texture_Kd'] = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texid)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA,
GL_UNSIGNED_BYTE, image)
else:
mtl[values[0]] = map(float, values[1:])
return contents
class OBJ:
def __init__(self, filename, swapyz=False):
"""Loads a Wavefront OBJ file. """
self.vertices = []
self.normals = []
self.texcoords = []
self.faces = []
filename = filename.split("/")
self.mtl = MTL(os.path.join(*filename[:-1]+[filename[-1][:-4]+".mtl"]))
material = None
for line in open(os.path.join(*filename), "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
v = map(float, values[1:4])
if swapyz:
v = v[0], v[2], v[1]
self.vertices.append(v)
elif values[0] == 'vn':
v = map(float, values[1:4])
if swapyz:
v = v[0], v[2], v[1]
self.normals.append(v)
elif values[0] == 'vt':
self.texcoords.append(map(float, values[1:3]))
elif values[0] in ('usemtl', 'usemat'):
material = values[1]
elif values[0] == 'mtllib':
continue
elif values[0] == 'f':
face = []
texcoords = []
norms = []
for v in values[1:]:
w = v.split('/')
face.append(int(w[0]))
if len(w) >= 2 and len(w[1]) > 0:
texcoords.append(int(w[1]))
else:
texcoords.append(0)
if len(w) >= 3 and len(w[2]) > 0:
norms.append(int(w[2]))
else:
norms.append(0)
self.faces.append((face, norms, texcoords, material))
self.gl_list = glGenLists(1)
glNewList(self.gl_list, GL_COMPILE)
for face in self.faces:
vertices, normals, texture_coords, material = face
mtl = self.mtl[material]
if 'texture_Kd' in mtl:
# use diffuse texmap
glBindTexture(GL_TEXTURE_2D, mtl['texture_Kd'])
else:
# just use diffuse colour
glColor(*mtl['Kd'])
glBegin(GL_POLYGON)
for i in range(0, len(vertices)):
if normals[i] > 0:
glNormal3fv(self.normals[normals[i] - 1])
if texture_coords[i] > 0:
glTexCoord2fv(self.texcoords[texture_coords[i] - 1])
glVertex3fv(self.vertices[vertices[i] - 1])
glEnd()
glColor3f(1,1,1)
glEndList()
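# Hedged usage sketch (added, not part of the original module): the loader is
# normally used from code that already owns an OpenGL context, e.g.
#
#     model = OBJ("data/model.obj", swapyz=True)   # placeholder path
#     ...
#     glCallList(model.gl_list)                    # inside the render loop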
|
agpl-3.0
| -8,005,413,216,815,216,000
| 36.125
| 79
| 0.465682
| false
| 3.726834
| false
| false
| false
|
akshaybabloo/Python-QT-5-Tutorial
|
2_QTDesigner/2_8_SimpleFormDesigner/SimpleFormDesigner.py
|
1
|
1647
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SimpleFormDesigner.ui'
#
# Created by: PyQt5 UI code generator 5.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(640, 249)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(150, 80, 401, 101))
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.widget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.edit = QtWidgets.QLineEdit(self.widget)
self.edit.setObjectName("edit")
self.horizontalLayout.addWidget(self.edit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.change = QtWidgets.QLabel(self.widget)
self.change.setObjectName("change")
self.verticalLayout.addWidget(self.change)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Full Name"))
self.change.setText(_translate("Dialog", "TextLabel"))
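# Hedged usage sketch (added; Qt Designer output normally omits this): the
# generated Ui_Dialog is attached to a plain QDialog before showing it.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())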
|
mit
| -401,647,907,074,165,570
| 39.170732
| 76
| 0.695811
| false
| 4.311518
| false
| false
| false
|
michael-christen/repo-monitor
|
repo_monitor/python/parsers.py
|
1
|
3438
|
import argparse
from .deserializers import CoverageDeserializer
from .deserializers import NosetestDeserializer
from .deserializers import RadonDeserializer
class CoverageParser(object):
def __init__(self):
self.base_parser = argparse.ArgumentParser(
description='Get Python Coverage',
)
self.base_parser.add_argument(
'--file',
default='coverage.xml',
help='Coverage File')
self.base_parser.add_argument(
'--num_decimals',
default=0,
help='Number of decimals to output')
def run(self, args):
parsed_args = self.base_parser.parse_args(args)
with open(parsed_args.file, 'r') as f:
line_rate = CoverageDeserializer(f.read()).line_rate
format_string = '{:.' + str(parsed_args.num_decimals) + 'f}%'
coverage_string = format_string.format(100 * line_rate)
print coverage_string
return coverage_string
class NosetestParser(object):
def __init__(self):
self.base_parser = argparse.ArgumentParser(
description='Get Python Test Output Metrics',
)
self.base_parser.add_argument(
'metric',
choices=['time', 'num_tests', 'test2time'],
help='Metric to gather')
self.base_parser.add_argument(
'--file',
default='nosetests.xml',
help='Test Output File')
def run(self, args):
parsed_args = self.base_parser.parse_args(args)
with open(parsed_args.file, 'r') as f:
data = f.read()
nosetest_data = NosetestDeserializer(data)
metric = getattr(nosetest_data, parsed_args.metric)
output_str = ''
if isinstance(metric, dict):
test_list = ['{} {}'.format(k, v) for k, v in metric.viewitems()]
output_str = '\n'.join(test_list)
else:
output_str = '{}'.format(metric)
print output_str
return output_str
class RadonParser(object):
def __init__(self):
self.base_parser = argparse.ArgumentParser(
description='Get Code Quality Metrics',
)
self.base_parser.add_argument(
'metric',
choices=['lloc', 'cc', 'mi'],
help='Metric to gather')
self.base_parser.add_argument(
'--package',
help='Package to inspect. (Needed for cc).')
self.base_parser.add_argument(
'--raw_json',
help='JSON file with raw Radon metrics')
self.base_parser.add_argument(
'--mi_json',
help='JSON file with maintanability index Radon metrics')
def _read_file_if_available(self, file_name):
if file_name is None:
return None
with open(file_name, 'r') as f:
return f.read()
def run(self, args):
parsed_args = self.base_parser.parse_args(args)
radon_data = RadonDeserializer(
package=parsed_args.package,
raw_json=self._read_file_if_available(parsed_args.raw_json),
mi_json=self._read_file_if_available(parsed_args.mi_json),
).metric_dict
if parsed_args.metric == 'lloc':
format_str = '{:d}'
else:
format_str = '{:0.2f}'
output_str = format_str.format(radon_data[parsed_args.metric])
print output_str
return output_str
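# Hedged usage sketch (added, not part of the original module): each parser
# consumes argv-style arguments, for example
#
#     CoverageParser().run(['--file', 'coverage.xml', '--num_decimals', '1'])
#     NosetestParser().run(['num_tests', '--file', 'nosetests.xml'])
#
# The file names above are the parsers' own defaults, shown here for clarity.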
|
mit
| 3,296,597,069,242,023,400
| 33.38
| 77
| 0.57039
| false
| 3.906818
| true
| false
| false
|
aleperno/labotpfinal
|
src/labo.py
|
1
|
1051
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
import sys,os
import serial
import socket
dir = "./log.txt"
port = '/dev/ttyACM0'
ser = serial.Serial(port,9600)
def getTime():
return time.strftime("%Y;%m;%d;%H;%M;%S")
def is_valid_number(number):
"""In this case numbers higher than 100 will be considered
serial communication errors"""
if float(number) > 100:
#print "Error while validating %s ." % number
return False
else:
return True
def is_valid(data):
try:
float(data)
#Passes first test
aux = data.split('.')
        if len(aux[1]) == 2:
#Passes second test
return is_valid_number(data)
except:
#print "Error while validating %s ." % data
return False
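# Illustrative examples (added): is_valid("12.34") -> True, is_valid("123.45")
# -> False (values above 100 are treated as serial glitches), and
# is_valid("abc") -> False because it cannot be parsed as a float.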
while True:
try:
hora = getTime()
f = open(dir,"a")
p = open('/var/www/log.txt',"w+")
volt = ser.readline()
volt = volt.replace('\r\n','')
if is_valid(volt):
pass
else:
continue
print volt
s=hora+";"+volt+'\n';
f.write(s);
p.write(volt);
except KeyboardInterrupt:
print "Exited cleanly"
break
#except :
#break
|
gpl-2.0
| -4,666,988,098,183,961,000
| 17.12069
| 59
| 0.644148
| false
| 2.640704
| false
| false
| false
|
mnestis/advent2015
|
18/part1.py
|
1
|
1208
|
#!/usr/bin/python
import numpy as np
import re
import itertools
def animated_lights(input_string):
other_chars = re.compile("[^#\.]")
lights = []
for row in input_string.split("\n"):
if row == "":
continue
row = other_chars.sub("", row)
row = row.replace("#", "1")
row = row.replace(".", "0")
lights.append(map(lambda x: int(x), row))
lights = np.array(lights, dtype=int)
for i in range(100):
lights = step_lights(lights)
return np.sum(lights)
def step_lights(lights):
next_lights = np.empty(lights.shape, dtype=int)
for i, j in itertools.product(range(lights.shape[0]), range(lights.shape[1])):
x0 = max(i-1, 0)
x1 = min(i+2, lights.shape[0])
y0 = max(j-1, 0)
y1 = min(j+2, lights.shape[1])
neighbourhood = np.sum(lights[x0:x1, y0:y1])
if lights[i,j] == 1:
next_lights[i,j] = 1 if neighbourhood == 3 or neighbourhood == 4 else 0
else:
next_lights[i,j] = 1 if neighbourhood == 3 else 0
return next_lights
if __name__=="__main__":
input_string = open("input.txt").read()
print animated_lights(input_string)
|
mit
| -8,158,571,688,017,475,000
| 22.230769
| 83
| 0.56043
| false
| 3.081633
| false
| false
| false
|
mozilla-metrics/socorro-toolbox
|
src/main/python/checkimprovedskiplist.py
|
1
|
1764
|
import sys, os
file, = sys.argv[1:]
oldsignaturemap = {}
newsignaturemap = {}
for line in open(file):
line = line.rstrip('\n')
try:
oldsignature, newsignature, count, example = line.split('\t')
except ValueError:
print >>sys.stderr, "Questionable line: %r" % (line,)
continue
count = int(count)
t = count, example
oldsignaturemap.setdefault(oldsignature, {})[newsignature] = t
newsignaturemap.setdefault(newsignature, {})[oldsignature] = t
print "Signature generation report: %s" % (file,)
print
print "******"
print
print "Mappings of current signatures to new signatures"
print
items = filter(lambda i: i[0] > 5,
((sum(count for newsignature, (count, example) in newsignatures.iteritems()),
oldsignature,
newsignatures)
for oldsignature, newsignatures in oldsignaturemap.iteritems()))
items.sort(key=lambda i: i[0])
for totalcount, oldsignature, newsignatures in items:
if len(newsignatures) == 1:
newsignature, (count, example) = newsignatures.items()[0]
print "'%s' always maps to '%s' (%i : %s)" % (oldsignature, newsignature, count, example)
else:
print "'%s' maps to multiple new signatures:" % (oldsignature,)
for newsignature, (count, example) in newsignatures.items():
print " '%s' (%i : %s)" % (newsignature, count, example)
print
print "******"
print
print "New signatures which combine several old signatures"
print
for newsignature, oldsignatures in newsignaturemap.iteritems():
if len(oldsignatures) == 1: continue
print "'%s' combines multiple old signatures:" % (newsignature,)
for oldsignature, (count, example) in oldsignatures.items():
print " '%s' (%i : %s)" % (oldsignature, count, example)
|
apache-2.0
| -5,036,359,242,503,436,000
| 29.947368
| 97
| 0.663265
| false
| 3.675
| false
| false
| false
|
arseneyr/essentia
|
src/examples/python/streaming_extractor/highlevel.py
|
1
|
4041
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
import essentia.standard as standard
import essentia.streaming as streaming
import numpy
from postprocess import postProcess
def tonalPoolCleaning(pool, namespace=None):
tonalspace = 'tonal.'
if namespace: tonalspace = namespace + '.tonal.'
tuningFreq = pool[tonalspace + 'tuning_frequency'][-1]
pool.remove(tonalspace + 'tuning_frequency')
pool.set(tonalspace + 'tuning_frequency', tuningFreq)
pool.remove(tonalspace + 'hpcp_highres')
def normalize(array):
max = numpy.max(array)
return [float(val)/float(max) for val in array]
def tuningSystemFeatures(pool, namespace=''):
# expects tonal descriptors and tuning features to be in pool
tonalspace = 'tonal.'
if namespace: tonalspace = namespace + '.tonal.'
# 1-diatonic strength
hpcp_highres = normalize(numpy.mean(pool[tonalspace + 'hpcp_highres'], 0))
key,scale,strength,_ = standard.Key(profileType='diatonic')(hpcp_highres)
pool.set(tonalspace + 'tuning_diatonic_strength', strength)
# 2- high resolution features
eqTempDeviation, ntEnergy,_ = standard.HighResolutionFeatures()(hpcp_highres)
pool.set(tonalspace+'tuning_equal_tempered_deviation', eqTempDeviation)
pool.set(tonalspace+'tuning_nontempered_energy_ratio', ntEnergy)
# 3- THPCP
hpcp = normalize(numpy.mean(pool[tonalspace + 'hpcp'], 0))
hpcp_copy = hpcp[:]
idx = numpy.argmax(hpcp)
offset = len(hpcp)-idx
hpcp[:offset] = hpcp_copy[idx:offset+idx]
hpcp[offset:offset+idx] = hpcp_copy[0:idx]
pool.set(tonalspace+'thpcp', essentia.array(hpcp))
def sfxPitch(pool, namespace=''):
sfxspace = 'sfx.'
llspace = 'lowlevel.'
if namespace:
sfxspace = namespace + '.sfx.'
llspace = namespace + '.lowlevel.'
pitch = pool[llspace+'pitch']
gen = streaming.VectorInput(pitch)
maxtt = streaming.MaxToTotal()
mintt = streaming.MinToTotal()
amt = streaming.AfterMaxToBeforeMaxEnergyRatio()
gen.data >> maxtt.envelope
gen.data >> mintt.envelope
gen.data >> amt.pitch
maxtt.maxToTotal >> (pool, sfxspace+'pitch_max_to_total')
mintt.minToTotal >> (pool, sfxspace+'pitch_min_to_total')
amt.afterMaxToBeforeMaxEnergyRatio >> (pool, sfxspace+'pitch_after_max_to_before_max_energy_ratio')
essentia.run(gen)
pc = standard.Centroid(range=len(pitch)-1)(pitch)
pool.set(sfxspace+'pitch_centroid', pc)
def compute(pool, namespace=''):
# 5th pass: High-level descriptors that depend on others, but we
# don't need to stream the audio anymore
# Average Level
from level import levelAverage
levelAverage(pool, namespace)
# SFX Descriptors
sfxPitch(pool, namespace)
# Tuning System Features
tuningSystemFeatures(pool, namespace)
# Pool Cleaning (removing temporary descriptors):
tonalPoolCleaning(pool, namespace)
# Add missing descriptors which are not computed yet, but will be for the
# final release or during the 1.x cycle. However, the schema need to be
# complete before that, so just put default values for these.
postProcess(pool, namespace)
|
agpl-3.0
| -1,231,980,796,570,652,200
| 34.080357
| 103
| 0.690423
| false
| 3.489637
| false
| false
| false
|
sepol/bp-neural-net
|
python/runner.py
|
1
|
1182
|
import sys
import numpy as np
from neuralNet import neuralNet
with open('input.txt') as f:
inputs = []
for line in f:
line = line.split()
if line:
line = [float(i) for i in line]
inputs.append(line)
with open('output.txt') as f:
outputs = []
for line in f:
line = line.split()
if line:
line = [int(i) for i in line]
outputs.append(line)
input = np.array(inputs)
output = np.array(outputs)
nn = neuralNet(400,30,10)
# Training
# ---
# Batch training
nn.trainBatch(input,output,20)
# Live training
#tests = np.size(input,0)
#acc = 0
#for i in xrange(0, tests):
# if (np.argmax(nn.trainLive(input[[i],:],output[i,0])) == output[i,0]):
# acc = acc + 1
#acc = acc / float(tests) * 100
#print("Live training accuracy: %f" % (acc))
# Save/Load
# ---
# Saving weights
#nn.saveWeights('saved.txt')
# Loading weights
#nn.loadWeights('saved.txt')
print("Value: %d, Result: %d" % (output[20,0],nn.classify(input[[20],:])))
print("Value: %d, Result: %d" % (output[300,0],nn.classify(input[[300],:])))
print("Value: %d, Result: %d" % (output[2500,0],nn.classify(input[[2500],:])))
print("Value: %d, Result: %d" % (output[4800,0],nn.classify(input[[4800],:])))
|
mit
| 2,482,796,611,280,662,500
| 21.301887
| 78
| 0.63198
| false
| 2.531049
| false
| false
| false
|
idlesign/django-sitemessage
|
sitemessage/tests/test_messengers.py
|
1
|
12035
|
import pytest
from sitemessage.messengers.base import MessengerBase
from sitemessage.models import Subscription, DispatchError
from sitemessage.toolbox import recipients, schedule_messages, send_scheduled_messages
from sitemessage.utils import get_registered_messenger_objects
from .testapp.sitemessages import (
WONDERLAND_DOMAIN, MessagePlainForTest, MessengerForTest, BuggyMessenger,
messenger_fb,
messenger_smtp,
messenger_telegram,
messenger_twitter,
messenger_vk,
messenger_xmpp,
)
def test_init_params():
messengers = get_registered_messenger_objects()
my = messengers['test_messenger']
assert my.login == 'mylogin'
assert my.password == 'mypassword'
def test_alias():
messenger = type('MyMessenger', (MessengerBase,), {'alias': 'myalias'})
assert messenger.get_alias() == 'myalias'
messenger = type('MyMessenger', (MessengerBase,), {})
assert messenger.get_alias() == 'MyMessenger'
def test_get_recipients_data(user_create):
user = user_create(attributes=dict(username='myuser'))
to = ['gogi', 'givi', user]
r1 = MessengerForTest.structure_recipients_data(to)
assert len(r1) == len(to)
assert r1[0].address == f'gogi{WONDERLAND_DOMAIN}'
assert r1[0].messenger == 'test_messenger'
assert r1[1].address == f'givi{WONDERLAND_DOMAIN}'
assert r1[1].messenger == 'test_messenger'
assert r1[2].address == f'user_myuser{WONDERLAND_DOMAIN}'
assert r1[2].messenger == 'test_messenger'
def test_recipients():
r = MessagePlainForTest.recipients('smtp', 'someone')
assert len(r) == 1
assert r[0].address == 'someone'
def test_send():
m = MessengerForTest('l', 'p')
m.send('message_cls', 'message_model', 'dispatch_models')
assert m.last_send['message_cls'] == 'message_cls'
assert m.last_send['message_model'] == 'message_model'
assert m.last_send['dispatch_models'] == 'dispatch_models'
m = BuggyMessenger()
    recipients_ = recipients('test_messenger', ['a', 'b', 'c', 'd'])
    with pytest.raises(Exception):
        m.send('a buggy message', '', recipients_)
def test_subscription(user_create):
user1 = user_create(attributes=dict(username='first'))
user2 = user_create(attributes=dict(username='second'))
user2.is_active = False
user2.save()
Subscription.create(user1.id, MessagePlainForTest, MessengerForTest)
Subscription.create(user2.id, MessagePlainForTest, MessengerForTest)
assert len(MessagePlainForTest.get_subscribers(active_only=False)) == 2
assert len(MessagePlainForTest.get_subscribers(active_only=True)) == 1
def assert_called_n(func, n=1):
assert func.call_count == n
func.call_count = 0
def test_exception_propagation(monkeypatch):
schedule_messages('text', recipients('telegram', ''))
schedule_messages('text', recipients('telegram', ''))
def new_method(*args, **kwargs):
raise Exception('telegram beforesend failed')
monkeypatch.setattr(messenger_telegram, 'before_send', new_method)
send_scheduled_messages()
errors = list(DispatchError.objects.all())
assert len(errors) == 2
assert errors[0].error_log == 'telegram beforesend failed'
assert errors[1].error_log == 'telegram beforesend failed'
class TestSMTPMessenger:
def setup_method(self, method):
messenger_smtp.smtp.sendmail.call_count = 0
def test_get_address(self):
r = object()
assert messenger_smtp.get_address(r) == r
r = type('r', (object,), dict(email='somewhere'))
assert messenger_smtp.get_address(r) == 'somewhere'
def test_send(self):
schedule_messages('text', recipients('smtp', 'someone'))
send_scheduled_messages()
assert_called_n(messenger_smtp.smtp.sendmail)
def test_send_fail(self):
schedule_messages('text', recipients('smtp', 'someone'))
def new_method(*args, **kwargs):
raise Exception('smtp failed')
old_method = messenger_smtp.smtp.sendmail
messenger_smtp.smtp.sendmail = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'smtp failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_smtp.smtp.sendmail = old_method
def test_send_test_message(self):
messenger_smtp.send_test_message('someone', 'sometext')
assert_called_n(messenger_smtp.smtp.sendmail)
class TestTwitterMessenger:
def test_get_address(self):
r = object()
assert messenger_twitter.get_address(r) == r
r = type('r', (object,), dict(twitter='somewhere'))
assert messenger_twitter.get_address(r) == 'somewhere'
def test_send(self):
schedule_messages('text', recipients('twitter', 'someone'))
send_scheduled_messages()
messenger_twitter.api.statuses.update.assert_called_with(status='@someone text')
def test_send_test_message(self):
messenger_twitter.send_test_message('someone', 'sometext')
messenger_twitter.api.statuses.update.assert_called_with(status='@someone sometext')
messenger_twitter.send_test_message('', 'sometext')
messenger_twitter.api.statuses.update.assert_called_with(status='sometext')
def test_send_fail(self):
schedule_messages('text', recipients('twitter', 'someone'))
def new_method(*args, **kwargs):
raise Exception('tweet failed')
old_method = messenger_twitter.api.statuses.update
messenger_twitter.api.statuses.update = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'tweet failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_twitter.api.statuses.update = old_method
class TestXMPPSleekMessenger:
def test_get_address(self):
r = object()
assert messenger_xmpp.get_address(r) == r
r = type('r', (object,), dict(jabber='somewhere'))
assert messenger_xmpp.get_address(r) == 'somewhere'
def test_send(self):
schedule_messages('text', recipients('xmppsleek', 'someone'))
send_scheduled_messages()
messenger_xmpp.xmpp.send_message.assert_called_once_with(
mtype='chat', mbody='text', mfrom='somjid', mto='someone'
)
def test_send_test_message(self):
messenger_xmpp.send_test_message('someone', 'sometext')
messenger_xmpp.xmpp.send_message.assert_called_with(
mtype='chat', mbody='sometext', mfrom='somjid', mto='someone'
)
def test_send_fail(self):
schedule_messages('text', recipients('xmppsleek', 'someone'))
def new_method(*args, **kwargs):
raise Exception('xmppsleek failed')
old_method = messenger_xmpp.xmpp.send_message
messenger_xmpp.xmpp.send_message = new_method
messenger_xmpp._session_started = True
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'xmppsleek failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_xmpp.xmpp.send_message = old_method
class TestTelegramMessenger:
def setup_method(self, method):
messenger_telegram._verify_bot()
messenger_telegram.lib.post.call_count = 0
def test_get_address(self):
r = object()
assert messenger_telegram.get_address(r) == r
r = type('r', (object,), dict(telegram='chat_id'))
assert messenger_telegram.get_address(r) == 'chat_id'
def test_send(self):
schedule_messages('text', recipients('telegram', '1234567'))
send_scheduled_messages()
assert_called_n(messenger_telegram.lib.post, 2)
assert messenger_telegram.lib.post.call_args[1]['proxies'] == {'https': 'socks5://user:pass@host:port'}
def test_send_test_message(self):
messenger_telegram.send_test_message('someone', 'sometext')
assert_called_n(messenger_telegram.lib.post)
messenger_telegram.send_test_message('', 'sometext')
assert_called_n(messenger_telegram.lib.post)
def test_get_chat_ids(self):
assert messenger_telegram.get_chat_ids() == []
assert_called_n(messenger_telegram.lib.post)
def test_send_fail(self):
schedule_messages('text', recipients('telegram', 'someone'))
def new_method(*args, **kwargs):
raise Exception('telegram failed')
old_method = messenger_telegram.lib.post
messenger_telegram.lib.post = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'telegram failed'
assert errors[0].dispatch.address == 'someone'
finally:
messenger_telegram.lib.post = old_method
class TestFacebookMessenger:
def setup_method(self, method):
messenger_fb.lib.post.call_count = 0
messenger_fb.lib.get.call_count = 0
def test_send(self):
schedule_messages('text', recipients('fb', ''))
send_scheduled_messages()
assert_called_n(messenger_fb.lib.post)
assert messenger_fb.lib.post.call_args[1]['proxies'] == {'https': '0.0.0.0'}
def test_send_test_message(self):
messenger_fb.send_test_message('', 'sometext')
assert_called_n(messenger_fb.lib.post)
messenger_fb.send_test_message('', 'sometext')
assert_called_n(messenger_fb.lib.post)
def test_get_page_access_token(self):
assert messenger_fb.get_page_access_token('app_id', 'app_secret', 'user_token') == {}
assert_called_n(messenger_fb.lib.get, 2)
def test_send_fail(self):
schedule_messages('text', recipients('fb', ''))
def new_method(*args, **kwargs):
raise Exception('fb failed')
old_method = messenger_fb.lib.post
messenger_fb.lib.post = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'fb failed'
assert errors[0].dispatch.address == ''
finally:
messenger_fb.lib.post = old_method
class TestVKontakteMessenger:
def setup_method(self, method):
messenger_vk.lib.post.call_count = 0
messenger_vk.lib.get.call_count = 0
def test_send(self):
schedule_messages('text', recipients('vk', '12345'))
send_scheduled_messages()
assert_called_n(messenger_vk.lib.post)
assert messenger_vk.lib.post.call_args[1]['data']['owner_id'] == '12345'
def test_get_access_token(self, monkeypatch):
monkeypatch.setattr('webbrowser.open', lambda *args: None)
result = messenger_vk.get_access_token(app_id='00000')
assert '00000&scope=wall,' in result
def test_send_test_message(self):
messenger_vk.send_test_message('12345', 'sometext')
assert_called_n(messenger_vk.lib.post)
messenger_vk.send_test_message('12345', 'sometext')
assert_called_n(messenger_vk.lib.post)
def test_send_fail(self):
schedule_messages('text', recipients('vk', '12345'))
def new_method(*args, **kwargs):
raise Exception('vk failed')
old_method = messenger_vk.lib.post
messenger_vk.lib.post = new_method
try:
send_scheduled_messages()
errors = DispatchError.objects.all()
assert len(errors) == 1
assert errors[0].error_log == 'vk failed'
assert errors[0].dispatch.address == '12345'
finally:
messenger_vk.lib.post = old_method
|
bsd-3-clause
| -6,108,604,954,456,369,000
| 32.901408
| 111
| 0.637059
| false
| 3.577586
| true
| false
| false
|
red-hood/calendarserver
|
contrib/performance/benchmarks/bounded_recurrence.py
|
1
|
1816
|
##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Benchmark a server's handling of events with a bounded recurrence.
"""
from uuid import uuid4
from itertools import count
from datetime import datetime, timedelta
from contrib.performance._event_create import (
makeAttendees, makeVCalendar, formatDate, measure as _measure)
def makeEvent(i, organizerSequence, attendeeCount):
"""
Create a new half-hour long event that starts soon and recurs
daily for the next five days.
"""
now = datetime.now()
start = now.replace(minute=15, second=0, microsecond=0) + timedelta(hours=i)
end = start + timedelta(minutes=30)
until = start + timedelta(days=5)
rrule = "RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=" + formatDate(until)
return makeVCalendar(
uuid4(), start, end, rrule, organizerSequence,
makeAttendees(attendeeCount))
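# Illustrative note (not part of the original module): the recurrence rule built
# above comes out as something like "RRULE:FREQ=DAILY;INTERVAL=1;UNTIL=<start + 5 days>",
# where the exact UNTIL timestamp format is whatever formatDate() (imported from
# _event_create, not shown here) produces.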
def measure(host, port, dtrace, attendeeCount, samples):
calendar = "bounded-recurrence"
organizerSequence = 1
# An infinite stream of recurring VEVENTS to PUT to the server.
events = ((i, makeEvent(i, organizerSequence, attendeeCount)) for i in count(2))
return _measure(
calendar, organizerSequence, events,
host, port, dtrace, samples)
|
apache-2.0
| 3,689,230,688,405,523,000
| 33.264151
| 84
| 0.720815
| false
| 3.799163
| false
| false
| false
|
vialectrum/vialectrum
|
electrum_ltc/gui/kivy/uix/dialogs/lightning_tx_dialog.py
|
1
|
3445
|
import copy
from datetime import datetime
from decimal import Decimal
from typing import NamedTuple, Callable, TYPE_CHECKING
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from electrum_ltc.gui.kivy.i18n import _
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
Builder.load_string('''
<LightningTxDialog>
id: popup
title: _('Lightning Payment')
preimage: ''
is_sent: False
amount_str: ''
fee_str: ''
date_str: ''
payment_hash: ''
description: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '25dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: _('Date')
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_sent else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
TopLabel:
text: _('Payment hash') + ':'
TxHashLabel:
data: root.payment_hash
name: _('Payment hash')
TopLabel:
text: _('Preimage')
TxHashLabel:
data: root.preimage
name: _('Preimage')
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
Widget
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class ActionButtonOption(NamedTuple):
text: str
func: Callable
enabled: bool
class LightningTxDialog(Factory.Popup):
def __init__(self, app, tx_item):
Factory.Popup.__init__(self)
self.app = app # type: ElectrumWindow
self.wallet = self.app.wallet
self._action_button_fn = lambda btn: None
self.is_sent = bool(tx_item['direction'] == 'sent')
self.description = tx_item['label']
self.timestamp = tx_item['timestamp']
self.date_str = datetime.fromtimestamp(self.timestamp).isoformat(' ')[:-3]
        self.amount = Decimal(tx_item['amount_msat']) / 1000
self.payment_hash = tx_item['payment_hash']
self.preimage = tx_item['preimage']
format_amount = self.app.format_amount_and_units
self.amount_str = format_amount(self.amount)
if self.is_sent:
self.fee_str = format_amount(Decimal(tx_item['fee_msat']) / 1000)
|
mit
| 6,040,807,483,877,699,000
| 30.036036
| 88
| 0.520464
| false
| 4.322459
| false
| false
| false
|
kernevil/samba
|
python/samba/tests/blackbox/smbcontrol_process.py
|
1
|
4652
|
# Blackbox tests for the smbcontrol fault injection commands
#
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# As the test terminates and sleeps samba processes these tests need to run
# in the preforkrestartdc test environment to prevent them impacting other
# tests.
#
from __future__ import print_function
import time
from samba.tests import BlackboxTestCase, BlackboxProcessError
from samba.messaging import Messaging
COMMAND = "bin/smbcontrol"
PING = "ping"
class SmbcontrolProcessBlockboxTests(BlackboxTestCase):
def setUp(self):
super(SmbcontrolProcessBlockboxTests, self).setUp()
lp_ctx = self.get_loadparm()
self.msg_ctx = Messaging(lp_ctx=lp_ctx)
def get_process_data(self):
services = self.msg_ctx.irpc_all_servers()
processes = []
for service in services:
for id in service.ids:
processes.append((service.name, id.pid))
return processes
def get_process(self, name):
processes = self.get_process_data()
for pname, pid in processes:
if name == pname:
return pid
return None
def test_inject_fault(self):
INJECT = "inject"
FAULT = "segv"
#
# Note that this process name needs to be different to the one used
# in the sleep test to avoid a race condition
#
pid = self.get_process("rpc_server")
#
# Ensure we can ping the process before injecting a fault.
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
except BlackboxProcessError as e:
self.fail("Unable to ping rpc_server process")
#
# Now inject a fault.
#
try:
self.check_run("%s %s %s %s" % (COMMAND, pid, INJECT, FAULT),
msg="injecting fault into rpc_server")
except BlackboxProcessError as e:
print(e)
self.fail("Unable to inject a fault into the rpc_server process")
#
# The process should have died, so we should not be able to ping it
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
self.fail("Could ping rpc_server process")
except BlackboxProcessError as e:
pass
def test_sleep(self):
SLEEP = "sleep" # smbcontrol sleep command
DURATION = 5 # duration to sleep server for
DELTA = 1 # permitted error for the sleep duration
#
# Note that this process name needs to be different to the one used
# in the inject fault test to avoid a race condition
#
pid = self.get_process("ldap_server")
#
# Ensure we can ping the process before getting it to sleep
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
except BlackboxProcessError as e:
self.fail("Unable to ping rpc_server process")
#
# Now ask it to sleep
#
start = time.time()
try:
self.check_run("%s %s %s %s" % (COMMAND, pid, SLEEP, DURATION),
msg="putting rpc_server to sleep for %d" % DURATION)
except BlackboxProcessError as e:
print(e)
self.fail("Failed to get rpc_server to sleep for %d" % DURATION)
#
# The process should be sleeping and not respond until it wakes
#
try:
self.check_run("%s %s %s" % (COMMAND, pid, PING),
msg="trying to ping rpc_server")
end = time.time()
duration = end - start
self.assertGreater(duration + DELTA, DURATION)
except BlackboxProcessError as e:
self.fail("Unable to ping rpc_server process")
|
gpl-3.0
| 2,958,449,365,037,097,500
| 34.242424
| 79
| 0.595658
| false
| 4.153571
| true
| false
| false
|
zaironne/SnippetDetector
|
SD_AddSnippet.py
|
1
|
4637
|
"""
SD_AddSnippet Add a snippet to local/global database. A new snippet is defined by:
- snippet name: directly taken from the name of the function
- snippet description: taken from the comment (if there's one) of the function
- syntactic and semantic bytes sequences
- snippet comments: all the available comments added by the user
by ZaiRoN (zairon.wordpress.com)
Copyright (C) 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from SD_db import sd_db
from SD_Semantic import semantic
from SD_Common import sd_common
db_type = ['global', 'local']
# ask for local/global database
db_answer = AskYN(1, 'Do you want to save the snippet inside the local database?\n[YES = local, NO = global, CANCEL = abort]')
if db_answer == -1:
print('\n[SNIPPET DETECTOR] Add snippet operation aborted')
else:
# start/end address function is automatically taken from the cursor
sd_c = sd_common()
func_start, func_end = sd_c.get_start_end_function(ScreenEA())
    if func_start != BADADDR and func_end != BADADDR:
# create database file path
sddb = sd_db()
n_instr = sd_c.get_total_instructions(func_start, func_end)
db_file = sddb.get_db_folder(db_answer, True) + os.sep + 'sd_db_' + str(n_instr) + '.sd'
if not os.path.isfile(db_file):
# create database file
sddb.open_db_connection(db_file)
sddb.create_snippet_table()
else:
sddb.open_db_connection(db_file)
# is the syntactic bytes sequence already inside the db?
syntactic_bytes = GetManyBytes(func_start, func_end - func_start, False)
_snippet = sddb.get_snippet_by_syntactic_bytes(syntactic_bytes)
fail = False
add_snippet = False
if _snippet:
print('\n[SNIPPET DETECTOR] Snippet is already inside the database (syntactic match):')
fail = True
else:
# get semantic bytes sequence
sd_sem = semantic()
semantic_bytes = sd_sem.from_syntactic_to_semantic(func_start, func_end)
if semantic_bytes is not None:
# is the semantic bytes sequence already inside the db?
_snippet = sddb.get_snippet_by_semantic_bytes(semantic_bytes)
if not _snippet:
# add snippet
add_snippet = True
else:
# semantic bytes sequence could be not unique
save_answer = AskYN(1, 'Snippet is already inside the database (semantic match), do you want'
' to add this snippet too?')
if save_answer == 1:
# add the snippet
add_snippet = True
else:
fail = True
print('[SNIPPET DETECTOR] Snippet is already inside the database (semantic match):')
else:
print('\n[SNIPPET DETECTOR] Unable to convert syntactical snippet into semantic one inside function at 0x%x' % func_start)
if fail:
# print the information about the snippet inside the database
print('Snippet name: %s' % _snippet[0])
print('Snippet description: %s\n' % _snippet[1])
if add_snippet:
# time to save the new snippet inside the database
comments = sd_c.get_comments(func_start, func_end)
snippet_name = GetFunctionName(func_start)
snippet_description = GetFunctionCmt(func_start, False)
sddb.save_snippet(snippet_name, snippet_description, syntactic_bytes, semantic_bytes, comments)
print('\n[SNIPPET DETECTOR] Snippet correctly inserted inside %s database!' % db_type[db_answer])
sddb.close_db_connection()
else:
print('\n[SNIPPET DETECTOR] Unable to get function start/end addresses from cursor at 0x%X...' % ScreenEA())
|
gpl-3.0
| 6,602,680,185,730,456,000
| 47.302083
| 138
| 0.614622
| false
| 4.085463
| false
| false
| false
|
johngrantuk/piupdue
|
piupdue/ArduinoFlashEefc.py
|
1
|
7591
|
""" Handles main processor operations.
WriteFileToFlash()
LoadBuffer()
WritePage()
EraseFlash()
SetBootFlash()
Reset()
    Not entirely sure what each of these does, but the tests have worked.
"""
import ArduinoFlashSerial, ArduinoFlashHardValues
import ctypes, time, os
def WriteFileToFlash(SerialPort, Log, File, IsNativePort):
"""
Writes File to processors flash in blocks pageSize long.
"""
Log.Log("Writing file to flash: " + File)
pageSize = ArduinoFlashHardValues.size # Size of data blocks to be written.
pageNum = 0
offset = 0 # -- Flash.h LN99 => 0
numPages = 0
onBufferA = True # LN52 Flash.cpp
fileSizeBytes = os.path.getsize(File) # Find file size.
numPages = (fileSizeBytes + pageSize - 1) / pageSize # 47 pages for blink.
if numPages > ArduinoFlashHardValues.pages:
raise Exception("WriteFlash()-File Size Error. numPages: " + str(numPages))
Log.Log("Writing " + str(fileSizeBytes) + "bytes to flash in " + str(numPages) + " pages.")
f = open(File, 'rb')
while True:
piece = f.read(pageSize) # Reads a block of data from file.
if not piece:
Log.Log("End of file??")
break
readBytes = len(piece)
Log.Log("Read: " + str(readBytes) + "bytes from file. onBufferA: " + str(onBufferA) + ", PageNum: " + str(pageNum))
dataJ = []
for i in range(0, readBytes):
dataJ.append(ord(piece[i]))
LoadBuffer(SerialPort, Log, onBufferA, dataJ, IsNativePort)
page = offset + pageNum
onBufferA = WritePage(page, onBufferA, SerialPort, Log)
pageNum += 1
if pageNum == numPages or readBytes != pageSize:
Log.Log("End of file...")
break
f.close()
Log.Log("End of WriteFlash()\n")
def EraseFlash(SerialPort, Log):
""" Erases processor flash. """
Log.Log("EraseFlash():")
WaitFSR(SerialPort, Log)
WriteFCR0(ArduinoFlashHardValues.EEFC_FCMD_EA, 0, SerialPort, Log)
WaitFSR(SerialPort, Log)
WriteFCR1(ArduinoFlashHardValues.EEFC_FCMD_EA, 0, SerialPort, Log)
Log.Log("Flash Erased.")
def LoadBuffer(SerialPort, Log, OnBufferA, Data, IsNativePort):
"""
Writes SXXXXXXXX,XXXXXXXX# command then Xmodem.
"""
Log.Log("LoadBuffer():")
ArduinoFlashSerial.Write(SerialPort, Log, ArduinoFlashHardValues.pageBufferA if OnBufferA else ArduinoFlashHardValues.pageBufferB, Data, ArduinoFlashHardValues.size, IsNativePort)
Log.Log("End of LoadBuffer()\n")
def WritePage(Page, OnBufferA, SerialPort, Log):
""" LN256 EefcFlash """
Log.Log("Write Page(), Page: " + str(Page) + ", OnBufferA: " + str(OnBufferA))
SetDstAddr(ArduinoFlashHardValues.addr + Page * ArduinoFlashHardValues.size, SerialPort, Log)
SetSrcAddr(ArduinoFlashHardValues.pageBufferA if OnBufferA else ArduinoFlashHardValues.pageBufferB, SerialPort, Log)
OnBufferA = not OnBufferA
WaitFSR(SerialPort, Log)
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.reset, ArduinoFlashHardValues.start + 1, Log) # _wordCopy.runv();
ArduinoFlashSerial.Go(SerialPort, Log, ArduinoFlashHardValues.stack + ArduinoFlashHardValues.user) # _wordCopy.runv();
if ArduinoFlashHardValues.planes == 2 and Page >= ArduinoFlashHardValues.pages / 2:
WriteFCR1(ArduinoFlashHardValues.EEFC_FCMD_EWP if ArduinoFlashHardValues.eraseAuto else ArduinoFlashHardValues.EEFC_FCMD_WP, Page - ArduinoFlashHardValues.pages / 2, SerialPort, Log)
else:
WriteFCR0(ArduinoFlashHardValues.EEFC_FCMD_EWP if ArduinoFlashHardValues.eraseAuto else ArduinoFlashHardValues.EEFC_FCMD_WP, Page, SerialPort, Log)
Log.Log("End of Write Page()\n")
return OnBufferA
def SetBootFlash(SerialPort, Log, Enable):
""" Sets boot flash. """
Log.Log("SetBootFlash():")
WaitFSR(SerialPort, Log)
WriteFCR0(ArduinoFlashHardValues.EEFC_FCMD_SGPB if Enable else ArduinoFlashHardValues.EEFC_FCMD_CGPB, 1, SerialPort, Log)
WaitFSR(SerialPort, Log)
time.sleep(1)
Log.Log("End of SetBootFlash.")
def Reset(SerialPort, Log):
""" Resets processor. """
Log.Log("Reset()...")
ArduinoFlashSerial.WriteWord(SerialPort, 0x400E1A00, 0xA500000D, Log)
Log.Log("Reset done...")
time.sleep(1)
def WaitFSR(SerialPort, Log):
""" Not sure what it does. """
Log.Log("WaitFSR():")
tries = 0
fsr1 = ctypes.c_uint32(0x1).value
while tries <= 500:
addr = "w" + '{0:08X}'.format(ArduinoFlashHardValues.EEFC0_FSR) + ",4#"
Log.Log("Sending EEFC0_FSR: " + addr)
if ArduinoFlashHardValues.LiveWrite:
fsr0 = ArduinoFlashSerial.ReadWord(SerialPort, addr, Log)
if fsr0 & (1 << 2):
Log.Log("WaitFSR() Error. fsr0")
addr = "w" + '{0:08X}'.format(ArduinoFlashHardValues.EEFC1_FSR) + ",4#"
Log.Log("Sending EFC1_FSR: " + addr)
if ArduinoFlashHardValues.LiveWrite:
fsr1 = ArduinoFlashSerial.ReadWord(SerialPort, addr, Log)
if fsr1 & (1 << 2):
Log.Log("WaitFSR() Error. fsr1")
if fsr0 & fsr1 & 0x1:
Log.Log("Breaking.")
break
time.sleep(1) ##########???????
else:
break
tries += 1
if tries > 500:
Log.Log("WaitFSR() Error. Tried and failed!!")
def WriteFCR0(cmd, arg, SerialPort, Log):
"""
writeFCR0(uint8_t cmd, uint32_t arg)
writeFCR0(EEFC_FCMD_EA, 0);
EefcFlash.cpp LN314 _samba.writeWord(EEFC0_FCR, (EEFC_KEY << 24) | (arg << 8) | cmd);
"""
Log.Log("WriteFCR0()")
value = (ArduinoFlashHardValues.EEFC_KEY << 24) | (arg << 8) | cmd
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.EEFC0_FCR, value, Log)
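# Worked example of the word composed above (assuming the usual SAM3X values of
# EEFC_KEY = 0x5A and EEFC_FCMD_EA = 0x05, which live in ArduinoFlashHardValues
# and are not shown here): erasing all flash with arg = 0 writes
# (0x5A << 24) | (0 << 8) | 0x05 = 0x5A000005 to EEFC0_FCR.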
def WriteFCR1(cmd, arg, SerialPort, Log):
"""
EefcFlash::writeFCR1(uint8_t cmd, uint32_t arg)
_samba.writeWord(EEFC1_FCR, (EEFC_KEY << 24) | (arg << 8) | cmd);
"""
Log.Log("WriteFCR1()")
value = (ArduinoFlashHardValues.EEFC_KEY << 24) | (arg << 8) | cmd
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.EEFC1_FCR, value, Log)
def SetDstAddr(DstAddr, SerialPort, Log):
""" Unsure """
Log.Log("SetDstAddr()")
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.user + ArduinoFlashHardValues.dstAddr, DstAddr, Log) # WordCopyApplet (0x20001000 + 0x00000028), DstAddr
def SetSrcAddr(SrcAddr, SerialPort, Log):
""" Unsure """
Log.Log("SetSrcAddr()")
ArduinoFlashSerial.WriteWord(SerialPort, ArduinoFlashHardValues.user + ArduinoFlashHardValues.srcAddr, SrcAddr, Log) # WordCopyApplet (0x20001000 + 0x00000028), DstAddr
|
mit
| -2,672,416,731,614,591,000
| 37.538071
| 247
| 0.577658
| false
| 3.522506
| false
| false
| false
|
zenefits/sentry
|
src/sentry/web/frontend/debug/mail.py
|
1
|
19243
|
from __future__ import absolute_import, print_function
import itertools
import logging
import time
import traceback
import uuid
from datetime import datetime, timedelta
from random import Random
import six
from django.contrib.webdesign.lorem_ipsum import WORDS
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.views.generic import View
from sentry.app import tsdb
from sentry.constants import LOG_LEVELS
from sentry.digests import Record
from sentry.digests.notifications import Notification, build_digest
from sentry.digests.utilities import get_digest_metadata
from sentry.http import get_server_hostname
from sentry.models import (
Activity, Event, Group, GroupStatus, GroupSubscriptionReason, Organization,
OrganizationMember, Project, Release, Rule, Team
)
from sentry.plugins.sentry_mail.activity import emails
from sentry.utils.dates import to_datetime, to_timestamp
from sentry.utils.email import inline_css
from sentry.utils.http import absolute_uri
from sentry.utils.samples import load_data
from sentry.web.decorators import login_required
from sentry.web.helpers import render_to_response, render_to_string
logger = logging.getLogger(__name__)
def get_random(request):
seed = request.GET.get('seed', six.text_type(time.time()))
return Random(seed)
def make_message(random, length=None):
if length is None:
length = int(random.weibullvariate(8, 3))
return ' '.join(random.choice(WORDS) for _ in range(length))
def make_culprit(random):
def make_module_path_components(min, max):
for _ in range(random.randint(min, max)):
yield ''.join(random.sample(WORDS, random.randint(1, int(random.paretovariate(2.2)))))
return '{module} in {function}'.format(
module='.'.join(make_module_path_components(1, 4)),
function=random.choice(WORDS)
)
def make_group_metadata(random, group):
return {
'type': 'error',
'metadata': {
'type': '{}Error'.format(
''.join(word.title() for word in random.sample(WORDS, random.randint(1, 3))),
),
'value': make_message(random),
}
}
def make_group_generator(random, project):
epoch = to_timestamp(datetime(2016, 6, 1, 0, 0, 0, tzinfo=timezone.utc))
for id in itertools.count(1):
first_seen = epoch + random.randint(0, 60 * 60 * 24 * 30)
last_seen = random.randint(first_seen, first_seen + (60 * 60 * 24 * 30))
group = Group(
id=id,
project=project,
culprit=make_culprit(random),
level=random.choice(LOG_LEVELS.keys()),
message=make_message(random),
first_seen=to_datetime(first_seen),
last_seen=to_datetime(last_seen),
status=random.choice((
GroupStatus.UNRESOLVED,
GroupStatus.RESOLVED,
)),
)
if random.random() < 0.8:
group.data = make_group_metadata(random, group)
yield group
def add_unsubscribe_link(context):
if 'unsubscribe_link' not in context:
context['unsubscribe_link'] = 'javascript:alert("This is a preview page, what did you expect to happen?");'
# TODO(dcramer): use https://github.com/disqus/django-mailviews
class MailPreview(object):
def __init__(self, html_template, text_template, context=None, subject=None):
self.html_template = html_template
self.text_template = text_template
self.subject = subject
self.context = context if context is not None else {}
add_unsubscribe_link(self.context)
def text_body(self):
return render_to_string(self.text_template, self.context)
def html_body(self):
try:
return inline_css(render_to_string(self.html_template, self.context))
except Exception:
traceback.print_exc()
raise
def render(self, request):
return render_to_response('sentry/debug/mail/preview.html', {
'preview': self,
'format': request.GET.get('format'),
})
class ActivityMailPreview(object):
def __init__(self, request, activity):
self.request = request
self.email = emails.get(activity.type)(activity)
def get_context(self):
context = self.email.get_base_context()
context['reason'] = get_random(self.request).choice(
GroupSubscriptionReason.descriptions.values()
)
context.update(self.email.get_context())
add_unsubscribe_link(context)
return context
def text_body(self):
return render_to_string(self.email.get_template(), self.get_context())
def html_body(self):
try:
return inline_css(render_to_string(
self.email.get_html_template(), self.get_context()))
except Exception:
import traceback
traceback.print_exc()
raise
class ActivityMailDebugView(View):
def get(self, request):
org = Organization(
id=1,
slug='organization',
name='My Company',
)
team = Team(
id=1,
slug='team',
name='My Team',
organization=org,
)
project = Project(
id=1,
organization=org,
team=team,
slug='project',
name='My Project',
)
group = next(
make_group_generator(
get_random(request),
project,
),
)
event = Event(
id=1,
project=project,
group=group,
message=group.message,
data=load_data('python'),
datetime=datetime(2016, 6, 13, 3, 8, 24, tzinfo=timezone.utc),
)
activity = Activity(
group=event.group, project=event.project,
**self.get_activity(request, event)
)
return render_to_response('sentry/debug/mail/preview.html', {
'preview': ActivityMailPreview(request, activity),
'format': request.GET.get('format'),
})
@login_required
def alert(request):
platform = request.GET.get('platform', 'python')
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
project = Project(
id=1,
slug='example',
name='Example',
team=team,
organization=org,
)
random = get_random(request)
group = next(
make_group_generator(random, project),
)
event = Event(
id=1,
project=project,
group=group,
message=group.message,
data=load_data(platform),
datetime=to_datetime(
random.randint(
to_timestamp(group.first_seen),
to_timestamp(group.last_seen),
),
),
)
rule = Rule(label="An example rule")
interface_list = []
for interface in six.itervalues(event.interfaces):
body = interface.to_email_html(event)
if not body:
continue
interface_list.append((interface.get_title(), mark_safe(body)))
return MailPreview(
html_template='sentry/emails/error.html',
text_template='sentry/emails/error.txt',
context={
'rule': rule,
'group': group,
'event': event,
'link': 'http://example.com/link',
'interfaces': interface_list,
'tags': event.get_tags(),
'project_label': project.name,
'tags': [
('logger', 'javascript'),
('environment', 'prod'),
('level', 'error'),
('device', 'Other')
]
},
).render(request)
@login_required
def digest(request):
random = get_random(request)
# TODO: Refactor all of these into something more manageable.
org = Organization(
id=1,
slug='example',
name='Example Organization',
)
team = Team(
id=1,
slug='example',
name='Example Team',
organization=org,
)
project = Project(
id=1,
slug='example',
name='Example Project',
team=team,
organization=org,
)
rules = {i: Rule(
id=i,
project=project,
label="Rule #%s" % (i,),
) for i in range(1, random.randint(2, 4))}
state = {
'project': project,
'groups': {},
'rules': rules,
'event_counts': {},
'user_counts': {},
}
records = []
event_sequence = itertools.count(1)
group_generator = make_group_generator(random, project)
for i in range(random.randint(1, 30)):
group = next(group_generator)
state['groups'][group.id] = group
offset = timedelta(seconds=0)
for i in range(random.randint(1, 10)):
offset += timedelta(seconds=random.random() * 120)
event = Event(
id=next(event_sequence),
event_id=uuid.uuid4().hex,
project=project,
group=group,
message=group.message,
data=load_data('python'),
datetime=to_datetime(
random.randint(
to_timestamp(group.first_seen),
to_timestamp(group.last_seen),
),
)
)
records.append(
Record(
event.event_id,
Notification(
event,
random.sample(state['rules'], random.randint(1, len(state['rules']))),
),
to_timestamp(event.datetime),
)
)
state['event_counts'][group.id] = random.randint(10, 1e4)
state['user_counts'][group.id] = random.randint(10, 1e4)
digest = build_digest(project, records, state)
start, end, counts = get_digest_metadata(digest)
context = {
'project': project,
'counts': counts,
'digest': digest,
'start': start,
'end': end,
}
add_unsubscribe_link(context)
return MailPreview(
html_template='sentry/emails/digests/body.html',
text_template='sentry/emails/digests/body.txt',
context=context,
).render(request)
@login_required
def report(request):
from sentry.tasks import reports
random = get_random(request)
duration = 60 * 60 * 24 * 7
timestamp = to_timestamp(
reports.floor_to_utc_day(
to_datetime(
random.randint(
to_timestamp(datetime(2015, 6, 1, 0, 0, 0, tzinfo=timezone.utc)),
to_timestamp(datetime(2016, 7, 1, 0, 0, 0, tzinfo=timezone.utc)),
)
)
)
)
start, stop = interval = reports._to_interval(timestamp, duration)
organization = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=organization,
)
projects = []
for i in xrange(0, random.randint(1, 8)):
name = ' '.join(
random.sample(
WORDS,
random.randint(1, 4)
)
)
projects.append(
Project(
id=i,
organization=organization,
team=team,
slug=slugify(name),
name=name,
date_added=start - timedelta(days=random.randint(0, 120)),
)
)
def make_release_generator():
id_sequence = itertools.count(1)
while True:
dt = to_datetime(
random.randint(
timestamp - (30 * 24 * 60 * 60),
timestamp,
),
)
p = random.choice(projects)
yield Release(
id=next(id_sequence),
project=p,
organization_id=p.organization_id,
version=''.join([
random.choice('0123456789abcdef') for _ in range(40)
]),
date_added=dt,
date_started=dt,
)
def build_issue_summaries():
summaries = []
for i in range(3):
summaries.append(
int(random.weibullvariate(10, 1) * random.paretovariate(0.5))
)
return summaries
def build_usage_summary():
return (
int(random.weibullvariate(3, 1) * random.paretovariate(0.2)),
int(random.weibullvariate(5, 1) * random.paretovariate(0.2)),
)
def build_calendar_data(project):
start, stop = reports.get_calendar_query_range(interval, 3)
rollup = 60 * 60 * 24
series = []
weekend = frozenset((5, 6))
value = int(random.weibullvariate(5000, 3))
for timestamp in tsdb.get_optimal_rollup_series(start, stop, rollup)[1]:
damping = random.uniform(0.2, 0.6) if to_datetime(timestamp).weekday in weekend else 1
jitter = random.paretovariate(1.2)
series.append((timestamp, int(value * damping * jitter)))
value = value * random.uniform(0.25, 2)
return reports.clean_calendar_data(
project,
series,
start,
stop,
rollup,
stop
)
def build_report(project):
daily_maximum = random.randint(1000, 10000)
rollup = 60 * 60 * 24
series = [(
timestamp + (i * rollup),
(random.randint(0, daily_maximum), random.randint(0, daily_maximum))
) for i in xrange(0, 7)]
aggregates = [
random.randint(0, daily_maximum * 7) if random.random() < 0.9 else None for _ in xrange(0, 4)
]
return reports.Report(
series,
aggregates,
build_issue_summaries(),
build_usage_summary(),
build_calendar_data(project),
)
if random.random() < 0.85:
personal = {
'resolved': random.randint(0, 100),
'users': int(random.paretovariate(0.2)),
}
else:
personal = {
'resolved': 0,
'users': 0,
}
return MailPreview(
html_template='sentry/emails/reports/body.html',
text_template='sentry/emails/reports/body.txt',
context={
'duration': reports.durations[duration],
'interval': {
'start': reports.date_format(start),
'stop': reports.date_format(stop),
},
'report': reports.to_context(
organization,
interval,
{project: build_report(project) for project in projects}
),
'organization': organization,
'personal': personal,
'user': request.user,
},
).render(request)
@login_required
def request_access(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
return MailPreview(
html_template='sentry/emails/request-team-access.html',
text_template='sentry/emails/request-team-access.txt',
context={
'email': 'foo@example.com',
'name': 'George Bush',
'organization': org,
'team': team,
'url': absolute_uri(reverse('sentry-organization-members', kwargs={
'organization_slug': org.slug,
}) + '?ref=access-requests'),
},
).render(request)
@login_required
def invitation(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
om = OrganizationMember(
id=1,
email='foo@example.com',
organization=org,
)
return MailPreview(
html_template='sentry/emails/member-invite.html',
text_template='sentry/emails/member-invite.txt',
context={
'email': 'foo@example.com',
'organization': org,
'url': absolute_uri(reverse('sentry-accept-invite', kwargs={
'member_id': om.id,
'token': om.token,
})),
},
).render(request)
@login_required
def access_approved(request):
org = Organization(
id=1,
slug='example',
name='Example',
)
team = Team(
id=1,
slug='example',
name='Example',
organization=org,
)
return MailPreview(
html_template='sentry/emails/access-approved.html',
text_template='sentry/emails/access-approved.txt',
context={
'email': 'foo@example.com',
'name': 'George Bush',
'organization': org,
'team': team,
},
).render(request)
@login_required
def confirm_email(request):
email = request.user.emails.first()
email.set_hash()
email.save()
return MailPreview(
html_template='sentry/emails/confirm_email.html',
text_template='sentry/emails/confirm_email.txt',
context={
'confirm_email': 'foo@example.com',
'user': request.user,
'url': absolute_uri(reverse(
'sentry-account-confirm-email',
args=[request.user.id, email.validation_hash]
)),
'is_new_user': True,
},
).render(request)
@login_required
def recover_account(request):
return MailPreview(
html_template='sentry/emails/recover_account.html',
text_template='sentry/emails/recover_account.txt',
context={
'user': request.user,
'url': absolute_uri(reverse(
'sentry-account-confirm-email',
args=[request.user.id, 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX']
)),
'domain': get_server_hostname(),
},
).render(request)
@login_required
def org_delete_confirm(request):
from sentry.models import AuditLogEntry
org = Organization.get_default()
entry = AuditLogEntry(
organization=org,
actor=request.user,
ip_address=request.META['REMOTE_ADDR'],
)
return MailPreview(
html_template='sentry/emails/org_delete_confirm.html',
text_template='sentry/emails/org_delete_confirm.txt',
context={
'organization': org,
'audit_log_entry': entry,
'eta': timezone.now() + timedelta(days=1),
'url': absolute_uri(reverse(
'sentry-restore-organization',
args=[org.slug],
)),
},
).render(request)
|
bsd-3-clause
| -3,899,513,468,241,302,000
| 27.298529
| 115
| 0.543938
| false
| 4.157053
| false
| false
| false
|
sabajt/Dinos-In-Space
|
endMessage.py
|
1
|
25058
|
"""
EndMessage.py
message / menu that appears upon completion or failure of puzzle
EndMessage.win is where puzzle profile data is modified after completion
"""
import pygame
import dinosInSpace
import static56
import infoGraphic56
import tween
import soundFx56
import dino56
import dataStorage56
import snack
import random
import dinostein
import sparkleTrail
import spriteBasic
OFFSCREEN = (-1000,-1000)
ALPHA = 200
BLACK = (0,0,0)
YELLOW = (255,255, 0)
BLUE = (0,0,255)
GREY = (150,150,150)
WHITE = (255,255,255)
COL_BTEXT = BLACK
COL_MTEXT = WHITE
SIZE_BTEXT = 15
SIZE_MTEXT = 15
BTTN_HEIGHT = 50
BTTN_XBUF = 12
BTTN_YBUF = 8
BTTN_MIDJUST = 3
MSG_TEXT_YREL1 = -65
MSG_TEXT_YREL2 = -30
MSG_TEXT_YREL3 = 5
MSG_TWEENSTART = (1100,300)
MSG_TWEENEND = (400,300)
MSG_TWEENSPD = 60 #45
MSG_TWEENMODE = "EXP"
MSG_TWEENDCLVAL = 0.80 #0.55
MSG_TWEENDCLLEM = 4 #3
SPIRAL_DIRECTION = -1
SPIRAL_ROTATESTEP = 6
SPIRAL_SCALE_STEP = -10
SPIRAL_TERMINATEAFTER = 20
SPARK_SIZE = (6,6)
SPARK_COLOR = BLUE
SPARK_BOUNDS = (20,20)
SPARK_FREQUENCY = 1
SPARK_FADESPEED = 10
LAST_TUTORIAL = "tut7" # used in win to check if last %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% LAST_TUTORIAL %%%%%%%%%%%%%%%%%%%%%%%%%%
def getExlaim():
wordList = [
"Yowza!",
"Check it yo!",
"Wowzies!",
"Yowzies!",
"Look!",
"Jeepers!",
"OoOoOoOo!",
]
return wordList[random.randint(0, len(wordList) - 1)]
class ImgLib(object):
""" image library to load and access local images """
imgDict = None
def __init__(self):
if not ImgLib.imgDict:
_talkbox = dinosInSpace.loadImage("talkBoxBlack.png", "2X", (0,0), ALPHA)
_buttonSize = (_talkbox.get_width()/2 - BTTN_XBUF, BTTN_HEIGHT)
ImgLib.imgDict = {
"CURSORSTD" : dinosInSpace.loadImage("controlCursor.png", "2X", (21,21)),
# "BTTN_0" : dinosInSpace.loadImage("button0.png", "2X", (0,0)),
# "BTTN_1" : dinosInSpace.loadImage("button1.png", "2X", (0,0)),
##"TALKBOX" : dinosInSpace.loadImage("talkBoxBlack.png", "2X", None, ALPHA),
"TALKBOX" : dinosInSpace.loadImage("talkBoxBlack.png", "2X", (0,0), ALPHA),
"BTTN_0" : pygame.Surface(_buttonSize),
"BTTN_1" : pygame.Surface(_buttonSize)
}
ImgLib.imgDict["BTTN_0"].fill(GREY)
ImgLib.imgDict["BTTN_1"].fill(WHITE)
@staticmethod
def getImage(name):
if name in ImgLib.imgDict:
return ImgLib.imgDict[name].copy()
else:
print "image, " + name + " not found"
def initImgLib():
ImgLib()
class BonusDelegate(object):
"""
a simple object to hold snax collected or alt exits taken during puzzle
- data stored here and retrieved by EndMessage if player wins
"""
snax = []
@staticmethod
def wipe():
BonusDelegate.snax = []
@staticmethod
def quickReset():
BonusDelegate.wipe()
class EndMessage(tween.TweenMenu):
"""
message displayed upon failing or completing puzzle
- message is subclass of tween sprite and message frame is image with rect
- creates and controlls features determined by puzzle outcome
"""
me = None
def __init__(self, stateObj, mustSave, profileName, curPuzzle, _fps):
tween.TweenLeader.__init__(self)
EndMessage.me = self
self.IMG = ImgLib.getImage # shortcut to img lib
self.minRatio = [3,1] # quick fix image ratio must be hardcoded
self.game = stateObj
self.screen = self.game.getScreen()
self.image = self.IMG("TALKBOX")
self.original = self.image.copy()
        self.spiralSnap = None  # image to be fed into spiralOut
self.rect = self.image.get_rect()
self.rect.center = OFFSCREEN
self.centerScreen = (self.screen.get_width()/2, self.screen.get_height()/2)
self.mustSave = mustSave
self.end = False
self.endMessageGroup = pygame.sprite.OrderedUpdates()
self.profileName = profileName
self.curPuzzle = curPuzzle
self.currentDinostein = None
self.currentSteinFrame = None
self._fps = _fps
self.isGoingOut = False
self.isGoingOutFrameCount = 0
self.terminate = False
self.isActive = False # activate after screenshot for state change
self.firstCycle = True # state change bug
self.speed = MSG_TWEENSPD # 45
self.dclval = MSG_TWEENDCLVAL # 0.55
self.dcllim = MSG_TWEENDCLLEM # 3
if self._fps == 30:
self.speed *= 2
self.dclval = .60
self.dcllim = 2
self.endMessageGroup.add(self)
def update(self):
self.checkEndCondition()
if self.isActive:
if not self.firstCycle:
tween.TweenMenu.update(self)
if self.isGoingOut:
self.isGoingOutFrameCount += 1
rotateStep = SPIRAL_ROTATESTEP
scaleStep = SPIRAL_SCALE_STEP
termAfter = SPIRAL_TERMINATEAFTER
if self._fps == 30:
rotateStep *= 2
scaleStep *= 2
termAfter /= 2
#spr, directionAsInt, rotateStep, scaleStep, terminateAfter, frameCount, minRatio, ORIGINAL
self.terminate = spiralOut(
self,
SPIRAL_DIRECTION,
rotateStep,
scaleStep,
termAfter,
self.isGoingOutFrameCount,
self.minRatio,
self.spiralSnap
)
else:
self.firstCycle = False
def updateEnding(self):
""" for state change identification purpose """
pass
@staticmethod
def wipe():
EndMessage.me = None
def checkEndCondition(self):
if not self.end:
if static56.Goal.getSaved() >= self.mustSave:
self.win()
if dino56.Dino.getLost() > 0: ## instead return coord and pic?
self.lose()
def generateMessage(self, didWin, data=None, newSnax=None):
line1 = ""
line2 = ""
line3 = ""
if not didWin:
line1 = ""
line2 = "Dino Down, try again!"
line3 = ""
elif data == "_user":
line1 = ""
line2 = "Good work, you got all dinos to a station"
elif data == "TUTORIAL":
line1 = "Good job"
else:
assert(data)
if not newSnax:
line1 = "Good work, you got all dinos to a Station."
else:
line1 = "you got all dinos to a station and " + str(len(newSnax)) + " new snax!"
snax = data[4]
snaxLeft = 0
itLooks_theyLook = "it looks"
theres_thereAre = "there's"
if snax:
for s in snax:
if s == 0:
snaxComp = False
snaxLeft += 1
if snaxLeft > 1:
theres_thereAre = "there are"
itLooks_theyLook = "they look"
if snaxLeft:
line2 = "It appears " + theres_thereAre + " " + str(snaxLeft) + " snax still in the area..."
line3 = "and " + itLooks_theyLook + " REALLY tasty!"
else:
line2 = ""
return line1, line2, line3
# # puzzle name : [file name, locked, complete, difficulty, snacks collected, secret exit found]
# #
# # -0 (string) _file name_ : passed as 'dest' to map selector (level)
# # -1 (bool) _locked_ : controlls player access / preview
# # -2 (bool) _complete_ : displays if complete, adds to global profile completed count
# # -3 (int) _difficulty_ : displays difficulty level
# # -4 (list) _snacks_ : displays how many snacks collected as fraction, pass 'None' if n/a
@staticmethod
def checkButtonPressed():
dest = None
for s in EndMessage.me.endMessageGroup:
if s.__class__ == EndMessageButton:
dest = s.requestDest()
if dest:
break
return dest
def bind(self, followers):
for f in followers:
self.endMessageGroup.add(f)
self.addFollower(f)
def lose(self):
endDinoImage, endDinoCenter = dino56.Dino.getLastDinoImageAndCenter()
## soundFx56.SoundPlayer.requestSound("lose")
line1, line2, line3 = self.generateMessage(False)
# init features from data and register for access
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.retryButton = EndMessageButton(
(-self.rect.width/4 + BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"Retry (any key)",
SIZE_BTEXT,
COL_BTEXT,
"QR"
)
self.exitButton = EndMessageButton(
(self.rect.width/4 - BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"choose another puzzle",
SIZE_BTEXT,
COL_BTEXT,
"EXIT"
)
else:
self.retryButton = EndMessageButton(
(0, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"Retry",
SIZE_BTEXT,
COL_BTEXT,
"QR"
)
##(170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "Leave Area", SIZE_BTEXT, COL_BTEXT, "EXIT")
self.text1 = EndMessageText((0, MSG_TEXT_YREL1), line1, SIZE_MTEXT, COL_MTEXT)
self.text2 = EndMessageText((0, MSG_TEXT_YREL2), line2, SIZE_MTEXT, COL_MTEXT)
self.text3 = EndMessageText((0, MSG_TEXT_YREL3), line3, SIZE_MTEXT, COL_MTEXT)
# dinostein ***
self.currentDinostein = dinostein.Dinostein(self._fps)
self.currentSteinFrame = dinostein.Frame(self._fps)
self.currentDinostein.addFollower(self.currentSteinFrame)
# *************
cursor = EndMessageCursor([self.IMG("CURSORSTD")]) # true for withTrail - winning condition only
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.bind([self.retryButton, self.exitButton, self.text1, self.text2, self.text3])
else:
self.bind([self.retryButton, self.text1, self.text2, self.text3])
self.endMessageGroup.add(self.currentSteinFrame)
self.endMessageGroup.add(self.currentDinostein)
self.endMessageGroup.add(cursor)
#self.setTween((1000,300), (400,300), 35, "EXP", 0.5, 3) # sp, ep, speed, dclMode, dclVal, dclLim
self.setTween(MSG_TWEENSTART, MSG_TWEENEND, self.speed, MSG_TWEENMODE, self.dclval, self.dcllim)
self.currentDinostein.setAndStartTween()
self.startTween()
self.end = True
self.game.setLastDinoDown(endDinoImage, endDinoCenter)
self.game.setIsEnding()
def win(self):
soundFx56.SoundPlayer.requestSound("win")
snax = BonusDelegate.snax
if self.profileName != "_user":
### case for tuts
if self.curPuzzle[:3] == "tut":
self.game.wonTutorialStage = True # tell game instance so level56 can access for returning next stage
if self.curPuzzle == LAST_TUTORIAL:
dataStorage56.modProfile(self.profileName, "tutorial", True)
puzzleData = "TUTORIAL"
else:
dataStorage56.modProfile(self.profileName, self.curPuzzle, True, snax) # modify file & and add snack to archive
puzzleData = dataStorage56.getPuzzleData(self.profileName, self.curPuzzle)
if self.curPuzzle == "gateway":
self.game.wonLastStage = True # tell game instance so level56 can access for returning flag for ending scene
else:
puzzleData = "_user"
dataStorage56.logUserMapsComplete(self.curPuzzle)
if snax:
for s in snax:
s.unregister()
line1, line2, line3 = self.generateMessage(True, puzzleData, snax)
if self.curPuzzle[:3] == "tut" and self.profileName != "_user":
# self.retryButton = EndMessageButton((-170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "next", SIZE_BTEXT, COL_BTEXT, "NEXT")
self.retryButton = EndMessageButton(
(0, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
">",
SIZE_BTEXT,
COL_BTEXT,
"NEXT"
)
line1 = ""; line2 = "Good work, lets move on"; line3 = ""
if self.curPuzzle == LAST_TUTORIAL:
line1 = "Alright, that's it for the training,"
line2 = "you're ready for the real puzzles!"
elif self.curPuzzle == "gateway" and self.profileName != "_user":
self.exitButton = EndMessageButton(
(0, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"!",
SIZE_BTEXT,
COL_BTEXT,
"EXIT"
)
line1 = "i'm so happy i could cry..."
line2 = "excellent work, you got "
line3 = "all dinos to a station!"
else:
self.retryButton = EndMessageButton(
(-self.rect.width/4 + BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"Retry (any key)",
SIZE_BTEXT,
COL_BTEXT,
"QR"
)
self.exitButton = EndMessageButton(
(self.rect.width/4 - BTTN_MIDJUST, self.rect.height/2 - self.IMG("BTTN_0").get_height()/2 - BTTN_YBUF),
self.IMG("BTTN_0"),
self.IMG("BTTN_1"),
"choose another puzzle",
SIZE_BTEXT,
COL_BTEXT,
"EXIT"
)
# self.retryButton = EndMessageButton((-170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "Quick Reset", SIZE_BTEXT, COL_BTEXT, "QR")
# self.exitButton = EndMessageButton((170,30), self.IMG("BTTN_0"), self.IMG("BTTN_1"), "Leave Area", SIZE_BTEXT, COL_BTEXT, "EXIT")
self.text1 = EndMessageText((0, MSG_TEXT_YREL1), line1, SIZE_MTEXT, COL_MTEXT)
self.text2 = EndMessageText((0, MSG_TEXT_YREL2), line2, SIZE_MTEXT, COL_MTEXT)
self.text3 = EndMessageText((0, MSG_TEXT_YREL3), line3, SIZE_MTEXT, COL_MTEXT)
cursor = EndMessageCursor([self.IMG("CURSORSTD")], True)
# dinostein ***
self.currentDinostein = dinostein.Dinostein(self._fps)
self.currentSteinFrame = dinostein.Frame(self._fps)
self.currentDinostein.addFollower(self.currentSteinFrame)
# *************
if self.curPuzzle[:3] == "tut" and self.profileName != "_user":
self.bind([self.retryButton, self.text1, self.text2, self.text3])
elif self.curPuzzle == "gateway" and self.profileName != "_user":
self.bind([self.exitButton, self.text1, self.text2, self.text3])
else:
self.bind([self.retryButton, self.exitButton, self.text1, self.text2, self.text3])
self.endMessageGroup.add(self.currentSteinFrame)
self.endMessageGroup.add(self.currentDinostein)
self.endMessageGroup.add(cursor)
self.setTween(MSG_TWEENSTART, MSG_TWEENEND, self.speed, MSG_TWEENMODE, self.dclval, self.dcllim)
self.currentDinostein.setAndStartTween()
self.startTween()
self.end = True
self.game.setIsEnding()
def blitMinions(self):
topleft = self.rect.topleft
bbtopleft = self.retryButton.rect.topleft
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
fbtopleft = self.exitButton.rect.topleft
m1topleft = self.text1.rect.topleft
m2topleft = self.text2.rect.topleft
m3topleft = self.text3.rect.topleft
bbBlitX = bbtopleft[0] - topleft[0]
bbBlitY = bbtopleft[1] - topleft[1]
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
fbBlitX = fbtopleft[0] - topleft[0]
fbBlitY = fbtopleft[1] - topleft[1]
m1BlitX = m1topleft[0] - topleft[0]
m1BlitY = m1topleft[1] - topleft[1]
m2BlitX = m2topleft[0] - topleft[0]
m2BlitY = m2topleft[1] - topleft[1]
m3BlitX = m3topleft[0] - topleft[0]
m3BlitY = m3topleft[1] - topleft[1]
self.spiralSnap = self.original.copy()
self.spiralSnap.blit(self.retryButton.image, (bbBlitX, bbBlitY))
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.spiralSnap.blit(self.exitButton.image, (fbBlitX, fbBlitY))
self.spiralSnap.blit(self.text1.image, (m1BlitX, m1BlitY))
self.spiralSnap.blit(self.text2.image, (m2BlitX, m2BlitY))
self.spiralSnap.blit(self.text3.image, (m3BlitX, m3BlitY))
self.hideRealMinions()
def hideRealMinions(self):
self.retryButton.rect.center = (2000,2000)
if self.curPuzzle[:3] != "tut" or self.profileName == "_user":
self.exitButton.rect.center = (2000,2000)
self.text1.rect.center = (2000,2000)
self.text2.rect.center = (2000,2000)
self.text3.rect.center = (2000,2000)
@staticmethod
def setIsGoingOut(isGoingOut):
if isGoingOut and not EndMessage.me.isGoingOut:
soundFx56.SoundPlayer.requestSound("woosh_a")
EndMessage.me.isGoingOutFrameCount = 0
EndMessage.me.isGoingOut = isGoingOut
EndMessage.me.blitMinions()
# dinostein
if EndMessage.me.currentDinostein:
EndMessage.me.currentDinostein.kill()
EndMessage.me.currentSteinFrame.closing = isGoingOut
@staticmethod
def quickReset():
if EndMessage.me.currentDinostein:
EndMessage.me.currentDinostein.kill()
if EndMessage.me.currentSteinFrame:
EndMessage.me.currentSteinFrame.kill()
if EndMessageCursor.me:
EndMessageCursor.me.kill()
EndMessageCursor.wipe()
EndMessage.me.reset()
EndMessage.me.image = EndMessage.me.original
EndMessage.me.rect = EndMessage.me.image.get_rect()
EndMessage.me.rect.center = OFFSCREEN
EndMessage.me.end = False
EndMessage.me.isGoingOut = False
EndMessage.me.isGoingOutFrameCount = 0
EndMessage.me.terminate = False
class EndMessageText(tween.TweenFollower):
def __init__(self, relPos, text, fontSize, fontColor):
tween.TweenFollower.__init__(self, relPos)
self.image = infoGraphic56.TextObject(text, fontSize, fontColor).image
self.rect = self.image.get_rect()
def updateEnding(self):
pass
class EndMessageButton(tween.TweenFollower):
"""
Button that belongs to EndMessage
- is a TweenFollower *** don't override moveFollower()
- returns dest when clicked
- dest can be restart, quick restart, continue (study solution), back (select screen)
"""
def __init__(self, relPos, imageOff, imageOver, text, textSize, textColor, dest):
tween.TweenFollower.__init__(self, relPos)
self.imageOff = self.makeButton(imageOff, text, textSize, textColor)
self.imageOver = self.makeButton(imageOver, text, textSize, textColor)
self.image = self.imageOff
self.rect = self.image.get_rect()
self.dest = dest
self.mouseOver = False
def update(self):
tween.TweenFollower.update(self)
self.checkCursorOver()
def updateEnding(self):
pass
def makeButton(self, image, text, textSize, textColor):
textSurf = infoGraphic56.TextObject(text, textSize, textColor).image
xBlit = (image.get_width() - textSurf.get_width())/2
yBlit = (image.get_height() - textSurf.get_height())/2
image.blit(textSurf, (xBlit, yBlit))
return image
def checkCursorOver(self):
""" if cursor over button set respective image and mouseOver """
if pygame.sprite.collide_rect(self, EndMessageCursor.me):
self.image = self.imageOver
self.mouseOver = True
else:
self.image = self.imageOff
self.mouseOver = False
def requestDest(self):
""" returns dest if mouseOver """
dest = None
if self.mouseOver:
dest = self.dest
return dest
class EndMessageCursor(pygame.sprite.Sprite):
""" cursor used during end message """
me = None
def __init__(self, frames, withTrail=False):
pygame.sprite.Sprite.__init__(self)
if len(frames) > 1:
self.hasFrames = True
self.setFrames(frames)
self.currentFrame = 0
else:
self.hasFrames = False
self.image = frames[0]
self.currentFrame = None
self.rect = pygame.rect.Rect((0,0,1,1))
self.rect.center = (-2000,2000)
self.isActive = False
self.firstCycle = True
# create sparkle trail #
########################
self.withTrail = withTrail
self.trail = None
if self.withTrail:
self.image.set_alpha(0, pygame.RLEACCEL)
self.trail = sparkleTrail.SparkleTrail(SPARK_SIZE, SPARK_COLOR, SPARK_BOUNDS, SPARK_FREQUENCY, SPARK_FADESPEED, self)
########################
########################
EndMessageCursor.me = self
def update(self):
if self.isActive:
if not self.firstCycle:
self.rect.center = pygame.mouse.get_pos()
if self.hasFrames:
self.stepFrames()
else:
self.firstCycle = False
def updateEnding(self):
pass
def getTrailGroup(self):
return self.trail.myGroup
    def stepFrames(self):
self.image = self.frames[self.currentFrame]
self.currentFrame += 1
if self.currentFrame >= len(self.frames):
self.currentFrame = 0
def setFrames(self, frames):
self.frames = frames
@staticmethod
def wipe():
EndMessageCursor.me = None
# quick fix: copied from gfx56 because of circular import
def spiralOut(spr, directionAsInt, rotateStep, scaleStep, terminateAfter, frameCount, minRatio, ORIGINAL):
""" update callback for a sprite to 'spiral out' of view in place using a set image
returns false if spiral hasn't terminated
- directionAsInt -> rotate direction: -1 for right, 1 for left
- rotateStep -> degrees to rotate every frame
- scaleStep -> degrees to scale every frame (takes positive or negative)
- terminateAfter -> returns image as None after this many frames
- frameCount -> expects an iterable count from calling environment: should inc by 1 ever call
- ORIGINAL -> should be a constant of the pre-rotated image
"""
terminate = True
if frameCount <= terminateAfter:
center = spr.rect.center
newImg = pygame.transform.scale(
ORIGINAL,
(ORIGINAL.get_width() + scaleStep*minRatio[0]*frameCount, ORIGINAL.get_height() + scaleStep*minRatio[1]*frameCount)
)
spr.image = pygame.transform.rotate(newImg, directionAsInt*rotateStep*frameCount)
spr.rect = spr.image.get_rect()
spr.rect.center = center
terminate = False
return terminate
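# Minimal usage sketch (mirrors the call made from EndMessage.update() above;
# frameCount is expected to be incremented by the caller every frame):
#
#   done = spiralOut(sprite, SPIRAL_DIRECTION, SPIRAL_ROTATESTEP,
#                    SPIRAL_SCALE_STEP, SPIRAL_TERMINATEAFTER,
#                    frameCount, sprite.minRatio, originalImage)
#   if done:
#       ...  # the spiral has finished; hide or kill the sprite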
def wipe():
EndMessage.wipe()
EndMessageCursor.wipe()
BonusDelegate.wipe()
|
mit
| -114,837,720,802,108,850
| 33.997207
| 143
| 0.559901
| false
| 3.505106
| false
| false
| false
|
DMSalesman/Nemris
|
nemris.py
|
1
|
6514
|
"""Core of the Nemris tool, APK extractor."""
import argparse
import os
# Custom modules necessary for Nemris to work
from modules import apkutils
from modules import configutils
from modules import dirutils
from modules import pkgutils
from modules import utils
##########
# Path of the configuration file and default configuration dict
config_path = os.path.dirname(os.path.abspath(__file__)) + "/nemris_config.pkl"
config = {
"aapt": "",
"nougat": None,
"dir": "",
"substratum": None,
"md5sums": []
}
##########
# Commandline args handling
ap = argparse.ArgumentParser(description = "APK file extractor.")
apps = ap.add_mutually_exclusive_group(required = True)
apps.add_argument("-u", "--user", action = "store_true", help = "user apps")
apps.add_argument("-s", "--system", action = "store_true", help = "system apps")
apps.add_argument("-d", "--disabled", action = "store_true", help = "disabled apps")
apps.add_argument("-a", "--all", action = "store_true", help = "any app")
ap.add_argument("-r", "--reset", action = "store_true", required = False, help = "reset Nemris' configuration")
ap.add_argument("--keep-overlays", action = "store_true", required = False, help = "extract Substratum overlays")
ap.add_argument("--keep-arcus", action = "store_true", required = False, help = "extract theme variants compiled with Arcus")
args = ap.parse_args()
##########
if not args.user:
if not args.all:
if args.keep_overlays or args.keep_arcus:
ap.error("one of the arguments -u/--user -a/--all is required when using --keep-overlays or --keep-arcus")
print("************************")
print(" NEMRIS - APK extractor ")
print(" 2017-09-25 ")
print(" by Death Mask Salesman ")
print("************************")
start_time = utils.get_current_time() # store current time for computing elapsed time
if args.reset:
print("[ I ] Resetting configuration...", end = " ", flush = True)
if configutils.reset_config(config_path):
print("done.\n")
else:
print("done.\n[ W ] The configuration was not present.\n")
else:
if configutils.check_config(config_path):
print("[ I ] Loading configuration...", end = " ", flush = True)
config = configutils.load_config(config_path)
print("done.\n")
# Checks for aapt and aopt (as fallback on Nougat)
if not config.get("aapt"):
print("[ I ] Checking if either aapt or aopt is present...", end = " ", flush = True)
aapt_aopt_exist = utils.check_aapt_aopt()
print("done.\n")
if aapt_aopt_exist[0]:
config["aapt"] = "/system/bin/aapt"
elif aapt_aopt_exist[1]:
config["aapt"] = "/system/bin/aopt"
elif aapt_aopt_exist[2]:
config["aapt"] = "/data/data/com.termux/files/usr/bin/aapt"
else:
print("[ F ] Neither aapt nor aopt is installed. Aborting.")
utils.save_exit(config, config_path, 1)
# Checks if the Android version is Nougat
if config.get("nougat") == None:
print("[ I ] Checking the Android version...", end = " ")
config["nougat"] = utils.check_nougat()
print("done.\n")
# Prompts user to set target dir
if not config.get("dir"):
config["dir"] = dirutils.ask_dir()
print()
(dir_exists, dir_has_apks) = dirutils.check_dir(config.get("dir"))
if not dir_exists:
print("[ I ] Creating \"{0}\"...".format(config.get("dir")), end = " ", flush = True)
dir_exists = dirutils.create_dir(config.get("dir"))
print("done.\n")
if not dir_exists:
print("[ F ] Unable to create \"{0}\". Aborting.".format(config.get("dir")))
utils.save_exit(config, config_path, 1)
# Creates a MD5 list to speed up subsequent executions
if not config.get("md5sums"):
if dir_has_apks:
print("[ I ] Generating MD5 checksums...", end = " ", flush = True)
config["md5sums"] = dirutils.calculate_md5(config.get("dir"))
print("done.\n")
# Creates an optimized APK/path dictionary to avoid the sluggish "pm path"
print("[ I ] Creating paths dictionary...", end = " ", flush = True)
pkgdict = pkgutils.create_pkgdict()
print("done.\n")
if not pkgdict:
print("[ F ] Unable to create paths dictionary. Aborting.")
utils.save_exit(config, config_path, 1)
if config.get("nougat") == True:
pkgs = pkgutils.list_installed_pkgs_nougat(args)
    if not pkgs: config["nougat"] = False
if config.get("nougat") == False:
pkgs = pkgutils.list_installed_pkgs(args)
if not args.keep_overlays:
if config.get("substratum") == None:
config["substratum"] = pkgutils.check_substratum(config.get("nougat"))
if config.get("substratum"):
print("[ I ] Excluding Substratum overlays...", end = " ", flush = True)
pkgutils.exclude_overlays(config.get("aapt"), pkgdict, pkgs)
print("done.\n")
if not args.keep_arcus and not config.get("substratum"):
print("[ I ] Excluding Arcus theme variants...", end = " ", flush = True)
pkgutils.exclude_arcus_variants(pkgs)
print("done.\n")
# Extract APKs to the target directory and append MD5 checksums to MD5 list
print("[ I ] Extracting previously unextracted packages...", end = " ", flush = True)
n_extracted = 0
n_ignored = 0
extracted = []
for i in pkgs:
pkgpath = pkgdict.get(i)
(already_extracted, pkgsum) = pkgutils.check_already_extracted(pkgpath, config.get("md5sums"))
if already_extracted:
n_ignored += 1
else:
(out, err) = apkutils.get_pkginfo(config.get("aapt"), pkgpath)
pkginfo = out.decode("utf-8")
pkgname = apkutils.get_pkgname(pkginfo)
pkgver = apkutils.get_pkgver(pkginfo)
dest = "{0}/{1}_{2}.apk".format(config.get("dir"), pkgname, pkgver)
dirutils.extract(pkgpath, dest)
config["md5sums"].append(pkgsum)
extracted.append(pkgname)
n_extracted += 1
print("done.")
elapsed_time = int(utils.get_current_time() - start_time)
extracted.sort()
print("\n[ I ] Operations completed in {0} hours, {1} minutes and {2} seconds.".format(elapsed_time // 3600, elapsed_time // 60 % 60, elapsed_time % 60))
if extracted:
print("\n[ I ] Extracted packages:")
for i in extracted:
print(" - {0}".format(i))
print("\n[ I ] Extracted: {0} | Ignored: {1}".format(n_extracted, n_ignored))
print("[ I ] Goodbye!")
utils.save_exit(config, config_path, 0)
|
unlicense
| -1,698,508,673,163,878,700
| 30.317308
| 166
| 0.618514
| false
| 3.335381
| true
| false
| false
|
Chiheb-Nexus/Calculatrice-PyGtk
|
pango_fonts.py
|
1
|
3186
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Calculatrice PyGtk
#
# Copyright 2014 Chiheb Nexus
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
################################################################################
from gi.repository import Gtk,Pango
class PyApp(Gtk.Window):
def __init__(self,widget):
Gtk.Window.__init__(self,title="Choisir un font")
self.set_resizable(False) # Fixed-size window
#self.set_size_request(350, 250)
self.set_border_width(8)
self.set_icon_from_file("images/icon.png")
context = self.create_pango_context()
self.fam = context.list_families()
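# list_families() returns the Pango font families available on the system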
self.combo = Gtk.ComboBoxText() # A ComboBoxText that holds the font families
self.size = Gtk.ComboBoxText() # A ComboBoxText that holds the font sizes
label_font = Gtk.Label("Veuillez choisir un font")
label_size = Gtk.Label("Veuillez choisir la taille")
label_default = Gtk.Label("Font par défaut : Ubuntu | Taille par défaut : 17")
for ff in self.fam:
self.combo.append_text(ff.get_name()) # Add each available font family to the ComboBoxText
for ss in range(31):
self.size.append_text(str(ss)) # Add sizes 0 to 30 to the ComboBoxText
button = Gtk.Button("Valider")
button2 = Gtk.Button("Annuler")
button2.connect("clicked",self.annuler)
button.connect("clicked",self.get_status)
vbox = Gtk.VBox()
hbox = Gtk.HBox()
vbox.pack_start(label_font,False,False,0)
vbox.pack_start(self.combo,False,False,0)
vbox.pack_start(label_size,False,False,0)
vbox.pack_start(self.size,False,False,0)
vbox.pack_start(label_default,False,False,0)
hbox.pack_start(button2,True,True,0)
hbox.pack_start(button,True,True,0)
vbox.pack_end(hbox,True,False,0)
self.add(vbox)
self.set_position(Gtk.WindowPosition.CENTER)
self.show_all()
def get_status(self,widget):
"Selected font and size"
# PyApp.font and PyApp.taille are two class attributes
# that can be used by the other classes
PyApp.font = self.combo.get_active_text()
PyApp.taille = self.size.get_active_text()
self.destroy() # Destroy the window
def annuler(self,widget):
"Cancel the font and size selection"
self.destroy()
|
gpl-3.0
| 2,591,903,167,614,648,300
| 35.895349
| 106
| 0.624645
| false
| 3.490649
| false
| false
| false
|
pavel-paulau/perfrunner
|
perfrunner/tests/dcp.py
|
1
|
1670
|
from perfrunner.helpers import local
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.tests import PerfTest
class DCPThroughputTest(PerfTest):
def _report_kpi(self, time_elapsed: float):
self.reporter.post(
*self.metrics.dcp_throughput(time_elapsed)
)
@with_stats
@timeit
def access(self, *args):
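# Run dcptest against every target; @timeit returns the elapsed time and @with_stats collects monitoring data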
username, password = self.cluster_spec.rest_credentials
for target in self.target_iterator:
local.run_dcptest(
host=target.node,
username=username,
password=password,
bucket=target.bucket,
num_items=self.test_config.load_settings.items,
num_connections=self.test_config.dcp_settings.num_connections
)
def run(self):
self.load()
self.wait_for_persistence()
time_elapsed = self.access()
self.report_kpi(time_elapsed)
class JavaDCPThroughputTest(DCPThroughputTest):
def init_java_dcp_client(self):
local.clone_git_repo(repo=self.test_config.java_dcp_settings.repo,
branch=self.test_config.java_dcp_settings.branch)
local.build_java_dcp_client()
@with_stats
@timeit
def access(self, *args):
for target in self.target_iterator:
local.run_java_dcp_client(
connection_string=target.connection_string,
messages=self.test_config.load_settings.items,
config_file=self.test_config.java_dcp_settings.config,
)
def run(self):
self.init_java_dcp_client()
super().run()
|
apache-2.0
| -4,941,375,111,631,714,000
| 28.298246
| 78
| 0.610778
| false
| 3.830275
| true
| false
| false
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/suit/DistributedSuitBaseAI.py
|
1
|
4836
|
from otp.ai.AIBaseGlobal import *
from otp.avatar import DistributedAvatarAI
import SuitPlannerBase
import SuitBase
import SuitDNA
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import SuitBattleGlobals
class DistributedSuitBaseAI(DistributedAvatarAI.DistributedAvatarAI, SuitBase.SuitBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuitBaseAI')
def __init__(self, air, suitPlanner):
DistributedAvatarAI.DistributedAvatarAI.__init__(self, air)
SuitBase.SuitBase.__init__(self)
self.sp = suitPlanner
self.maxHP = 10
self.currHP = 10
self.zoneId = 0
self.dna = SuitDNA.SuitDNA()
self.virtual = 0
self.skeleRevives = 0
self.maxSkeleRevives = 0
self.reviveFlag = 0
self.buildingHeight = None
return
def generate(self):
DistributedAvatarAI.DistributedAvatarAI.generate(self)
def delete(self):
self.sp = None
del self.dna
DistributedAvatarAI.DistributedAvatarAI.delete(self)
return
def requestRemoval(self):
if self.sp != None:
self.sp.removeSuit(self)
else:
self.requestDelete()
return
def setLevel(self, lvl = None):
attributes = SuitBattleGlobals.SuitAttributes[self.dna.name]
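# lvl is an absolute cog level; self.level is stored as an offset from the suit type's base level and indexes the per-type 'hp'/'freq' tables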
if lvl:
self.level = lvl - attributes['level'] - 1
else:
self.level = SuitBattleGlobals.pickFromFreqList(attributes['freq'])
self.notify.debug('Assigning level ' + str(lvl))
if hasattr(self, 'doId'):
self.d_setLevelDist(self.level)
hp = attributes['hp'][self.level]
self.maxHP = hp
self.currHP = hp
def getLevelDist(self):
return self.getLevel()
def d_setLevelDist(self, level):
self.sendUpdate('setLevelDist', [level])
def setupSuitDNA(self, level, type, track):
dna = SuitDNA.SuitDNA()
dna.newSuitRandom(type, track)
self.dna = dna
self.track = track
self.setLevel(level)
return None
def getDNAString(self):
if self.dna:
return self.dna.makeNetString()
else:
self.notify.debug('No dna has been created for suit %d!' % self.getDoId())
return ''
def b_setBrushOff(self, index):
self.setBrushOff(index)
self.d_setBrushOff(index)
return None
def d_setBrushOff(self, index):
self.sendUpdate('setBrushOff', [index])
def setBrushOff(self, index):
pass
def d_denyBattle(self, toonId):
self.sendUpdateToAvatarId(toonId, 'denyBattle', [])
def b_setSkeleRevives(self, num):
if num == None:
num = 0
self.setSkeleRevives(num)
self.d_setSkeleRevives(self.getSkeleRevives())
return
def d_setSkeleRevives(self, num):
self.sendUpdate('setSkeleRevives', [num])
def getSkeleRevives(self):
return self.skeleRevives
def setSkeleRevives(self, num):
if num == None:
num = 0
self.skeleRevives = num
if num > self.maxSkeleRevives:
self.maxSkeleRevives = num
return
def getMaxSkeleRevives(self):
return self.maxSkeleRevives
def useSkeleRevive(self):
self.skeleRevives -= 1
self.currHP = self.maxHP
self.reviveFlag = 1
def reviveCheckAndClear(self):
returnValue = 0
if self.reviveFlag == 1:
returnValue = 1
self.reviveFlag = 0
return returnValue
def getHP(self):
return self.currHP
def setHP(self, hp):
if hp > self.maxHP:
self.currHP = self.maxHP
else:
self.currHP = hp
return None
def b_setHP(self, hp):
self.setHP(hp)
self.d_setHP(hp)
def d_setHP(self, hp):
self.sendUpdate('setHP', [hp])
def releaseControl(self):
return None
def getDeathEvent(self):
return 'cogDead-%s' % self.doId
def resume(self):
self.notify.debug('resume, hp=%s' % self.currHP)
if self.currHP <= 0:
messenger.send(self.getDeathEvent())
self.requestRemoval()
return None
def prepareToJoinBattle(self):
pass
def b_setSkelecog(self, flag):
self.setSkelecog(flag)
self.d_setSkelecog(flag)
def setSkelecog(self, flag):
SuitBase.SuitBase.setSkelecog(self, flag)
def d_setSkelecog(self, flag):
self.sendUpdate('setSkelecog', [flag])
def isForeman(self):
return 0
def isSupervisor(self):
return 0
def setVirtual(self, virtual):
pass
def getVirtual(self):
return 0
def isVirtual(self):
return self.getVirtual()
|
mit
| -5,873,388,987,407,674,000
| 25.431694
| 88
| 0.605873
| false
| 3.519651
| false
| false
| false
|
JMoravec/unkRadnet
|
fitToCurve/pyeq2/Models_3D/Trigonometric.py
|
1
|
38712
|
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2012 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
# web: http://zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: Trigonometric.py 1 2012-01-07 22:20:43Z zunzun.com@gmail.com $
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(over = 'raise', divide = 'raise', invalid = 'raise', under = 'ignore') # numpy raises warnings, convert to exceptions to trap them
import pyeq2.Model_3D_BaseClass
class CoshA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh A [radians]"
_HTML = 'z = a * cosh(x) + b * cosh(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
CoshX = inDataCacheDictionary['CoshX'] # only need to perform this dictionary look-up once
CoshY = inDataCacheDictionary['CoshY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * CoshX + b * CoshY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(x_in) + b * cosh(y_in);\n"
return s
class CoshA_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh A [radians] Transform"
_HTML = 'z = a * cosh(bx+c) + d * cosh(fy+g)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.cosh(b * x_in + c) + d * numpy.cosh(f * y_in + g)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(b * x_in + c) + d * cosh(f * y_in + g);\n"
return s
class CoshB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh B [radians]"
_HTML = 'z = a * cosh(x) * cosh(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshXCoshY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
CoshXCoshY = inDataCacheDictionary['CoshXCoshY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * CoshXCoshY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(x_in) * cosh(y_in);\n"
return s
class CoshB_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh B [radians] Transform"
_HTML = 'z = a * cosh(bx+c) * cosh(dy+f)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.cosh(b * x_in + c) * numpy.cosh(d * y_in + f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(b * x_in + c) * cosh(f * y_in + g);\n"
return s
class CoshXY(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh XY [radians]"
_HTML = 'z = a * cosh(xy)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.CoshXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
CoshXY = inDataCacheDictionary['CoshXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * CoshXY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(x_in * y_in);\n"
return s
class CoshXYTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Cosh XY [radians] Transform"
_HTML = 'z = a * cosh(b * xy + c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.cosh(b * XY + c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * cosh(b * x_in * y_in + c);\n"
return s
class RezaCustomOne(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Reza's Custom Equation One [radians]"
_HTML = 'z = (cos(a*x - b*y) + sin(c*x - d*y))<sup>n</sup> - (cos(f*x - g*y) + sin(h*x- i*y))<sup>n</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h', 'i', 'n']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
i = inCoeffs[7]
n = inCoeffs[8]
try:
temp = numpy.power(numpy.cos(a*x_in - b*y_in) + numpy.sin(c*x_in - d*y_in), n)
temp -= numpy.power(numpy.cos(f*x_in - g*y_in) + numpy.sin(h*x_in - i*y_in), n)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = pow(cos(a*x_in - b*y_in) + sin(c*x_in - d*y_in), n);\n"
s += "\ttemp -= pow(cos(f*x_in - g*y_in) + sin(h*x_in - i*y_in), n);\n"
return s
class RezaCustomTwo(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Reza's Custom Equation Two [radians]"
_HTML = 'z = abs(cos((A*(x+B)) + C*(y+D))) + abs(cos((A*(x+B)) - C*(y+D))) - (sin(E*x+F))<sup>2</sup> - (sin(E*y+G))<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
A = inCoeffs[0]
B = inCoeffs[1]
C = inCoeffs[2]
D = inCoeffs[3]
E = inCoeffs[4]
F = inCoeffs[5]
G = inCoeffs[6]
try:
temp = abs(numpy.cos((A*(x_in+B)) + C*(y_in+D))) + abs(numpy.cos((A*(x_in+B)) - C*(y_in+D))) - numpy.power(numpy.sin(E*x_in+F), 2.0) - numpy.power(numpy.sin(E*y_in+G), 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = abs(cos((A*(x_in+B)) + C*(y_in+D))) + abs(cos((A*(x_in+B)) - C*(y_in+D))) - pow(sin(E*x_in+F), 2.0) - pow(sin(E*y_in+G), 2.0);\n"
return s
class SineA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine A [radians]"
_HTML = 'z = a * sin(x) + b * sin(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
SinX = inDataCacheDictionary['SinX'] # only need to perform this dictionary look-up once
SinY = inDataCacheDictionary['SinY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * SinX + b * SinY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(x_in) + b * sin(y_in);\n"
return s
class SineA_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine A [radians] Transform"
_HTML = 'z = a * sin(bx+c) + d * sin(fy+g)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.sin(b * x_in + c) + d * numpy.sin(f * y_in + g)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(b * x_in + c) + d * sin(f * y_in + g);\n"
return s
class SineB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine B [radians]"
_HTML = 'z = a * sin(x) * sin(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinXSinY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
SinXSinY = inDataCacheDictionary['SinXSinY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * SinXSinY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(x_in) * sin(y_in);\n"
return s
class SineB_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine B [radians] Transform"
_HTML = 'z = a * sin(bx+c) * sin(dy+f)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.sin(b * x_in + c) * numpy.sin(d * y_in + f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(b * x_in + c) * sin(d * y_in + f);\n"
return s
class SineXY(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine XY [radians]"
_HTML = 'z = a * sin(xy)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.SinXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
SinXY = inDataCacheDictionary['SinXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * SinXY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(x_in * y_in);\n"
return s
class SineXYTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Sine XY [radians] Transform"
_HTML = 'z = a * sin(b * xy + c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.sin(b * XY + c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * sin(b * x_in * y_in + c);\n"
return s
class TanA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan A [radians]"
_HTML = 'z = a * tan(x) + b * tan(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
TanX = inDataCacheDictionary['TanX'] # only need to perform this dictionary look-up once
TanY = inDataCacheDictionary['TanY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * TanX + b * TanY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(x_in) + b * tan(y_in);\n"
return s
class TanATransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan A [radians] Transform"
_HTML = 'z = a * tan(bx + c) + d * tan(fy + g)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.tan(b * x_in + c) + d * numpy.tan(f * y_in + g)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(b * x_in + c) + d * tan(f * y_in + g);\n"
return s
class TanB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan B [radians]"
_HTML = 'z = a * tan(x) * tan(y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanXTanY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
TanXTanY = inDataCacheDictionary['TanXTanY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * TanXTanY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(x_in) * tan(y_in);\n"
return s
class TanBTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan B [radians] Transform"
_HTML = 'z = a * tan(bx + c) * tan(dy + f)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * numpy.tan(b * x_in + c) * numpy.tan(d * y_in + f)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(b * x_in + c) * tan(d * y_in + f);\n"
return s
class TanXY(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan XY [radians]"
_HTML = 'z = a * tan(xy)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = True
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.TanXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
TanXY = inDataCacheDictionary['TanXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * TanXY
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(x_in * y_in);\n"
return s
class TanXYTransform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Tan XY [radians] Transform"
_HTML = 'z = a * tan(b * xy + c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = False
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.tan(b * XY + c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * tan(b * x_in * y_in + c);\n"
return s
|
bsd-3-clause
| -6,684,742,983,431,546,000
| 35.624409
| 184
| 0.692524
| false
| 3.622684
| false
| false
| false
|
lukaszb/django-guardian
|
example_project/articles/models.py
|
2
|
1851
|
from django.db import models
from django.urls import reverse
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
class Article(models.Model):
title = models.CharField('title', max_length=64)
slug = models.SlugField(max_length=64)
content = models.TextField('content')
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
class Meta:
default_permissions = ('add', 'change', 'delete')
permissions = (
('view_article', 'Can view article'),
)
get_latest_by = 'created_at'
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('articles:details', kwargs={'slug': self.slug})
class ArticleUserObjectPermission(UserObjectPermissionBase):
content_object = models.ForeignKey(Article, on_delete=models.CASCADE)
class ArticleGroupObjectPermission(GroupObjectPermissionBase):
content_object = models.ForeignKey(Article, on_delete=models.CASCADE)
from guardian.models import UserObjectPermissionAbstract, GroupObjectPermissionAbstract
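# Object permission models backed by BigAutoField primary keys, built on the abstract base classes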
class BigUserObjectPermission(UserObjectPermissionAbstract):
id = models.BigAutoField(editable=False, unique=True, primary_key=True)
class Meta(UserObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*UserObjectPermissionAbstract.Meta.indexes,
models.Index(fields=['content_type', 'object_pk', 'user']),
]
class BigGroupObjectPermission(GroupObjectPermissionAbstract):
id = models.BigAutoField(editable=False, unique=True, primary_key=True)
class Meta(GroupObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*GroupObjectPermissionAbstract.Meta.indexes,
models.Index(fields=['content_type', 'object_pk', 'group']),
]
|
bsd-2-clause
| -4,053,829,553,794,031,000
| 32.053571
| 87
| 0.705024
| false
| 4.226027
| false
| false
| false
|
nitely/Spirit
|
spirit/category/migrations/0001_initial.py
|
1
|
1333
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import spirit.core.utils.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('title', models.CharField(verbose_name='title', max_length=75)),
('slug', spirit.core.utils.models.AutoSlugField(db_index=False, populate_from='title', blank=True)),
('description', models.CharField(verbose_name='description', max_length=255, blank=True)),
('is_closed', models.BooleanField(verbose_name='closed', default=False)),
('is_removed', models.BooleanField(verbose_name='removed', default=False)),
('is_private', models.BooleanField(verbose_name='private', default=False)),
('parent', models.ForeignKey(null=True, verbose_name='category parent', to='spirit_category.Category', blank=True, on_delete=models.CASCADE)),
],
options={
'ordering': ['title', 'pk'],
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
),
]
|
mit
| -5,217,428,606,251,673,000
| 43.433333
| 158
| 0.582146
| false
| 4.34202
| false
| false
| false
|
ganeti-github-testing/ganeti-test-1
|
lib/rpc_defs.py
|
1
|
31147
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""RPC definitions for communication between master and node daemons.
RPC definition fields:
- Name as string
- L{SINGLE} for single-node calls, L{MULTI} for multi-node
- Name resolver option(s), can be callable receiving all arguments in a tuple
- Timeout (e.g. L{constants.RPC_TMO_NORMAL}), or callback receiving all
arguments in a tuple to calculate timeout
- List of arguments as tuples
- Name as string
- Argument kind used for encoding/decoding
- Description for docstring (can be C{None})
- Custom body encoder (e.g. for preparing per-node bodies)
- Return value wrapper (e.g. for deserializing into L{objects}-based objects)
- Short call description for docstring
"""
from ganeti import constants
from ganeti import utils
from ganeti import objects
# Guidelines for choosing timeouts:
# - call used during watcher: timeout of 1min, constants.RPC_TMO_URGENT
# - trivial (but be sure it is trivial)
# (e.g. reading a file): 5min, constants.RPC_TMO_FAST
# - other calls: 15 min, constants.RPC_TMO_NORMAL
# - special calls (instance add, etc.):
# either constants.RPC_TMO_SLOW (1h) or huge timeouts
SINGLE = "single-node"
MULTI = "multi-node"
ACCEPT_OFFLINE_NODE = object()
# Constants for encoding/decoding
(ED_OBJECT_DICT,
ED_OBJECT_DICT_LIST,
ED_INST_DICT,
ED_INST_DICT_HVP_BEP_DP,
ED_NODE_TO_DISK_DICT_DP,
ED_INST_DICT_OSP_DP,
ED_IMPEXP_IO,
ED_FILE_DETAILS,
ED_FINALIZE_EXPORT_DISKS,
ED_COMPRESS,
ED_BLOCKDEV_RENAME,
ED_DISKS_DICT_DP,
ED_MULTI_DISKS_DICT_DP,
ED_SINGLE_DISK_DICT_DP,
ED_NIC_DICT,
ED_DEVICE_DICT) = range(1, 17)
def _Prepare(calls):
"""Converts list of calls to dictionary.
"""
return utils.SequenceToDict(calls)
def _MigrationStatusPostProc(result):
"""Post-processor for L{rpc.node.RpcRunner.call_instance_get_migration_status}
"""
if not result.fail_msg and result.payload is not None:
result.payload = objects.MigrationStatus.FromDict(result.payload)
return result
def _BlockdevFindPostProc(result):
"""Post-processor for L{rpc.node.RpcRunner.call_blockdev_find}.
"""
if not result.fail_msg and result.payload is not None:
result.payload = objects.BlockDevStatus.FromDict(result.payload)
return result
def _BlockdevGetMirrorStatusPostProc(result):
"""Post-processor for call_blockdev_getmirrorstatus.
"""
if not result.fail_msg:
result.payload = map(objects.BlockDevStatus.FromDict, result.payload)
return result
def _BlockdevGetMirrorStatusMultiPreProc(node, args):
"""Prepares the appropriate node values for blockdev_getmirrorstatus_multi.
"""
# there should be only one argument to this RPC, already holding a
# node->disks dictionary, we just need to extract the value for the
# current node
assert len(args) == 1
return [args[0][node]]
def _BlockdevGetMirrorStatusMultiPostProc(result):
"""Post-processor for call_blockdev_getmirrorstatus_multi.
"""
if not result.fail_msg:
for idx, (success, status) in enumerate(result.payload):
if success:
result.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
return result
def _NodeInfoPreProc(node, args):
"""Prepare the storage_units argument for node_info calls."""
assert len(args) == 2
# The storage_units argument is either a dictionary with one value for each
# node, or a fixed value to be used for all the nodes
if type(args[0]) is dict:
return [args[0][node], args[1]]
else:
return args
def _ImpExpStatusPostProc(result):
"""Post-processor for import/export status.
@rtype: Payload containing list of L{objects.ImportExportStatus} instances
@return: Returns a list of the state of each named import/export or None if
a status couldn't be retrieved
"""
if not result.fail_msg:
decoded = []
for i in result.payload:
if i is None:
decoded.append(None)
continue
decoded.append(objects.ImportExportStatus.FromDict(i))
result.payload = decoded
return result
def _TestDelayTimeout((duration, )):
"""Calculate timeout for "test_delay" RPC.
"""
return int(duration + 5)
_FILE_STORAGE_CALLS = [
("file_storage_dir_create", SINGLE, None, constants.RPC_TMO_FAST, [
("file_storage_dir", None, "File storage directory"),
], None, None, "Create the given file storage directory"),
("file_storage_dir_remove", SINGLE, None, constants.RPC_TMO_FAST, [
("file_storage_dir", None, "File storage directory"),
], None, None, "Remove the given file storage directory"),
("file_storage_dir_rename", SINGLE, None, constants.RPC_TMO_FAST, [
("old_file_storage_dir", None, "Old name"),
("new_file_storage_dir", None, "New name"),
], None, None, "Rename file storage directory"),
]
_STORAGE_CALLS = [
("storage_list", MULTI, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("fields", None, None),
], None, None, "Get list of storage units"),
("storage_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("changes", None, None),
], None, None, "Modify a storage unit"),
("storage_execute", SINGLE, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("op", None, None),
], None, None, "Executes an operation on a storage unit"),
]
_INSTANCE_CALLS = [
("instance_info", SINGLE, None, constants.RPC_TMO_URGENT, [
("instance", None, "Instance name"),
("hname", None, "Hypervisor type"),
("hvparams", None, "Hypervisor parameters"),
], None, None, "Returns information about a single instance"),
("all_instances_info", MULTI, None, constants.RPC_TMO_URGENT, [
("hypervisor_list", None, "Hypervisors to query for instances"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
], None, None,
"Returns information about all instances on the given nodes"),
("instance_list", MULTI, None, constants.RPC_TMO_URGENT, [
("hypervisor_list", None, "Hypervisors to query for instances"),
("hvparams", None, "Hvparams of all hypervisors"),
], None, None, "Returns the list of running instances on the given nodes"),
("instance_reboot", SINGLE, None, constants.RPC_TMO_NORMAL, [
("inst", ED_INST_DICT, "Instance object"),
("reboot_type", None, None),
("shutdown_timeout", None, None),
("reason", None, "The reason for the reboot"),
], None, None, "Returns the list of running instances on the given nodes"),
("instance_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("timeout", None, None),
("reason", None, "The reason for the shutdown"),
], None, None, "Stops an instance"),
("instance_balloon_memory", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("memory", None, None),
], None, None, "Modify the amount of an instance's runtime memory"),
("instance_run_rename", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
("old_name", None, None),
("debug", None, None),
], None, None, "Run the OS rename script for an instance"),
("instance_migratable", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None, "Checks whether the given instance can be migrated"),
("migration_info", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None,
"Gather the information necessary to prepare an instance migration"),
("accept_instance", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("info", None, "Result for the call_migration_info call"),
("target", None, "Target hostname (usually an IP address)"),
], None, None, "Prepare a node to accept an instance"),
("instance_finalize_migration_dst", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("info", None, "Result for the call_migration_info call"),
("success", None, "Whether the migration was a success or failure"),
], None, None, "Finalize any target-node migration specific operation"),
("instance_migrate", SINGLE, None, constants.RPC_TMO_SLOW, [
("cluster_name", None, "Cluster name"),
("instance", ED_INST_DICT, "Instance object"),
("target", None, "Target node name"),
("live", None, "Whether the migration should be done live or not"),
], None, None, "Migrate an instance"),
("instance_finalize_migration_src", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
("success", None, "Whether the migration succeeded or not"),
("live", None, "Whether the user requested a live migration or not"),
], None, None, "Finalize the instance migration on the source node"),
("instance_get_migration_status", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
], None, _MigrationStatusPostProc, "Report migration status"),
("instance_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance_hvp_bep", ED_INST_DICT_HVP_BEP_DP, None),
("startup_paused", None, None),
("reason", None, "The reason for the startup"),
], None, None, "Starts an instance"),
("instance_os_add", SINGLE, None, constants.RPC_TMO_1DAY, [
("instance_osp", ED_INST_DICT_OSP_DP, "Tuple: (target instance,"
" temporary OS parameters"
" overriding configuration)"),
("reinstall", None, "Whether the instance is being reinstalled"),
("debug", None, "Debug level for the OS install script to use"),
], None, None, "Installs an operative system onto an instance"),
("hotplug_device", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("action", None, "Hotplug Action"),
("dev_type", None, "Device type"),
("device", ED_DEVICE_DICT, "Device dict"),
("extra", None, "Extra info for device (dev_path for disk)"),
("seq", None, "Device seq"),
], None, None, "Hoplug a device to a running instance"),
("hotplug_supported", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None, "Check if hotplug is supported"),
("instance_metadata_modify", SINGLE, None, constants.RPC_TMO_URGENT, [
("instance", None, "Instance object"),
], None, None, "Modify instance metadata"),
]
_IMPEXP_CALLS = [
("import_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("opts", ED_OBJECT_DICT, None),
("instance", ED_INST_DICT, None),
("component", None, None),
("dest", ED_IMPEXP_IO, "Import destination"),
], None, None, "Starts an import daemon"),
("export_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("opts", ED_OBJECT_DICT, None),
("host", None, None),
("port", None, None),
("instance", ED_INST_DICT, None),
("component", None, None),
("source", ED_IMPEXP_IO, "Export source"),
], None, None, "Starts an export daemon"),
("impexp_status", SINGLE, None, constants.RPC_TMO_FAST, [
("names", None, "Import/export names"),
], None, _ImpExpStatusPostProc, "Gets the status of an import or export"),
("impexp_abort", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Import/export name"),
], None, None, "Aborts an import or export"),
("impexp_cleanup", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Import/export name"),
], None, None, "Cleans up after an import or export"),
("export_info", SINGLE, None, constants.RPC_TMO_FAST, [
("path", None, None),
], None, None, "Queries the export information in a given path"),
("finalize_export", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, None),
("snap_disks", ED_FINALIZE_EXPORT_DISKS, None),
], None, None, "Request the completion of an export operation"),
("export_list", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets the stored exports list"),
("export_remove", SINGLE, None, constants.RPC_TMO_FAST, [
("export", None, None),
], None, None, "Requests removal of a given export"),
]
_X509_CALLS = [
("x509_cert_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
("validity", None, "Validity in seconds"),
], None, None, "Creates a new X509 certificate for SSL/TLS"),
("x509_cert_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Certificate name"),
], None, None, "Removes a X509 certificate"),
]
_BLOCKDEV_CALLS = [
("bdev_sizes", MULTI, None, constants.RPC_TMO_URGENT, [
("devices", None, None),
], None, None,
"Gets the sizes of requested block devices present on a node"),
("blockdev_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("size", None, None),
("owner", None, None),
("on_primary", None, None),
("info", None, None),
("exclusive_storage", None, None),
], None, None, "Request creation of a given block device"),
("blockdev_convert", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev_src", ED_SINGLE_DISK_DICT_DP, None),
("bdev_dest", ED_SINGLE_DISK_DICT_DP, None),
], None, None,
"Request the copy of the source block device to the destination one"),
("blockdev_image", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("image", None, None),
("size", None, None),
], None, None,
"Request to dump an image with given size onto a block device"),
("blockdev_wipe", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("offset", None, None),
("size", None, None),
], None, None,
"Request wipe at given offset with given size of a block device"),
("blockdev_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
], None, None, "Request removal of a given block device"),
("blockdev_pause_resume_sync", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
("pause", None, None),
], None, None, "Request a pause/resume of given block device"),
("blockdev_assemble", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
("instance", ED_INST_DICT, None),
("on_primary", None, None),
("idx", None, None),
], None, None, "Request assembling of a given block device"),
("blockdev_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
], None, None, "Request shutdown of a given block device"),
("blockdev_addchildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("ndevs", ED_DISKS_DICT_DP, None),
], None, None,
"Request adding a list of children to a (mirroring) device"),
("blockdev_removechildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("ndevs", ED_DISKS_DICT_DP, None),
], None, None,
"Request removing a list of children from a (mirroring) device"),
("blockdev_close", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance_name", None, None),
("disks", ED_DISKS_DICT_DP, None),
], None, None, "Closes the given block devices"),
("blockdev_getdimensions", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_MULTI_DISKS_DICT_DP, None),
], None, None, "Returns size and spindles of the given disks"),
("drbd_disconnect_net", MULTI, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
], None, None,
"Disconnects the network of the given drbd devices"),
("drbd_attach_net", MULTI, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
("instance_name", None, None),
("multimaster", None, None),
], None, None, "Connects the given DRBD devices"),
("drbd_wait_sync", MULTI, None, constants.RPC_TMO_SLOW, [
("disks", ED_DISKS_DICT_DP, None),
], None, None,
"Waits for the synchronization of drbd devices is complete"),
("drbd_needs_activation", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_MULTI_DISKS_DICT_DP, None),
], None, None,
"Returns the drbd disks which need activation"),
("blockdev_grow", SINGLE, None, constants.RPC_TMO_NORMAL, [
("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
("amount", None, None),
("dryrun", None, None),
("backingstore", None, None),
("es_flag", None, None),
], None, None, "Request growing of the given block device by a"
" given amount"),
("blockdev_snapshot", SINGLE, None, constants.RPC_TMO_NORMAL, [
("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
("snap_name", None, None),
("snap_size", None, None),
], None, None, "Export a given disk to another node"),
("blockdev_rename", SINGLE, None, constants.RPC_TMO_NORMAL, [
("devlist", ED_BLOCKDEV_RENAME, None),
], None, None, "Request rename of the given block devices"),
("blockdev_find", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
], None, _BlockdevFindPostProc,
"Request identification of a given block device"),
("blockdev_getmirrorstatus", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
], None, _BlockdevGetMirrorStatusPostProc,
"Request status of a (mirroring) device"),
("blockdev_getmirrorstatus_multi", MULTI, None, constants.RPC_TMO_NORMAL, [
("node_disks", ED_NODE_TO_DISK_DICT_DP, None),
], _BlockdevGetMirrorStatusMultiPreProc,
_BlockdevGetMirrorStatusMultiPostProc,
"Request status of (mirroring) devices from multiple nodes"),
("blockdev_setinfo", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
("info", None, None),
], None, None, "Sets metadata information on a given block device"),
]
_OS_CALLS = [
("os_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Request a diagnose of OS definitions"),
("os_validate", MULTI, None, constants.RPC_TMO_FAST, [
("required", None, None),
("name", None, None),
("checks", None, None),
("params", None, None),
("force_variant", None, None),
], None, None, "Run a validation routine for a given OS"),
("os_export", SINGLE, None, constants.RPC_TMO_FAST, [
("instance", ED_INST_DICT, None),
("override_env", None, None),
], None, None, "Export an OS for a given instance"),
]
_EXTSTORAGE_CALLS = [
("extstorage_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Request a diagnose of ExtStorage Providers"),
]
_NODE_CALLS = [
("node_has_ip_address", SINGLE, None, constants.RPC_TMO_FAST, [
("address", None, "IP address"),
], None, None, "Checks if a node has the given IP address"),
("node_info", MULTI, None, constants.RPC_TMO_URGENT, [
("storage_units", None,
"List of tuples '<storage_type>,<key>,[<param>]' to ask for disk space"
" information; the parameter list varies depending on the storage_type"),
("hv_specs", None,
"List of hypervisor specification (name, hvparams) to ask for node "
"information"),
], _NodeInfoPreProc, None, "Return node information"),
("node_verify", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets all volumes on node(s)"),
("node_demote_from_mc", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
"Demote a node from the master candidate role"),
("node_powercycle", SINGLE, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_NORMAL, [
("hypervisor", None, "Hypervisor type"),
("hvparams", None, "Hypervisor parameters"),
], None, None, "Tries to powercycle a node"),
("node_configure_ovs", SINGLE, None, constants.RPC_TMO_NORMAL, [
("ovs_name", None, "Name of the OpenvSwitch to create"),
("ovs_link", None, "Link of the OpenvSwitch to the outside"),
], None, None, "This will create and setup the OpenvSwitch"),
("node_crypto_tokens", SINGLE, None, constants.RPC_TMO_NORMAL, [
("token_request", None,
"List of tuples of requested crypto token types, actions"),
], None, None, "Handle crypto tokens of the node."),
("node_ensure_daemon", MULTI, None, constants.RPC_TMO_URGENT, [
("daemon", None, "Daemon name"),
("run", None, "Whether the daemon should be running or stopped"),
], None, None, "Ensure daemon is running on the node."),
("node_ssh_key_add", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is distributed"),
("node_name", None, "Name of the node whose key is distributed"),
("potential_master_candidates", None, "Potential master candidates"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("to_authorized_keys", None, "Whether the node's key should be added"
" to all nodes' 'authorized_keys' file"),
("to_public_keys", None, "Whether the node's key should be added"
" to all nodes' public key file"),
("get_public_keys", None, "Whether the node should get the other nodes'"
" public keys")],
None, None, "Distribute a new node's public SSH key on the cluster."),
("node_ssh_key_remove", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is removed"),
("node_name", None, "Name of the node whose key is removed"),
("master_candidate_uuids", None, "List of UUIDs of master candidates."),
("potential_master_candidates", None, "Potential master candidates"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("from_authorized_keys", None,
"If the key should be removed from the 'authorized_keys' file."),
("from_public_keys", None,
"If the key should be removed from the public key file."),
("clear_authorized_keys", None,
"If the 'authorized_keys' file of the node should be cleared."),
("clear_public_keys", None,
"If the 'ganeti_pub_keys' file of the node should be cleared.")],
None, None, "Remove a node's SSH key from the other nodes' key files."),
("node_ssh_keys_renew", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuids", None, "UUIDs of the nodes whose key is renewed"),
("node_names", None, "Names of the nodes whose key is renewed"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("master_candidate_uuids", None, "List of UUIDs of master candidates."),
("potential_master_candidates", None, "Potential master candidates")],
None, None, "Renew all SSH key pairs of all nodes nodes."),
]
_MISC_CALLS = [
("lv_list", MULTI, None, constants.RPC_TMO_URGENT, [
("vg_name", None, None),
], None, None, "Gets the logical volumes present in a given volume group"),
("vg_list", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
"Gets the volume group list"),
("bridges_exist", SINGLE, None, constants.RPC_TMO_URGENT, [
("bridges_list", None, "Bridges which must be present on remote node"),
], None, None, "Checks if a node has all the bridges given"),
("etc_hosts_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
("mode", None,
"Mode to operate; currently L{constants.ETC_HOSTS_ADD} or"
" L{constants.ETC_HOSTS_REMOVE}"),
("name", None, "Hostname to be modified"),
("ip", None, "IP address (L{constants.ETC_HOSTS_ADD} only)"),
], None, None, "Modify hosts file with name"),
("drbd_helper", MULTI, None, constants.RPC_TMO_URGENT, [],
None, None, "Gets DRBD helper"),
("restricted_command", MULTI, None, constants.RPC_TMO_SLOW, [
("cmd", None, "Command name"),
], None, None, "Runs restricted command"),
("run_oob", SINGLE, None, constants.RPC_TMO_NORMAL, [
("oob_program", None, None),
("command", None, None),
("remote_node", None, None),
("timeout", None, None),
], None, None, "Runs out-of-band command"),
("hooks_runner", MULTI, None, constants.RPC_TMO_NORMAL, [
("hpath", None, None),
("phase", None, None),
("env", None, None),
], None, None, "Call the hooks runner"),
("iallocator_runner", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Iallocator name"),
("idata", None, "JSON-encoded input string"),
("default_iallocator_params", None, "Additional iallocator parameters"),
], None, None, "Call an iallocator on a remote node"),
("test_delay", MULTI, None, _TestDelayTimeout, [
("duration", None, None),
], None, None, "Sleep for a fixed time on given node(s)"),
("hypervisor_validate_params", MULTI, None, constants.RPC_TMO_NORMAL, [
("hvname", None, "Hypervisor name"),
("hvfull", None, "Parameters to be validated"),
], None, None, "Validate hypervisor params"),
("get_watcher_pause", SINGLE, None, constants.RPC_TMO_URGENT, [],
None, None, "Get watcher pause end"),
("set_watcher_pause", MULTI, None, constants.RPC_TMO_URGENT, [
("until", None, None),
], None, None, "Set watcher pause end"),
("get_file_info", SINGLE, None, constants.RPC_TMO_FAST, [
("file_path", None, None),
], None, None, "Checks if a file exists and reports on it"),
]
CALLS = {
"RpcClientDefault":
_Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
_FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
_BLOCKDEV_CALLS + _STORAGE_CALLS + _EXTSTORAGE_CALLS),
"RpcClientJobQueue": _Prepare([
("jobqueue_update", MULTI, None, constants.RPC_TMO_URGENT, [
("file_name", None, None),
("content", ED_COMPRESS, None),
], None, None, "Update job queue file"),
("jobqueue_purge", SINGLE, None, constants.RPC_TMO_NORMAL, [], None, None,
"Purge job queue"),
("jobqueue_rename", MULTI, None, constants.RPC_TMO_URGENT, [
("rename", None, None),
], None, None, "Rename job queue file"),
("jobqueue_set_drain_flag", MULTI, None, constants.RPC_TMO_URGENT, [
("flag", None, None),
], None, None, "Set job queue drain flag"),
]),
"RpcClientBootstrap": _Prepare([
("node_start_master_daemons", SINGLE, None, constants.RPC_TMO_FAST, [
("no_voting", None, None),
], None, None, "Starts master daemons on a node"),
("node_activate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
("use_external_mip_script", None,
"Whether to use the user-provided master IP address setup script"),
], None, None,
"Activates master IP on a node"),
("node_stop_master", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
"Deactivates master IP and stops master daemons on a node"),
("node_deactivate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
("use_external_mip_script", None,
"Whether to use the user-provided master IP address setup script"),
], None, None,
"Deactivates master IP on a node"),
("node_change_master_netmask", SINGLE, None, constants.RPC_TMO_FAST, [
("old_netmask", None, "The old value of the netmask"),
("netmask", None, "The new value of the netmask"),
("master_ip", None, "The master IP"),
("master_netdev", None, "The master network device"),
], None, None, "Change master IP netmask"),
("node_leave_cluster", SINGLE, None, constants.RPC_TMO_NORMAL, [
("modify_ssh_setup", None, None),
], None, None,
"Requests a node to clean the cluster information it has"),
("master_node_name", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
"Returns the master node name"),
]),
"RpcClientDnsOnly": _Prepare([
("version", MULTI, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_URGENT, [], None,
None, "Query node version"),
("node_verify_light", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
]),
"RpcClientConfig": _Prepare([
("upload_file", MULTI, None, constants.RPC_TMO_NORMAL, [
("file_name", ED_FILE_DETAILS, None),
], None, None, "Upload files"),
("upload_file_single", MULTI, None, constants.RPC_TMO_NORMAL, [
("file_name", None, "The name of the file"),
("content", ED_COMPRESS, "The data to be uploaded"),
("mode", None, "The mode of the file or None"),
("uid", None, "The owner of the file"),
("gid", None, "The group of the file"),
("atime", None, "The file's last access time"),
("mtime", None, "The file's last modification time"),
], None, None, "Upload files"),
("write_ssconf_files", MULTI, None, constants.RPC_TMO_NORMAL, [
("values", None, None),
], None, None, "Write ssconf files"),
]),
}
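# Illustrative sketch, not part of the upstream definitions: every entry in
# the *_CALLS lists above follows the same layout of (procedure name,
# single/multi-node call, accept-offline flag, timeout, argument definitions,
# pre-processor, post-processor, description), so a hypothetical minimal
# definition would look like the unused example below.
_EXAMPLE_CALL = ("node_ping", SINGLE, None, constants.RPC_TMO_FAST, [
  ("payload", None, "Opaque string echoed back by the node"),
  ], None, None, "Hypothetical call, shown only to illustrate the layout")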
|
bsd-2-clause
| -4,034,353,141,925,082,600
| 43.180142
| 80
| 0.654477
| false
| 3.428398
| false
| false
| false
|
iproduct/course-social-robotics
|
11-dnn-keras/venv/Lib/site-packages/pandas/core/dtypes/base.py
|
1
|
13190
|
"""
Extend pandas with custom array types.
"""
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union
import numpy as np
from pandas._typing import DtypeObj
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
class ExtensionDtype:
"""
A custom data type, to be paired with an ExtensionArray.
See Also
--------
extensions.register_extension_dtype: Register an ExtensionType
with pandas as class decorator.
extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
* _get_common_dtype
Optionally one can override construct_array_type for construction
with the name of this dtype via the Registry. See
:meth:`extensions.register_extension_dtype`.
* construct_array_type
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
.. versionchanged:: 0.24.0
Added ``_metadata``, ``__hash__``, and changed the default definition
of ``__eq__``.
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
as only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values::
class ExtensionDtype:
def __from_arrow__(
self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
) -> ExtensionArray:
...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: Tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: Any) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
* it's an instance of this type and all of the attributes
in ``self._metadata`` are equal between `self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
return hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> Type:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> Optional[List[str]]:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def construct_array_type(cls) -> Type["ExtensionArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise NotImplementedError
@classmethod
def construct_from_string(cls, string: str):
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
By default, ExtensionDtypes are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]:
"""
Register an ExtensionType with pandas as class decorator.
.. versionadded:: 0.24.0
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.
Returns
-------
callable
A class decorator.
Examples
--------
>>> from pandas.api.extensions import register_extension_dtype
>>> from pandas.api.extensions import ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
... name = "myextension"
"""
registry.register(cls)
return cls
class Registry:
"""
Registry for dtype inference.
The registry allows one to map a string repr of a extension
dtype to an extension dtype. The string alias can be used in several
places, including
* Series and Index constructors
* :meth:`pandas.array`
* :meth:`pandas.Series.astype`
Multiple extension types can be registered.
These are tried in order.
"""
def __init__(self):
self.dtypes: List[Type[ExtensionDtype]] = []
def register(self, dtype: Type[ExtensionDtype]) -> None:
"""
Parameters
----------
dtype : ExtensionDtype class
"""
if not issubclass(dtype, ExtensionDtype):
raise ValueError("can only register pandas extension dtypes")
self.dtypes.append(dtype)
def find(
self, dtype: Union[Type[ExtensionDtype], str]
) -> Optional[Type[ExtensionDtype]]:
"""
Parameters
----------
dtype : Type[ExtensionDtype] or str
Returns
-------
return the first matching dtype, otherwise return None
"""
if not isinstance(dtype, str):
dtype_type = dtype
if not isinstance(dtype, type):
dtype_type = type(dtype)
if issubclass(dtype_type, ExtensionDtype):
return dtype
return None
for dtype_type in self.dtypes:
try:
return dtype_type.construct_from_string(dtype)
except TypeError:
pass
return None
registry = Registry()
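# Illustrative sketch, not part of pandas: a minimal concrete dtype wired into
# the registry created above. The "_DemoDtype"/"demo" names are hypothetical.
class _DemoDtype(ExtensionDtype):
    # The two abstract members required by the interface.
    name = "demo"
    type = float

# Registering has the same effect as the @register_extension_dtype decorator.
registry.register(_DemoDtype)
# The string alias now resolves back to an instance of the dtype.
assert isinstance(registry.find("demo"), _DemoDtype)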
|
gpl-2.0
| -4,566,161,597,726,716,000
| 28.909297
| 86
| 0.59022
| false
| 4.677305
| false
| false
| false
|
miguelinux/vbox
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Object/Parser/InfLibraryClassesObject.py
|
1
|
10522
|
## @file
# This file is used to define class objects of INF file [LibraryClasses] section.
# It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfLibraryClassesObject
'''
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import GlobalData
from Library.Misc import Sdict
from Object.Parser.InfCommonObject import CurrentLine
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Library.ParserValidate import IsValidLibName
## GetArchModuleType
#
# Get Arch List and ModuleType List
#
def GetArchModuleType(KeyList):
__SupArchList = []
__SupModuleList = []
for (ArchItem, ModuleItem) in KeyList:
#
# Validate Arch
#
if (ArchItem == '' or ArchItem == None):
ArchItem = 'COMMON'
if (ModuleItem == '' or ModuleItem == None):
ModuleItem = 'COMMON'
if ArchItem not in __SupArchList:
__SupArchList.append(ArchItem)
List = ModuleItem.split('|')
for Entry in List:
if Entry not in __SupModuleList:
__SupModuleList.append(Entry)
return (__SupArchList, __SupModuleList)
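# Illustrative sketch, not part of the BaseTools sources: GetArchModuleType
# defaults empty Arch/ModuleType fields to 'COMMON' and splits '|'-separated
# module types, so a hypothetical key list such as the one below yields:
assert GetArchModuleType([('IA32', 'DXE_DRIVER|UEFI_DRIVER'), ('', None)]) == \
       (['IA32', 'COMMON'], ['DXE_DRIVER', 'UEFI_DRIVER', 'COMMON'])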
class InfLibraryClassItem():
def __init__(self, LibName='', FeatureFlagExp='', HelpString=None):
self.LibName = LibName
self.FeatureFlagExp = FeatureFlagExp
self.HelpString = HelpString
self.CurrentLine = CurrentLine()
self.SupArchList = []
self.SupModuleList = []
self.FileGuid = ''
self.Version = ''
def SetLibName(self, LibName):
self.LibName = LibName
def GetLibName(self):
return self.LibName
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetFeatureFlagExp(self, FeatureFlagExp):
self.FeatureFlagExp = FeatureFlagExp
def GetFeatureFlagExp(self):
return self.FeatureFlagExp
def SetSupArchList(self, SupArchList):
self.SupArchList = SupArchList
def GetSupArchList(self):
return self.SupArchList
def SetSupModuleList(self, SupModuleList):
self.SupModuleList = SupModuleList
def GetSupModuleList(self):
return self.SupModuleList
#
# As Build related information
#
def SetFileGuid(self, FileGuid):
self.FileGuid = FileGuid
def GetFileGuid(self):
return self.FileGuid
def SetVersion(self, Version):
self.Version = Version
def GetVersion(self):
return self.Version
## INF LibraryClass Section
#
#
#
class InfLibraryClassObject():
def __init__(self):
self.LibraryClasses = Sdict()
#
# Macro defined in this section should be only used in this section.
#
self.Macros = {}
##SetLibraryClasses
#
#
# @param HelpString: It can be a common comment or contain a recommended
# instance.
#
def SetLibraryClasses(self, LibContent, KeyList=None):
#
# Validate Arch
#
(__SupArchList, __SupModuleList) = GetArchModuleType(KeyList)
for LibItem in LibContent:
LibItemObj = InfLibraryClassItem()
if not GlobalData.gIS_BINARY_INF:
HelpStringObj = LibItem[1]
LibItemObj.CurrentLine.SetFileName(LibItem[2][2])
LibItemObj.CurrentLine.SetLineNo(LibItem[2][1])
LibItemObj.CurrentLine.SetLineString(LibItem[2][0])
LibItem = LibItem[0]
if HelpStringObj != None:
LibItemObj.SetHelpString(HelpStringObj)
if len(LibItem) >= 1:
if LibItem[0].strip() != '':
if IsValidLibName(LibItem[0].strip()):
if LibItem[0].strip() != 'NULL':
LibItemObj.SetLibName(LibItem[0])
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_LIB_NAME_INVALID,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID % (LibItem[0]),
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_LIBRARY_SECTION_LIBNAME_MISSING,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
if len(LibItem) == 2:
if LibItem[1].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
#
# Validate FFE
#
FeatureFlagRtv = IsValidFeatureFlagExp(LibItem[1].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID % (FeatureFlagRtv[1]),
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
LibItemObj.SetFeatureFlagExp(LibItem[1].strip())
#
# Invalid strings
#
if len(LibItem) < 1 or len(LibItem) > 2:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_LIBRARY_SECTION_CONTENT_ERROR,
File=GlobalData.gINF_MODULE_NAME,
Line=LibItemObj.CurrentLine.GetLineNo(),
ExtraData=LibItemObj.CurrentLine.GetLineString())
LibItemObj.SetSupArchList(__SupArchList)
LibItemObj.SetSupModuleList(__SupModuleList)
#
# Determine Library class duplicate. Follow below rule:
#
# A library class keyword must not be duplicated within a
# [LibraryClasses] section. Library class keywords may appear in
# multiple architectural and module type [LibraryClasses] sections.
# A library class keyword listed in an architectural or module type
# [LibraryClasses] section must not be listed in the common
# architectural or module type [LibraryClasses] section.
#
# NOTE: This check will not report error now. But keep code for future enhancement.
#
# for Item in self.LibraryClasses:
# if Item.GetLibName() == LibItemObj.GetLibName():
# ItemSupArchList = Item.GetSupArchList()
# ItemSupModuleList = Item.GetSupModuleList()
# for ItemArch in ItemSupArchList:
# for ItemModule in ItemSupModuleList:
# for LibItemObjArch in __SupArchList:
# for LibItemObjModule in __SupModuleList:
# if ItemArch == LibItemObjArch and LibItemObjModule == ItemModule:
# #
# # ERR_INF_PARSER_ITEM_DUPLICATE
# #
# pass
# if (ItemArch.upper() == 'COMMON' or LibItemObjArch.upper() == 'COMMON') \
# and LibItemObjModule == ItemModule:
# #
# # ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
# #
# pass
else:
#
# Assume the file GUID is well formatted.
#
LibItemObj.SetFileGuid(LibItem[0])
LibItemObj.SetVersion(LibItem[1])
LibItemObj.SetSupArchList(__SupArchList)
if self.LibraryClasses.has_key((LibItemObj)):
LibraryList = self.LibraryClasses[LibItemObj]
LibraryList.append(LibItemObj)
self.LibraryClasses[LibItemObj] = LibraryList
else:
LibraryList = []
LibraryList.append(LibItemObj)
self.LibraryClasses[LibItemObj] = LibraryList
return True
def GetLibraryClasses(self):
return self.LibraryClasses
|
gpl-2.0
| -620,824,427,983,713,900
| 40.588933
| 114
| 0.519958
| false
| 4.763241
| false
| false
| false
|
atztogo/phonopy
|
example/Si-gruneisen/Si-gruneisen.py
|
1
|
2004
|
import numpy as np
from phonopy import Phonopy, PhonopyGruneisen
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS
def append_band(bands, q_start, q_end):
band = []
for i in range(51):
band.append(np.array(q_start) +
(np.array(q_end) - np.array(q_start)) / 50 * i)
bands.append(band)
phonons = {}
for vol in ("orig", "plus", "minus"):
unitcell = read_vasp("%s/POSCAR-unitcell" % vol)
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS(filename="%s/FORCE_SETS" % vol)
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
phonons[vol] = phonon
gruneisen = PhonopyGruneisen(phonons["orig"],
phonons["plus"],
phonons["minus"])
gruneisen.set_mesh([2, 2, 2])
q_points, _, frequencies, _, gammas = gruneisen.get_mesh()
for q, freq, g in zip(q_points, frequencies, gammas):
print(("%5.2f %5.2f %5.2f " + (" %7.3f" * len(freq)))
% ((q[0], q[1], q[2]) + tuple(freq)))
print(((" " * 18) + (" %7.3f" * len(g))) % tuple(g))
bands = []
append_band(bands, [0.5, 0.5, 0.0], [0.0, 0.0, 0.0])
append_band(bands, [0.0, 0.0, 0.0], [0.5, 0.5, 0.5])
gruneisen.set_band_structure(bands)
q_points, distances, frequencies, _, gammas = gruneisen.get_band_structure()
for q_path, d_path, freq_path, g_path in zip(q_points, distances,
frequencies, gammas):
for q, d, freq, g in zip(q_path, d_path, freq_path, g_path):
print(("%10.5f %5.2f %5.2f %5.2f " + (" %7.3f" * len(freq)))
% ((d, q[0], q[1], q[2]) + tuple(freq)))
print(((" " * 30) + (" %7.3f" * len(g))) % tuple(g))
|
bsd-3-clause
| -4,193,420,619,038,875,000
| 39.08
| 76
| 0.505988
| false
| 2.726531
| false
| false
| false
|
danpozmanter/monitor_requests
|
monitor_requests/__init__.py
|
1
|
4730
|
"""Monitor Requests."""
import datetime
import re
import sys
import traceback
import mock
from requests.utils import urlparse
from .data import DataHandler
from .output import OutputHandler
__version__ = '2.1.1'
class Monitor(object):
"""Monitor class to handle patching."""
# Libraries which mock requests by patching it:
# unittest.mock / mock and responses will not show up in tracebacks.
MOCKING_LIBRARIES = ('requests_mock',)
def __init__(self, domains=[], server_port=None, mocking=True):
"""Initialize Monitor, hot patch requests.
:param domains: List. Regex patterns to match against.
:param server_port: Int. Server mode: with monitor_requests_server
running on the specified port.
:param mocking: Boolean. Mock requests. Default True, set to False
when running in server mode from the test suite/session level.
"""
self.domain_patterns = [
re.compile(domain_pattern) for domain_pattern in domains
]
self.data = DataHandler(server_port=server_port)
# Mocking
self.mocking = mocking
if mocking:
from requests.adapters import HTTPAdapter
self.stock_send = HTTPAdapter.send
self.send_patcher = mock.patch.object(
HTTPAdapter,
'send',
side_effect=self._generate_mocked_send(),
autospec=True
)
self.send_patcher.start()
def _generate_mocked_send(self):
"""Generate mock function for http request.
:return: Mocked send method for HTTPAdapter.
"""
def mock_send(instance, request, *args, **kwargs):
start = datetime.datetime.now()
response = self.stock_send(instance, request, *args, **kwargs)
duration = (datetime.datetime.now() - start).total_seconds()
self._log_request(request.url, request.method, response, duration)
return response
return mock_send
def _check_domain(self, domain):
if not self.domain_patterns:
return True
matched = False
for pattern in self.domain_patterns:
if pattern.search(domain):
matched = True
return matched
def _check_mocked(self, tb_list):
traceback = str(tb_list)
for library in self.MOCKING_LIBRARIES:
if '/{}/'.format(library) in traceback:
return True
return False
def _log_request(self, url, method, response, duration):
"""Log request, store traceback/response data and update counts."""
domain = urlparse(url).netloc
if not self._check_domain(domain):
return
m_init = 'monitor_requests/__init__.py'
tb_list = [f for f in traceback.format_stack() if m_init not in f]
if self._check_mocked(tb_list):
return
self.data.log(url, domain, method, response, tb_list, duration)
def refresh(self):
"""Refresh data from store (server or instance)."""
self.logged_requests, self.analysis = self.data.retrieve()
def report(
self,
urls=False,
tracebacks=False,
responses=False,
debug=False,
inspect_limit=None,
output=sys.stdout,
tear_down=True
):
"""Print out the requests, general analysis, and optionally unique tracebacks.
If debug is True, show urls, tracebacks, and responses.
If tracebacks or responses are set to True, urls will be output.
:param urls: Boolean. Display unique urls requested.
:param tracebacks: Boolean. Display unique tracebacks per url.
:param responses: Boolean. Display response/request info per url.
:param debug: Boolean. Convenience to display tracebacks and responses.
:param inspect_limit: Integer. How deep the stack trace should be.
:param output: Stream. Output destination.
:param tear_down: Undo the hotpatching (True by default), delete data.
"""
tracebacks = tracebacks or debug
responses = responses or debug
self.refresh()
output_handler = OutputHandler(
output, urls, tracebacks, responses, debug, inspect_limit,
self.logged_requests, self.analysis
)
output_handler.write()
if tear_down:
self.stop(delete=True)
def stop(self, delete=False):
"""Undo the hotpatching.
:param delete: Boolean. Delete data (only with server mode).
"""
if delete:
self.data.delete()
if not self.mocking:
return
self.send_patcher.stop()
|
bsd-3-clause
| 4,755,888,305,894,752,000
| 34.56391
| 86
| 0.611628
| false
| 4.408201
| false
| false
| false
|
gameduell/pysupplies
|
tests/test_descr.py
|
1
|
1053
|
from supplies.annotate import attr, delay, refer
__author__ = 'dwae'
class Test:
def __init__(self, cnt=0):
self.cnt = cnt
@attr
def foo(self):
cnt = self.cnt
self.cnt += 1
return cnt
@delay
def bar(self):
cnt = self.cnt
self.cnt += 1
return cnt
@refer
def baz(self):
cnt = self.cnt
self.cnt += 1
return cnt
def test_attr():
assert isinstance(Test.foo, attr)
t = Test()
assert t.foo == 0
assert t.foo == 1
t.foo = 42
assert t.foo == 42
assert t.foo == 42
assert t.bar == 2
assert t.bar == 2
del t.foo
assert t.foo == 3
assert t.foo == 4
assert t.bar == 2
del t.bar
assert t.bar == 5
assert t.bar == 5
assert t.foo == 6
assert t.baz == 7
assert t.baz == 7
import pickle
t_ = pickle.loads(pickle.dumps(t))
assert t.foo == 8
assert t_.foo == 8
assert t.bar == 5
assert t_.bar == 5
assert t.baz == 7
assert t_.baz == 9
|
mit
| -216,468,087,574,851,420
| 14.042857
| 48
| 0.503324
| false
| 3.375
| false
| false
| false
|
spcui/virt-test
|
virttest/libvirt_xml/nodedev_xml.py
|
1
|
10435
|
"""
Module simplifying manipulation of XML described at
http://libvirt.org/formatnode.html
"""
import os
from virttest.libvirt_xml import base, xcepts, accessors
class CAPXML(base.LibvirtXMLBase):
"""
The base class for capability.
"""
def get_sysfs_sub_path(self):
"""
Return the sub path that stores the info of the capability.
"""
raise NotImplementedError('get_sysfs_sub_path is not implemented.')
@staticmethod
def get_key2filename_dict():
"""
Return a dict which contains the key and the name
of the info file.
"""
raise NotImplementedError('get_key2filename_dict is not implemented.')
def get_key2value_dict(self):
"""
Return a dict which contains the key and the value
in capability xml.
"""
raise NotImplementedError('get_key2value_dict is not implemented.')
class SystemXML(CAPXML):
"""
class for capability whose type is system.
"""
__slots__ = CAPXML.__slots__ + ('product', 'hdware_vendor',
'hdware_serial', 'hdware_uuid',
'firmware_vendor', 'firmversion',
'firm_release_date')
__sysfs_sub_path__ = 'dmi/id/'
__key2filename_dict__ = {'product': 'product_name',
'hdware_vendor': 'sys_vendor',
'hdware_serial': 'product_serial',
'hdware_uuid': 'product_uuid',
'firmware_vendor': 'bios_vendor',
'firmversion': 'bios_version',
'firm_release_date': 'bios_date'}
@staticmethod
def get_key2filename_dict():
"""
Return a dict which contain the key and the name
of info file for System node device.
"""
return SystemXML.__key2filename_dict__
def get_key2value_dict(self):
"""
return the dict key2value
key: the key in xml need to check.
value: value in xml for this key.
"""
key2value_dict = {}
for key in SystemXML.__key2filename_dict__:
key2value_dict[key] = self[key]
return key2value_dict
@staticmethod
def make_sysfs_sub_path():
"""
return __sysfs_sub_path__ immediately.
"""
return SystemXML.__sysfs_sub_path__
def get_sysfs_sub_path(self):
"""
Return the sysfs_subdir.
"""
return self.make_sysfs_sub_path()
class PCIXML(CAPXML):
"""
class for capability whose type is pci.
"""
__slots__ = CAPXML.__slots__ + ('domain', 'bus', 'slot',
'function', 'product_id',
'vendor_id')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementInt('domain', self, parent_xpath='/',
tag_name='domain')
accessors.XMLElementInt('bus', self, parent_xpath='/',
tag_name='bus')
accessors.XMLElementInt('slot', self, parent_xpath='/',
tag_name='slot')
accessors.XMLElementInt('function', self, parent_xpath='/',
tag_name='function')
accessors.XMLAttribute('product_id', self, parent_xpath='/',
tag_name='product', attribute='id')
accessors.XMLAttribute('vendor_id', self, parent_xpath='/',
tag_name='vendor', attribute='id')
super(PCIXML, self).__init__(virsh_instance=virsh_instance)
self.xml = (' <capability type=\'pci\'></capability>')
@staticmethod
def make_sysfs_sub_path(domain, bus, slot, function):
"""
Make sysfs_sub_path for pci by domain,bus,slot and function.
"""
pci_bus_path = ("%04x:%02x" % (domain, bus))
pci_device_path = ("%04x:%02x:%02x.%01x" % (domain, bus,
slot, function))
pci_sysfs_sub_path = ("pci_bus/%s/device/%s"
% (pci_bus_path, pci_device_path))
return pci_sysfs_sub_path
def get_sysfs_sub_path(self):
"""
Return the sysfs_subdir of the PCI device.
Example:
pci_bus/0000\:00/device/0000\:00\:00.0/
"""
domain = self.domain
bus = self.bus
slot = self.slot
function = self.function
return PCIXML.make_sysfs_sub_path(domain, bus, slot, function)
__key2filename_dict__ = {'product_id': 'device',
'vendor_id': 'vendor'}
@staticmethod
def get_key2filename_dict():
"""
return the dict key2filename.
key: the keys in pcixml need to check.
filename: the name of file stored info for this key.
"""
return PCIXML.__key2filename_dict__
def get_key2value_dict(self):
"""
return the dict key2value
key: the key in xml need to check.
value: value in xml for this key.
"""
key2value_dict = {}
for key in PCIXML.__key2filename_dict__:
key2value_dict[key] = self[key]
return key2value_dict
class NodedevXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for NodedevXML class.
"""
__slots__ = base.LibvirtXMLBase.__slots__ + ('name', 'parent',
'cap_type', 'cap',
'sysfs_main_path')
__schema_name__ = "nodedev"
__sysfs_dir__ = "/sys/class"
__type2class_dict__ = {'system': 'SystemXML',
'pci': 'PCIXML',
'usb_device': 'USBDeviceXML',
'usb': 'USBXML',
'net': 'NetXML',
'scsi_host': 'SCSIHostXML',
'scsi': 'SCSIXML',
'storage': 'StorageXML'}
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementText('name', self, parent_xpath='/',
tag_name='name')
accessors.XMLElementText('parent', self, parent_xpath='/',
tag_name='parent')
accessors.XMLAttribute('cap_type', self, parent_xpath='/',
tag_name='capability', attribute='type')
super(NodedevXMLBase, self).__init__(virsh_instance=virsh_instance)
self.xml = '<device></device>'
@staticmethod
def get_cap_by_type(cap_type):
"""
Init a cap class for a specific type.
:param cap_type: the type of capability.
:return: instance of the cap.
"""
cap_class_name = NodedevXMLBase.__type2class_dict__[cap_type]
cap_class = globals()[cap_class_name]
capxml = cap_class()
return capxml
def get_cap(self):
"""
Return the capability of nodedev_xml.
"""
try:
cap_root = self.xmltreefile.reroot('/capability')
except KeyError, detail:
raise xcepts.LibvirtXMLError(detail)
capxml = NodedevXMLBase.get_cap_by_type(self.cap_type)
capxml.xmltreefile = cap_root
return capxml
def set_cap(self, value):
"""
Set the capability by value.
"""
if not issubclass(type(value), CAPXML):
raise xcepts.LibvirtXMLError("value must be a CAPXML or subclass")
# remove any existing capability block
self.del_cap()
root = self.xmltreefile.getroot()
root.append(value.getroot())
self.xmltreefile.write()
def del_cap(self):
"""
Delete the capability from nodedev xml.
"""
element = self.xmltreefile.find('/capability')
if element is not None:
self.xmltreefile.remove(element)
self.xmltreefile.write()
def get_sysfs_sub_path(self):
"""
Get the sub sysfs path of the capability.
"""
capxml = self.cap
sysfs_sub_path = capxml.get_sysfs_sub_path()
return sysfs_sub_path
def get_sysfs_path(self):
"""
Get the abs path of the capability info.
"""
sysfs_main_path = self.__sysfs_dir__
sysfs_sub_path = self.get_sysfs_sub_path()
sysfs_path = os.path.join(sysfs_main_path, sysfs_sub_path)
return sysfs_path
class NodedevXML(NodedevXMLBase):
"""
class for Node device XML.
"""
__slots__ = NodedevXMLBase.__slots__
def __init__(self, virsh_instance=base.virsh):
"""
Initialize new instance.
"""
super(NodedevXML, self).__init__(virsh_instance=virsh_instance)
self.xml = ('<device></device>')
@staticmethod
def new_from_dumpxml(dev_name, virsh_instance=base.virsh):
"""
Get an instance of NodedevXML by dumpxml dev_name.
"""
nodedevxml = NodedevXML(virsh_instance=virsh_instance)
dumpxml_result = virsh_instance.nodedev_dumpxml(dev_name)
if dumpxml_result.exit_status:
raise xcepts.LibvirtXMLError("Nodedev_dumpxml %s failed.\n"
"Error: %s."
% (dev_name, dumpxml_result.stderr))
nodedevxml.xml = dumpxml_result.stdout
return nodedevxml
def get_key2value_dict(self):
"""
Get the dict which contain key and value in xml.
key: keys in nodedev xml need to check.
value: value in xml for the key.
"""
capxml = self.cap
key2value_dict = capxml.get_key2value_dict()
return key2value_dict
def get_key2syspath_dict(self):
"""
Get the dict which contains key and path.
key: keys in nodedev xml need to check.
syspath: the abs path for the file stores info for the key.
"""
sysfs_path = self.get_sysfs_path()
capxml = self.cap
key2filename_dict = capxml.__class__.get_key2filename_dict()
key2syspath_dict = {}
for key in key2filename_dict:
filename = key2filename_dict[key]
abs_syspath = os.path.join(sysfs_path, filename)
key2syspath_dict[key] = abs_syspath
return key2syspath_dict
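# Illustrative usage sketch, not part of the module: cross-check the values a
# node device reports in its XML against the matching sysfs files, using only
# the helpers defined above. Requires a libvirt host; the device name below is
# just an example.
def _example_check_nodedev(dev_name='pci_0000_00_19_0'):
    nodedevxml = NodedevXML.new_from_dumpxml(dev_name)
    key2value = nodedevxml.get_key2value_dict()
    key2syspath = nodedevxml.get_key2syspath_dict()
    for key, syspath in key2syspath.items():
        sysfs_value = open(syspath).read().strip()
        print('%s: xml=%s sysfs=%s' % (key, key2value[key], sysfs_value))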
|
gpl-2.0
| -8,236,994,242,723,432,000
| 30.717325
| 79
| 0.530331
| false
| 4.060311
| false
| false
| false
|
galactose/wviews
|
program/program.py
|
1
|
13353
|
"""
program.py: Program structures for worldview solving
Copyright (C) 2014 Michael Kelly
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from itertools import combinations
from collections import defaultdict
from atom import EpistemicModality, Atom, EpistemicAtom, NegationAsFailureAtom
from rule import IndexedRule
class LogicProgram(object):
def __init__(self, file_handle):
self._label_id = 1
self._label_set = set()
self._label_cache = {}
self._label_id_lookup = {}
self.label_to_epistemic_atom_id = defaultdict(list)
self._atom_id = 1
self._atom_set = set()
self._atom_cache = {}
self._atom_id_lookup = {}
self.epistemic_atom_cache = {}
self.program = []
self.epistemic_atom_id_to_valuation_index_map = None
def get_program(self):
"""
Returns the program as a list of strings that can be output to
file.
"""
return [str(rule) for rule in self.program]
def index_atoms(self, program_handle):
"""
index_epistemic_atoms: indexes atoms in program rules so that
we can simplify rules and build atom and epistemic atom lookup tables,
to speed up the process of applying epistemic valuations and determine
if a coherent world view is possible from a disjunctive logic program.
Returns:
- atom_index_cache (dict) -
- epistemic_atom_index_cache (dict) -
- indexed_program (set) -
"""
for rule in program_handle: # loop over new rules
new_rule = IndexedRule(head=set(), tail=set(),
atom_dict=self._atom_cache)
if rule.head:
for atom_token in rule.head: # check rule head
atom = self.get_atom_information(atom_token)
new_rule.head.add(atom.atom_id)
if rule.tail:
for atom_token in rule.tail: # check rule body
atom = self.get_atom_information(atom_token)
new_rule.tail.add(atom.atom_id)
self.program.append(new_rule)
# here we map each epistemic id to a number in an order so that when we
# apply a valuation it's consistent and unique so we're not testing the
# same set of valuations twice
self.epistemic_atom_id_to_valuation_index_map = {
epistemic_id: valuation_index
for valuation_index, epistemic_id in
enumerate(self.epistemic_atom_cache.keys())
}
def get_or_create_atom(self, atom):
"""
Given a newly created logical atom, check to see if one exists of the
given type. If it doesn't assign it a unique ID and add it to the atoms
that exist for the program. If it is an epistemic atom add it to the
epistemic atom cache. This allows fast access to atom information.
Also identify unique labels and index them here.
Arguments:
* atom (Atom/EpistemicAtom/NegationAsFailureAtom)
an object representing an atom in an epistemic logic program
"""
if str(atom) in self._atom_set:
return False
if atom.label not in self._label_id_lookup:
atom.label_id = self._label_id
self._label_cache[self._label_id] = atom.label
self._label_id_lookup[atom.label] = self._label_id
self._label_id += 1
else:
atom.label_id = self._label_id_lookup[atom.label]
atom.atom_id = self._atom_id
self._atom_set.add(str(atom))
self._atom_id_lookup[str(atom)] = atom.atom_id
self._atom_cache[atom.atom_id] = atom
self._atom_id += 1
if isinstance(atom, EpistemicAtom):
self.epistemic_atom_cache[atom.atom_id] = atom
self.label_to_epistemic_atom_id[atom.label].append(atom.atom_id)
return True
def get_atom_information(self, atom_token):
"""
Given a logical atom represented as a string of characters, determine
if it is an epistemic atom, if the atom has strong negation, what
kind of epistemic modality is used and if it is negated, and whether
or not negation as failure is used. Finally return an Atom instance
which holds all this information and assign it an atom ID and if
applicable an epistemic ID.
Arguments:
* atom_token (str) - a logical atom represented as a string.
"""
atom_negation = False
epistemic_negation = False
negation_as_failure = False
# it's an epistemic atom
if atom_token.find('K') != -1 or atom_token.find('M') != -1:
modality = EpistemicModality.BELIEVE
epistemic_modality_index = atom_token.find('M')
label = atom_token[1:]
if epistemic_modality_index == -1:
epistemic_modality_index = atom_token.find('K')
modality = EpistemicModality.KNOW
if epistemic_modality_index != 0 and \
atom_token[epistemic_modality_index - 1] in ('-', '~'):
epistemic_negation = True
label = atom_token[epistemic_modality_index + 1:]
if atom_token[epistemic_modality_index + 1] in ('-', '~'):
atom_negation = True
label = atom_token[epistemic_modality_index + 2:]
atom = EpistemicAtom(label, modality, atom_negation=atom_negation,
epistemic_negation=epistemic_negation)
else:
label = atom_token
if atom_token[0] in ('-', '~'):
atom_negation = True
label = atom_token[1:]
if atom_token.startswith('not '):
if '-' in atom_token or '~' in atom_token:
raise ValueError
negation_as_failure = True
label = atom_token[4:]
if negation_as_failure:
atom = NegationAsFailureAtom(label, atom_negation)
else:
atom = Atom(label, atom_negation)
created = self.get_or_create_atom(atom)
if not created:
atom.atom_id = self._atom_id_lookup[str(atom)]
return atom
def get_evaluated_program_and_apply_valuation(self, valuation_tuple):
"""
        Given a tuple of valuations to apply to the epistemic atoms, run
        through each rule, apply the valuations and determine their
        consequences for that rule. If a valuation is true for an epistemic
        atom, the atom is removed from the rule, i.e. it is treated as true
        for the purpose of deciding whether the guess leads to a valid
        worldview. If a valuation is false for an epistemic atom, the entire
        rule is removed, since one false atom in the body makes the whole
        rule unsatisfiable.
        If a rule's entire body is true, its head is taken to be true in the
        evaluated program.
Arguments:
* valuation_tuple (tuple(bool))
- a tuple of boolean values representing valuations to apply to the
epistemic atoms in the program.
"""
evaluated_program = []
for rule in self.program:
evaluated_rule = self.get_evaluated_rule_and_apply_valuation(rule, valuation_tuple)
if evaluated_rule:
evaluated_program.append(evaluated_rule)
return evaluated_program
def get_evaluated_rule_and_apply_valuation(self, rule, valuation_tuple):
"""
        At the rule level, go through the rule body and check for epistemic
        atoms; for each one found, look up its index in the valuation tuple.
        Apply that valuation to the atom and work out what it means for the
        rule in the evaluated program: if the valuation is True the atom is
        removed from the rule body, otherwise the whole rule is removed from
        the program.
        Return the evaluated rule string if every epistemic atom in the rule
        has a True valuation (or the rule contains none), and an empty string
        otherwise.
        Arguments:
        * rule (IndexedRule)
        - the indexed rule whose body the valuation is applied to.
        * valuation_tuple (tuple(bool))
        - a tuple of boolean values representing valuations to apply to the
        epistemic atoms in the program.
"""
false_valuation = False
modal_atom_in_rule = False
for atom_id in rule.tail:
atom = self._atom_cache[atom_id]
if not isinstance(atom, EpistemicAtom):
continue
# apply the valuation
modal_atom_in_rule = True
valuation_index = self.epistemic_atom_id_to_valuation_index_map[atom_id]
atom.valuation = valuation_tuple[valuation_index]
if not atom.valuation:
false_valuation = True
if not false_valuation or not modal_atom_in_rule:
return rule.get_rule_string(apply_valuation=True)
return ''
def check_optimisations(self):
"""
        Search the label-to-epistemic-atom dictionary and identify any labels
        which appear in more than one epistemic atom. If those atoms have
        negations or modalities which conflict, the valuations can be
        simplified so that such cases are never processed.
"""
optimisation_atom_pairs = []
        for label, e_atom_id_list in self.label_to_epistemic_atom_id.items():
if not e_atom_id_list or len(e_atom_id_list) == 1:
continue
e_combinations = combinations(e_atom_id_list, 2)
for e_atom_id_a, e_atom_id_b in e_combinations:
e_atom_a = self._atom_cache[e_atom_id_a]
e_atom_b = self._atom_cache[e_atom_id_b]
if self.check_optimisation(e_atom_a, e_atom_b):
                    optimisation_atom_pairs.append((e_atom_a, e_atom_b))
return optimisation_atom_pairs
    def check_optimisation(self, e_atom_a, e_atom_b):
        """
        Return True if the two epistemic atoms conflict in a way that allows
        the valuations covering them to be simplified, using the pairwise
        checks below.
        """
return (
self.check_conflicts(e_atom_a, e_atom_b) and
self.check_conflicts(e_atom_b, e_atom_a)
) or \
self.check_conflicting_negation(e_atom_a, e_atom_b) or \
self.check_different_modality(e_atom_a, e_atom_b) or \
self.check_different_modality(e_atom_b, e_atom_a)
@staticmethod
def check_conflicts(atom_a, atom_b):
"""
        Given two epistemic atoms, if one is K without epistemic negation and
        the other is M without epistemic negation, and their atom negations
        do not agree, we can safely say that any valuation where they are
        both true or both false cannot be satisfied.
        Arguments:
* atom_a (EpistemicAtom) - an epistemic atom
* atom_b (EpistemicAtom) - another epistemic atom
"""
return (atom_a.modality == EpistemicModality.KNOW and
atom_b.modality == EpistemicModality.BELIEVE and
not atom_a.epistemic_negation and
not atom_b.epistemic_negation and
atom_a.atom_negation != atom_b.atom_negation)
@staticmethod
def check_different_modality(atom_a, atom_b):
"""
        Given two epistemic atoms, if one is K without epistemic negation,
        the other is M with epistemic negation, and their atom negations are
        equal, then any valuation that agrees for both of them cannot be
        true.
        Arguments:
* atom_a (EpistemicAtom) - an epistemic atom
* atom_b (EpistemicAtom) - another epistemic atom
"""
return (atom_a.modality == EpistemicModality.KNOW and
not atom_a.epistemic_negation and
atom_b.modality == EpistemicModality.BELIEVE and
atom_b.epistemic_negation and
atom_a.atom_negation == atom_b.atom_negation)
@staticmethod
def check_conflicting_negation(atom_a, atom_b):
"""
        Given two epistemic atoms, if they have the same modality
        (whether K or M) but a conflicting negation status for their modality
        or for their atom (but not both), then we can safely say that any
        valuation which treats both of them as true is a false valuation.
        Arguments:
* atom_a (EpistemicAtom) - an epistemic atom
* atom_b (EpistemicAtom) - another epistemic atom
"""
return (atom_a.modality == atom_b.modality and
((atom_a.atom_negation != atom_b.atom_negation and
atom_a.epistemic_negation == atom_b.epistemic_negation) or
(atom_a.atom_negation == atom_b.atom_negation and
atom_a.epistemic_negation != atom_b.epistemic_negation)))
|
gpl-3.0
| -5,458,440,010,236,815,000
| 42.353896
| 95
| 0.609002
| false
| 3.873803
| false
| false
| false
|
reubano/hdxscraper-undp-climate
|
config.py
|
1
|
2244
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
config
~~~~~~
Provides app configuration settings
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from os import path as p
BASEDIR = p.dirname(__file__)
PARENTDIR = p.dirname(BASEDIR)
DB_NAME = 'scraperwiki.sqlite'
RECIPIENT = 'reubano@gmail.com'
class Config(object):
base = 'http://www.geog.ox.ac.uk'
BASE_URL = '%s/research/climate/projects/undp-cp/UNDP_data' % base
FILE_EXT = 'ts.obs.precip.ts.ensemblemean.abs.txt'
DIR = 'Observed/Mean/Timeseries/Absolute'
loc = [
'Afghanistan', 'Angola', 'Antigua and Barbuda', 'Argentina', 'Armenia',
'Bangladesh', 'Barbados', 'Belize', 'Benin', 'Cambodia', 'Cameroon',
'Cape Verde', 'Chad', 'Chile', 'China', 'Colombia', 'Comoros', 'Cuba',
'Dominica', 'Dominican Republic', 'Equatorial Guinea', 'Eritrea',
'Ethiopia', 'Gabon', 'Gambia', 'Ghana', 'Grenada', 'Guinea', 'Guyana',
'Indonesia', 'Jamaica', 'Kenya', 'Liberia', 'Malawi', 'Mali',
'Mauritania', 'Mauritius', 'Mexico', 'Morocco', 'Mozambique', 'Nepal',
'Nicaragua', 'Pakistan', 'Sao Tome and Principe', 'Senegal',
'Sierra Leone', 'St Kitts and Nevis', 'St Lucia',
'St Vincent and the Grenadines', 'Suriname', 'Tanzania', 'The Bahamas',
'Togo', 'Trinidad and Tobago', 'Uganda', 'Vietnam', 'Yemen', 'Zambia']
TABLES = [{'name': 'climate', 'location': l, 'rid': 'rid'} for l in loc]
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s' % p.join(BASEDIR, DB_NAME)
API_LIMIT = 1000
SW = False
DEBUG = False
TESTING = False
PROD = False
CHUNK_SIZE = 2 ** 14
ROW_LIMIT = None
LOGFILE = p.join(BASEDIR, 'http', 'log.txt')
class Scraper(Config):
PROD = True
SW = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s' % p.join(PARENTDIR, DB_NAME)
LOGFILE = p.join(PARENTDIR, 'http', 'log.txt')
class Production(Config):
PROD = True
class Development(Config):
DEBUG = True
CHUNK_SIZE = 2 ** 4
ROW_LIMIT = 16
class Test(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
DEBUG = True
CHUNK_SIZE = 2 ** 4
ROW_LIMIT = 10
TESTING = True
|
mit
| -7,815,678,056,294,117,000
| 28.92
| 79
| 0.610517
| false
| 2.710145
| true
| false
| false
|
wtpayne/hiai
|
a3_src/h70_internal/da/lwc/discover.py
|
1
|
8741
|
# -*- coding: utf-8 -*-
"""
Local working copy path aliasing.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import os
import da.lwc.search
import da.memo
LWC_DIR_EXCLUDE_EXPR_LIST = [r'^\..*$',
r'^\.git$',
r'^\.cache$',
r'^\.vagrant',
r'^__pycache__']
LWC_EXT_INCLUDE_EXPR_LIST = [r'^.*\.bash$',
r'^.*\.css$',
r'^.*\.template.html$',
r'^.*\.template.docx$',
r'^.*\.py$',
r'^.*\.md$',
r'^.*\.json$',
r'^.*\.yaml$']
LWC_PROJECT_DIR_EXPR = r'^.*p[0-9]{4}_[a-z0-9_]{2,64}$'
LWC_COUNTERPARTY_DIR_EXPR = r'^.*c[0-9]{3}_[a-z0-9_]{2,64}$'
LWC_RESEARCHER_DIR_EXPR = r'^.*t[0-9]{3}_[a-z0-9_]{2,64}$'
_LWC_TAB = {
'env': ('a0_env', ),
'cfg': ('a1_cfg', ),
'dat': ('a2_dat', ),
'src': ('a3_src', ),
'tmp': ('a4_tmp', ),
'cms': ('a5_cms', ),
'resource': ('a3_src', 'h10_resource' ),
'daybook': ('a3_src', 'h10_resource', 'daybook' ),
'registry': ('a3_src', 'h10_resource', 'registry' ),
'capability': ('a3_src', 'h20_capability' ),
'product': ('a3_src', 'h30_product' ),
'project': ('a3_src', 'h40_project' ),
'research': ('a3_src', 'h50_research' ),
'demo': ('a3_src', 'h60_demo' ),
'internal': ('a3_src', 'h70_internal' ),
'bldcfg': ('a3_src', 'h70_internal', 'da', 'bldcfg' ),
'doc': ('a3_src', 'h80_doc' )
}
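# Example (hypothetical root directory): with dirpath_lwc_root set to
# '/home/dev/hiai', path('daybook', dirpath_lwc_root = '/home/dev/hiai')
# resolves via this table to '/home/dev/hiai/a3_src/h10_resource/daybook'.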
# -----------------------------------------------------------------------------
def gen_product_dirs(dirpath_lwc_root = None):
"""
Generate all product dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'product',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = None,
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_counterparty_dirs(dirpath_lwc_root = None):
"""
Generate all project counterparty dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'project',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = [da.lwc.discover.LWC_COUNTERPARTY_DIR_EXPR],
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_project_dirs(dirpath_lwc_root = None):
"""
Generate all project dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'project',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = [da.lwc.discover.LWC_PROJECT_DIR_EXPR],
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_research_dirs(dirpath_lwc_root = None):
"""
Generate all research (per team member) dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'research',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = [da.lwc.discover.LWC_RESEARCHER_DIR_EXPR],
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_demo_dirs(dirpath_lwc_root = None):
"""
Generate all demo dirs in the local working copy.
"""
return da.lwc.search.filtered_dirpath_generator(
root = path(key = 'demo',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = None,
pathexcl = None)
# -----------------------------------------------------------------------------
def gen_src_files(dirpath_lwc_root = None):
"""
Generate all source files in the local working copy.
"""
if dirpath_lwc_root is None:
dirpath_lwc_root = _lwc_root(__file__)
return da.lwc.search.filtered_filepath_generator(
root = path(key = 'src',
dirpath_lwc_root = dirpath_lwc_root),
direxcl = da.lwc.discover.LWC_DIR_EXCLUDE_EXPR_LIST,
pathincl = da.lwc.discover.LWC_EXT_INCLUDE_EXPR_LIST)
# -----------------------------------------------------------------------------
@da.memo.var
def path(key, dirpath_lwc_root = None):
"""
Return the directory path corresponding to the specified key.
"""
# Get lwc_root if it is not defined
if dirpath_lwc_root is None:
dirpath_lwc_root = _lwc_root(__file__)
# LWC root
if key == 'root':
return dirpath_lwc_root
# Handle 'heavyweight' folders that can't get copied to tmp
if (key == 'env') or (key == 'cfg') or (key == 'dat'):
dirname_tmp = _LWC_TAB['tmp'][0]
is_tmp_lwc = dirname_tmp in dirpath_lwc_root
if is_tmp_lwc:
dirpath_outer_lwc_root = _lwc_root(dirpath_lwc_root)
else:
dirpath_outer_lwc_root = dirpath_lwc_root
return os.path.join(dirpath_outer_lwc_root, *_LWC_TAB[key])
# Env dir for the current runtime environment?
if key == 'current_env':
import da.machine as _machine
dirpath_env = path(key = 'env', dirpath_lwc_root = dirpath_lwc_root)
env_id = _machine.env_id()
return os.path.join(dirpath_env, env_id)
# Config directory for the current user & machine?
if key == 'current_cfg':
import da.team as _team
import da.machine as _machine
dirpath_cfg = path(key = 'cfg', dirpath_lwc_root = dirpath_lwc_root)
member_id = _team.member_id(dirpath_lwc_root = dirpath_lwc_root)
machine_id = _machine.machine_id(dirpath_lwc_root = dirpath_lwc_root)
return os.path.join(dirpath_cfg, member_id, machine_id)
# Key is an entry in the static table above?
if key in _LWC_TAB:
return os.path.join(dirpath_lwc_root, *_LWC_TAB[key])
raise RuntimeError(
'Could not identify path for key: {key}'.format(
key = key))
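# Usage sketch (hypothetical values):
#   path('src')          # -> <lwc_root>/a3_src
#   path('current_cfg')  # -> <lwc_root>/a1_cfg/<member_id>/<machine_id>
# Unknown keys raise RuntimeError rather than returning a guessed path.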
# -----------------------------------------------------------------------------
@da.memo.var
def _lwc_root(filepath_self):
"""
Return the directory path to the root of the local working copy.
"""
marker_file_name = 'da'
dirpath_self = os.path.dirname(filepath_self)
dirpath_lwc_root = da.lwc.search.find_ancestor_dir_containing(
dirpath_self, marker_file_name, allow_dir = False)
dirpath_normalised = os.path.normpath(dirpath_lwc_root)
dirpath_real = os.path.realpath(dirpath_normalised)
return dirpath_real
|
apache-2.0
| -1,385,293,988,689,755,400
| 37.170306
| 79
| 0.467795
| false
| 3.909213
| false
| false
| false
|