Dataset schema (33 columns; ⌀ marks nullable columns):

| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 7 – 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–247 |
| max_stars_repo_name | string | lengths 4–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 ⌀ | 1 – 368k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_path | string | lengths 4–247 |
| max_issues_repo_name | string | lengths 4–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 ⌀ | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_path | string | lengths 4–247 |
| max_forks_repo_name | string | lengths 4–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24–24 |
| content | string | lengths 1 – 1.04M |
| avg_line_length | float64 | 1.77 – 618k |
| max_line_length | int64 | 1 – 1.02M |
| alphanum_fraction | float64 | 0 – 1 |
| original_content | string | lengths 7 – 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 – 942k |
| filtered:remove_class_no_docstring | int64 | -354 – 977k |
| filtered:remove_delete_markers | int64 | 0 – 60.1k |
Row 1
- hexsha: fd14235a97115d0da605df7efd9315bb578a44f1
- size: 101, ext: py, lang: Python
- max_stars: repo_path=run_kbstopwatch.py, repo_name=mwiens91/kb-stopwatch, head_hexsha=0eec6bd942ca1e9786f36d8e577ce304b58d77a3, licenses=["MIT"], count=null, event_min_datetime=null, event_max_datetime=null
- max_issues: repo_path=run_kbstopwatch.py, repo_name=mwiens91/kb-stopwatch, head_hexsha=0eec6bd942ca1e9786f36d8e577ce304b58d77a3, licenses=["MIT"], count=null, event_min_datetime=null, event_max_datetime=null
- max_forks: repo_path=run_kbstopwatch.py, repo_name=mwiens91/kb-stopwatch, head_hexsha=0eec6bd942ca1e9786f36d8e577ce304b58d77a3, licenses=["MIT"], count=null, event_min_datetime=null, event_max_datetime=null

content:
#!/usr/bin/env python3
"""Runs kb-stopwatch."""
from kbstopwatch.main import main
# Run it
main()
avg_line_length: 11.222222, max_line_length: 33, alphanum_fraction: 0.683168

original_content:
#!/usr/bin/env python3
"""Runs kb-stopwatch."""
from kbstopwatch.main import main
# Run it
main()
filtered:remove_function_no_docstring: 0, filtered:remove_class_no_docstring: 0, filtered:remove_delete_markers: 0
Row 2
- hexsha: a20c02cbb720f1bb48ec57842efc94d26eab867b
- size: 4,527, ext: py, lang: Python
- max_stars: repo_path=transduction_based/laplacian_tags.py, repo_name=vohoaiviet/tag-image-retrieval, head_hexsha=0a257560581f702cd394f3f28c9e0f6202827ce8, licenses=["MIT"], count=50, event_min_datetime=2015-11-04T15:53:09.000Z, event_max_datetime=2022-01-03T14:46:17.000Z
- max_issues: repo_path=transduction_based/laplacian_tags.py, repo_name=vohoaiviet/tag-image-retrieval, head_hexsha=0a257560581f702cd394f3f28c9e0f6202827ce8, licenses=["MIT"], count=2, event_min_datetime=2018-03-07T09:51:50.000Z, event_max_datetime=2018-10-13T11:05:13.000Z
- max_forks: repo_path=transduction_based/laplacian_tags.py, repo_name=vohoaiviet/tag-image-retrieval, head_hexsha=0a257560581f702cd394f3f28c9e0f6202827ce8, licenses=["MIT"], count=17, event_min_datetime=2015-10-26T03:41:49.000Z, event_max_datetime=2021-08-23T08:11:05.000Z

content:
#!/usr/bin/env python
# encoding: utf-8
import sys, os
import numpy as np
import bisect
import scipy.io
import math
import h5py
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
from basic.constant import ROOT_PATH
from basic.common import makedirsforfile, checkToSkip, printStatus
from basic.util import readImageSet
from basic.annotationtable import readConcepts
from util.simpleknn.bigfile import BigFile
from instance_based.tagvote import *
INFO = 'transduction_based.laplacian_tags'
DEFAULT_RATIOCS = 0.9
if __name__ == "__main__":
sys.exit(main())
avg_line_length: 34.295455, max_line_length: 181, alphanum_fraction: 0.662249

original_content:
#!/usr/bin/env python
# encoding: utf-8
import sys, os
import numpy as np
import bisect
import scipy.io
import math
import h5py
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
from basic.constant import ROOT_PATH
from basic.common import makedirsforfile, checkToSkip, printStatus
from basic.util import readImageSet
from basic.annotationtable import readConcepts
from util.simpleknn.bigfile import BigFile
from instance_based.tagvote import *
INFO = 'transduction_based.laplacian_tags'
DEFAULT_RATIOCS = 0.9
def tag_semantic_similarity(x, y, ic):
mx = wn.morphy(x)
my = wn.morphy(y)
if mx is None or my is None:
return 0
synX = wn.synsets(mx, pos=wn.NOUN)
synY = wn.synsets(my, pos=wn.NOUN)
if len(synX) > 0 and len(synY) > 0:
maxSim = synX[0].lin_similarity(synY[0], ic)
else:
maxSim = 0
return maxSim
def process(options, workingCollection):
rootpath = options.rootpath
overwrite = options.overwrite
chunk = options.chunk - 1
n_chunks = options.nchunks
ratio_cs = options.ratiocs
assert chunk < n_chunks and chunk >= 0 and n_chunks > 0
printStatus(INFO, 'RatioCS = %f' % ratio_cs)
printStatus(INFO, 'Using Brown Corpus for the ic')
brown_ic = wordnet_ic.ic('ic-brown.dat')
tags_file = os.path.join(rootpath, workingCollection, 'TextData', 'lemm_wordnet_freq_tags.h5')
if not os.path.exists(tags_file):
printStatus(INFO, 'Tags file not found at %s. Did you run wordnet_frequency_tags.py?' % tags_file)
sys.exit(1)
if n_chunks > 1:
resultfile = os.path.join(rootpath, workingCollection, 'LaplacianT', '%f'%(ratio_cs), 'laplacianT_%d.mat' % chunk)
else:
resultfile = os.path.join(rootpath, workingCollection, 'LaplacianT', '%f'%(ratio_cs), 'laplacianT.mat')
if checkToSkip(resultfile, overwrite):
return 0
tags_data = h5py.File(tags_file, 'r')
vocab = list(tags_data['vocab'][:])
tagmatrix = tags_data['tagmatrix'][:]
N_tags = len(vocab)
# single tag frequency
frequency = tagmatrix.sum(axis=0)
assert len(frequency) == len(vocab), "%d != %d" % (len(frequency), len(vocab))
final_matrix = np.zeros((N_tags, N_tags))
# similarity matrix
printStatus(INFO, 'Building the similarity matrix')
start_chunk = chunk * int(math.floor(N_tags / n_chunks))
if chunk == (n_chunks - 1):
end_chunk = N_tags
else:
end_chunk = (chunk + 1) * int(math.floor(N_tags / n_chunks))
for i in xrange(start_chunk, end_chunk):
if i % 100 == 0:
printStatus(INFO, '%d / %d done' % (i+1, end_chunk))
for k in xrange(i+1, N_tags):
context = ratio_cs * np.sum(tagmatrix[:, [i, k]].sum(axis=1) > 1.5) / (frequency[i] + frequency[k])
semantic = max(0, (1. - ratio_cs) * tag_semantic_similarity(vocab[i], vocab[k], brown_ic))
final_matrix[i, k] = context + semantic
final_matrix[k, i] = final_matrix[i, k]
# laplacian
if n_chunks < 2:
printStatus(INFO, 'Computing the laplacian matrix')
new_diag = final_matrix.sum(axis=0).T
final_matrix = - final_matrix
for i in xrange(N_tags):
final_matrix[i, i] = new_diag[i]
if n_chunks < 2:
printStatus(INFO, 'Saving laplacian matrix to %s' % resultfile)
else:
printStatus(INFO, 'Saving partial matrix to %s' % resultfile)
makedirsforfile(resultfile)
scipy.io.savemat(resultfile, {'tag_similarity' : final_matrix})
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage="""usage: %prog [options] workingCollection""")
parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default=0)")
parser.add_option("--ratiocs", default=DEFAULT_RATIOCS, type="float", help="ratio of context vs wordnet similarity in tag similarity compitation (default %f)" % DEFAULT_RATIOCS)
parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="(default: %s)" % ROOT_PATH)
parser.add_option("--chunk", default=1, type="int", help="job number (default: 1)")
parser.add_option("--nchunks", default=1, type="int", help="total number of jobs (default: 1)")
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.print_help()
return 1
return process(options, args[0])
if __name__ == "__main__":
sys.exit(main())
filtered:remove_function_no_docstring: 3,858, filtered:remove_class_no_docstring: 0, filtered:remove_delete_markers: 69
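Aside: in the single-chunk branch above, process() turns the symmetric tag-similarity matrix into an unnormalized graph Laplacian. A minimal NumPy sketch of that step (the helper name laplacian is illustrative, not from the source):

```python
import numpy as np

def laplacian(W):
    """L = D - W: mirrors the new_diag / sign-flip loop in process(),
    where D is the diagonal matrix of column sums of W."""
    return np.diag(W.sum(axis=0)) - W

W = np.array([[0.0, 0.5, 0.2],
              [0.5, 0.0, 0.1],
              [0.2, 0.1, 0.0]])
L = laplacian(W)
assert np.allclose(L.sum(axis=0), 0.0)  # Laplacian columns sum to zero
```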
Row 3
- hexsha: f7b24a3ed38cb3ed9682bfcbcf9843f1d5c4f3d8
- size: 11,103, ext: py, lang: Python
- max_stars: repo_path=layers/qlib.py, repo_name=astonzhang/Parameterization-of-Hypercomplex-Multiplications, head_hexsha=0e3f1ceebccce9f14ce629356733c07602eb351c, licenses=["Apache-2.0"], count=10, event_min_datetime=2021-12-24T17:13:59.000Z, event_max_datetime=2022-03-22T10:15:00.000Z
- max_issues: repo_path=layers/qlib.py, repo_name=astonzhang/Parameterization-of-Hypercomplex-Multiplications, head_hexsha=0e3f1ceebccce9f14ce629356733c07602eb351c, licenses=["Apache-2.0"], count=null, event_min_datetime=null, event_max_datetime=null
- max_forks: repo_path=layers/qlib.py, repo_name=astonzhang/Parameterization-of-Hypercomplex-Multiplications, head_hexsha=0e3f1ceebccce9f14ce629356733c07602eb351c, licenses=["Apache-2.0"], count=null, event_min_datetime=null, event_max_datetime=null

content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from numpy.random import RandomState
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
import random
""" Quarternion layers
References:
https://arxiv.org/pdf/1806.04418.pdf
https://arxiv.org/pdf/1806.07789.pdf
https://github.com/Orkis-Research/light-Recurrent-Neural-Networks
https://github.com/Orkis-Research/light-Convolutional-Neural-Networks-for-End-to-End-Automatic-Speech-Recognition
Some functions are direct ports from the PyTorch implementations above.
"""
def quarternion_attention(a, b):
""" Performs dot product attention between two quarternion sequences.
a = bsz x al x dim
b = bsz x bl x dim
following:
(rr' - xx' - yy' - zz') +
(rx' + xr' + yz' - zy')i +
(ry' - xz' + yr' + zx')j +
(rz' + xy' - yx' + zr')k +
the output should be one attention matrix for each component (r,i,j,k)
"""
print("light Attention!")
print(a)
print(b)
al, bl = tf.shape(a)[1], tf.shape(b)[1]  # sequence lengths of the [bsz x len x dim] inputs
ar, ax, ay, az = tf.split(a, 4, axis=-1)
br, bx, by, bz = tf.split(b, 4, axis=-1)
r = tf.matmul(ar, br, transpose_b=True) - tf.matmul(ax, bx, transpose_b=True) - tf.matmul(ay, by, transpose_b=True) - tf.matmul(az, bz, transpose_b=True)
i = tf.matmul(ar, bx, transpose_b=True) + tf.matmul(ax, br, transpose_b=True) + tf.matmul(ay, bz, transpose_b=True) - tf.matmul(az, by, transpose_b=True)
j = tf.matmul(ar, by, transpose_b=True) - tf.matmul(ax, bz, transpose_b=True) + tf.matmul(ay, br, transpose_b=True) + tf.matmul(az, bx, transpose_b=True)
k = tf.matmul(ar, bz, transpose_b=True) + tf.matmul(ax, by, transpose_b=True) - tf.matmul(ay, bx, transpose_b=True) + tf.matmul(az, br, transpose_b=True)
return [r, i, j, k]
def quarternion_dot_product_att(a, b):
""" Wrapper for two sequences
"""
al = tf.shape(a)[1]
bl = tf.shape(b)[1]
# print(a)
d = a.get_shape().as_list()[2]
bsz = tf.shape(b)[0]
a = tf.reshape(a, [-1, d])
a = tf.tile(a, [bl, 1])
b = tf.reshape(b, [-1, d])
b = tf.tile(b, [al, 1])
att = quarternion_dot(a, b)
att = tf.reshape(att, [bsz, -1, al * bl])
att = tf.reduce_sum(att, 1)
return tf.reshape(att, [-1, al * bl])
def quarternion_dot(q0, q1):
""" Quarternion product between 2 quarternions
returns same shape and acts like element-wise quarternion mul
"""
q1_r = get_r(q1)
q1_i = get_i(q1)
q1_j = get_j(q1)
q1_k = get_k(q1)
r_base = tf.multiply(q0, q1)
r = get_r(r_base) - get_i(r_base) - get_j(r_base) - get_k(r_base)
i_base = tf.multiply(q0, tf.concat([q1_i, q1_r, q1_k, q1_j], 1))
i = get_r(i_base) + get_i(i_base) + get_j(i_base) - get_k(i_base)
j_base = tf.multiply(q0, tf.concat([q1_j, q1_k, q1_r, q1_i], 1))
j = get_r(j_base) - get_i(j_base) + get_j(j_base) + get_k(j_base)
k_base = tf.multiply(q0, tf.concat([q1_k, q1_j, q1_i, q1_r], 1))
k = get_r(k_base) + get_i(k_base) - get_j(k_base) + get_k(k_base)
return tf.concat([r, i, j, k], 1)
def quarternion_concat(x, axis):
""" Helpful if we have 2 quarternions in [r,i,j,k].
We can't simply concat them as it would mess the components.
So in this case, we extract each component and concat them individually.
"""
output = [[] for i in range(4)]
for _x in x:
sp = tf.split(_x, 4, axis=axis)
for i in range(4):
output[i].append(sp[i])
final = []
for o in output:
o = tf.concat(o, axis)
final.append(o)
return tf.concat(final, axis)
def quarternion_ffn_3d(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Quarternion Feed-forward layers to 3D input [bsz x seq_len x dim]
returns same shape tensor with new projected dimension.
"""
print("QFFN layer..")
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
x = quarternion_ffn(x, dim, name=name, init=init,
num_layers=num_layers,
activation=activation,reuse=reuse)
x = tf.reshape(x, [-1, sq, dim])
return x
def factorized_ffn_3d(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" 3D factorized FFN layer
"""
print("Factor Layer")
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
x = factorized_ffn(x, dim, name=name, init=init,
num_layers=num_layers,
activation=activation,reuse=reuse)
x = tf.reshape(x, [-1, sq, dim])
return x
def factorized_ffn(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Factorized FFN
"""
if(init is None):
init = tf.contrib.layers.xavier_initializer()
input_dim = x.get_shape().as_list()[1]  # x is 2D ([batch, features]) after the caller's reshape
k1 = tf.get_variable('factork1{}'.format(name), [input_dim], initializer=init)
k2 = tf.get_variable('factork2{}'.format(name), [dim], initializer=init)
W = tf.tensordot(k1, k2, axes=0)
output = tf.matmul(x, W)
if(activation):
output = activation(output)
return output
def quarternion_ffn(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Implements quarternion feed-forward layer
x is [bsz x features] tensor
"""
if(init is None):
init = tf.contrib.layers.xavier_initializer()
# init = q_xavier_initializer()
input_dim = x.get_shape().as_list()[1] // 4
with tf.variable_scope('Q{}'.format(name), reuse=reuse) as scope:
kernel = tf.get_variable('quarternion', [input_dim, dim], initializer=init)
hamilton = make_quarternion_mul(kernel)
output = tf.matmul(x, hamilton)
if(activation):
output = activation(output)
return output
def make_random_mul(kernel, n=4, concat_dim=0, dual=False):
""" input is dim/n x dim
output is dim x dim
generalization and parameterized hypercomplex product
"""
dim = kernel.get_shape().as_list()[1]
dim2 = kernel.get_shape().as_list()[0]
kernel = tf.reshape(kernel, [dim2, 1, 1, dim])
mix = tf.split(kernel, n, axis=-1)
sdim = mix[0].get_shape().as_list()[-1] # dim//n x 1 x 1 x dim//n
AM = tf.get_variable('A', [n, 1, n, n])
cat = tf.concat(mix, axis=1) # dim/n x n x 1 x dim/n
cat = tf.tile(cat, [1, 1, n, 1]) # dim/n x n x n x dim/n
cat = tf.transpose(cat, [1, 0, 2, 3]) # n x dim/n x n x dim/n
if(dual==1):
print("Using Dual..")
BM = tf.get_variable('B', [n, 1, n, n])
AM *= tf.nn.sigmoid(BM)
AM = tf.tile(AM, [1, dim2, 1, 1]) # n x dim/n x n x n
cat = tf.matmul(AM, cat) # n x dim/n x n x dim/n
output = tf.reshape(cat, [dim2 *n, dim])
return output
def random_ffn_3d(x, dim, n=16, name='', init=None,
num_layers=1, activation=None, reuse=None, dual=False):
""" Implements random feed-forward layer
x is [bsz x features] tensor
"""
print("R-FFN layer..n={} dual={}".format(n, dual))
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
print(x)
x = random_ffn(x, dim, n=n, name=name, init=init,
num_layers=num_layers,
activation=activation, reuse=reuse, dual=dual)
x = tf.reshape(x, [-1, sq, dim])
return x
def random_ffn(x, dim, n=4, name='', init=None,
num_layers=1, activation=None, reuse=None, dual=0):
""" Implements random feed-forward layer
x is [bsz x features] tensor
"""
if(init is None):
init = tf.contrib.layers.xavier_initializer()
# init = q_xavier_initializer()
input_dim = x.get_shape().as_list()[1] // n
with tf.variable_scope('R{}'.format(name), reuse=reuse) as scope:
kernel = tf.get_variable('random', [input_dim, dim], initializer=init)
hamilton = make_random_mul(kernel, n=n, dual=dual)
output = tf.matmul(x, hamilton)
if(activation):
output = activation(output)
return output
def octonion_ffn_3d(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Quarternion Feed-forward layers to 3D input [bsz x seq_len x dim]
returns same shape tensor with new projected dimension.
"""
print("OFFN layer..")
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
x = octonion_ffn(x, dim, name=name, init=init,
num_layers=num_layers,
activation=activation,reuse=reuse)
x = tf.reshape(x, [-1, sq, dim])
return x
avg_line_length: 30.841667, max_line_length: 154, alphanum_fraction: 0.665766

original_content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from numpy.random import RandomState
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
import random
""" Quarternion layers
References:
https://arxiv.org/pdf/1806.04418.pdf
https://arxiv.org/pdf/1806.07789.pdf
https://github.com/Orkis-Research/light-Recurrent-Neural-Networks
https://github.com/Orkis-Research/light-Convolutional-Neural-Networks-for-End-to-End-Automatic-Speech-Recognition
Some functions are direct ports from the PyTorch implementations above.
"""
def make_quarternion_mul(kernel, concat_dim=0):
r, i, j, k = tf.split(kernel, 4, axis=-1)
r2 = tf.concat([r, -i, -j, -k], axis=-1) # 0, 1, 2, 3
i2 = tf.concat([i, r, -k, j], axis=-1) # 1, 0, 3, 2
j2 = tf.concat([j, k, r, -i], axis=-1) # 2, 3, 0, 1
k2 = tf.concat([k, -j, i, r],axis=-1) # 3, 2, 1, 0
hamilton = tf.concat([r2, i2, j2, k2], axis=concat_dim)
return hamilton
def get_r(x, a=1):
return tf.split(x, 4, axis=a)[0]
def get_i(x, a=1):
return tf.split(x, 4, axis=a)[1]
def get_j(x, a=1):
return tf.split(x, 4, axis=a)[2]
def get_k(x, a=1):
return tf.split(x, 4, axis=a)[3]
def quarternion_attention(a, b):
""" Performs dot product attention between two quarternion sequences.
a = bsz x al x dim
b = bsz x bl x dim
following:
(rr' - xx' - yy' - zz') +
(rx' + xr' + yz' - zy')i +
(ry' - xz' + yr' + zx')j +
(rz' + xy' - yx' + zr')k +
the output should be one attention matrix for each component (r,i,j,k)
"""
print("light Attention!")
print(a)
print(b)
al, bl = tf.shape(a)[1], tf.shape(b)[1]  # sequence lengths of the [bsz x len x dim] inputs
ar, ax, ay, az = tf.split(a, 4, axis=-1)
br, bx, by, bz = tf.split(b, 4, axis=-1)
r = tf.matmul(ar, br, transpose_b=True) - tf.matmul(ax, bx, transpose_b=True) - tf.matmul(ay, by, transpose_b=True) - tf.matmul(az, bz, transpose_b=True)
i = tf.matmul(ar, bx, transpose_b=True) + tf.matmul(ax, br, transpose_b=True) + tf.matmul(ay, bz, transpose_b=True) - tf.matmul(az, by, transpose_b=True)
j = tf.matmul(ar, by, transpose_b=True) - tf.matmul(ax, bz, transpose_b=True) + tf.matmul(ay, br, transpose_b=True) + tf.matmul(az, bx, transpose_b=True)
k = tf.matmul(ar, bz, transpose_b=True) + tf.matmul(ax, by, transpose_b=True) - tf.matmul(ay, bx, transpose_b=True) + tf.matmul(az, br, transpose_b=True)
return [r, i, j, k]
def quarternion_dot_product_att(a, b):
""" Wrapper for two sequences
"""
al = tf.shape(a)[1]
bl = tf.shape(b)[1]
# print(a)
d = a.get_shape().as_list()[2]
bsz = tf.shape(b)[0]
a = tf.reshape(a, [-1, d])
a = tf.tile(a, [bl, 1])
b = tf.reshape(b, [-1, d])
b = tf.tile(b, [al, 1])
att = quarternion_dot(a, b)
att = tf.reshape(att, [bsz, -1, al * bl])
att = tf.reduce_sum(att, 1)
return tf.reshape(att, [-1, al * bl])
def quarternion_dot_3d(q0, q1):
d = q0.get_shape().as_list()[2]
sq = tf.shape(q0)[1]
q0 = tf.reshape(q0, [-1, d])
q1 = tf.reshape(q1, [-1, d])
out = quarternion_dot(q0, q1)
return tf.reshape(out, [-1, sq, d])
def quarternion_dot(q0, q1):
""" Quarternion product between 2 quarternions
returns same shape and acts like element-wise quarternion mul
"""
q1_r = get_r(q1)
q1_i = get_i(q1)
q1_j = get_j(q1)
q1_k = get_k(q1)
r_base = tf.multiply(q0, q1)
r = get_r(r_base) - get_i(r_base) - get_j(r_base) - get_k(r_base)
i_base = tf.multiply(q0, tf.concat([q1_i, q1_r, q1_k, q1_j], 1))
i = get_r(i_base) + get_i(i_base) + get_j(i_base) - get_k(i_base)
j_base = tf.multiply(q0, tf.concat([q1_j, q1_k, q1_r, q1_i], 1))
j = get_r(j_base) - get_i(j_base) + get_j(j_base) + get_k(j_base)
k_base = tf.multiply(q0, tf.concat([q1_k, q1_j, q1_i, q1_r], 1))
k = get_r(k_base) + get_i(k_base) - get_j(k_base) + get_k(k_base)
return tf.concat([r, i, j, k], 1)
def quarternion_concat(x, axis):
""" Helpful if we have 2 quarternions in [r,i,j,k].
We can't simply concat them as it would mess the components.
So in this case, we extract each component and concat them individually.
"""
output = [[] for i in range(4)]
for _x in x:
sp = tf.split(_x, 4, axis=axis)
for i in range(4):
output[i].append(sp[i])
final = []
for o in output:
o = tf.concat(o, axis)
final.append(o)
return tf.concat(final, axis)
def quarternion_ffn_3d(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Quarternion Feed-forward layers to 3D input [bsz x seq_len x dim]
returns same shape tensor with new projected dimension.
"""
print("QFFN layer..")
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
x = quarternion_ffn(x, dim, name=name, init=init,
num_layers=num_layers,
activation=activation,reuse=reuse)
x = tf.reshape(x, [-1, sq, dim])
return x
def factorized_ffn_3d(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" 3D factorized FFN layer
"""
print("Factor Layer")
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
x = factorized_ffn(x, dim, name=name, init=init,
num_layers=num_layers,
activation=activation,reuse=reuse)
x = tf.reshape(x, [-1, sq, dim])
return x
def factorized_ffn(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Factorized FFN
"""
if(init is None):
init = tf.contrib.layers.xavier_initializer()
input_dim = x.get_shape().as_list()[1]  # x is 2D ([batch, features]) after the caller's reshape
k1 = tf.get_variable('factork1{}'.format(name), [input_dim], initializer=init)
k2 = tf.get_variable('factork2{}'.format(name), [dim], initializer=init)
W = tf.tensordot(k1, k2, axes=0)
output = tf.matmul(x, W)
if(activation):
output = activation(output)
return output
def quarternion_ffn(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Implements quarternion feed-forward layer
x is [bsz x features] tensor
"""
if(init is None):
init = tf.contrib.layers.xavier_initializer()
# init = q_xavier_initializer()
input_dim = x.get_shape().as_list()[1] // 4
with tf.variable_scope('Q{}'.format(name), reuse=reuse) as scope:
kernel = tf.get_variable('quarternion', [input_dim, dim], initializer=init)
hamilton = make_quarternion_mul(kernel)
output = tf.matmul(x, hamilton)
if(activation):
output = activation(output)
return output
def make_random_mul(kernel, n=4, concat_dim=0, dual=False):
""" input is dim/n x dim
output is dim x dim
generalization and parameterized hypercomplex product
"""
dim = kernel.get_shape().as_list()[1]
dim2 = kernel.get_shape().as_list()[0]
kernel = tf.reshape(kernel, [dim2, 1, 1, dim])
mix = tf.split(kernel, n, axis=-1)
sdim = mix[0].get_shape().as_list()[-1] # dim//n x 1 x 1 x dim//n
AM = tf.get_variable('A', [n, 1, n, n])
cat = tf.concat(mix, axis=1) # dim/n x n x 1 x dim/n
cat = tf.tile(cat, [1, 1, n, 1]) # dim/n x n x n x dim/n
cat = tf.transpose(cat, [1, 0, 2, 3]) # n x dim/n x n x dim/n
if(dual==1):
print("Using Dual..")
BM = tf.get_variable('B', [n, 1, n, n])
AM *= tf.nn.sigmoid(BM)
AM = tf.tile(AM, [1, dim2, 1, 1]) # n x dim/n x n x n
cat = tf.matmul(AM, cat) # n x dim/n x n x dim/n
output = tf.reshape(cat, [dim2 *n, dim])
return output
def random_ffn_3d(x, dim, n=16, name='', init=None,
num_layers=1, activation=None, reuse=None, dual=False):
""" Implements random feed-forward layer
x is [bsz x features] tensor
"""
print("R-FFN layer..n={} dual={}".format(n, dual))
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
print(x)
x = random_ffn(x, dim, n=n, name=name, init=init,
num_layers=num_layers,
activation=activation, reuse=reuse, dual=dual)
x = tf.reshape(x, [-1, sq, dim])
return x
def random_ffn(x, dim, n=4, name='', init=None,
num_layers=1, activation=None, reuse=None, dual=0):
""" Implements random feed-forward layer
x is [bsz x features] tensor
"""
if(init is None):
init = tf.contrib.layers.xavier_initializer()
# init = q_xavier_initializer()
input_dim = x.get_shape().as_list()[1] // n
with tf.variable_scope('R{}'.format(name), reuse=reuse) as scope:
kernel = tf.get_variable('random', [input_dim, dim], initializer=init)
hamilton = make_random_mul(kernel, n=n, dual=dual)
output = tf.matmul(x, hamilton)
if(activation):
output = activation(output)
return output
def octonion_ffn_3d(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
""" Quarternion Feed-forward layers to 3D input [bsz x seq_len x dim]
returns same shape tensor with new projected dimension.
"""
print("OFFN layer..")
_d = x.get_shape().as_list()[2]
sq = tf.shape(x)[1]
x = tf.reshape(x, [-1, _d])
x = octonion_ffn(x, dim, name=name, init=init,
num_layers=num_layers,
activation=activation,reuse=reuse)
x = tf.reshape(x, [-1, sq, dim])
return x
def octonion_ffn(x, dim, name='', init=None,
num_layers=1, activation=None, reuse=None):
if(init is None):
init = tf.contrib.layers.xavier_initializer()
input_dim = x.get_shape().as_list()[1] // 8
with tf.variable_scope('OCT{}'.format(name), reuse=reuse) as scope:
kernel = tf.get_variable('octonion', [input_dim, dim], initializer=init)
output = octonion_mul(x, kernel)
return output
def hamilton_product(x, kernel):
h = make_quarternion_mul(kernel)
output = tf.matmul(x, h)
return output
def qstar(x):
x = tf.split(x, 4, axis=-1)
x1 = -x[1]
x2 = -x[2]
x3 = -x[3]
# conjugate: all three imaginary components are negated
return tf.concat([x[0], x1, x2, x3], axis=-1)
def octonion_mul(x, kernel):
x1, x2 = tf.split(x, 2, axis=-1)
k1, k2 = tf.split(kernel, 2, axis=-1)
print(x1)
print(k1)
o1 = hamilton_product(x1, k1)
o2 = hamilton_product(k2, x1)
o1 -= hamilton_product(qstar(k2), x2)
o2 += hamilton_product(x2, qstar(k1))
output = tf.concat([o1, o2], axis=-1)
return output
class QuarternionRNN(tf.nn.rnn_cell.RNNCell):
def __init__(self, input_dim, output_dim,
initializer=None, name='', reuse=None):
""" Rough implementation (need double-check)
from the Quarternion RNN paper. For now, works decently.
"""
self.dim = output_dim
with tf.variable_scope("QuartRNN{}".format(name), reuse=reuse) as scope:
if(initializer is None):
# initializer = tf.contrib.layers.xavier_initializer()
initializer = tf.orthogonal_initializer()
input_dim = input_dim // 4
self.Wh = tf.get_variable("Wh", [input_dim, output_dim],
initializer=initializer)
self.Wx = tf.get_variable("Wx", [input_dim, output_dim],
initializer=initializer)
self.Wy = tf.get_variable("Wy", [input_dim, output_dim],
initializer=initializer)
self.Wh = make_quarternion_mul(self.Wh)
self.Wx = make_quarternion_mul(self.Wx)
self.Wy = make_quarternion_mul(self.Wy)
@property
def state_size(self):
return self.dim
@property
def output_size(self):
return self.dim
def __call__(self, inputs, state, scope=None):
"""
inputs: 2-D tensor of shape [batch_size, feats + [gates]]
"""
new_state = tf.matmul(state, self.Wh) + tf.matmul(inputs, self.Wx)
new_state = tf.nn.sigmoid(new_state)
output = tf.nn.tanh(tf.matmul(inputs, self.Wy))
return output, new_state
filtered:remove_function_no_docstring: 1,561, filtered:remove_class_no_docstring: 1,275, filtered:remove_delete_markers: 253
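Aside: make_quarternion_mul above folds a Hamilton product into one ordinary matrix multiply. A hedged NumPy check of the 4x4 case (helper names are illustrative, not from the source; with this row layout, x @ H reproduces the quaternion product of the conjugated kernel with x under the standard convention):

```python
import numpy as np

def quat_mul(a, b):
    """Standard Hamilton product of quaternions given as (r, i, j, k)."""
    a0, a1, a2, a3 = a
    b0, b1, b2, b3 = b
    return np.array([
        a0*b0 - a1*b1 - a2*b2 - a3*b3,
        a0*b1 + a1*b0 + a2*b3 - a3*b2,
        a0*b2 - a1*b3 + a2*b0 + a3*b1,
        a0*b3 + a1*b2 - a2*b1 + a3*b0,
    ])

def hamilton_matrix(w):
    """Same row layout make_quarternion_mul builds for a scalar kernel w."""
    r, i, j, k = w
    return np.array([
        [r, -i, -j, -k],   # r2
        [i,  r, -k,  j],   # i2
        [j,  k,  r, -i],   # j2
        [k, -j,  i,  r],   # k2
    ])

rng = np.random.default_rng(0)
w, x = rng.normal(size=4), rng.normal(size=4)
w_conj = w * np.array([1.0, -1.0, -1.0, -1.0])
assert np.allclose(x @ hamilton_matrix(w), quat_mul(w_conj, x))
```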
Row 4
- hexsha: 71ef1c0962702fc6f2da6abacd53906df9d377f8
- size: 42,856, ext: py, lang: Python
- max_stars: repo_path=hfc/protos/token/prover_pb2.py, repo_name=hyperledger-gerrit-archive/fabric-sdk-py, head_hexsha=0432d0ebce06a053d55febf7ed793dd163fcb68c, licenses=["Apache-2.0"], count=null, event_min_datetime=null, event_max_datetime=null
- max_issues: repo_path=hfc/protos/token/prover_pb2.py, repo_name=hyperledger-gerrit-archive/fabric-sdk-py, head_hexsha=0432d0ebce06a053d55febf7ed793dd163fcb68c, licenses=["Apache-2.0"], count=1, event_min_datetime=2019-12-09T05:05:41.000Z, event_max_datetime=2019-12-09T05:05:41.000Z
- max_forks: repo_path=hfc/protos/token/prover_pb2.py, repo_name=hyperledger-gerrit-archive/fabric-sdk-py, head_hexsha=0432d0ebce06a053d55febf7ed793dd163fcb68c, licenses=["Apache-2.0"], count=null, event_min_datetime=null, event_max_datetime=null

content:
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hfc/protos/token/prover.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from hfc.protos.token import expectations_pb2 as hfc_dot_protos_dot_token_dot_expectations__pb2
from hfc.protos.token import transaction_pb2 as hfc_dot_protos_dot_token_dot_transaction__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hfc/protos/token/prover.proto',
package='token',
syntax='proto3',
serialized_options=_b('\n#org.hyperledger.fabric.protos.tokenZ*github.com/hyperledger/fabric/protos/token'),
serialized_pb=_b('\n\x1dhfc/protos/token/prover.proto\x12\x05token\x1a\x1fgoogle/protobuf/timestamp.proto\x1a#hfc/protos/token/expectations.proto\x1a\"hfc/protos/token/transaction.proto\"A\n\x0cTokenToIssue\x12\x11\n\trecipient\x18\x01 \x01(\x0c\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08quantity\x18\x03 \x01(\x04\"=\n\x16RecipientTransferShare\x12\x11\n\trecipient\x18\x01 \x01(\x0c\x12\x10\n\x08quantity\x18\x02 \x01(\x04\"I\n\x0bTokenOutput\x12\x1a\n\x02id\x18\x01 \x01(\x0b\x32\x0e.token.InputId\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08quantity\x18\x03 \x01(\x04\"3\n\rUnspentTokens\x12\"\n\x06tokens\x18\x01 \x03(\x0b\x32\x12.token.TokenOutput\"!\n\x0bListRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\"Q\n\rImportRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12,\n\x0ftokens_to_issue\x18\x02 \x03(\x0b\x32\x13.token.TokenToIssue\"w\n\x0fTransferRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12!\n\ttoken_ids\x18\x02 \x03(\x0b\x32\x0e.token.InputId\x12-\n\x06shares\x18\x03 \x03(\x0b\x32\x1d.token.RecipientTransferShare\"b\n\rRedeemRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12!\n\ttoken_ids\x18\x02 \x03(\x0b\x32\x0e.token.InputId\x12\x1a\n\x12quantity_to_redeem\x18\x03 \x01(\x04\">\n\x17\x41llowanceRecipientShare\x12\x11\n\trecipient\x18\x01 \x01(\x0c\x12\x10\n\x08quantity\x18\x02 \x01(\x04\"\x81\x01\n\x0e\x41pproveRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12\x38\n\x10\x61llowance_shares\x18\x02 \x03(\x0b\x32\x1e.token.AllowanceRecipientShare\x12!\n\ttoken_ids\x18\x03 \x03(\x0b\x32\x0e.token.InputId\"y\n\x12\x45xpectationRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12,\n\x0b\x65xpectation\x18\x02 \x01(\x0b\x32\x17.token.TokenExpectation\x12!\n\ttoken_ids\x18\x03 \x03(\x0b\x32\x0e.token.InputId\"\x82\x01\n\x06Header\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nchannel_id\x18\x02 \x01(\t\x12\r\n\x05nonce\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reator\x18\x04 \x01(\x0c\x12\x15\n\rtls_cert_hash\x18\x05 \x01(\x0c\"\x98\x03\n\x07\x43ommand\x12\x1d\n\x06header\x18\x01 \x01(\x0b\x32\r.token.Header\x12.\n\x0eimport_request\x18\x02 \x01(\x0b\x32\x14.token.ImportRequestH\x00\x12\x32\n\x10transfer_request\x18\x03 \x01(\x0b\x32\x16.token.TransferRequestH\x00\x12*\n\x0clist_request\x18\x04 \x01(\x0b\x32\x12.token.ListRequestH\x00\x12.\n\x0eredeem_request\x18\x05 \x01(\x0b\x32\x14.token.RedeemRequestH\x00\x12\x30\n\x0f\x61pprove_request\x18\x06 \x01(\x0b\x32\x15.token.ApproveRequestH\x00\x12\x37\n\x15transfer_from_request\x18\x07 \x01(\x0b\x32\x16.token.TransferRequestH\x00\x12\x38\n\x13\x65xpectation_request\x18\x08 \x01(\x0b\x32\x19.token.ExpectationRequestH\x00\x42\t\n\x07payload\"3\n\rSignedCommand\x12\x0f\n\x07\x63ommand\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\"m\n\x15\x43ommandResponseHeader\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0c\x63ommand_hash\x18\x02 \x01(\x0c\x12\x0f\n\x07\x63reator\x18\x03 \x01(\x0c\")\n\x05\x45rror\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"\xcd\x01\n\x0f\x43ommandResponse\x12,\n\x06header\x18\x01 \x01(\x0b\x32\x1c.token.CommandResponseHeader\x12\x1b\n\x03\x65rr\x18\x02 \x01(\x0b\x32\x0c.token.ErrorH\x00\x12\x34\n\x11token_transaction\x18\x03 \x01(\x0b\x32\x17.token.TokenTransactionH\x00\x12.\n\x0eunspent_tokens\x18\x04 \x01(\x0b\x32\x14.token.UnspentTokensH\x00\x42\t\n\x07payload\"<\n\x15SignedCommandResponse\x12\x10\n\x08response\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 
\x01(\x0c\x32P\n\x06Prover\x12\x46\n\x0eProcessCommand\x12\x14.token.SignedCommand\x1a\x1c.token.SignedCommandResponse\"\x00\x42Q\n#org.hyperledger.fabric.protos.tokenZ*github.com/hyperledger/fabric/protos/tokenb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,hfc_dot_protos_dot_token_dot_expectations__pb2.DESCRIPTOR,hfc_dot_protos_dot_token_dot_transaction__pb2.DESCRIPTOR,])
_TOKENTOISSUE = _descriptor.Descriptor(
name='TokenToIssue',
full_name='token.TokenToIssue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='recipient', full_name='token.TokenToIssue.recipient', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='token.TokenToIssue.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.TokenToIssue.quantity', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=146,
serialized_end=211,
)
_RECIPIENTTRANSFERSHARE = _descriptor.Descriptor(
name='RecipientTransferShare',
full_name='token.RecipientTransferShare',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='recipient', full_name='token.RecipientTransferShare.recipient', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.RecipientTransferShare.quantity', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=274,
)
_TOKENOUTPUT = _descriptor.Descriptor(
name='TokenOutput',
full_name='token.TokenOutput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='token.TokenOutput.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='token.TokenOutput.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.TokenOutput.quantity', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=276,
serialized_end=349,
)
_UNSPENTTOKENS = _descriptor.Descriptor(
name='UnspentTokens',
full_name='token.UnspentTokens',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tokens', full_name='token.UnspentTokens.tokens', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=351,
serialized_end=402,
)
_LISTREQUEST = _descriptor.Descriptor(
name='ListRequest',
full_name='token.ListRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ListRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=404,
serialized_end=437,
)
_IMPORTREQUEST = _descriptor.Descriptor(
name='ImportRequest',
full_name='token.ImportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ImportRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokens_to_issue', full_name='token.ImportRequest.tokens_to_issue', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=439,
serialized_end=520,
)
_TRANSFERREQUEST = _descriptor.Descriptor(
name='TransferRequest',
full_name='token.TransferRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.TransferRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.TransferRequest.token_ids', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shares', full_name='token.TransferRequest.shares', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=522,
serialized_end=641,
)
_REDEEMREQUEST = _descriptor.Descriptor(
name='RedeemRequest',
full_name='token.RedeemRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.RedeemRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.RedeemRequest.token_ids', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity_to_redeem', full_name='token.RedeemRequest.quantity_to_redeem', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=643,
serialized_end=741,
)
_ALLOWANCERECIPIENTSHARE = _descriptor.Descriptor(
name='AllowanceRecipientShare',
full_name='token.AllowanceRecipientShare',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='recipient', full_name='token.AllowanceRecipientShare.recipient', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.AllowanceRecipientShare.quantity', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=743,
serialized_end=805,
)
_APPROVEREQUEST = _descriptor.Descriptor(
name='ApproveRequest',
full_name='token.ApproveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ApproveRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowance_shares', full_name='token.ApproveRequest.allowance_shares', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.ApproveRequest.token_ids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=808,
serialized_end=937,
)
_EXPECTATIONREQUEST = _descriptor.Descriptor(
name='ExpectationRequest',
full_name='token.ExpectationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ExpectationRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expectation', full_name='token.ExpectationRequest.expectation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.ExpectationRequest.token_ids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=939,
serialized_end=1060,
)
_HEADER = _descriptor.Descriptor(
name='Header',
full_name='token.Header',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='token.Header.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel_id', full_name='token.Header.channel_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nonce', full_name='token.Header.nonce', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='token.Header.creator', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tls_cert_hash', full_name='token.Header.tls_cert_hash', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1063,
serialized_end=1193,
)
_COMMAND = _descriptor.Descriptor(
name='Command',
full_name='token.Command',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='token.Command.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='import_request', full_name='token.Command.import_request', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transfer_request', full_name='token.Command.transfer_request', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list_request', full_name='token.Command.list_request', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='redeem_request', full_name='token.Command.redeem_request', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='approve_request', full_name='token.Command.approve_request', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transfer_from_request', full_name='token.Command.transfer_from_request', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expectation_request', full_name='token.Command.expectation_request', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='token.Command.payload',
index=0, containing_type=None, fields=[]),
],
serialized_start=1196,
serialized_end=1604,
)
_SIGNEDCOMMAND = _descriptor.Descriptor(
name='SignedCommand',
full_name='token.SignedCommand',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='command', full_name='token.SignedCommand.command', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='token.SignedCommand.signature', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1606,
serialized_end=1657,
)
_COMMANDRESPONSEHEADER = _descriptor.Descriptor(
name='CommandResponseHeader',
full_name='token.CommandResponseHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='token.CommandResponseHeader.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command_hash', full_name='token.CommandResponseHeader.command_hash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='token.CommandResponseHeader.creator', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1659,
serialized_end=1768,
)
_ERROR = _descriptor.Descriptor(
name='Error',
full_name='token.Error',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='token.Error.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='token.Error.payload', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1770,
serialized_end=1811,
)
_COMMANDRESPONSE = _descriptor.Descriptor(
name='CommandResponse',
full_name='token.CommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='token.CommandResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='err', full_name='token.CommandResponse.err', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_transaction', full_name='token.CommandResponse.token_transaction', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unspent_tokens', full_name='token.CommandResponse.unspent_tokens', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='token.CommandResponse.payload',
index=0, containing_type=None, fields=[]),
],
serialized_start=1814,
serialized_end=2019,
)
_SIGNEDCOMMANDRESPONSE = _descriptor.Descriptor(
name='SignedCommandResponse',
full_name='token.SignedCommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response', full_name='token.SignedCommandResponse.response', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='token.SignedCommandResponse.signature', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2021,
serialized_end=2081,
)
_TOKENOUTPUT.fields_by_name['id'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_UNSPENTTOKENS.fields_by_name['tokens'].message_type = _TOKENOUTPUT
_IMPORTREQUEST.fields_by_name['tokens_to_issue'].message_type = _TOKENTOISSUE
_TRANSFERREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_TRANSFERREQUEST.fields_by_name['shares'].message_type = _RECIPIENTTRANSFERSHARE
_REDEEMREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_APPROVEREQUEST.fields_by_name['allowance_shares'].message_type = _ALLOWANCERECIPIENTSHARE
_APPROVEREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_EXPECTATIONREQUEST.fields_by_name['expectation'].message_type = hfc_dot_protos_dot_token_dot_expectations__pb2._TOKENEXPECTATION
_EXPECTATIONREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_HEADER.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_COMMAND.fields_by_name['header'].message_type = _HEADER
_COMMAND.fields_by_name['import_request'].message_type = _IMPORTREQUEST
_COMMAND.fields_by_name['transfer_request'].message_type = _TRANSFERREQUEST
_COMMAND.fields_by_name['list_request'].message_type = _LISTREQUEST
_COMMAND.fields_by_name['redeem_request'].message_type = _REDEEMREQUEST
_COMMAND.fields_by_name['approve_request'].message_type = _APPROVEREQUEST
_COMMAND.fields_by_name['transfer_from_request'].message_type = _TRANSFERREQUEST
_COMMAND.fields_by_name['expectation_request'].message_type = _EXPECTATIONREQUEST
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['import_request'])
_COMMAND.fields_by_name['import_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['transfer_request'])
_COMMAND.fields_by_name['transfer_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['list_request'])
_COMMAND.fields_by_name['list_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['redeem_request'])
_COMMAND.fields_by_name['redeem_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['approve_request'])
_COMMAND.fields_by_name['approve_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['transfer_from_request'])
_COMMAND.fields_by_name['transfer_from_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['expectation_request'])
_COMMAND.fields_by_name['expectation_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMANDRESPONSEHEADER.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_COMMANDRESPONSE.fields_by_name['header'].message_type = _COMMANDRESPONSEHEADER
_COMMANDRESPONSE.fields_by_name['err'].message_type = _ERROR
_COMMANDRESPONSE.fields_by_name['token_transaction'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._TOKENTRANSACTION
_COMMANDRESPONSE.fields_by_name['unspent_tokens'].message_type = _UNSPENTTOKENS
_COMMANDRESPONSE.oneofs_by_name['payload'].fields.append(
_COMMANDRESPONSE.fields_by_name['err'])
_COMMANDRESPONSE.fields_by_name['err'].containing_oneof = _COMMANDRESPONSE.oneofs_by_name['payload']
_COMMANDRESPONSE.oneofs_by_name['payload'].fields.append(
_COMMANDRESPONSE.fields_by_name['token_transaction'])
_COMMANDRESPONSE.fields_by_name['token_transaction'].containing_oneof = _COMMANDRESPONSE.oneofs_by_name['payload']
_COMMANDRESPONSE.oneofs_by_name['payload'].fields.append(
_COMMANDRESPONSE.fields_by_name['unspent_tokens'])
_COMMANDRESPONSE.fields_by_name['unspent_tokens'].containing_oneof = _COMMANDRESPONSE.oneofs_by_name['payload']
DESCRIPTOR.message_types_by_name['TokenToIssue'] = _TOKENTOISSUE
DESCRIPTOR.message_types_by_name['RecipientTransferShare'] = _RECIPIENTTRANSFERSHARE
DESCRIPTOR.message_types_by_name['TokenOutput'] = _TOKENOUTPUT
DESCRIPTOR.message_types_by_name['UnspentTokens'] = _UNSPENTTOKENS
DESCRIPTOR.message_types_by_name['ListRequest'] = _LISTREQUEST
DESCRIPTOR.message_types_by_name['ImportRequest'] = _IMPORTREQUEST
DESCRIPTOR.message_types_by_name['TransferRequest'] = _TRANSFERREQUEST
DESCRIPTOR.message_types_by_name['RedeemRequest'] = _REDEEMREQUEST
DESCRIPTOR.message_types_by_name['AllowanceRecipientShare'] = _ALLOWANCERECIPIENTSHARE
DESCRIPTOR.message_types_by_name['ApproveRequest'] = _APPROVEREQUEST
DESCRIPTOR.message_types_by_name['ExpectationRequest'] = _EXPECTATIONREQUEST
DESCRIPTOR.message_types_by_name['Header'] = _HEADER
DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
DESCRIPTOR.message_types_by_name['SignedCommand'] = _SIGNEDCOMMAND
DESCRIPTOR.message_types_by_name['CommandResponseHeader'] = _COMMANDRESPONSEHEADER
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['CommandResponse'] = _COMMANDRESPONSE
DESCRIPTOR.message_types_by_name['SignedCommandResponse'] = _SIGNEDCOMMANDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TokenToIssue = _reflection.GeneratedProtocolMessageType('TokenToIssue', (_message.Message,), dict(
DESCRIPTOR = _TOKENTOISSUE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.TokenToIssue)
))
_sym_db.RegisterMessage(TokenToIssue)
RecipientTransferShare = _reflection.GeneratedProtocolMessageType('RecipientTransferShare', (_message.Message,), dict(
DESCRIPTOR = _RECIPIENTTRANSFERSHARE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.RecipientTransferShare)
))
_sym_db.RegisterMessage(RecipientTransferShare)
TokenOutput = _reflection.GeneratedProtocolMessageType('TokenOutput', (_message.Message,), dict(
DESCRIPTOR = _TOKENOUTPUT,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.TokenOutput)
))
_sym_db.RegisterMessage(TokenOutput)
UnspentTokens = _reflection.GeneratedProtocolMessageType('UnspentTokens', (_message.Message,), dict(
DESCRIPTOR = _UNSPENTTOKENS,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.UnspentTokens)
))
_sym_db.RegisterMessage(UnspentTokens)
ListRequest = _reflection.GeneratedProtocolMessageType('ListRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ListRequest)
))
_sym_db.RegisterMessage(ListRequest)
ImportRequest = _reflection.GeneratedProtocolMessageType('ImportRequest', (_message.Message,), dict(
DESCRIPTOR = _IMPORTREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ImportRequest)
))
_sym_db.RegisterMessage(ImportRequest)
TransferRequest = _reflection.GeneratedProtocolMessageType('TransferRequest', (_message.Message,), dict(
DESCRIPTOR = _TRANSFERREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.TransferRequest)
))
_sym_db.RegisterMessage(TransferRequest)
RedeemRequest = _reflection.GeneratedProtocolMessageType('RedeemRequest', (_message.Message,), dict(
DESCRIPTOR = _REDEEMREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.RedeemRequest)
))
_sym_db.RegisterMessage(RedeemRequest)
AllowanceRecipientShare = _reflection.GeneratedProtocolMessageType('AllowanceRecipientShare', (_message.Message,), dict(
DESCRIPTOR = _ALLOWANCERECIPIENTSHARE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.AllowanceRecipientShare)
))
_sym_db.RegisterMessage(AllowanceRecipientShare)
ApproveRequest = _reflection.GeneratedProtocolMessageType('ApproveRequest', (_message.Message,), dict(
DESCRIPTOR = _APPROVEREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ApproveRequest)
))
_sym_db.RegisterMessage(ApproveRequest)
ExpectationRequest = _reflection.GeneratedProtocolMessageType('ExpectationRequest', (_message.Message,), dict(
DESCRIPTOR = _EXPECTATIONREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ExpectationRequest)
))
_sym_db.RegisterMessage(ExpectationRequest)
Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), dict(
DESCRIPTOR = _HEADER,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.Header)
))
_sym_db.RegisterMessage(Header)
Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
DESCRIPTOR = _COMMAND,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.Command)
))
_sym_db.RegisterMessage(Command)
SignedCommand = _reflection.GeneratedProtocolMessageType('SignedCommand', (_message.Message,), dict(
DESCRIPTOR = _SIGNEDCOMMAND,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.SignedCommand)
))
_sym_db.RegisterMessage(SignedCommand)
CommandResponseHeader = _reflection.GeneratedProtocolMessageType('CommandResponseHeader', (_message.Message,), dict(
DESCRIPTOR = _COMMANDRESPONSEHEADER,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.CommandResponseHeader)
))
_sym_db.RegisterMessage(CommandResponseHeader)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.Error)
))
_sym_db.RegisterMessage(Error)
CommandResponse = _reflection.GeneratedProtocolMessageType('CommandResponse', (_message.Message,), dict(
DESCRIPTOR = _COMMANDRESPONSE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.CommandResponse)
))
_sym_db.RegisterMessage(CommandResponse)
SignedCommandResponse = _reflection.GeneratedProtocolMessageType('SignedCommandResponse', (_message.Message,), dict(
DESCRIPTOR = _SIGNEDCOMMANDRESPONSE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.SignedCommandResponse)
))
_sym_db.RegisterMessage(SignedCommandResponse)
DESCRIPTOR._options = None
_PROVER = _descriptor.ServiceDescriptor(
name='Prover',
full_name='token.Prover',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=2083,
serialized_end=2163,
methods=[
_descriptor.MethodDescriptor(
name='ProcessCommand',
full_name='token.Prover.ProcessCommand',
index=0,
containing_service=None,
input_type=_SIGNEDCOMMAND,
output_type=_SIGNEDCOMMANDRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PROVER)
DESCRIPTOR.services_by_name['Prover'] = _PROVER
# @@protoc_insertion_point(module_scope)
| 40.430189
| 3,733
| 0.757584
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hfc/protos/token/prover.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from hfc.protos.token import expectations_pb2 as hfc_dot_protos_dot_token_dot_expectations__pb2
from hfc.protos.token import transaction_pb2 as hfc_dot_protos_dot_token_dot_transaction__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hfc/protos/token/prover.proto',
package='token',
syntax='proto3',
serialized_options=_b('\n#org.hyperledger.fabric.protos.tokenZ*github.com/hyperledger/fabric/protos/token'),
  serialized_pb=_b('\n\x1dhfc/protos/token/prover.proto\x12\x05token\x1a\x1fgoogle/protobuf/timestamp.proto\x1a#hfc/protos/token/expectations.proto\x1a\"hfc/protos/token/transaction.proto\"A\n\x0cTokenToIssue\x12\x11\n\trecipient\x18\x01 \x01(\x0c\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08quantity\x18\x03 \x01(\x04\"=\n\x16RecipientTransferShare\x12\x11\n\trecipient\x18\x01 \x01(\x0c\x12\x10\n\x08quantity\x18\x02 \x01(\x04\"I\n\x0bTokenOutput\x12\x1a\n\x02id\x18\x01 \x01(\x0b\x32\x0e.token.InputId\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08quantity\x18\x03 \x01(\x04\"3\n\rUnspentTokens\x12\"\n\x06tokens\x18\x01 \x03(\x0b\x32\x12.token.TokenOutput\"!\n\x0bListRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\"Q\n\rImportRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12,\n\x0ftokens_to_issue\x18\x02 \x03(\x0b\x32\x13.token.TokenToIssue\"w\n\x0fTransferRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12!\n\ttoken_ids\x18\x02 \x03(\x0b\x32\x0e.token.InputId\x12-\n\x06shares\x18\x03 \x03(\x0b\x32\x1d.token.RecipientTransferShare\"b\n\rRedeemRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12!\n\ttoken_ids\x18\x02 \x03(\x0b\x32\x0e.token.InputId\x12\x1a\n\x12quantity_to_redeem\x18\x03 \x01(\x04\">\n\x17\x41llowanceRecipientShare\x12\x11\n\trecipient\x18\x01 \x01(\x0c\x12\x10\n\x08quantity\x18\x02 \x01(\x04\"\x81\x01\n\x0e\x41pproveRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12\x38\n\x10\x61llowance_shares\x18\x02 \x03(\x0b\x32\x1e.token.AllowanceRecipientShare\x12!\n\ttoken_ids\x18\x03 \x03(\x0b\x32\x0e.token.InputId\"y\n\x12\x45xpectationRequest\x12\x12\n\ncredential\x18\x01 \x01(\x0c\x12,\n\x0b\x65xpectation\x18\x02 \x01(\x0b\x32\x17.token.TokenExpectation\x12!\n\ttoken_ids\x18\x03 \x03(\x0b\x32\x0e.token.InputId\"\x82\x01\n\x06Header\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nchannel_id\x18\x02 \x01(\t\x12\r\n\x05nonce\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reator\x18\x04 \x01(\x0c\x12\x15\n\rtls_cert_hash\x18\x05 \x01(\x0c\"\x98\x03\n\x07\x43ommand\x12\x1d\n\x06header\x18\x01 \x01(\x0b\x32\r.token.Header\x12.\n\x0eimport_request\x18\x02 \x01(\x0b\x32\x14.token.ImportRequestH\x00\x12\x32\n\x10transfer_request\x18\x03 \x01(\x0b\x32\x16.token.TransferRequestH\x00\x12*\n\x0clist_request\x18\x04 \x01(\x0b\x32\x12.token.ListRequestH\x00\x12.\n\x0eredeem_request\x18\x05 \x01(\x0b\x32\x14.token.RedeemRequestH\x00\x12\x30\n\x0f\x61pprove_request\x18\x06 \x01(\x0b\x32\x15.token.ApproveRequestH\x00\x12\x37\n\x15transfer_from_request\x18\x07 \x01(\x0b\x32\x16.token.TransferRequestH\x00\x12\x38\n\x13\x65xpectation_request\x18\x08 \x01(\x0b\x32\x19.token.ExpectationRequestH\x00\x42\t\n\x07payload\"3\n\rSignedCommand\x12\x0f\n\x07\x63ommand\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\"m\n\x15\x43ommandResponseHeader\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0c\x63ommand_hash\x18\x02 \x01(\x0c\x12\x0f\n\x07\x63reator\x18\x03 \x01(\x0c\")\n\x05\x45rror\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"\xcd\x01\n\x0f\x43ommandResponse\x12,\n\x06header\x18\x01 \x01(\x0b\x32\x1c.token.CommandResponseHeader\x12\x1b\n\x03\x65rr\x18\x02 \x01(\x0b\x32\x0c.token.ErrorH\x00\x12\x34\n\x11token_transaction\x18\x03 \x01(\x0b\x32\x17.token.TokenTransactionH\x00\x12.\n\x0eunspent_tokens\x18\x04 \x01(\x0b\x32\x14.token.UnspentTokensH\x00\x42\t\n\x07payload\"<\n\x15SignedCommandResponse\x12\x10\n\x08response\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x32P\n\x06Prover\x12\x46\n\x0eProcessCommand\x12\x14.token.SignedCommand\x1a\x1c.token.SignedCommandResponse\"\x00\x42Q\n#org.hyperledger.fabric.protos.tokenZ*github.com/hyperledger/fabric/protos/tokenb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,hfc_dot_protos_dot_token_dot_expectations__pb2.DESCRIPTOR,hfc_dot_protos_dot_token_dot_transaction__pb2.DESCRIPTOR,])
_TOKENTOISSUE = _descriptor.Descriptor(
name='TokenToIssue',
full_name='token.TokenToIssue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='recipient', full_name='token.TokenToIssue.recipient', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='token.TokenToIssue.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.TokenToIssue.quantity', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=146,
serialized_end=211,
)
_RECIPIENTTRANSFERSHARE = _descriptor.Descriptor(
name='RecipientTransferShare',
full_name='token.RecipientTransferShare',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='recipient', full_name='token.RecipientTransferShare.recipient', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.RecipientTransferShare.quantity', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=274,
)
_TOKENOUTPUT = _descriptor.Descriptor(
name='TokenOutput',
full_name='token.TokenOutput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='token.TokenOutput.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='token.TokenOutput.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.TokenOutput.quantity', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=276,
serialized_end=349,
)
_UNSPENTTOKENS = _descriptor.Descriptor(
name='UnspentTokens',
full_name='token.UnspentTokens',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tokens', full_name='token.UnspentTokens.tokens', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=351,
serialized_end=402,
)
_LISTREQUEST = _descriptor.Descriptor(
name='ListRequest',
full_name='token.ListRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ListRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=404,
serialized_end=437,
)
_IMPORTREQUEST = _descriptor.Descriptor(
name='ImportRequest',
full_name='token.ImportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ImportRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokens_to_issue', full_name='token.ImportRequest.tokens_to_issue', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=439,
serialized_end=520,
)
_TRANSFERREQUEST = _descriptor.Descriptor(
name='TransferRequest',
full_name='token.TransferRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.TransferRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.TransferRequest.token_ids', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shares', full_name='token.TransferRequest.shares', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=522,
serialized_end=641,
)
_REDEEMREQUEST = _descriptor.Descriptor(
name='RedeemRequest',
full_name='token.RedeemRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.RedeemRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.RedeemRequest.token_ids', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity_to_redeem', full_name='token.RedeemRequest.quantity_to_redeem', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=643,
serialized_end=741,
)
_ALLOWANCERECIPIENTSHARE = _descriptor.Descriptor(
name='AllowanceRecipientShare',
full_name='token.AllowanceRecipientShare',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='recipient', full_name='token.AllowanceRecipientShare.recipient', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantity', full_name='token.AllowanceRecipientShare.quantity', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=743,
serialized_end=805,
)
_APPROVEREQUEST = _descriptor.Descriptor(
name='ApproveRequest',
full_name='token.ApproveRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ApproveRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowance_shares', full_name='token.ApproveRequest.allowance_shares', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.ApproveRequest.token_ids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=808,
serialized_end=937,
)
_EXPECTATIONREQUEST = _descriptor.Descriptor(
name='ExpectationRequest',
full_name='token.ExpectationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='credential', full_name='token.ExpectationRequest.credential', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expectation', full_name='token.ExpectationRequest.expectation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_ids', full_name='token.ExpectationRequest.token_ids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=939,
serialized_end=1060,
)
_HEADER = _descriptor.Descriptor(
name='Header',
full_name='token.Header',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='token.Header.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel_id', full_name='token.Header.channel_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nonce', full_name='token.Header.nonce', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='token.Header.creator', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tls_cert_hash', full_name='token.Header.tls_cert_hash', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1063,
serialized_end=1193,
)
_COMMAND = _descriptor.Descriptor(
name='Command',
full_name='token.Command',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='token.Command.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='import_request', full_name='token.Command.import_request', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transfer_request', full_name='token.Command.transfer_request', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list_request', full_name='token.Command.list_request', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='redeem_request', full_name='token.Command.redeem_request', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='approve_request', full_name='token.Command.approve_request', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transfer_from_request', full_name='token.Command.transfer_from_request', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expectation_request', full_name='token.Command.expectation_request', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='token.Command.payload',
index=0, containing_type=None, fields=[]),
],
serialized_start=1196,
serialized_end=1604,
)
_SIGNEDCOMMAND = _descriptor.Descriptor(
name='SignedCommand',
full_name='token.SignedCommand',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='command', full_name='token.SignedCommand.command', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='token.SignedCommand.signature', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1606,
serialized_end=1657,
)
_COMMANDRESPONSEHEADER = _descriptor.Descriptor(
name='CommandResponseHeader',
full_name='token.CommandResponseHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='token.CommandResponseHeader.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command_hash', full_name='token.CommandResponseHeader.command_hash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='token.CommandResponseHeader.creator', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1659,
serialized_end=1768,
)
_ERROR = _descriptor.Descriptor(
name='Error',
full_name='token.Error',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='token.Error.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='token.Error.payload', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1770,
serialized_end=1811,
)
_COMMANDRESPONSE = _descriptor.Descriptor(
name='CommandResponse',
full_name='token.CommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='token.CommandResponse.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='err', full_name='token.CommandResponse.err', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token_transaction', full_name='token.CommandResponse.token_transaction', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unspent_tokens', full_name='token.CommandResponse.unspent_tokens', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='token.CommandResponse.payload',
index=0, containing_type=None, fields=[]),
],
serialized_start=1814,
serialized_end=2019,
)
_SIGNEDCOMMANDRESPONSE = _descriptor.Descriptor(
name='SignedCommandResponse',
full_name='token.SignedCommandResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response', full_name='token.SignedCommandResponse.response', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='token.SignedCommandResponse.signature', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2021,
serialized_end=2081,
)
_TOKENOUTPUT.fields_by_name['id'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_UNSPENTTOKENS.fields_by_name['tokens'].message_type = _TOKENOUTPUT
_IMPORTREQUEST.fields_by_name['tokens_to_issue'].message_type = _TOKENTOISSUE
_TRANSFERREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_TRANSFERREQUEST.fields_by_name['shares'].message_type = _RECIPIENTTRANSFERSHARE
_REDEEMREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_APPROVEREQUEST.fields_by_name['allowance_shares'].message_type = _ALLOWANCERECIPIENTSHARE
_APPROVEREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_EXPECTATIONREQUEST.fields_by_name['expectation'].message_type = hfc_dot_protos_dot_token_dot_expectations__pb2._TOKENEXPECTATION
_EXPECTATIONREQUEST.fields_by_name['token_ids'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._INPUTID
_HEADER.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_COMMAND.fields_by_name['header'].message_type = _HEADER
_COMMAND.fields_by_name['import_request'].message_type = _IMPORTREQUEST
_COMMAND.fields_by_name['transfer_request'].message_type = _TRANSFERREQUEST
_COMMAND.fields_by_name['list_request'].message_type = _LISTREQUEST
_COMMAND.fields_by_name['redeem_request'].message_type = _REDEEMREQUEST
_COMMAND.fields_by_name['approve_request'].message_type = _APPROVEREQUEST
_COMMAND.fields_by_name['transfer_from_request'].message_type = _TRANSFERREQUEST
_COMMAND.fields_by_name['expectation_request'].message_type = _EXPECTATIONREQUEST
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['import_request'])
_COMMAND.fields_by_name['import_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['transfer_request'])
_COMMAND.fields_by_name['transfer_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['list_request'])
_COMMAND.fields_by_name['list_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['redeem_request'])
_COMMAND.fields_by_name['redeem_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['approve_request'])
_COMMAND.fields_by_name['approve_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['transfer_from_request'])
_COMMAND.fields_by_name['transfer_from_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMAND.oneofs_by_name['payload'].fields.append(
_COMMAND.fields_by_name['expectation_request'])
_COMMAND.fields_by_name['expectation_request'].containing_oneof = _COMMAND.oneofs_by_name['payload']
_COMMANDRESPONSEHEADER.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_COMMANDRESPONSE.fields_by_name['header'].message_type = _COMMANDRESPONSEHEADER
_COMMANDRESPONSE.fields_by_name['err'].message_type = _ERROR
_COMMANDRESPONSE.fields_by_name['token_transaction'].message_type = hfc_dot_protos_dot_token_dot_transaction__pb2._TOKENTRANSACTION
_COMMANDRESPONSE.fields_by_name['unspent_tokens'].message_type = _UNSPENTTOKENS
_COMMANDRESPONSE.oneofs_by_name['payload'].fields.append(
_COMMANDRESPONSE.fields_by_name['err'])
_COMMANDRESPONSE.fields_by_name['err'].containing_oneof = _COMMANDRESPONSE.oneofs_by_name['payload']
_COMMANDRESPONSE.oneofs_by_name['payload'].fields.append(
_COMMANDRESPONSE.fields_by_name['token_transaction'])
_COMMANDRESPONSE.fields_by_name['token_transaction'].containing_oneof = _COMMANDRESPONSE.oneofs_by_name['payload']
_COMMANDRESPONSE.oneofs_by_name['payload'].fields.append(
_COMMANDRESPONSE.fields_by_name['unspent_tokens'])
_COMMANDRESPONSE.fields_by_name['unspent_tokens'].containing_oneof = _COMMANDRESPONSE.oneofs_by_name['payload']
DESCRIPTOR.message_types_by_name['TokenToIssue'] = _TOKENTOISSUE
DESCRIPTOR.message_types_by_name['RecipientTransferShare'] = _RECIPIENTTRANSFERSHARE
DESCRIPTOR.message_types_by_name['TokenOutput'] = _TOKENOUTPUT
DESCRIPTOR.message_types_by_name['UnspentTokens'] = _UNSPENTTOKENS
DESCRIPTOR.message_types_by_name['ListRequest'] = _LISTREQUEST
DESCRIPTOR.message_types_by_name['ImportRequest'] = _IMPORTREQUEST
DESCRIPTOR.message_types_by_name['TransferRequest'] = _TRANSFERREQUEST
DESCRIPTOR.message_types_by_name['RedeemRequest'] = _REDEEMREQUEST
DESCRIPTOR.message_types_by_name['AllowanceRecipientShare'] = _ALLOWANCERECIPIENTSHARE
DESCRIPTOR.message_types_by_name['ApproveRequest'] = _APPROVEREQUEST
DESCRIPTOR.message_types_by_name['ExpectationRequest'] = _EXPECTATIONREQUEST
DESCRIPTOR.message_types_by_name['Header'] = _HEADER
DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
DESCRIPTOR.message_types_by_name['SignedCommand'] = _SIGNEDCOMMAND
DESCRIPTOR.message_types_by_name['CommandResponseHeader'] = _COMMANDRESPONSEHEADER
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['CommandResponse'] = _COMMANDRESPONSE
DESCRIPTOR.message_types_by_name['SignedCommandResponse'] = _SIGNEDCOMMANDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TokenToIssue = _reflection.GeneratedProtocolMessageType('TokenToIssue', (_message.Message,), dict(
DESCRIPTOR = _TOKENTOISSUE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.TokenToIssue)
))
_sym_db.RegisterMessage(TokenToIssue)
RecipientTransferShare = _reflection.GeneratedProtocolMessageType('RecipientTransferShare', (_message.Message,), dict(
DESCRIPTOR = _RECIPIENTTRANSFERSHARE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.RecipientTransferShare)
))
_sym_db.RegisterMessage(RecipientTransferShare)
TokenOutput = _reflection.GeneratedProtocolMessageType('TokenOutput', (_message.Message,), dict(
DESCRIPTOR = _TOKENOUTPUT,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.TokenOutput)
))
_sym_db.RegisterMessage(TokenOutput)
UnspentTokens = _reflection.GeneratedProtocolMessageType('UnspentTokens', (_message.Message,), dict(
DESCRIPTOR = _UNSPENTTOKENS,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.UnspentTokens)
))
_sym_db.RegisterMessage(UnspentTokens)
ListRequest = _reflection.GeneratedProtocolMessageType('ListRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ListRequest)
))
_sym_db.RegisterMessage(ListRequest)
ImportRequest = _reflection.GeneratedProtocolMessageType('ImportRequest', (_message.Message,), dict(
DESCRIPTOR = _IMPORTREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ImportRequest)
))
_sym_db.RegisterMessage(ImportRequest)
TransferRequest = _reflection.GeneratedProtocolMessageType('TransferRequest', (_message.Message,), dict(
DESCRIPTOR = _TRANSFERREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.TransferRequest)
))
_sym_db.RegisterMessage(TransferRequest)
RedeemRequest = _reflection.GeneratedProtocolMessageType('RedeemRequest', (_message.Message,), dict(
DESCRIPTOR = _REDEEMREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.RedeemRequest)
))
_sym_db.RegisterMessage(RedeemRequest)
AllowanceRecipientShare = _reflection.GeneratedProtocolMessageType('AllowanceRecipientShare', (_message.Message,), dict(
DESCRIPTOR = _ALLOWANCERECIPIENTSHARE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.AllowanceRecipientShare)
))
_sym_db.RegisterMessage(AllowanceRecipientShare)
ApproveRequest = _reflection.GeneratedProtocolMessageType('ApproveRequest', (_message.Message,), dict(
DESCRIPTOR = _APPROVEREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ApproveRequest)
))
_sym_db.RegisterMessage(ApproveRequest)
ExpectationRequest = _reflection.GeneratedProtocolMessageType('ExpectationRequest', (_message.Message,), dict(
DESCRIPTOR = _EXPECTATIONREQUEST,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.ExpectationRequest)
))
_sym_db.RegisterMessage(ExpectationRequest)
Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), dict(
DESCRIPTOR = _HEADER,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.Header)
))
_sym_db.RegisterMessage(Header)
Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
DESCRIPTOR = _COMMAND,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.Command)
))
_sym_db.RegisterMessage(Command)
SignedCommand = _reflection.GeneratedProtocolMessageType('SignedCommand', (_message.Message,), dict(
DESCRIPTOR = _SIGNEDCOMMAND,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.SignedCommand)
))
_sym_db.RegisterMessage(SignedCommand)
CommandResponseHeader = _reflection.GeneratedProtocolMessageType('CommandResponseHeader', (_message.Message,), dict(
DESCRIPTOR = _COMMANDRESPONSEHEADER,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.CommandResponseHeader)
))
_sym_db.RegisterMessage(CommandResponseHeader)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.Error)
))
_sym_db.RegisterMessage(Error)
CommandResponse = _reflection.GeneratedProtocolMessageType('CommandResponse', (_message.Message,), dict(
DESCRIPTOR = _COMMANDRESPONSE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.CommandResponse)
))
_sym_db.RegisterMessage(CommandResponse)
SignedCommandResponse = _reflection.GeneratedProtocolMessageType('SignedCommandResponse', (_message.Message,), dict(
DESCRIPTOR = _SIGNEDCOMMANDRESPONSE,
__module__ = 'hfc.protos.token.prover_pb2'
# @@protoc_insertion_point(class_scope:token.SignedCommandResponse)
))
_sym_db.RegisterMessage(SignedCommandResponse)
DESCRIPTOR._options = None
_PROVER = _descriptor.ServiceDescriptor(
name='Prover',
full_name='token.Prover',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=2083,
serialized_end=2163,
methods=[
_descriptor.MethodDescriptor(
name='ProcessCommand',
full_name='token.Prover.ProcessCommand',
index=0,
containing_service=None,
input_type=_SIGNEDCOMMAND,
output_type=_SIGNEDCOMMANDRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PROVER)
DESCRIPTOR.services_by_name['Prover'] = _PROVER
# @@protoc_insertion_point(module_scope)
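# Usage sketch (editor's addition, not emitted by protoc): builds a
# SignedCommand from the generated classes above. All field values are
# illustrative placeholders, not real identities or signatures.
if __name__ == '__main__':
    _cmd = Command()
    _cmd.header.channel_id = 'mychannel'
    _cmd.header.nonce = b'\x00' * 8
    # Populating import_request selects it in the 'payload' oneof.
    _issue = _cmd.import_request.tokens_to_issue.add()
    _issue.recipient = b'alice-cert-bytes'
    _issue.type = 'USD'
    _issue.quantity = 100
    _signed = SignedCommand(command=_cmd.SerializeToString(), signature=b'')
    print('SignedCommand is %d bytes' % len(_signed.SerializeToString()))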
| 0
| 0
| 0
|
98227190b48433b30a13ee88906b852b86929253
| 2,899
|
py
|
Python
|
instagram_scraper/auth/authentication.py
|
davidargylethacker/instagram-scraper
|
b58760d90aa492f8d7795da572e822d7047a52a5
|
[
"Unlicense"
] | null | null | null |
instagram_scraper/auth/authentication.py
|
davidargylethacker/instagram-scraper
|
b58760d90aa492f8d7795da572e822d7047a52a5
|
[
"Unlicense"
] | null | null | null |
instagram_scraper/auth/authentication.py
|
davidargylethacker/instagram-scraper
|
b58760d90aa492f8d7795da572e822d7047a52a5
|
[
"Unlicense"
] | null | null | null |
import json
import requests
from instagram_scraper.constants import BASE_URL, STORIES_UA, LOGIN_URL, LOGOUT_URL
CHROME_WIN_UA = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
| 38.653333
| 127
| 0.617799
|
import json
import requests
from instagram_scraper.constants import BASE_URL, STORIES_UA, LOGIN_URL, LOGOUT_URL
CHROME_WIN_UA = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
class Authentication(object):
    def __init__(self):
        self.logged_in = False
        self.session = None
        self.cookies = None
        # Not set anywhere in the original, although user_login() reads
        # this flag; give it a safe default.
        self.interactive = False
def guest_login(self):
"""Authenticate as a guest/non-signed in user"""
self.session = requests.Session()
self.session.headers = {'user-agent': CHROME_WIN_UA}
self.session.headers.update({'Referer': BASE_URL, 'user-agent': STORIES_UA})
request = self.session.get(BASE_URL)
self.session.headers.update({'X-CSRFToken': request.cookies['csrftoken']})
self.session.headers.update({'user-agent': CHROME_WIN_UA})
def user_login(self, username, password):
"""Logs in to instagram."""
self.session = requests.Session()
self.session.headers = {'user-agent': CHROME_WIN_UA}
self.session.headers.update({'Referer': BASE_URL, 'user-agent': STORIES_UA})
req = self.session.get(BASE_URL)
self.session.headers.update({'X-CSRFToken': req.cookies['csrftoken']})
login_data = {'username': username, 'password': password}
login = self.session.post(LOGIN_URL, data=login_data, allow_redirects=True)
self.session.headers.update({'X-CSRFToken': login.cookies['csrftoken']})
self.cookies = login.cookies
login_text = json.loads(login.text)
if login_text.get('authenticated') and login.status_code == 200:
self.logged_in = True
self.session.headers.update({'user-agent': CHROME_WIN_UA})
else:
print('Login failed for ' + username)
if 'checkpoint_url' in login_text:
checkpoint_url = login_text.get('checkpoint_url')
print('Please verify your account at ' + BASE_URL[0:-1] + checkpoint_url)
                if self.interactive is True:
                    # login_challenge() is not defined in this module; it is
                    # assumed to be provided elsewhere (e.g. by a subclass).
                    self.login_challenge(checkpoint_url)
elif 'errors' in login_text:
                for count, error in enumerate(login_text['errors'].get('error'), 1):
print('Session error %(count)s: "%(error)s"' % locals())
else:
print(json.dumps(login_text))
def logout(self):
"""Logs out of instagram."""
if self.logged_in:
try:
logout_data = {'csrfmiddlewaretoken': self.cookies['csrftoken']}
self.session.post(LOGOUT_URL, data=logout_data)
self.logged_in = False
except requests.exceptions.RequestException:
print('Failed to log out')
    def get_session(self):
        # Renamed from session(): the instance attribute assigned in
        # __init__ shadows a method of that name, and the old body called
        # self.session() on a non-callable requests.Session object.
        return self.session
    def get_cookies(self):
        # Renamed from cookies() for the same attribute-shadowing reason.
        return self.cookies
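# Usage sketch (editor's addition, not part of the original module): a
# guest session followed by logout. guest_login() needs network access to
# BASE_URL; an authenticated session would instead call
# auth.user_login('<username>', '<password>') with real credentials.
if __name__ == '__main__':
    auth = Authentication()
    auth.guest_login()
    auth.logout()  # no-op here: guest_login() never sets logged_in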
| 137
| 2,495
| 23
|
76e2b89bed1f1e5e61c69927513e3fe46864ced5
| 2,051
|
py
|
Python
|
assessor_mark2.py
|
piyushsingariya/assessor-series
|
263b5852b83689f25e03fb2efb174b1aa13c7cf2
|
[
"MIT"
] | 1
|
2019-06-16T17:15:41.000Z
|
2019-06-16T17:15:41.000Z
|
assessor_mark2.py
|
piyushsingariya/assessor-series
|
263b5852b83689f25e03fb2efb174b1aa13c7cf2
|
[
"MIT"
] | null | null | null |
assessor_mark2.py
|
piyushsingariya/assessor-series
|
263b5852b83689f25e03fb2efb174b1aa13c7cf2
|
[
"MIT"
] | null | null | null |
# Determining interface details with the netifaces library
# This script uses a number of functions to accomplish specific tasks
# including get_networks, get_addresses, get_gateways and get_interfaces
import sys
try:
    import netifaces
except ImportError:
    sys.exit("[!] Install the netifaces library: pip install netifaces")
gateways = {}
network_ifaces={}
# The second function identifies the gateways and returns them as a dictionary
# The third function identifies the addresses for each interface
# The fourth function actually identifies the gateway IP from the dictionary
# provided by the get_gateways function for each interface.
gateways = get_gateways()
network_ifaces = get_networks(gateways)
print(network_ifaces)
| 33.080645
| 121
| 0.714286
|
# Determining interface details with the netifaces library
# This script uses a number of functions to accomplish specific tasks
# including get_networks, get_addresses, get_gateways and get_interfaces
import sys
try:
    import netifaces
except ImportError:
    sys.exit("[!] Install the netifaces library: pip install netifaces")
gateways = {}
network_ifaces={}
def get_interfaces():
interfaces = netifaces.interfaces()
return interfaces
# The second function identifies the gateways and returns them as a dictionary
def get_gateways():
gateway_dict = {}
gws = netifaces.gateways()
for gw in gws:
try:
gateway_iface = gws[gw][netifaces.AF_INET]
gateway_ip, iface = gateway_iface[0], gateway_iface[1]
gw_list = [gateway_ip, iface]
gateway_dict[gw] = gw_list
        except (KeyError, IndexError):
            pass
return gateway_dict
# The third function identifies the addresses for each interface
def get_addresses(interface):
addrs = netifaces.ifaddresses(interface)
link_addr = addrs[netifaces.AF_LINK]
iface_addrs = addrs[netifaces.AF_INET]
iface_dict = iface_addrs[0]
link_dict = link_addr[0]
hwaddr = link_dict.get('addr')
iface_addr = iface_dict.get('addr')
iface_broadcast = iface_dict.get('broadcast')
iface_netmask = iface_dict.get('netmask')
return hwaddr, iface_addr, iface_broadcast, iface_netmask
# The fourth function actually identifies the gateway IP from the dictionary
# provided by the get_gateways function for each interface.
def get_networks(gateways_dict):
networks_dict = {}
    for key, value in gateways_dict.items():
        gateway_ip, iface = value[0], value[1]
        hwaddress, addr, broadcast, netmask = get_addresses(iface)
        network = {'gateway': gateway_ip, 'hwaddr': hwaddress, 'addr': addr, 'broadcast': broadcast, 'netmask': netmask}
networks_dict[iface] = network
return networks_dict
gateways = get_gateways()
network_ifaces = get_networks(gateways)
print(network_ifaces)
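# Illustrative output (editor's addition); the interface name and the
# addresses below are hypothetical and depend entirely on the host:
# {'en0': {'gateway': '192.168.1.1', 'hwaddr': 'aa:bb:cc:dd:ee:ff',
#          'addr': '192.168.1.10', 'broadcast': '192.168.1.255',
#          'netmask': '255.255.255.0'}}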
| 1,216
| 0
| 93
|
1e458847a7ea5ac7e0e6b7cd0b13973ccc014c9c
| 1,180
|
py
|
Python
|
controllers/user.py
|
brianpesy/DevCamp
|
48576ef93a1a33d2ea0f2d3901bee77c158d8767
|
[
"BSD-3-Clause"
] | null | null | null |
controllers/user.py
|
brianpesy/DevCamp
|
48576ef93a1a33d2ea0f2d3901bee77c158d8767
|
[
"BSD-3-Clause"
] | null | null | null |
controllers/user.py
|
brianpesy/DevCamp
|
48576ef93a1a33d2ea0f2d3901bee77c158d8767
|
[
"BSD-3-Clause"
] | null | null | null |
from helpers import functions
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from werkzeug.security import check_password_hash, generate_password_hash
def login():
"""Logs the user in."""
if g.user:
return redirect(functions.url_for('/'))
error = None
if request.method == 'POST':
user = functions.query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'],
request.form['password']):
error = 'Invalid password'
else:
flash('You were logged in')
session['user_id'] = user['user_id']
return redirect(functions.url_for('/'))
return render_template('login.html', error=error)
def register():
"""Registers the user."""
return render_template('register.html')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(functions.url_for('/public'))
| 32.777778
| 66
| 0.611864
|
from helpers import functions
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from werkzeug.security import check_password_hash, generate_password_hash
def login():
"""Logs the user in."""
if g.user:
return redirect(functions.url_for('/'))
error = None
if request.method == 'POST':
user = functions.query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'],
request.form['password']):
error = 'Invalid password'
else:
flash('You were logged in')
session['user_id'] = user['user_id']
return redirect(functions.url_for('/'))
return render_template('login.html', error=error)
def register():
"""Registers the user."""
return render_template('register.html')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(functions.url_for('/public'))
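# A minimal wiring sketch (assumed setup -- the real app factory and routing
# live elsewhere in this project): the view functions above are plain
# callables, so they could be registered on a Flask app roughly like this:
# app = Flask(__name__)
# app.add_url_rule('/login', 'login', login, methods=['GET', 'POST'])
# app.add_url_rule('/register', 'register', register, methods=['GET', 'POST'])
# app.add_url_rule('/logout', 'logout', logout)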
| 0
| 0
| 0
|
e706ff590d0748c75f079ded5d16403c7851b34f
| 42,001
|
py
|
Python
|
src/nanoSessionKontrol/ChannelStripController.py
|
lmarie23/nanoSessionKontrol
|
4a307464556734f03d4d073c377cebe1587d9316
|
[
"Apache-2.0"
] | 1
|
2021-06-06T07:20:18.000Z
|
2021-06-06T07:20:18.000Z
|
src/nanoSessionKontrol/ChannelStripController.py
|
lmarie23/nanoSessionKontrol
|
4a307464556734f03d4d073c377cebe1587d9316
|
[
"Apache-2.0"
] | null | null | null |
src/nanoSessionKontrol/ChannelStripController.py
|
lmarie23/nanoSessionKontrol
|
4a307464556734f03d4d073c377cebe1587d9316
|
[
"Apache-2.0"
] | null | null | null |
#Embedded file name: /Users/versonator/Hudson/live/Projects/AppLive/Resources/MIDI Remote Scripts/MackieControl/ChannelStripController.py
from itertools import chain
from .MHControlComponent import *
from _Generic.Devices import *
class ChannelStripController(MHControlComponent):
"""
Controls all channel-strips of the Mackie Control and controller extensions
(Mackie Control XTs) if available: Maps and controls the faders, VPots and the
    displays depending on the assignment modes (Vol_Pan, PlugIn, IO, Send) and
edit and flip mode.
stack_offset vs. strip_index vs. bank_channel_offset:
When using multiple sets of channel strips (stacking them), we will still only
have one ChannelStripController which rules them all.
    To identify and separate them, the implementation uses 3 different kinds of
indices or offsets:
- strip_index: is the index of a channel_strip within its controller box,
so strip no 1 on an extension (XT) and strip number one on the 'main' Mackie
will both have a strip_index of 1.
    We need to preserve this index, because every device (extension or main controller)
will use a unique MIDI port to send out its MIDI messages which uses the
strip_index, encoded into the MIDI messages channel, to tell the hardware which
channel on the controller is meant.
    - stack_offset: describes how many channels sit to the left of the device that a
    channel_strip belongs to. For example: You have 3 Mackies: First, a XT, then
    the main Mackie, then another XT.
    The first XT will have the stack_offset 0, the main Mackie the stack_offset 8,
    because 8 faders are present before it. The second XT has a stack_offset of 16.
- bank_cha_offset: this shifts all available channel strips within all the tracks
that should be controlled. For example: If you have a song with 32 tracks, and
a main Mackie Control + a XT on the right, then you want to shift the first fader
of the main Mackie to Track 16, to be able to control Track 16 to 32.
The master channel strip is hardcoded and not in the list of "normal" channel_strips,
    because it's always mapped to the master_volume.
"""
def set_controller_extensions(self, left_extensions, right_extensions):
""" Called from the main script (after all scripts where initialized), to let us
know where and how many MackieControlXT are installed.
There exists only one ChannelStripController, so we will take care about the
extensions channel strips
"""
self.__left_extensions = left_extensions
self.__right_extensions = right_extensions
self.__channel_strips = []
stack_offset = 0
for le in left_extensions:
for s in le.channel_strips():
self.__channel_strips.append(s)
s.set_stack_offset(stack_offset)
stack_offset += NUM_CHANNEL_STRIPS
for s in self.__own_channel_strips:
self.__channel_strips.append(s)
s.set_stack_offset(stack_offset)
stack_offset += NUM_CHANNEL_STRIPS
for re in right_extensions:
for s in re.channel_strips():
self.__channel_strips.append(s)
s.set_stack_offset(stack_offset)
stack_offset += NUM_CHANNEL_STRIPS
for s in self.__channel_strips:
s.set_channel_strip_controller(self)
self.refresh_state()
def request_rebuild_midi_map(self):
""" Overridden to call also the extensions request_rebuild_midi_map"""
MHControlComponent.request_rebuild_midi_map(self)
for ex in self.__left_extensions + self.__right_extensions:
ex.request_rebuild_midi_map()
def toggle_meter_mode(self):
""" called from the main script when the display toggle button was pressed """
self.__meters_enabled = not self.__meters_enabled
self.__apply_meter_mode()
def handle_vpot_rotation(self, strip_index, stack_offset, cc_value):
""" forwarded to us by the channel_strips """
if self.__assignment_mode == CSM_IO:
if cc_value >= 64:
direction = -1
else:
direction = 1
channel_strip = self.__channel_strips[stack_offset + strip_index]
current_routing = self.__routing_target(channel_strip)
available_routings = self.__available_routing_targets(channel_strip)
if current_routing and available_routings:
if current_routing in available_routings:
i = list(available_routings).index(current_routing)
if direction == 1:
new_i = min(len(available_routings) - 1, i + direction)
else:
new_i = max(0, i + direction)
new_routing = available_routings[new_i]
elif len(available_routings):
new_routing = available_routings[0]
self.__set_routing_target(channel_strip, new_routing)
elif self.__assignment_mode == CSM_PLUGINS:
pass
else:
channel_strip = self.__channel_strips[stack_offset + strip_index]
            assert not channel_strip.assigned_track() or not channel_strip.assigned_track().has_audio_output, 'in every other mode, the midimap should handle the messages'
def handle_fader_touch(self, strip_offset, stack_offset, touched):
""" forwarded to us by the channel_strips """
self.__reassign_channel_strip_parameters(for_display_only=True)
def handle_pressed_v_pot(self, strip_index, stack_offset):
""" forwarded to us by the channel_strips """
if self.__assignment_mode == CSM_VOLPAN or self.__assignment_mode == CSM_SENDS or self.__assignment_mode == CSM_PLUGINS and self.__plugin_mode == PCM_PARAMETERS:
if stack_offset + strip_index in range(0, len(self.__channel_strips)):
param = self.__channel_strips[stack_offset + strip_index].v_pot_parameter()
if param and param.is_enabled:
if param.is_quantized:
if param.value + 1 > param.max:
param.value = param.min
else:
param.value = param.value + 1
else:
param.value = param.default_value
elif self.__assignment_mode == CSM_PLUGINS:
if self.__plugin_mode == PCM_DEVICES:
device_index = strip_index + stack_offset + self.__plugin_mode_offsets[PCM_DEVICES]
if device_index >= 0 and device_index < len(self.song().view.selected_track.devices):
if self.__chosen_plugin != None:
self.__chosen_plugin.remove_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__chosen_plugin = self.song().view.selected_track.devices[device_index]
self.__chosen_plugin != None and self.__chosen_plugin.add_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__reorder_parameters()
self.__plugin_mode_offsets[PCM_PARAMETERS] = 0
self.__set_plugin_mode(PCM_PARAMETERS)
def __strip_offset(self):
""" return the bank_channel offset depending if we are in return mode or not
"""
if self.__view_returns:
return self.__bank_cha_offset_returns
else:
return self.__bank_cha_offset
def __controlled_num_of_tracks(self):
""" return the number of tracks, depending on if we are in send_track
mode or normal track mode
"""
if self.__view_returns:
return len(self.song().return_tracks)
else:
return len(self.song().visible_tracks)
def __send_parameter(self, strip_index, stack_index):
""" Return the send parameter that is assigned to the given channel strip
"""
        assert self.__assignment_mode == CSM_SENDS
        send_index = strip_index + stack_index + self.__send_mode_offset
        sends = self.song().view.selected_track.mixer_device.sends
        if send_index < len(sends):
            p = sends[send_index]
            return (p, p.name)
        return (None, None)
def __plugin_parameter(self, strip_index, stack_index):
""" Return the parameter that is assigned to the given channel strip
"""
        assert self.__assignment_mode == CSM_PLUGINS
        if self.__plugin_mode == PCM_DEVICES:
            return (None, None)
        elif self.__plugin_mode == PCM_PARAMETERS:
            assert self.__chosen_plugin
            parameters = self.__ordered_plugin_parameters
            parameter_index = strip_index + stack_index + self.__plugin_mode_offsets[PCM_PARAMETERS]
            if parameter_index >= 0 and parameter_index < len(parameters):
                return parameters[parameter_index]
            else:
                return (None, None)
        else:
            raise AssertionError
def __can_switch_to_prev_page(self):
""" return true if pressing the "next" button will have any effect """
if self.__assignment_mode == CSM_PLUGINS:
return self.__plugin_mode_offsets[self.__plugin_mode] > 0
elif self.__assignment_mode == CSM_SENDS:
return self.__send_mode_offset > 0
else:
return False
def __can_switch_to_next_page(self):
""" return true if pressing the "prev" button will have any effect """
if self.__assignment_mode == CSM_PLUGINS:
sel_track = self.song().view.selected_track
if self.__plugin_mode == PCM_DEVICES:
return self.__plugin_mode_offsets[PCM_DEVICES] + len(self.__channel_strips) < len(sel_track.devices)
            elif self.__plugin_mode == PCM_PARAMETERS:
                assert self.__chosen_plugin
                parameters = self.__ordered_plugin_parameters
                return self.__plugin_mode_offsets[PCM_PARAMETERS] + len(self.__channel_strips) < len(parameters)
            else:
                raise AssertionError
elif self.__assignment_mode == CSM_SENDS:
return self.__send_mode_offset + len(self.__channel_strips) < len(self.song().return_tracks)
else:
return False
def __set_channel_offset(self, new_offset):
""" Set and validate a new channel_strip offset, which shifts all available channel
        strips within all the available tracks or return tracks
"""
if new_offset < 0:
new_offset = 0
elif new_offset >= self.__controlled_num_of_tracks():
new_offset = self.__controlled_num_of_tracks() - 1
if self.__view_returns:
self.__bank_cha_offset_returns = new_offset
else:
self.__bank_cha_offset = new_offset
self.__main_display_controller.set_channel_offset(new_offset)
self.__reassign_channel_strip_offsets()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.request_rebuild_midi_map()
def __set_plugin_mode(self, new_mode):
""" Set a new plugin sub-mode, which can be:
1. Choosing the device to control (PCM_DEVICES)
2. Controlling the chosen devices parameters (PCM_PARAMETERS)
"""
        assert new_mode >= 0 and new_mode < PCM_NUMMODES
if self.__plugin_mode != new_mode:
self.__plugin_mode = new_mode
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
            if self.__plugin_mode == PCM_DEVICES:
                self.__update_vpot_leds_in_plugins_device_choose_mode()
else:
for plugin in self.__displayed_plugins:
if plugin != None:
plugin.remove_name_listener(self.__update_plugin_names)
self.__displayed_plugins = []
self.__update_page_switch_leds()
self.__update_flip_led()
self.__update_page_switch_leds()
def __switch_to_prev_page(self):
""" Switch to the previous page in the non track strip modes (choosing plugs, or
controlling devices)
"""
if self.__can_switch_to_prev_page():
if self.__assignment_mode == CSM_PLUGINS:
self.__plugin_mode_offsets[self.__plugin_mode] -= len(self.__channel_strips)
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
elif self.__assignment_mode == CSM_SENDS:
self.__send_mode_offset -= len(self.__channel_strips)
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.__update_page_switch_leds()
self.request_rebuild_midi_map()
def __switch_to_next_page(self):
""" Switch to the next page in the non track strip modes (choosing plugs, or
controlling devices)
"""
if self.__can_switch_to_next_page():
if self.__assignment_mode == CSM_PLUGINS:
self.__plugin_mode_offsets[self.__plugin_mode] += len(self.__channel_strips)
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
elif self.__assignment_mode == CSM_SENDS:
self.__send_mode_offset += len(self.__channel_strips)
else:
                raise AssertionError
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.__update_page_switch_leds()
self.request_rebuild_midi_map()
def __switch_to_next_io_mode(self):
""" Step through the available IO modes (In/OutPut//Main/Sub)
"""
self.__sub_mode_in_io_mode += 1
if self.__sub_mode_in_io_mode > CSM_IO_LAST_MODE:
self.__sub_mode_in_io_mode = CSM_IO_FIRST_MODE
def __reassign_channel_strip_offsets(self):
""" Update the channel strips bank_channel offset
"""
for s in self.__channel_strips:
s.set_bank_and_channel_offset(self.__strip_offset(), self.__view_returns, self.__within_track_added_or_deleted)
def __reassign_channel_strip_parameters(self, for_display_only):
""" Reevaluate all v-pot/fader -> parameter assignments
"""
display_parameters = []
for s in self.__channel_strips:
vpot_param = (None, None)
slider_param = (None, None)
vpot_display_mode = VPOT_DISPLAY_SINGLE_DOT
slider_display_mode = VPOT_DISPLAY_SINGLE_DOT
if self.__assignment_mode == CSM_VOLPAN:
if s.assigned_track() and s.assigned_track().has_audio_output:
vpot_param = (s.assigned_track().mixer_device.panning, 'Pan')
vpot_display_mode = VPOT_DISPLAY_BOOST_CUT
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
slider_display_mode = VPOT_DISPLAY_WRAP
elif self.__assignment_mode == CSM_PLUGINS:
vpot_param = self.__plugin_parameter(s.strip_index(), s.stack_offset())
vpot_display_mode = VPOT_DISPLAY_WRAP
if s.assigned_track() and s.assigned_track().has_audio_output:
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
slider_display_mode = VPOT_DISPLAY_WRAP
elif self.__assignment_mode == CSM_SENDS:
vpot_param = self.__send_parameter(s.strip_index(), s.stack_offset())
vpot_display_mode = VPOT_DISPLAY_WRAP
if s.assigned_track() and s.assigned_track().has_audio_output:
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
slider_display_mode = VPOT_DISPLAY_WRAP
elif self.__assignment_mode == CSM_IO:
if s.assigned_track() and s.assigned_track().has_audio_output:
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
if self.__flip and self.__can_flip():
if self.__any_slider_is_touched():
display_parameters.append(vpot_param)
else:
display_parameters.append(slider_param)
if not for_display_only:
s.set_v_pot_parameter(slider_param[0], slider_display_mode)
s.set_fader_parameter(vpot_param[0])
else:
if self.__any_slider_is_touched():
display_parameters.append(slider_param)
else:
display_parameters.append(vpot_param)
if not for_display_only:
s.set_v_pot_parameter(vpot_param[0], vpot_display_mode)
s.set_fader_parameter(slider_param[0])
self.__main_display_controller.set_channel_offset(self.__strip_offset())
if len(display_parameters):
self.__main_display_controller.set_parameters(display_parameters)
def __apply_meter_mode(self):
""" Update the meter mode in the displays and channel strips """
enabled = self.__meters_enabled and self.__assignment_mode is CSM_VOLPAN
for s in self.__channel_strips:
s.enable_meter_mode(enabled)
self.__main_display_controller.enable_meters(enabled)
def __toggle_flip(self):
""" En/Disable V-Pot / Fader flipping
"""
if self.__can_flip():
self.__flip = not self.__flip
self.__on_flip_changed()
def __toggle_view_returns(self):
""" Toggle if we want to control the return tracks or normal tracks
"""
self.__view_returns = not self.__view_returns
self.__update_view_returns_mode()
def __update_assignment_mode_leds(self):
""" Show which assignment mode is currently active """
if self.__assignment_mode == CSM_IO:
sid_on_switch = SID_ASSIGNMENT_IO
elif self.__assignment_mode == CSM_SENDS:
sid_on_switch = SID_ASSIGNMENT_SENDS
elif self.__assignment_mode == CSM_VOLPAN:
sid_on_switch = SID_ASSIGNMENT_PAN
elif self.__assignment_mode == CSM_PLUGINS:
sid_on_switch = SID_ASSIGNMENT_PLUG_INS
else:
            raise AssertionError
for s in (SID_ASSIGNMENT_IO,
SID_ASSIGNMENT_SENDS,
SID_ASSIGNMENT_PAN,
SID_ASSIGNMENT_PLUG_INS):
if s == sid_on_switch:
self.send_midi((NOTE_ON_STATUS, s, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, s, BUTTON_STATE_OFF))
def __update_assignment_display(self):
""" Cryptically label the current assignment mode in the 2char display above
the assignment buttons
"""
if self.__assignment_mode == CSM_VOLPAN:
ass_string = ['P', 'N']
else:
if self.__assignment_mode == CSM_PLUGINS or self.__assignment_mode == CSM_SENDS:
                ass_string = ['M', 'A'] if self.__last_attached_selected_track == self.song().master_track else None
for t in self.song().return_tracks:
if t == self.__last_attached_selected_track:
ass_string = ['R', chr(ord('A') + list(self.song().return_tracks).index(t))]
break
for t in self.song().visible_tracks:
if t == self.__last_attached_selected_track:
ass_string = list('%.2d' % min(99, list(self.song().visible_tracks).index(t) + 1))
break
                assert ass_string
elif self.__assignment_mode == CSM_IO:
if self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_MAIN:
ass_string = ['I', "'"]
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_SUB:
ass_string = ['I', ',']
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_MAIN:
ass_string = ['0', "'"]
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_SUB:
ass_string = ['0', ',']
                else:
                    raise AssertionError
            else:
                raise AssertionError
self.send_midi((CC_STATUS, 75, g7_seg_led_conv_table[ass_string[0]]))
self.send_midi((CC_STATUS, 74, g7_seg_led_conv_table[ass_string[1]]))
def __update_page_switch_leds(self):
""" visualize if the "prev" an "next" buttons can be pressed """
if self.__can_switch_to_prev_page():
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_EQ, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_EQ, BUTTON_STATE_OFF))
if self.__can_switch_to_next_page():
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_DYNAMIC, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_DYNAMIC, BUTTON_STATE_OFF))
def __update_vpot_leds_in_plugins_device_choose_mode(self):
""" To be called in assignment mode CSM_PLUGINS, submode PCM_DEVICES only:
This will enlighten all poties which can be pressed to choose a device
for editing, and unlight all poties where pressing will have no effect
"""
        assert self.__assignment_mode == CSM_PLUGINS
        assert self.__plugin_mode == PCM_DEVICES
sel_track = self.song().view.selected_track
count = 0
for s in self.__channel_strips:
offset = self.__plugin_mode_offsets[self.__plugin_mode]
if sel_track and offset + count >= 0 and offset + count < len(sel_track.devices):
s.show_full_enlighted_poti()
else:
s.unlight_vpot_leds()
count += 1
def __update_channel_strip_strings(self):
""" In IO mode, collect all strings that will be visible in the main display manually
"""
if not self.__any_slider_is_touched():
if self.__assignment_mode == CSM_IO:
targets = []
for s in self.__channel_strips:
if self.__routing_target(s):
targets.append(self.__routing_target(s))
else:
targets.append('')
self.__main_display_controller.set_channel_strip_strings(targets)
elif self.__assignment_mode == CSM_PLUGINS and self.__plugin_mode == PCM_DEVICES:
for plugin in self.__displayed_plugins:
if plugin != None:
plugin.remove_name_listener(self.__update_plugin_names)
self.__displayed_plugins = []
sel_track = self.song().view.selected_track
for i in range(len(self.__channel_strips)):
device_index = i + self.__plugin_mode_offsets[PCM_DEVICES]
if device_index >= 0 and device_index < len(sel_track.devices):
sel_track.devices[device_index].add_name_listener(self.__update_plugin_names)
self.__displayed_plugins.append(sel_track.devices[device_index])
else:
self.__displayed_plugins.append(None)
self.__update_plugin_names()
def __update_view_returns_mode(self):
""" Update the control return tracks LED
"""
if self.__view_returns:
self.send_midi((NOTE_ON_STATUS, SID_FADERBANK_EDIT, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_FADERBANK_EDIT, BUTTON_STATE_OFF))
self.__main_display_controller.set_show_return_track_names(self.__view_returns)
self.__reassign_channel_strip_offsets()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
def __on_selected_track_changed(self):
""" Notifier, called as soon as the selected track has changed
"""
st = self.__last_attached_selected_track
if st and st.devices_has_listener(self.__on_selected_device_chain_changed):
st.remove_devices_listener(self.__on_selected_device_chain_changed)
self.__last_attached_selected_track = self.song().view.selected_track
st = self.__last_attached_selected_track
if st:
st.add_devices_listener(self.__on_selected_device_chain_changed)
if self.__assignment_mode == CSM_PLUGINS:
self.__plugin_mode_offsets = [ 0 for x in range(PCM_NUMMODES) ]
if self.__chosen_plugin != None:
self.__chosen_plugin.remove_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__chosen_plugin = None
self.__ordered_plugin_parameters = []
self.__update_assignment_display()
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
else:
self.__set_plugin_mode(PCM_DEVICES)
elif self.__assignment_mode == CSM_SENDS:
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_assignment_display()
self.request_rebuild_midi_map()
def __on_flip_changed(self):
""" Update the flip button LED when the flip mode changed
"""
self.__update_flip_led()
if self.__can_flip():
self.__update_assignment_display()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
def __on_tracks_added_or_deleted(self):
""" Notifier, called as soon as tracks where added, removed or moved
"""
self.__within_track_added_or_deleted = True
for t in chain(self.song().visible_tracks, self.song().return_tracks):
if not t.solo_has_listener(self.__update_rude_solo_led):
t.add_solo_listener(self.__update_rude_solo_led)
if not t.has_audio_output_has_listener(self.__on_any_tracks_output_type_changed):
t.add_has_audio_output_listener(self.__on_any_tracks_output_type_changed)
if self.__send_mode_offset >= len(self.song().return_tracks):
self.__send_mode_offset = 0
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
if self.__strip_offset() + len(self.__channel_strips) >= self.__controlled_num_of_tracks():
self.__set_channel_offset(max(0, self.__controlled_num_of_tracks() - len(self.__channel_strips)))
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
if self.__assignment_mode == CSM_SENDS:
self.__update_page_switch_leds()
self.refresh_state()
self.__main_display_controller.refresh_state()
self.__within_track_added_or_deleted = False
self.request_rebuild_midi_map()
def __on_any_tracks_output_type_changed(self):
""" called as soon as any device chain has changed (devices where
added/removed/swapped...)
"""
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
| 48.725058
| 188
| 0.649794
|
#Embedded file name: /Users/versonator/Hudson/live/Projects/AppLive/Resources/MIDI Remote Scripts/MackieControl/ChannelStripController.py
from itertools import chain
from .MHControlComponent import *
from _Generic.Devices import *
class ChannelStripController(MHControlComponent):
"""
Controls all channel-strips of the Mackie Control and controller extensions
(Mackie Control XTs) if available: Maps and controls the faders, VPots and the
    displays depending on the assignment modes (Vol_Pan, PlugIn, IO, Send) and
edit and flip mode.
stack_offset vs. strip_index vs. bank_channel_offset:
When using multiple sets of channel strips (stacking them), we will still only
have one ChannelStripController which rules them all.
    To identify and separate them, the implementation uses 3 different kinds of
indices or offsets:
- strip_index: is the index of a channel_strip within its controller box,
so strip no 1 on an extension (XT) and strip number one on the 'main' Mackie
will both have a strip_index of 1.
    We need to preserve this index, because every device (extension or main controller)
will use a unique MIDI port to send out its MIDI messages which uses the
strip_index, encoded into the MIDI messages channel, to tell the hardware which
channel on the controller is meant.
    - stack_offset: describes how many channels sit to the left of the device that a
    channel_strip belongs to. For example: You have 3 Mackies: First, a XT, then
    the main Mackie, then another XT.
    The first XT will have the stack_offset 0, the main Mackie the stack_offset 8,
    because 8 faders are present before it. The second XT has a stack_offset of 16.
- bank_cha_offset: this shifts all available channel strips within all the tracks
that should be controlled. For example: If you have a song with 32 tracks, and
a main Mackie Control + a XT on the right, then you want to shift the first fader
of the main Mackie to Track 16, to be able to control Track 16 to 32.
The master channel strip is hardcoded and not in the list of "normal" channel_strips,
    because it's always mapped to the master_volume.
"""
def __init__(self, main_script, channel_strips, master_strip, main_display_controller):
MHControlComponent.__init__(self, main_script)
self.__left_extensions = []
self.__right_extensions = []
self.__own_channel_strips = channel_strips
self.__master_strip = master_strip
self.__channel_strips = channel_strips
self.__main_display_controller = main_display_controller
self.__meters_enabled = False
self.__assignment_mode = CSM_VOLPAN
self.__sub_mode_in_io_mode = CSM_IO_FIRST_MODE
self.__plugin_mode = PCM_DEVICES
self.__plugin_mode_offsets = [ 0 for x in range(PCM_NUMMODES) ]
self.__chosen_plugin = None
self.__ordered_plugin_parameters = []
self.__displayed_plugins = []
self.__last_attached_selected_track = None
self.__send_mode_offset = 0
self.__flip = False
self.__view_returns = False
self.__bank_cha_offset = 0
self.__bank_cha_offset_returns = 0
self.__within_track_added_or_deleted = False
self.song().add_visible_tracks_listener(self.__on_tracks_added_or_deleted)
self.song().view.add_selected_track_listener(self.__on_selected_track_changed)
for t in chain(self.song().visible_tracks, self.song().return_tracks):
if not t.solo_has_listener(self.__update_rude_solo_led):
t.add_solo_listener(self.__update_rude_solo_led)
if not t.has_audio_output_has_listener(self.__on_any_tracks_output_type_changed):
t.add_has_audio_output_listener(self.__on_any_tracks_output_type_changed)
self.__on_selected_track_changed()
for s in self.__own_channel_strips:
s.set_channel_strip_controller(self)
self.__reassign_channel_strip_offsets()
self.__reassign_channel_strip_parameters(for_display_only=False)
def destroy(self):
self.song().remove_visible_tracks_listener(self.__on_tracks_added_or_deleted)
self.song().view.remove_selected_track_listener(self.__on_selected_track_changed)
for t in chain(self.song().visible_tracks, self.song().return_tracks):
if t.solo_has_listener(self.__update_rude_solo_led):
t.remove_solo_listener(self.__update_rude_solo_led)
if t.has_audio_output_has_listener(self.__on_any_tracks_output_type_changed):
t.remove_has_audio_output_listener(self.__on_any_tracks_output_type_changed)
st = self.__last_attached_selected_track
if st and st.devices_has_listener(self.__on_selected_device_chain_changed):
st.remove_devices_listener(self.__on_selected_device_chain_changed)
for note in channel_strip_assignment_switch_ids:
self.send_midi((NOTE_ON_STATUS, note, BUTTON_STATE_OFF))
for note in channel_strip_control_switch_ids:
self.send_midi((NOTE_ON_STATUS, note, BUTTON_STATE_OFF))
self.send_midi((NOTE_ON_STATUS, SELECT_RUDE_SOLO, BUTTON_STATE_OFF))
self.send_midi((CC_STATUS, 75, g7_seg_led_conv_table[' ']))
self.send_midi((CC_STATUS, 74, g7_seg_led_conv_table[' ']))
MHControlComponent.destroy(self)
def set_controller_extensions(self, left_extensions, right_extensions):
""" Called from the main script (after all scripts where initialized), to let us
know where and how many MackieControlXT are installed.
There exists only one ChannelStripController, so we will take care about the
extensions channel strips
"""
self.__left_extensions = left_extensions
self.__right_extensions = right_extensions
self.__channel_strips = []
stack_offset = 0
for le in left_extensions:
for s in le.channel_strips():
self.__channel_strips.append(s)
s.set_stack_offset(stack_offset)
stack_offset += NUM_CHANNEL_STRIPS
for s in self.__own_channel_strips:
self.__channel_strips.append(s)
s.set_stack_offset(stack_offset)
stack_offset += NUM_CHANNEL_STRIPS
for re in right_extensions:
for s in re.channel_strips():
self.__channel_strips.append(s)
s.set_stack_offset(stack_offset)
stack_offset += NUM_CHANNEL_STRIPS
for s in self.__channel_strips:
s.set_channel_strip_controller(self)
self.refresh_state()
def refresh_state(self):
self.__update_assignment_mode_leds()
self.__update_assignment_display()
self.__update_rude_solo_led()
self.__reassign_channel_strip_offsets()
self.__on_flip_changed()
self.__update_view_returns_mode()
def request_rebuild_midi_map(self):
""" Overridden to call also the extensions request_rebuild_midi_map"""
MHControlComponent.request_rebuild_midi_map(self)
for ex in self.__left_extensions + self.__right_extensions:
ex.request_rebuild_midi_map()
def on_update_display_timer(self):
self.__update_channel_strip_strings()
def toggle_meter_mode(self):
""" called from the main script when the display toggle button was pressed """
self.__meters_enabled = not self.__meters_enabled
self.__apply_meter_mode()
def handle_assignment_switch_ids(self, switch_id, value):
if switch_id == SID_ASSIGNMENT_IO:
if value == BUTTON_PRESSED:
self.__set_assignment_mode(CSM_IO)
elif switch_id == SID_ASSIGNMENT_SENDS:
if value == BUTTON_PRESSED:
self.__set_assignment_mode(CSM_SENDS)
elif switch_id == SID_ASSIGNMENT_PAN:
if value == BUTTON_PRESSED:
self.__set_assignment_mode(CSM_VOLPAN)
elif switch_id == SID_ASSIGNMENT_PLUG_INS:
if value == BUTTON_PRESSED:
self.__set_assignment_mode(CSM_PLUGINS)
elif switch_id == SID_ASSIGNMENT_EQ:
if value == BUTTON_PRESSED:
self.__switch_to_prev_page()
elif switch_id == SID_ASSIGNMENT_DYNAMIC:
if value == BUTTON_PRESSED:
self.__switch_to_next_page()
elif switch_id == SID_FADERBANK_PREV_BANK:
if value == BUTTON_PRESSED:
print('Previous scene')
self.song().undo() #MH
#MH if self.shift_is_pressed():
#MH self.__set_channel_offset(0)
#MH else:
#MH self.__set_channel_offset(self.__strip_offset() - len(self.__channel_strips))
elif switch_id == SID_FADERBANK_NEXT_BANK:
if value == BUTTON_PRESSED:
print('Next scene')
self.song().redo() #MH
#MH if self.shift_is_pressed():
#MH last_possible_offset = (self.__controlled_num_of_tracks() - self.__strip_offset()) / len(self.__channel_strips) * len(self.__channel_strips) + self.__strip_offset()
#MH if last_possible_offset == self.__controlled_num_of_tracks():
#MH last_possible_offset -= len(self.__channel_strips)
#MH self.__set_channel_offset(last_possible_offset)
#MH elif self.__strip_offset() < self.__controlled_num_of_tracks() - len(self.__channel_strips):
#MH self.__set_channel_offset(self.__strip_offset() + len(self.__channel_strips))
elif switch_id == SID_FADERBANK_PREV_CH:
if value == BUTTON_PRESSED:
if self.shift_is_pressed():
self.__set_channel_offset(0)
else:
self.__set_channel_offset(self.__strip_offset() - 1)
elif switch_id == SID_FADERBANK_NEXT_CH:
if value == BUTTON_PRESSED:
if self.shift_is_pressed():
self.__set_channel_offset(self.__controlled_num_of_tracks() - len(self.__channel_strips))
elif self.__strip_offset() < self.__controlled_num_of_tracks() - len(self.__channel_strips):
self.__set_channel_offset(self.__strip_offset() + 1)
elif switch_id == SID_FADERBANK_FLIP:
if value == BUTTON_PRESSED:
self.__toggle_flip()
elif switch_id == SID_FADERBANK_EDIT:
if value == BUTTON_PRESSED:
self.__toggle_view_returns()
def handle_vpot_rotation(self, strip_index, stack_offset, cc_value):
""" forwarded to us by the channel_strips """
if self.__assignment_mode == CSM_IO:
if cc_value >= 64:
direction = -1
else:
direction = 1
channel_strip = self.__channel_strips[stack_offset + strip_index]
current_routing = self.__routing_target(channel_strip)
available_routings = self.__available_routing_targets(channel_strip)
if current_routing and available_routings:
if current_routing in available_routings:
i = list(available_routings).index(current_routing)
if direction == 1:
new_i = min(len(available_routings) - 1, i + direction)
else:
new_i = max(0, i + direction)
new_routing = available_routings[new_i]
elif len(available_routings):
new_routing = available_routings[0]
self.__set_routing_target(channel_strip, new_routing)
elif self.__assignment_mode == CSM_PLUGINS:
pass
else:
channel_strip = self.__channel_strips[stack_offset + strip_index]
            assert not channel_strip.assigned_track() or not channel_strip.assigned_track().has_audio_output, 'in every other mode, the midimap should handle the messages'
def handle_fader_touch(self, strip_offset, stack_offset, touched):
""" forwarded to us by the channel_strips """
self.__reassign_channel_strip_parameters(for_display_only=True)
def handle_pressed_v_pot(self, strip_index, stack_offset):
""" forwarded to us by the channel_strips """
if self.__assignment_mode == CSM_VOLPAN or self.__assignment_mode == CSM_SENDS or self.__assignment_mode == CSM_PLUGINS and self.__plugin_mode == PCM_PARAMETERS:
if stack_offset + strip_index in range(0, len(self.__channel_strips)):
param = self.__channel_strips[stack_offset + strip_index].v_pot_parameter()
if param and param.is_enabled:
if param.is_quantized:
if param.value + 1 > param.max:
param.value = param.min
else:
param.value = param.value + 1
else:
param.value = param.default_value
elif self.__assignment_mode == CSM_PLUGINS:
if self.__plugin_mode == PCM_DEVICES:
device_index = strip_index + stack_offset + self.__plugin_mode_offsets[PCM_DEVICES]
if device_index >= 0 and device_index < len(self.song().view.selected_track.devices):
if self.__chosen_plugin != None:
self.__chosen_plugin.remove_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__chosen_plugin = self.song().view.selected_track.devices[device_index]
self.__chosen_plugin != None and self.__chosen_plugin.add_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__reorder_parameters()
self.__plugin_mode_offsets[PCM_PARAMETERS] = 0
self.__set_plugin_mode(PCM_PARAMETERS)
def __strip_offset(self):
""" return the bank_channel offset depending if we are in return mode or not
"""
if self.__view_returns:
return self.__bank_cha_offset_returns
else:
return self.__bank_cha_offset
def __controlled_num_of_tracks(self):
""" return the number of tracks, depending on if we are in send_track
mode or normal track mode
"""
if self.__view_returns:
return len(self.song().return_tracks)
else:
return len(self.song().visible_tracks)
def __send_parameter(self, strip_index, stack_index):
""" Return the send parameter that is assigned to the given channel strip
"""
        assert self.__assignment_mode == CSM_SENDS
        send_index = strip_index + stack_index + self.__send_mode_offset
        sends = self.song().view.selected_track.mixer_device.sends
        if send_index < len(sends):
            p = sends[send_index]
            return (p, p.name)
        return (None, None)
def __plugin_parameter(self, strip_index, stack_index):
""" Return the parameter that is assigned to the given channel strip
"""
        assert self.__assignment_mode == CSM_PLUGINS
        if self.__plugin_mode == PCM_DEVICES:
            return (None, None)
        elif self.__plugin_mode == PCM_PARAMETERS:
            assert self.__chosen_plugin
            parameters = self.__ordered_plugin_parameters
            parameter_index = strip_index + stack_index + self.__plugin_mode_offsets[PCM_PARAMETERS]
            if parameter_index >= 0 and parameter_index < len(parameters):
                return parameters[parameter_index]
            else:
                return (None, None)
        else:
            raise AssertionError
def __any_slider_is_touched(self):
for s in self.__channel_strips:
if s.is_touched():
return True
return False
def __can_flip(self):
if self.__assignment_mode == CSM_PLUGINS and self.__plugin_mode == PCM_DEVICES:
return False
elif self.__assignment_mode == CSM_IO:
return False
return True
def __can_switch_to_prev_page(self):
""" return true if pressing the "next" button will have any effect """
if self.__assignment_mode == CSM_PLUGINS:
return self.__plugin_mode_offsets[self.__plugin_mode] > 0
elif self.__assignment_mode == CSM_SENDS:
return self.__send_mode_offset > 0
else:
return False
def __can_switch_to_next_page(self):
""" return true if pressing the "prev" button will have any effect """
if self.__assignment_mode == CSM_PLUGINS:
sel_track = self.song().view.selected_track
if self.__plugin_mode == PCM_DEVICES:
return self.__plugin_mode_offsets[PCM_DEVICES] + len(self.__channel_strips) < len(sel_track.devices)
            elif self.__plugin_mode == PCM_PARAMETERS:
                assert self.__chosen_plugin
                parameters = self.__ordered_plugin_parameters
                return self.__plugin_mode_offsets[PCM_PARAMETERS] + len(self.__channel_strips) < len(parameters)
            else:
                raise AssertionError
elif self.__assignment_mode == CSM_SENDS:
return self.__send_mode_offset + len(self.__channel_strips) < len(self.song().return_tracks)
else:
return False
def __available_routing_targets(self, channel_strip):
        assert self.__assignment_mode == CSM_IO
t = channel_strip.assigned_track()
if t:
if self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_MAIN:
return t.input_routings
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_SUB:
return t.input_sub_routings
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_MAIN:
return t.output_routings
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_SUB:
return t.output_sub_routings
else:
                raise AssertionError
else:
return None
def __routing_target(self, channel_strip):
        assert self.__assignment_mode == CSM_IO
t = channel_strip.assigned_track()
if t:
if self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_MAIN:
return t.current_input_routing
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_SUB:
return t.current_input_sub_routing
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_MAIN:
return t.current_output_routing
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_SUB:
return t.current_output_sub_routing
else:
                raise AssertionError
else:
return None
def __set_routing_target(self, channel_strip, target_string):
        assert self.__assignment_mode == CSM_IO
t = channel_strip.assigned_track()
if t:
if self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_MAIN:
t.current_input_routing = target_string
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_SUB:
t.current_input_sub_routing = target_string
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_MAIN:
t.current_output_routing = target_string
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_SUB:
t.current_output_sub_routing = target_string
else:
                raise AssertionError
def __set_channel_offset(self, new_offset):
""" Set and validate a new channel_strip offset, which shifts all available channel
        strips within all the available tracks or return tracks
"""
if new_offset < 0:
new_offset = 0
elif new_offset >= self.__controlled_num_of_tracks():
new_offset = self.__controlled_num_of_tracks() - 1
if self.__view_returns:
self.__bank_cha_offset_returns = new_offset
else:
self.__bank_cha_offset = new_offset
self.__main_display_controller.set_channel_offset(new_offset)
self.__reassign_channel_strip_offsets()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.request_rebuild_midi_map()
def __set_assignment_mode(self, mode):
for plugin in self.__displayed_plugins:
if plugin != None:
plugin.remove_name_listener(self.__update_plugin_names)
self.__displayed_plugins = []
if mode == CSM_PLUGINS:
self.__assignment_mode = mode
self.__main_display_controller.set_show_parameter_names(True)
self.__set_plugin_mode(PCM_DEVICES)
elif mode == CSM_SENDS:
self.__main_display_controller.set_show_parameter_names(True)
self.__assignment_mode = mode
else:
if mode == CSM_IO:
for s in self.__channel_strips:
s.unlight_vpot_leds()
self.__main_display_controller.set_show_parameter_names(False)
if self.__assignment_mode != mode:
self.__assignment_mode = mode
elif self.__assignment_mode == CSM_IO:
self.__switch_to_next_io_mode()
self.__update_assignment_mode_leds()
self.__update_assignment_display()
self.__apply_meter_mode()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.__update_page_switch_leds()
if mode == CSM_PLUGINS:
self.__update_vpot_leds_in_plugins_device_choose_mode()
self.__update_flip_led()
self.request_rebuild_midi_map()
def __set_plugin_mode(self, new_mode):
""" Set a new plugin sub-mode, which can be:
1. Choosing the device to control (PCM_DEVICES)
2. Controlling the chosen devices parameters (PCM_PARAMETERS)
"""
        assert new_mode >= 0 and new_mode < PCM_NUMMODES
if self.__plugin_mode != new_mode:
self.__plugin_mode = new_mode
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
            if self.__plugin_mode == PCM_DEVICES:
                self.__update_vpot_leds_in_plugins_device_choose_mode()
else:
for plugin in self.__displayed_plugins:
if plugin != None:
plugin.remove_name_listener(self.__update_plugin_names)
self.__displayed_plugins = []
self.__update_page_switch_leds()
self.__update_flip_led()
self.__update_page_switch_leds()
def __switch_to_prev_page(self):
""" Switch to the previous page in the non track strip modes (choosing plugs, or
controlling devices)
"""
if self.__can_switch_to_prev_page():
if self.__assignment_mode == CSM_PLUGINS:
self.__plugin_mode_offsets[self.__plugin_mode] -= len(self.__channel_strips)
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
elif self.__assignment_mode == CSM_SENDS:
self.__send_mode_offset -= len(self.__channel_strips)
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.__update_page_switch_leds()
self.request_rebuild_midi_map()
def __switch_to_next_page(self):
""" Switch to the next page in the non track strip modes (choosing plugs, or
controlling devices)
"""
if self.__can_switch_to_next_page():
if self.__assignment_mode == CSM_PLUGINS:
self.__plugin_mode_offsets[self.__plugin_mode] += len(self.__channel_strips)
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
elif self.__assignment_mode == CSM_SENDS:
self.__send_mode_offset += len(self.__channel_strips)
else:
                raise AssertionError
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
self.__update_page_switch_leds()
self.request_rebuild_midi_map()
def __switch_to_next_io_mode(self):
""" Step through the available IO modes (In/OutPut//Main/Sub)
"""
self.__sub_mode_in_io_mode += 1
if self.__sub_mode_in_io_mode > CSM_IO_LAST_MODE:
self.__sub_mode_in_io_mode = CSM_IO_FIRST_MODE
def __reassign_channel_strip_offsets(self):
""" Update the channel strips bank_channel offset
"""
for s in self.__channel_strips:
s.set_bank_and_channel_offset(self.__strip_offset(), self.__view_returns, self.__within_track_added_or_deleted)
def __reassign_channel_strip_parameters(self, for_display_only):
""" Reevaluate all v-pot/fader -> parameter assignments
"""
display_parameters = []
for s in self.__channel_strips:
vpot_param = (None, None)
slider_param = (None, None)
vpot_display_mode = VPOT_DISPLAY_SINGLE_DOT
slider_display_mode = VPOT_DISPLAY_SINGLE_DOT
if self.__assignment_mode == CSM_VOLPAN:
if s.assigned_track() and s.assigned_track().has_audio_output:
vpot_param = (s.assigned_track().mixer_device.panning, 'Pan')
vpot_display_mode = VPOT_DISPLAY_BOOST_CUT
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
slider_display_mode = VPOT_DISPLAY_WRAP
elif self.__assignment_mode == CSM_PLUGINS:
vpot_param = self.__plugin_parameter(s.strip_index(), s.stack_offset())
vpot_display_mode = VPOT_DISPLAY_WRAP
if s.assigned_track() and s.assigned_track().has_audio_output:
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
slider_display_mode = VPOT_DISPLAY_WRAP
elif self.__assignment_mode == CSM_SENDS:
vpot_param = self.__send_parameter(s.strip_index(), s.stack_offset())
vpot_display_mode = VPOT_DISPLAY_WRAP
if s.assigned_track() and s.assigned_track().has_audio_output:
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
slider_display_mode = VPOT_DISPLAY_WRAP
elif self.__assignment_mode == CSM_IO:
if s.assigned_track() and s.assigned_track().has_audio_output:
slider_param = (s.assigned_track().mixer_device.volume, 'Volume')
if self.__flip and self.__can_flip():
if self.__any_slider_is_touched():
display_parameters.append(vpot_param)
else:
display_parameters.append(slider_param)
if not for_display_only:
s.set_v_pot_parameter(slider_param[0], slider_display_mode)
s.set_fader_parameter(vpot_param[0])
else:
if self.__any_slider_is_touched():
display_parameters.append(slider_param)
else:
display_parameters.append(vpot_param)
if not for_display_only:
s.set_v_pot_parameter(vpot_param[0], vpot_display_mode)
s.set_fader_parameter(slider_param[0])
self.__main_display_controller.set_channel_offset(self.__strip_offset())
if len(display_parameters):
self.__main_display_controller.set_parameters(display_parameters)
def __apply_meter_mode(self):
""" Update the meter mode in the displays and channel strips """
enabled = self.__meters_enabled and self.__assignment_mode is CSM_VOLPAN
for s in self.__channel_strips:
s.enable_meter_mode(enabled)
self.__main_display_controller.enable_meters(enabled)
def __toggle_flip(self):
""" En/Disable V-Pot / Fader flipping
"""
if self.__can_flip():
self.__flip = not self.__flip
self.__on_flip_changed()
def __toggle_view_returns(self):
""" Toggle if we want to control the return tracks or normal tracks
"""
self.__view_returns = not self.__view_returns
self.__update_view_returns_mode()
def __update_assignment_mode_leds(self):
""" Show which assignment mode is currently active """
if self.__assignment_mode == CSM_IO:
sid_on_switch = SID_ASSIGNMENT_IO
elif self.__assignment_mode == CSM_SENDS:
sid_on_switch = SID_ASSIGNMENT_SENDS
elif self.__assignment_mode == CSM_VOLPAN:
sid_on_switch = SID_ASSIGNMENT_PAN
elif self.__assignment_mode == CSM_PLUGINS:
sid_on_switch = SID_ASSIGNMENT_PLUG_INS
else:
            raise AssertionError
for s in (SID_ASSIGNMENT_IO,
SID_ASSIGNMENT_SENDS,
SID_ASSIGNMENT_PAN,
SID_ASSIGNMENT_PLUG_INS):
if s == sid_on_switch:
self.send_midi((NOTE_ON_STATUS, s, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, s, BUTTON_STATE_OFF))
def __update_assignment_display(self):
""" Cryptically label the current assignment mode in the 2char display above
the assignment buttons
"""
if self.__assignment_mode == CSM_VOLPAN:
ass_string = ['P', 'N']
else:
if self.__assignment_mode == CSM_PLUGINS or self.__assignment_mode == CSM_SENDS:
                ass_string = ['M', 'A'] if self.__last_attached_selected_track == self.song().master_track else None
for t in self.song().return_tracks:
if t == self.__last_attached_selected_track:
ass_string = ['R', chr(ord('A') + list(self.song().return_tracks).index(t))]
break
for t in self.song().visible_tracks:
if t == self.__last_attached_selected_track:
ass_string = list('%.2d' % min(99, list(self.song().visible_tracks).index(t) + 1))
break
                assert ass_string
elif self.__assignment_mode == CSM_IO:
if self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_MAIN:
ass_string = ['I', "'"]
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_INPUT_SUB:
ass_string = ['I', ',']
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_MAIN:
ass_string = ['0', "'"]
elif self.__sub_mode_in_io_mode == CSM_IO_MODE_OUTPUT_SUB:
ass_string = ['0', ',']
                else:
                    raise AssertionError
            else:
                raise AssertionError
self.send_midi((CC_STATUS, 75, g7_seg_led_conv_table[ass_string[0]]))
self.send_midi((CC_STATUS, 74, g7_seg_led_conv_table[ass_string[1]]))
def __update_rude_solo_led(self):
any_track_soloed = False
for t in chain(self.song().tracks, self.song().return_tracks):
if t.solo:
any_track_soloed = True
break
if any_track_soloed:
self.send_midi((NOTE_ON_STATUS, SELECT_RUDE_SOLO, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SELECT_RUDE_SOLO, BUTTON_STATE_OFF))
def __update_page_switch_leds(self):
""" visualize if the "prev" an "next" buttons can be pressed """
if self.__can_switch_to_prev_page():
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_EQ, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_EQ, BUTTON_STATE_OFF))
if self.__can_switch_to_next_page():
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_DYNAMIC, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_ASSIGNMENT_DYNAMIC, BUTTON_STATE_OFF))
def __update_flip_led(self):
if self.__flip and self.__can_flip():
self.send_midi((NOTE_ON_STATUS, SID_FADERBANK_FLIP, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_FADERBANK_FLIP, BUTTON_STATE_OFF))
def __update_vpot_leds_in_plugins_device_choose_mode(self):
""" To be called in assignment mode CSM_PLUGINS, submode PCM_DEVICES only:
This will enlighten all poties which can be pressed to choose a device
for editing, and unlight all poties where pressing will have no effect
"""
        assert self.__assignment_mode == CSM_PLUGINS
        assert self.__plugin_mode == PCM_DEVICES
sel_track = self.song().view.selected_track
count = 0
for s in self.__channel_strips:
offset = self.__plugin_mode_offsets[self.__plugin_mode]
if sel_track and offset + count >= 0 and offset + count < len(sel_track.devices):
s.show_full_enlighted_poti()
else:
s.unlight_vpot_leds()
count += 1
def __update_channel_strip_strings(self):
""" In IO mode, collect all strings that will be visible in the main display manually
"""
if not self.__any_slider_is_touched():
if self.__assignment_mode == CSM_IO:
targets = []
for s in self.__channel_strips:
if self.__routing_target(s):
targets.append(self.__routing_target(s))
else:
targets.append('')
self.__main_display_controller.set_channel_strip_strings(targets)
elif self.__assignment_mode == CSM_PLUGINS and self.__plugin_mode == PCM_DEVICES:
for plugin in self.__displayed_plugins:
if plugin != None:
plugin.remove_name_listener(self.__update_plugin_names)
self.__displayed_plugins = []
sel_track = self.song().view.selected_track
for i in range(len(self.__channel_strips)):
device_index = i + self.__plugin_mode_offsets[PCM_DEVICES]
if device_index >= 0 and device_index < len(sel_track.devices):
sel_track.devices[device_index].add_name_listener(self.__update_plugin_names)
self.__displayed_plugins.append(sel_track.devices[device_index])
else:
self.__displayed_plugins.append(None)
self.__update_plugin_names()
def __update_plugin_names(self):
        assert self.__assignment_mode == CSM_PLUGINS and self.__plugin_mode == PCM_DEVICES
device_strings = []
for plugin in self.__displayed_plugins:
if plugin != None:
device_strings.append(plugin.name)
else:
device_strings.append('')
self.__main_display_controller.set_channel_strip_strings(device_strings)
def __update_view_returns_mode(self):
""" Update the control return tracks LED
"""
if self.__view_returns:
self.send_midi((NOTE_ON_STATUS, SID_FADERBANK_EDIT, BUTTON_STATE_ON))
else:
self.send_midi((NOTE_ON_STATUS, SID_FADERBANK_EDIT, BUTTON_STATE_OFF))
self.__main_display_controller.set_show_return_track_names(self.__view_returns)
self.__reassign_channel_strip_offsets()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
def __on_selected_track_changed(self):
""" Notifier, called as soon as the selected track has changed
"""
st = self.__last_attached_selected_track
if st and st.devices_has_listener(self.__on_selected_device_chain_changed):
st.remove_devices_listener(self.__on_selected_device_chain_changed)
self.__last_attached_selected_track = self.song().view.selected_track
st = self.__last_attached_selected_track
if st:
st.add_devices_listener(self.__on_selected_device_chain_changed)
if self.__assignment_mode == CSM_PLUGINS:
self.__plugin_mode_offsets = [ 0 for x in range(PCM_NUMMODES) ]
if self.__chosen_plugin != None:
self.__chosen_plugin.remove_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__chosen_plugin = None
self.__ordered_plugin_parameters = []
self.__update_assignment_display()
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
else:
self.__set_plugin_mode(PCM_DEVICES)
elif self.__assignment_mode == CSM_SENDS:
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_assignment_display()
self.request_rebuild_midi_map()
def __on_flip_changed(self):
""" Update the flip button LED when the flip mode changed
"""
self.__update_flip_led()
if self.__can_flip():
self.__update_assignment_display()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
def __on_selected_device_chain_changed(self):
if self.__assignment_mode == CSM_PLUGINS:
if self.__plugin_mode == PCM_DEVICES:
self.__update_vpot_leds_in_plugins_device_choose_mode()
self.__update_page_switch_leds()
elif self.__plugin_mode == PCM_PARAMETERS:
if not self.__chosen_plugin:
self.__set_plugin_mode(PCM_DEVICES)
elif self.__chosen_plugin not in self.__last_attached_selected_track.devices:
if self.__chosen_plugin != None:
self.__chosen_plugin.remove_parameters_listener(self.__on_parameter_list_of_chosen_plugin_changed)
self.__chosen_plugin = None
self.__set_plugin_mode(PCM_DEVICES)
def __on_tracks_added_or_deleted(self):
""" Notifier, called as soon as tracks where added, removed or moved
"""
self.__within_track_added_or_deleted = True
for t in chain(self.song().visible_tracks, self.song().return_tracks):
if not t.solo_has_listener(self.__update_rude_solo_led):
t.add_solo_listener(self.__update_rude_solo_led)
if not t.has_audio_output_has_listener(self.__on_any_tracks_output_type_changed):
t.add_has_audio_output_listener(self.__on_any_tracks_output_type_changed)
if self.__send_mode_offset >= len(self.song().return_tracks):
self.__send_mode_offset = 0
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
if self.__strip_offset() + len(self.__channel_strips) >= self.__controlled_num_of_tracks():
self.__set_channel_offset(max(0, self.__controlled_num_of_tracks() - len(self.__channel_strips)))
self.__reassign_channel_strip_parameters(for_display_only=False)
self.__update_channel_strip_strings()
if self.__assignment_mode == CSM_SENDS:
self.__update_page_switch_leds()
self.refresh_state()
self.__main_display_controller.refresh_state()
self.__within_track_added_or_deleted = False
self.request_rebuild_midi_map()
def __on_any_tracks_output_type_changed(self):
""" called as soon as any device chain has changed (devices where
added/removed/swapped...)
"""
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
def __on_parameter_list_of_chosen_plugin_changed(self):
        assert self.__chosen_plugin != None
        assert self.__plugin_mode == PCM_PARAMETERS
self.__reorder_parameters()
self.__reassign_channel_strip_parameters(for_display_only=False)
self.request_rebuild_midi_map()
def __reorder_parameters(self):
result = []
if self.__chosen_plugin:
if self.__chosen_plugin.class_name in DEVICE_DICT.keys():
device_banks = DEVICE_DICT[self.__chosen_plugin.class_name]
for bank in device_banks:
for param_name in bank:
parameter_name = ''
parameter = get_parameter_by_name(self.__chosen_plugin, param_name)
if parameter:
parameter_name = parameter.name
result.append((parameter, parameter_name))
else:
result = [ (p, p.name) for p in self.__chosen_plugin.parameters[1:] ]
self.__ordered_plugin_parameters = result
| 13,355
| 0
| 459
|
ab774b27049c59a7c1608f2fef3901d45566ef6f
| 81
|
py
|
Python
|
Aula09/ex14.py
|
danicon/Curso_Python
|
18fb4aad975bdc3b96cb5320331dbc7d3c3a459d
|
[
"MIT"
] | null | null | null |
Aula09/ex14.py
|
danicon/Curso_Python
|
18fb4aad975bdc3b96cb5320331dbc7d3c3a459d
|
[
"MIT"
] | null | null | null |
Aula09/ex14.py
|
danicon/Curso_Python
|
18fb4aad975bdc3b96cb5320331dbc7d3c3a459d
|
[
"MIT"
] | null | null | null |
nome = str(input('Type your name: '))
nom = nome.title()
print('Silva' in nom)
| 27
| 40
| 0.654321
|
nome = str(input('Type your name: '))
nom = nome.title()
print('Silva' in nom)
| 0
| 0
| 0
|
4c05ba9565f2cbca21d16be983c81c5b28beccd3
| 402
|
py
|
Python
|
09 Sorting/3 insertionSort.py
|
harshrajm/Python-Algo-DS
|
02437fa923b2b6264b29bd7ba84ccbb7feb9c8f0
|
[
"MIT"
] | null | null | null |
09 Sorting/3 insertionSort.py
|
harshrajm/Python-Algo-DS
|
02437fa923b2b6264b29bd7ba84ccbb7feb9c8f0
|
[
"MIT"
] | null | null | null |
09 Sorting/3 insertionSort.py
|
harshrajm/Python-Algo-DS
|
02437fa923b2b6264b29bd7ba84ccbb7feb9c8f0
|
[
"MIT"
] | null | null | null |
print(insertionSort([55,3,2,5,6,75,4]))
| 21.157895
| 61
| 0.487562
|
def insertionSort(arr):
for i in range(1,len(arr)):
currentVal = arr[i]
position = i
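        # shift larger elements one slot to the right until the insertion point is found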
while position > 0 and arr[position-1] > currentVal:
arr[position] = arr[position-1]
position = position - 1
arr[position] = currentVal
return arr
print(insertionSort([55,3,2,5,6,75,4]))
| 332
| 0
| 25
|
e1877cfa1bdf9f6a3b42eb79a4b5b0362841a6c8
| 350
|
py
|
Python
|
PyMOTW/source/argparse/argparse_mutually_exclusive.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
PyMOTW/source/argparse/argparse_mutually_exclusive.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2020-07-18T03:52:03.000Z
|
2020-07-18T04:18:01.000Z
|
PyMOTW/source/argparse/argparse_mutually_exclusive.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 2
|
2021-03-06T04:28:32.000Z
|
2021-03-06T04:59:17.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""
"""
#end_pymotw_header
import argparse
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
print(parser.parse_args())
| 18.421053
| 57
| 0.737143
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""
"""
#end_pymotw_header
import argparse
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
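# -a and -b share one exclusive group, so supplying both on the command line is an error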
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
print(parser.parse_args())
| 0
| 0
| 0
|
3fbb83487a465a1a51cc8203d043aefa5f77fea9
| 2,306
|
py
|
Python
|
example/server.py
|
inovonics/cloud-oauth
|
951124100dd45204c3325b975084fa9690d16147
|
[
"MIT"
] | 1
|
2017-10-19T19:23:38.000Z
|
2017-10-19T19:23:38.000Z
|
example/server.py
|
inovonics/cloud-oauth
|
951124100dd45204c3325b975084fa9690d16147
|
[
"MIT"
] | 3
|
2017-12-04T18:11:31.000Z
|
2018-04-10T23:58:59.000Z
|
example/server.py
|
inovonics/cloud-oauth
|
951124100dd45204c3325b975084fa9690d16147
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# === IMPORTS ===
import logging
import os
from flask import Flask
from inovonics.cloud.datastore import InoRedis
from inovonics.cloud.oauth import InoOAuth2Provider, oauth_register_handlers
from inovonics.cloud.oauth import OAuthClients, OAuthClient, OAuthUsers, OAuthUser
# === GLOBALS ===
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
REDIS_DB = os.getenv('REDIS_DB', 0)
dstore = InoRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
app = Flask(__name__)
oauth = InoOAuth2Provider(app, dstore)
oauth_register_handlers(app, oauth, token_path='/oauth/token', revoke_path='/oauth/revoke')
# === FUNCTIONS ===
@app.before_first_request
@app.route('/')
@app.route('/protected/')
@oauth.require_oauth('protected')
# === CLASSES ===
# === MAIN ===
if __name__ == '__main__':
main()
| 27.452381
| 114
| 0.669124
|
#!/usr/bin/env python3
# === IMPORTS ===
import logging
import os
from flask import Flask
from inovonics.cloud.datastore import InoRedis
from inovonics.cloud.oauth import InoOAuth2Provider, oauth_register_handlers
from inovonics.cloud.oauth import OAuthClients, OAuthClient, OAuthUsers, OAuthUser
# === GLOBALS ===
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
REDIS_DB = os.getenv('REDIS_DB', 0)
dstore = InoRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
app = Flask(__name__)
oauth = InoOAuth2Provider(app, dstore)
oauth_register_handlers(app, oauth, token_path='/oauth/token', revoke_path='/oauth/revoke')
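# wires the token grant and revocation endpoints onto the Flask app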
# === FUNCTIONS ===
@app.before_first_request
def db_init():
# This flushes the Redis database and pushed a default user and a couple of default clients into the database.
dstore.redis.flushdb()
users = OAuthUsers(dstore)
clients = OAuthClients(dstore)
user1_data = {
'username': 'admin@example.com',
'first_name': 'Admin',
'last_name': 'Testuser',
'is_active': True,
'scopes': ['protected']
}
user1 = OAuthUser(user1_data)
user1.update_password('<insert_password_here>')
users.create(user1)
client1_data = {
'name': 'Test Client One',
'client_id': '<insert_client_id_here>', # API_KEY
'client_secret': '', # API_SECRET
'user': 'admin@example.com',
'is_confidential': False,
'allowed_grant_types': ['password'],
'redirect_uris': [],
'default_scopes': ['protected'],
'allowed_scopes': ['protected']
}
client1 = OAuthClient(client1_data)
clients.create(client1)
@app.route('/')
def static_root():
return app.send_static_file('index.html')
@app.route('/protected/')
@oauth.require_oauth('protected')
def protected_page():
return app.send_static_file('protected.html')
# === CLASSES ===
# === MAIN ===
def main():
# Allow non-TLS protected requests for testing
os.environ['DEBUG'] = 'true'
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
# Enable DEBUG logging
logging.basicConfig(level=logging.DEBUG)
# Make the magic happen
app.run(debug=True, host='0.0.0.0', port=8080)
if __name__ == '__main__':
main()
| 1,349
| 0
| 88
|
efbb55992e67f2574dbed2a58dbe02993a8fd9cc
| 8,122
|
py
|
Python
|
social_exclusion/tests.py
|
AWI-Lab/otree
|
6815c38a92cd93540f6e9c7ebdaecf90627ca865
|
[
"MIT"
] | 3
|
2017-03-10T18:56:57.000Z
|
2019-03-07T18:18:37.000Z
|
social_exclusion/tests.py
|
AWI-Lab/otree
|
6815c38a92cd93540f6e9c7ebdaecf90627ca865
|
[
"MIT"
] | null | null | null |
social_exclusion/tests.py
|
AWI-Lab/otree
|
6815c38a92cd93540f6e9c7ebdaecf90627ca865
|
[
"MIT"
] | null | null | null |
from otree.api import Currency as c, currency_range
from . import views
from ._builtin import Bot
from .models import Constants
from otree.api import SubmissionMustFail
| 44.626374
| 123
| 0.573381
|
from otree.api import Currency as c, currency_range
from . import views
from ._builtin import Bot
from .models import Constants
from otree.api import SubmissionMustFail
class PlayerBot(Bot):
# in allplay_1 all players invite
# in allplay_2 some not invite but no majority
# in notallplay there is a majority
cases = ['allplay_1', 'allplay_2', 'notallplay']
    # Voting is complex behavior: each player can vote only for the other
    # players, never for himself, so a player's input depends on his label.
    # Each helper below returns a dict with the correct voting behavior.
    # set_voting_behavior: every player votes True for all other players.
def set_voting_behavior(self, in_ex):
votealldic = {in_ex+'_A':True, in_ex+'_B':True, in_ex+'_C':True, in_ex+'_D':True, in_ex+'_E':True}
label = self.player.label[-1]
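        # keep only the vote entries that do not target this player himself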
icanvotefor = {}
for poss in votealldic.keys():
if label not in poss:
icanvotefor[poss] = votealldic[poss]
print('Bot ' + self.player.label + ' will make vote decisions: ' + str(icanvotefor))
return icanvotefor
#note: votes here can be used as invitations or as exclusions
def complex_voting_behavior(self,in_ex, majority):
if majority == 'nomajority':
return {'Player A': {in_ex+'_B':False, in_ex+'_C':True, in_ex+'_D':True, in_ex+'_E':True},
'Player B': {in_ex+'_A':False, in_ex+'_C':True, in_ex+'_D':True, in_ex+'_E':True},
'Player C': {in_ex+'_A':True, in_ex+'_B':True, in_ex+'_D':True, in_ex+'_E':True},
'Player D': {in_ex+'_A':False, in_ex+'_B':True, in_ex+'_C':True, in_ex+'_E':True},
'Player E': {in_ex+'_A':True, in_ex+'_B':False, in_ex+'_C':True, in_ex+'_D':True},}
elif majority == 'yesmajority':
return {'Player A': {in_ex+'_B':True, in_ex+'_C':True, in_ex+'_D':False, in_ex+'_E':True },
'Player B': {in_ex+'_A':True, in_ex+'_C':True, in_ex+'_D':False, in_ex+'_E':True },
'Player C': {in_ex+'_A':True, in_ex+'_B':True, in_ex+'_D':False, in_ex+'_E':True },
'Player D': {in_ex+'_A':True, in_ex+'_B':True, in_ex+'_C':True, in_ex+'_E':True },
'Player E': {in_ex+'_A':True, in_ex+'_B':True, in_ex+'_C':True, in_ex+'_D':False },}
else:
            raise Exception('wrong input to complex_voting_behavior')
def play_round(self):
if self.player.treatment == 'inclusion':
yield (views.Instructions)
for wrong_input in [-1,-0.01, 'hello', '!', 101, 55.5, '']:
yield SubmissionMustFail(views.FirstContribution, {'cont_first': wrong_input})
yield (views.FirstContribution, {'cont_first':100})
assert self.group.total_cont_first == 500
assert self.group.indiv_share_first == 200
assert self.player.payoff == 200
yield (views.FirstResults)
#all players invite all players
if self.case == 'allplay_1':
print('Ive been here: allplay 1')
yield (views.Vote, self.set_voting_behavior('invite'))
assert self.group.all_play == 'True'
assert self.player.plays_secondpg == True
            #players A and B get only 2 invitations each, the others get all 4; no majority
elif self.case == 'allplay_2':
print('Ive been here: allplay 2')
yield (views.Vote, self.complex_voting_behavior('invite','nomajority')[self.player.label])
if self.player.label == 'Player A' or self.player.label == 'Player B':
assert self.player.myvotes_inclusion == 2
else:
assert self.player.myvotes_inclusion == 4
assert self.group.all_play == 'True'
#player D gets 0 invitations
elif self.case == 'notallplay':
print('Ive been here: notallplay')
yield (views.Vote, self.complex_voting_behavior('invite','yesmajority')[self.player.label])
if self.player.label == 'Player D':
assert self.player.myvotes_inclusion == 0
else:
assert self.player.myvotes_inclusion == 4
assert self.group.all_play=='False'
else:
print('titanic sinks in 1 sec...')
yield (views.VoteResults)
if self.player.plays_secondpg == False:
yield (views.InsteadOfSecondContribution)
elif self.player.plays_secondpg == True:
for wrong_input in [-1,-0.01, 'hello', '!', 101, 55.5, '']:
yield SubmissionMustFail(views.SecondContribution, {'cont_second': wrong_input})
yield (views.SecondContribution, {'cont_second': 50})
if self.group.all_play == 'True':
assert self.group.total_cont_second == 250
#if only 4 players contribute 50
else:
assert self.group.total_cont_second == 200
yield (views.SecondResults)
if self.round_number == Constants.num_rounds:
yield (views.LastPage)
elif self.player.treatment == 'exclusion':
yield (views.Instructions)
for wrong_input in [-1, -0.01, 'hello', '!', 101, 55.5, '']:
yield SubmissionMustFail(views.FirstContribution, {'cont_first': wrong_input})
yield (views.FirstContribution, {'cont_first': 100})
assert self.group.total_cont_first == 500
assert self.group.indiv_share_first == 200
assert self.player.payoff == 200
yield (views.FirstResults)
            # nobody casts an exclusion vote, so everyone plays
if self.case == 'allplay_1':
print('Ive been here: allplay 1')
yield (views.Vote, self.set_voting_behavior('exclude'))
assert self.group.all_play == 'True'
assert self.player.plays_secondpg == True
            # players A and B get 2 exclusions each, the others get none; no majority
elif self.case == 'allplay_2':
print('Ive been here: allplay 2')
yield (views.Vote, self.complex_voting_behavior('exclude','nomajority')[self.player.label])
if self.player.label == 'Player A' or self.player.label == 'Player B':
assert self.player.myvotes_exclusion == 2
else:
assert self.player.myvotes_exclusion == 0
assert self.group.all_play == 'True'
            # player D gets 4 exclusions, the others get none
elif self.case == 'notallplay':
print('Ive been here: notallplay')
yield (views.Vote, self.complex_voting_behavior('exclude','yesmajority')[self.player.label])
if self.player.label == 'Player D':
assert self.player.myvotes_exclusion == 4
else:
assert self.player.myvotes_exclusion == 0
assert self.group.all_play == 'False'
else:
print('titanic sinks in 1 sec...')
yield (views.VoteResults)
if self.player.plays_secondpg == False:
yield (views.InsteadOfSecondContribution)
elif self.player.plays_secondpg == True:
for wrong_input in [-1, -0.01, 'hello', '!', 101, 55.5, '']:
yield SubmissionMustFail(views.SecondContribution, {'cont_second': wrong_input})
yield (views.SecondContribution, {'cont_second': 50})
if self.group.all_play == 'True':
assert self.group.total_cont_second == 250
# if only 4 players contribute 50
else:
assert self.group.total_cont_second == 200
yield (views.SecondResults)
if self.round_number == Constants.num_rounds:
yield (views.LastPage)
| 7,290
| 635
| 23
|
6586336720ee9e57187421d19fc6336e7ecc0c4a
| 707
|
py
|
Python
|
tests/test_sftp.py
|
somenzz/transferfile
|
0b718ee43a5ea4e728e23bf2bf7d92ca0f2aa413
|
[
"MIT"
] | 1
|
2021-06-08T06:20:45.000Z
|
2021-06-08T06:20:45.000Z
|
tests/test_sftp.py
|
somenzz/transferfile
|
0b718ee43a5ea4e728e23bf2bf7d92ca0f2aa413
|
[
"MIT"
] | null | null | null |
tests/test_sftp.py
|
somenzz/transferfile
|
0b718ee43a5ea4e728e23bf2bf7d92ca0f2aa413
|
[
"MIT"
] | null | null | null |
from transferfile import TransferFactory
from pathlib import Path
| 33.666667
| 88
| 0.62942
|
from transferfile import TransferFactory
from pathlib import Path
class TestTransfer:
def test_sftp_put(self):
sftp = TransferFactory.create(
type="sftp", host="172.17.0.2", username="admin", load_system_host_keys=True
)
sftp.put("testsftp.txt", "/home/admin/testsftp.txt")
assert True
def test_sftp_get(self):
sftp = TransferFactory.create(
type="sftp", host="172.17.0.2", username="admin", load_system_host_keys=True
)
sftp.get("./file/testsftp", "/home/admin/testsftp.txt")
file1 = Path("./testsftp.txt")
file2 = Path("./file/testsftp")
assert file1.stat().st_size == file2.stat().st_size
| 566
| -2
| 76
|
bdf6fc45c72e4cc335707fd6de0a5dd2d2ecb997
| 1,256
|
py
|
Python
|
sandbox/server.py
|
tacosync/quote-socket
|
e65b79968663d9d5a0d991166e791e0ee1029a91
|
[
"MIT"
] | null | null | null |
sandbox/server.py
|
tacosync/quote-socket
|
e65b79968663d9d5a0d991166e791e0ee1029a91
|
[
"MIT"
] | 1
|
2020-06-17T08:55:54.000Z
|
2020-06-17T08:55:54.000Z
|
sandbox/server.py
|
tacosync/quote-socket
|
e65b79968663d9d5a0d991166e791e0ee1029a91
|
[
"MIT"
] | null | null | null |
'''
Socket.IO server for testing
CLI:
python -m watchgod server.main [aiohttp|sanic|tornado|asgi]
Test results:
| connect | disconnect | event | background_task | Ctrl+C
---------+---------+------------+-------+-----------------|--------
aiohttp | O | O | O | X | O
sanic | O | O | O | X | O
tornado | O | O | O | O | O
asgi | O | O | O | ! | O
'''
import sys
from servers.aiohttp_server import main as aiohttp_main
from servers.asgi_server import main as asgi_main
from servers.sanic_server import main as sanic_main
from servers.tornado_server import main as tornado_main
if __name__ == '__main__':
main()
| 27.911111
| 70
| 0.494427
|
'''
Socket.IO server for testing
CLI:
python -m watchgod server.main [aiohttp|sanic|tornado|asgi]
Test results:
| connect | disconnect | event | background_task | Ctrl+C
---------+---------+------------+-------+-----------------|--------
aiohttp | O | O | O | X | O
sanic | O | O | O | X | O
tornado | O | O | O | O | O
asgi | O | O | O | ! | O
'''
import sys
from servers.aiohttp_server import main as aiohttp_main
from servers.asgi_server import main as asgi_main
from servers.sanic_server import main as sanic_main
from servers.tornado_server import main as tornado_main
def main():
if len(sys.argv) > 1:
framework = sys.argv[1]
else:
framework = 'tornado'
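    # map the CLI argument onto the matching bootstrap function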
boot_options = {
'aiohttp': aiohttp_main,
'asgi': asgi_main,
'sanic': sanic_main,
'tornado': tornado_main
}
if framework in boot_options:
bootstrap = boot_options[framework]
bootstrap()
else:
print('Unknown framework "%s".' % framework)
if __name__ == '__main__':
main()
| 416
| 0
| 25
|
5d135caa4532bcc7d1789591afc459bd38d60010
| 323
|
py
|
Python
|
1/video2frames.py
|
AnushK-Fro/ASCII-Video-Discord-Bot
|
c18e2278075e254f91492abb844713b7e5367542
|
[
"MIT"
] | null | null | null |
1/video2frames.py
|
AnushK-Fro/ASCII-Video-Discord-Bot
|
c18e2278075e254f91492abb844713b7e5367542
|
[
"MIT"
] | null | null | null |
1/video2frames.py
|
AnushK-Fro/ASCII-Video-Discord-Bot
|
c18e2278075e254f91492abb844713b7e5367542
|
[
"MIT"
] | null | null | null |
import cv2
video_file_name = ""
if __name__ == '__main__':
FrameCapture(video_file_name)
| 20.1875
| 49
| 0.606811
|
import cv2
video_file_name = ""
def FrameCapture(path):
    vidObj = cv2.VideoCapture(path)
    count = 0
    success = 1
    while success:
        success, image = vidObj.read()
        if not success:
            # the final read fails once the video is exhausted; don't write a None frame
            break
        cv2.imwrite("frame%d.jpg" % count, image)
        count += 1
if __name__ == '__main__':
FrameCapture(video_file_name)
| 199
| 0
| 25
|
8a1fa51fea4d1f0796cf567794820f1f43be633e
| 14,907
|
py
|
Python
|
vpv/model/model.py
|
mpi2/vpv
|
3d0445fa36a8916dfbeed07f50e702b1ffe0973a
|
[
"Apache-2.0"
] | 2
|
2016-08-15T22:06:28.000Z
|
2016-11-15T14:23:37.000Z
|
vpv/model/model.py
|
mpi2/vpv
|
3d0445fa36a8916dfbeed07f50e702b1ffe0973a
|
[
"Apache-2.0"
] | 1
|
2018-07-30T22:47:37.000Z
|
2018-07-30T22:47:37.000Z
|
vpv/model/model.py
|
mpi2/vpv
|
3d0445fa36a8916dfbeed07f50e702b1ffe0973a
|
[
"Apache-2.0"
] | 1
|
2020-08-05T03:54:02.000Z
|
2020-08-05T03:54:02.000Z
|
# Copyright 2016 Medical Research Council Harwell.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @author Neil Horner <n.horner@har.mrc.ac.uk>
"""
TODO: don't duplicate the full array for each _get_* function
"""
import numpy as np
import os
import tempfile
from PIL import Image
from PyQt5 import QtCore
from vpv.common import read_image, get_stage_and_modality, error_dialog
from vpv.annotations.impc_xml import load_xml, get_annotator_id_and_date
from vpv.annotations.annotations_model import centre_stage_options, PROCEDURE_METADATA, ANNOTATION_DONE_METADATA_FILE
from .ImageVolume import ImageVolume
from .HeatmapVolume import HeatmapVolume
from .VectorVolume import VectorVolume
from .ImageSeriesVolume import ImageSeriesVolume
from .VirtualStackVolume import VirtualStackVolume
import yaml
class DataModel(QtCore.QObject):
"""
The model for our app
"""
data_changed_signal = QtCore.pyqtSignal()
updating_started_signal = QtCore.pyqtSignal()
updating_msg_signal = QtCore.pyqtSignal(str)
updating_finished_signal = QtCore.pyqtSignal()
def update_msg_slot(self, msg):
"""
Gets update messages from the different volume classes which are then propagated to the main window to display
a progress message
Parameters
----------
msg: str
progress message
"""
        self.updating_msg_signal.emit(msg)
def load_annotation(self, ann_path):
"""
Load annotations from an IMPC xml file.
Parameters
----------
ann_path: str
path to xml annotation file
Returns
-------
"""
# Load in data from xml
try:
centerID, pipeline, project, doe, ex_id, spec_id, proc_id, \
simple_and_series_params, procedure_metadata = load_xml(ann_path)
except IOError as e:
print("Cannot read xml file {}\n".format(ann_path, e))
error_dialog(None, 'File read error', "Problem reading annotaitons file\n{}".format(ann_path))
return
# try to find a corresponding procedure_metadata.yaml file
ann_dir = os.path.split(ann_path)[0]
procedure_metadata_file = os.path.join(ann_dir, PROCEDURE_METADATA)
if not os.path.isfile(procedure_metadata_file):
vol = None # Should also check if annotation options have been loaded
else:
vol_id = os.path.basename(ann_dir) # The annotation directory is the same name as the annotated volume
vol = self._volumes.get(vol_id)
if not vol:
return "Could not load annotation: {}. Not able to find loaded volume with same id".format(vol_id)
vol.annotations.clear()
# Get the dict that contains the available options for a given center/stage
annotation_date_param_id = get_annotator_id_and_date(proc_id)[1]
ann_date = [x[1] for x in procedure_metadata if x[0] == annotation_date_param_id]
ann_date = ann_date[0]
vol.annotations.annotation_date = ann_date
default_opts = centre_stage_options.opts
stage = get_stage_and_modality(proc_id, centerID)
######################################
# This all needs moving into Annotations
# Set the xml file path which is where it will get resaved to
vol.annotations.saved_xml_fname = ann_path
# Get all the simpleParameter entries form the xml file
for xml_param, xml_data in simple_and_series_params.items():
option = xml_data['option']
xyz = xml_data.get('xyz')
if xyz:
x, y, z = [int(i) for i in xyz]
else:
x = y = z = None
dims = vol.shape_xyz()
# Some of the data needed to create an annotation object is not recorded in the XML output
# So we need to load that from the center annotation options file
for center, default_data in default_opts['centers'].items():
if default_data['short_name'] == centerID:
params = default_data['procedures'][proc_id]['parameters']
for param_id, default_param_info in params.items():
if param_id == xml_param:
name = default_param_info['name']
options = default_opts['available_options'][
default_param_info['options']] # available options for this parameter
order = default_param_info['order']
is_mandatory = default_param_info['mandatory']
vol.annotations.add_impc_annotation(x, y, z, xml_param, name, options, option,
stage,
order, is_mandatory, dims)
vol.annotations._load_done_status()
# def load_annotation(self, ann_path):
# """
# Load annotations from an IMPC xml file.
#
# Parameters
# ----------
# ann_path: str
# path to xml annotation file
#
# Returns
# -------
#
# """
# # Load in data from xml
# centerID, pipeline, project, doe, ex_id, spec_id, proc_id, \
# simple_and_series_params, procedure_metadata = load_xml(ann_path)
#
# # try to find a corresponding procedure_metadata.yaml file
# ann_dir = os.path.split(ann_path)[0]
# procedure_metadata_file = os.path.join(ann_dir, PROCEDURE_METADATA)
# if not os.path.isfile(procedure_metadata_file):
# vol = None # Should also check if annotation options have been loaded
# else:
# vol_id = os.path.basename(ann_dir) # The annotation directory is the same name as the annotated volume
# vol = self._volumes.get(vol_id)
#
# if vol:
# # Get the dict that contains the available options for a given center/stage
# default_opts = centre_stage_options.opts
# stage = get_stage_from_proc_id(proc_id, centerID)
#
# # Get all the simpleParameter entries form the xml file
# for xml_param, xml_data in simple_and_series_params.items():
# option = xml_data['option']
# xyz = xml_data.get('xyz')
# if xyz:
# x, y, z = [int(i) for i in xyz]
# else:
# x = y = z = None
# dims = vol.shape_xyz()
#
    #             # Some of the data needed to create an annotation object is not recorded in the XML output
# # So we need to load that from the center annotation options file
# for center, default_data in default_opts['centers'].items():
# if default_data['short_name'] == centerID:
# params = default_data['stages'][stage]['parameters']
#
# for param_id, default_param_info in params.items():
# if param_id == xml_param:
# name = default_param_info['name']
# options = default_opts['available_options'][default_param_info['options']]# available options for this parameter
# order = default_param_info['options']
# is_mandatory = default_param_info['mandatory']
#
# vol.annotations.add_impc_annotation(x, y, z, xml_param, name, options, option, stage,
# order, is_mandatory, dims)
#
# else:
# return "Could not load annotation: {}. Not able to find loaded volume with same id".format(vol_id)
# return None
def add_volume(self, volpath, data_type, memory_map, fdr_thresholds=False) -> str:
"""
Load a volume into a subclass of a Volume object
Parameters
----------
volpath: str
data_type: str
memory_map: bool
fdr_thresholds: fdict
q -> t statistic mappings
{0.01: 3.4,
0.05:, 3.1}
Returns
-------
unique id of loaded image
"""
if data_type != 'virtual_stack':
volpath = str(volpath)
n = os.path.basename(volpath)
unique_name = self.create_unique_name(n)
else:
n = os.path.basename(os.path.split(volpath[0])[0])
unique_name = self.create_unique_name(n)
if data_type == 'heatmap':
vol = HeatmapVolume(volpath, self, 'heatmap')
if fdr_thresholds or fdr_thresholds is None:
vol.fdr_thresholds = fdr_thresholds
vol.name = unique_name
self._data[vol.name] = vol
elif data_type == 'vol':
vol = ImageVolume(volpath, self, 'volume', memory_map)
vol.name = unique_name
self._volumes[vol.name] = vol
elif data_type == 'virtual_stack':
vol = VirtualStackVolume(volpath, self, 'virtual_stack', memory_map)
vol.name = unique_name
self._volumes[vol.name] = vol
elif data_type == 'vector':
vol = VectorVolume(volpath, self, 'vector')
vol.name = unique_name
self._vectors[vol.name] = vol
self.id_counter += 1
self.data_changed_signal.emit()
return unique_name
def create_unique_name(self, name):
"""
Create a unique name for each volume. If it already exists, append a digit in a bracket to it
:param name:
:return:
"""
name = os.path.splitext(name)[0]
if name not in self._volumes and name not in self._data and name not in self._vectors:
return name
else:
for i in range(1, 100):
new_name = '{}({})'.format(name, i)
if new_name not in self._volumes and new_name not in self._data:
return new_name
def write_temporary_annotations_metadata(self):
"""
Returns
-------
"""
from os.path import join
for id_, vol in self._volumes.items():
if vol.annotations.annotation_dir:
# Check for previous done list
done_file = join(vol.annotations.annotation_dir, ANNOTATION_DONE_METADATA_FILE)
done_status = {}
for ann in vol.annotations:
done_status[ann.term] = ann.looked_at
with open(done_file, 'w') as fh:
fh.write(yaml.dump(done_status))
| 37.08209
| 146
| 0.581673
|
# Copyright 2016 Medical Research Council Harwell.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @author Neil Horner <n.horner@har.mrc.ac.uk>
"""
TODO: don't duplicate the full array for each _get_* function
"""
import numpy as np
import os
import tempfile
from PIL import Image
from PyQt5 import QtCore
from vpv.common import read_image, get_stage_and_modality, error_dialog
from vpv.annotations.impc_xml import load_xml, get_annotator_id_and_date
from vpv.annotations.annotations_model import centre_stage_options, PROCEDURE_METADATA, ANNOTATION_DONE_METADATA_FILE
from .ImageVolume import ImageVolume
from .HeatmapVolume import HeatmapVolume
from .VectorVolume import VectorVolume
from .ImageSeriesVolume import ImageSeriesVolume
from .VirtualStackVolume import VirtualStackVolume
import yaml
class LoadVirtualStackWorker(QtCore.QThread):
progress_signal = QtCore.pyqtSignal([str])
def __init__(self, file_paths):
QtCore.QThread.__init__(self)
self.file_paths = file_paths
self.memmap_result = None # Populated at end of run()
def sitk_load(self, p):
read_image(p, convert_to_ras=True)
def pil_load(self, p):
im = Image.open(p)
return np.array(im)
def run(self):
size = len(self.file_paths)
# SimpleITK reads in 2D bmps as 3D. So use PIL instead
if self.file_paths[0].lower().endswith('.bmp'):
reader = self.pil_load
else:
reader = self.sitk_load
arr = reader(self.file_paths[0])
dtype = arr.dtype
zyx = list(arr.shape)
zyx.insert(0, len(self.file_paths))
t = tempfile.TemporaryFile()
m = np.memmap(t, dtype=dtype, mode='w+', shape=tuple(zyx))
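        # stream slices into a disk-backed memmap so large stacks never sit fully in RAM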
for i, path in enumerate(sorted(self.file_paths)):
img_arr = reader(path)
m[i] = img_arr
self.progress_signal.emit("Loading virtual stack.. {}%".format(str(100.0/size * i)))
self.memmap_result = m
class DataModel(QtCore.QObject):
"""
The model for our app
"""
data_changed_signal = QtCore.pyqtSignal()
updating_started_signal = QtCore.pyqtSignal()
updating_msg_signal = QtCore.pyqtSignal(str)
updating_finished_signal = QtCore.pyqtSignal()
def update_msg_slot(self, msg):
"""
Gets update messages from the different volume classes which are then propagated to the main window to display
a progress message
Parameters
----------
msg: str
progress message
"""
        self.updating_msg_signal.emit(msg)
def __init__(self):
super(DataModel, self).__init__()
self.id_counter = 0
self._volumes = {}
self._data = {}
self._vectors = {}
def change_vol_name(self, old_name, new_name):
# Only work on image volumes for now
if self._volumes.get(old_name):
# Change the dictionary key entry
self._volumes[new_name] = self._volumes.pop(old_name)
# Change the id on the object
self._volumes[new_name].name = new_name
def set_interpolation(self, onoff):
for vol in self._volumes.values():
vol.set_interpolation(onoff)
def clear_data(self):
keys = list(self._volumes.keys())
for k in keys:
del self._volumes[k]
self._volumes = {}
self._data = {}
def volume_id_list(self, sort=True):
if sort: # Not sure if we need this
return sorted([id_ for id_ in self._volumes])
else:
return [id_ for id_ in self._volumes]
def data_id_list(self):
return sorted([id_ for id_ in self._data])
    def vector_id_list(self):
        return sorted([id_ for id_ in self._vectors])
    def destroy_data(self):
        # NOTE: the enclosing def line was lost in this copy; the method name is assumed
        for key in self._data.keys():
            self._data[key].destroy()
def all_volumes(self):
return [vol for vol in self._volumes.values()]
def getvol(self, id_):
# bodge. should merge vols and data, as they have unique ids
vol = None
if id_ == 'None':
return 'None'
try:
vol = self._volumes[id_]
except KeyError:
pass
if not vol:
try:
vol = self._data[id_]
except KeyError:
pass
if not vol:
try:
vol = self._vectors[id_]
except KeyError:
return "None" # Need to do something else here, like logging
return vol
def getdata(self, id_):
if id_ == 'None':
return 'None'
return self._data[id_]
def load_image_series(self, series_paths, memory_map):
volpath = str(series_paths[0])
n = os.path.basename(volpath)
unique_name = self.create_unique_name(n)
vol = ImageSeriesVolume(series_paths, self, 'series', memory_map)
vol.name = unique_name
self._volumes[vol.name] = vol
self.id_counter += 1
def load_annotation(self, ann_path):
"""
Load annotations from an IMPC xml file.
Parameters
----------
ann_path: str
path to xml annotation file
Returns
-------
"""
# Load in data from xml
try:
centerID, pipeline, project, doe, ex_id, spec_id, proc_id, \
simple_and_series_params, procedure_metadata = load_xml(ann_path)
except IOError as e:
print("Cannot read xml file {}\n".format(ann_path, e))
error_dialog(None, 'File read error', "Problem reading annotaitons file\n{}".format(ann_path))
return
# try to find a corresponding procedure_metadata.yaml file
ann_dir = os.path.split(ann_path)[0]
procedure_metadata_file = os.path.join(ann_dir, PROCEDURE_METADATA)
if not os.path.isfile(procedure_metadata_file):
vol = None # Should also check if annotation options have been loaded
else:
vol_id = os.path.basename(ann_dir) # The annotation directory is the same name as the annotated volume
vol = self._volumes.get(vol_id)
if not vol:
return "Could not load annotation: {}. Not able to find loaded volume with same id".format(vol_id)
vol.annotations.clear()
# Get the dict that contains the available options for a given center/stage
annotation_date_param_id = get_annotator_id_and_date(proc_id)[1]
ann_date = [x[1] for x in procedure_metadata if x[0] == annotation_date_param_id]
ann_date = ann_date[0]
vol.annotations.annotation_date = ann_date
default_opts = centre_stage_options.opts
stage = get_stage_and_modality(proc_id, centerID)
######################################
# This all needs moving into Annotations
# Set the xml file path which is where it will get resaved to
vol.annotations.saved_xml_fname = ann_path
# Get all the simpleParameter entries form the xml file
for xml_param, xml_data in simple_and_series_params.items():
option = xml_data['option']
xyz = xml_data.get('xyz')
if xyz:
x, y, z = [int(i) for i in xyz]
else:
x = y = z = None
dims = vol.shape_xyz()
# Some of the data needed to create an annotation object is not recorded in the XML output
# So we need to load that from the center annotation options file
for center, default_data in default_opts['centers'].items():
if default_data['short_name'] == centerID:
params = default_data['procedures'][proc_id]['parameters']
for param_id, default_param_info in params.items():
if param_id == xml_param:
name = default_param_info['name']
options = default_opts['available_options'][
default_param_info['options']] # available options for this parameter
order = default_param_info['order']
is_mandatory = default_param_info['mandatory']
vol.annotations.add_impc_annotation(x, y, z, xml_param, name, options, option,
stage,
order, is_mandatory, dims)
vol.annotations._load_done_status()
# def load_annotation(self, ann_path):
# """
# Load annotations from an IMPC xml file.
#
# Parameters
# ----------
# ann_path: str
# path to xml annotation file
#
# Returns
# -------
#
# """
# # Load in data from xml
# centerID, pipeline, project, doe, ex_id, spec_id, proc_id, \
# simple_and_series_params, procedure_metadata = load_xml(ann_path)
#
# # try to find a corresponding procedure_metadata.yaml file
# ann_dir = os.path.split(ann_path)[0]
# procedure_metadata_file = os.path.join(ann_dir, PROCEDURE_METADATA)
# if not os.path.isfile(procedure_metadata_file):
# vol = None # Should also check if annotation options have been loaded
# else:
# vol_id = os.path.basename(ann_dir) # The annotation directory is the same name as the annotated volume
# vol = self._volumes.get(vol_id)
#
# if vol:
# # Get the dict that contains the available options for a given center/stage
# default_opts = centre_stage_options.opts
# stage = get_stage_from_proc_id(proc_id, centerID)
#
# # Get all the simpleParameter entries form the xml file
# for xml_param, xml_data in simple_and_series_params.items():
# option = xml_data['option']
# xyz = xml_data.get('xyz')
# if xyz:
# x, y, z = [int(i) for i in xyz]
# else:
# x = y = z = None
# dims = vol.shape_xyz()
#
    #             # Some of the data needed to create an annotation object is not recorded in the XML output
# # So we need to load that from the center annotation options file
# for center, default_data in default_opts['centers'].items():
# if default_data['short_name'] == centerID:
# params = default_data['stages'][stage]['parameters']
#
# for param_id, default_param_info in params.items():
# if param_id == xml_param:
# name = default_param_info['name']
# options = default_opts['available_options'][default_param_info['options']]# available options for this parameter
# order = default_param_info['options']
# is_mandatory = default_param_info['mandatory']
#
# vol.annotations.add_impc_annotation(x, y, z, xml_param, name, options, option, stage,
# order, is_mandatory, dims)
#
# else:
# return "Could not load annotation: {}. Not able to find loaded volume with same id".format(vol_id)
# return None
def add_volume(self, volpath, data_type, memory_map, fdr_thresholds=False) -> str:
"""
Load a volume into a subclass of a Volume object
Parameters
----------
volpath: str
data_type: str
memory_map: bool
fdr_thresholds: fdict
q -> t statistic mappings
{0.01: 3.4,
0.05:, 3.1}
Returns
-------
unique id of loaded image
"""
if data_type != 'virtual_stack':
volpath = str(volpath)
n = os.path.basename(volpath)
unique_name = self.create_unique_name(n)
else:
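            # a virtual stack arrives as a list of slice paths; name it after their folder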
n = os.path.basename(os.path.split(volpath[0])[0])
unique_name = self.create_unique_name(n)
if data_type == 'heatmap':
vol = HeatmapVolume(volpath, self, 'heatmap')
if fdr_thresholds or fdr_thresholds is None:
vol.fdr_thresholds = fdr_thresholds
vol.name = unique_name
self._data[vol.name] = vol
elif data_type == 'vol':
vol = ImageVolume(volpath, self, 'volume', memory_map)
vol.name = unique_name
self._volumes[vol.name] = vol
elif data_type == 'virtual_stack':
vol = VirtualStackVolume(volpath, self, 'virtual_stack', memory_map)
vol.name = unique_name
self._volumes[vol.name] = vol
elif data_type == 'vector':
vol = VectorVolume(volpath, self, 'vector')
vol.name = unique_name
self._vectors[vol.name] = vol
self.id_counter += 1
self.data_changed_signal.emit()
return unique_name
def create_unique_name(self, name):
"""
Create a unique name for each volume. If it already exists, append a digit in a bracket to it
:param name:
:return:
"""
name = os.path.splitext(name)[0]
if name not in self._volumes and name not in self._data and name not in self._vectors:
return name
else:
for i in range(1, 100):
new_name = '{}({})'.format(name, i)
if new_name not in self._volumes and new_name not in self._data:
return new_name
def write_temporary_annotations_metadata(self):
"""
Returns
-------
"""
from os.path import join
for id_, vol in self._volumes.items():
if vol.annotations.annotation_dir:
# Check for previous done list
done_file = join(vol.annotations.annotation_dir, ANNOTATION_DONE_METADATA_FILE)
done_status = {}
for ann in vol.annotations:
done_status[ann.term] = ann.looked_at
with open(done_file, 'w') as fh:
fh.write(yaml.dump(done_status))
| 3,090
| 179
| 320
|
7f31bb9213a5312eb7c272816e3ae00023c5e887
| 16,836
|
py
|
Python
|
python2.7/site-packages/twisted/trial/test/test_reporter.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 4
|
2020-10-31T19:52:05.000Z
|
2021-09-22T11:39:27.000Z
|
python2.7/site-packages/twisted/trial/test/test_reporter.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | null | null | null |
python2.7/site-packages/twisted/trial/test/test_reporter.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 2
|
2020-02-27T08:28:35.000Z
|
2020-09-13T12:39:26.000Z
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange <jml@twistedmatrix.com>
import errno, sys, os, re, StringIO
from twisted.internet.utils import suppressWarnings
from twisted.python import failure
from twisted.trial import unittest, runner, reporter, util
from twisted.trial.test import erroneous
class BrokenStream(object):
"""
Stream-ish object that raises a signal interrupt error. We use this to make
sure that Trial still manages to write what it needs to write.
"""
written = False
flushed = False
class MockColorizer:
"""
Used by TestTreeReporter to make sure that output is colored correctly.
"""
supported = classmethod(supported)
| 35.897655
| 79
| 0.622238
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange <jml@twistedmatrix.com>
import errno, sys, os, re, StringIO
from twisted.internet.utils import suppressWarnings
from twisted.python import failure
from twisted.trial import unittest, runner, reporter, util
from twisted.trial.test import erroneous
class BrokenStream(object):
"""
Stream-ish object that raises a signal interrupt error. We use this to make
sure that Trial still manages to write what it needs to write.
"""
written = False
flushed = False
def __init__(self, fObj):
self.fObj = fObj
def write(self, s):
if self.written:
return self.fObj.write(s)
self.written = True
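        # fail with EINTR exactly once; subsequent writes go through to the real stream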
raise IOError(errno.EINTR, "Interrupted write")
def flush(self):
if self.flushed:
return self.fObj.flush()
self.flushed = True
raise IOError(errno.EINTR, "Interrupted flush")
class StringTest(unittest.TestCase):
def stringComparison(self, expect, output):
output = filter(None, output)
self.failUnless(len(expect) <= len(output),
"Must have more observed than expected"
"lines %d < %d" % (len(output), len(expect)))
REGEX_PATTERN_TYPE = type(re.compile(''))
for exp, out in zip(expect, output):
if exp is None:
continue
elif isinstance(exp, str):
self.assertSubstring(exp, out)
elif isinstance(exp, REGEX_PATTERN_TYPE):
self.failUnless(exp.match(out), "%r did not match string %r"
% (exp.pattern, out))
else:
raise TypeError("don't know what to do with object %r"
% (exp,))
class TestTestResult(unittest.TestCase):
def setUp(self):
self.result = reporter.TestResult()
def test_pyunitAddError(self):
# pyunit passes an exc_info tuple directly to addError
try:
raise RuntimeError('foo')
except RuntimeError, excValue:
self.result.addError(self, sys.exc_info())
failure = self.result.errors[0][1]
self.assertEqual(excValue, failure.value)
self.assertEqual(RuntimeError, failure.type)
def test_pyunitAddFailure(self):
# pyunit passes an exc_info tuple directly to addFailure
try:
raise self.failureException('foo')
except self.failureException, excValue:
self.result.addFailure(self, sys.exc_info())
failure = self.result.failures[0][1]
self.assertEqual(excValue, failure.value)
self.assertEqual(self.failureException, failure.type)
class TestReporterRealtime(TestTestResult):
def setUp(self):
output = StringIO.StringIO()
self.result = reporter.Reporter(output, realtime=True)
class TestErrorReporting(StringTest):
doubleSeparator = re.compile(r'^=+$')
def setUp(self):
self.loader = runner.TestLoader()
self.output = StringIO.StringIO()
self.result = reporter.Reporter(self.output)
def getOutput(self, suite):
result = self.getResult(suite)
result.printErrors()
return self.output.getvalue()
def getResult(self, suite):
suite.run(self.result)
return self.result
def testFormatErroredMethod(self):
suite = self.loader.loadClass(erroneous.TestFailureInSetUp)
output = self.getOutput(suite).splitlines()
match = [self.doubleSeparator,
('[ERROR]: twisted.trial.test.erroneous.'
'TestFailureInSetUp.test_noop'),
'Traceback (most recent call last):',
re.compile(r'^\s+File .*erroneous\.py., line \d+, in setUp$'),
re.compile(r'^\s+raise FoolishError, '
r'.I am a broken setUp method.$'),
('twisted.trial.test.erroneous.FoolishError: '
'I am a broken setUp method')]
self.stringComparison(match, output)
def testFormatFailedMethod(self):
suite = self.loader.loadMethod(erroneous.TestRegularFail.test_fail)
output = self.getOutput(suite).splitlines()
match = [
self.doubleSeparator,
'[FAIL]: '
'twisted.trial.test.erroneous.TestRegularFail.test_fail',
'Traceback (most recent call last):',
re.compile(r'^\s+File .*erroneous\.py., line \d+, in test_fail$'),
re.compile(r'^\s+self\.fail\("I fail"\)$'),
'twisted.trial.unittest.FailTest: I fail'
]
self.stringComparison(match, output)
def testDoctestError(self):
from twisted.trial.test import erroneous
suite = self.loader.loadDoctests(erroneous)
output = self.getOutput(suite)
path = 'twisted.trial.test.erroneous.unexpectedException'
for substring in ['1/0', 'ZeroDivisionError',
'Exception raised:', path]:
self.assertSubstring(substring, output)
self.failUnless(re.search('Fail(ed|ure in) example:', output),
"Couldn't match 'Failure in example: ' "
"or 'Failed example: '")
expect = [self.doubleSeparator,
re.compile(r'\[(ERROR|FAIL)\]: .*[Dd]octest.*'
+ re.escape(path))]
self.stringComparison(expect, output.splitlines())
def testHiddenException(self):
"""
Check that errors in C{DelayedCall}s get reported, even if the
test already has a failure.
Only really necessary for testing the deprecated style of tests that
use iterate() directly. See
L{erroneous.DelayedCall.testHiddenException} for more details.
"""
test = erroneous.DelayedCall('testHiddenException')
result = self.getResult(test)
self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.errors[0][1].getErrorMessage(),
test.hiddenExceptionMsg)
class TracebackHandling(unittest.TestCase):
def getErrorFrames(self, test):
stream = StringIO.StringIO()
result = reporter.Reporter(stream)
test.run(result)
bads = result.failures + result.errors
assert len(bads) == 1
assert bads[0][0] == test
return result._trimFrames(bads[0][1].frames)
def checkFrames(self, observedFrames, expectedFrames):
for observed, expected in zip(observedFrames, expectedFrames):
self.assertEqual(observed[0], expected[0])
observedSegs = os.path.splitext(observed[1])[0].split(os.sep)
expectedSegs = expected[1].split('/')
self.assertEqual(observedSegs[-len(expectedSegs):],
expectedSegs)
self.assertEqual(len(observedFrames), len(expectedFrames))
def test_basic(self):
test = erroneous.TestRegularFail('test_fail')
frames = self.getErrorFrames(test)
self.checkFrames(frames,
[('test_fail', 'twisted/trial/test/erroneous')])
def test_subroutine(self):
test = erroneous.TestRegularFail('test_subfail')
frames = self.getErrorFrames(test)
self.checkFrames(frames,
[('test_subfail', 'twisted/trial/test/erroneous'),
('subroutine', 'twisted/trial/test/erroneous')])
def test_deferred(self):
test = erroneous.TestFailureInDeferredChain('test_fail')
frames = self.getErrorFrames(test)
self.checkFrames(frames,
[('_later', 'twisted/trial/test/erroneous')])
def test_noFrames(self):
result = reporter.Reporter(None)
self.assertEqual([], result._trimFrames([]))
def test_oneFrame(self):
result = reporter.Reporter(None)
self.assertEqual(['fake frame'], result._trimFrames(['fake frame']))
class FormatFailures(StringTest):
def setUp(self):
try:
raise RuntimeError('foo')
except RuntimeError:
self.f = failure.Failure()
self.f.frames = [
['foo', 'foo/bar.py', 5, [('x', 5)], [('y', 'orange')]],
['qux', 'foo/bar.py', 10, [('a', 'two')], [('b', 'MCMXCIX')]]
]
self.stream = StringIO.StringIO()
self.result = reporter.Reporter(self.stream)
def test_formatDefault(self):
tb = self.result._formatFailureTraceback(self.f)
self.stringComparison([
'Traceback (most recent call last):',
' File "foo/bar.py", line 5, in foo',
re.compile(r'^\s*$'),
' File "foo/bar.py", line 10, in qux',
re.compile(r'^\s*$'),
'RuntimeError: foo'], tb.splitlines())
def test_formatString(self):
tb = '''
File "twisted/trial/unittest.py", line 256, in failUnlessSubstring
return self.failUnlessIn(substring, astring, msg)
exceptions.TypeError: iterable argument required
'''
expected = '''
File "twisted/trial/unittest.py", line 256, in failUnlessSubstring
return self.failUnlessIn(substring, astring, msg)
exceptions.TypeError: iterable argument required
'''
formatted = self.result._formatFailureTraceback(tb)
self.assertEqual(expected, formatted)
def test_mutation(self):
frames = self.f.frames[:]
tb = self.result._formatFailureTraceback(self.f)
self.assertEqual(self.f.frames, frames)
class PyunitTestNames(unittest.TestCase):
def setUp(self):
from twisted.trial.test import sample
self.stream = StringIO.StringIO()
self.test = sample.PyunitTest('test_foo')
def test_verboseReporter(self):
result = reporter.VerboseTextReporter(self.stream)
result.startTest(self.test)
output = self.stream.getvalue()
self.failUnlessEqual(
output, 'twisted.trial.test.sample.PyunitTest.test_foo ... ')
def test_treeReporter(self):
result = reporter.TreeReporter(self.stream)
result.startTest(self.test)
output = self.stream.getvalue()
output = output.splitlines()[-1].strip()
self.failUnlessEqual(output, result.getDescription(self.test) + ' ...')
def test_getDescription(self):
result = reporter.TreeReporter(self.stream)
output = result.getDescription(self.test)
self.failUnlessEqual(output, 'test_foo')
def test_minimalReporter(self):
result = reporter.MinimalReporter(self.stream)
self.test.run(result)
result.printSummary()
output = self.stream.getvalue().strip().split(' ')
self.failUnlessEqual(output[1:], ['1', '1', '0', '0', '0'])
class TrialTestNames(unittest.TestCase):
def setUp(self):
from twisted.trial.test import sample
self.stream = StringIO.StringIO()
self.test = sample.FooTest('test_foo')
def test_verboseReporter(self):
result = reporter.VerboseTextReporter(self.stream)
result.startTest(self.test)
output = self.stream.getvalue()
self.failUnlessEqual(output, self.test.id() + ' ... ')
def test_treeReporter(self):
result = reporter.TreeReporter(self.stream)
result.startTest(self.test)
output = self.stream.getvalue()
output = output.splitlines()[-1].strip()
self.failUnlessEqual(output, result.getDescription(self.test) + ' ...')
def test_treeReporterWithDocstrings(self):
"""A docstring"""
result = reporter.TreeReporter(self.stream)
self.assertEqual(result.getDescription(self),
'test_treeReporterWithDocstrings')
def test_getDescription(self):
result = reporter.TreeReporter(self.stream)
output = result.getDescription(self.test)
self.failUnlessEqual(output, "test_foo")
class SkipTest(unittest.TestCase):
def setUp(self):
from twisted.trial.test import sample
self.stream = StringIO.StringIO()
self.result = reporter.Reporter(self.stream)
self.test = sample.FooTest('test_foo')
def test_accumulation(self):
self.result.addSkip(self.test, 'some reason')
self.failUnlessEqual(1, len(self.result.skips))
def test_success(self):
self.result.addSkip(self.test, 'some reason')
self.failUnlessEqual(True, self.result.wasSuccessful())
def test_summary(self):
self.result.addSkip(self.test, 'some reason')
self.result.printSummary()
output = self.stream.getvalue()
prefix = 'PASSED '
self.failUnless(output.startswith(prefix))
self.failUnlessEqual(output[len(prefix):].strip(), '(skips=1)')
def test_basicErrors(self):
self.result.addSkip(self.test, 'some reason')
self.result.printErrors()
output = self.stream.getvalue().splitlines()[-1]
self.failUnlessEqual(output.strip(), 'some reason')
def test_booleanSkip(self):
self.result.addSkip(self.test, True)
self.result.printErrors()
output = self.stream.getvalue().splitlines()[-1]
self.failUnlessEqual(output.strip(), 'True')
def test_exceptionSkip(self):
try:
1/0
except Exception, e:
error = e
self.result.addSkip(self.test, error)
self.result.printErrors()
output = '\n'.join(self.stream.getvalue().splitlines()[3:]).strip()
self.failUnlessEqual(output, str(e))
class MockColorizer:
"""
Used by TestTreeReporter to make sure that output is colored correctly.
"""
def __init__(self, stream):
self.log = []
def supported(self):
return True
supported = classmethod(supported)
def write(self, text, color):
self.log.append((color, text))
class TestTreeReporter(unittest.TestCase):
def setUp(self):
from twisted.trial.test import sample
self.test = sample.FooTest('test_foo')
self.stream = StringIO.StringIO()
self.result = reporter.TreeReporter(self.stream)
self.result._colorizer = MockColorizer(self.stream)
self.log = self.result._colorizer.log
def makeError(self):
try:
1/0
except ZeroDivisionError:
f = failure.Failure()
return f
def test_cleanupError(self):
"""
Run cleanupErrors and check that the output is correct, and colored
correctly.
"""
f = self.makeError()
self.result.cleanupErrors(f)
color, text = self.log[0]
self.assertEqual(color.strip(), self.result.ERROR)
self.assertEqual(text.strip(), 'cleanup errors')
color, text = self.log[1]
self.assertEqual(color.strip(), self.result.ERROR)
self.assertEqual(text.strip(), '[ERROR]')
test_cleanupError = suppressWarnings(
test_cleanupError,
util.suppress(category=reporter.BrokenTestCaseWarning))
class TestReporter(unittest.TestCase):
resultFactory = reporter.Reporter
def setUp(self):
from twisted.trial.test import sample
self.test = sample.FooTest('test_foo')
self.stream = StringIO.StringIO()
self.result = self.resultFactory(self.stream)
self._timer = 0
self.result._getTime = self._getTime
def _getTime(self):
self._timer += 1
return self._timer
def test_startStop(self):
self.result.startTest(self.test)
self.result.stopTest(self.test)
self.assertTrue(self.result._lastTime > 0)
self.assertEqual(self.result.testsRun, 1)
self.assertEqual(self.result.wasSuccessful(), True)
def test_brokenStream(self):
"""
Test that the reporter safely writes to its stream.
"""
result = self.resultFactory(stream=BrokenStream(self.stream))
result.writeln("Hello")
self.assertEqual(self.stream.getvalue(), 'Hello\n')
self.stream.truncate(0)
result.writeln("Hello %s!", 'World')
self.assertEqual(self.stream.getvalue(), 'Hello World!\n')
class TestSafeStream(unittest.TestCase):
def test_safe(self):
"""
Test that L{reporter.SafeStream} successfully write to its original
stream even if an interrupt happens during the write.
"""
stream = StringIO.StringIO()
broken = BrokenStream(stream)
safe = reporter.SafeStream(broken)
safe.write("Hello")
self.assertEqual(stream.getvalue(), "Hello")
class TestTimingReporter(TestReporter):
resultFactory = reporter.TimingTextReporter
| 11,697
| 3,167
| 1,209
|
beb3e066fe6e8c920e49769ce5fec01b8e52d337
| 218
|
py
|
Python
|
run.py
|
lowell-dev-club/live-flask-workshop
|
a57a3db79fd772a6676ad3bce1ff45e1a5c1c06d
|
[
"MIT"
] | null | null | null |
run.py
|
lowell-dev-club/live-flask-workshop
|
a57a3db79fd772a6676ad3bce1ff45e1a5c1c06d
|
[
"MIT"
] | null | null | null |
run.py
|
lowell-dev-club/live-flask-workshop
|
a57a3db79fd772a6676ad3bce1ff45e1a5c1c06d
|
[
"MIT"
] | null | null | null |
# Import Flask app object from project folder (For python this means from the __init__.py file)
from project import app
# Run flask app with debug on and listening on port 8000
app.run(
debug=True,
port=8000
)
| 27.25
| 95
| 0.747706
|
# Import Flask app object from project folder (For python this means from the __init__.py file)
from project import app
# Run flask app with debug on and listening on port 8000
app.run(
debug=True,
port=8000
)
| 0
| 0
| 0
|
acb8f38d1fce7e807bb197cfdc155940d8636b58
| 704
|
py
|
Python
|
Section 4 - Lists/prediction exercise.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | 10
|
2020-02-14T14:28:15.000Z
|
2022-02-02T18:44:11.000Z
|
Section 4 - Lists/prediction exercise.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | null | null | null |
Section 4 - Lists/prediction exercise.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | 8
|
2020-03-25T09:27:42.000Z
|
2021-11-03T15:24:38.000Z
|
# Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Prediction exercise
fruits = ['Strawberry', 'Lemon', 'Orange', 'Raspberry', 'Cherry']
print(fruits[0])
print(fruits[3])
print(fruits[2])
print(fruits[len(fruits)-1])
print(fruits[1])
fruit = fruits[2+2]
print(fruit)
print(fruit[0])
orange = fruits[1]
print(orange)
lemon = fruits[1]
print(lemon)
# Additional question (on the same page)
# A string is in fact a list of single character strings
# The first index refers to the list element
# The second index is applied to that element
print(fruits[0][0]+fruits[1][0]+fruits[2][0])
| 23.466667
| 66
| 0.693182
|
# Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: Prediction exercise
fruits = ['Strawberry', 'Lemon', 'Orange', 'Raspberry', 'Cherry']
print(fruits[0])
print(fruits[3])
print(fruits[2])
print(fruits[len(fruits)-1])
print(fruits[1])
fruit = fruits[2+2]
print(fruit)
print(fruit[0])
orange = fruits[1]
print(orange)
lemon = fruits[1]
print(lemon)
# Additional question (on the same page)
# A string is in fact a list of single character strings
# The first index refers to the list element
# The second index is applied to that element
print(fruits[0][0]+fruits[1][0]+fruits[2][0])
| 0
| 0
| 0
|
b5797a6e0b5ac972df8e6cf74d6538070b12831e
| 762
|
py
|
Python
|
src/Overload.py
|
LukeMcCulloch/PyCFD
|
6720e6575e25f8c274ef591d6c215de90a740935
|
[
"MIT"
] | 1
|
2020-07-04T15:42:15.000Z
|
2020-07-04T15:42:15.000Z
|
src/Overload.py
|
LukeMcCulloch/PyCFD
|
6720e6575e25f8c274ef591d6c215de90a740935
|
[
"MIT"
] | null | null | null |
src/Overload.py
|
LukeMcCulloch/PyCFD
|
6720e6575e25f8c274ef591d6c215de90a740935
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 07:43:40 2020
@author: lukemcculloch
"""
import numpy as np
| 21.771429
| 45
| 0.566929
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 07:43:40 2020
@author: lukemcculloch
"""
import numpy as np
class Overload(object):
def __init__(self, vector):
self.vector = vector
self.parentcell = []
def __add__(self, other):
if isinstance(other, Overload):
return self.vector + other.vector
else:
return self.vector + other
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, Overload):
return self.vector - other.vector
else:
return self.vector - other
    def __rsub__(self, other):
        # reflected subtraction must compute other - self, not self - other
        return other - self.vector
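A quick usage sketch for the class above (assumes only numpy, already imported; the comments show the values that follow from the reflected-operator behaviour):
v = Overload(np.array([1.0, 2.0, 3.0]))
w = Overload(np.array([10.0, 20.0, 30.0]))
print(v + w)    # [11. 22. 33.]  (__add__ unwraps the other Overload)
print(1.0 + v)  # [2. 3. 4.]     (float defers to __radd__)
print(1.0 - v)  # [ 0. -1. -2.]  (__rsub__ computes other - self)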
| 430
| 2
| 178
|
8f049df44809d1fe48e8f05eb98329ff0b64f235
| 126
|
py
|
Python
|
clu/__init__.py
|
basameera/CLI-Utils
|
7f52ffa3fee29ee7a7d864a497e4f947f94c40e0
|
[
"MIT"
] | null | null | null |
clu/__init__.py
|
basameera/CLI-Utils
|
7f52ffa3fee29ee7a7d864a497e4f947f94c40e0
|
[
"MIT"
] | 1
|
2020-05-06T20:37:48.000Z
|
2020-05-06T20:37:48.000Z
|
clu/__init__.py
|
basameera/CLI-Utils
|
7f52ffa3fee29ee7a7d864a497e4f947f94c40e0
|
[
"MIT"
] | null | null | null |
from .vsr import VERSION
__version__ = VERSION
__author__ = 'Sameera Sandaruwan'
__author_email__ = 'basameera@protonmail.com'
| 31.5
| 45
| 0.81746
|
from .vsr import VERSION
__version__ = VERSION
__author__ = 'Sameera Sandaruwan'
__author_email__ = 'basameera@protonmail.com'
| 0
| 0
| 0
|
44af19045c58f933bd90128459e09cb4e7656830
| 3,295
|
py
|
Python
|
simpleblog/tests/test_functional_tests.py
|
blacktower2016/simpleblog
|
e0a7e79de7daf3774518a21f6e3c808e2fc79ec5
|
[
"MIT"
] | null | null | null |
simpleblog/tests/test_functional_tests.py
|
blacktower2016/simpleblog
|
e0a7e79de7daf3774518a21f6e3c808e2fc79ec5
|
[
"MIT"
] | null | null | null |
simpleblog/tests/test_functional_tests.py
|
blacktower2016/simpleblog
|
e0a7e79de7daf3774518a21f6e3c808e2fc79ec5
|
[
"MIT"
] | null | null | null |
from django.test import LiveServerTestCase
from selenium import webdriver
from django.urls import reverse
from django.utils.translation import activate, gettext_lazy as _
from .creation_utils import create_user, create_post
from simpleblog.models import Post
import unittest
if __name__ == '__main__':
unittest.main()
| 37.022472
| 84
| 0.692261
|
from django.test import LiveServerTestCase
from selenium import webdriver
from django.urls import reverse
from django.utils.translation import activate, gettext_lazy as _
from .creation_utils import create_user, create_post
from simpleblog.models import Post
import unittest
class TestPostCreate(LiveServerTestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.user = create_user()
#activate("en")
def test_log_in_and_create_new_post(self):
# user come to the simpleblog to create post
self.driver.get(self.live_server_url+reverse("simpleblog:create-post"))
self.assertEqual(Post.objects.count(), 0)
self.assertIn("<h2>Вход</h2>", self.driver.page_source)
# Oh, I forgot to log in!
self.driver.find_element_by_id("id_username").send_keys("user")
self.driver.find_element_by_id("id_password").send_keys("password")
self.driver.find_element_by_tag_name('button').click()
self.assertIn("user", self.driver.page_source)
# create post
self.driver.find_element_by_partial_link_text("Новая").click()
self.assertIn("Новая запись", self.driver.page_source)
self.driver.find_element_by_id("id_title").send_keys("New post title")
self.driver.find_element_by_id("id_subtitle").send_keys("New post subtitle")
self.driver.find_element_by_id("id_text").send_keys("New post text")
self.driver.find_element_by_id("id_tags").send_keys("New post tag")
self.driver.find_element_by_tag_name('button').click()
self.assertEqual(Post.objects.count(), 1)
def tearDown(self):
self.driver.quit()
pass
class TestPostUpdate(LiveServerTestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.user = create_user()
self.post = create_post(author=self.user, is_public=True)
#activate("en")
def test_log_in_and_update_post(self):
# user come to the simpleblog to create post
self.driver.get(self.live_server_url+reverse("simpleblog:create-post"))
self.assertEqual(Post.objects.count(), 1)
self.assertIn("<h2>Вход</h2>", self.driver.page_source)
# Oh, I forgot to log in!
self.driver.find_element_by_id("id_username").send_keys("user")
self.driver.find_element_by_id("id_password").send_keys("password")
self.driver.find_element_by_tag_name('button').click()
self.assertIn("user", self.driver.page_source)
        # edit the existing post
self.driver.find_element_by_partial_link_text("Мои записи").click()
self.driver.find_element_by_class_name("fa-edit").click()
self.assertIn("<h2>Редактирование записи</h2>", self.driver.page_source)
self.driver.find_element_by_id("id_title").send_keys("New post title")
self.driver.find_element_by_id("id_subtitle").send_keys("New post subtitle")
self.driver.find_element_by_id("id_text").send_keys("New post text")
self.driver.find_element_by_id("id_tags").send_keys("New post tag")
self.driver.find_element_by_tag_name('button').click()
self.assertEqual(Post.objects.count(), 1)
def tearDown(self):
self.driver.quit()
pass
if __name__ == '__main__':
unittest.main()
| 2,793
| 40
| 208
|
877a288b95e328f85cddeb7a00bdc71a41bafbba
| 498
|
py
|
Python
|
arrays/first_duplicate_value.py
|
maanavshah/coding-interview
|
4c842cdbc6870da79684635f379966d1caec2162
|
[
"MIT"
] | null | null | null |
arrays/first_duplicate_value.py
|
maanavshah/coding-interview
|
4c842cdbc6870da79684635f379966d1caec2162
|
[
"MIT"
] | null | null | null |
arrays/first_duplicate_value.py
|
maanavshah/coding-interview
|
4c842cdbc6870da79684635f379966d1caec2162
|
[
"MIT"
] | null | null | null |
# O(n) time | O(n) space
# O(n) time | O(1) space
| 24.9
| 41
| 0.566265
|
# O(n) time | O(n) space
def firstDuplicateValue(array):
seenElements = {}
for idx, element in enumerate(array):
if element in seenElements:
return element
else:
seenElements[element] = idx
return -1
# O(n) time | O(1) space
def firstDuplicateValue(array):
for element in array:
absValue = abs(element)
if array[absValue - 1] < 0:
return absValue
else:
array[absValue - 1] *= -1
return -1
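The constant-space version marks a value as seen by flipping array[value - 1] negative; a minimal check (assumes every value lies in 1..len(array), which the trick requires, and note that it mutates its input):
print(firstDuplicateValue([2, 1, 5, 2, 3, 3, 4]))  # 2  (array[1] is already negative when the second 2 arrives)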
| 403
| 0
| 44
|
0aaae7be3b0ace41a6421efe879e353e865df314
| 4,063
|
py
|
Python
|
Sources/Workflows/Shanbay_Alfred2_Workflow/shanbay.py
|
yagosys/AlfredWorkflow.com
|
9e5087e61fb89640a7a6ca89ba554303aec0b037
|
[
"MIT"
] | 2,177
|
2015-01-02T09:56:51.000Z
|
2022-03-27T01:48:37.000Z
|
Sources/Workflows/Shanbay_Alfred2_Workflow/shanbay.py
|
yagosys/AlfredWorkflow.com
|
9e5087e61fb89640a7a6ca89ba554303aec0b037
|
[
"MIT"
] | 24
|
2015-01-02T19:11:51.000Z
|
2021-01-27T07:20:33.000Z
|
Sources/Workflows/Shanbay_Alfred2_Workflow/shanbay.py
|
yagosys/AlfredWorkflow.com
|
9e5087e61fb89640a7a6ca89ba554303aec0b037
|
[
"MIT"
] | 516
|
2015-01-02T18:48:29.000Z
|
2022-01-26T07:12:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#! Force the default encoding to utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import urllib, urllib2, json, time
from pprint import pprint
from pdb import set_trace
import requests
from lxml import html
from alfred.feedback import Feedback
from config import service, addword, loginurl, username, pwd
# Shanbay dictionary
if __name__ == '__main__':
d = ShanbayDict()
d.query(sys.argv[1])
d.output()
| 31.992126
| 123
| 0.535811
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#! Force the default encoding to utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import urllib, urllib2, json, time
from pprint import pprint
from pdb import set_trace
import requests
from lxml import html
from alfred.feedback import Feedback
from config import service, addword, loginurl, username, pwd
# Shanbay dictionary
class ShanbayDict():
def __init__(self):
self.service = service
self.query_word =''
self.feedback = Feedback()
        # Safely take a value out of a dict
self.save_get_dict_value = lambda d, k: d[k] if d.has_key(k) else ''
def get_csrfmiddlewaretoken(self):
page = requests.get(loginurl).text
script = html.fromstring(page).xpath("(//input[@name='csrfmiddlewaretoken']/@value)[1]")[0]
return script
def login(self):
csrftoken = self.get_csrfmiddlewaretoken()
postdata = {}
postdata['csrfmiddlewaretoken'] = csrftoken
postdata['username'] = username
postdata['password'] = pwd
postdata['login'] = ''
postdata['continue'] = 'home'
postdata['u'] = 1
postdata['next'] = '/review/new/'
headers = {
'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Host':'www.shanbay.com',
'Origin':'http://www.shanbay.com',
'Referer':'http://www.shanbay.com/accounts/login/',
'Cookie':'csrftoken='+csrftoken+';csrftoken='+csrftoken+';sessionid=f7df88e25d184e487df6ddc6a88caafb;',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset':'UTF-8,*;q=0.5',
'Accept-Encoding':'gzip,deflate,sdc',
'Accept-Language':'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
'Cache-Control':'max-age=0',
'Connection':'keep-alive',
'Content-Type':'application/x-www-form-urlencoded'
}
#r = requests.post(url, data=json.dumps(postdata), headers=headers)
r = requests.post(loginurl, data=postdata, headers=headers)
#print r.status_code
#print r.headers
self.cookies = r.cookies
return True if r.status_code == 200 else False
def fetch(self, word):
islogin = self.login()
if islogin == False:
            print 'Login failed'
return
url = self.service+word
try:
r = requests.get(url, cookies = self.cookies)
res = json.loads(r.text)
except:
return {}
return res
def parse(self, data):
if(data['voc']):
voc = data['voc']
word = voc['content']
            # Pronunciation
pron = voc['pron']
title = "%s [%s]" % (word, pron)
subtitle = voc['definition']
self.addItem(title = title, subtitle = subtitle, arg = word)
            # Definitions
if voc.has_key('en_definitions') and voc['en_definitions']:
for type in voc['en_definitions']:
for line in voc['en_definitions'][type]:
title = type+', '+line
if not title:
continue
self.addItem(title = title, arg = word)
else:
self.addItem(title='no results')
def query(self, word):
if not word or not isinstance(word, (str, unicode)):
return
self.query_word = word
self.parse( self.fetch(word) )
def addItem(self, **kwargs):
self.feedback.addItem(**kwargs)
def output(self):
if self.feedback.isEmpty():
self.addItem(
title = self.query_word,
subtitle = 'Sorry, no result.',
arg = self.query_word )
print(self.feedback.get(unescape = True))
if __name__ == '__main__':
d = ShanbayDict()
d.query(sys.argv[1])
d.output()
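The login flow above (GET the form, scrape csrfmiddlewaretoken, POST it back with the credentials) is a standard Django-style handshake; a compact Python 3 sketch of the same idea, using a requests.Session so the csrftoken/sessionid cookies carry over to later calls (login_url and the credentials are placeholders):
import requests
from lxml import html

def django_style_login(session, login_url, user, password):
    # Step 1: fetch the login page and scrape the CSRF token from the form.
    page = session.get(login_url).text
    token = html.fromstring(page).xpath(
        "(//input[@name='csrfmiddlewaretoken']/@value)[1]")[0]
    # Step 2: POST the token back along with the credentials; the session
    # object keeps the resulting cookies for any follow-up requests.
    resp = session.post(login_url, headers={'Referer': login_url}, data={
        'csrfmiddlewaretoken': token,
        'username': user,
        'password': password,
    })
    return resp.status_code == 200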
| 3,419
| -1
| 237
|
8dcef86aa8287856e10a453698de6d0f45be68cd
| 78
|
py
|
Python
|
odl_details.py
|
vaibhgupta157/PathCompute
|
44109176dc3770f7742e4e70225083ffe8eccad7
|
[
"MIT"
] | 5
|
2019-08-05T20:00:03.000Z
|
2021-11-24T07:39:06.000Z
|
odl_details.py
|
vaibhgupta157/PathCompute
|
44109176dc3770f7742e4e70225083ffe8eccad7
|
[
"MIT"
] | 1
|
2019-07-15T13:04:45.000Z
|
2019-10-10T02:21:51.000Z
|
odl_details.py
|
vaibhgupta157/PathCompute
|
44109176dc3770f7742e4e70225083ffe8eccad7
|
[
"MIT"
] | 2
|
2019-08-01T21:17:15.000Z
|
2020-09-30T20:19:49.000Z
|
ODL_IP = "127.0.0.1"
ODL_PORT = "8181"
ODL_USER = "admin"
ODL_PASS = "admin"
| 13
| 20
| 0.641026
|
ODL_IP = "127.0.0.1"
ODL_PORT = "8181"
ODL_USER = "admin"
ODL_PASS = "admin"
| 0
| 0
| 0
|
174da0292817370750a98ca1b863b2c249ba736f
| 574
|
py
|
Python
|
nova_dveri_ru/html_work/html_code.py
|
Aleksey-Voko/nova_dveri_ru
|
7657c81e12f4486100385b9f181895f7688a8106
|
[
"MIT"
] | null | null | null |
nova_dveri_ru/html_work/html_code.py
|
Aleksey-Voko/nova_dveri_ru
|
7657c81e12f4486100385b9f181895f7688a8106
|
[
"MIT"
] | null | null | null |
nova_dveri_ru/html_work/html_code.py
|
Aleksey-Voko/nova_dveri_ru
|
7657c81e12f4486100385b9f181895f7688a8106
|
[
"MIT"
] | null | null | null |
import requests
from nova_dveri_ru.data import USER_AGENT
if __name__ == '__main__':
url = 'https://nova-dveri.ru/mezhkomnatnye-dveri/ehkoshpon/il%20doors/dver-galleya-07-chern-yasen-svetlyj'
out_html_file = 'galleya-07-chern-yasen-svetlyj.html'
save_html_code(url, out_html_file)
| 27.333333
| 110
| 0.705575
|
import requests
from nova_dveri_ru.data import USER_AGENT
def save_html_code(input_url: str, out_file: str):
headers = {
'User-Agent': USER_AGENT,
}
response = requests.get(input_url, headers=headers)
html_code = response.text
with open(out_file, 'w', encoding='utf-8') as fl:
fl.write(html_code)
if __name__ == '__main__':
url = 'https://nova-dveri.ru/mezhkomnatnye-dveri/ehkoshpon/il%20doors/dver-galleya-07-chern-yasen-svetlyj'
out_html_file = 'galleya-07-chern-yasen-svetlyj.html'
save_html_code(url, out_html_file)
| 254
| 0
| 23
|
3fd50a97d477c6606a8e1d7b5ef666f3120603de
| 6,976
|
py
|
Python
|
src/mutiny/_internal/models/user.py
|
jack1142/Mutiny
|
586dc7131c0d9eba98a271601a446e00792e8cde
|
[
"ECL-2.0",
"Apache-2.0"
] | 20
|
2021-08-18T20:46:16.000Z
|
2022-01-06T22:53:25.000Z
|
src/mutiny/_internal/models/user.py
|
jack1142/Mutiny
|
586dc7131c0d9eba98a271601a446e00792e8cde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/mutiny/_internal/models/user.py
|
jack1142/Mutiny
|
586dc7131c0d9eba98a271601a446e00792e8cde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User models"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, final
from ..bit_fields import Badges, UserFlags
from ..enums import Presence, RelationshipStatus
from .attachment import Attachment
from .bases import Model, ParserData, StatefulModel, StatefulResource, field
if TYPE_CHECKING:
from ...events import UserUpdateEvent
from ..state import State
__all__ = (
"User",
"BotInfo",
"Relationship",
"Status",
"UserProfile",
)
@final
class Status(Model):
"""
Status()
Represents a user's status.
Attributes:
text: The custom status text.
presence: The user's presence.
"""
text: Optional[str] = field("text", default=None)
# Users who have never changed their presence do not have the `presence`.
# New users start with an Online presence,
    # so that's what we should use in that case.
presence: Presence = field("presence", factory=True, default="Online")
@final
class Relationship(StatefulModel):
"""
Relationship()
    Represents the client user's relationship with another user.
Attributes:
user_id: The ID of the other user in this relation.
status: The relationship's status.
"""
user_id: str = field("_id")
status: RelationshipStatus = field("status", factory=True)
@final
class BotInfo(StatefulModel):
"""
BotInfo()
Represents the information about a bot user.
Attributes:
owner_id: The ID of the bot owner.
"""
owner_id: str = field("owner")
@classmethod
@final
class UserProfile(StatefulModel):
"""
UserProfile()
Represents a profile of a user.
Attributes:
content: The profile content if provided.
background: The profile background if provided.
"""
content: Optional[str] = field("content", default=None)
background: Optional[Attachment] = field("background", factory=True, default=None)
@classmethod
@final
class User(StatefulResource):
"""
User()
Represents a user.
Attributes:
id: The user ID.
username: The username.
avatar: The user's avatar.
relations: The user's relations. This is only present for the client user.
badges: The user's badges.
status: The user's status.
relationship_status: The client user's relationship status with this user.
online: Indicates whether the user is online.
flags: The user flags.
bot: The information about this bot, or `None` if this user is not a bot.
profile: The user's profile.
"""
id: str = field("_id")
username: str = field("username")
avatar: Optional[Attachment] = field("avatar", factory=True, default=None)
relations: Optional[dict[str, Relationship]] = field(
"relations", factory=True, default=None
)
badges: Badges = field("badges", factory=True, default=0)
status: Status = field("status", factory=True, default_factory=dict)
relationship_status: Optional[RelationshipStatus] = field(
"relationship", factory=True, default=None
)
online: bool = field("online")
flags: UserFlags = field("flags", factory=True, default=0)
bot: Optional[BotInfo] = field("bot", factory=True, default=None)
profile: Optional[UserProfile] = field("profile", factory=True, default=None)
| 32.446512
| 88
| 0.673595
|
# Copyright 2021 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User models"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, final
from ..bit_fields import Badges, UserFlags
from ..enums import Presence, RelationshipStatus
from .attachment import Attachment
from .bases import Model, ParserData, StatefulModel, StatefulResource, field
if TYPE_CHECKING:
from ...events import UserUpdateEvent
from ..state import State
__all__ = (
"User",
"BotInfo",
"Relationship",
"Status",
"UserProfile",
)
@final
class Status(Model):
"""
Status()
Represents a user's status.
Attributes:
text: The custom status text.
presence: The user's presence.
"""
text: Optional[str] = field("text", default=None)
# Users who have never changed their presence do not have the `presence`.
# New users start with an Online presence,
    # so that's what we should use in that case.
presence: Presence = field("presence", factory=True, default="Online")
def _presence_parser(self, parser_data: ParserData) -> Presence:
return Presence(parser_data.get_field())
@final
class Relationship(StatefulModel):
"""
Relationship()
    Represents the client user's relationship with another user.
Attributes:
user_id: The ID of the other user in this relation.
status: The relationship's status.
"""
user_id: str = field("_id")
status: RelationshipStatus = field("status", factory=True)
def _status_parser(self, parser_data: ParserData) -> RelationshipStatus:
return RelationshipStatus(parser_data.get_field())
@final
class BotInfo(StatefulModel):
"""
BotInfo()
Represents the information about a bot user.
Attributes:
owner_id: The ID of the bot owner.
"""
owner_id: str = field("owner")
@classmethod
def _from_raw_data(
cls, state: State, raw_data: Optional[dict[str, Any]]
) -> Optional[BotInfo]:
if raw_data is None:
return None
return cls(state, raw_data)
@final
class UserProfile(StatefulModel):
"""
UserProfile()
Represents a profile of a user.
Attributes:
content: The profile content if provided.
background: The profile background if provided.
"""
content: Optional[str] = field("content", default=None)
background: Optional[Attachment] = field("background", factory=True, default=None)
def _background_parser(self, parser_data: ParserData) -> Optional[Attachment]:
return Attachment._from_raw_data(self._state, parser_data.get_field())
@classmethod
def _from_raw_data(
cls, state: State, raw_data: Optional[dict[str, Any]]
) -> Optional[UserProfile]:
if raw_data is None:
return None
return cls(state, raw_data)
@final
class User(StatefulResource):
"""
User()
Represents a user.
Attributes:
id: The user ID.
username: The username.
avatar: The user's avatar.
relations: The user's relations. This is only present for the client user.
badges: The user's badges.
status: The user's status.
relationship_status: The client user's relationship status with this user.
online: Indicates whether the user is online.
flags: The user flags.
bot: The information about this bot, or `None` if this user is not a bot.
profile: The user's profile.
"""
id: str = field("_id")
username: str = field("username")
avatar: Optional[Attachment] = field("avatar", factory=True, default=None)
relations: Optional[dict[str, Relationship]] = field(
"relations", factory=True, default=None
)
badges: Badges = field("badges", factory=True, default=0)
status: Status = field("status", factory=True, default_factory=dict)
relationship_status: Optional[RelationshipStatus] = field(
"relationship", factory=True, default=None
)
online: bool = field("online")
flags: UserFlags = field("flags", factory=True, default=0)
bot: Optional[BotInfo] = field("bot", factory=True, default=None)
profile: Optional[UserProfile] = field("profile", factory=True, default=None)
def _avatar_parser(self, parser_data: ParserData) -> Optional[Attachment]:
return Attachment._from_raw_data(self._state, parser_data.get_field())
def _relations_parser(
self, parser_data: ParserData
) -> Optional[dict[str, Relationship]]:
relations_data = parser_data.get_field()
if relations_data is None:
return None
return {data["_id"]: Relationship(self._state, data) for data in relations_data}
def _badges_parser(self, parser_data: ParserData) -> Badges:
return Badges(parser_data.get_field())
def _status_parser(self, parser_data: ParserData) -> Status:
return Status(parser_data.get_field())
def _relationship_status_parser(
self, parser_data: ParserData
) -> Optional[RelationshipStatus]:
return RelationshipStatus._from_raw_data(parser_data.get_field())
def _flags_parser(self, parser_data: ParserData) -> UserFlags:
return UserFlags(parser_data.get_field())
def _bot_parser(self, parser_data: ParserData) -> Optional[BotInfo]:
return BotInfo._from_raw_data(self._state, parser_data.get_field())
def _profile_parser(self, parser_data: ParserData) -> Optional[UserProfile]:
return UserProfile._from_raw_data(self._state, parser_data.get_field())
def _update_from_event(self, event: UserUpdateEvent) -> None:
if event.clear == "ProfileContent":
if self.profile is not None:
self.profile.raw_data.pop("content", None)
self.profile.content = None
elif event.clear == "ProfileBackground":
if self.profile is not None:
self.profile.raw_data.pop("background", None)
self.profile.background = None
elif event.clear == "StatusText":
self.status.raw_data.pop("text", None)
self.status.text = None
elif event.clear == "Avatar":
self.raw_data["avatar"] = None
self.avatar = None
# XXX: updates to `profile` are currently not handled
# due to the use of dot notation
self._update_from_dict(event.data)
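The field(..., factory=True) declarations above pair a raw-data key with a _<name>_parser hook defined on the same class; a self-contained toy version of that lookup-by-naming-convention pattern (hypothetical, not Mutiny's actual base classes):
class ToyModel:
    # attribute name -> raw-data key; parser hooks are found by naming convention
    _fields = {"presence": "presence"}

    def __init__(self, raw_data):
        self.raw_data = raw_data
        for attr, key in self._fields.items():
            parser = getattr(self, "_" + attr + "_parser", None)
            value = raw_data.get(key)
            setattr(self, attr, parser(value) if parser else value)

class ToyStatus(ToyModel):
    def _presence_parser(self, value):
        # fall back to "Online", mirroring the default used above
        return value if value is not None else "Online"

print(ToyStatus({}).presence)                     # Online
print(ToyStatus({"presence": "Idle"}).presence)   # Idle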
| 2,605
| 0
| 376
|
ffafc0fccda94ffbed91a8280654703587dbde9e
| 355
|
py
|
Python
|
profit/types/blockchain_format/pool_target.py
|
zcomputerwiz/profit-blockchain
|
d6d4337ea7c418c66f05f22a263e94190452aed6
|
[
"Apache-2.0"
] | 7
|
2022-03-15T01:33:35.000Z
|
2022-03-26T21:29:45.000Z
|
profit/types/blockchain_format/pool_target.py
|
zcomputerwiz/profit-blockchain
|
d6d4337ea7c418c66f05f22a263e94190452aed6
|
[
"Apache-2.0"
] | 3
|
2022-03-19T23:02:18.000Z
|
2022-03-19T23:02:19.000Z
|
profit/types/blockchain_format/pool_target.py
|
zcomputerwiz/profit-blockchain
|
d6d4337ea7c418c66f05f22a263e94190452aed6
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from profit.types.blockchain_format.sized_bytes import bytes32
from profit.util.ints import uint32
from profit.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
| 27.307692
| 69
| 0.811268
|
from dataclasses import dataclass
from profit.types.blockchain_format.sized_bytes import bytes32
from profit.util.ints import uint32
from profit.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class PoolTarget(Streamable):
puzzle_hash: bytes32
max_height: uint32 # A max height of 0 means it is valid forever
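A minimal construction sketch (assumes bytes32 length-checks a 32-byte value and uint32 wraps an int, as the chia-style util types do):
pt = PoolTarget(puzzle_hash=bytes32(b"\x00" * 32), max_height=uint32(0))
# max_height == 0: valid forever, per the inline comment above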
| 0
| 103
| 22
|
0addda13f42195fa3b166c4628f48102cc4fb485
| 33,961
|
py
|
Python
|
Lib/site-packages/win32/lib/winnt.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/win32/lib/winnt.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/win32/lib/winnt.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
# Generated by h2py from \mssdk\include\winnt.h
APPLICATION_ERROR_MASK = 536870912
ERROR_SEVERITY_SUCCESS = 0
ERROR_SEVERITY_INFORMATIONAL = 1073741824
ERROR_SEVERITY_WARNING = -2147483648
ERROR_SEVERITY_ERROR = -1073741824
MINCHAR = 128
MAXCHAR = 127
MINSHORT = 32768
MAXSHORT = 32767
MINLONG = -2147483648
MAXLONG = 2147483647
MAXBYTE = 255
MAXWORD = 65535
MAXDWORD = -1
LANG_NEUTRAL = 0
LANG_AFRIKAANS = 54
LANG_ALBANIAN = 28
LANG_ARABIC = 1
LANG_BASQUE = 45
LANG_BELARUSIAN = 35
LANG_BULGARIAN = 2
LANG_CATALAN = 3
LANG_CHINESE = 4
LANG_CROATIAN = 26
LANG_CZECH = 5
LANG_DANISH = 6
LANG_DUTCH = 19
LANG_ENGLISH = 9
LANG_ESTONIAN = 37
LANG_FAEROESE = 56
LANG_FARSI = 41
LANG_FINNISH = 11
LANG_FRENCH = 12
LANG_GERMAN = 7
LANG_GREEK = 8
LANG_HEBREW = 13
LANG_HINDI = 57
LANG_HUNGARIAN = 14
LANG_ICELANDIC = 15
LANG_INDONESIAN = 33
LANG_ITALIAN = 16
LANG_JAPANESE = 17
LANG_KOREAN = 18
LANG_LATVIAN = 38
LANG_LITHUANIAN = 39
LANG_MACEDONIAN = 47
LANG_MALAY = 62
LANG_NORWEGIAN = 20
LANG_POLISH = 21
LANG_PORTUGUESE = 22
LANG_ROMANIAN = 24
LANG_RUSSIAN = 25
LANG_SERBIAN = 26
LANG_SLOVAK = 27
LANG_SLOVENIAN = 36
LANG_SPANISH = 10
LANG_SWAHILI = 65
LANG_SWEDISH = 29
LANG_THAI = 30
LANG_TURKISH = 31
LANG_UKRAINIAN = 34
LANG_VIETNAMESE = 42
SUBLANG_NEUTRAL = 0
SUBLANG_DEFAULT = 1
SUBLANG_SYS_DEFAULT = 2
SUBLANG_ARABIC_SAUDI_ARABIA = 1
SUBLANG_ARABIC_IRAQ = 2
SUBLANG_ARABIC_EGYPT = 3
SUBLANG_ARABIC_LIBYA = 4
SUBLANG_ARABIC_ALGERIA = 5
SUBLANG_ARABIC_MOROCCO = 6
SUBLANG_ARABIC_TUNISIA = 7
SUBLANG_ARABIC_OMAN = 8
SUBLANG_ARABIC_YEMEN = 9
SUBLANG_ARABIC_SYRIA = 10
SUBLANG_ARABIC_JORDAN = 11
SUBLANG_ARABIC_LEBANON = 12
SUBLANG_ARABIC_KUWAIT = 13
SUBLANG_ARABIC_UAE = 14
SUBLANG_ARABIC_BAHRAIN = 15
SUBLANG_ARABIC_QATAR = 16
SUBLANG_CHINESE_TRADITIONAL = 1
SUBLANG_CHINESE_SIMPLIFIED = 2
SUBLANG_CHINESE_HONGKONG = 3
SUBLANG_CHINESE_SINGAPORE = 4
SUBLANG_CHINESE_MACAU = 5
SUBLANG_DUTCH = 1
SUBLANG_DUTCH_BELGIAN = 2
SUBLANG_ENGLISH_US = 1
SUBLANG_ENGLISH_UK = 2
SUBLANG_ENGLISH_AUS = 3
SUBLANG_ENGLISH_CAN = 4
SUBLANG_ENGLISH_NZ = 5
SUBLANG_ENGLISH_EIRE = 6
SUBLANG_ENGLISH_SOUTH_AFRICA = 7
SUBLANG_ENGLISH_JAMAICA = 8
SUBLANG_ENGLISH_CARIBBEAN = 9
SUBLANG_ENGLISH_BELIZE = 10
SUBLANG_ENGLISH_TRINIDAD = 11
SUBLANG_ENGLISH_ZIMBABWE = 12
SUBLANG_ENGLISH_PHILIPPINES = 13
SUBLANG_FRENCH = 1
SUBLANG_FRENCH_BELGIAN = 2
SUBLANG_FRENCH_CANADIAN = 3
SUBLANG_FRENCH_SWISS = 4
SUBLANG_FRENCH_LUXEMBOURG = 5
SUBLANG_FRENCH_MONACO = 6
SUBLANG_GERMAN = 1
SUBLANG_GERMAN_SWISS = 2
SUBLANG_GERMAN_AUSTRIAN = 3
SUBLANG_GERMAN_LUXEMBOURG = 4
SUBLANG_GERMAN_LIECHTENSTEIN = 5
SUBLANG_ITALIAN = 1
SUBLANG_ITALIAN_SWISS = 2
SUBLANG_KOREAN = 1
SUBLANG_KOREAN_JOHAB = 2
SUBLANG_LITHUANIAN = 1
SUBLANG_LITHUANIAN_CLASSIC = 2
SUBLANG_MALAY_MALAYSIA = 1
SUBLANG_MALAY_BRUNEI_DARUSSALAM = 2
SUBLANG_NORWEGIAN_BOKMAL = 1
SUBLANG_NORWEGIAN_NYNORSK = 2
SUBLANG_PORTUGUESE = 2
SUBLANG_PORTUGUESE_BRAZILIAN = 1
SUBLANG_SERBIAN_LATIN = 2
SUBLANG_SERBIAN_CYRILLIC = 3
SUBLANG_SPANISH = 1
SUBLANG_SPANISH_MEXICAN = 2
SUBLANG_SPANISH_MODERN = 3
SUBLANG_SPANISH_GUATEMALA = 4
SUBLANG_SPANISH_COSTA_RICA = 5
SUBLANG_SPANISH_PANAMA = 6
SUBLANG_SPANISH_DOMINICAN_REPUBLIC = 7
SUBLANG_SPANISH_VENEZUELA = 8
SUBLANG_SPANISH_COLOMBIA = 9
SUBLANG_SPANISH_PERU = 10
SUBLANG_SPANISH_ARGENTINA = 11
SUBLANG_SPANISH_ECUADOR = 12
SUBLANG_SPANISH_CHILE = 13
SUBLANG_SPANISH_URUGUAY = 14
SUBLANG_SPANISH_PARAGUAY = 15
SUBLANG_SPANISH_BOLIVIA = 16
SUBLANG_SPANISH_EL_SALVADOR = 17
SUBLANG_SPANISH_HONDURAS = 18
SUBLANG_SPANISH_NICARAGUA = 19
SUBLANG_SPANISH_PUERTO_RICO = 20
SUBLANG_SWEDISH = 1
SUBLANG_SWEDISH_FINLAND = 2
SORT_DEFAULT = 0
SORT_JAPANESE_XJIS = 0
SORT_JAPANESE_UNICODE = 1
SORT_CHINESE_BIG5 = 0
SORT_CHINESE_PRCP = 0
SORT_CHINESE_UNICODE = 1
SORT_CHINESE_PRC = 2
SORT_KOREAN_KSC = 0
SORT_KOREAN_UNICODE = 1
SORT_GERMAN_PHONE_BOOK = 1
NLS_VALID_LOCALE_MASK = 1048575
MAXIMUM_WAIT_OBJECTS = 64
MAXIMUM_SUSPEND_COUNT = MAXCHAR
EXCEPTION_NONCONTINUABLE = 1
EXCEPTION_MAXIMUM_PARAMETERS = 15
PROCESS_TERMINATE = (1)
PROCESS_CREATE_THREAD = (2)
PROCESS_VM_OPERATION = (8)
PROCESS_VM_READ = (16)
PROCESS_VM_WRITE = (32)
PROCESS_DUP_HANDLE = (64)
PROCESS_CREATE_PROCESS = (128)
PROCESS_SET_QUOTA = (256)
PROCESS_SET_INFORMATION = (512)
PROCESS_QUERY_INFORMATION = (1024)
MAXIMUM_PROCESSORS = 32
THREAD_TERMINATE = (1)
THREAD_SUSPEND_RESUME = (2)
THREAD_GET_CONTEXT = (8)
THREAD_SET_CONTEXT = (16)
THREAD_SET_INFORMATION = (32)
THREAD_QUERY_INFORMATION = (64)
THREAD_SET_THREAD_TOKEN = (128)
THREAD_IMPERSONATE = (256)
THREAD_DIRECT_IMPERSONATION = (512)
JOB_OBJECT_ASSIGN_PROCESS = (1)
JOB_OBJECT_SET_ATTRIBUTES = (2)
JOB_OBJECT_QUERY = (4)
JOB_OBJECT_TERMINATE = (8)
TLS_MINIMUM_AVAILABLE = 64
THREAD_BASE_PRIORITY_LOWRT = 15
THREAD_BASE_PRIORITY_MAX = 2
THREAD_BASE_PRIORITY_MIN = -2
THREAD_BASE_PRIORITY_IDLE = -15
JOB_OBJECT_LIMIT_WORKINGSET = 1
JOB_OBJECT_LIMIT_PROCESS_TIME = 2
JOB_OBJECT_LIMIT_JOB_TIME = 4
JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8
JOB_OBJECT_LIMIT_AFFINITY = 16
JOB_OBJECT_LIMIT_PRIORITY_CLASS = 32
JOB_OBJECT_LIMIT_VALID_FLAGS = 63
EVENT_MODIFY_STATE = 2
MUTANT_QUERY_STATE = 1
SEMAPHORE_MODIFY_STATE = 2
TIME_ZONE_ID_UNKNOWN = 0
TIME_ZONE_ID_STANDARD = 1
TIME_ZONE_ID_DAYLIGHT = 2
PROCESSOR_INTEL_386 = 386
PROCESSOR_INTEL_486 = 486
PROCESSOR_INTEL_PENTIUM = 586
PROCESSOR_MIPS_R4000 = 4000
PROCESSOR_ALPHA_21064 = 21064
PROCESSOR_HITACHI_SH3 = 10003
PROCESSOR_HITACHI_SH3E = 10004
PROCESSOR_HITACHI_SH4 = 10005
PROCESSOR_MOTOROLA_821 = 821
PROCESSOR_ARM_7TDMI = 70001
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SH = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_UNKNOWN = 65535
PF_FLOATING_POINT_PRECISION_ERRATA = 0
PF_FLOATING_POINT_EMULATED = 1
PF_COMPARE_EXCHANGE_DOUBLE = 2
PF_MMX_INSTRUCTIONS_AVAILABLE = 3
PF_PPC_MOVEMEM_64BIT_OK = 4
PF_ALPHA_BYTE_INSTRUCTIONS = 5
SECTION_QUERY = 1
SECTION_MAP_WRITE = 2
SECTION_MAP_READ = 4
SECTION_MAP_EXECUTE = 8
SECTION_EXTEND_SIZE = 16
PAGE_NOACCESS = 1
PAGE_READONLY = 2
PAGE_READWRITE = 4
PAGE_WRITECOPY = 8
PAGE_EXECUTE = 16
PAGE_EXECUTE_READ = 32
PAGE_EXECUTE_READWRITE = 64
PAGE_EXECUTE_WRITECOPY = 128
PAGE_GUARD = 256
PAGE_NOCACHE = 512
MEM_COMMIT = 4096
MEM_RESERVE = 8192
MEM_DECOMMIT = 16384
MEM_RELEASE = 32768
MEM_FREE = 65536
MEM_PRIVATE = 131072
MEM_MAPPED = 262144
MEM_RESET = 524288
MEM_TOP_DOWN = 1048576
MEM_4MB_PAGES = -2147483648
SEC_FILE = 8388608
SEC_IMAGE = 16777216
SEC_VLM = 33554432
SEC_RESERVE = 67108864
SEC_COMMIT = 134217728
SEC_NOCACHE = 268435456
MEM_IMAGE = SEC_IMAGE
FILE_READ_DATA = ( 1 )
FILE_LIST_DIRECTORY = ( 1 )
FILE_WRITE_DATA = ( 2 )
FILE_ADD_FILE = ( 2 )
FILE_APPEND_DATA = ( 4 )
FILE_ADD_SUBDIRECTORY = ( 4 )
FILE_CREATE_PIPE_INSTANCE = ( 4 )
FILE_READ_EA = ( 8 )
FILE_WRITE_EA = ( 16 )
FILE_EXECUTE = ( 32 )
FILE_TRAVERSE = ( 32 )
FILE_DELETE_CHILD = ( 64 )
FILE_READ_ATTRIBUTES = ( 128 )
FILE_WRITE_ATTRIBUTES = ( 256 )
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
FILE_SHARE_DELETE = 4
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_ENCRYPTED = 64
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_NOTIFY_CHANGE_FILE_NAME = 1
FILE_NOTIFY_CHANGE_DIR_NAME = 2
FILE_NOTIFY_CHANGE_ATTRIBUTES = 4
FILE_NOTIFY_CHANGE_SIZE = 8
FILE_NOTIFY_CHANGE_LAST_WRITE = 16
FILE_NOTIFY_CHANGE_LAST_ACCESS = 32
FILE_NOTIFY_CHANGE_CREATION = 64
FILE_NOTIFY_CHANGE_SECURITY = 256
FILE_ACTION_ADDED = 1
FILE_ACTION_REMOVED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_CASE_SENSITIVE_SEARCH = 1
FILE_CASE_PRESERVED_NAMES = 2
FILE_UNICODE_ON_DISK = 4
FILE_PERSISTENT_ACLS = 8
FILE_FILE_COMPRESSION = 16
FILE_VOLUME_QUOTAS = 32
FILE_SUPPORTS_SPARSE_FILES = 64
FILE_SUPPORTS_REPARSE_POINTS = 128
FILE_SUPPORTS_REMOTE_STORAGE = 256
FILE_VOLUME_IS_COMPRESSED = 32768
FILE_SUPPORTS_OBJECT_IDS = 65536
FILE_SUPPORTS_ENCRYPTION = 131072
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = ( 16 * 1024 )
IO_REPARSE_TAG_RESERVED_ZERO = (0)
IO_REPARSE_TAG_RESERVED_ONE = (1)
IO_REPARSE_TAG_SYMBOLIC_LINK = (2)
IO_REPARSE_TAG_NSS = (5)
IO_REPARSE_TAG_FILTER_MANAGER = -2147483637
IO_REPARSE_TAG_DFS = -2147483638
IO_REPARSE_TAG_SIS = -2147483641
IO_REPARSE_TAG_MOUNT_POINT = -1610612733
IO_REPARSE_TAG_HSM = -1073741820
IO_REPARSE_TAG_NSSRECOVER = (8)
IO_REPARSE_TAG_RESERVED_MS_RANGE = (256)
IO_REPARSE_TAG_RESERVED_RANGE = IO_REPARSE_TAG_RESERVED_ONE
IO_COMPLETION_MODIFY_STATE = 2
DUPLICATE_CLOSE_SOURCE = 1
DUPLICATE_SAME_ACCESS = 2
DELETE = (65536)
READ_CONTROL = (131072)
WRITE_DAC = (262144)
WRITE_OWNER = (524288)
SYNCHRONIZE = (1048576)
STANDARD_RIGHTS_REQUIRED = (983040)
STANDARD_RIGHTS_READ = (READ_CONTROL)
STANDARD_RIGHTS_WRITE = (READ_CONTROL)
STANDARD_RIGHTS_EXECUTE = (READ_CONTROL)
STANDARD_RIGHTS_ALL = (2031616)
SPECIFIC_RIGHTS_ALL = (65535)
IO_COMPLETION_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED|SYNCHRONIZE|0x3
ACCESS_SYSTEM_SECURITY = (16777216)
MAXIMUM_ALLOWED = (33554432)
GENERIC_READ = (-2147483648)
GENERIC_WRITE = (1073741824)
GENERIC_EXECUTE = (536870912)
GENERIC_ALL = (268435456)
# Included from pshpack4.h
# Included from poppack.h
SID_REVISION = (1)
SID_MAX_SUB_AUTHORITIES = (15)
SID_RECOMMENDED_SUB_AUTHORITIES = (1)
SidTypeUser = 1
SidTypeGroup = 2
SidTypeDomain =3
SidTypeAlias = 4
SidTypeWellKnownGroup = 5
SidTypeDeletedAccount = 6
SidTypeInvalid = 7
SidTypeUnknown = 8
SECURITY_NULL_RID = (0)
SECURITY_WORLD_RID = (0)
SECURITY_LOCAL_RID = (0X00000000)
SECURITY_CREATOR_OWNER_RID = (0)
SECURITY_CREATOR_GROUP_RID = (1)
SECURITY_CREATOR_OWNER_SERVER_RID = (2)
SECURITY_CREATOR_GROUP_SERVER_RID = (3)
SECURITY_DIALUP_RID = (1)
SECURITY_NETWORK_RID = (2)
SECURITY_BATCH_RID = (3)
SECURITY_INTERACTIVE_RID = (4)
SECURITY_SERVICE_RID = (6)
SECURITY_ANONYMOUS_LOGON_RID = (7)
SECURITY_PROXY_RID = (8)
SECURITY_SERVER_LOGON_RID = (9)
SECURITY_PRINCIPAL_SELF_RID = (10)
SECURITY_AUTHENTICATED_USER_RID = (11)
SECURITY_LOGON_IDS_RID = (5)
SECURITY_LOGON_IDS_RID_COUNT = (3)
SECURITY_LOCAL_SYSTEM_RID = (18)
SECURITY_NT_NON_UNIQUE = (21)
SECURITY_BUILTIN_DOMAIN_RID = (32)
DOMAIN_USER_RID_ADMIN = (500)
DOMAIN_USER_RID_GUEST = (501)
DOMAIN_GROUP_RID_ADMINS = (512)
DOMAIN_GROUP_RID_USERS = (513)
DOMAIN_GROUP_RID_GUESTS = (514)
DOMAIN_ALIAS_RID_ADMINS = (544)
DOMAIN_ALIAS_RID_USERS = (545)
DOMAIN_ALIAS_RID_GUESTS = (546)
DOMAIN_ALIAS_RID_POWER_USERS = (547)
DOMAIN_ALIAS_RID_ACCOUNT_OPS = (548)
DOMAIN_ALIAS_RID_SYSTEM_OPS = (549)
DOMAIN_ALIAS_RID_PRINT_OPS = (550)
DOMAIN_ALIAS_RID_BACKUP_OPS = (551)
DOMAIN_ALIAS_RID_REPLICATOR = (552)
SE_GROUP_MANDATORY = (1)
SE_GROUP_ENABLED_BY_DEFAULT = (2)
SE_GROUP_ENABLED = (4)
SE_GROUP_OWNER = (8)
SE_GROUP_LOGON_ID = (-1073741824)
ACL_REVISION = (2)
ACL_REVISION_DS = (4)
ACL_REVISION1 = (1)
ACL_REVISION2 = (2)
ACL_REVISION3 = (3)
ACL_REVISION4 = (4)
MAX_ACL_REVISION = ACL_REVISION4
ACCESS_MIN_MS_ACE_TYPE = (0)
ACCESS_ALLOWED_ACE_TYPE = (0)
ACCESS_DENIED_ACE_TYPE = (1)
SYSTEM_AUDIT_ACE_TYPE = (2)
SYSTEM_ALARM_ACE_TYPE = (3)
ACCESS_MAX_MS_V2_ACE_TYPE = (3)
ACCESS_ALLOWED_COMPOUND_ACE_TYPE = (4)
ACCESS_MAX_MS_V3_ACE_TYPE = (4)
ACCESS_MIN_MS_OBJECT_ACE_TYPE = (5)
ACCESS_ALLOWED_OBJECT_ACE_TYPE = (5)
ACCESS_DENIED_OBJECT_ACE_TYPE = (6)
SYSTEM_AUDIT_OBJECT_ACE_TYPE = (7)
SYSTEM_ALARM_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_V4_ACE_TYPE = (8)
ACCESS_MAX_MS_ACE_TYPE = (8)
OBJECT_INHERIT_ACE = (1)
CONTAINER_INHERIT_ACE = (2)
NO_PROPAGATE_INHERIT_ACE = (4)
INHERIT_ONLY_ACE = (8)
INHERITED_ACE = (16)
VALID_INHERIT_FLAGS = (31)
SUCCESSFUL_ACCESS_ACE_FLAG = (64)
FAILED_ACCESS_ACE_FLAG = (128)
ACE_OBJECT_TYPE_PRESENT = 1
ACE_INHERITED_OBJECT_TYPE_PRESENT = 2
SECURITY_DESCRIPTOR_REVISION = (1)
SECURITY_DESCRIPTOR_REVISION1 = (1)
SECURITY_DESCRIPTOR_MIN_LENGTH = (20)
SE_OWNER_DEFAULTED = (1)
SE_GROUP_DEFAULTED = (2)
SE_DACL_PRESENT = (4)
SE_DACL_DEFAULTED = (8)
SE_SACL_PRESENT = (16)
SE_SACL_DEFAULTED = (32)
SE_DACL_AUTO_INHERIT_REQ = (256)
SE_SACL_AUTO_INHERIT_REQ = (512)
SE_DACL_AUTO_INHERITED = (1024)
SE_SACL_AUTO_INHERITED = (2048)
SE_DACL_PROTECTED = (4096)
SE_SACL_PROTECTED = (8192)
SE_SELF_RELATIVE = (32768)
ACCESS_OBJECT_GUID = 0
ACCESS_PROPERTY_SET_GUID = 1
ACCESS_PROPERTY_GUID = 2
ACCESS_MAX_LEVEL = 4
AUDIT_ALLOW_NO_PRIVILEGE = 1
ACCESS_DS_SOURCE_A = "Directory Service"
ACCESS_DS_OBJECT_TYPE_NAME_A = "Directory Service Object"
SE_PRIVILEGE_ENABLED_BY_DEFAULT = (1)
SE_PRIVILEGE_ENABLED = (2)
SE_PRIVILEGE_USED_FOR_ACCESS = (-2147483648)
PRIVILEGE_SET_ALL_NECESSARY = (1)
SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege"
SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege"
SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege"
SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege"
SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege"
SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege"
SE_TCB_NAME = "SeTcbPrivilege"
SE_SECURITY_NAME = "SeSecurityPrivilege"
SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege"
SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege"
SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege"
SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege"
SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege"
SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege"
SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege"
SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege"
SE_BACKUP_NAME = "SeBackupPrivilege"
SE_RESTORE_NAME = "SeRestorePrivilege"
SE_SHUTDOWN_NAME = "SeShutdownPrivilege"
SE_DEBUG_NAME = "SeDebugPrivilege"
SE_AUDIT_NAME = "SeAuditPrivilege"
SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege"
SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege"
SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege"
TOKEN_ASSIGN_PRIMARY = (1)
TOKEN_DUPLICATE = (2)
TOKEN_IMPERSONATE = (4)
TOKEN_QUERY = (8)
TOKEN_QUERY_SOURCE = (16)
TOKEN_ADJUST_PRIVILEGES = (32)
TOKEN_ADJUST_GROUPS = (64)
TOKEN_ADJUST_DEFAULT = (128)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |\
TOKEN_ASSIGN_PRIMARY |\
TOKEN_DUPLICATE |\
TOKEN_IMPERSONATE |\
TOKEN_QUERY |\
TOKEN_QUERY_SOURCE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_READ = (STANDARD_RIGHTS_READ |\
TOKEN_QUERY)
TOKEN_WRITE = (STANDARD_RIGHTS_WRITE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_EXECUTE = (STANDARD_RIGHTS_EXECUTE)
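These access rights are plain bit masks, so the composites above are built with | and tested with &; a small sketch using the values just defined:
requested = TOKEN_QUERY | TOKEN_ADJUST_PRIVILEGES     # 8 | 32 == 40
print((requested & TOKEN_ALL_ACCESS) == requested)    # True: both bits are in the composite
print(bool(TOKEN_READ & TOKEN_QUERY))                 # True: TOKEN_READ includes TOKEN_QUERY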
TOKEN_SOURCE_LENGTH = 8
# Token types
TokenPrimary = 1
TokenImpersonation = 2
TokenUser = 1
TokenGroups = 2
TokenPrivileges = 3
TokenOwner = 4
TokenPrimaryGroup = 5
TokenDefaultDacl = 6
TokenSource = 7
TokenType = 8
TokenImpersonationLevel = 9
TokenStatistics = 10
OWNER_SECURITY_INFORMATION = (0X00000001)
GROUP_SECURITY_INFORMATION = (0X00000002)
DACL_SECURITY_INFORMATION = (0X00000004)
SACL_SECURITY_INFORMATION = (0X00000008)
IMAGE_DOS_SIGNATURE = 23117
IMAGE_OS2_SIGNATURE = 17742
IMAGE_OS2_SIGNATURE_LE = 17740
IMAGE_VXD_SIGNATURE = 17740
IMAGE_NT_SIGNATURE = 17744
IMAGE_SIZEOF_FILE_HEADER = 20
IMAGE_FILE_RELOCS_STRIPPED = 1
IMAGE_FILE_EXECUTABLE_IMAGE = 2
IMAGE_FILE_LINE_NUMS_STRIPPED = 4
IMAGE_FILE_LOCAL_SYMS_STRIPPED = 8
IMAGE_FILE_AGGRESIVE_WS_TRIM = 16
IMAGE_FILE_LARGE_ADDRESS_AWARE = 32
IMAGE_FILE_BYTES_REVERSED_LO = 128
IMAGE_FILE_32BIT_MACHINE = 256
IMAGE_FILE_DEBUG_STRIPPED = 512
IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 1024
IMAGE_FILE_NET_RUN_FROM_SWAP = 2048
IMAGE_FILE_SYSTEM = 4096
IMAGE_FILE_DLL = 8192
IMAGE_FILE_UP_SYSTEM_ONLY = 16384
IMAGE_FILE_BYTES_REVERSED_HI = 32768
IMAGE_FILE_MACHINE_UNKNOWN = 0
IMAGE_FILE_MACHINE_I386 = 332
IMAGE_FILE_MACHINE_R3000 = 354
IMAGE_FILE_MACHINE_R4000 = 358
IMAGE_FILE_MACHINE_R10000 = 360
IMAGE_FILE_MACHINE_WCEMIPSV2 = 361
IMAGE_FILE_MACHINE_ALPHA = 388
IMAGE_FILE_MACHINE_POWERPC = 496
IMAGE_FILE_MACHINE_SH3 = 418
IMAGE_FILE_MACHINE_SH3E = 420
IMAGE_FILE_MACHINE_SH4 = 422
IMAGE_FILE_MACHINE_ARM = 448
IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16
IMAGE_SIZEOF_ROM_OPTIONAL_HEADER = 56
IMAGE_SIZEOF_STD_OPTIONAL_HEADER = 28
IMAGE_SIZEOF_NT_OPTIONAL_HEADER = 224
IMAGE_NT_OPTIONAL_HDR_MAGIC = 267
IMAGE_ROM_OPTIONAL_HDR_MAGIC = 263
IMAGE_SUBSYSTEM_UNKNOWN = 0
IMAGE_SUBSYSTEM_NATIVE = 1
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 4
IMAGE_SUBSYSTEM_OS2_CUI = 5
IMAGE_SUBSYSTEM_POSIX_CUI = 7
IMAGE_SUBSYSTEM_RESERVED8 = 8
IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 8192
IMAGE_DIRECTORY_ENTRY_EXPORT = 0
IMAGE_DIRECTORY_ENTRY_IMPORT = 1
IMAGE_DIRECTORY_ENTRY_RESOURCE = 2
IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3
IMAGE_DIRECTORY_ENTRY_SECURITY = 4
IMAGE_DIRECTORY_ENTRY_BASERELOC = 5
IMAGE_DIRECTORY_ENTRY_DEBUG = 6
IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7
IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8
IMAGE_DIRECTORY_ENTRY_TLS = 9
IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10
IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11
IMAGE_DIRECTORY_ENTRY_IAT = 12
IMAGE_SIZEOF_SHORT_NAME = 8
IMAGE_SIZEOF_SECTION_HEADER = 40
IMAGE_SCN_TYPE_NO_PAD = 8
IMAGE_SCN_CNT_CODE = 32
IMAGE_SCN_CNT_INITIALIZED_DATA = 64
IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128
IMAGE_SCN_LNK_OTHER = 256
IMAGE_SCN_LNK_INFO = 512
IMAGE_SCN_LNK_REMOVE = 2048
IMAGE_SCN_LNK_COMDAT = 4096
IMAGE_SCN_MEM_FARDATA = 32768
IMAGE_SCN_MEM_PURGEABLE = 131072
IMAGE_SCN_MEM_16BIT = 131072
IMAGE_SCN_MEM_LOCKED = 262144
IMAGE_SCN_MEM_PRELOAD = 524288
IMAGE_SCN_ALIGN_1BYTES = 1048576
IMAGE_SCN_ALIGN_2BYTES = 2097152
IMAGE_SCN_ALIGN_4BYTES = 3145728
IMAGE_SCN_ALIGN_8BYTES = 4194304
IMAGE_SCN_ALIGN_16BYTES = 5242880
IMAGE_SCN_ALIGN_32BYTES = 6291456
IMAGE_SCN_ALIGN_64BYTES = 7340032
IMAGE_SCN_LNK_NRELOC_OVFL = 16777216
IMAGE_SCN_MEM_DISCARDABLE = 33554432
IMAGE_SCN_MEM_NOT_CACHED = 67108864
IMAGE_SCN_MEM_NOT_PAGED = 134217728
IMAGE_SCN_MEM_SHARED = 268435456
IMAGE_SCN_MEM_EXECUTE = 536870912
IMAGE_SCN_MEM_READ = 1073741824
IMAGE_SCN_MEM_WRITE = -2147483648
IMAGE_SCN_SCALE_INDEX = 1
IMAGE_SIZEOF_SYMBOL = 18
IMAGE_SYM_TYPE_NULL = 0
IMAGE_SYM_TYPE_VOID = 1
IMAGE_SYM_TYPE_CHAR = 2
IMAGE_SYM_TYPE_SHORT = 3
IMAGE_SYM_TYPE_INT = 4
IMAGE_SYM_TYPE_LONG = 5
IMAGE_SYM_TYPE_FLOAT = 6
IMAGE_SYM_TYPE_DOUBLE = 7
IMAGE_SYM_TYPE_STRUCT = 8
IMAGE_SYM_TYPE_UNION = 9
IMAGE_SYM_TYPE_ENUM = 10
IMAGE_SYM_TYPE_MOE = 11
IMAGE_SYM_TYPE_BYTE = 12
IMAGE_SYM_TYPE_WORD = 13
IMAGE_SYM_TYPE_UINT = 14
IMAGE_SYM_TYPE_DWORD = 15
IMAGE_SYM_TYPE_PCODE = 32768
IMAGE_SYM_DTYPE_NULL = 0
IMAGE_SYM_DTYPE_POINTER = 1
IMAGE_SYM_DTYPE_FUNCTION = 2
IMAGE_SYM_DTYPE_ARRAY = 3
IMAGE_SYM_CLASS_NULL = 0
IMAGE_SYM_CLASS_AUTOMATIC = 1
IMAGE_SYM_CLASS_EXTERNAL = 2
IMAGE_SYM_CLASS_STATIC = 3
IMAGE_SYM_CLASS_REGISTER = 4
IMAGE_SYM_CLASS_EXTERNAL_DEF = 5
IMAGE_SYM_CLASS_LABEL = 6
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7
IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8
IMAGE_SYM_CLASS_ARGUMENT = 9
IMAGE_SYM_CLASS_STRUCT_TAG = 10
IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11
IMAGE_SYM_CLASS_UNION_TAG = 12
IMAGE_SYM_CLASS_TYPE_DEFINITION = 13
IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14
IMAGE_SYM_CLASS_ENUM_TAG = 15
IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16
IMAGE_SYM_CLASS_REGISTER_PARAM = 17
IMAGE_SYM_CLASS_BIT_FIELD = 18
IMAGE_SYM_CLASS_FAR_EXTERNAL = 68
IMAGE_SYM_CLASS_BLOCK = 100
IMAGE_SYM_CLASS_FUNCTION = 101
IMAGE_SYM_CLASS_END_OF_STRUCT = 102
IMAGE_SYM_CLASS_FILE = 103
IMAGE_SYM_CLASS_SECTION = 104
IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105
N_BTMASK = 15
N_TMASK = 48
N_TMASK1 = 192
N_TMASK2 = 240
N_BTSHFT = 4
N_TSHIFT = 2
IMAGE_SIZEOF_AUX_SYMBOL = 18
IMAGE_COMDAT_SELECT_NODUPLICATES = 1
IMAGE_COMDAT_SELECT_ANY = 2
IMAGE_COMDAT_SELECT_SAME_SIZE = 3
IMAGE_COMDAT_SELECT_EXACT_MATCH = 4
IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5
IMAGE_COMDAT_SELECT_LARGEST = 6
IMAGE_COMDAT_SELECT_NEWEST = 7
IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1
IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2
IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
IMAGE_SIZEOF_RELOCATION = 10
IMAGE_REL_I386_ABSOLUTE = 0
IMAGE_REL_I386_DIR16 = 1
IMAGE_REL_I386_REL16 = 2
IMAGE_REL_I386_DIR32 = 6
IMAGE_REL_I386_DIR32NB = 7
IMAGE_REL_I386_SEG12 = 9
IMAGE_REL_I386_SECTION = 10
IMAGE_REL_I386_SECREL = 11
IMAGE_REL_I386_REL32 = 20
IMAGE_REL_MIPS_ABSOLUTE = 0
IMAGE_REL_MIPS_REFHALF = 1
IMAGE_REL_MIPS_REFWORD = 2
IMAGE_REL_MIPS_JMPADDR = 3
IMAGE_REL_MIPS_REFHI = 4
IMAGE_REL_MIPS_REFLO = 5
IMAGE_REL_MIPS_GPREL = 6
IMAGE_REL_MIPS_LITERAL = 7
IMAGE_REL_MIPS_SECTION = 10
IMAGE_REL_MIPS_SECREL = 11
IMAGE_REL_MIPS_SECRELLO = 12
IMAGE_REL_MIPS_SECRELHI = 13
IMAGE_REL_MIPS_REFWORDNB = 34
IMAGE_REL_MIPS_PAIR = 37
IMAGE_REL_ALPHA_ABSOLUTE = 0
IMAGE_REL_ALPHA_REFLONG = 1
IMAGE_REL_ALPHA_REFQUAD = 2
IMAGE_REL_ALPHA_GPREL32 = 3
IMAGE_REL_ALPHA_LITERAL = 4
IMAGE_REL_ALPHA_LITUSE = 5
IMAGE_REL_ALPHA_GPDISP = 6
IMAGE_REL_ALPHA_BRADDR = 7
IMAGE_REL_ALPHA_HINT = 8
IMAGE_REL_ALPHA_INLINE_REFLONG = 9
IMAGE_REL_ALPHA_REFHI = 10
IMAGE_REL_ALPHA_REFLO = 11
IMAGE_REL_ALPHA_PAIR = 12
IMAGE_REL_ALPHA_MATCH = 13
IMAGE_REL_ALPHA_SECTION = 14
IMAGE_REL_ALPHA_SECREL = 15
IMAGE_REL_ALPHA_REFLONGNB = 16
IMAGE_REL_ALPHA_SECRELLO = 17
IMAGE_REL_ALPHA_SECRELHI = 18
IMAGE_REL_PPC_ABSOLUTE = 0
IMAGE_REL_PPC_ADDR64 = 1
IMAGE_REL_PPC_ADDR32 = 2
IMAGE_REL_PPC_ADDR24 = 3
IMAGE_REL_PPC_ADDR16 = 4
IMAGE_REL_PPC_ADDR14 = 5
IMAGE_REL_PPC_REL24 = 6
IMAGE_REL_PPC_REL14 = 7
IMAGE_REL_PPC_TOCREL16 = 8
IMAGE_REL_PPC_TOCREL14 = 9
IMAGE_REL_PPC_ADDR32NB = 10
IMAGE_REL_PPC_SECREL = 11
IMAGE_REL_PPC_SECTION = 12
IMAGE_REL_PPC_IFGLUE = 13
IMAGE_REL_PPC_IMGLUE = 14
IMAGE_REL_PPC_SECREL16 = 15
IMAGE_REL_PPC_REFHI = 16
IMAGE_REL_PPC_REFLO = 17
IMAGE_REL_PPC_PAIR = 18
IMAGE_REL_PPC_SECRELLO = 19
IMAGE_REL_PPC_SECRELHI = 20
IMAGE_REL_PPC_TYPEMASK = 255
IMAGE_REL_PPC_NEG = 256
IMAGE_REL_PPC_BRTAKEN = 512
IMAGE_REL_PPC_BRNTAKEN = 1024
IMAGE_REL_PPC_TOCDEFN = 2048
IMAGE_REL_SH3_ABSOLUTE = 0
IMAGE_REL_SH3_DIRECT16 = 1
IMAGE_REL_SH3_DIRECT32 = 2
IMAGE_REL_SH3_DIRECT8 = 3
IMAGE_REL_SH3_DIRECT8_WORD = 4
IMAGE_REL_SH3_DIRECT8_LONG = 5
IMAGE_REL_SH3_DIRECT4 = 6
IMAGE_REL_SH3_DIRECT4_WORD = 7
IMAGE_REL_SH3_DIRECT4_LONG = 8
IMAGE_REL_SH3_PCREL8_WORD = 9
IMAGE_REL_SH3_PCREL8_LONG = 10
IMAGE_REL_SH3_PCREL12_WORD = 11
IMAGE_REL_SH3_STARTOF_SECTION = 12
IMAGE_REL_SH3_SIZEOF_SECTION = 13
IMAGE_REL_SH3_SECTION = 14
IMAGE_REL_SH3_SECREL = 15
IMAGE_REL_SH3_DIRECT32_NB = 16
IMAGE_SIZEOF_LINENUMBER = 6
IMAGE_SIZEOF_BASE_RELOCATION = 8
IMAGE_REL_BASED_ABSOLUTE = 0
IMAGE_REL_BASED_HIGH = 1
IMAGE_REL_BASED_LOW = 2
IMAGE_REL_BASED_HIGHLOW = 3
IMAGE_REL_BASED_HIGHADJ = 4
IMAGE_REL_BASED_MIPS_JMPADDR = 5
IMAGE_REL_BASED_SECTION = 6
IMAGE_REL_BASED_REL32 = 7
IMAGE_ARCHIVE_START_SIZE = 8
IMAGE_ARCHIVE_START = "!<arch>\n"
IMAGE_ARCHIVE_END = "`\n"
IMAGE_ARCHIVE_PAD = "\n"
IMAGE_ARCHIVE_LINKER_MEMBER = "/ "
IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR = 60
IMAGE_ORDINAL_FLAG = -2147483648
IMAGE_RESOURCE_NAME_IS_STRING = -2147483648
IMAGE_RESOURCE_DATA_IS_DIRECTORY = -2147483648
IMAGE_DEBUG_TYPE_UNKNOWN = 0
IMAGE_DEBUG_TYPE_COFF = 1
IMAGE_DEBUG_TYPE_CODEVIEW = 2
IMAGE_DEBUG_TYPE_FPO = 3
IMAGE_DEBUG_TYPE_MISC = 4
IMAGE_DEBUG_TYPE_EXCEPTION = 5
IMAGE_DEBUG_TYPE_FIXUP = 6
IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7
IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8
IMAGE_DEBUG_TYPE_BORLAND = 9
FRAME_FPO = 0
FRAME_TRAP = 1
FRAME_TSS = 2
FRAME_NONFPO = 3
SIZEOF_RFPO_DATA = 16
IMAGE_DEBUG_MISC_EXENAME = 1
IMAGE_SEPARATE_DEBUG_SIGNATURE = 18756
IMAGE_SEPARATE_DEBUG_FLAGS_MASK = 32768
IMAGE_SEPARATE_DEBUG_MISMATCH = 32768
# Included from string.h
_NLSCMPERROR = 2147483647
NULL = 0
HEAP_NO_SERIALIZE = 1
HEAP_GROWABLE = 2
HEAP_GENERATE_EXCEPTIONS = 4
HEAP_ZERO_MEMORY = 8
HEAP_REALLOC_IN_PLACE_ONLY = 16
HEAP_TAIL_CHECKING_ENABLED = 32
HEAP_FREE_CHECKING_ENABLED = 64
HEAP_DISABLE_COALESCE_ON_FREE = 128
HEAP_CREATE_ALIGN_16 = 65536
HEAP_CREATE_ENABLE_TRACING = 131072
HEAP_MAXIMUM_TAG = 4095
HEAP_PSEUDO_TAG_FLAG = 32768
HEAP_TAG_SHIFT = 16
IS_TEXT_UNICODE_ASCII16 = 1
IS_TEXT_UNICODE_REVERSE_ASCII16 = 16
IS_TEXT_UNICODE_STATISTICS = 2
IS_TEXT_UNICODE_REVERSE_STATISTICS = 32
IS_TEXT_UNICODE_CONTROLS = 4
IS_TEXT_UNICODE_REVERSE_CONTROLS = 64
IS_TEXT_UNICODE_SIGNATURE = 8
IS_TEXT_UNICODE_REVERSE_SIGNATURE = 128
IS_TEXT_UNICODE_ILLEGAL_CHARS = 256
IS_TEXT_UNICODE_ODD_LENGTH = 512
IS_TEXT_UNICODE_DBCS_LEADBYTE = 1024
IS_TEXT_UNICODE_NULL_BYTES = 4096
IS_TEXT_UNICODE_UNICODE_MASK = 15
IS_TEXT_UNICODE_REVERSE_MASK = 240
IS_TEXT_UNICODE_NOT_UNICODE_MASK = 3840
IS_TEXT_UNICODE_NOT_ASCII_MASK = 61440
COMPRESSION_FORMAT_NONE = (0)
COMPRESSION_FORMAT_DEFAULT = (1)
COMPRESSION_FORMAT_LZNT1 = (2)
COMPRESSION_ENGINE_STANDARD = (0)
COMPRESSION_ENGINE_MAXIMUM = (256)
MESSAGE_RESOURCE_UNICODE = 1
RTL_CRITSECT_TYPE = 0
RTL_RESOURCE_TYPE = 1
SEF_DACL_AUTO_INHERIT = 1
SEF_SACL_AUTO_INHERIT = 2
SEF_DEFAULT_DESCRIPTOR_FOR_OBJECT = 4
SEF_AVOID_PRIVILEGE_CHECK = 8
DLL_PROCESS_ATTACH = 1
DLL_THREAD_ATTACH = 2
DLL_THREAD_DETACH = 3
DLL_PROCESS_DETACH = 0
EVENTLOG_SEQUENTIAL_READ = 0X0001
EVENTLOG_SEEK_READ = 0X0002
EVENTLOG_FORWARDS_READ = 0X0004
EVENTLOG_BACKWARDS_READ = 0X0008
EVENTLOG_SUCCESS = 0X0000
EVENTLOG_ERROR_TYPE = 1
EVENTLOG_WARNING_TYPE = 2
EVENTLOG_INFORMATION_TYPE = 4
EVENTLOG_AUDIT_SUCCESS = 8
EVENTLOG_AUDIT_FAILURE = 16
EVENTLOG_START_PAIRED_EVENT = 1
EVENTLOG_END_PAIRED_EVENT = 2
EVENTLOG_END_ALL_PAIRED_EVENTS = 4
EVENTLOG_PAIRED_EVENT_ACTIVE = 8
EVENTLOG_PAIRED_EVENT_INACTIVE = 16
KEY_QUERY_VALUE = (1)
KEY_SET_VALUE = (2)
KEY_CREATE_SUB_KEY = (4)
KEY_ENUMERATE_SUB_KEYS = (8)
KEY_NOTIFY = (16)
KEY_CREATE_LINK = (32)
KEY_READ = ((STANDARD_RIGHTS_READ |\
KEY_QUERY_VALUE |\
KEY_ENUMERATE_SUB_KEYS |\
KEY_NOTIFY) \
& \
(~SYNCHRONIZE))
KEY_WRITE = ((STANDARD_RIGHTS_WRITE |\
KEY_SET_VALUE |\
KEY_CREATE_SUB_KEY) \
& \
(~SYNCHRONIZE))
KEY_EXECUTE = ((KEY_READ) \
& \
(~SYNCHRONIZE))
KEY_ALL_ACCESS = ((STANDARD_RIGHTS_ALL |\
KEY_QUERY_VALUE |\
KEY_SET_VALUE |\
KEY_CREATE_SUB_KEY |\
KEY_ENUMERATE_SUB_KEYS |\
KEY_NOTIFY |\
KEY_CREATE_LINK) \
& \
(~SYNCHRONIZE))
REG_OPTION_RESERVED = (0)
REG_OPTION_NON_VOLATILE = (0)
REG_OPTION_VOLATILE = (1)
REG_OPTION_CREATE_LINK = (2)
REG_OPTION_BACKUP_RESTORE = (4)
REG_OPTION_OPEN_LINK = (8)
REG_LEGAL_OPTION = \
(REG_OPTION_RESERVED |\
REG_OPTION_NON_VOLATILE |\
REG_OPTION_VOLATILE |\
REG_OPTION_CREATE_LINK |\
REG_OPTION_BACKUP_RESTORE |\
REG_OPTION_OPEN_LINK)
REG_CREATED_NEW_KEY = (1)
REG_OPENED_EXISTING_KEY = (2)
REG_WHOLE_HIVE_VOLATILE = (1)
REG_REFRESH_HIVE = (2)
REG_NO_LAZY_FLUSH = (4)
REG_NOTIFY_CHANGE_NAME = (1)
REG_NOTIFY_CHANGE_ATTRIBUTES = (2)
REG_NOTIFY_CHANGE_LAST_SET = (4)
REG_NOTIFY_CHANGE_SECURITY = (8)
REG_LEGAL_CHANGE_FILTER = \
(REG_NOTIFY_CHANGE_NAME |\
REG_NOTIFY_CHANGE_ATTRIBUTES |\
REG_NOTIFY_CHANGE_LAST_SET |\
REG_NOTIFY_CHANGE_SECURITY)
REG_NONE = ( 0 )
REG_SZ = ( 1 )
REG_EXPAND_SZ = ( 2 )
REG_BINARY = ( 3 )
REG_DWORD = ( 4 )
REG_DWORD_LITTLE_ENDIAN = ( 4 )
REG_DWORD_BIG_ENDIAN = ( 5 )
REG_LINK = ( 6 )
REG_MULTI_SZ = ( 7 )
REG_RESOURCE_LIST = ( 8 )
REG_FULL_RESOURCE_DESCRIPTOR = ( 9 )
REG_RESOURCE_REQUIREMENTS_LIST = ( 10 )
SERVICE_KERNEL_DRIVER = 1
SERVICE_FILE_SYSTEM_DRIVER = 2
SERVICE_ADAPTER = 4
SERVICE_RECOGNIZER_DRIVER = 8
SERVICE_DRIVER = (SERVICE_KERNEL_DRIVER | \
SERVICE_FILE_SYSTEM_DRIVER | \
SERVICE_RECOGNIZER_DRIVER)
SERVICE_WIN32_OWN_PROCESS = 16
SERVICE_WIN32_SHARE_PROCESS = 32
SERVICE_WIN32 = (SERVICE_WIN32_OWN_PROCESS | \
SERVICE_WIN32_SHARE_PROCESS)
SERVICE_INTERACTIVE_PROCESS = 256
SERVICE_TYPE_ALL = (SERVICE_WIN32 | \
SERVICE_ADAPTER | \
SERVICE_DRIVER | \
SERVICE_INTERACTIVE_PROCESS)
SERVICE_BOOT_START = 0
SERVICE_SYSTEM_START = 1
SERVICE_AUTO_START = 2
SERVICE_DEMAND_START = 3
SERVICE_DISABLED = 4
SERVICE_ERROR_IGNORE = 0
SERVICE_ERROR_NORMAL = 1
SERVICE_ERROR_SEVERE = 2
SERVICE_ERROR_CRITICAL = 3
TAPE_ERASE_SHORT = 0
TAPE_ERASE_LONG = 1
TAPE_LOAD = 0
TAPE_UNLOAD = 1
TAPE_TENSION = 2
TAPE_LOCK = 3
TAPE_UNLOCK = 4
TAPE_FORMAT = 5
TAPE_SETMARKS = 0
TAPE_FILEMARKS = 1
TAPE_SHORT_FILEMARKS = 2
TAPE_LONG_FILEMARKS = 3
TAPE_ABSOLUTE_POSITION = 0
TAPE_LOGICAL_POSITION = 1
TAPE_PSEUDO_LOGICAL_POSITION = 2
TAPE_REWIND = 0
TAPE_ABSOLUTE_BLOCK = 1
TAPE_LOGICAL_BLOCK = 2
TAPE_PSEUDO_LOGICAL_BLOCK = 3
TAPE_SPACE_END_OF_DATA = 4
TAPE_SPACE_RELATIVE_BLOCKS = 5
TAPE_SPACE_FILEMARKS = 6
TAPE_SPACE_SEQUENTIAL_FMKS = 7
TAPE_SPACE_SETMARKS = 8
TAPE_SPACE_SEQUENTIAL_SMKS = 9
TAPE_DRIVE_FIXED = 1
TAPE_DRIVE_SELECT = 2
TAPE_DRIVE_INITIATOR = 4
TAPE_DRIVE_ERASE_SHORT = 16
TAPE_DRIVE_ERASE_LONG = 32
TAPE_DRIVE_ERASE_BOP_ONLY = 64
TAPE_DRIVE_ERASE_IMMEDIATE = 128
TAPE_DRIVE_TAPE_CAPACITY = 256
TAPE_DRIVE_TAPE_REMAINING = 512
TAPE_DRIVE_FIXED_BLOCK = 1024
TAPE_DRIVE_VARIABLE_BLOCK = 2048
TAPE_DRIVE_WRITE_PROTECT = 4096
TAPE_DRIVE_EOT_WZ_SIZE = 8192
TAPE_DRIVE_ECC = 65536
TAPE_DRIVE_COMPRESSION = 131072
TAPE_DRIVE_PADDING = 262144
TAPE_DRIVE_REPORT_SMKS = 524288
TAPE_DRIVE_GET_ABSOLUTE_BLK = 1048576
TAPE_DRIVE_GET_LOGICAL_BLK = 2097152
TAPE_DRIVE_SET_EOT_WZ_SIZE = 4194304
TAPE_DRIVE_EJECT_MEDIA = 16777216
TAPE_DRIVE_RESERVED_BIT = -2147483648
TAPE_DRIVE_LOAD_UNLOAD = -2147483647
TAPE_DRIVE_TENSION = -2147483646
TAPE_DRIVE_LOCK_UNLOCK = -2147483644
TAPE_DRIVE_REWIND_IMMEDIATE = -2147483640
TAPE_DRIVE_SET_BLOCK_SIZE = -2147483632
TAPE_DRIVE_LOAD_UNLD_IMMED = -2147483616
TAPE_DRIVE_TENSION_IMMED = -2147483584
TAPE_DRIVE_LOCK_UNLK_IMMED = -2147483520
TAPE_DRIVE_SET_ECC = -2147483392
TAPE_DRIVE_SET_COMPRESSION = -2147483136
TAPE_DRIVE_SET_PADDING = -2147482624
TAPE_DRIVE_SET_REPORT_SMKS = -2147481600
TAPE_DRIVE_ABSOLUTE_BLK = -2147479552
TAPE_DRIVE_ABS_BLK_IMMED = -2147475456
TAPE_DRIVE_LOGICAL_BLK = -2147467264
TAPE_DRIVE_LOG_BLK_IMMED = -2147450880
TAPE_DRIVE_END_OF_DATA = -2147418112
TAPE_DRIVE_RELATIVE_BLKS = -2147352576
TAPE_DRIVE_FILEMARKS = -2147221504
TAPE_DRIVE_SEQUENTIAL_FMKS = -2146959360
TAPE_DRIVE_SETMARKS = -2146435072
TAPE_DRIVE_SEQUENTIAL_SMKS = -2145386496
TAPE_DRIVE_REVERSE_POSITION = -2143289344
TAPE_DRIVE_SPACE_IMMEDIATE = -2139095040
TAPE_DRIVE_WRITE_SETMARKS = -2130706432
TAPE_DRIVE_WRITE_FILEMARKS = -2113929216
TAPE_DRIVE_WRITE_SHORT_FMKS = -2080374784
TAPE_DRIVE_WRITE_LONG_FMKS = -2013265920
TAPE_DRIVE_WRITE_MARK_IMMED = -1879048192
TAPE_DRIVE_FORMAT = -1610612736
TAPE_DRIVE_FORMAT_IMMEDIATE = -1073741824
TAPE_DRIVE_HIGH_FEATURES = -2147483648
TAPE_FIXED_PARTITIONS = 0
TAPE_SELECT_PARTITIONS = 1
TAPE_INITIATOR_PARTITIONS = 2
| 30.901729
| 103
| 0.76317
|
# Generated by h2py from \mssdk\include\winnt.h
APPLICATION_ERROR_MASK = 536870912
ERROR_SEVERITY_SUCCESS = 0
ERROR_SEVERITY_INFORMATIONAL = 1073741824
ERROR_SEVERITY_WARNING = -2147483648
ERROR_SEVERITY_ERROR = -1073741824
MINCHAR = 128
MAXCHAR = 127
MINSHORT = 32768
MAXSHORT = 32767
MINLONG = -2147483648
MAXLONG = 2147483647
MAXBYTE = 255
MAXWORD = 65535
MAXDWORD = -1
LANG_NEUTRAL = 0
LANG_AFRIKAANS = 54
LANG_ALBANIAN = 28
LANG_ARABIC = 1
LANG_BASQUE = 45
LANG_BELARUSIAN = 35
LANG_BULGARIAN = 2
LANG_CATALAN = 3
LANG_CHINESE = 4
LANG_CROATIAN = 26
LANG_CZECH = 5
LANG_DANISH = 6
LANG_DUTCH = 19
LANG_ENGLISH = 9
LANG_ESTONIAN = 37
LANG_FAEROESE = 56
LANG_FARSI = 41
LANG_FINNISH = 11
LANG_FRENCH = 12
LANG_GERMAN = 7
LANG_GREEK = 8
LANG_HEBREW = 13
LANG_HINDI = 57
LANG_HUNGARIAN = 14
LANG_ICELANDIC = 15
LANG_INDONESIAN = 33
LANG_ITALIAN = 16
LANG_JAPANESE = 17
LANG_KOREAN = 18
LANG_LATVIAN = 38
LANG_LITHUANIAN = 39
LANG_MACEDONIAN = 47
LANG_MALAY = 62
LANG_NORWEGIAN = 20
LANG_POLISH = 21
LANG_PORTUGUESE = 22
LANG_ROMANIAN = 24
LANG_RUSSIAN = 25
LANG_SERBIAN = 26
LANG_SLOVAK = 27
LANG_SLOVENIAN = 36
LANG_SPANISH = 10
LANG_SWAHILI = 65
LANG_SWEDISH = 29
LANG_THAI = 30
LANG_TURKISH = 31
LANG_UKRAINIAN = 34
LANG_VIETNAMESE = 42
SUBLANG_NEUTRAL = 0
SUBLANG_DEFAULT = 1
SUBLANG_SYS_DEFAULT = 2
SUBLANG_ARABIC_SAUDI_ARABIA = 1
SUBLANG_ARABIC_IRAQ = 2
SUBLANG_ARABIC_EGYPT = 3
SUBLANG_ARABIC_LIBYA = 4
SUBLANG_ARABIC_ALGERIA = 5
SUBLANG_ARABIC_MOROCCO = 6
SUBLANG_ARABIC_TUNISIA = 7
SUBLANG_ARABIC_OMAN = 8
SUBLANG_ARABIC_YEMEN = 9
SUBLANG_ARABIC_SYRIA = 10
SUBLANG_ARABIC_JORDAN = 11
SUBLANG_ARABIC_LEBANON = 12
SUBLANG_ARABIC_KUWAIT = 13
SUBLANG_ARABIC_UAE = 14
SUBLANG_ARABIC_BAHRAIN = 15
SUBLANG_ARABIC_QATAR = 16
SUBLANG_CHINESE_TRADITIONAL = 1
SUBLANG_CHINESE_SIMPLIFIED = 2
SUBLANG_CHINESE_HONGKONG = 3
SUBLANG_CHINESE_SINGAPORE = 4
SUBLANG_CHINESE_MACAU = 5
SUBLANG_DUTCH = 1
SUBLANG_DUTCH_BELGIAN = 2
SUBLANG_ENGLISH_US = 1
SUBLANG_ENGLISH_UK = 2
SUBLANG_ENGLISH_AUS = 3
SUBLANG_ENGLISH_CAN = 4
SUBLANG_ENGLISH_NZ = 5
SUBLANG_ENGLISH_EIRE = 6
SUBLANG_ENGLISH_SOUTH_AFRICA = 7
SUBLANG_ENGLISH_JAMAICA = 8
SUBLANG_ENGLISH_CARIBBEAN = 9
SUBLANG_ENGLISH_BELIZE = 10
SUBLANG_ENGLISH_TRINIDAD = 11
SUBLANG_ENGLISH_ZIMBABWE = 12
SUBLANG_ENGLISH_PHILIPPINES = 13
SUBLANG_FRENCH = 1
SUBLANG_FRENCH_BELGIAN = 2
SUBLANG_FRENCH_CANADIAN = 3
SUBLANG_FRENCH_SWISS = 4
SUBLANG_FRENCH_LUXEMBOURG = 5
SUBLANG_FRENCH_MONACO = 6
SUBLANG_GERMAN = 1
SUBLANG_GERMAN_SWISS = 2
SUBLANG_GERMAN_AUSTRIAN = 3
SUBLANG_GERMAN_LUXEMBOURG = 4
SUBLANG_GERMAN_LIECHTENSTEIN = 5
SUBLANG_ITALIAN = 1
SUBLANG_ITALIAN_SWISS = 2
SUBLANG_KOREAN = 1
SUBLANG_KOREAN_JOHAB = 2
SUBLANG_LITHUANIAN = 1
SUBLANG_LITHUANIAN_CLASSIC = 2
SUBLANG_MALAY_MALAYSIA = 1
SUBLANG_MALAY_BRUNEI_DARUSSALAM = 2
SUBLANG_NORWEGIAN_BOKMAL = 1
SUBLANG_NORWEGIAN_NYNORSK = 2
SUBLANG_PORTUGUESE = 2
SUBLANG_PORTUGUESE_BRAZILIAN = 1
SUBLANG_SERBIAN_LATIN = 2
SUBLANG_SERBIAN_CYRILLIC = 3
SUBLANG_SPANISH = 1
SUBLANG_SPANISH_MEXICAN = 2
SUBLANG_SPANISH_MODERN = 3
SUBLANG_SPANISH_GUATEMALA = 4
SUBLANG_SPANISH_COSTA_RICA = 5
SUBLANG_SPANISH_PANAMA = 6
SUBLANG_SPANISH_DOMINICAN_REPUBLIC = 7
SUBLANG_SPANISH_VENEZUELA = 8
SUBLANG_SPANISH_COLOMBIA = 9
SUBLANG_SPANISH_PERU = 10
SUBLANG_SPANISH_ARGENTINA = 11
SUBLANG_SPANISH_ECUADOR = 12
SUBLANG_SPANISH_CHILE = 13
SUBLANG_SPANISH_URUGUAY = 14
SUBLANG_SPANISH_PARAGUAY = 15
SUBLANG_SPANISH_BOLIVIA = 16
SUBLANG_SPANISH_EL_SALVADOR = 17
SUBLANG_SPANISH_HONDURAS = 18
SUBLANG_SPANISH_NICARAGUA = 19
SUBLANG_SPANISH_PUERTO_RICO = 20
SUBLANG_SWEDISH = 1
SUBLANG_SWEDISH_FINLAND = 2
SORT_DEFAULT = 0
SORT_JAPANESE_XJIS = 0
SORT_JAPANESE_UNICODE = 1
SORT_CHINESE_BIG5 = 0
SORT_CHINESE_PRCP = 0
SORT_CHINESE_UNICODE = 1
SORT_CHINESE_PRC = 2
SORT_KOREAN_KSC = 0
SORT_KOREAN_UNICODE = 1
SORT_GERMAN_PHONE_BOOK = 1
def PRIMARYLANGID(lgid): return ((WORD )(lgid) & 1023)
def SUBLANGID(lgid): return ((WORD )(lgid) >> 10)
NLS_VALID_LOCALE_MASK = 1048575
def LANGIDFROMLCID(lcid): return ((WORD )(lcid))
def SORTIDFROMLCID(lcid): return ((WORD )((((DWORD)(lcid)) & NLS_VALID_LOCALE_MASK) >> 16))
def UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_LOCAL_VARIABLE(V): return (V)
def UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_LOCAL_VARIABLE(V): return (V)
MAXIMUM_WAIT_OBJECTS = 64
MAXIMUM_SUSPEND_COUNT = MAXCHAR
EXCEPTION_NONCONTINUABLE = 1
EXCEPTION_MAXIMUM_PARAMETERS = 15
PROCESS_TERMINATE = (1)
PROCESS_CREATE_THREAD = (2)
PROCESS_VM_OPERATION = (8)
PROCESS_VM_READ = (16)
PROCESS_VM_WRITE = (32)
PROCESS_DUP_HANDLE = (64)
PROCESS_CREATE_PROCESS = (128)
PROCESS_SET_QUOTA = (256)
PROCESS_SET_INFORMATION = (512)
PROCESS_QUERY_INFORMATION = (1024)
MAXIMUM_PROCESSORS = 32
THREAD_TERMINATE = (1)
THREAD_SUSPEND_RESUME = (2)
THREAD_GET_CONTEXT = (8)
THREAD_SET_CONTEXT = (16)
THREAD_SET_INFORMATION = (32)
THREAD_QUERY_INFORMATION = (64)
THREAD_SET_THREAD_TOKEN = (128)
THREAD_IMPERSONATE = (256)
THREAD_DIRECT_IMPERSONATION = (512)
JOB_OBJECT_ASSIGN_PROCESS = (1)
JOB_OBJECT_SET_ATTRIBUTES = (2)
JOB_OBJECT_QUERY = (4)
JOB_OBJECT_TERMINATE = (8)
TLS_MINIMUM_AVAILABLE = 64
THREAD_BASE_PRIORITY_LOWRT = 15
THREAD_BASE_PRIORITY_MAX = 2
THREAD_BASE_PRIORITY_MIN = -2
THREAD_BASE_PRIORITY_IDLE = -15
JOB_OBJECT_LIMIT_WORKINGSET = 1
JOB_OBJECT_LIMIT_PROCESS_TIME = 2
JOB_OBJECT_LIMIT_JOB_TIME = 4
JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8
JOB_OBJECT_LIMIT_AFFINITY = 16
JOB_OBJECT_LIMIT_PRIORITY_CLASS = 32
JOB_OBJECT_LIMIT_VALID_FLAGS = 63
EVENT_MODIFY_STATE = 2
MUTANT_QUERY_STATE = 1
SEMAPHORE_MODIFY_STATE = 2
TIME_ZONE_ID_UNKNOWN = 0
TIME_ZONE_ID_STANDARD = 1
TIME_ZONE_ID_DAYLIGHT = 2
PROCESSOR_INTEL_386 = 386
PROCESSOR_INTEL_486 = 486
PROCESSOR_INTEL_PENTIUM = 586
PROCESSOR_MIPS_R4000 = 4000
PROCESSOR_ALPHA_21064 = 21064
PROCESSOR_HITACHI_SH3 = 10003
PROCESSOR_HITACHI_SH3E = 10004
PROCESSOR_HITACHI_SH4 = 10005
PROCESSOR_MOTOROLA_821 = 821
PROCESSOR_ARM_7TDMI = 70001
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SH = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_UNKNOWN = 65535
PF_FLOATING_POINT_PRECISION_ERRATA = 0
PF_FLOATING_POINT_EMULATED = 1
PF_COMPARE_EXCHANGE_DOUBLE = 2
PF_MMX_INSTRUCTIONS_AVAILABLE = 3
PF_PPC_MOVEMEM_64BIT_OK = 4
PF_ALPHA_BYTE_INSTRUCTIONS = 5
SECTION_QUERY = 1
SECTION_MAP_WRITE = 2
SECTION_MAP_READ = 4
SECTION_MAP_EXECUTE = 8
SECTION_EXTEND_SIZE = 16
PAGE_NOACCESS = 1
PAGE_READONLY = 2
PAGE_READWRITE = 4
PAGE_WRITECOPY = 8
PAGE_EXECUTE = 16
PAGE_EXECUTE_READ = 32
PAGE_EXECUTE_READWRITE = 64
PAGE_EXECUTE_WRITECOPY = 128
PAGE_GUARD = 256
PAGE_NOCACHE = 512
MEM_COMMIT = 4096
MEM_RESERVE = 8192
MEM_DECOMMIT = 16384
MEM_RELEASE = 32768
MEM_FREE = 65536
MEM_PRIVATE = 131072
MEM_MAPPED = 262144
MEM_RESET = 524288
MEM_TOP_DOWN = 1048576
MEM_4MB_PAGES = -2147483648
SEC_FILE = 8388608
SEC_IMAGE = 16777216
SEC_VLM = 33554432
SEC_RESERVE = 67108864
SEC_COMMIT = 134217728
SEC_NOCACHE = 268435456
MEM_IMAGE = SEC_IMAGE
FILE_READ_DATA = ( 1 )
FILE_LIST_DIRECTORY = ( 1 )
FILE_WRITE_DATA = ( 2 )
FILE_ADD_FILE = ( 2 )
FILE_APPEND_DATA = ( 4 )
FILE_ADD_SUBDIRECTORY = ( 4 )
FILE_CREATE_PIPE_INSTANCE = ( 4 )
FILE_READ_EA = ( 8 )
FILE_WRITE_EA = ( 16 )
FILE_EXECUTE = ( 32 )
FILE_TRAVERSE = ( 32 )
FILE_DELETE_CHILD = ( 64 )
FILE_READ_ATTRIBUTES = ( 128 )
FILE_WRITE_ATTRIBUTES = ( 256 )
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
FILE_SHARE_DELETE = 4
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_ENCRYPTED = 64
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_NOTIFY_CHANGE_FILE_NAME = 1
FILE_NOTIFY_CHANGE_DIR_NAME = 2
FILE_NOTIFY_CHANGE_ATTRIBUTES = 4
FILE_NOTIFY_CHANGE_SIZE = 8
FILE_NOTIFY_CHANGE_LAST_WRITE = 16
FILE_NOTIFY_CHANGE_LAST_ACCESS = 32
FILE_NOTIFY_CHANGE_CREATION = 64
FILE_NOTIFY_CHANGE_SECURITY = 256
FILE_ACTION_ADDED = 1
FILE_ACTION_REMOVED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_CASE_SENSITIVE_SEARCH = 1
FILE_CASE_PRESERVED_NAMES = 2
FILE_UNICODE_ON_DISK = 4
FILE_PERSISTENT_ACLS = 8
FILE_FILE_COMPRESSION = 16
FILE_VOLUME_QUOTAS = 32
FILE_SUPPORTS_SPARSE_FILES = 64
FILE_SUPPORTS_REPARSE_POINTS = 128
FILE_SUPPORTS_REMOTE_STORAGE = 256
FILE_VOLUME_IS_COMPRESSED = 32768
FILE_SUPPORTS_OBJECT_IDS = 65536
FILE_SUPPORTS_ENCRYPTION = 131072
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = ( 16 * 1024 )
IO_REPARSE_TAG_RESERVED_ZERO = (0)
IO_REPARSE_TAG_RESERVED_ONE = (1)
IO_REPARSE_TAG_SYMBOLIC_LINK = (2)
IO_REPARSE_TAG_NSS = (5)
IO_REPARSE_TAG_FILTER_MANAGER = -2147483637
IO_REPARSE_TAG_DFS = -2147483638
IO_REPARSE_TAG_SIS = -2147483641
IO_REPARSE_TAG_MOUNT_POINT = -1610612733
IO_REPARSE_TAG_HSM = -1073741820
IO_REPARSE_TAG_NSSRECOVER = (8)
IO_REPARSE_TAG_RESERVED_MS_RANGE = (256)
IO_REPARSE_TAG_RESERVED_RANGE = IO_REPARSE_TAG_RESERVED_ONE
IO_COMPLETION_MODIFY_STATE = 2
DUPLICATE_CLOSE_SOURCE = 1
DUPLICATE_SAME_ACCESS = 2
DELETE = (65536)
READ_CONTROL = (131072)
WRITE_DAC = (262144)
WRITE_OWNER = (524288)
SYNCHRONIZE = (1048576)
STANDARD_RIGHTS_REQUIRED = (983040)
STANDARD_RIGHTS_READ = (READ_CONTROL)
STANDARD_RIGHTS_WRITE = (READ_CONTROL)
STANDARD_RIGHTS_EXECUTE = (READ_CONTROL)
STANDARD_RIGHTS_ALL = (2031616)
SPECIFIC_RIGHTS_ALL = (65535)
IO_COMPLETION_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED|SYNCHRONIZE|0x3
ACCESS_SYSTEM_SECURITY = (16777216)
MAXIMUM_ALLOWED = (33554432)
GENERIC_READ = (-2147483648)
GENERIC_WRITE = (1073741824)
GENERIC_EXECUTE = (536870912)
GENERIC_ALL = (268435456)
# Included from pshpack4.h
# Included from poppack.h
SID_REVISION = (1)
SID_MAX_SUB_AUTHORITIES = (15)
SID_RECOMMENDED_SUB_AUTHORITIES = (1)
SidTypeUser = 1
SidTypeGroup = 2
SidTypeDomain =3
SidTypeAlias = 4
SidTypeWellKnownGroup = 5
SidTypeDeletedAccount = 6
SidTypeInvalid = 7
SidTypeUnknown = 8
SECURITY_NULL_RID = (0)
SECURITY_WORLD_RID = (0)
SECURITY_LOCAL_RID = (0X00000000)
SECURITY_CREATOR_OWNER_RID = (0)
SECURITY_CREATOR_GROUP_RID = (1)
SECURITY_CREATOR_OWNER_SERVER_RID = (2)
SECURITY_CREATOR_GROUP_SERVER_RID = (3)
SECURITY_DIALUP_RID = (1)
SECURITY_NETWORK_RID = (2)
SECURITY_BATCH_RID = (3)
SECURITY_INTERACTIVE_RID = (4)
SECURITY_SERVICE_RID = (6)
SECURITY_ANONYMOUS_LOGON_RID = (7)
SECURITY_PROXY_RID = (8)
SECURITY_SERVER_LOGON_RID = (9)
SECURITY_PRINCIPAL_SELF_RID = (10)
SECURITY_AUTHENTICATED_USER_RID = (11)
SECURITY_LOGON_IDS_RID = (5)
SECURITY_LOGON_IDS_RID_COUNT = (3)
SECURITY_LOCAL_SYSTEM_RID = (18)
SECURITY_NT_NON_UNIQUE = (21)
SECURITY_BUILTIN_DOMAIN_RID = (32)
DOMAIN_USER_RID_ADMIN = (500)
DOMAIN_USER_RID_GUEST = (501)
DOMAIN_GROUP_RID_ADMINS = (512)
DOMAIN_GROUP_RID_USERS = (513)
DOMAIN_GROUP_RID_GUESTS = (514)
DOMAIN_ALIAS_RID_ADMINS = (544)
DOMAIN_ALIAS_RID_USERS = (545)
DOMAIN_ALIAS_RID_GUESTS = (546)
DOMAIN_ALIAS_RID_POWER_USERS = (547)
DOMAIN_ALIAS_RID_ACCOUNT_OPS = (548)
DOMAIN_ALIAS_RID_SYSTEM_OPS = (549)
DOMAIN_ALIAS_RID_PRINT_OPS = (550)
DOMAIN_ALIAS_RID_BACKUP_OPS = (551)
DOMAIN_ALIAS_RID_REPLICATOR = (552)
SE_GROUP_MANDATORY = (1)
SE_GROUP_ENABLED_BY_DEFAULT = (2)
SE_GROUP_ENABLED = (4)
SE_GROUP_OWNER = (8)
SE_GROUP_LOGON_ID = (-1073741824)
ACL_REVISION = (2)
ACL_REVISION_DS = (4)
ACL_REVISION1 = (1)
ACL_REVISION2 = (2)
ACL_REVISION3 = (3)
ACL_REVISION4 = (4)
MAX_ACL_REVISION = ACL_REVISION4
ACCESS_MIN_MS_ACE_TYPE = (0)
ACCESS_ALLOWED_ACE_TYPE = (0)
ACCESS_DENIED_ACE_TYPE = (1)
SYSTEM_AUDIT_ACE_TYPE = (2)
SYSTEM_ALARM_ACE_TYPE = (3)
ACCESS_MAX_MS_V2_ACE_TYPE = (3)
ACCESS_ALLOWED_COMPOUND_ACE_TYPE = (4)
ACCESS_MAX_MS_V3_ACE_TYPE = (4)
ACCESS_MIN_MS_OBJECT_ACE_TYPE = (5)
ACCESS_ALLOWED_OBJECT_ACE_TYPE = (5)
ACCESS_DENIED_OBJECT_ACE_TYPE = (6)
SYSTEM_AUDIT_OBJECT_ACE_TYPE = (7)
SYSTEM_ALARM_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_V4_ACE_TYPE = (8)
ACCESS_MAX_MS_ACE_TYPE = (8)
OBJECT_INHERIT_ACE = (1)
CONTAINER_INHERIT_ACE = (2)
NO_PROPAGATE_INHERIT_ACE = (4)
INHERIT_ONLY_ACE = (8)
INHERITED_ACE = (16)
VALID_INHERIT_FLAGS = (31)
SUCCESSFUL_ACCESS_ACE_FLAG = (64)
FAILED_ACCESS_ACE_FLAG = (128)
ACE_OBJECT_TYPE_PRESENT = 1
ACE_INHERITED_OBJECT_TYPE_PRESENT = 2
SECURITY_DESCRIPTOR_REVISION = (1)
SECURITY_DESCRIPTOR_REVISION1 = (1)
SECURITY_DESCRIPTOR_MIN_LENGTH = (20)
SE_OWNER_DEFAULTED = (1)
SE_GROUP_DEFAULTED = (2)
SE_DACL_PRESENT = (4)
SE_DACL_DEFAULTED = (8)
SE_SACL_PRESENT = (16)
SE_SACL_DEFAULTED = (32)
SE_DACL_AUTO_INHERIT_REQ = (256)
SE_SACL_AUTO_INHERIT_REQ = (512)
SE_DACL_AUTO_INHERITED = (1024)
SE_SACL_AUTO_INHERITED = (2048)
SE_DACL_PROTECTED = (4096)
SE_SACL_PROTECTED = (8192)
SE_SELF_RELATIVE = (32768)
ACCESS_OBJECT_GUID = 0
ACCESS_PROPERTY_SET_GUID = 1
ACCESS_PROPERTY_GUID = 2
ACCESS_MAX_LEVEL = 4
AUDIT_ALLOW_NO_PRIVILEGE = 1
ACCESS_DS_SOURCE_A = "Directory Service"
ACCESS_DS_OBJECT_TYPE_NAME_A = "Directory Service Object"
SE_PRIVILEGE_ENABLED_BY_DEFAULT = (1)
SE_PRIVILEGE_ENABLED = (2)
SE_PRIVILEGE_USED_FOR_ACCESS = (-2147483648)
PRIVILEGE_SET_ALL_NECESSARY = (1)
SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege"
SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege"
SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege"
SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege"
SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege"
SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege"
SE_TCB_NAME = "SeTcbPrivilege"
SE_SECURITY_NAME = "SeSecurityPrivilege"
SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege"
SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege"
SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege"
SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege"
SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege"
SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege"
SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege"
SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege"
SE_BACKUP_NAME = "SeBackupPrivilege"
SE_RESTORE_NAME = "SeRestorePrivilege"
SE_SHUTDOWN_NAME = "SeShutdownPrivilege"
SE_DEBUG_NAME = "SeDebugPrivilege"
SE_AUDIT_NAME = "SeAuditPrivilege"
SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege"
SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege"
SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege"
TOKEN_ASSIGN_PRIMARY = (1)
TOKEN_DUPLICATE = (2)
TOKEN_IMPERSONATE = (4)
TOKEN_QUERY = (8)
TOKEN_QUERY_SOURCE = (16)
TOKEN_ADJUST_PRIVILEGES = (32)
TOKEN_ADJUST_GROUPS = (64)
TOKEN_ADJUST_DEFAULT = (128)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |\
TOKEN_ASSIGN_PRIMARY |\
TOKEN_DUPLICATE |\
TOKEN_IMPERSONATE |\
TOKEN_QUERY |\
TOKEN_QUERY_SOURCE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
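# Illustrative check (added; not part of the h2py output): the composite
# token mask above resolves to 0xF00FF, i.e. STANDARD_RIGHTS_REQUIRED
# (0xF0000) plus the eight specific token rights (0xFF).
assert TOKEN_ALL_ACCESS == 0xF00FF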
TOKEN_READ = (STANDARD_RIGHTS_READ |\
TOKEN_QUERY)
TOKEN_WRITE = (STANDARD_RIGHTS_WRITE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_EXECUTE = (STANDARD_RIGHTS_EXECUTE)
TOKEN_SOURCE_LENGTH = 8
# Token types
TokenPrimary = 1
TokenImpersonation = 2
TokenUser = 1
TokenGroups = 2
TokenPrivileges = 3
TokenOwner = 4
TokenPrimaryGroup = 5
TokenDefaultDacl = 6
TokenSource = 7
TokenType = 8
TokenImpersonationLevel = 9
TokenStatistics = 10
OWNER_SECURITY_INFORMATION = (0X00000001)
GROUP_SECURITY_INFORMATION = (0X00000002)
DACL_SECURITY_INFORMATION = (0X00000004)
SACL_SECURITY_INFORMATION = (0X00000008)
IMAGE_DOS_SIGNATURE = 23117
IMAGE_OS2_SIGNATURE = 17742
IMAGE_OS2_SIGNATURE_LE = 17740
IMAGE_VXD_SIGNATURE = 17740
IMAGE_NT_SIGNATURE = 17744
IMAGE_SIZEOF_FILE_HEADER = 20
IMAGE_FILE_RELOCS_STRIPPED = 1
IMAGE_FILE_EXECUTABLE_IMAGE = 2
IMAGE_FILE_LINE_NUMS_STRIPPED = 4
IMAGE_FILE_LOCAL_SYMS_STRIPPED = 8
IMAGE_FILE_AGGRESIVE_WS_TRIM = 16
IMAGE_FILE_LARGE_ADDRESS_AWARE = 32
IMAGE_FILE_BYTES_REVERSED_LO = 128
IMAGE_FILE_32BIT_MACHINE = 256
IMAGE_FILE_DEBUG_STRIPPED = 512
IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 1024
IMAGE_FILE_NET_RUN_FROM_SWAP = 2048
IMAGE_FILE_SYSTEM = 4096
IMAGE_FILE_DLL = 8192
IMAGE_FILE_UP_SYSTEM_ONLY = 16384
IMAGE_FILE_BYTES_REVERSED_HI = 32768
IMAGE_FILE_MACHINE_UNKNOWN = 0
IMAGE_FILE_MACHINE_I386 = 332
IMAGE_FILE_MACHINE_R3000 = 354
IMAGE_FILE_MACHINE_R4000 = 358
IMAGE_FILE_MACHINE_R10000 = 360
IMAGE_FILE_MACHINE_WCEMIPSV2 = 361
IMAGE_FILE_MACHINE_ALPHA = 388
IMAGE_FILE_MACHINE_POWERPC = 496
IMAGE_FILE_MACHINE_SH3 = 418
IMAGE_FILE_MACHINE_SH3E = 420
IMAGE_FILE_MACHINE_SH4 = 422
IMAGE_FILE_MACHINE_ARM = 448
IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16
IMAGE_SIZEOF_ROM_OPTIONAL_HEADER = 56
IMAGE_SIZEOF_STD_OPTIONAL_HEADER = 28
IMAGE_SIZEOF_NT_OPTIONAL_HEADER = 224
IMAGE_NT_OPTIONAL_HDR_MAGIC = 267
IMAGE_ROM_OPTIONAL_HDR_MAGIC = 263
IMAGE_SUBSYSTEM_UNKNOWN = 0
IMAGE_SUBSYSTEM_NATIVE = 1
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 4
IMAGE_SUBSYSTEM_OS2_CUI = 5
IMAGE_SUBSYSTEM_POSIX_CUI = 7
IMAGE_SUBSYSTEM_RESERVED8 = 8
IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 8192
IMAGE_DIRECTORY_ENTRY_EXPORT = 0
IMAGE_DIRECTORY_ENTRY_IMPORT = 1
IMAGE_DIRECTORY_ENTRY_RESOURCE = 2
IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3
IMAGE_DIRECTORY_ENTRY_SECURITY = 4
IMAGE_DIRECTORY_ENTRY_BASERELOC = 5
IMAGE_DIRECTORY_ENTRY_DEBUG = 6
IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7
IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8
IMAGE_DIRECTORY_ENTRY_TLS = 9
IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10
IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11
IMAGE_DIRECTORY_ENTRY_IAT = 12
IMAGE_SIZEOF_SHORT_NAME = 8
IMAGE_SIZEOF_SECTION_HEADER = 40
IMAGE_SCN_TYPE_NO_PAD = 8
IMAGE_SCN_CNT_CODE = 32
IMAGE_SCN_CNT_INITIALIZED_DATA = 64
IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128
IMAGE_SCN_LNK_OTHER = 256
IMAGE_SCN_LNK_INFO = 512
IMAGE_SCN_LNK_REMOVE = 2048
IMAGE_SCN_LNK_COMDAT = 4096
IMAGE_SCN_MEM_FARDATA = 32768
IMAGE_SCN_MEM_PURGEABLE = 131072
IMAGE_SCN_MEM_16BIT = 131072
IMAGE_SCN_MEM_LOCKED = 262144
IMAGE_SCN_MEM_PRELOAD = 524288
IMAGE_SCN_ALIGN_1BYTES = 1048576
IMAGE_SCN_ALIGN_2BYTES = 2097152
IMAGE_SCN_ALIGN_4BYTES = 3145728
IMAGE_SCN_ALIGN_8BYTES = 4194304
IMAGE_SCN_ALIGN_16BYTES = 5242880
IMAGE_SCN_ALIGN_32BYTES = 6291456
IMAGE_SCN_ALIGN_64BYTES = 7340032
IMAGE_SCN_LNK_NRELOC_OVFL = 16777216
IMAGE_SCN_MEM_DISCARDABLE = 33554432
IMAGE_SCN_MEM_NOT_CACHED = 67108864
IMAGE_SCN_MEM_NOT_PAGED = 134217728
IMAGE_SCN_MEM_SHARED = 268435456
IMAGE_SCN_MEM_EXECUTE = 536870912
IMAGE_SCN_MEM_READ = 1073741824
IMAGE_SCN_MEM_WRITE = -2147483648
IMAGE_SCN_SCALE_INDEX = 1
IMAGE_SIZEOF_SYMBOL = 18
IMAGE_SYM_TYPE_NULL = 0
IMAGE_SYM_TYPE_VOID = 1
IMAGE_SYM_TYPE_CHAR = 2
IMAGE_SYM_TYPE_SHORT = 3
IMAGE_SYM_TYPE_INT = 4
IMAGE_SYM_TYPE_LONG = 5
IMAGE_SYM_TYPE_FLOAT = 6
IMAGE_SYM_TYPE_DOUBLE = 7
IMAGE_SYM_TYPE_STRUCT = 8
IMAGE_SYM_TYPE_UNION = 9
IMAGE_SYM_TYPE_ENUM = 10
IMAGE_SYM_TYPE_MOE = 11
IMAGE_SYM_TYPE_BYTE = 12
IMAGE_SYM_TYPE_WORD = 13
IMAGE_SYM_TYPE_UINT = 14
IMAGE_SYM_TYPE_DWORD = 15
IMAGE_SYM_TYPE_PCODE = 32768
IMAGE_SYM_DTYPE_NULL = 0
IMAGE_SYM_DTYPE_POINTER = 1
IMAGE_SYM_DTYPE_FUNCTION = 2
IMAGE_SYM_DTYPE_ARRAY = 3
IMAGE_SYM_CLASS_NULL = 0
IMAGE_SYM_CLASS_AUTOMATIC = 1
IMAGE_SYM_CLASS_EXTERNAL = 2
IMAGE_SYM_CLASS_STATIC = 3
IMAGE_SYM_CLASS_REGISTER = 4
IMAGE_SYM_CLASS_EXTERNAL_DEF = 5
IMAGE_SYM_CLASS_LABEL = 6
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7
IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8
IMAGE_SYM_CLASS_ARGUMENT = 9
IMAGE_SYM_CLASS_STRUCT_TAG = 10
IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11
IMAGE_SYM_CLASS_UNION_TAG = 12
IMAGE_SYM_CLASS_TYPE_DEFINITION = 13
IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14
IMAGE_SYM_CLASS_ENUM_TAG = 15
IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16
IMAGE_SYM_CLASS_REGISTER_PARAM = 17
IMAGE_SYM_CLASS_BIT_FIELD = 18
IMAGE_SYM_CLASS_FAR_EXTERNAL = 68
IMAGE_SYM_CLASS_BLOCK = 100
IMAGE_SYM_CLASS_FUNCTION = 101
IMAGE_SYM_CLASS_END_OF_STRUCT = 102
IMAGE_SYM_CLASS_FILE = 103
IMAGE_SYM_CLASS_SECTION = 104
IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105
N_BTMASK = 15
N_TMASK = 48
N_TMASK1 = 192
N_TMASK2 = 240
N_BTSHFT = 4
N_TSHIFT = 2
def BTYPE(x): return ((x) & N_BTMASK)
def ISPTR(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_POINTER << N_BTSHFT))
def ISFCN(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_FUNCTION << N_BTSHFT))
def ISARY(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_ARRAY << N_BTSHFT))
def INCREF(x): return ((((x)&~N_BTMASK)<<N_TSHIFT)|(IMAGE_SYM_DTYPE_POINTER<<N_BTSHFT)|((x)&N_BTMASK))
def DECREF(x): return ((((x)>>N_TSHIFT)&~N_BTMASK)|((x)&N_BTMASK))
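# Illustrative sketch (added; not part of the h2py output): the N_* masks
# pack a COFF symbol type as <derived type : 2 bits><base type : 4 bits>,
# which the helpers above manipulate. Deriving "pointer to int" and then
# undoing it round-trips cleanly:
_ptr_to_int = INCREF(IMAGE_SYM_TYPE_INT)
assert BTYPE(_ptr_to_int) == IMAGE_SYM_TYPE_INT
assert ISPTR(_ptr_to_int) and not ISFCN(_ptr_to_int)
assert DECREF(_ptr_to_int) == IMAGE_SYM_TYPE_INT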
IMAGE_SIZEOF_AUX_SYMBOL = 18
IMAGE_COMDAT_SELECT_NODUPLICATES = 1
IMAGE_COMDAT_SELECT_ANY = 2
IMAGE_COMDAT_SELECT_SAME_SIZE = 3
IMAGE_COMDAT_SELECT_EXACT_MATCH = 4
IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5
IMAGE_COMDAT_SELECT_LARGEST = 6
IMAGE_COMDAT_SELECT_NEWEST = 7
IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1
IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2
IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
IMAGE_SIZEOF_RELOCATION = 10
IMAGE_REL_I386_ABSOLUTE = 0
IMAGE_REL_I386_DIR16 = 1
IMAGE_REL_I386_REL16 = 2
IMAGE_REL_I386_DIR32 = 6
IMAGE_REL_I386_DIR32NB = 7
IMAGE_REL_I386_SEG12 = 9
IMAGE_REL_I386_SECTION = 10
IMAGE_REL_I386_SECREL = 11
IMAGE_REL_I386_REL32 = 20
IMAGE_REL_MIPS_ABSOLUTE = 0
IMAGE_REL_MIPS_REFHALF = 1
IMAGE_REL_MIPS_REFWORD = 2
IMAGE_REL_MIPS_JMPADDR = 3
IMAGE_REL_MIPS_REFHI = 4
IMAGE_REL_MIPS_REFLO = 5
IMAGE_REL_MIPS_GPREL = 6
IMAGE_REL_MIPS_LITERAL = 7
IMAGE_REL_MIPS_SECTION = 10
IMAGE_REL_MIPS_SECREL = 11
IMAGE_REL_MIPS_SECRELLO = 12
IMAGE_REL_MIPS_SECRELHI = 13
IMAGE_REL_MIPS_REFWORDNB = 34
IMAGE_REL_MIPS_PAIR = 37
IMAGE_REL_ALPHA_ABSOLUTE = 0
IMAGE_REL_ALPHA_REFLONG = 1
IMAGE_REL_ALPHA_REFQUAD = 2
IMAGE_REL_ALPHA_GPREL32 = 3
IMAGE_REL_ALPHA_LITERAL = 4
IMAGE_REL_ALPHA_LITUSE = 5
IMAGE_REL_ALPHA_GPDISP = 6
IMAGE_REL_ALPHA_BRADDR = 7
IMAGE_REL_ALPHA_HINT = 8
IMAGE_REL_ALPHA_INLINE_REFLONG = 9
IMAGE_REL_ALPHA_REFHI = 10
IMAGE_REL_ALPHA_REFLO = 11
IMAGE_REL_ALPHA_PAIR = 12
IMAGE_REL_ALPHA_MATCH = 13
IMAGE_REL_ALPHA_SECTION = 14
IMAGE_REL_ALPHA_SECREL = 15
IMAGE_REL_ALPHA_REFLONGNB = 16
IMAGE_REL_ALPHA_SECRELLO = 17
IMAGE_REL_ALPHA_SECRELHI = 18
IMAGE_REL_PPC_ABSOLUTE = 0
IMAGE_REL_PPC_ADDR64 = 1
IMAGE_REL_PPC_ADDR32 = 2
IMAGE_REL_PPC_ADDR24 = 3
IMAGE_REL_PPC_ADDR16 = 4
IMAGE_REL_PPC_ADDR14 = 5
IMAGE_REL_PPC_REL24 = 6
IMAGE_REL_PPC_REL14 = 7
IMAGE_REL_PPC_TOCREL16 = 8
IMAGE_REL_PPC_TOCREL14 = 9
IMAGE_REL_PPC_ADDR32NB = 10
IMAGE_REL_PPC_SECREL = 11
IMAGE_REL_PPC_SECTION = 12
IMAGE_REL_PPC_IFGLUE = 13
IMAGE_REL_PPC_IMGLUE = 14
IMAGE_REL_PPC_SECREL16 = 15
IMAGE_REL_PPC_REFHI = 16
IMAGE_REL_PPC_REFLO = 17
IMAGE_REL_PPC_PAIR = 18
IMAGE_REL_PPC_SECRELLO = 19
IMAGE_REL_PPC_SECRELHI = 20
IMAGE_REL_PPC_TYPEMASK = 255
IMAGE_REL_PPC_NEG = 256
IMAGE_REL_PPC_BRTAKEN = 512
IMAGE_REL_PPC_BRNTAKEN = 1024
IMAGE_REL_PPC_TOCDEFN = 2048
IMAGE_REL_SH3_ABSOLUTE = 0
IMAGE_REL_SH3_DIRECT16 = 1
IMAGE_REL_SH3_DIRECT32 = 2
IMAGE_REL_SH3_DIRECT8 = 3
IMAGE_REL_SH3_DIRECT8_WORD = 4
IMAGE_REL_SH3_DIRECT8_LONG = 5
IMAGE_REL_SH3_DIRECT4 = 6
IMAGE_REL_SH3_DIRECT4_WORD = 7
IMAGE_REL_SH3_DIRECT4_LONG = 8
IMAGE_REL_SH3_PCREL8_WORD = 9
IMAGE_REL_SH3_PCREL8_LONG = 10
IMAGE_REL_SH3_PCREL12_WORD = 11
IMAGE_REL_SH3_STARTOF_SECTION = 12
IMAGE_REL_SH3_SIZEOF_SECTION = 13
IMAGE_REL_SH3_SECTION = 14
IMAGE_REL_SH3_SECREL = 15
IMAGE_REL_SH3_DIRECT32_NB = 16
IMAGE_SIZEOF_LINENUMBER = 6
IMAGE_SIZEOF_BASE_RELOCATION = 8
IMAGE_REL_BASED_ABSOLUTE = 0
IMAGE_REL_BASED_HIGH = 1
IMAGE_REL_BASED_LOW = 2
IMAGE_REL_BASED_HIGHLOW = 3
IMAGE_REL_BASED_HIGHADJ = 4
IMAGE_REL_BASED_MIPS_JMPADDR = 5
IMAGE_REL_BASED_SECTION = 6
IMAGE_REL_BASED_REL32 = 7
IMAGE_ARCHIVE_START_SIZE = 8
IMAGE_ARCHIVE_START = "!<arch>\n"
IMAGE_ARCHIVE_END = "`\n"
IMAGE_ARCHIVE_PAD = "\n"
IMAGE_ARCHIVE_LINKER_MEMBER = "/ "
IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR = 60
IMAGE_ORDINAL_FLAG = -2147483648
def IMAGE_SNAP_BY_ORDINAL(Ordinal): return ((Ordinal & IMAGE_ORDINAL_FLAG) != 0)
def IMAGE_ORDINAL(Ordinal): return (Ordinal & 65535)
IMAGE_RESOURCE_NAME_IS_STRING = -2147483648
IMAGE_RESOURCE_DATA_IS_DIRECTORY = -2147483648
IMAGE_DEBUG_TYPE_UNKNOWN = 0
IMAGE_DEBUG_TYPE_COFF = 1
IMAGE_DEBUG_TYPE_CODEVIEW = 2
IMAGE_DEBUG_TYPE_FPO = 3
IMAGE_DEBUG_TYPE_MISC = 4
IMAGE_DEBUG_TYPE_EXCEPTION = 5
IMAGE_DEBUG_TYPE_FIXUP = 6
IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7
IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8
IMAGE_DEBUG_TYPE_BORLAND = 9
FRAME_FPO = 0
FRAME_TRAP = 1
FRAME_TSS = 2
FRAME_NONFPO = 3
SIZEOF_RFPO_DATA = 16
IMAGE_DEBUG_MISC_EXENAME = 1
IMAGE_SEPARATE_DEBUG_SIGNATURE = 18756
IMAGE_SEPARATE_DEBUG_FLAGS_MASK = 32768
IMAGE_SEPARATE_DEBUG_MISMATCH = 32768
# Included from string.h
_NLSCMPERROR = 2147483647
NULL = 0
HEAP_NO_SERIALIZE = 1
HEAP_GROWABLE = 2
HEAP_GENERATE_EXCEPTIONS = 4
HEAP_ZERO_MEMORY = 8
HEAP_REALLOC_IN_PLACE_ONLY = 16
HEAP_TAIL_CHECKING_ENABLED = 32
HEAP_FREE_CHECKING_ENABLED = 64
HEAP_DISABLE_COALESCE_ON_FREE = 128
HEAP_CREATE_ALIGN_16 = 65536
HEAP_CREATE_ENABLE_TRACING = 131072
HEAP_MAXIMUM_TAG = 4095
HEAP_PSEUDO_TAG_FLAG = 32768
HEAP_TAG_SHIFT = 16
IS_TEXT_UNICODE_ASCII16 = 1
IS_TEXT_UNICODE_REVERSE_ASCII16 = 16
IS_TEXT_UNICODE_STATISTICS = 2
IS_TEXT_UNICODE_REVERSE_STATISTICS = 32
IS_TEXT_UNICODE_CONTROLS = 4
IS_TEXT_UNICODE_REVERSE_CONTROLS = 64
IS_TEXT_UNICODE_SIGNATURE = 8
IS_TEXT_UNICODE_REVERSE_SIGNATURE = 128
IS_TEXT_UNICODE_ILLEGAL_CHARS = 256
IS_TEXT_UNICODE_ODD_LENGTH = 512
IS_TEXT_UNICODE_DBCS_LEADBYTE = 1024
IS_TEXT_UNICODE_NULL_BYTES = 4096
IS_TEXT_UNICODE_UNICODE_MASK = 15
IS_TEXT_UNICODE_REVERSE_MASK = 240
IS_TEXT_UNICODE_NOT_UNICODE_MASK = 3840
IS_TEXT_UNICODE_NOT_ASCII_MASK = 61440
COMPRESSION_FORMAT_NONE = (0)
COMPRESSION_FORMAT_DEFAULT = (1)
COMPRESSION_FORMAT_LZNT1 = (2)
COMPRESSION_ENGINE_STANDARD = (0)
COMPRESSION_ENGINE_MAXIMUM = (256)
MESSAGE_RESOURCE_UNICODE = 1
RTL_CRITSECT_TYPE = 0
RTL_RESOURCE_TYPE = 1
SEF_DACL_AUTO_INHERIT = 1
SEF_SACL_AUTO_INHERIT = 2
SEF_DEFAULT_DESCRIPTOR_FOR_OBJECT = 4
SEF_AVOID_PRIVILEGE_CHECK = 8
DLL_PROCESS_ATTACH = 1
DLL_THREAD_ATTACH = 2
DLL_THREAD_DETACH = 3
DLL_PROCESS_DETACH = 0
EVENTLOG_SEQUENTIAL_READ = 0X0001
EVENTLOG_SEEK_READ = 0X0002
EVENTLOG_FORWARDS_READ = 0X0004
EVENTLOG_BACKWARDS_READ = 0X0008
EVENTLOG_SUCCESS = 0X0000
EVENTLOG_ERROR_TYPE = 1
EVENTLOG_WARNING_TYPE = 2
EVENTLOG_INFORMATION_TYPE = 4
EVENTLOG_AUDIT_SUCCESS = 8
EVENTLOG_AUDIT_FAILURE = 16
EVENTLOG_START_PAIRED_EVENT = 1
EVENTLOG_END_PAIRED_EVENT = 2
EVENTLOG_END_ALL_PAIRED_EVENTS = 4
EVENTLOG_PAIRED_EVENT_ACTIVE = 8
EVENTLOG_PAIRED_EVENT_INACTIVE = 16
KEY_QUERY_VALUE = (1)
KEY_SET_VALUE = (2)
KEY_CREATE_SUB_KEY = (4)
KEY_ENUMERATE_SUB_KEYS = (8)
KEY_NOTIFY = (16)
KEY_CREATE_LINK = (32)
KEY_READ = ((STANDARD_RIGHTS_READ |\
KEY_QUERY_VALUE |\
KEY_ENUMERATE_SUB_KEYS |\
KEY_NOTIFY) \
& \
(~SYNCHRONIZE))
KEY_WRITE = ((STANDARD_RIGHTS_WRITE |\
KEY_SET_VALUE |\
KEY_CREATE_SUB_KEY) \
& \
(~SYNCHRONIZE))
KEY_EXECUTE = ((KEY_READ) \
& \
(~SYNCHRONIZE))
KEY_ALL_ACCESS = ((STANDARD_RIGHTS_ALL |\
KEY_QUERY_VALUE |\
KEY_SET_VALUE |\
KEY_CREATE_SUB_KEY |\
KEY_ENUMERATE_SUB_KEYS |\
KEY_NOTIFY |\
KEY_CREATE_LINK) \
& \
(~SYNCHRONIZE))
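# Quick sanity check (added; not part of the h2py output): each registry
# mask above strips SYNCHRONIZE out of the generic rights, so KEY_READ
# works out to the documented value 0x20019.
assert KEY_READ == 0x20019
assert KEY_READ & SYNCHRONIZE == 0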
REG_OPTION_RESERVED = (0)
REG_OPTION_NON_VOLATILE = (0)
REG_OPTION_VOLATILE = (1)
REG_OPTION_CREATE_LINK = (2)
REG_OPTION_BACKUP_RESTORE = (4)
REG_OPTION_OPEN_LINK = (8)
REG_LEGAL_OPTION = \
(REG_OPTION_RESERVED |\
REG_OPTION_NON_VOLATILE |\
REG_OPTION_VOLATILE |\
REG_OPTION_CREATE_LINK |\
REG_OPTION_BACKUP_RESTORE |\
REG_OPTION_OPEN_LINK)
REG_CREATED_NEW_KEY = (1)
REG_OPENED_EXISTING_KEY = (2)
REG_WHOLE_HIVE_VOLATILE = (1)
REG_REFRESH_HIVE = (2)
REG_NO_LAZY_FLUSH = (4)
REG_NOTIFY_CHANGE_NAME = (1)
REG_NOTIFY_CHANGE_ATTRIBUTES = (2)
REG_NOTIFY_CHANGE_LAST_SET = (4)
REG_NOTIFY_CHANGE_SECURITY = (8)
REG_LEGAL_CHANGE_FILTER = \
(REG_NOTIFY_CHANGE_NAME |\
REG_NOTIFY_CHANGE_ATTRIBUTES |\
REG_NOTIFY_CHANGE_LAST_SET |\
REG_NOTIFY_CHANGE_SECURITY)
REG_NONE = ( 0 )
REG_SZ = ( 1 )
REG_EXPAND_SZ = ( 2 )
REG_BINARY = ( 3 )
REG_DWORD = ( 4 )
REG_DWORD_LITTLE_ENDIAN = ( 4 )
REG_DWORD_BIG_ENDIAN = ( 5 )
REG_LINK = ( 6 )
REG_MULTI_SZ = ( 7 )
REG_RESOURCE_LIST = ( 8 )
REG_FULL_RESOURCE_DESCRIPTOR = ( 9 )
REG_RESOURCE_REQUIREMENTS_LIST = ( 10 )
SERVICE_KERNEL_DRIVER = 1
SERVICE_FILE_SYSTEM_DRIVER = 2
SERVICE_ADAPTER = 4
SERVICE_RECOGNIZER_DRIVER = 8
SERVICE_DRIVER = (SERVICE_KERNEL_DRIVER | \
SERVICE_FILE_SYSTEM_DRIVER | \
SERVICE_RECOGNIZER_DRIVER)
SERVICE_WIN32_OWN_PROCESS = 16
SERVICE_WIN32_SHARE_PROCESS = 32
SERVICE_WIN32 = (SERVICE_WIN32_OWN_PROCESS | \
SERVICE_WIN32_SHARE_PROCESS)
SERVICE_INTERACTIVE_PROCESS = 256
SERVICE_TYPE_ALL = (SERVICE_WIN32 | \
SERVICE_ADAPTER | \
SERVICE_DRIVER | \
SERVICE_INTERACTIVE_PROCESS)
SERVICE_BOOT_START = 0
SERVICE_SYSTEM_START = 1
SERVICE_AUTO_START = 2
SERVICE_DEMAND_START = 3
SERVICE_DISABLED = 4
SERVICE_ERROR_IGNORE = 0
SERVICE_ERROR_NORMAL = 1
SERVICE_ERROR_SEVERE = 2
SERVICE_ERROR_CRITICAL = 3
TAPE_ERASE_SHORT = 0
TAPE_ERASE_LONG = 1
TAPE_LOAD = 0
TAPE_UNLOAD = 1
TAPE_TENSION = 2
TAPE_LOCK = 3
TAPE_UNLOCK = 4
TAPE_FORMAT = 5
TAPE_SETMARKS = 0
TAPE_FILEMARKS = 1
TAPE_SHORT_FILEMARKS = 2
TAPE_LONG_FILEMARKS = 3
TAPE_ABSOLUTE_POSITION = 0
TAPE_LOGICAL_POSITION = 1
TAPE_PSEUDO_LOGICAL_POSITION = 2
TAPE_REWIND = 0
TAPE_ABSOLUTE_BLOCK = 1
TAPE_LOGICAL_BLOCK = 2
TAPE_PSEUDO_LOGICAL_BLOCK = 3
TAPE_SPACE_END_OF_DATA = 4
TAPE_SPACE_RELATIVE_BLOCKS = 5
TAPE_SPACE_FILEMARKS = 6
TAPE_SPACE_SEQUENTIAL_FMKS = 7
TAPE_SPACE_SETMARKS = 8
TAPE_SPACE_SEQUENTIAL_SMKS = 9
TAPE_DRIVE_FIXED = 1
TAPE_DRIVE_SELECT = 2
TAPE_DRIVE_INITIATOR = 4
TAPE_DRIVE_ERASE_SHORT = 16
TAPE_DRIVE_ERASE_LONG = 32
TAPE_DRIVE_ERASE_BOP_ONLY = 64
TAPE_DRIVE_ERASE_IMMEDIATE = 128
TAPE_DRIVE_TAPE_CAPACITY = 256
TAPE_DRIVE_TAPE_REMAINING = 512
TAPE_DRIVE_FIXED_BLOCK = 1024
TAPE_DRIVE_VARIABLE_BLOCK = 2048
TAPE_DRIVE_WRITE_PROTECT = 4096
TAPE_DRIVE_EOT_WZ_SIZE = 8192
TAPE_DRIVE_ECC = 65536
TAPE_DRIVE_COMPRESSION = 131072
TAPE_DRIVE_PADDING = 262144
TAPE_DRIVE_REPORT_SMKS = 524288
TAPE_DRIVE_GET_ABSOLUTE_BLK = 1048576
TAPE_DRIVE_GET_LOGICAL_BLK = 2097152
TAPE_DRIVE_SET_EOT_WZ_SIZE = 4194304
TAPE_DRIVE_EJECT_MEDIA = 16777216
TAPE_DRIVE_RESERVED_BIT = -2147483648
TAPE_DRIVE_LOAD_UNLOAD = -2147483647
TAPE_DRIVE_TENSION = -2147483646
TAPE_DRIVE_LOCK_UNLOCK = -2147483644
TAPE_DRIVE_REWIND_IMMEDIATE = -2147483640
TAPE_DRIVE_SET_BLOCK_SIZE = -2147483632
TAPE_DRIVE_LOAD_UNLD_IMMED = -2147483616
TAPE_DRIVE_TENSION_IMMED = -2147483584
TAPE_DRIVE_LOCK_UNLK_IMMED = -2147483520
TAPE_DRIVE_SET_ECC = -2147483392
TAPE_DRIVE_SET_COMPRESSION = -2147483136
TAPE_DRIVE_SET_PADDING = -2147482624
TAPE_DRIVE_SET_REPORT_SMKS = -2147481600
TAPE_DRIVE_ABSOLUTE_BLK = -2147479552
TAPE_DRIVE_ABS_BLK_IMMED = -2147475456
TAPE_DRIVE_LOGICAL_BLK = -2147467264
TAPE_DRIVE_LOG_BLK_IMMED = -2147450880
TAPE_DRIVE_END_OF_DATA = -2147418112
TAPE_DRIVE_RELATIVE_BLKS = -2147352576
TAPE_DRIVE_FILEMARKS = -2147221504
TAPE_DRIVE_SEQUENTIAL_FMKS = -2146959360
TAPE_DRIVE_SETMARKS = -2146435072
TAPE_DRIVE_SEQUENTIAL_SMKS = -2145386496
TAPE_DRIVE_REVERSE_POSITION = -2143289344
TAPE_DRIVE_SPACE_IMMEDIATE = -2139095040
TAPE_DRIVE_WRITE_SETMARKS = -2130706432
TAPE_DRIVE_WRITE_FILEMARKS = -2113929216
TAPE_DRIVE_WRITE_SHORT_FMKS = -2080374784
TAPE_DRIVE_WRITE_LONG_FMKS = -2013265920
TAPE_DRIVE_WRITE_MARK_IMMED = -1879048192
TAPE_DRIVE_FORMAT = -1610612736
TAPE_DRIVE_FORMAT_IMMEDIATE = -1073741824
TAPE_DRIVE_HIGH_FEATURES = -2147483648
TAPE_FIXED_PARTITIONS = 0
TAPE_SELECT_PARTITIONS = 1
TAPE_INITIATOR_PARTITIONS = 2
| 699
| 0
| 448
|
e22e54a9a99a8dec899f21dc34205db0365bd080
| 775
|
py
|
Python
|
esipy/exceptions.py
|
klinger/EsiPy
|
f51863034f933c15ec4c506a466576b1b966d5ef
|
[
"BSD-3-Clause"
] | null | null | null |
esipy/exceptions.py
|
klinger/EsiPy
|
f51863034f933c15ec4c506a466576b1b966d5ef
|
[
"BSD-3-Clause"
] | null | null | null |
esipy/exceptions.py
|
klinger/EsiPy
|
f51863034f933c15ec4c506a466576b1b966d5ef
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
""" Exceptions for EsiPy related errors """
class APIException(Exception):
""" Exception for SSO related errors """
| 35.227273
| 68
| 0.532903
|
# -*- encoding: utf-8 -*-
""" Exceptions for EsiPy related errors """
class APIException(Exception):
""" Exception for SSO related errors """
def __init__(self, url, code, json_response):
self.url = url
self.status_code = code
self.response = json_response
super(APIException, self).__init__(str(self))
def __str__(self):
if 'error' in self.response:
return 'HTTP Error %s: %s' % (self.status_code,
self.response['error'])
elif 'message' in self.response:
return 'HTTP Error %s: %s' % (self.status_code,
self.response['message'])
return 'HTTP Error %s' % (self.status_code)
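# Hedged usage sketch (added; the endpoint and payload below are made up):
#
#     raise APIException(
#         'https://esi.example/v1/status/',
#         403,
#         {'error': 'token is expired'},
#     )
#
# str() on the instance then renders as
# "HTTP Error 403: token is expired".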
| 563
| 0
| 58
|
9ea5d5434ac2b3e087de3f3f4a234d09730d9a32
| 594
|
py
|
Python
|
problems/599.Minimum_Index_Sum_of_Two_Lists/li_counter.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/599.Minimum_Index_Sum_of_Two_Lists/li_counter.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/599.Minimum_Index_Sum_of_Two_Lists/li_counter.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
# coding=utf-8
# Author: Jianghan LI
# Question: 599.Minimum_Index_Sum_of_Two_Lists
# Date: 2017-05-30, 0 wrong try
from collections import Counter
s = Solution()
print s.findRestaurant(["Shogun", "Tapioca Express", "Burger King", "KFC"], ["KFC", "Shogun", "Burger King"])
| 28.285714
| 109
| 0.606061
|
# coding=utf-8
# Author: Jianghan LI
# Question: 599.Minimum_Index_Sum_of_Two_Lists
# Date: 2017-05-30, 0 wrong try
from collections import Counter
class Solution(object):
def findRestaurant(self, l1, l2):
d = set(l1) & set(l2)
c = Counter({v: i + 1 for i, v in enumerate(l1) if v in d}) + \
Counter({v: i + 1 for i, v in enumerate(l2) if v in d})
res = min(c.values())
return [i for i, v in c.items() if v == res]
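# Added note: the Counter sum keeps only restaurants common to both lists
# and stores (index_in_l1 + 1) + (index_in_l2 + 1); taking the minimum is
# unaffected by the constant +2 offset. For the sample call below the
# smallest sum belongs to "Shogun", so it prints ['Shogun'].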
s = Solution()
print s.findRestaurant(["Shogun", "Tapioca Express", "Burger King", "KFC"], ["KFC", "Shogun", "Burger King"])
| 265
| 2
| 50
|
1eb5869afdf8053c80b5e34a7e2f20873044f9ab
| 3,666
|
py
|
Python
|
tests/test_awesomeversion.py
|
agners/awesomeversion
|
4d03a0a3fb35e35e4eab984423c903cf50137510
|
[
"MIT"
] | null | null | null |
tests/test_awesomeversion.py
|
agners/awesomeversion
|
4d03a0a3fb35e35e4eab984423c903cf50137510
|
[
"MIT"
] | null | null | null |
tests/test_awesomeversion.py
|
agners/awesomeversion
|
4d03a0a3fb35e35e4eab984423c903cf50137510
|
[
"MIT"
] | null | null | null |
"""Test awesomeversion."""
import json
import pytest
from awesomeversion import (
AwesomeVersion,
AwesomeVersionStrategy,
AwesomeVersionStrategyException,
)
def test_awesomeversion():
"""Test awesomeversion."""
version = AwesomeVersion("2020.12.1")
assert not version.beta
version = AwesomeVersion("2020.12.1a0")
assert version.alpha
version = AwesomeVersion("2020.12.1b0")
assert version.beta
version = AwesomeVersion("2020.12.1dev0")
assert version.dev
version = AwesomeVersion("2020.12.1d0")
assert version.dev
version = AwesomeVersion("2020.12.1rc0")
assert version.release_candidate
assert version.prefix is None
version = AwesomeVersion("v2020.12.1rc0")
assert version.prefix == "v"
version2 = AwesomeVersion(version)
assert version == version2
assert str(version) == str(version2)
assert str(version) == "v2020.12.1rc0"
assert version.string == "2020.12.1rc0"
assert repr(version) == "<AwesomeVersion CalVer '2020.12.1rc0'>"
assert AwesomeVersion("1.0.0-beta.2").modifier == "beta.2"
assert AwesomeVersion("2020.2.0b1").modifier_type == "b"
with AwesomeVersion("20.12.0") as current:
with AwesomeVersion("20.12.1") as upstream:
assert upstream > current
def test_serialization():
"""Test to and from JSON serialization."""
version = AwesomeVersion("20.12.1")
dumps = json.dumps({"version": version})
assert dumps == '{"version": "20.12.1"}'
assert json.loads(dumps)["version"] == version.string
test_data = [
("2020.12.1b0"),
("2020.12.1"),
("2021.2.0.dev20210118"),
]
@pytest.mark.parametrize("version", test_data)
def test_nesting(version):
"""Test nesting AwesomeVersion objects."""
obj = AwesomeVersion(version)
assert obj.string == version
assert str(obj) == version
assert AwesomeVersion(obj) == AwesomeVersion(version)
assert AwesomeVersion(obj).string == AwesomeVersion(version)
assert str(AwesomeVersion(obj)) == AwesomeVersion(version)
assert AwesomeVersion(obj) == version
assert AwesomeVersion(obj).string == version
assert str(AwesomeVersion(obj)) == version
assert (
AwesomeVersion(
AwesomeVersion(AwesomeVersion(AwesomeVersion(AwesomeVersion(obj))))
)
== version
)
assert (
AwesomeVersion(
AwesomeVersion(AwesomeVersion(AwesomeVersion(AwesomeVersion(obj))))
).string
== version
)
    assert (
        str(
            AwesomeVersion(
                AwesomeVersion(AwesomeVersion(AwesomeVersion(AwesomeVersion(obj))))
            )
        )
        == version
    )
def test_ensure_strategy(caplog):
"""test ensure_strategy."""
obj = AwesomeVersion("1.0.0", AwesomeVersionStrategy.SEMVER)
assert obj.strategy == AwesomeVersionStrategy.SEMVER
obj = AwesomeVersion(
"1.0.0",
[AwesomeVersionStrategy.SEMVER, AwesomeVersionStrategy.SPECIALCONTAINER],
)
assert obj.strategy in [
AwesomeVersionStrategy.SEMVER,
AwesomeVersionStrategy.SPECIALCONTAINER,
]
with pytest.raises(AwesomeVersionStrategyException):
AwesomeVersion("1", AwesomeVersionStrategy.SEMVER)
with pytest.raises(AwesomeVersionStrategyException):
AwesomeVersion(
"1",
[AwesomeVersionStrategy.SEMVER, AwesomeVersionStrategy.SPECIALCONTAINER],
)
obj = AwesomeVersion.ensure_strategy("1.0.0", AwesomeVersionStrategy.SEMVER)
assert (
"Using AwesomeVersion.ensure_strategy(version, strategy) is deprecated"
in caplog.text
)
| 27.358209
| 85
| 0.665576
|
"""Test awesomeversion."""
import json
import pytest
from awesomeversion import (
AwesomeVersion,
AwesomeVersionStrategy,
AwesomeVersionStrategyException,
)
def test_awesomeversion():
"""Test awesomeversion."""
version = AwesomeVersion("2020.12.1")
assert not version.beta
version = AwesomeVersion("2020.12.1a0")
assert version.alpha
version = AwesomeVersion("2020.12.1b0")
assert version.beta
version = AwesomeVersion("2020.12.1dev0")
assert version.dev
version = AwesomeVersion("2020.12.1d0")
assert version.dev
version = AwesomeVersion("2020.12.1rc0")
assert version.release_candidate
assert version.prefix is None
version = AwesomeVersion("v2020.12.1rc0")
assert version.prefix == "v"
version2 = AwesomeVersion(version)
assert version == version2
assert str(version) == str(version2)
assert str(version) == "v2020.12.1rc0"
assert version.string == "2020.12.1rc0"
assert repr(version) == "<AwesomeVersion CalVer '2020.12.1rc0'>"
assert AwesomeVersion("1.0.0-beta.2").modifier == "beta.2"
assert AwesomeVersion("2020.2.0b1").modifier_type == "b"
with AwesomeVersion("20.12.0") as current:
with AwesomeVersion("20.12.1") as upstream:
assert upstream > current
def test_serialization():
"""Test to and from JSON serialization."""
version = AwesomeVersion("20.12.1")
dumps = json.dumps({"version": version})
assert dumps == '{"version": "20.12.1"}'
assert json.loads(dumps)["version"] == version.string
test_data = [
("2020.12.1b0"),
("2020.12.1"),
("2021.2.0.dev20210118"),
]
@pytest.mark.parametrize("version", test_data)
def test_nesting(version):
"""Test nesting AwesomeVersion objects."""
obj = AwesomeVersion(version)
assert obj.string == version
assert str(obj) == version
assert AwesomeVersion(obj) == AwesomeVersion(version)
assert AwesomeVersion(obj).string == AwesomeVersion(version)
assert str(AwesomeVersion(obj)) == AwesomeVersion(version)
assert AwesomeVersion(obj) == version
assert AwesomeVersion(obj).string == version
assert str(AwesomeVersion(obj)) == version
assert (
AwesomeVersion(
AwesomeVersion(AwesomeVersion(AwesomeVersion(AwesomeVersion(obj))))
)
== version
)
assert (
AwesomeVersion(
AwesomeVersion(AwesomeVersion(AwesomeVersion(AwesomeVersion(obj))))
).string
== version
)
    assert (
        str(
            AwesomeVersion(
                AwesomeVersion(AwesomeVersion(AwesomeVersion(AwesomeVersion(obj))))
            )
        )
        == version
    )
def test_ensure_strategy(caplog):
"""test ensure_strategy."""
obj = AwesomeVersion("1.0.0", AwesomeVersionStrategy.SEMVER)
assert obj.strategy == AwesomeVersionStrategy.SEMVER
obj = AwesomeVersion(
"1.0.0",
[AwesomeVersionStrategy.SEMVER, AwesomeVersionStrategy.SPECIALCONTAINER],
)
assert obj.strategy in [
AwesomeVersionStrategy.SEMVER,
AwesomeVersionStrategy.SPECIALCONTAINER,
]
with pytest.raises(AwesomeVersionStrategyException):
AwesomeVersion("1", AwesomeVersionStrategy.SEMVER)
with pytest.raises(AwesomeVersionStrategyException):
AwesomeVersion(
"1",
[AwesomeVersionStrategy.SEMVER, AwesomeVersionStrategy.SPECIALCONTAINER],
)
obj = AwesomeVersion.ensure_strategy("1.0.0", AwesomeVersionStrategy.SEMVER)
assert (
"Using AwesomeVersion.ensure_strategy(version, strategy) is deprecated"
in caplog.text
)
| 0
| 0
| 0
|
543dafa81620a4ef6fd62bec3b7da69e65bb0a52
| 2,934
|
py
|
Python
|
user/migrations/0001_initial.py
|
Hrsn2861/pysat-server
|
72224bb0e6af8ef825eaf3259587698b5639b8a5
|
[
"MIT"
] | null | null | null |
user/migrations/0001_initial.py
|
Hrsn2861/pysat-server
|
72224bb0e6af8ef825eaf3259587698b5639b8a5
|
[
"MIT"
] | 7
|
2020-06-06T01:55:39.000Z
|
2022-02-10T11:46:31.000Z
|
user/migrations/0001_initial.py
|
Hrsn2861/pysat-server
|
72224bb0e6af8ef825eaf3259587698b5639b8a5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-11-02 11:35
from django.db import migrations, models
| 37.615385
| 114
| 0.504431
|
# Generated by Django 2.2.5 on 2019-11-02 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EntryLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_id', models.IntegerField()),
('user_id', models.IntegerField()),
('login_time', models.DateTimeField()),
('logout_time', models.DateTimeField()),
],
options={
'verbose_name': 'log',
'verbose_name_plural': 'logs',
'get_latest_by': 'id',
},
),
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.IntegerField()),
('school_id', models.IntegerField()),
('permission', models.IntegerField()),
],
options={
'verbose_name': 'permission',
'verbose_name_plural': 'permissions',
'get_latest_by': 'id',
},
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=32)),
('password', models.CharField(max_length=128)),
('phone', models.CharField(max_length=11)),
('email', models.CharField(default='', max_length=64)),
('email_verify', models.CharField(default='', max_length=64)),
('realname', models.CharField(default='', max_length=32)),
('motto', models.CharField(default='', max_length=256)),
('permission', models.IntegerField(default=1)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'get_latest_by': 'id',
},
),
migrations.CreateModel(
name='VerifyCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_id', models.IntegerField()),
('phone', models.CharField(max_length=11)),
('code', models.CharField(max_length=8)),
('send_time', models.DateTimeField()),
],
options={
'verbose_name': 'verifycode',
'verbose_name_plural': 'verifycodes',
'get_latest_by': 'id',
},
),
]
| 0
| 2,820
| 23
|
ca346a4bb1f71cd9d7868ca9808795b7f0dc484d
| 1,983
|
py
|
Python
|
portfolio/Python/scrapy/swedishtruckparts/parts4scania.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/swedishtruckparts/parts4scania.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/swedishtruckparts/parts4scania.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
| 31.47619
| 96
| 0.630862
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
class Parts4ScaniaSpider(BaseSpider):
name = 'parts4scania.co.uk'
allowed_domains = ['www.parts4scania.co.uk']
start_urls = ('http://www.parts4scania.co.uk',)
def __init__(self, *args, **kwargs):
super(Parts4ScaniaSpider, self).__init__(*args, **kwargs)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select('//table[@id="NavigationBar4"]//a/@href').extract()
for category in categories:
url = urljoin_rfc(get_base_url(response), category)
yield Request(url)
# pages
# next_page = hxs.select('').extract()
# if next_page:
# url = urljoin_rfc(get_base_url(response), next_page[0])
# yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# products
products = hxs.select(u'//b[contains(text(), "\xa3")]/../..')
for product in products:
product_loader = ProductLoader(item=Product(), selector=product)
product_loader.add_xpath('name', './b/font/text()')
product_loader.add_value('url', response.url)
price = product.select(u'.//b[contains(text(), "\xa3")]/text()').re('\xa3(.*[0-9])')
if not price:
continue
product_loader.add_value('price', price)
yield product_loader.load_item()
| 1,420
| 231
| 23
|
857923f165523a228611c35189dcf54fa43aa704
| 5,837
|
py
|
Python
|
examples/extension/fruitproperty/pyproperty.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 31
|
2015-04-01T15:59:36.000Z
|
2022-03-18T20:21:47.000Z
|
examples/extension/fruitproperty/pyproperty.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 3
|
2015-02-06T19:30:24.000Z
|
2017-05-25T14:14:31.000Z
|
examples/extension/fruitproperty/pyproperty.py
|
usnistgov/OOF3D
|
4fd423a48aea9c5dc207520f02de53ae184be74c
|
[
"X11"
] | 7
|
2015-01-23T15:19:22.000Z
|
2021-06-09T09:03:59.000Z
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# Example of an all-Python property, using the PyPropertyWrapper
# mechanism to route callbacks through to Python routines contained
# here.
# Casting of flux and field to the "most-derived" class via the
# PythonExportable mechanism is thinkable, but probably not actually
# necessary. The property knows its field and flux as derived types
# at construction/initialization time, and can get the appropriate
# iterators from them, rather than from the passed-in arguments to
# fluxmatrix, fluxrhs, etc. Those it can just use for comparison,
# to detect which field is currently presenting its fluxmatrix.
from ooflib.SWIG.engine import fieldindex
from ooflib.SWIG.engine import outputval
from ooflib.SWIG.engine import planarity
from ooflib.SWIG.engine import pypropertywrapper
from ooflib.SWIG.engine import symmmatrix
from ooflib.SWIG.engine.property.elasticity import cijkl
from ooflib.common import debug
from ooflib.engine import propertyregistration
from ooflib.engine import problem
Displacement = problem.Displacement
Stress = problem.Stress
# This property has a simple repr, with no parameters.
propertyregistration.PropertyRegistration('PyProperty', TestProp,
"fruitproperty.pyproperty",
1000,
params=[],
fields=[Displacement],
fluxes=[Stress],
outputs=["Energy", "Strain"],
propertyType="Elasticity")
| 47.455285
| 79
| 0.587802
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# Example of an all-Python property, using the PyPropertyWrapper
# mechanism to route callbacks through to Python routines contained
# here.
# Casting of flux and field to the "most-derived" class via the
# PythonExportable mechanism is thinkable, but probably not actually
# necessary. The property knows its field and flux as derived types
# at construction/initialization time, and can get the appropriate
# iterators from them, rather than from the passed-in arguments to
# fluxmatrix, fluxrhs, etc. Those it can just use for comparison,
# to detect which field is currently presenting its fluxmatrix.
from ooflib.SWIG.engine import fieldindex
from ooflib.SWIG.engine import outputval
from ooflib.SWIG.engine import planarity
from ooflib.SWIG.engine import pypropertywrapper
from ooflib.SWIG.engine import symmmatrix
from ooflib.SWIG.engine.property.elasticity import cijkl
from ooflib.common import debug
from ooflib.engine import propertyregistration
from ooflib.engine import problem
Displacement = problem.Displacement
Stress = problem.Stress
class TestProp(pypropertywrapper.PyPropertyWrapper):
def __init__(self, registration, name):
        self.modulus = cijkl.Cijkl()
pypropertywrapper.PyPropertyWrapper.__init__(self,
registration,
name)
# Fixed, hard-coded moduli, for now, numerically HexagonalElasticity
self.modulus[0,0] = self.modulus[1,1] = 1.0
self.modulus[0,1] = 0.5
self.modulus[5,5] = 0.25
# Explicitly set all others to zero.
self.modulus[0,2] = self.modulus[0,3] = self.modulus[0,4] = 0.0
self.modulus[0,5] = 0.0
self.modulus[1,2] = self.modulus[1,3] = self.modulus[1,4] = 0.0
self.modulus[1,5] = 0.0
self.modulus[2,2] = self.modulus[2,3] = self.modulus[2,4] = 0.0
self.modulus[2,5] = 0.0
self.modulus[3,3] = self.modulus[3,4] = self.modulus[3,5] = 0.0
self.modulus[4,4] = self.modulus[4,5] = 0.0
# This property has a simple repr, with no parameters.
def __repr__(self):
return "Test(name='%s')" % self.name()
def integration_order(self, mesh, element):
return element.shapefun_degree()
def cross_reference(self, material):
# This property requires an orientation to be present in the
# same Material. It doesn't actually use it, though...
self.orientation = material.fetchProperty('Orientation')
def flux_matrix(self, mesh, element, funcnodeiterator, flux,
masterpos, fluxdata):
# Shape functions.
sf = funcnodeiterator.shapefunction(masterpos)
dshapedx = funcnodeiterator.dshapefunction(0,masterpos)
dshapedy = funcnodeiterator.dshapefunction(1,masterpos)
fluxcomp = Stress.iterator(planarity.ALL_INDICES)
while not fluxcomp.end():
fieldcomp = Displacement.iterator(planarity.IN_PLANE)
while not fieldcomp.end():
ell0 = fieldindex.SymTensorIndex(0, fieldcomp.integer())
ell1 = fieldindex.SymTensorIndex(1, fieldcomp.integer())
v = self.modulus[fluxcomp.integer(), ell0.integer()]*dshapedx+\
self.modulus[fluxcomp.integer(), ell1.integer()]*dshapedy
fluxdata.add_stiffness_matrix_element(fluxcomp, Displacement,
fieldcomp,
funcnodeiterator, v)
fieldcomp.next()
# loop over out-of-plane strains
if not Displacement.in_plane(mesh):
dispz = Displacement.out_of_plane()
ell = dispz.iterator(planarity.ALL_INDICES)
while not ell.end():
if ell.integer() == 2:
diag_factor = 1.
else:
diag_factor = 0.5
v = self.modulus[fluxcomp.integer(),
fieldindex.SymTensorIndex(
2,ell.integer()).integer() ] * sf * diag_factor
fluxdata.add_stiffness_matrix_element(fluxcomp,
dispz, ell,
funcnodeiterator, v)
ell.next()
fluxcomp.next()
def output(self, mesh, element, propertyoutput, position):
if propertyoutput.name() == "Energy":
return outputval.ScalarOutputVal(3.14)*position.mastercoord()[0]
if propertyoutput.name() == "Strain":
stype = propertyoutput.getRegisteredParamName("type")
if stype == "Geometric":
return symmmatrix.SymmMatrix3(0,1,2,3,4,5)
propertyregistration.PropertyRegistration('PyProperty', TestProp,
"fruitproperty.pyproperty",
1000,
params=[],
fields=[Displacement],
fluxes=[Stress],
outputs=["Energy", "Strain"],
propertyType="Elasticity")
| 3,513
| 31
| 198
|
64b5a2cb8929ad161a1ee1dc4b0d356c36e7cf8d
| 2,930
|
py
|
Python
|
distributed_dp/dme_utils.py
|
AbdulmoneamAli/federated
|
c54a9f5053d6316f81aa6f6d1eba61068927a33d
|
[
"Apache-2.0"
] | 1
|
2021-07-29T16:35:21.000Z
|
2021-07-29T16:35:21.000Z
|
distributed_dp/dme_utils.py
|
AbdulmoneamAli/federated
|
c54a9f5053d6316f81aa6f6d1eba61068927a33d
|
[
"Apache-2.0"
] | null | null | null |
distributed_dp/dme_utils.py
|
AbdulmoneamAli/federated
|
c54a9f5053d6316f81aa6f6d1eba61068927a33d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for distributed mean estimation."""
import numpy as np
import tensorflow as tf
from distributed_dp.modular_clipping_factory import modular_clip_by_value
def generate_client_data(d, n, l2_norm=1):
"""Sample `n` of `d`-dim vectors on the l2 ball with radius `l2_norm`.
Args:
d: The dimension of the client vector.
n: The number of clients.
l2_norm: The L2 norm of the sampled vector.
Returns:
A list of `n` np.array each with shape (d,).
"""
vectors = np.random.normal(size=(n, d))
unit_vectors = vectors / np.linalg.norm(vectors, axis=-1, keepdims=True)
scaled_vectors = unit_vectors * l2_norm
# Cast to float32 as TF implementations use float32.
return list(scaled_vectors.astype(np.float32))
def compute_dp_average(client_data, dp_query, is_compressed, bits):
"""Aggregate client data with DPQuery's interface and take average."""
global_state = dp_query.initial_global_state()
sample_params = dp_query.derive_sample_params(global_state)
client_template = tf.zeros_like(client_data[0])
sample_state = dp_query.initial_sample_state(client_template)
if is_compressed:
# Achieve compression via modular clipping. Upper bound is exclusive.
clip_lo, clip_hi = -(2**(bits - 1)), 2**(bits - 1)
# 1. Client pre-processing stage.
for x in client_data:
record = tf.convert_to_tensor(x)
prep_record = dp_query.preprocess_record(sample_params, record)
# Client applies modular clip on the preprocessed record.
prep_record = modular_clip_by_value(prep_record, clip_lo, clip_hi)
sample_state = dp_query.accumulate_preprocessed_record(
sample_state, prep_record)
# 2. Server applies modular clip on the aggregate.
sample_state = modular_clip_by_value(sample_state, clip_lo, clip_hi)
else:
for x in client_data:
record = tf.convert_to_tensor(x)
sample_state = dp_query.accumulate_record(
sample_params, sample_state, record=record)
# Apply server post-processing.
agg_result, _ = dp_query.get_noised_result(sample_state, global_state)
# The agg_result should have the same input type as client_data.
assert agg_result.shape == client_data[0].shape
assert agg_result.dtype == client_data[0].dtype
# Take the average on the aggregate.
return agg_result / len(client_data)
| 37.088608
| 74
| 0.743686
|
# Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for distributed mean estimation."""
import numpy as np
import tensorflow as tf
from distributed_dp.modular_clipping_factory import modular_clip_by_value
def generate_client_data(d, n, l2_norm=1):
"""Sample `n` of `d`-dim vectors on the l2 ball with radius `l2_norm`.
Args:
d: The dimension of the client vector.
n: The number of clients.
l2_norm: The L2 norm of the sampled vector.
Returns:
A list of `n` np.array each with shape (d,).
"""
vectors = np.random.normal(size=(n, d))
unit_vectors = vectors / np.linalg.norm(vectors, axis=-1, keepdims=True)
scaled_vectors = unit_vectors * l2_norm
# Cast to float32 as TF implementations use float32.
return list(scaled_vectors.astype(np.float32))
def compute_dp_average(client_data, dp_query, is_compressed, bits):
"""Aggregate client data with DPQuery's interface and take average."""
global_state = dp_query.initial_global_state()
sample_params = dp_query.derive_sample_params(global_state)
client_template = tf.zeros_like(client_data[0])
sample_state = dp_query.initial_sample_state(client_template)
if is_compressed:
# Achieve compression via modular clipping. Upper bound is exclusive.
clip_lo, clip_hi = -(2**(bits - 1)), 2**(bits - 1)
# 1. Client pre-processing stage.
for x in client_data:
record = tf.convert_to_tensor(x)
prep_record = dp_query.preprocess_record(sample_params, record)
# Client applies modular clip on the preprocessed record.
prep_record = modular_clip_by_value(prep_record, clip_lo, clip_hi)
sample_state = dp_query.accumulate_preprocessed_record(
sample_state, prep_record)
# 2. Server applies modular clip on the aggregate.
sample_state = modular_clip_by_value(sample_state, clip_lo, clip_hi)
else:
for x in client_data:
record = tf.convert_to_tensor(x)
sample_state = dp_query.accumulate_record(
sample_params, sample_state, record=record)
# Apply server post-processing.
agg_result, _ = dp_query.get_noised_result(sample_state, global_state)
# The agg_result should have the same input type as client_data.
assert agg_result.shape == client_data[0].shape
assert agg_result.dtype == client_data[0].dtype
# Take the average on the aggregate.
return agg_result / len(client_data)
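# Hedged usage sketch (added; not part of this module). GaussianSumQuery
# from tensorflow_privacy is one DPQuery that fits the interface used
# above; treat the import path and the noise parameters as assumptions.
#
#   from tensorflow_privacy.privacy.dp_query.gaussian_query import (
#       GaussianSumQuery)
#
#   data = generate_client_data(d=100, n=10, l2_norm=1.0)
#   query = GaussianSumQuery(l2_norm_clip=1.0, stddev=0.5)
#   mean_estimate = compute_dp_average(
#       data, query, is_compressed=False, bits=None)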
| 0
| 0
| 0
|
d4c2ee1d2c14dcb31de7addfc1b457de42691515
| 3,199
|
py
|
Python
|
p2ner/components/plugin/overlayviz2/overlayviz/xmlinterface.py
|
schristakidis/p2ner
|
46694a41e8c1ebdc28f520b58c126da8785f3eed
|
[
"Apache-2.0"
] | 2
|
2015-06-01T22:04:34.000Z
|
2017-07-06T09:35:00.000Z
|
p2ner/components/plugin/overlayviz2/overlayviz/xmlinterface.py
|
schristakidis/p2ner
|
46694a41e8c1ebdc28f520b58c126da8785f3eed
|
[
"Apache-2.0"
] | null | null | null |
p2ner/components/plugin/overlayviz2/overlayviz/xmlinterface.py
|
schristakidis/p2ner
|
46694a41e8c1ebdc28f520b58c126da8785f3eed
|
[
"Apache-2.0"
] | 1
|
2019-11-26T10:22:35.000Z
|
2019-11-26T10:22:35.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.abstract.interface import Interface
from cPickle import loads
| 33.673684
| 91
| 0.524539
|
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.abstract.interface import Interface
from cPickle import loads
class VizXMLInterface(Interface):
def initInterface(self):
pass
def setId(self,id):
self.id=id
def getNeighbours(self,func):
self.func=func
self.neighs={}
for peer in self.parent.getRunningClients(self.id):
ip=peer[0]
port=peer[1]
p=peer[7]
self.neighs[(ip,port)]={}
self.neighs[(ip,port)]['response']=False
for ov in range(3):
self.neighs[(ip,port)][ov]={}
p.getNeighbours(self.id,ip,port,self.gotNeighs,self.failed)
def failed(self,err,ip,port):
del self.neighs[(ip,port)]
self.parent.removePeer(ip,port)
for p in self.neighs.values():
if not p['response']:
return
self.constructNeighs()
def gotNeighs(self,neighs,ip,port):
if self.neighs[(ip,port)]['response']:
            print 'already got the neighbours from peer %s:%s' % (ip, port)
self.neighs[(ip,port)]['response']=True
for ov,v in neighs.items():
if v:
n = [loads(p) for p in v['neighs']]
for p in n:
try:
p.plotRtt=sum(p.lastRtt)/len(p.lastRtt)
                    except ZeroDivisionError:  # p.lastRtt is empty
print 'no rtt values ',p.lastRtt
p.plotRtt=1
self.neighs[(ip,port)][int(ov)]['neighs']=n
self.neighs[(ip,port)][int(ov)]['energy']=neighs[ov]['energy']
self.neighs[(ip,port)][int(ov)]['stats']=neighs[ov]['stats']
else:
self.neighs[(ip,port)][int(ov)]=None
for p in self.neighs.values():
if not p['response']:
return
self.constructNeighs()
def constructNeighs(self):
ret={}
for ov in range(3):
ret[ov]={}
ret[ov]['neighs']={}
ret[ov]['energy']=[]
ret[ov]['stats']={}
energy=[]
for peer,v in self.neighs.items():
if v[ov]:
ret[ov]['energy'].append(v[ov]['energy'])
ret[ov]['neighs'][peer]=[]
for p in v[ov]['neighs']:
ret[ov]['neighs'][peer].append(((p.getIP(),p.getPort()),p.plotRtt))
ret[ov]['stats'][peer]=v[ov]['stats']
peers=self.neighs.keys()
self.func(peers,ret)
| 2,286
| 12
| 184
|
c4847d72a088620fab5a7b96015b61f8285613d4
| 1,858
|
py
|
Python
|
migrations/versions/8580b5c0888c_initial_migrate.py
|
alchermd/client_payroll
|
fefb434cb3a4af768a1d01d0e88a4f22f88a2e6c
|
[
"MIT"
] | 2
|
2018-12-02T01:36:57.000Z
|
2019-11-23T01:01:49.000Z
|
migrations/versions/8580b5c0888c_initial_migrate.py
|
alchermd/client_payroll
|
fefb434cb3a4af768a1d01d0e88a4f22f88a2e6c
|
[
"MIT"
] | 4
|
2017-11-03T11:47:05.000Z
|
2017-11-05T15:20:00.000Z
|
migrations/versions/8580b5c0888c_initial_migrate.py
|
alchermd/client_payroll
|
fefb434cb3a4af768a1d01d0e88a4f22f88a2e6c
|
[
"MIT"
] | 5
|
2018-04-02T13:52:43.000Z
|
2022-01-25T06:38:44.000Z
|
"""Initial migrate
Revision ID: 8580b5c0888c
Revises:
Create Date: 2017-11-02 23:42:58.243681
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8580b5c0888c'
down_revision = None
branch_labels = None
depends_on = None
| 30.966667
| 65
| 0.669537
|
"""Initial migrate
Revision ID: 8580b5c0888c
Revises:
Create Date: 2017-11-02 23:42:58.243681
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8580b5c0888c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('admins',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=25), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('employees',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('date_employed', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('employers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('payments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('employer_id', sa.Integer(), nullable=True),
sa.Column('employee_id', sa.Integer(), nullable=True),
sa.Column('payment_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['employee_id'], ['employees.id'], ),
sa.ForeignKeyConstraint(['employer_id'], ['employers.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('payments')
op.drop_table('employers')
op.drop_table('employees')
op.drop_table('admins')
# ### end Alembic commands ###
| 1,533
| 0
| 46
|
2d6073c49b3cd04526985cca2603a8dd633458fa
| 108
|
py
|
Python
|
src/model/__init__.py
|
celsomilne/soccer
|
b3b164b6610ebd88b13601970d2d4e65423d0a57
|
[
"MIT"
] | null | null | null |
src/model/__init__.py
|
celsomilne/soccer
|
b3b164b6610ebd88b13601970d2d4e65423d0a57
|
[
"MIT"
] | 2
|
2021-06-08T20:34:37.000Z
|
2022-02-10T01:12:59.000Z
|
src/model/__init__.py
|
celsomilne/soccer
|
b3b164b6610ebd88b13601970d2d4e65423d0a57
|
[
"MIT"
] | null | null | null |
from ._base import *
from .soccer import SoccerModel
from .soccerObjectDetector import SoccerObjectDetector
| 27
| 54
| 0.851852
|
from ._base import *
from .soccer import SoccerModel
from .soccerObjectDetector import SoccerObjectDetector
| 0
| 0
| 0
|
d219082334961cda51c1bcbec9e557f2b3ee25de
| 884
|
py
|
Python
|
examples/uni_arviz_rhat.py
|
kushagragpt99/kanga
|
1f2343d15965148ae7c1dea95168c31a87b27c4f
|
[
"MIT"
] | 1
|
2022-02-03T08:50:35.000Z
|
2022-02-03T08:50:35.000Z
|
examples/uni_arviz_rhat.py
|
kushagragpt99/kanga
|
1f2343d15965148ae7c1dea95168c31a87b27c4f
|
[
"MIT"
] | null | null | null |
examples/uni_arviz_rhat.py
|
kushagragpt99/kanga
|
1f2343d15965148ae7c1dea95168c31a87b27c4f
|
[
"MIT"
] | null | null | null |
# Compute the potential scale reduction factor (Rhat) using kanga's rhat function, which wraps arviz's rhat
# %% Load packages
import arviz as az
import numpy as np
# %% Define function for computing univariate Rhat based on arviz
# x is a numpy array of 3 dimensions, (chain, MC iteration, parameter)
def uni_arviz_rhat(x, var_names=None, method='folded', vars=None):
    return [az.rhat(x.transpose()[i].transpose(), var_names=var_names, method=method) for i in vars or range(x.shape[2])]
# %% Read chains
chains = np.array([np.genfromtxt('chain'+str(i+1).zfill(2)+'.csv', delimiter=',') for i in range(4)])
# %% Compute Rhat using rank method
# The output of rhat in kanga coincides with the output of Rhat in rstan
# See
# https://mc-stan.org/rstan/reference/Rhat.html
rhat_rank = uni_arviz_rhat(chains, method='rank')
print('Rhat based on rank method: {}'.format(rhat_rank))
| 34
| 121
| 0.726244
|
# Compute the potential scale reduction factor (Rhat) using kanga's rhat function, which wraps arviz's rhat
# %% Load packages
import arviz as az
import numpy as np
# %% Define function for computing univariate Rhat based on arviz
# x is a numpy array of 3 dimensions, (chain, MC iteration, parameter)
def uni_arviz_rhat(x, var_names=None, method='folded', vars=None):
return [az.rhat(x.transpose()[i].transpose(), var_names=var_names, method=method) for i in vars or range(x.shape[2])]
# %% Read chains
chains = np.array([np.genfromtxt('chain'+str(i+1).zfill(2)+'.csv', delimiter=',') for i in range(4)])
# %% Compute Rhat using rank method
# The output of rhat in kanga coincides with the output of Rhat in rstan
# See
# https://mc-stan.org/rstan/reference/Rhat.html
rhat_rank = uni_arviz_rhat(chains, method='rank')
print('Rhat based on rank method: {}'.format(rhat_rank))
| 167
| 0
| 22
|
c4d06705bc583b21e2502fb5ef2e395b64121841
| 971
|
py
|
Python
|
tests/helpers/information/test_read_hacs_manifest.py
|
bdraco/integration
|
b30e799bb27fdd978bd68f21909c82005d0dd3ea
|
[
"MIT"
] | 2
|
2019-06-18T11:30:53.000Z
|
2019-10-03T21:34:11.000Z
|
tests/helpers/information/test_read_hacs_manifest.py
|
bdraco/integration
|
b30e799bb27fdd978bd68f21909c82005d0dd3ea
|
[
"MIT"
] | 341
|
2019-06-18T11:30:55.000Z
|
2021-07-15T05:38:46.000Z
|
tests/helpers/information/test_read_hacs_manifest.py
|
bdraco/integration
|
b30e799bb27fdd978bd68f21909c82005d0dd3ea
|
[
"MIT"
] | null | null | null |
"""Helpers: Information: read_hacs_manifest."""
import json
# pylint: disable=missing-docstring
import os
from custom_components.hacs.helpers.functions.information import read_hacs_manifest
from custom_components.hacs.share import get_hacs
| 27.742857
| 83
| 0.727085
|
"""Helpers: Information: read_hacs_manifest."""
import json
# pylint: disable=missing-docstring
import os
from custom_components.hacs.helpers.functions.information import read_hacs_manifest
from custom_components.hacs.share import get_hacs
def temp_cleanup(tmpdir):
    """Remove the manifest file and hacs directory created by the test."""
hacsdir = f"{tmpdir.dirname}/custom_components/hacs"
manifestfile = f"{hacsdir}/manifest.json"
if os.path.exists(manifestfile):
os.remove(manifestfile)
if os.path.exists(hacsdir):
os.removedirs(hacsdir)
def test_read_hacs_manifest(tmpdir):
hacsdir = f"{tmpdir.dirname}/custom_components/hacs"
manifestfile = f"{hacsdir}/manifest.json"
hacs = get_hacs()
hacs.system.config_path = tmpdir.dirname
data = {"test": "test"}
os.makedirs(hacsdir, exist_ok=True)
with open(manifestfile, "w") as manifest_file:
manifest_file.write(json.dumps(data))
manifest = read_hacs_manifest()
assert data == manifest
temp_cleanup(tmpdir)
| 681
| 0
| 46
|
a28a414297ec2f20a9b5a310b17760682c11db49
| 1,573
|
py
|
Python
|
ctf/pieces.py
|
documentedai/capture-the-flag
|
4dbfba094f2a56a6b581098b603fe49a4515efb6
|
[
"BSD-3-Clause"
] | null | null | null |
ctf/pieces.py
|
documentedai/capture-the-flag
|
4dbfba094f2a56a6b581098b603fe49a4515efb6
|
[
"BSD-3-Clause"
] | 3
|
2020-04-22T01:12:06.000Z
|
2020-04-26T17:59:11.000Z
|
ctf/pieces.py
|
documentedai/capture-the-flag
|
4dbfba094f2a56a6b581098b603fe49a4515efb6
|
[
"BSD-3-Clause"
] | null | null | null |
"""Pieces used in Capture The Flag (Ctf) game."""
| 32.770833
| 77
| 0.583598
|
"""Pieces used in Capture The Flag (Ctf) game."""
class Piece(object):
def __init__(self, idx, team, position):
"""This class initializes the storage of standard attributes,
shared amongst the other pieces.
Args:
idx (:obj:`int`): Index of Piece.
team (:obj:`int`): The team this piece belongs to.
position (:obj:`tuple`): Location of the piece on the board.
"""
self.idx = idx
self.team = team
self.position = position
class Unit(Piece):
def __init__(self, idx, team, position, has_flag=False, in_jail=False):
"""Unit piece, representing a controllable character on the board.
Args:
has_flag (:obj:`bool`, optional): Whether or not the unit has the
flag. Defaults to `False`.
in_jail (:obj:`bool`, optional): Whether or not the unit is in
jail. Defaults to `False`.
"""
super().__init__(idx, team, position)
self.has_flag = has_flag
self.in_jail = in_jail
class Flag(Piece):
def __init__(self, idx, team, position, grounded=True):
"""Flag piece, representing one of the team's flags.
Args:
grounded (:obj:`bool`, optional): Whether or not this flag is on
the ground. `True` meaning this flag is on the ground,
`False` meaning a unit is currently carrying this flag.
Defaults to `True`.
"""
super().__init__(idx, team, position)
self.grounded = grounded
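# Hedged usage sketch (not part of the original file): constructing pieces
# directly to show the attribute layout defined above.
if __name__ == "__main__":
    unit = Unit(idx=0, team=1, position=(2, 3))
    flag = Flag(idx=0, team=2, position=(7, 7))
    unit.has_flag, flag.grounded = True, False  # the unit picks up the flag
    print(unit.position, unit.has_flag, flag.grounded)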
| 0
| 1,452
| 69
|
2c368ee1137bb051389f24bf90f066aacb7f7eb3
| 6,202
|
py
|
Python
|
black_box/cem.py
|
domluna/deep-rl-gym-tutorials
|
7e6f53fac0cd4eb22f73916f477d43e3b6ef8715
|
[
"MIT"
] | 17
|
2016-05-29T21:47:22.000Z
|
2021-02-05T07:51:24.000Z
|
black_box/cem.py
|
domluna/deep-rl-gym-tutorials
|
7e6f53fac0cd4eb22f73916f477d43e3b6ef8715
|
[
"MIT"
] | null | null | null |
black_box/cem.py
|
domluna/deep-rl-gym-tutorials
|
7e6f53fac0cd4eb22f73916f477d43e3b6ef8715
|
[
"MIT"
] | 5
|
2016-05-29T21:46:58.000Z
|
2019-05-15T07:00:01.000Z
|
"""The main idea of CE (Cross Entropy) is to maintain a distribution
of possible solution, and update this distribution accordingly.
Preliminary investigation showed that applicability of CE to RL problems
is restricted severly by the phenomenon that the distribution concentrates to
a single point too fast.
To prevent this issue, noise is added to the previous stddev/variance update
calculation.
We implement two algorithms cem, the Cross-Entropy Method (CEM) with noise [1] and
Proportional Cross-Entropy (PCEM) [2].
CEM is implemented with decreasing variance noise
variance + max(5 - t / 10, 0), where t is the iteration step
PCEM is implemented the same as CEM except we adjust the weights, evaluations of f
as follows:
M = max(weights)
m = min(weights)
weights = (weight - m) / (M - m + eps)
where eps is a very small value to avoid division by 0
An issue with CEM is it might not optimize the actual objective. PCEM helps
with this.
References:
[1] Learning Tetris with the Noisy Cross-Entropy Method (Szita, Lorincz 2006)
[2] The Cross-Entropy Method Optimizes for Quantiles (Goschin, Weinstein, Littman 2013)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import range
import gym
import numpy as np
import logging
import argparse
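# Hedged illustration (not part of the original file): the PCEM weight
# rescaling described in the module docstring, written out standalone. The
# name `_rescale_weights` is ours; the full pcem implementation applies this
# to the raw episode returns before selecting the elite samples.
def _rescale_weights(ys, eps=1e-10):
    """Map raw evaluations ys into [0, 1]: (ys - min) / (max - min + eps)."""
    ys = np.asarray(ys, dtype=float)
    return (ys - np.min(ys)) / (np.max(ys) - np.min(ys) + eps)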
# only two possible actions 0 or 1
def do_rollout(agent, env, num_steps, render=False):
"""
Performs actions for num_steps on the environment
    based on the agent's current params
"""
total_rew = 0
ob = env.reset()
for t in range(num_steps):
a = agent.act(ob)
(ob, reward, done, _) = env.step(a)
total_rew += reward
if render and t%3==0: env.render()
if done: break
return total_rew, t+1
# mean and std are 1D array of size d
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--iters', default=50, type=int, help='number of iterations')
parser.add_argument('--samples', default=30, type=int, help='number of samples CEM algorithm chooses from on each iter')
parser.add_argument('--num_steps', default=200, type=int, help='number of steps/actions in the rollout')
parser.add_argument('--top_frac', default=0.2, type=float, help='percentage of top samples used to calculate mean and variance of next iteration')
parser.add_argument('--algorithm', default='cem', type=str, choices=['pcem', 'cem'])
parser.add_argument('--outdir', default='CartPole-v0-cem', type=str, help='output directory where results are saved (/tmp/ prefixed)')
parser.add_argument('--render', action='store_true', help='show rendered results during training')
parser.add_argument('--upload', action='store_true', help='upload results via OpenAI API')
args = parser.parse_args()
print(args)
np.random.seed(args.seed)
env = gym.make('CartPole-v0')
num_steps = args.num_steps
ef = None
if args.algorithm == 'cem':
ef = cem
else:
ef = pcem
outdir = '/tmp/' + args.outdir
env.monitor.start(outdir, force=True)
f = evaluation_func(BinaryActionLinearPolicy, env, num_steps)
# params for cem
params = dict(n_iters=args.iters, n_samples=args.samples, top_frac=args.top_frac)
u = np.random.randn(env.observation_space.shape[0]+1)
var = np.square(np.ones_like(u) * 0.1)
for (i, data) in enumerate(ef(f, u, var, **params)):
print("Iteration {}. Episode mean reward: {}".format(i, data['y_mean']))
agent = BinaryActionLinearPolicy(data['theta_mean'])
if args.render:
do_rollout(agent, env, num_steps, render=True)
env.monitor.close()
# make sure to setup your OPENAI_GYM_API_KEY environment variable
if args.upload:
gym.upload(outdir, algorithm_id=args.algorithm)
| 37.587879
| 150
| 0.666075
|
"""The main idea of CE (Cross Entropy) is to maintain a distribution
of possible solution, and update this distribution accordingly.
Preliminary investigation showed that applicability of CE to RL problems
is restricted severly by the phenomenon that the distribution concentrates to
a single point too fast.
To prevent this issue, noise is added to the previous stddev/variance update
calculation.
We implement two algorithms cem, the Cross-Entropy Method (CEM) with noise [1] and
Proportional Cross-Entropy (PCEM) [2].
CEM is implemented with decreasing variance noise
variance + max(5 - t / 10, 0), where t is the iteration step
PCEM is implemented the same as CEM except we adjust the weights, evaluations of f
as follows:
M = max(weights)
m = min(weights)
weights = (weight - m) / (M - m + eps)
where eps is a very small value to avoid division by 0
An issue with CEM is it might not optimize the actual objective. PCEM helps
with this.
References:
[1] Learning Tetris with the Noisy Cross-Entropy Method (Szita, Lorincz 2006)
[2] The Cross-Entropy Method Optimizes for Quantiles (Goschin, Weinstein, Littman 2013)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import range
import gym
import numpy as np
import logging
import argparse
# only two possible actions 0 or 1
class BinaryActionLinearPolicy(object):
    """Linear threshold policy: picks action 1 iff w.ob + b < 0, else 0."""
    def __init__(self, theta):
        # theta packs the parameters: theta = [w_1, ..., w_d, b]
        self.w = theta[:-1]
        self.b = theta[-1]
def act(self, ob):
y = ob.dot(self.w) + self.b
a = int(y < 0)
return a
def do_rollout(agent, env, num_steps, render=False):
"""
Performs actions for num_steps on the environment
    based on the agent's current params
"""
total_rew = 0
ob = env.reset()
for t in range(num_steps):
a = agent.act(ob)
(ob, reward, done, _) = env.step(a)
total_rew += reward
if render and t%3==0: env.render()
if done: break
return total_rew, t+1
# mean and std are 1D array of size d
def cem(f, mean, var, n_iters, n_samples, top_frac):
top_n = int(np.round(top_frac * n_samples))
for i in range(n_iters):
# generate n_samples each iteration with new mean and stddev
samples = np.transpose(np.array([np.random.normal(u, np.sqrt(o), n_samples) for u, o in zip(mean, var)]))
ys = np.array([f(s) for s in samples])
        # the top samples are the ones which give the highest f evaluation results
top_idxs = ys.argsort()[::-1][:top_n]
top_samples = samples[top_idxs]
# this is taken straight from [1], constant noise param
# dependent on the iteration step.
v = max(5 - i / 10, 0)
mean = top_samples.mean(axis=0)
var = top_samples.var(axis=0) + v
yield {'ys': ys, 'theta_mean': mean, 'y_mean': ys.mean()}
def pcem(f, mean, var, n_iters, n_samples, top_frac):
eps = 1e-10 # avoid dividing by 0
top_n = int(np.round(top_frac * n_samples))
for i in range(n_iters):
# generate n_samples each iteration with new mean and stddev
samples = np.transpose(np.array([np.random.normal(u, np.sqrt(o), n_samples) for u, o in zip(mean, var)]))
ys = np.array([f(s) for s in samples])
max_y = np.max(ys)
min_y = np.min(ys)
ys = (ys - min_y) / (max_y - min_y + eps)
        # the top samples are the ones which give the highest f evaluation results
top_idxs = ys.argsort()[::-1][:top_n]
top_samples = samples[top_idxs]
# this is taken straight from [1], constant noise param
# dependent on the iteration step.
v = max(5 - i / 10, 0)
mean = top_samples.mean(axis=0)
var = top_samples.var(axis=0) + v
yield {'ys': ys, 'theta_mean': mean, 'y_mean': ys.mean()}
def evaluation_func(policy, env, num_steps):
def f(theta):
agent = policy(theta)
rew, t = do_rollout(agent, env, num_steps, render=False)
return rew
return f
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--iters', default=50, type=int, help='number of iterations')
parser.add_argument('--samples', default=30, type=int, help='number of samples CEM algorithm chooses from on each iter')
parser.add_argument('--num_steps', default=200, type=int, help='number of steps/actions in the rollout')
parser.add_argument('--top_frac', default=0.2, type=float, help='percentage of top samples used to calculate mean and variance of next iteration')
parser.add_argument('--algorithm', default='cem', type=str, choices=['pcem', 'cem'])
parser.add_argument('--outdir', default='CartPole-v0-cem', type=str, help='output directory where results are saved (/tmp/ prefixed)')
parser.add_argument('--render', action='store_true', help='show rendered results during training')
parser.add_argument('--upload', action='store_true', help='upload results via OpenAI API')
args = parser.parse_args()
print(args)
np.random.seed(args.seed)
env = gym.make('CartPole-v0')
num_steps = args.num_steps
ef = None
if args.algorithm == 'cem':
ef = cem
else:
ef = pcem
outdir = '/tmp/' + args.outdir
env.monitor.start(outdir, force=True)
f = evaluation_func(BinaryActionLinearPolicy, env, num_steps)
# params for cem
params = dict(n_iters=args.iters, n_samples=args.samples, top_frac=args.top_frac)
u = np.random.randn(env.observation_space.shape[0]+1)
var = np.square(np.ones_like(u) * 0.1)
for (i, data) in enumerate(ef(f, u, var, **params)):
print("Iteration {}. Episode mean reward: {}".format(i, data['y_mean']))
agent = BinaryActionLinearPolicy(data['theta_mean'])
if args.render:
do_rollout(agent, env, num_steps, render=True)
env.monitor.close()
# make sure to setup your OPENAI_GYM_API_KEY environment variable
if args.upload:
gym.upload(outdir, algorithm_id=args.algorithm)
| 2,028
| 18
| 142
|
6a1dc6b0666aee9e976d0587cbda3b6fe02edfd5
| 851
|
py
|
Python
|
avwx_api/views.py
|
flyinactor91/AVWX-API
|
2c56cb4c82b0fc6d1644fbd68693476254928d68
|
[
"MIT"
] | 30
|
2016-12-16T07:26:13.000Z
|
2019-03-01T07:57:07.000Z
|
avwx_api/views.py
|
flyinactor91/AVWX-API
|
2c56cb4c82b0fc6d1644fbd68693476254928d68
|
[
"MIT"
] | 12
|
2018-02-18T20:41:39.000Z
|
2019-04-13T06:10:54.000Z
|
avwx_api/views.py
|
flyinactor91/AVWX-API
|
2c56cb4c82b0fc6d1644fbd68693476254928d68
|
[
"MIT"
] | 12
|
2018-02-14T08:53:06.000Z
|
2019-03-31T19:27:58.000Z
|
"""
Michael duPont - michael@mdupont.com
avwx_api.views - Routes and views for the Quart application
"""
# pylint: disable=W0702
# library
from quart import Response, jsonify
# module
from avwx_api import app
# Static Web Pages
@app.route("/")
@app.route("/home")
async def home() -> Response:
"""Returns static home page"""
return await app.send_static_file("html/home.html")
@app.route("/ping")
def ping() -> Response:
"""Send empty 200 ping response"""
return Response(None, 200)
# API Routing Errors
@app.route("/api")
async def no_report() -> Response:
"""Returns no report msg"""
return jsonify({"error": "No report type given"}), 400
@app.route("/api/metar")
@app.route("/api/taf")
async def no_station() -> Response:
"""Returns no station msg"""
return jsonify({"error": "No station given"}), 400
| 19.340909
| 59
| 0.66745
|
"""
Michael duPont - michael@mdupont.com
avwx_api.views - Routes and views for the Quart application
"""
# pylint: disable=W0702
# library
from quart import Response, jsonify
# module
from avwx_api import app
# Static Web Pages
@app.route("/")
@app.route("/home")
async def home() -> Response:
"""Returns static home page"""
return await app.send_static_file("html/home.html")
@app.route("/ping")
def ping() -> Response:
"""Send empty 200 ping response"""
return Response(None, 200)
# API Routing Errors
@app.route("/api")
async def no_report() -> Response:
"""Returns no report msg"""
return jsonify({"error": "No report type given"}), 400
@app.route("/api/metar")
@app.route("/api/taf")
async def no_station() -> Response:
"""Returns no station msg"""
return jsonify({"error": "No station given"}), 400
| 0
| 0
| 0
|
d7ef2a582472d8a5f158d57e9f43f8fbb6fa2f14
| 27,036
|
py
|
Python
|
third_party/ite/cost/base_i.py
|
gdikov/adversarial-variational-bayes
|
ebd692c70349f34bcb3a2086269bd814cafce96f
|
[
"MIT"
] | 11
|
2017-09-25T07:46:43.000Z
|
2019-12-04T12:02:49.000Z
|
Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/ite/cost/base_i.py
|
gonzalo-munillag/Private_AI_OpenMined
|
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
|
[
"MIT"
] | null | null | null |
Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/ite/cost/base_i.py
|
gonzalo-munillag/Private_AI_OpenMined
|
c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca
|
[
"MIT"
] | 4
|
2018-04-26T15:08:37.000Z
|
2019-07-02T14:15:08.000Z
|
""" Base mutual information estimators. """
from numpy import sum, sqrt, isnan, exp, mean, eye, ones, dot, cumsum, \
hstack, newaxis, maximum, prod, abs, arange, log
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.special import factorial
from scipy.linalg import det
from scipy.sparse.linalg import eigsh
from ite.cost.x_initialization import InitX, InitEtaKernel
from ite.cost.x_verification import VerCompSubspaceDims, \
VerSubspaceNumberIsK,\
VerOneDSubspaces
from ite.shared import compute_dcov_dcorr_statistics, median_heuristic,\
copula_transformation, compute_matrix_r_kcca_kgv
from ite.cost.x_kernel import Kernel
class BIDistCov(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Distance covariance estimator using pairwise distances.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, alpha=1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
alpha : float, optional
Parameter of the distance covariance: 0 < alpha < 2
(default is 1).
Examples
--------
>>> import ite
>>> co1 = ite.cost.BIDistCov()
>>> co2 = ite.cost.BIDistCov(alpha = 1.2)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attribute:
if alpha <= 0 or alpha >= 2:
raise Exception('0 < alpha < 2 is needed for this estimator!')
self.alpha = alpha
def estimation(self, y, ds):
""" Estimate distance covariance.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 2.
Returns
-------
i : float
Estimated distance covariance.
References
----------
Gabor J. Szekely and Maria L. Rizzo. Brownian distance covariance.
The Annals of Applied Statistics, 3:1236-1265, 2009.
Gabor J. Szekely, Maria L. Rizzo, and Nail K. Bakirov. Measuring
and testing dependence by correlation of distances. The Annals of
Statistics, 35:2769-2794, 2007.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 2)
num_of_samples = y.shape[0] # number of samples
a = compute_dcov_dcorr_statistics(y[:, :ds[0]], self.alpha)
b = compute_dcov_dcorr_statistics(y[:, ds[0]:], self.alpha)
i = sqrt(sum(a*b)) / num_of_samples
return i
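# Hedged usage sketch (not part of the original file), assuming the ite
# package is importable:
#
#     from numpy import array
#     from numpy.random import randn
#     co = BIDistCov()
#     y = randn(1000, 2)                   # two independent 1-D subspaces
#     i = co.estimation(y, array([1, 1]))  # close to 0 under independence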
class BIDistCorr(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Distance correlation estimator using pairwise distances.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, alpha=1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
alpha : float, optional
Parameter of the distance covariance: 0 < alpha < 2
(default is 1).
Examples
--------
>>> import ite
>>> co1 = ite.cost.BIDistCorr()
>>> co2 = ite.cost.BIDistCorr(alpha = 1.2)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attribute:
if alpha <= 0 or alpha >= 2:
raise Exception('0 < alpha < 2 is needed for this estimator!')
self.alpha = alpha
def estimation(self, y, ds):
""" Estimate distance correlation.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 2.
Returns
-------
i : float
Estimated distance correlation.
References
----------
Gabor J. Szekely and Maria L. Rizzo. Brownian distance covariance.
The Annals of Applied Statistics, 3:1236-1265, 2009.
Gabor J. Szekely, Maria L. Rizzo, and Nail K. Bakirov. Measuring
and testing dependence by correlation of distances. The Annals of
Statistics, 35:2769-2794, 2007.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 2)
a = compute_dcov_dcorr_statistics(y[:, :ds[0]], self.alpha)
b = compute_dcov_dcorr_statistics(y[:, ds[0]:], self.alpha)
n = sum(a*b) # numerator
        d1 = sum(a**2)  # denominator-1 (without sqrt)
        d2 = sum(b**2)  # denominator-2 (without sqrt)
if (d1 * d2) == 0: # >=1 of the random variables is constant
i = 0
else:
i = n / sqrt(d1 * d2) # <A,B> / sqrt(<A,A><B,B>)
i = sqrt(i)
return i
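# Hedged usage sketch (not part of the original file): distance correlation
# lies in [0, 1] and grows with dependence between the two subspaces.
#
#     from numpy import array
#     from numpy.random import randn
#     co = BIDistCorr()
#     x = randn(1000, 1)
#     y = hstack((x, x + 0.1 * randn(1000, 1)))  # strongly dependent pair
#     i = co.estimation(y, array([1, 1]))        # close to 1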
class BI3WayJoint(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Joint dependency from the mean embedding of the 'joint minus the
product of the marginals'.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, sigma1=0.1, sigma2=0.1, sigma3=0.1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
sigma1 : float, optional
Std in the RBF kernel on the first subspace (default is
sigma1 = 0.1). sigma1 = nan means 'use median heuristic'.
sigma2 : float, optional
Std in the RBF kernel on the second subspace (default is
sigma2 = 0.1). sigma2 = nan means 'use median heuristic'.
sigma3 : float, optional
Std in the RBF kernel on the third subspace (default is
sigma3 = 0.1). sigma3 = nan means 'use median heuristic'.
Examples
--------
>>> from numpy import nan
>>> import ite
>>> co1 = ite.cost.BI3WayJoint()
>>> co2 = ite.cost.BI3WayJoint(sigma1=0.1,sigma2=0.1,sigma3=0.1)
>>> co3 = ite.cost.BI3WayJoint(sigma1=nan,sigma2=nan,sigma3=nan)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attributes:
self.sigma1, self.sigma2, self.sigma3 = sigma1, sigma2, sigma3
def estimation(self, y, ds):
""" Estimate joint dependency.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 3.
Returns
-------
i : float
Estimated joint dependency.
References
----------
Dino Sejdinovic, Arthur Gretton, and Wicher Bergsma. A kernel test
for three-variable interactions. In Advances in Neural Information
Processing Systems (NIPS), pages 1124-1132, 2013. (Lancaster
three-variable interaction based dependency index).
Henry Oliver Lancaster. The Chi-squared Distribution. John Wiley
and Sons Inc, 1969. (Lancaster interaction)
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 3)
# Gram matrices (k1,k2,k3):
sigma1, sigma2, sigma3 = self.sigma1, self.sigma2, self.sigma3
# k1 (set co.sigma1 using median heuristic, if needed):
if isnan(sigma1):
sigma1 = median_heuristic(y[:, 0:ds[0]])
k1 = squareform(pdist(y[:, 0:ds[0]]))
k1 = exp(-k1**2 / (2 * sigma1**2))
# k2 (set co.sigma2 using median heuristic, if needed):
if isnan(sigma2):
sigma2 = median_heuristic(y[:, ds[0]:ds[0]+ds[1]])
k2 = squareform(pdist(y[:, ds[0]:ds[0]+ds[1]]))
k2 = exp(-k2**2 / (2 * sigma2**2))
# k3 (set co.sigma3 using median heuristic, if needed):
if isnan(sigma3):
sigma3 = median_heuristic(y[:, ds[0]+ds[1]:])
k3 = squareform(pdist(y[:, ds[0]+ds[1]:], 'euclidean'))
k3 = exp(-k3**2 / (2 * sigma3**2))
prod_of_ks = k1 * k2 * k3 # Hadamard product
term1 = mean(prod_of_ks)
term2 = -2 * mean(mean(k1, axis=1) * mean(k2, axis=1) *
mean(k3, axis=1))
term3 = mean(k1) * mean(k2) * mean(k3)
i = term1 + term2 + term3
return i
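# Hedged usage sketch (not part of the original file): a natural test case
# for three-way measures is XOR-style data, which is pairwise independent
# but jointly dependent.
#
#     from numpy import array
#     from numpy.random import randint
#     a = randint(0, 2, (1000, 1))
#     b = randint(0, 2, (1000, 1))
#     y = hstack((a, b, (a + b) % 2))  # third column = XOR of the first two
#     i = BI3WayJoint().estimation(y, array([1, 1, 1]))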
class BI3WayLancaster(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Estimate the Lancaster three-variable interaction measure.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, sigma1=0.1, sigma2=0.1, sigma3=0.1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
sigma1 : float, optional
Std in the RBF kernel on the first subspace (default is
sigma1 = 0.1). sigma1 = nan means 'use median heuristic'.
sigma2 : float, optional
Std in the RBF kernel on the second subspace (default is
sigma2 = 0.1). sigma2 = nan means 'use median heuristic'.
sigma3 : float, optional
Std in the RBF kernel on the third subspace (default is
sigma3 = 0.1). sigma3 = nan means 'use median heuristic'.
Examples
--------
>>> from numpy import nan
>>> import ite
>>> co1 = ite.cost.BI3WayLancaster()
>>> co2 = ite.cost.BI3WayLancaster(sigma1=0.1, sigma2=0.1,\
sigma3=0.1)
>>> co3 = ite.cost.BI3WayLancaster(sigma1=nan, sigma2=nan,\
sigma3=nan)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attributes:
self.sigma1, self.sigma2, self.sigma3 = sigma1, sigma2, sigma3
def estimation(self, y, ds):
""" Estimate Lancaster three-variable interaction measure.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 3.
Returns
-------
i : float
Estimated Lancaster three-variable interaction measure.
References
----------
Dino Sejdinovic, Arthur Gretton, and Wicher Bergsma. A kernel test
for three-variable interactions. In Advances in Neural Information
Processing Systems (NIPS), pages 1124-1132, 2013. (Lancaster
three-variable interaction based dependency index).
Henry Oliver Lancaster. The Chi-squared Distribution. John Wiley
and Sons Inc, 1969. (Lancaster interaction)
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 3)
num_of_samples = y.shape[0] # number of samples
# Gram matrices (k1,k2,k3):
sigma1, sigma2, sigma3 = self.sigma1, self.sigma2, self.sigma3
# k1 (set co.sigma1 using median heuristic, if needed):
if isnan(sigma1):
sigma1 = median_heuristic(y[:, 0:ds[0]])
k1 = squareform(pdist(y[:, 0:ds[0]]))
k1 = exp(-k1**2 / (2 * sigma1**2))
# k2 (set co.sigma2 using median heuristic, if needed):
if isnan(sigma2):
sigma2 = median_heuristic(y[:, ds[0]:ds[0]+ds[1]])
k2 = squareform(pdist(y[:, ds[0]:ds[0]+ds[1]]))
k2 = exp(-k2**2 / (2 * sigma2**2))
        # k3 (set co.sigma3 using median heuristic, if needed):
if isnan(sigma3):
sigma3 = median_heuristic(y[:, ds[0]+ds[1]:])
k3 = squareform(pdist(y[:, ds[0]+ds[1]:]))
k3 = exp(-k3**2 / (2 * sigma3**2))
# centering of k1, k2, k3:
h = eye(num_of_samples) -\
ones((num_of_samples, num_of_samples)) / num_of_samples
k1 = dot(dot(h, k1), h)
k2 = dot(dot(h, k2), h)
k3 = dot(dot(h, k3), h)
i = mean(k1 * k2 * k3)
return i
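# Hedged usage sketch (not part of the original file): nan-valued sigmas
# switch on the median heuristic documented in __init__.
#
#     from numpy import array, nan
#     from numpy.random import randn
#     co = BI3WayLancaster(sigma1=nan, sigma2=nan, sigma3=nan)
#     i = co.estimation(randn(500, 3), array([1, 1, 1]))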
class BIHSIC_IChol(InitEtaKernel, VerCompSubspaceDims):
""" Estimate HSIC using incomplete Cholesky decomposition.
HSIC refers to Hilbert-Schmidt Independence Criterion.
Partial initialization comes from 'InitEtaKernel', verification is
from 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Notes
-----
    The current implementation uses the same kernel on all the subspaces:
k = k_1 = ... = k_M, where y = [y^1;...;y^M].
Examples
--------
>>> from ite.cost.x_kernel import Kernel
>>> import ite
>>> co1 = ite.cost.BIHSIC_IChol()
>>> co2 = ite.cost.BIHSIC_IChol(eta=1e-3)
>>> k = Kernel({'name': 'RBF','sigma': 1})
>>> co3 = ite.cost.BIHSIC_IChol(kernel=k, eta=1e-3)
"""
def estimation(self, y, ds):
""" Estimate HSIC.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension.
Returns
-------
i : float
Estimated value of HSIC.
References
----------
Arthur Gretton, Olivier Bousquet, Alexander Smola and Bernhard
Scholkopf. Measuring Statistical Dependence with Hilbert-Schmidt
        Norms. International Conference on Algorithmic Learning Theory
(ALT), 63-78, 2005.
Alain Berlinet and Christine Thomas-Agnan. Reproducing Kernel
Hilbert Spaces in Probability and Statistics. Kluwer, 2004. (mean
embedding)
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
# initialization:
num_of_samples = y.shape[0] # number of samples
num_of_subspaces = len(ds)
# Step-1 (g1, g2, ...):
# 0,d_1,d_1+d_2,...,d_1+...+d_{M-1}; starting indices of the
# subspaces:
cum_ds = cumsum(hstack((0, ds[:-1])))
gs = list()
for m in range(num_of_subspaces):
idx = range(cum_ds[m], cum_ds[m] + ds[m])
g = self.kernel.ichol(y[:, idx], num_of_samples * self.eta)
g = g - mean(g, axis=0) # center the Gram matrix: dot(g,g.T)
gs.append(g)
# Step-2 (g1, g2, ... -> i):
i = 0
for i1 in range(num_of_subspaces-1): # i1 = 0:M-2
for i2 in range(i1+1, num_of_subspaces): # i2 = i1+1:M-1
i += norm(dot(gs[i2].T, gs[i1]))**2 # norm = Frob. norm
i /= num_of_samples**2
return i
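# Hedged usage sketch (not part of the original file): HSIC with a custom
# RBF kernel and a looser incomplete-Cholesky tolerance.
#
#     from numpy import array
#     from numpy.random import randn
#     k = Kernel({'name': 'RBF', 'sigma': 0.5})
#     co = BIHSIC_IChol(kernel=k, eta=1e-3)
#     i = co.estimation(randn(800, 3), array([1, 2]))  # subspaces R^1, R^2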
class BIHoeffding(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimate the multivariate version of Hoeffding's Phi.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, small_sample_adjustment=True):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
small_sample_adjustment: boolean, optional
Whether we want small-sample adjustment.
Examples
--------
>>> import ite
>>> co1 = ite.cost.BIHoeffding()
>>> co2 = ite.cost.BIHoeffding(small_sample_adjustment=False)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attributes:
self.small_sample_adjustment = small_sample_adjustment
def estimation(self, y, ds):
""" Estimate multivariate version of Hoeffding's Phi.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension = 1 for this estimator.
Returns
-------
i : float
Estimated value of the multivariate version of Hoeffding's Phi.
References
----------
Sandra Gaiser, Martin Ruppert, Friedrich Schmid. A multivariate
version of Hoeffding's Phi-Square. Journal of Multivariate
Analysis. 101: pages 2571-2586, 2010.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
num_of_samples, dim = y.shape
u = copula_transformation(y)
# term1:
m = 1 - maximum(u[:, 0][:, newaxis], u[:, 0])
for i in range(1, dim):
m *= 1 - maximum(u[:, i][:, newaxis], u[:, i])
term1 = mean(m)
# term2:
if self.small_sample_adjustment:
term2 = \
- mean(prod(1 - u**2 - (1 - u) / num_of_samples,
axis=1)) / \
(2**(dim - 1))
else:
term2 = - mean(prod(1 - u**2, axis=1)) / (2 ** (dim - 1))
# term3:
if self.small_sample_adjustment:
term3 = \
((num_of_samples - 1) * (2 * num_of_samples-1) /
(3 * 2 * num_of_samples**2))**dim
else:
term3 = 1 / 3**dim
i = term1 + term2 + term3
if self.mult:
if self.small_sample_adjustment:
t1 = \
sum((1 - arange(1,
num_of_samples) / num_of_samples)**dim
* (2*arange(1, num_of_samples) - 1)) \
/ num_of_samples**2
t2 = \
-2 * mean(((num_of_samples * (num_of_samples - 1) -
arange(1, num_of_samples+1) *
arange(num_of_samples)) /
(2 * num_of_samples ** 2))**dim)
t3 = term3
inv_hd = t1 + t2 + t3 # 1 / h(d, n)
else:
inv_hd = \
2 / ((dim + 1) * (dim + 2)) - factorial(dim) / \
(2 ** dim * prod(arange(dim + 1) + 1 / 2)) + \
                    1 / 3 ** dim  # 1 / h(d)
i /= inv_hd
i = sqrt(abs(i))
return i
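# Hedged usage sketch (not part of the original file): Hoeffding's Phi acts
# on the copula, so a monotone transform of a coordinate does not change the
# dependence it sees.
#
#     from numpy import array, exp
#     from numpy.random import randn
#     x = randn(1000, 1)
#     y = hstack((x, exp(x)))   # perfectly monotone-dependent pair
#     i = BIHoeffding().estimation(y, array([1, 1]))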
class BIKGV(InitEtaKernel, VerCompSubspaceDims):
""" Estimate kernel generalized variance (KGV).
Partial initialization comes from 'InitEtaKernel', verification is
from 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, kernel=Kernel(), eta=1e-2, kappa=0.01):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
kernel : Kernel, optional
For examples, see 'ite.cost.x_kernel.Kernel'
eta : float, >0, optional
It is used to control the quality of the incomplete Cholesky
decomposition based Gram matrix approximation. Smaller 'eta'
means larger sized Gram factor and better approximation.
(default is 1e-2)
kappa: float, >0
Regularization parameter.
Examples
--------
>>> import ite
>>> from ite.cost.x_kernel import Kernel
>>> co1 = ite.cost.BIKGV()
>>> co2 = ite.cost.BIKGV(eta=1e-4)
>>> co3 = ite.cost.BIKGV(eta=1e-4, kappa=0.02)
>>> k = Kernel({'name': 'RBF', 'sigma': 0.3})
>>> co4 = ite.cost.BIKGV(eta=1e-4, kernel=k)
"""
# initialize with 'InitEtaKernel':
super().__init__(mult=mult, kernel=kernel, eta=eta)
# other attributes:
self.kappa = kappa
def estimation(self, y, ds):
""" Estimate KGV.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension.
Returns
-------
i : float
Estimated value of KGV.
References
----------
Francis Bach, Michael I. Jordan. Kernel Independent Component
Analysis. Journal of Machine Learning Research, 3: 1-48, 2002.
Francis Bach, Michael I. Jordan. Learning graphical models with
Mercer kernels. International Conference on Neural Information
Processing Systems (NIPS), pages 1033-1040, 2002.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
num_of_samples = y.shape[0]
tol = num_of_samples * self.eta
r = compute_matrix_r_kcca_kgv(y, ds, self.kernel, tol, self.kappa)
i = -log(det(r)) / 2
return i
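# Hedged usage sketch (not part of the original file): KGV returns
# -log(det(R))/2 for the regularized kernel correlation matrix R computed
# above.
#
#     from numpy import array
#     from numpy.random import randn
#     co = BIKGV(eta=1e-3, kappa=0.02)
#     i = co.estimation(randn(500, 2), array([1, 1]))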
class BIKCCA(InitEtaKernel, VerCompSubspaceDims):
""" Kernel canonical correlation analysis (KCCA) based estimator.
Partial initialization comes from 'InitEtaKernel', verification is
from 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, kernel=Kernel(), eta=1e-2, kappa=0.01):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
kernel : Kernel, optional
For examples, see 'ite.cost.x_kernel.Kernel'
eta : float, >0, optional
It is used to control the quality of the incomplete Cholesky
decomposition based Gram matrix approximation. Smaller 'eta'
means larger sized Gram factor and better approximation.
(default is 1e-2)
kappa: float, >0
Regularization parameter.
Examples
--------
>>> import ite
>>> from ite.cost.x_kernel import Kernel
>>> co1 = ite.cost.BIKCCA()
>>> co2 = ite.cost.BIKCCA(eta=1e-4)
>>> co3 = ite.cost.BIKCCA(eta=1e-4, kappa=0.02)
>>> k = Kernel({'name': 'RBF', 'sigma': 0.3})
>>> co4 = ite.cost.BIKCCA(eta=1e-4, kernel=k)
"""
# initialize with 'InitEtaKernel':
super().__init__(mult=mult, kernel=kernel, eta=eta)
# other attributes:
self.kappa = kappa
def estimation(self, y, ds):
""" Estimate KCCA.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension.
Returns
-------
i : float
Estimated value of KCCA.
References
----------
Francis Bach, Michael I. Jordan. Learning graphical models with
Mercer kernels. International Conference on Neural Information
Processing Systems (NIPS), pages 1033-1040, 2002.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
num_of_samples = y.shape[0]
tol = num_of_samples * self.eta
r = compute_matrix_r_kcca_kgv(y, ds, self.kernel, tol, self.kappa)
eig_min = eigsh(r, k=1, which='SM')[0][0]
i = -log(eig_min) / 2
return i
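# Hedged usage sketch (not part of the original file): BIKCCA differs from
# BIKGV only in reducing R to its smallest eigenvalue before taking
# -log(.)/2.
#
#     from numpy import array
#     from numpy.random import randn
#     co = BIKCCA(eta=1e-3)
#     i = co.estimation(randn(500, 2), array([1, 1]))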
| 33.543424
| 79
| 0.544755
|
""" Base mutual information estimators. """
from numpy import sum, sqrt, isnan, exp, mean, eye, ones, dot, cumsum, \
hstack, newaxis, maximum, prod, abs, arange, log
from numpy.linalg import norm
from scipy.spatial.distance import pdist, squareform
from scipy.special import factorial
from scipy.linalg import det
from scipy.sparse.linalg import eigsh
from ite.cost.x_initialization import InitX, InitEtaKernel
from ite.cost.x_verification import VerCompSubspaceDims, \
VerSubspaceNumberIsK,\
VerOneDSubspaces
from ite.shared import compute_dcov_dcorr_statistics, median_heuristic,\
copula_transformation, compute_matrix_r_kcca_kgv
from ite.cost.x_kernel import Kernel
class BIDistCov(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Distance covariance estimator using pairwise distances.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, alpha=1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
alpha : float, optional
Parameter of the distance covariance: 0 < alpha < 2
(default is 1).
Examples
--------
>>> import ite
>>> co1 = ite.cost.BIDistCov()
>>> co2 = ite.cost.BIDistCov(alpha = 1.2)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attribute:
if alpha <= 0 or alpha >= 2:
raise Exception('0 < alpha < 2 is needed for this estimator!')
self.alpha = alpha
def estimation(self, y, ds):
""" Estimate distance covariance.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 2.
Returns
-------
i : float
Estimated distance covariance.
References
----------
Gabor J. Szekely and Maria L. Rizzo. Brownian distance covariance.
The Annals of Applied Statistics, 3:1236-1265, 2009.
Gabor J. Szekely, Maria L. Rizzo, and Nail K. Bakirov. Measuring
and testing dependence by correlation of distances. The Annals of
Statistics, 35:2769-2794, 2007.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 2)
num_of_samples = y.shape[0] # number of samples
a = compute_dcov_dcorr_statistics(y[:, :ds[0]], self.alpha)
b = compute_dcov_dcorr_statistics(y[:, ds[0]:], self.alpha)
i = sqrt(sum(a*b)) / num_of_samples
return i
class BIDistCorr(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Distance correlation estimator using pairwise distances.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, alpha=1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
alpha : float, optional
Parameter of the distance covariance: 0 < alpha < 2
(default is 1).
Examples
--------
>>> import ite
>>> co1 = ite.cost.BIDistCorr()
>>> co2 = ite.cost.BIDistCorr(alpha = 1.2)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attribute:
if alpha <= 0 or alpha >= 2:
raise Exception('0 < alpha < 2 is needed for this estimator!')
self.alpha = alpha
def estimation(self, y, ds):
""" Estimate distance correlation.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 2.
Returns
-------
i : float
Estimated distance correlation.
References
----------
Gabor J. Szekely and Maria L. Rizzo. Brownian distance covariance.
The Annals of Applied Statistics, 3:1236-1265, 2009.
Gabor J. Szekely, Maria L. Rizzo, and Nail K. Bakirov. Measuring
and testing dependence by correlation of distances. The Annals of
Statistics, 35:2769-2794, 2007.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 2)
a = compute_dcov_dcorr_statistics(y[:, :ds[0]], self.alpha)
b = compute_dcov_dcorr_statistics(y[:, ds[0]:], self.alpha)
n = sum(a*b) # numerator
        d1 = sum(a**2)  # denominator-1 (without sqrt)
        d2 = sum(b**2)  # denominator-2 (without sqrt)
if (d1 * d2) == 0: # >=1 of the random variables is constant
i = 0
else:
i = n / sqrt(d1 * d2) # <A,B> / sqrt(<A,A><B,B>)
i = sqrt(i)
return i
class BI3WayJoint(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Joint dependency from the mean embedding of the 'joint minus the
product of the marginals'.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, sigma1=0.1, sigma2=0.1, sigma3=0.1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
sigma1 : float, optional
Std in the RBF kernel on the first subspace (default is
sigma1 = 0.1). sigma1 = nan means 'use median heuristic'.
sigma2 : float, optional
Std in the RBF kernel on the second subspace (default is
sigma2 = 0.1). sigma2 = nan means 'use median heuristic'.
sigma3 : float, optional
Std in the RBF kernel on the third subspace (default is
sigma3 = 0.1). sigma3 = nan means 'use median heuristic'.
Examples
--------
>>> from numpy import nan
>>> import ite
>>> co1 = ite.cost.BI3WayJoint()
>>> co2 = ite.cost.BI3WayJoint(sigma1=0.1,sigma2=0.1,sigma3=0.1)
>>> co3 = ite.cost.BI3WayJoint(sigma1=nan,sigma2=nan,sigma3=nan)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attributes:
self.sigma1, self.sigma2, self.sigma3 = sigma1, sigma2, sigma3
def estimation(self, y, ds):
""" Estimate joint dependency.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 3.
Returns
-------
i : float
Estimated joint dependency.
References
----------
Dino Sejdinovic, Arthur Gretton, and Wicher Bergsma. A kernel test
for three-variable interactions. In Advances in Neural Information
Processing Systems (NIPS), pages 1124-1132, 2013. (Lancaster
three-variable interaction based dependency index).
Henry Oliver Lancaster. The Chi-squared Distribution. John Wiley
and Sons Inc, 1969. (Lancaster interaction)
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 3)
# Gram matrices (k1,k2,k3):
sigma1, sigma2, sigma3 = self.sigma1, self.sigma2, self.sigma3
# k1 (set co.sigma1 using median heuristic, if needed):
if isnan(sigma1):
sigma1 = median_heuristic(y[:, 0:ds[0]])
k1 = squareform(pdist(y[:, 0:ds[0]]))
k1 = exp(-k1**2 / (2 * sigma1**2))
# k2 (set co.sigma2 using median heuristic, if needed):
if isnan(sigma2):
sigma2 = median_heuristic(y[:, ds[0]:ds[0]+ds[1]])
k2 = squareform(pdist(y[:, ds[0]:ds[0]+ds[1]]))
k2 = exp(-k2**2 / (2 * sigma2**2))
# k3 (set co.sigma3 using median heuristic, if needed):
if isnan(sigma3):
sigma3 = median_heuristic(y[:, ds[0]+ds[1]:])
k3 = squareform(pdist(y[:, ds[0]+ds[1]:], 'euclidean'))
k3 = exp(-k3**2 / (2 * sigma3**2))
prod_of_ks = k1 * k2 * k3 # Hadamard product
term1 = mean(prod_of_ks)
term2 = -2 * mean(mean(k1, axis=1) * mean(k2, axis=1) *
mean(k3, axis=1))
term3 = mean(k1) * mean(k2) * mean(k3)
i = term1 + term2 + term3
return i
class BI3WayLancaster(InitX, VerCompSubspaceDims, VerSubspaceNumberIsK):
""" Estimate the Lancaster three-variable interaction measure.
Partial initialization comes from 'InitX', verification is from
'VerCompSubspaceDims' and 'VerSubspaceNumber' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, sigma1=0.1, sigma2=0.1, sigma3=0.1):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
sigma1 : float, optional
Std in the RBF kernel on the first subspace (default is
sigma1 = 0.1). sigma1 = nan means 'use median heuristic'.
sigma2 : float, optional
Std in the RBF kernel on the second subspace (default is
sigma2 = 0.1). sigma2 = nan means 'use median heuristic'.
sigma3 : float, optional
Std in the RBF kernel on the third subspace (default is
sigma3 = 0.1). sigma3 = nan means 'use median heuristic'.
Examples
--------
>>> from numpy import nan
>>> import ite
>>> co1 = ite.cost.BI3WayLancaster()
>>> co2 = ite.cost.BI3WayLancaster(sigma1=0.1, sigma2=0.1,\
sigma3=0.1)
>>> co3 = ite.cost.BI3WayLancaster(sigma1=nan, sigma2=nan,\
sigma3=nan)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attributes:
self.sigma1, self.sigma2, self.sigma3 = sigma1, sigma2, sigma3
def estimation(self, y, ds):
""" Estimate Lancaster three-variable interaction measure.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension. len(ds) = 3.
Returns
-------
i : float
Estimated Lancaster three-variable interaction measure.
References
----------
Dino Sejdinovic, Arthur Gretton, and Wicher Bergsma. A kernel test
for three-variable interactions. In Advances in Neural Information
Processing Systems (NIPS), pages 1124-1132, 2013. (Lancaster
three-variable interaction based dependency index).
Henry Oliver Lancaster. The Chi-squared Distribution. John Wiley
and Sons Inc, 1969. (Lancaster interaction)
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_subspace_number_is_k(ds, 3)
num_of_samples = y.shape[0] # number of samples
# Gram matrices (k1,k2,k3):
sigma1, sigma2, sigma3 = self.sigma1, self.sigma2, self.sigma3
# k1 (set co.sigma1 using median heuristic, if needed):
if isnan(sigma1):
sigma1 = median_heuristic(y[:, 0:ds[0]])
k1 = squareform(pdist(y[:, 0:ds[0]]))
k1 = exp(-k1**2 / (2 * sigma1**2))
# k2 (set co.sigma2 using median heuristic, if needed):
if isnan(sigma2):
sigma2 = median_heuristic(y[:, ds[0]:ds[0]+ds[1]])
k2 = squareform(pdist(y[:, ds[0]:ds[0]+ds[1]]))
k2 = exp(-k2**2 / (2 * sigma2**2))
        # k3 (set co.sigma3 using median heuristic, if needed):
if isnan(sigma3):
sigma3 = median_heuristic(y[:, ds[0]+ds[1]:])
k3 = squareform(pdist(y[:, ds[0]+ds[1]:]))
k3 = exp(-k3**2 / (2 * sigma3**2))
# centering of k1, k2, k3:
h = eye(num_of_samples) -\
ones((num_of_samples, num_of_samples)) / num_of_samples
k1 = dot(dot(h, k1), h)
k2 = dot(dot(h, k2), h)
k3 = dot(dot(h, k3), h)
i = mean(k1 * k2 * k3)
return i
class BIHSIC_IChol(InitEtaKernel, VerCompSubspaceDims):
""" Estimate HSIC using incomplete Cholesky decomposition.
HSIC refers to Hilbert-Schmidt Independence Criterion.
Partial initialization comes from 'InitEtaKernel', verification is
from 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Notes
-----
    The current implementation uses the same kernel on all the subspaces:
k = k_1 = ... = k_M, where y = [y^1;...;y^M].
Examples
--------
>>> from ite.cost.x_kernel import Kernel
>>> import ite
>>> co1 = ite.cost.BIHSIC_IChol()
>>> co2 = ite.cost.BIHSIC_IChol(eta=1e-3)
>>> k = Kernel({'name': 'RBF','sigma': 1})
>>> co3 = ite.cost.BIHSIC_IChol(kernel=k, eta=1e-3)
"""
def estimation(self, y, ds):
""" Estimate HSIC.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension.
Returns
-------
i : float
Estimated value of HSIC.
References
----------
Arthur Gretton, Olivier Bousquet, Alexander Smola and Bernhard
Scholkopf. Measuring Statistical Dependence with Hilbert-Schmidt
        Norms. International Conference on Algorithmic Learning Theory
(ALT), 63-78, 2005.
Alain Berlinet and Christine Thomas-Agnan. Reproducing Kernel
Hilbert Spaces in Probability and Statistics. Kluwer, 2004. (mean
embedding)
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
# initialization:
num_of_samples = y.shape[0] # number of samples
num_of_subspaces = len(ds)
# Step-1 (g1, g2, ...):
# 0,d_1,d_1+d_2,...,d_1+...+d_{M-1}; starting indices of the
# subspaces:
cum_ds = cumsum(hstack((0, ds[:-1])))
gs = list()
for m in range(num_of_subspaces):
idx = range(cum_ds[m], cum_ds[m] + ds[m])
g = self.kernel.ichol(y[:, idx], num_of_samples * self.eta)
g = g - mean(g, axis=0) # center the Gram matrix: dot(g,g.T)
gs.append(g)
# Step-2 (g1, g2, ... -> i):
i = 0
for i1 in range(num_of_subspaces-1): # i1 = 0:M-2
for i2 in range(i1+1, num_of_subspaces): # i2 = i1+1:M-1
i += norm(dot(gs[i2].T, gs[i1]))**2 # norm = Frob. norm
i /= num_of_samples**2
return i
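# Hedged usage sketch (editor addition): HSIC over two independent subspaces
# should be near zero; 'eta' trades Gram-factor size against approximation
# quality, as described in the class docstring above.
def _demo_bihsic_ichol():
    from numpy import array
    from numpy.random import rand
    co = BIHSIC_IChol(eta=1e-2)
    y = rand(1000, 2)  # two independent one-dimensional subspaces
    return co.estimation(y, array([1, 1]))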
class BIHoeffding(InitX, VerOneDSubspaces, VerCompSubspaceDims):
""" Estimate the multivariate version of Hoeffding's Phi.
Partial initialization comes from 'InitX', verification is from
    'VerOneDSubspaces' and 'VerCompSubspaceDims' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, small_sample_adjustment=True):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
small_sample_adjustment: boolean, optional
Whether we want small-sample adjustment.
Examples
--------
>>> import ite
>>> co1 = ite.cost.BIHoeffding()
>>> co2 = ite.cost.BIHoeffding(small_sample_adjustment=False)
"""
# initialize with 'InitX':
super().__init__(mult=mult)
# other attributes:
self.small_sample_adjustment = small_sample_adjustment
def estimation(self, y, ds):
""" Estimate multivariate version of Hoeffding's Phi.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension = 1 for this estimator.
Returns
-------
i : float
Estimated value of the multivariate version of Hoeffding's Phi.
References
----------
Sandra Gaiser, Martin Ruppert, Friedrich Schmid. A multivariate
version of Hoeffding's Phi-Square. Journal of Multivariate
Analysis. 101: pages 2571-2586, 2010.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
self.verification_one_dimensional_subspaces(ds)
num_of_samples, dim = y.shape
u = copula_transformation(y)
# term1:
m = 1 - maximum(u[:, 0][:, newaxis], u[:, 0])
for i in range(1, dim):
m *= 1 - maximum(u[:, i][:, newaxis], u[:, i])
term1 = mean(m)
# term2:
if self.small_sample_adjustment:
term2 = \
- mean(prod(1 - u**2 - (1 - u) / num_of_samples,
axis=1)) / \
(2**(dim - 1))
else:
term2 = - mean(prod(1 - u**2, axis=1)) / (2 ** (dim - 1))
# term3:
if self.small_sample_adjustment:
term3 = \
((num_of_samples - 1) * (2 * num_of_samples-1) /
(3 * 2 * num_of_samples**2))**dim
else:
term3 = 1 / 3**dim
i = term1 + term2 + term3
if self.mult:
if self.small_sample_adjustment:
t1 = \
sum((1 - arange(1,
num_of_samples) / num_of_samples)**dim
* (2*arange(1, num_of_samples) - 1)) \
/ num_of_samples**2
t2 = \
-2 * mean(((num_of_samples * (num_of_samples - 1) -
arange(1, num_of_samples+1) *
arange(num_of_samples)) /
(2 * num_of_samples ** 2))**dim)
t3 = term3
inv_hd = t1 + t2 + t3 # 1 / h(d, n)
else:
inv_hd = \
2 / ((dim + 1) * (dim + 2)) - factorial(dim) / \
(2 ** dim * prod(arange(dim + 1) + 1 / 2)) + \
                    1 / 3 ** dim  # 1 / h(d)
i /= inv_hd
i = sqrt(abs(i))
return i
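# Hedged usage sketch (editor addition): the multivariate Hoeffding Phi above
# is defined for one-dimensional subspaces only, so ds must be all ones.
def _demo_bihoeffding():
    from numpy import array
    from numpy.random import rand
    co = BIHoeffding(small_sample_adjustment=True)
    y = rand(1000, 2)
    return co.estimation(y, array([1, 1]))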
class BIKGV(InitEtaKernel, VerCompSubspaceDims):
""" Estimate kernel generalized variance (KGV).
Partial initialization comes from 'InitEtaKernel', verification is
from 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, kernel=Kernel(), eta=1e-2, kappa=0.01):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
kernel : Kernel, optional
For examples, see 'ite.cost.x_kernel.Kernel'
eta : float, >0, optional
It is used to control the quality of the incomplete Cholesky
decomposition based Gram matrix approximation. Smaller 'eta'
means larger sized Gram factor and better approximation.
(default is 1e-2)
kappa: float, >0
Regularization parameter.
Examples
--------
>>> import ite
>>> from ite.cost.x_kernel import Kernel
>>> co1 = ite.cost.BIKGV()
>>> co2 = ite.cost.BIKGV(eta=1e-4)
>>> co3 = ite.cost.BIKGV(eta=1e-4, kappa=0.02)
>>> k = Kernel({'name': 'RBF', 'sigma': 0.3})
>>> co4 = ite.cost.BIKGV(eta=1e-4, kernel=k)
"""
# initialize with 'InitEtaKernel':
super().__init__(mult=mult, kernel=kernel, eta=eta)
# other attributes:
self.kappa = kappa
def estimation(self, y, ds):
""" Estimate KGV.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension.
Returns
-------
i : float
Estimated value of KGV.
References
----------
Francis Bach, Michael I. Jordan. Kernel Independent Component
Analysis. Journal of Machine Learning Research, 3: 1-48, 2002.
Francis Bach, Michael I. Jordan. Learning graphical models with
Mercer kernels. International Conference on Neural Information
Processing Systems (NIPS), pages 1033-1040, 2002.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
num_of_samples = y.shape[0]
tol = num_of_samples * self.eta
r = compute_matrix_r_kcca_kgv(y, ds, self.kernel, tol, self.kappa)
i = -log(det(r)) / 2
return i
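# Hedged usage sketch (editor addition): KGV of two independent coordinates;
# smaller 'eta' tightens the incomplete Cholesky approximation, and 'kappa'
# regularizes the matrix built by compute_matrix_r_kcca_kgv.
def _demo_bikgv():
    from numpy import array
    from numpy.random import rand
    co = BIKGV(eta=1e-2, kappa=0.02)
    y = rand(500, 2)
    return co.estimation(y, array([1, 1]))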
class BIKCCA(InitEtaKernel, VerCompSubspaceDims):
""" Kernel canonical correlation analysis (KCCA) based estimator.
Partial initialization comes from 'InitEtaKernel', verification is
from 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, kernel=Kernel(), eta=1e-2, kappa=0.01):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
kernel : Kernel, optional
For examples, see 'ite.cost.x_kernel.Kernel'
eta : float, >0, optional
It is used to control the quality of the incomplete Cholesky
decomposition based Gram matrix approximation. Smaller 'eta'
means larger sized Gram factor and better approximation.
(default is 1e-2)
kappa: float, >0
Regularization parameter.
Examples
--------
>>> import ite
>>> from ite.cost.x_kernel import Kernel
>>> co1 = ite.cost.BIKCCA()
>>> co2 = ite.cost.BIKCCA(eta=1e-4)
>>> co3 = ite.cost.BIKCCA(eta=1e-4, kappa=0.02)
>>> k = Kernel({'name': 'RBF', 'sigma': 0.3})
>>> co4 = ite.cost.BIKCCA(eta=1e-4, kernel=k)
"""
# initialize with 'InitEtaKernel':
super().__init__(mult=mult, kernel=kernel, eta=eta)
# other attributes:
self.kappa = kappa
def estimation(self, y, ds):
""" Estimate KCCA.
Parameters
----------
y : (number of samples, dimension)-ndarray
One row of y corresponds to one sample.
ds : int vector
Dimensions of the individual subspaces in y; ds[i] = i^th
subspace dimension.
Returns
-------
i : float
Estimated value of KCCA.
References
----------
Francis Bach, Michael I. Jordan. Learning graphical models with
Mercer kernels. International Conference on Neural Information
Processing Systems (NIPS), pages 1033-1040, 2002.
Examples
--------
i = co.estimation(y,ds)
"""
# verification:
self.verification_compatible_subspace_dimensions(y, ds)
num_of_samples = y.shape[0]
tol = num_of_samples * self.eta
r = compute_matrix_r_kcca_kgv(y, ds, self.kernel, tol, self.kappa)
eig_min = eigsh(r, k=1, which='SM')[0][0]
i = -log(eig_min) / 2
return i
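# Hedged usage sketch (editor addition): KCCA mirrors the KGV call pattern;
# it reports the smallest eigenvalue of the same regularized matrix.
def _demo_bikcca():
    from numpy import array
    from numpy.random import rand
    co = BIKCCA(eta=1e-2)
    y = rand(500, 2)
    return co.estimation(y, array([1, 1]))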
| 0
| 0
| 0
|
289665ab30341dbcc3ad6785d5d631ee488b154e
| 2,268
|
py
|
Python
|
Chapter 17/17.3.py
|
adrian88szymanski/Python_Crash_Course_Eric_Matthes
|
74e9a627e3e044ea30e4a8579843d95fe8e4fc14
|
[
"MIT"
] | 8
|
2021-07-21T02:52:49.000Z
|
2022-02-08T20:47:09.000Z
|
Chapter 17/17.3.py
|
barbarian47/Python_Crash_Course_Eric_Matthes
|
74e9a627e3e044ea30e4a8579843d95fe8e4fc14
|
[
"MIT"
] | null | null | null |
Chapter 17/17.3.py
|
barbarian47/Python_Crash_Course_Eric_Matthes
|
74e9a627e3e044ea30e4a8579843d95fe8e4fc14
|
[
"MIT"
] | 7
|
2021-06-10T12:27:56.000Z
|
2022-01-29T13:53:15.000Z
|
#! python3
print("Task 17.3")
import requests
from plotly.graph_objs import Bar
from plotly import offline
def get_response():
"""Make an api call, and return the response."""
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
headers = {'Accept': 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
return r
def get_repo_dicts(r):
"""Return a set of dicts representing the most popular repositories."""
response_dict = r.json()
repo_dicts = response_dict['items']
return repo_dicts
def get_project_data(repo_dicts):
"""Return data needed for each project in visualization."""
repo_links, stars, labels = [], [], []
for repo_dict in repo_dicts:
repo_name = repo_dict['name']
repo_url = repo_dict['html_url']
repo_link = f"<a href='{repo_url}'>{repo_name}</a>"
repo_links.append(repo_link)
stars.append(repo_dict['stargazers_count'])
owner = repo_dict['owner']['login']
description = repo_dict['description']
label = f"{owner}<br />{description}"
labels.append(label)
return repo_links, stars, labels
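# Hedged sketch (editor addition, not part of the exercise): exercises
# get_project_data() on a canned repo dict, avoiding a live GitHub call.
def demo_project_data():
    fake_repos = [{'name': 'demo', 'html_url': 'https://example.com/demo',
                   'stargazers_count': 42, 'owner': {'login': 'someone'},
                   'description': 'a demo repository'}]
    return get_project_data(fake_repos)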
def make_visualization(repo_links, stars, labels):
"""Generate the visualization of most commented articles."""
data = [{
'type': 'bar',
'x': repo_links,
'y': stars,
'hovertext': labels,
'marker': {
'color': 'rgb(255, 0, 0)',
'line': {'width': 2, 'color': 'rgb(250, 200, 0)'}
},
'opacity': 0.6,
}]
my_layout = {
'title': 'Most-Starred Python Projects on GitHub',
'titlefont': {'size': 20},
'xaxis': {
'title': 'Repository',
'titlefont': {'size': 18},
'tickfont': {'size': 14},
},
'yaxis': {
'title': 'Stars',
'titlefont': {'size': 18},
'tickfont': {'size': 14},
},
}
fig = {'data': data, 'layout': my_layout}
offline.plot(fig, filename='python_repos.html')
if __name__ == '__main__':
r = get_response()
repo_dicts = get_repo_dicts(r)
repo_links, stars, labels = get_project_data(repo_dicts)
make_visualization(repo_links, stars, labels)
| 29.076923
| 83
| 0.588183
|
#! python3
print("Task 17.3")
import requests
from plotly.graph_objs import Bar
from plotly import offline
def get_response():
"""Make an api call, and return the response."""
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
headers = {'Accept': 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
return r
def get_repo_dicts(r):
"""Return a set of dicts representing the most popular repositories."""
response_dict = r.json()
repo_dicts = response_dict['items']
return repo_dicts
def get_project_data(repo_dicts):
"""Return data needed for each project in visualization."""
repo_links, stars, labels = [], [], []
for repo_dict in repo_dicts:
repo_name = repo_dict['name']
repo_url = repo_dict['html_url']
repo_link = f"<a href='{repo_url}'>{repo_name}</a>"
repo_links.append(repo_link)
stars.append(repo_dict['stargazers_count'])
owner = repo_dict['owner']['login']
description = repo_dict['description']
label = f"{owner}<br />{description}"
labels.append(label)
return repo_links, stars, labels
def make_visualization(repo_links, stars, labels):
"""Generate the visualization of most commented articles."""
data = [{
'type': 'bar',
'x': repo_links,
'y': stars,
'hovertext': labels,
'marker': {
'color': 'rgb(255, 0, 0)',
'line': {'width': 2, 'color': 'rgb(250, 200, 0)'}
},
'opacity': 0.6,
}]
my_layout = {
'title': 'Most-Starred Python Projects on GitHub',
'titlefont': {'size': 20},
'xaxis': {
'title': 'Repository',
'titlefont': {'size': 18},
'tickfont': {'size': 14},
},
'yaxis': {
'title': 'Stars',
'titlefont': {'size': 18},
'tickfont': {'size': 14},
},
}
fig = {'data': data, 'layout': my_layout}
offline.plot(fig, filename='python_repos.html')
if __name__ == '__main__':
r = get_response()
repo_dicts = get_repo_dicts(r)
repo_links, stars, labels = get_project_data(repo_dicts)
make_visualization(repo_links, stars, labels)
| 0
| 0
| 0
|
89ed7f78af8281098bbc29e8127e532af7a1d748
| 124
|
py
|
Python
|
nj/exceptions.py
|
trstringer/jersey
|
08daaf84b7367c56a4c86893878cd16ed690ab08
|
[
"MIT"
] | 7
|
2018-04-15T18:46:30.000Z
|
2020-10-03T10:50:31.000Z
|
nj/exceptions.py
|
trstringer/jersey
|
08daaf84b7367c56a4c86893878cd16ed690ab08
|
[
"MIT"
] | 3
|
2018-01-23T13:44:45.000Z
|
2020-01-03T20:43:22.000Z
|
nj/exceptions.py
|
trstringer/jersey
|
08daaf84b7367c56a4c86893878cd16ed690ab08
|
[
"MIT"
] | 5
|
2018-04-15T18:57:49.000Z
|
2021-06-27T11:44:13.000Z
|
"""Custom exceptions for this package"""
class JerseyError(Exception):
"""Basic exception for this package"""
pass
| 20.666667
| 42
| 0.701613
|
"""Custom exceptions for this package"""
class JerseyError(Exception):
"""Basic exception for this package"""
pass
| 0
| 0
| 0
|
def9b3d01bc3f769f677336481708ac65bc8dffd
| 1,579
|
py
|
Python
|
Day1-9/6.py
|
bcongdon/advent_of_code_2018
|
8f35d93401055d82b305f2887b3a85a67f70c468
|
[
"MIT"
] | null | null | null |
Day1-9/6.py
|
bcongdon/advent_of_code_2018
|
8f35d93401055d82b305f2887b3a85a67f70c468
|
[
"MIT"
] | null | null | null |
Day1-9/6.py
|
bcongdon/advent_of_code_2018
|
8f35d93401055d82b305f2887b3a85a67f70c468
|
[
"MIT"
] | null | null | null |
# influenced by https://www.reddit.com/r/adventofcode/comments/a3kr4r/2018_day_6_solutions/eb7385m/
import itertools
from collections import defaultdict, Counter
if __name__ == "__main__":
with open("6.txt") as f:
points = f.readlines()
points = [tuple(int(i) for i in l.split(",")) for l in points]
print("Part 1: {}".format(part1(points)))
print("Part 2: {}".format(part2(points)))
| 30.960784
| 99
| 0.580747
|
# influenced by https://www.reddit.com/r/adventofcode/comments/a3kr4r/2018_day_6_solutions/eb7385m/
import itertools
from collections import defaultdict, Counter
def part1(points):
max_x, max_y = max(x[0] for x in points), max(x[1] for x in points)
grid = defaultdict(lambda: -1)
for x, y in itertools.product(range(max_x + 1), range(max_y + 1)):
closest_dist = min(abs(x - i) + abs(y - j) for i, j in points)
closest_points = [
(i, j) for i, j in points if abs(x - i) + abs(y - j) == closest_dist
]
if len(closest_points) > 1:
grid[x, y] = -1
else:
grid[x, y] = closest_points[0]
# Exclude corners of grid
infinite_points = (
set(grid[(x, max_y - 1)] for x in range(max_x))
.union((grid[(max_x - 1, y)] for y in range(max_y)))
.union((grid[(x, 0)] for x in range(max_x)))
.union((grid[(0, y)] for y in range(max_y)))
)
grid_values = list(grid.values())
return max(grid_values.count(p) for p in points if p not in infinite_points)
def part2(points):
max_x, max_y = max(x[0] for x in points), max(x[1] for x in points)
return sum(
1
for x in range(max_x)
for y in range(max_y)
if sum(abs(x - i) + abs(y - j) for i, j in points) < 10000
)
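# Hedged sketch (editor addition): the example coordinates from the day-6
# puzzle statement, whose largest finite area is 17; whether part1 reproduces
# that exactly depends on the grid bounds chosen above.
def demo_example():
    pts = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]
    return part1(pts)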
if __name__ == "__main__":
with open("6.txt") as f:
points = f.readlines()
points = [tuple(int(i) for i in l.split(",")) for l in points]
print("Part 1: {}".format(part1(points)))
print("Part 2: {}".format(part2(points)))
| 1,119
| 0
| 46
|
fe26a724953a0c5baec9d22e630995498eeb16db
| 16,332
|
py
|
Python
|
pybnf/parse.py
|
arakkkkk/PyBNF
|
2f4cee1dd0b7467f82ca2fd859a10de2ed61df92
|
[
"BSD-3-Clause"
] | null | null | null |
pybnf/parse.py
|
arakkkkk/PyBNF
|
2f4cee1dd0b7467f82ca2fd859a10de2ed61df92
|
[
"BSD-3-Clause"
] | null | null | null |
pybnf/parse.py
|
arakkkkk/PyBNF
|
2f4cee1dd0b7467f82ca2fd859a10de2ed61df92
|
[
"BSD-3-Clause"
] | null | null | null |
"""Grammar and methods for parsing the configuration file"""
from .printing import PybnfError, print1
from .config import Configuration
from string import punctuation
import logging
import pyparsing as pp
import re
logger = logging.getLogger(__name__)
numkeys_int = ['verbosity', 'parallel_count', 'delete_old_files', 'population_size',
'smoothing', 'max_iterations',
'num_to_output', 'output_every', 'islands', 'migrate_every', 'num_to_migrate', 'init_size',
'local_min_limit', 'reserve_size', 'burn_in', 'sample_every', 'output_hist_every',
'hist_bins', 'refine', 'simplex_max_iterations', 'wall_time_sim', 'wall_time_gen', 'verbosity',
'exchange_every', 'backup_every', 'bootstrap', 'crossover_number', 'ind_var_rounding',
'local_objective_eval', 'reps_per_beta', 'save_best_data', 'parallelize_models', 'adaptive', 'continue_run']
numkeys_float = ['min_objective', 'cognitive', 'social', 'particle_weight',
'particle_weight_final', 'adaptive_n_max', 'adaptive_n_stop', 'adaptive_abs_tol', 'adaptive_rel_tol',
'mutation_rate', 'mutation_factor', 'stop_tolerance', 'step_size', 'simplex_step', 'simplex_log_step',
'simplex_reflection', 'simplex_expansion', 'simplex_contraction', 'simplex_shrink', 'cooling',
'beta_max', 'bootstrap_max_obj', 'simplex_stop_tol', 'v_stop', 'gamma_prob', 'zeta', 'lambda',
'constraint_scale', 'neg_bin_r', 'stablizingCov']
multnumkeys = ['credible_intervals', 'beta', 'beta_range', 'starting_params']
b_var_def_keys = ['uniform_var', 'loguniform_var']
var_def_keys = ['lognormal_var', 'normal_var']
var_def_keys_1or2nums = ['var', 'logvar']
strkeylist = ['bng_command', 'output_dir', 'fit_type', 'objfunc', 'initialization',
'cluster_type', 'scheduler_node', 'scheduler_file', 'de_strategy', 'sbml_integrator', 'simulation_dir']
multstrkeys = ['worker_nodes', 'postprocess', 'output_trajectory', 'output_noise_trajectory']
dictkeys = ['time_course', 'param_scan']
punctuation_safe = re.sub('[:,]', '', punctuation)
def parse_normalization_def(s):
"""
Parse the complicated normalization grammar
    If the grammar is specified incorrectly, it will end up treating something invalid as the normalization type or
    the exp file, and this error will be caught later.
:param s: The string following the equals sign in the normalization key
:return: What to write in the config dictionary: A string, or a dictionary {expfile: string} or
{expfile: (string, index_list)} or {expfile: (string, name_list)}
"""
def parse_range(x):
"""Parse a string as a set of numbers like 10,"""
result = []
for part in x.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return result
# Remove all spaces
s = re.sub('\s', '', s)
if ':' in s:
# List of exp files
res = dict()
i = s.index(':')
normtype = s[:i]
explist = s[i+1:]
exps = re.split(r',(?![^()]*\))', explist) # Dark magic: split on commas that aren't inside parentheses
# Achievement unlocked: Use 16 punctuation marks in a row
for e in exps:
if e[0] == '(' and e[-1] == ')':
# It's an exp in parentheses with column-wise specs
pair = e[1:-1].split(':')
if len(pair) == 1:
res[pair[0]] = normtype
elif len(pair) == 2:
e, cols = pair
if re.match('^[\d,\-]+$', cols):
col_nums = parse_range(cols)
res[e] = (normtype, col_nums)
else:
col_names = cols.split(',')
res[e] = (normtype, col_names)
else:
raise PybnfError("Parsing normalization key - the item '%s' has too many colons in it" % e)
else:
# It's just an exp
res[e] = normtype
return res
else:
# Single string for all
return s
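# Hedged sketch (editor addition, not part of PyBNF): a few inputs exercising
# the three shapes this function can return.
def _demo_parse_normalization_def():
    assert parse_normalization_def('init') == 'init'
    assert parse_normalization_def('peak: d1.exp, d2.exp') == \
        {'d1.exp': 'peak', 'd2.exp': 'peak'}
    assert parse_normalization_def('init: (d1.exp: 1-2,5)') == \
        {'d1.exp': ('init', [1, 2, 5])}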
| 47.476744
| 125
| 0.541207
|
"""Grammar and methods for parsing the configuration file"""
from .printing import PybnfError, print1
from .config import Configuration
from string import punctuation
import logging
import pyparsing as pp
import re
logger = logging.getLogger(__name__)
numkeys_int = ['verbosity', 'parallel_count', 'delete_old_files', 'population_size',
'smoothing', 'max_iterations',
'num_to_output', 'output_every', 'islands', 'migrate_every', 'num_to_migrate', 'init_size',
'local_min_limit', 'reserve_size', 'burn_in', 'sample_every', 'output_hist_every',
'hist_bins', 'refine', 'simplex_max_iterations', 'wall_time_sim', 'wall_time_gen', 'verbosity',
'exchange_every', 'backup_every', 'bootstrap', 'crossover_number', 'ind_var_rounding',
'local_objective_eval', 'reps_per_beta', 'save_best_data', 'parallelize_models', 'adaptive', 'continue_run']
numkeys_float = ['min_objective', 'cognitive', 'social', 'particle_weight',
'particle_weight_final', 'adaptive_n_max', 'adaptive_n_stop', 'adaptive_abs_tol', 'adaptive_rel_tol',
'mutation_rate', 'mutation_factor', 'stop_tolerance', 'step_size', 'simplex_step', 'simplex_log_step',
'simplex_reflection', 'simplex_expansion', 'simplex_contraction', 'simplex_shrink', 'cooling',
'beta_max', 'bootstrap_max_obj', 'simplex_stop_tol', 'v_stop', 'gamma_prob', 'zeta', 'lambda',
'constraint_scale', 'neg_bin_r', 'stablizingCov']
multnumkeys = ['credible_intervals', 'beta', 'beta_range', 'starting_params']
b_var_def_keys = ['uniform_var', 'loguniform_var']
var_def_keys = ['lognormal_var', 'normal_var']
var_def_keys_1or2nums = ['var', 'logvar']
strkeylist = ['bng_command', 'output_dir', 'fit_type', 'objfunc', 'initialization',
'cluster_type', 'scheduler_node', 'scheduler_file', 'de_strategy', 'sbml_integrator', 'simulation_dir']
multstrkeys = ['worker_nodes', 'postprocess', 'output_trajectory', 'output_noise_trajectory']
dictkeys = ['time_course', 'param_scan']
punctuation_safe = re.sub('[:,]', '', punctuation)
def parse(s):
equals = pp.Suppress('=')
colon = pp.Suppress(':')
comment = pp.Suppress(pp.Optional(pp.Literal('#') - pp.ZeroOrMore(pp.Word(pp.printables))))
# set up multiple grammars
# single str value
strkeys = pp.oneOf(' '.join(strkeylist),
caseless=True)
string = pp.Word(pp.alphanums + punctuation)
strgram = strkeys - equals - string - comment
# single num value
numkeys = pp.oneOf(' '.join(numkeys_int + numkeys_float), caseless=True)
point = pp.Literal(".")
e = pp.CaselessLiteral("E")
num = pp.Combine(pp.Word("+-" + pp.nums, pp.nums) +
pp.Optional(point + pp.Optional(pp.Word(pp.nums))) +
pp.Optional(e + pp.Word("+-" + pp.nums, pp.nums)))
numgram = numkeys - equals - num - comment
# variable definition grammar
strnumkeys = pp.oneOf(' '.join(var_def_keys + b_var_def_keys), caseless=True)
bng_parameter = pp.Word(pp.alphas, pp.alphanums + "_")
varnums = bng_parameter - num - num - pp.Optional(pp.Word("ubBU"))
strnumgram = strnumkeys - equals - varnums - comment
# multiple string value grammar
multstrkey = pp.oneOf(' '.join(multstrkeys), caseless=True)
multstrgram = multstrkey - equals - pp.OneOrMore(string)
# var and logvar alt grammar (only one number given)
varkeys = pp.oneOf(' '.join(var_def_keys_1or2nums), caseless=True)
vargram = varkeys - equals - bng_parameter - num - pp.Optional(num) - comment
# multiple num value
multnumkey = pp.oneOf(' '.join(multnumkeys), caseless=True)
multnumgram = multnumkey - equals - pp.OneOrMore(num) - comment
# model-data mapping grammar
mdmkey = pp.CaselessLiteral("model")
nonetoken = pp.Suppress(pp.CaselessLiteral("none"))
model_file = pp.Regex(".*?\.(bngl|xml)")
exp_file = pp.Regex(".*?\.(exp|con|prop)")
mdmgram = mdmkey - equals - model_file - colon - (pp.delimitedList(exp_file) ^ nonetoken) - comment
# normalization mapping grammar
normkey = pp.CaselessLiteral("normalization")
anything = pp.Word(pp.alphanums+punctuation+' ')
normgram = normkey - equals - anything # The set of legal grammars for normalization is too complicated,
# Will handle with separate code.
# Grammar for dictionary-like specification of simulation actions
# We are intentionally over-permissive here, because the Action class will be able to give more helpful error
# messages than a failed parse.
dict_entry = pp.Word(pp.alphas) - colon - pp.Word(pp.alphanums + punctuation_safe)
dict_key = pp.oneOf(' '.join(dictkeys), caseless=True)
dictgram = dict_key - equals - pp.delimitedList(dict_entry) - comment
# mutant model grammar
mutkey = pp.CaselessLiteral('mutant')
mut_op = pp.Group(pp.Word(pp.alphas+'_', pp.alphanums+'_') - pp.oneOf('+ - * / =') - num)
mutgram = mutkey - equals - string - string - pp.Group(pp.OneOrMore(mut_op)) - \
pp.Group(colon - (pp.delimitedList(exp_file) ^ nonetoken)) - comment
# check each grammar and output somewhat legible error message
line = (mdmgram | strgram | numgram | strnumgram | multnumgram | multstrgram | vargram | normgram | dictgram
| mutgram).parseString(s, parseAll=True).asList()
return line
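# Hedged sketch (editor addition): two lines exercising the grammars above --
# a single-number key and a bounded-variable definition.
def _demo_parse():
    assert parse('verbosity=2') == ['verbosity', '2']
    assert parse('uniform_var=k1 0 10') == ['uniform_var', 'k1', '0', '10']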
def load_config(path):
try:
infile = open(path, 'r')
except FileNotFoundError:
raise PybnfError('Configuration file %s not found' % path)
param_dict = ploop(infile.readlines())
infile.close()
return Configuration(param_dict)
def flatten(vs):
return vs[0] if len(vs) == 1 else vs
def ploop(ls): # parse loop
d = {}
models = set()
exp_data = set()
for i, line in enumerate(ls):
if re.match('\s*$', line) or re.match('\s*#', line):
continue
try:
logger.debug('Parsing line %s' % line.strip())
l = parse(line)
# Find parameter assignments that reference distinct parameters
if l[0] in b_var_def_keys:
key = (l[0], l[1])
values = [float(x) for x in l[2:4]]
if len(l) == 5:
values.append(re.fullmatch('b', l[4], flags=re.IGNORECASE) is not None)
else:
values.append(True)
elif l[0] in var_def_keys_1or2nums or l[0] in var_def_keys:
key = (l[0], l[1])
values = [float(x) for x in l[2:]]
elif l[0] in numkeys_int:
key = l[0]
values = int(l[1])
elif l[0] in numkeys_float:
key = l[0]
values = float(l[1])
elif l[0] in multnumkeys:
key = l[0]
values = [float(x) for x in l[1:]]
elif l[0] in multstrkeys:
key = l[0]
values = l[1:]
elif l[0] != 'model':
key = l[0]
values = flatten(l[1:])
# Find parameter assignments defining model and experimental data
if l[0] == 'model':
key = l[1]
values = l[2:]
d[key] = values # individual data files remain in list
models.add(key)
exp_data.update(values)
elif l[0] in dictkeys:
# Multiple declarations allowed; config dict entry should contain a list of all the declarations.
# Convert the line into a dict of key-value pairs. Keep everything as strings, check later
entry = dict()
for xi in range(0, len(values), 2):
if values[xi] in entry:
raise PybnfError('For config key %s, attribute %s is specified multiple times' %
(l[0], values[xi]))
entry[values[xi]] = values[xi+1]
if l[0] in d:
d[l[0]].append(entry)
else:
d[l[0]] = [entry]
elif l[0] == 'mutant':
if 'mutant' in d:
d['mutant'].append(l[1:])
else:
d['mutant'] = [l[1:]]
exp_data.update(l[-1])
elif l[0] == 'postprocess':
if len(values) < 2:
raise PybnfError("Config key 'postprocess' should specify a python file, followed by one or more "
"suffixes.")
if 'postprocess' in d:
d['postprocess'].append([values])
else:
d['postprocess'] = [values]
elif l[0] == 'normalization':
# Normalization defined with way too many possible options
# At the end of all this, the config dict has one of the following formats:
# 'normalization' : 'type'
# 'normalization' : {'expfile':'type', 'expfile2':[('type1', [numbers]), ('type2', [colnames]), ...]}
parsed = parse_normalization_def(values)
if type(parsed) == str:
if 'normalization' in d:
raise PybnfError('contradictory normalization keys',
"Config file contains multiple 'normalization' keys, one of which specifies"
" no specific exp files, thereby applying to all of them. If you are using "
"this option, you should only have one 'normalization' key in the config file.")
d['normalization'] = parsed
else:
if 'normalization' in d:
if type(d['normalization']) != dict:
raise PybnfError('contradictory normalization keys',
"Config file contains multiple 'normalization' keys, one of which specifies"
" no specific exp files, thereby applying to all of them. If you are using "
"this option, you should only have one 'normalization' key in the config file.")
else:
d['normalization'] = dict()
for k in parsed:
if k in d['normalization'] and (type(parsed[k]) == str or type(d['normalization'][k]) == str):
raise PybnfError('contradictory normalization keys for %s' % k,
"File %s has normalization specified multiple times in a way that is "
"contradictory." % k)
if type(parsed[k]) == str:
d['normalization'][k] = parsed[k]
else:
if k not in d['normalization']:
d['normalization'][k] = []
d['normalization'][k].append(parsed[k])
else:
if key in d:
if d[key] == values:
print1("Warning: Config key '%s' is specified multiple times" % (key,))
else:
raise PybnfError("Config key '%s' is specified multiple times with different values." % (key,))
d[key] = values
except pp.ParseBaseException:
key = re.split('[ =]', line)[0].lower()
fmt = ''
if key in numkeys_int:
fmt = "'%s=x' where x is an integer" % key
elif key in numkeys_float:
fmt = "'%s=x' where x is a decimal number" % key
elif key in multnumkeys:
fmt = "'%s=x1 x2 ...' where x1, x2, ... is a list of numbers" % key
elif key in var_def_keys:
fmt = "'%s=v x y' where v is a variable name, and x and y are numbers" % key
elif key in b_var_def_keys:
fmt = "'%s=v x y z' where v is a variable name, x and y are numbers, and z is optional and specifies " \
"whether or not the variable should be bounded ('u' is unbounded, 'b' or left blank is bounded)" % key
elif key in var_def_keys_1or2nums:
fmt = "'%s=v x' or '%s=v x y' where v is a variable name, and x and y are decimal numbers" % (key, key)
elif key in strkeylist:
fmt = "'%s=s' where s is a string" % key
elif key == 'model':
fmt = "'model=modelfile.bngl : datafile.exp' or 'model=modelfile.bngl : datafile1.exp, datafile2.exp'" \
" Supported modelfile extensions are .bngl and .xml"
elif key == 'normalization':
fmt = "'%s=s' or '%s=s : datafile1.exp, datafile2.exp' where s is a string ('init', 'peak', " \
"'unit', or 'zero')"\
% (key, key)
elif key in dictkeys:
fmt = "'%s=key1: value1, key2: value2,...' where key1, key2, etc are attributes of the %s (see " \
"documentation for available options)" % (key, key)
elif key == 'mutant':
fmt = "'mutant=base model var1=val1 var2*val2 ... : datafile1.exp, datafile2.exp' where mutation " \
"operations (var1=val1 etc) have the format [variable_name][operator][number] and other " \
"arguments are strings"
message = "Parsing configuration key '%s' on line %s.\n" % (key, i)
if fmt == '':
message += '%s is not a valid configuration key.' % key
else:
message += '%s should be specified in the format %s' % (key, fmt)
raise PybnfError("Misconfigured config key '%s' at line: %s" % (line.strip(), i), message)
d['models'] = models
d['exp_data'] = exp_data
return d
def parse_normalization_def(s):
"""
Parse the complicated normalization grammar
    If the grammar is specified incorrectly, it will end up treating something invalid as the normalization type or
    the exp file, and this error will be caught later.
:param s: The string following the equals sign in the normalization key
:return: What to write in the config dictionary: A string, or a dictionary {expfile: string} or
{expfile: (string, index_list)} or {expfile: (string, name_list)}
"""
def parse_range(x):
"""Parse a string as a set of numbers like 10,"""
result = []
for part in x.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return result
# Remove all spaces
s = re.sub('\s', '', s)
if ':' in s:
# List of exp files
res = dict()
i = s.index(':')
normtype = s[:i]
explist = s[i+1:]
exps = re.split(r',(?![^()]*\))', explist) # Dark magic: split on commas that aren't inside parentheses
# Achievement unlocked: Use 16 punctuation marks in a row
for e in exps:
if e[0] == '(' and e[-1] == ')':
# It's an exp in parentheses with column-wise specs
pair = e[1:-1].split(':')
if len(pair) == 1:
res[pair[0]] = normtype
elif len(pair) == 2:
e, cols = pair
if re.match('^[\d,\-]+$', cols):
col_nums = parse_range(cols)
res[e] = (normtype, col_nums)
else:
col_names = cols.split(',')
res[e] = (normtype, col_names)
else:
raise PybnfError("Parsing normalization key - the item '%s' has too many colons in it" % e)
else:
# It's just an exp
res[e] = normtype
return res
else:
# Single string for all
return s
| 11,893
| 0
| 92
|
6e81d1c3a57eefd7fdf314c858a807162d6f994a
| 1,339
|
py
|
Python
|
dl/callbacks/train.py
|
timoninn/pide-pipe
|
2eec6c918cf1edbfa22ba6cfe3af57c9abb53ac6
|
[
"MIT"
] | 1
|
2019-11-21T07:26:46.000Z
|
2019-11-21T07:26:46.000Z
|
dl/callbacks/train.py
|
timoninn/pide-pipe
|
2eec6c918cf1edbfa22ba6cfe3af57c9abb53ac6
|
[
"MIT"
] | null | null | null |
dl/callbacks/train.py
|
timoninn/pide-pipe
|
2eec6c918cf1edbfa22ba6cfe3af57c9abb53ac6
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch import optim
from torch.optim.lr_scheduler import _LRScheduler
from ..core.callback import Callback
from ..core.state import State
from ..utils.torch import get_available_device
| 29.108696
| 77
| 0.613144
|
from typing import Dict, Any
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch import optim
from torch.optim.lr_scheduler import _LRScheduler
from ..core.callback import Callback
from ..core.state import State
from ..utils.torch import get_available_device
class TrainCallback(Callback):
def __init__(
self,
criterion: nn.Module,
optimizer: optim.Optimizer
):
self.optimizer = optimizer
self.criterion = criterion
def on_begin(self, state: State):
state.optimizer = self.optimizer
state.criterion = self.criterion
def on_batch_begin(self, state: State):
with torch.set_grad_enabled(state.is_train_phase):
# Criterion may be None for infer and valid phases.
if state.is_infer_phase == False and state.criterion is not None:
loss = state.criterion(state.output, state.target)
state.meter.add_batch_value(
phase=state.phase,
metric_name='loss',
value=loss.item(),
batch_size=state.input.size(0)
)
if state.is_train_phase:
state.optimizer.zero_grad()
loss.backward()
state.optimizer.step()
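# Hedged sketch (editor addition, not part of the package): minimal
# construction of the callback; driving it requires the package's runner
# to populate State and call the hooks above.
def _demo_train_callback() -> TrainCallback:
    model = nn.Linear(10, 1)
    return TrainCallback(criterion=nn.MSELoss(),
                         optimizer=optim.SGD(model.parameters(), lr=0.1))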
| 931
| 9
| 104
|
771da24f5050ff10192faea4365fa098041b5d2b
| 8,358
|
py
|
Python
|
baseball.py
|
jason-sa/baseball_lin_regression
|
936535693f00b28d17b2b901144dcba8bce45ab9
|
[
"MIT"
] | null | null | null |
baseball.py
|
jason-sa/baseball_lin_regression
|
936535693f00b28d17b2b901144dcba8bce45ab9
|
[
"MIT"
] | null | null | null |
baseball.py
|
jason-sa/baseball_lin_regression
|
936535693f00b28d17b2b901144dcba8bce45ab9
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
import time
import os
from selenium.webdriver.common.by import By
import re
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
PATH_RS = '/Users/jadams/ds/metis/baseball_lin_regression/data/processed_df/rookie_stats.csv'
PATH_S = '/Users/jadams/ds/metis/baseball_lin_regression/data/processed_df/salary.csv'
def count_awards(s):
    ''' Counts the number of awards from baseballreference.com where the awards are listed in CSV format
s: CSV string of awards
return: int (count of awards)
'''
awards = 0
s = str(s)
if len(s) > 0:
awards = s.count(',')+1
return awards
def get_player_data(html, year, name):
''' Parses a player page on baseballreference.com, builds and writes a data frame to the data directory.
    html: html scraped from baseballreference.com
year: rookie year of the player
name: name of the player
return: writes a data frame to data/ directory
'''
soup_players = BeautifulSoup(html, 'lxml')
# Get position
position = soup_players.find('p')
position = position.contents[2].strip()
    if 'Pitcher' in position:
return None
# Get the debut for identification in case duplicate name
debut = soup_players.find('a', {'href': re.compile('=debut')})
debut = debut.contents
# Get batting stats
batting = soup_players.find('table',{'id':'batting_standard'})
batting_tbl_list = pd.read_html(str(batting))
batting_df = batting_tbl_list[0]
batting_df = batting_df[:-1]
rookie_stats = batting_df[(batting_df.Year == str(year))]
rookie_stats = rookie_stats[(~rookie_stats.Tm.str.contains('-min'))]
rookie_stats = rookie_stats[rookie_stats.Tm != 'TOT']
columns = ['Year', 'Age', 'Tm', 'Lg', 'G', 'PA', 'AB', 'R','H', 'SB','BA','HR','TB','2B','3B','RBI','BB','SO','Awards']
rookie_stats = rookie_stats.loc[:, columns]
rookie_stats = rookie_stats[rookie_stats.Lg.str.contains(r'[A,N]L$')]
rookie_stats['position'] = position
rookie_stats['name'] = name
rookie_stats['debut'] = debut * rookie_stats.shape[0]
rookie_stats.Year = rookie_stats.Year.astype(int)
rookie_stats.debut = pd.to_datetime(rookie_stats.debut, format='%B %d, %Y')
rookie_stats.loc[rookie_stats.Awards.isnull(),'Awards'] = ''
rookie_stats['award_count'] = rookie_stats.Awards.apply(count_awards)
with open(PATH_RS, 'a') as f:
rookie_stats.to_csv(f, header=False)
def build_rookie_table(rookie_pages):
''' Builds a master data set of all rookie players using the rookie summary page on baseballreference.com
rookie_pages: pd.DataFrame containing [html, year]
return: pd.DataFrame() ['Name','Debut','Age','Tm','rookie_year']
'''
rookie_df = pd.DataFrame(columns=['Name','Debut','Age','Tm','rookie_year'])
rookie_dfs = []
for i in rookie_pages.year.values:
# scrape the rookie batters (includes pitchers if PA)
soup_pages = BeautifulSoup(rookie_pages.html[i], 'lxml')
batting = soup_pages.find('table',{'id':'misc_batting'})
batting_df = pd.read_html(str(batting))
# add Name, Debut, Age, Tm, and rookie_year
year_df = batting_df[0].loc[:,['Name','Debut','Age','Tm']]
year_df['rookie_year'] = [i] * batting_df[0].shape[0]
year_df.rookie_year = year_df.rookie_year.astype(int)
rookie_dfs.append(year_df) #= rookie_df.append(year_df)
# Combine the rookie_dfs
rookie_df = pd.concat(rookie_dfs)
# Strip HOF indicator from name
rookie_df.Name = rookie_df.Name.str.replace('HOF','')
rookie_df[rookie_df.Name.str.contains('HOF')]
rookie_df.Name = rookie_df.Name.str.strip()
# Make Debut a date time
rookie_df.Debut = rookie_df.Debut.astype('datetime64')
return rookie_df
def get_player_salary(html, year, name, ind):
    ''' Parses a player's page on baseballreference.com and builds a data frame of their salary data
html: player page from baseballreference.com
year: rookie year
name: player name
    ind: index to build a unique identifier
return: appends to /data/salary.csv
'''
salary_soup = BeautifulSoup(html, 'lxml')
salary_html = salary_soup.find('table',{'id':'br-salaries'})
if salary_html is None:
return None
salary_tables_lst = pd.read_html(str(salary_html))
salary_df = salary_tables_lst[0]
salary_df = salary_df[~salary_df.Year.isnull()]
salary_df = salary_df[salary_df.Year.str.contains(r'[1-2]\d{3}$')]
salary_df['name'] = [name] * salary_df.shape[0]
salary_df['UID'] = [ind] * salary_df.shape[0]
salary_df['rookie_year'] = [year] * salary_df.shape[0]
salary_df.Salary = (salary_df.Salary
.str.replace('$','')
.str.replace(',','')
.str.replace('*','')
)
salary_df.loc[salary_df.Salary == '', 'Salary'] = np.nan
salary_df.Salary = salary_df.Salary.astype(float)
salary_df.Age = salary_df.Age.astype(float)
if salary_df.SrvTm.dtype != 'float64':
salary_df.loc[salary_df.SrvTm == '?','SrvTm'] = np.nan
salary_df.SrvTm = salary_df.SrvTm.astype(float)
if ind == 1:
salary_df.to_csv(PATH_S)
else:
with open(PATH_S, 'a') as f:
salary_df.to_csv(f, header=False)
def run_models(X_train, y_train, name, results = None, cv=10, alphas=[10**a for a in range(-2,5)]):
''' Method to quickly run all models with different feature sets.
Runs: OLS, Standard Scaler + LassoCV, and PolynomialFeatures + Standard Scaler + LassoCV
X_train: training feature set
y_train: training actuals
name: name of the type of run (used for comparing different feature sets)
results: data frame to hold the results if varying the feature set
cv: number of n-fold cross validations
alphas: range of alpha values for CV
return: pd.DataFrame of the MSE results
'''
# capture the results for the feature set
model_results = pd.Series(name=name)
    # Cross-validate a plain linear regression model (cv folds).
    lin_model = LinearRegression()
    scores = cross_val_score(lin_model, X_train, y_train, cv=cv, scoring='neg_mean_squared_error')
    model_results['linear model - cv' + str(cv)] = np.mean(-scores)
    # Now perform an n-fold cross-validation with a lasso pipeline
cv_lasso = make_pipeline(StandardScaler(), LassoCV(cv=cv, alphas=alphas, tol=0.001))
cv_lasso.fit(X_train, y_train)
model_results['lasso cv - ' + str(cv_lasso.get_params()['lassocv'].alpha_)] = mean_mse_Lasso(cv_lasso, 'lassocv')
    # Now add 2-5 degree polynomial features and perform an n-fold cross-validation.
for degrees in range(2,6):
cv_lasso_poly = make_pipeline(PolynomialFeatures(degrees), StandardScaler(), LassoCV(cv=cv, alphas=alphas,tol=0.001))
cv_lasso_poly.fit(X_train, y_train)
model_results['lasso poly ' + str(degrees) + ' cv - ' + str(cv_lasso_poly.get_params()['lassocv'].alpha_)] = mean_mse_Lasso(cv_lasso_poly, 'lassocv')
if results is None:
results = pd.DataFrame(model_results)
else:
results = pd.concat([results, pd.DataFrame(model_results)], axis=1, sort=True)
return results
def mean_mse_Lasso(model,name):
    ''' Calculates the MSE of an n-fold CV from LassoCV model
model: sklearn model or pipeline
name: name of the lassocv model
return float64 (mean MSE)
'''
mse = model.get_params()[name].mse_path_
alphas = model.get_params()[name].alphas_
mse_df = pd.DataFrame(data=mse, index=alphas)
return mse_df.loc[model.get_params()[name].alpha_].mean()
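# Hedged sketch (editor addition, not part of the original analysis): runs
# the model comparison above on a synthetic regression problem.
def demo_run_models():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=5, noise=10.0,
                           random_state=0)
    X_train, _, y_train, _ = train_test_split(X, y, random_state=0)
    return run_models(X_train, y_train, name='synthetic', cv=5)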
| 37.3125
| 157
| 0.686169
|
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
import time
import os
from selenium.webdriver.common.by import By
import re
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
PATH_RS = '/Users/jadams/ds/metis/baseball_lin_regression/data/processed_df/rookie_stats.csv'
PATH_S = '/Users/jadams/ds/metis/baseball_lin_regression/data/processed_df/salary.csv'
def count_awards(s):
    ''' Counts the number of awards from baseballreference.com where the awards are listed in CSV format
s: CSV string of awards
return: int (count of awards)
'''
awards = 0
s = str(s)
if len(s) > 0:
awards = s.count(',')+1
return awards
def get_player_data(html, year, name):
''' Parses a player page on baseballreference.com, builds and writes a data frame to the data directory.
    html: html scraped from baseballreference.com
year: rookie year of the player
name: name of the player
return: writes a data frame to data/ directory
'''
soup_players = BeautifulSoup(html, 'lxml')
# Get position
position = soup_players.find('p')
position = position.contents[2].strip()
    if 'Pitcher' in position:
return None
# Get the debut for identification in case duplicate name
debut = soup_players.find('a', {'href': re.compile('=debut')})
debut = debut.contents
# Get batting stats
batting = soup_players.find('table',{'id':'batting_standard'})
batting_tbl_list = pd.read_html(str(batting))
batting_df = batting_tbl_list[0]
batting_df = batting_df[:-1]
rookie_stats = batting_df[(batting_df.Year == str(year))]
rookie_stats = rookie_stats[(~rookie_stats.Tm.str.contains('-min'))]
rookie_stats = rookie_stats[rookie_stats.Tm != 'TOT']
columns = ['Year', 'Age', 'Tm', 'Lg', 'G', 'PA', 'AB', 'R','H', 'SB','BA','HR','TB','2B','3B','RBI','BB','SO','Awards']
rookie_stats = rookie_stats.loc[:, columns]
rookie_stats = rookie_stats[rookie_stats.Lg.str.contains(r'[A,N]L$')]
rookie_stats['position'] = position
rookie_stats['name'] = name
rookie_stats['debut'] = debut * rookie_stats.shape[0]
rookie_stats.Year = rookie_stats.Year.astype(int)
rookie_stats.debut = pd.to_datetime(rookie_stats.debut, format='%B %d, %Y')
rookie_stats.loc[rookie_stats.Awards.isnull(),'Awards'] = ''
rookie_stats['award_count'] = rookie_stats.Awards.apply(count_awards)
with open(PATH_RS, 'a') as f:
rookie_stats.to_csv(f, header=False)
def build_rookie_table(rookie_pages):
''' Builds a master data set of all rookie players using the rookie summary page on baseballreference.com
rookie_pages: pd.DataFrame containing [html, year]
return: pd.DataFrame() ['Name','Debut','Age','Tm','rookie_year']
'''
rookie_df = pd.DataFrame(columns=['Name','Debut','Age','Tm','rookie_year'])
rookie_dfs = []
for i in rookie_pages.year.values:
# scrape the rookie batters (includes pitchers if PA)
soup_pages = BeautifulSoup(rookie_pages.html[i], 'lxml')
batting = soup_pages.find('table',{'id':'misc_batting'})
batting_df = pd.read_html(str(batting))
# add Name, Debut, Age, Tm, and rookie_year
year_df = batting_df[0].loc[:,['Name','Debut','Age','Tm']]
year_df['rookie_year'] = [i] * batting_df[0].shape[0]
year_df.rookie_year = year_df.rookie_year.astype(int)
rookie_dfs.append(year_df) #= rookie_df.append(year_df)
# Combine the rookie_dfs
rookie_df = pd.concat(rookie_dfs)
# Strip HOF indicator from name
rookie_df.Name = rookie_df.Name.str.replace('HOF','')
rookie_df[rookie_df.Name.str.contains('HOF')]
rookie_df.Name = rookie_df.Name.str.strip()
# Make Debut a date time
rookie_df.Debut = rookie_df.Debut.astype('datetime64')
return rookie_df
def get_player_salary(html, year, name, ind):
    ''' Parses a player's page on baseballreference.com and builds a data frame of their salary data
html: player page from baseballreference.com
year: rookie year
name: player name
    ind: index to build a unique identifier
return: appends to /data/salary.csv
'''
salary_soup = BeautifulSoup(html, 'lxml')
salary_html = salary_soup.find('table',{'id':'br-salaries'})
if salary_html is None:
return None
salary_tables_lst = pd.read_html(str(salary_html))
salary_df = salary_tables_lst[0]
salary_df = salary_df[~salary_df.Year.isnull()]
salary_df = salary_df[salary_df.Year.str.contains(r'[1-2]\d{3}$')]
salary_df['name'] = [name] * salary_df.shape[0]
salary_df['UID'] = [ind] * salary_df.shape[0]
salary_df['rookie_year'] = [year] * salary_df.shape[0]
salary_df.Salary = (salary_df.Salary
.str.replace('$','')
.str.replace(',','')
.str.replace('*','')
)
salary_df.loc[salary_df.Salary == '', 'Salary'] = np.nan
salary_df.Salary = salary_df.Salary.astype(float)
salary_df.Age = salary_df.Age.astype(float)
if salary_df.SrvTm.dtype != 'float64':
salary_df.loc[salary_df.SrvTm == '?','SrvTm'] = np.nan
salary_df.SrvTm = salary_df.SrvTm.astype(float)
if ind == 1:
salary_df.to_csv(PATH_S)
else:
with open(PATH_S, 'a') as f:
salary_df.to_csv(f, header=False)
def run_models(X_train, y_train, name, results = None, cv=10, alphas=[10**a for a in range(-2,5)]):
''' Method to quickly run all models with different feature sets.
Runs: OLS, Standard Scaler + LassoCV, and PolynomialFeatures + Standard Scaler + LassoCV
X_train: training feature set
y_train: training actuals
name: name of the type of run (used for comparing different feature sets)
results: data frame to hold the results if varying the feature set
cv: number of n-fold cross validations
alphas: range of alpha values for CV
return: pd.DataFrame of the MSE results
'''
# capture the results for the feature set
model_results = pd.Series(name=name)
    # Cross-validate a plain linear regression model (cv folds).
    lin_model = LinearRegression()
    scores = cross_val_score(lin_model, X_train, y_train, cv=cv, scoring='neg_mean_squared_error')
    model_results['linear model - cv' + str(cv)] = np.mean(-scores)
    # Now perform an n-fold cross-validation with a lasso pipeline
cv_lasso = make_pipeline(StandardScaler(), LassoCV(cv=cv, alphas=alphas, tol=0.001))
cv_lasso.fit(X_train, y_train)
model_results['lasso cv - ' + str(cv_lasso.get_params()['lassocv'].alpha_)] = mean_mse_Lasso(cv_lasso, 'lassocv')
    # Now add 2-5 degree polynomial features and perform an n-fold cross-validation.
for degrees in range(2,6):
cv_lasso_poly = make_pipeline(PolynomialFeatures(degrees), StandardScaler(), LassoCV(cv=cv, alphas=alphas,tol=0.001))
cv_lasso_poly.fit(X_train, y_train)
model_results['lasso poly ' + str(degrees) + ' cv - ' + str(cv_lasso_poly.get_params()['lassocv'].alpha_)] = mean_mse_Lasso(cv_lasso_poly, 'lassocv')
if results is None:
results = pd.DataFrame(model_results)
else:
results = pd.concat([results, pd.DataFrame(model_results)], axis=1, sort=True)
return results
def mean_mse_Lasso(model,name):
    ''' Calculates the MSE of an n-fold CV from LassoCV model
model: sklearn model or pipeline
name: name of the lassocv model
return float64 (mean MSE)
'''
mse = model.get_params()[name].mse_path_
alphas = model.get_params()[name].alphas_
mse_df = pd.DataFrame(data=mse, index=alphas)
return mse_df.loc[model.get_params()[name].alpha_].mean()
| 0
| 0
| 0
|
9fd68fa00d2ede88bbdff5ff4167ef2f9ac066c1
| 4,387
|
py
|
Python
|
utils/feature_desc.py
|
Hzfinfdu/DataLab
|
0da0226866f59ed2e535c346833f0797499b5174
|
[
"Apache-2.0"
] | null | null | null |
utils/feature_desc.py
|
Hzfinfdu/DataLab
|
0da0226866f59ed2e535c346833f0797499b5174
|
[
"Apache-2.0"
] | null | null | null |
utils/feature_desc.py
|
Hzfinfdu/DataLab
|
0da0226866f59ed2e535c346833f0797499b5174
|
[
"Apache-2.0"
] | null | null | null |
import sys
feature_map_basic = {
"avg_length":"the average length",
"avg_basic_words":"the ratio of basic words (pre-defied by a dictionary)",
"avg_lexical_richness":"the lexical diversity",
"avg_gender_bias_word_male":"the average ratio of male words",
"avg_gender_bias_word_female":"the average ratio of female words",
"avg_gender_bias_single_name_male":"the average ratio of male names",
"avg_gender_bias_single_name_female":"the average ratio of female names",
"avg_gender_bias_name_male":"the average ratio of male names",
"avg_gender_bias_name_female": "the average ratio of female names",
"avg_span_length_of_ner_tags": "the average of entity length",
"avg_eCon_of_ner_tags": "the average of entity's label consistency (defined in the paper: Interpretable Multi-dataset Evaluation for Named Entity Recognition)",
"avg_eFre_of_ner_tags":"the average of entity frequency",
"avg_density":"the average density (measures to what extent a summary covers the content in the source text)",
"avg_coverage":"the average of coverage (measures to what extent a summary covers the content in the source text)",
"avg_compression": "the average of compression (measures the compression ratio from the source text to the generated summary)",
"avg_repetition": "the average of repetition (measures the rate of repeated segments in summaries. The segments are instantiated as trigrams)",
"avg_novelty": "the average of novelty (the proportion of segments in the summaries that have not appeared in source documents. The segments are instantiated as bigrams)",
"avg_copy_length": "the average of copy_length (the average length of segments in summary copied from source document)",
}
feature_map_basic2 = {"divide":"fraction",
"minus":"difference",
"add":"addition",}
# Usage & Test Cases:
# feature_name = "background_train_avg_gender_bias_single_name_male"
# feature_name = "bleu_question_situation_avg_test"
# feature_name = "question_length_divide_situation_avg_validation_length"
# feature_name = "avg_span_length_of_ner_tags_test"
# feature_name = "avg_eCon_of_ner_tags_validation"
# feature_name = "avg_compression_of_test_highlights_and_article"
# feature_name = "avg_copy_length_of_test_highlights_and_article"
# feature_name = "premise_length_add_hypothesis_avg_validation_length"
# feature_name = "premise_length_divide_hypothesis_avg_train_length"
# feature_name = "bleu_question_context_avg_train"
# feature_name = "question_length_divide_context_avg_validation_length"
#
# print(get_feature_description(feature_name))
| 49.292135
| 175
| 0.714611
|
import sys
feature_map_basic = {
"avg_length":"the average length",
"avg_basic_words":"the ratio of basic words (pre-defied by a dictionary)",
"avg_lexical_richness":"the lexical diversity",
"avg_gender_bias_word_male":"the average ratio of male words",
"avg_gender_bias_word_female":"the average ratio of female words",
"avg_gender_bias_single_name_male":"the average ratio of male names",
"avg_gender_bias_single_name_female":"the average ratio of female names",
"avg_gender_bias_name_male":"the average ratio of male names",
"avg_gender_bias_name_female": "the average ratio of female names",
"avg_span_length_of_ner_tags": "the average of entity length",
"avg_eCon_of_ner_tags": "the average of entity's label consistency (defined in the paper: Interpretable Multi-dataset Evaluation for Named Entity Recognition)",
"avg_eFre_of_ner_tags":"the average of entity frequency",
"avg_density":"the average density (measures to what extent a summary covers the content in the source text)",
"avg_coverage":"the average of coverage (measures to what extent a summary covers the content in the source text)",
"avg_compression": "the average of compression (measures the compression ratio from the source text to the generated summary)",
"avg_repetition": "the average of repetition (measures the rate of repeated segments in summaries. The segments are instantiated as trigrams)",
"avg_novelty": "the average of novelty (the proportion of segments in the summaries that have not appeared in source documents. The segments are instantiated as bigrams)",
"avg_copy_length": "the average of copy_length (the average length of segments in summary copied from source document)",
}
feature_map_basic2 = {"divide":"fraction",
"minus":"difference",
"add":"addition",}
def get_feature_description(feature_name:str):
desc = ""
if ("avg_" + feature_name.split("avg_")[-1]) in feature_map_basic.keys():
raw_feature_name = feature_name.split("_")[0]
split_name = feature_name.split("_")[1]
desc = feature_map_basic["avg_" + feature_name.split("avg_")[-1]]
desc = desc + " of " + raw_feature_name + " in " + split_name + " set"
elif feature_name.split("_")[0] == "bleu":
raw_feature_name1 = feature_name.split("_")[1]
raw_feature_name2 = feature_name.split("_")[2]
split_name = feature_name.split("_")[-1]
desc = "the similarity score (using BLEU) between `" + raw_feature_name1 + "` and `" + raw_feature_name2 + "` in " + split_name + " set"
elif feature_name.split("_")[2] in feature_map_basic2.keys():
operation_name = feature_map_basic2[feature_name.split("_")[2]]
raw_feature_name1 = feature_name.split("_")[0]
raw_feature_name2 = feature_name.split("_")[3]
split_name = feature_name.split("_")[-2]
desc = "the length " + operation_name + " between `" + raw_feature_name1 + "` and `" + raw_feature_name2 + "` in " + split_name + " set"
elif "_".join(feature_name.split("_")[0:-1]) in feature_map_basic.keys():
split_name = feature_name.split("_")[-1]
desc = feature_map_basic["_".join(feature_name.split("_")[0:-1])]
desc = desc + " in " + split_name + " set"
elif feature_name.split("_of")[0] in feature_map_basic.keys():
split_name = feature_name.split("_of")[-1].split("_")[1]
desc = feature_map_basic[feature_name.split("_of")[0]] + " of " + split_name + " set"
if desc == "":
desc = feature_name
return desc
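# Hedged sketch (editor addition): a runnable version of a few of the
# commented test cases below, one per branch of get_feature_description.
def _demo_feature_desc():
    names = ["avg_span_length_of_ner_tags_test",
             "bleu_question_context_avg_train",
             "question_length_divide_context_avg_validation_length"]
    return [get_feature_description(n) for n in names]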
# Usage & Test Cases:
# feature_name = "background_train_avg_gender_bias_single_name_male"
# feature_name = "bleu_question_situation_avg_test"
# feature_name = "question_length_divide_situation_avg_validation_length"
# feature_name = "avg_span_length_of_ner_tags_test"
# feature_name = "avg_eCon_of_ner_tags_validation"
# feature_name = "avg_compression_of_test_highlights_and_article"
# feature_name = "avg_copy_length_of_test_highlights_and_article"
# feature_name = "premise_length_add_hypothesis_avg_validation_length"
# feature_name = "premise_length_divide_hypothesis_avg_train_length"
# feature_name = "bleu_question_context_avg_train"
# feature_name = "question_length_divide_context_avg_validation_length"
#
# print(get_feature_description(feature_name))
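The commented cases above can be exercised directly; a minimal self-check using one of the names already listed:

print(get_feature_description("avg_span_length_of_ner_tags_test"))
# -> the average of entity length in test set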
| 1,717
| 0
| 23
|
043864d58961f211e064016975098e833c2fe172
| 317
|
py
|
Python
|
api/src/wt/fields/links/_model.py
|
sedlar/work-tracking
|
78917ff8200829eb674142ce43b503d8e892d7eb
|
[
"BSD-2-Clause"
] | null | null | null |
api/src/wt/fields/links/_model.py
|
sedlar/work-tracking
|
78917ff8200829eb674142ce43b503d8e892d7eb
|
[
"BSD-2-Clause"
] | null | null | null |
api/src/wt/fields/links/_model.py
|
sedlar/work-tracking
|
78917ff8200829eb674142ce43b503d8e892d7eb
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import List
from wt.ids import BaseId
from wt.fields.links._obj import Link
| 24.384615
| 69
| 0.731861
|
from typing import List
from wt.ids import BaseId
from wt.fields.links._obj import Link
class LinksModel:
def set_entity_links(self, entity_id: BaseId, links: List[Link]):
raise NotImplementedError()
def get_entity_links(self, entity_id: BaseId) -> List[Link]:
raise NotImplementedError()
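Both methods above are abstract. A minimal in-memory sketch (hypothetical, not part of the package, and assuming BaseId instances are hashable) could look like:

class InMemoryLinksModel(LinksModel):
    def __init__(self):
        self._links = {}  # entity id -> list of Link
    def set_entity_links(self, entity_id: BaseId, links: List[Link]):
        self._links[entity_id] = list(links)
    def get_entity_links(self, entity_id: BaseId) -> List[Link]:
        return self._links.get(entity_id, [])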
| 155
| -4
| 76
|
f533df67d3a04bbd440edb67989f011161b292e5
| 584
|
py
|
Python
|
filedemo.py
|
evertmulder/Aan-de-slag-met-Python
|
414bc12e5bca56d99ffedc46ada81bd44bc75ac5
|
[
"MIT"
] | null | null | null |
filedemo.py
|
evertmulder/Aan-de-slag-met-Python
|
414bc12e5bca56d99ffedc46ada81bd44bc75ac5
|
[
"MIT"
] | null | null | null |
filedemo.py
|
evertmulder/Aan-de-slag-met-Python
|
414bc12e5bca56d99ffedc46ada81bd44bc75ac5
|
[
"MIT"
] | null | null | null |
fileName = 'test.txt'
linebreak = '\n'
items = [['a,99,22'], ['b,34,dd'], ['c,5,21']]
writeItemsToFile(items)
items = getItemsFromFile()
print(items)
| 18.83871
| 57
| 0.642123
|
fileName = 'test.txt'
linebreak = '\n'
def getItemsFromFile():
    """Read items back from the file; returns [] when the file cannot be opened."""
    try:
        file = open(fileName, "r")
    except OSError:
        return []
    listOfItems = []
    for item in file:
        listOfItems.append(item.strip(linebreak).split(', '))
    file.close()
    return listOfItems
def writeItemsToFile(listOfItems):
    """Write each item as a comma-separated line; returns False when the file cannot be opened."""
    try:
        file = open(fileName, "w")
    except OSError:
        return False
    for item in listOfItems:
        file.write((', '.join(map(str, item))) + linebreak)
    file.close()
    return True
items = [['a,99,22'], ['b,34,dd'], ['c,5,21']]
writeItemsToFile(items)
items = getItemsFromFile()
print(items)
| 387
| 0
| 46
|
20fa375c6b7669cc69f05dbc68d39a9201f664f4
| 2,045
|
py
|
Python
|
Python3/450.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/450.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/450.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 56 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
__________________________________________________________________________________________________
sample 17372 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
__________________________________________________________________________________________________
| 32.460317
| 98
| 0.577995
|
__________________________________________________________________________________________________
sample 56 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
# always check for null
if not root:
return
# find key using binary search
if key < root.val:
root.left = self.deleteNode(root.left, key)
elif key > root.val:
root.right = self.deleteNode(root.right, key)
else:
# at this point, we are at the root
if not root.left:
return root.right
else:
temp = root.left
while temp.right:
temp = temp.right
root.val = temp.val
root.left = self.deleteNode(root.left, temp.val)
return root
__________________________________________________________________________________________________
sample 17372 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
if root is None:
return None
if root.val == key:
if root.right is not None:
cur = root.right;
while cur.left is not None:
cur = cur.left
cur.left = root.left
return root.right
else:
return root.left
elif root.val < key:
root.right = self.deleteNode(root.right, key)
else:
root.left = self.deleteNode(root.left, key)
return root
__________________________________________________________________________________________________
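Either implementation can be sanity-checked with a small hand-built BST (a hypothetical harness, not part of the submissions; it redefines the TreeNode from the comment header and uses whichever Solution class is in scope):

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []
root = TreeNode(5)
root.left, root.right = TreeNode(3), TreeNode(6)
root.left.left, root.left.right = TreeNode(2), TreeNode(4)
root.right.right = TreeNode(7)
print(inorder(Solution().deleteNode(root, 3)))  # expected: [2, 4, 5, 6, 7]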
| 1,287
| -12
| 98
|
e1782d29f219364bea2d656d4bc1f58100d7018c
| 5,689
|
py
|
Python
|
IEX_29id/devices/slits.py
|
ravescovi/macros_29id
|
c569d4af29644f622a882162b2aa1ca03fb5454c
|
[
"MIT"
] | null | null | null |
IEX_29id/devices/slits.py
|
ravescovi/macros_29id
|
c569d4af29644f622a882162b2aa1ca03fb5454c
|
[
"MIT"
] | null | null | null |
IEX_29id/devices/slits.py
|
ravescovi/macros_29id
|
c569d4af29644f622a882162b2aa1ca03fb5454c
|
[
"MIT"
] | 1
|
2021-09-20T21:26:26.000Z
|
2021-09-20T21:26:26.000Z
|
from epics import caput, caget
from math import inf, nan
from IEX_29id.utils.exp import CheckBranch
from IEX_29id.utils.misc import read_dict
from bluesky import plan_stubs as bps
import logging
from ophyd import EpicsMotor, EpicsSignal, PVPositionerPC, EpicsSignalRO, Signal
from ophyd import Component, Device
from apstools.devices import EpicsDescriptionMixin
# busy_record = Component(EpicsSignalRO, "29idKappa:Kappa_busy", done_value=0,kind='omitted')
## Instantiate pseudo motors
slits = _My4Slits("29idb:Slit",name="motors")
#--------------------------- Old Functions ----------------------------#
# def slit(val):
# """
# Sets the exit slits:
# ARPES = 0 < x < 300 um
# Kappa = 0 < x < 1000 um
# """
# SetExitSlit(val)
# def SetSlit1A(Hsize,Vsize,Hcenter,Vcenter,q=None):
# """move slits 1A: Hsize x Vsize centered at (Hcenter,Vcenter)"""
# caput("29idb:Slit1Hsync.PROC",1) # make sure slits are in sink with real motors
# caput("29idb:Slit1Vsync.PROC",1)
# caput("29idb:Slit1Hsize.VAL", Hsize)
# caput("29idb:Slit1Vsize.VAL", Vsize)
# caput("29idb:Slit1Hcenter.VAL",Hcenter)
# caput("29idb:Slit1Vcenter.VAL",Vcenter)
# if not q:
# print("Slit-1A = ("+str(round(Hsize,3))+"x"+str(round(Vsize,3))+") @ ("+str(Hcenter)+","+str(Vcenter)+")")
# def SetSlit2B(Hsize,Vsize,Hcenter,Vcenter,q=None):
# caput("29idb:Slit2Hsync.PROC",1)
# caput("29idb:Slit2Vsync.PROC",1)
# caput("29idb:Slit2Hsize.VAL", Hsize)
# caput("29idb:Slit2Vsize.VAL", Vsize)
# caput("29idb:Slit2Hcenter.VAL",Hcenter)
# caput("29idb:Slit2Vcenter.VAL",Vcenter)
# if not q:
# print("Slit-2B = ("+str(Hsize)+"x"+str(Vsize)+") @ ("+str(Hcenter)+","+str(Vcenter)+")")
# def SetSlit3C(size):
# caput("29idb:Slit3CFit.A",size)
# print("Slit-3C =",size,"um")
# def SetSlit3D(size,position=None):
# if position == None:
# position=round(caget('29idb:Slit4Vt2.D'),2)
# caput("29idb:Slit4Vcenter.VAL")
# caput("29idb:Slit4Vsize.VAL",size,wait=True,timeout=18000)
# print("Slit-3D =",size,"um")
# def SetSlit_BL(c2B=1,c1A=1,q=None):
# RBV=caget("29idmono:ENERGY_MON")
# GRT=caget("29idmono:GRT_DENSITY")
# hv=max(RBV,500)
# hv=min(RBV,2000)
# c=4.2/2.2
# if GRT==1200:
# GRT='MEG'
# V=0.65 # set to 65% of RR calculation for both grt => cf 2016_2_summary
# elif GRT==2400:
# GRT='HEG'
# V=0.65*c # set to 65% of RR calculation (no longer 80%) => cf 2016_2_summary
# try:
# slit_position=read_dict(FileName='Dict_Slit.txt')
# except KeyError:
# print("Unable to read dictionary")
# return
# V2center= slit_position[GRT]['S2V']
# H2center= slit_position[GRT]['S2H']
# V1center= slit_position[GRT]['S1V']
# H1center= slit_position[GRT]['S1H']
# Size1A=( Aperture_Fit(hv,1)[0]*c1A, Aperture_Fit(hv,1)[1]*c1A )
# Size2B=( Aperture_Fit(hv,2)[0]*c2B, round(Aperture_Fit(hv,2)[1]*c2B*V,3))
# SetSlit1A (Size1A[0],Size1A[1],H1center,V1center,q) # standard operating
# SetSlit2B(Size2B[0],Size2B[1],H2center,V2center,q)
# def SetExitSlit(size):
# branch=CheckBranch()
# if branch == "c":
# SetSlit3C(size)
# elif branch == "d":
# SetSlit3D(size)
# def Slit3C_Fit(size):
# K0=-36.383
# K1=0.16473
# K2=-0.00070276
# K3=8.4346e-06
# K4=-5.6215e-08
# K5=1.8223e-10
# K6=-2.2635e-13
# motor=K0+K1*size+K2*size**2+K3*size**3+K4*size**4+K5*size**5+K6*size**6
# return motor
# def Slit_Coef(n):
# if n == 1:
# pv='29id:k_slit1A'
# #Redshifted x (H):
# H0=2.3325
# H1=-.000936
# H2=2.4e-7
# #Redshifted z (V):
# V0=2.3935
# V1=-.0013442
# V2=3.18e-7
# if n == 2:
# pv='29id:k_slit2B'
# #Redshifted x (H):
# H0=3.61
# H1=-0.00186
# H2=5.2e-7
# #Redshifted z (V):
# V0=6.8075
# V1=-0.003929
# V2=9.5e-7
# K=H0,H1,H2,V0,V1,V2
# return pv,K
# def Aperture_Fit(hv,n):
# K=Slit_Coef(n)[1]
# sizeH=K[0]+K[1]*hv+K[2]*hv*hv
# sizeV=K[3]+K[4]*hv+K[5]*hv*hv
# return [round(sizeH,3),round(sizeV,3)]
# # SetSlits:
# def SetSlit(n,Hsize=None,Vsize=None,Hcenter=0,Vcenter=0,q=None):
# if n == 1:
# if Hsize in [inf,nan,None]: Hsize=4.5
# if Vsize in [inf,nan,None]: Vsize=4.5
# SetSlit1A(Hsize,Vsize,Hcenter,Vcenter,q=None)
# elif n == 2:
# if Hsize in [inf,nan,None]: Hsize=6
# if Vsize in [inf,nan,None]: Vsize=8
# SetSlit2B(Hsize,Vsize,Hcenter,Vcenter,q=None)
# else:
# print('Not a valid slit number')
| 29.942105
| 120
| 0.587449
|
from epics import caput, caget
from math import inf, nan
from IEX_29id.utils.exp import CheckBranch
from IEX_29id.utils.misc import read_dict
from bluesky import plan_stubs as bps
import logging
from ophyd import EpicsMotor, EpicsSignal, PVPositionerPC, EpicsSignalRO, Signal
from ophyd import Component, Device
from apstools.devices import EpicsDescriptionMixin
class _SoftSlitH(PVPositionerPC):
setpoint = Component(EpicsSignal, "Hsize.VAL") # 29idb:Slit1Hsize.VAL => setpoint
readback = Component(EpicsSignalRO, "Ht2.C") # 29idb:Slit1t2.C => readback
sync = Component(EpicsSignal,"Hsync.PROC") # RO means ReadOnly, those are PV that we cannot write to
class _SoftSlitV(PVPositionerPC):
setpoint = Component(EpicsSignal, "Vsize.VAL") # 29idb:SlitVsize.VAL => setpoint
readback = Component(EpicsSignalRO, "Vt2.D") # 29idb:Slit1t2.D => readback
sync = Component(EpicsSignal,"Vsync.PROC") # RO means ReadOnly, those are PV that we cannot write to
class _My4Slits(Device):
h1 = Component(_SoftSlitH, "1")
v1 = Component(_SoftSlitV, "1")
h2 = Component(_SoftSlitH, "2")
v2 = Component(_SoftSlitV, "2")
# busy_record = Component(EpicsSignalRO, "29idKappa:Kappa_busy", done_value=0,kind='omitted')
## Instantiate pseudo motors
slits = _My4Slits("29idb:Slit",name="motors")
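# Usage (a minimal sketch; assumes a live EPICS IOC serving the 29idb PVs):
#
#     slits.h1.move(2.0, wait=True)   # set slit 1 horizontal size
#     print(slits.h1.readback.get())  # read the synchronized size back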
#--------------------------- Old Functions ----------------------------#
# def slit(val):
# """
# Sets the exit slits:
# ARPES = 0 < x < 300 um
# Kappa = 0 < x < 1000 um
# """
# SetExitSlit(val)
# def SetSlit1A(Hsize,Vsize,Hcenter,Vcenter,q=None):
# """move slits 1A: Hsize x Vsize centered at (Hcenter,Vcenter)"""
# caput("29idb:Slit1Hsync.PROC",1) # make sure slits are in sink with real motors
# caput("29idb:Slit1Vsync.PROC",1)
# caput("29idb:Slit1Hsize.VAL", Hsize)
# caput("29idb:Slit1Vsize.VAL", Vsize)
# caput("29idb:Slit1Hcenter.VAL",Hcenter)
# caput("29idb:Slit1Vcenter.VAL",Vcenter)
# if not q:
# print("Slit-1A = ("+str(round(Hsize,3))+"x"+str(round(Vsize,3))+") @ ("+str(Hcenter)+","+str(Vcenter)+")")
# def SetSlit2B(Hsize,Vsize,Hcenter,Vcenter,q=None):
# caput("29idb:Slit2Hsync.PROC",1)
# caput("29idb:Slit2Vsync.PROC",1)
# caput("29idb:Slit2Hsize.VAL", Hsize)
# caput("29idb:Slit2Vsize.VAL", Vsize)
# caput("29idb:Slit2Hcenter.VAL",Hcenter)
# caput("29idb:Slit2Vcenter.VAL",Vcenter)
# if not q:
# print("Slit-2B = ("+str(Hsize)+"x"+str(Vsize)+") @ ("+str(Hcenter)+","+str(Vcenter)+")")
# def SetSlit3C(size):
# caput("29idb:Slit3CFit.A",size)
# print("Slit-3C =",size,"um")
# def SetSlit3D(size,position=None):
# if position == None:
# position=round(caget('29idb:Slit4Vt2.D'),2)
# caput("29idb:Slit4Vcenter.VAL")
# caput("29idb:Slit4Vsize.VAL",size,wait=True,timeout=18000)
# print("Slit-3D =",size,"um")
# def SetSlit_BL(c2B=1,c1A=1,q=None):
# RBV=caget("29idmono:ENERGY_MON")
# GRT=caget("29idmono:GRT_DENSITY")
# hv=max(RBV,500)
# hv=min(RBV,2000)
# c=4.2/2.2
# if GRT==1200:
# GRT='MEG'
# V=0.65 # set to 65% of RR calculation for both grt => cf 2016_2_summary
# elif GRT==2400:
# GRT='HEG'
# V=0.65*c # set to 65% of RR calculation (no longer 80%) => cf 2016_2_summary
# try:
# slit_position=read_dict(FileName='Dict_Slit.txt')
# except KeyError:
# print("Unable to read dictionary")
# return
# V2center= slit_position[GRT]['S2V']
# H2center= slit_position[GRT]['S2H']
# V1center= slit_position[GRT]['S1V']
# H1center= slit_position[GRT]['S1H']
# Size1A=( Aperture_Fit(hv,1)[0]*c1A, Aperture_Fit(hv,1)[1]*c1A )
# Size2B=( Aperture_Fit(hv,2)[0]*c2B, round(Aperture_Fit(hv,2)[1]*c2B*V,3))
# SetSlit1A (Size1A[0],Size1A[1],H1center,V1center,q) # standard operating
# SetSlit2B(Size2B[0],Size2B[1],H2center,V2center,q)
# def SetExitSlit(size):
# branch=CheckBranch()
# if branch == "c":
# SetSlit3C(size)
# elif branch == "d":
# SetSlit3D(size)
# def Slit3C_Fit(size):
# K0=-36.383
# K1=0.16473
# K2=-0.00070276
# K3=8.4346e-06
# K4=-5.6215e-08
# K5=1.8223e-10
# K6=-2.2635e-13
# motor=K0+K1*size+K2*size**2+K3*size**3+K4*size**4+K5*size**5+K6*size**6
# return motor
# def Slit_Coef(n):
# if n == 1:
# pv='29id:k_slit1A'
# #Redshifted x (H):
# H0=2.3325
# H1=-.000936
# H2=2.4e-7
# #Redshifted z (V):
# V0=2.3935
# V1=-.0013442
# V2=3.18e-7
# if n == 2:
# pv='29id:k_slit2B'
# #Redshifted x (H):
# H0=3.61
# H1=-0.00186
# H2=5.2e-7
# #Redshifted z (V):
# V0=6.8075
# V1=-0.003929
# V2=9.5e-7
# K=H0,H1,H2,V0,V1,V2
# return pv,K
# def Aperture_Fit(hv,n):
# K=Slit_Coef(n)[1]
# sizeH=K[0]+K[1]*hv+K[2]*hv*hv
# sizeV=K[3]+K[4]*hv+K[5]*hv*hv
# return [round(sizeH,3),round(sizeV,3)]
# # SetSlits:
# def SetSlit(n,Hsize=None,Vsize=None,Hcenter=0,Vcenter=0,q=None):
# if n == 1:
# if Hsize in [inf,nan,None]: Hsize=4.5
# if Vsize in [inf,nan,None]: Vsize=4.5
# SetSlit1A(Hsize,Vsize,Hcenter,Vcenter,q=None)
# elif n == 2:
# if Hsize in [inf,nan,None]: Hsize=6
# if Vsize in [inf,nan,None]: Vsize=8
# SetSlit2B(Hsize,Vsize,Hcenter,Vcenter,q=None)
# else:
# print('Not a valid slit number')
| 0
| 817
| 69
|
3a37727f98514ee7c9bc433d59a81158bc9871f0
| 2,292
|
py
|
Python
|
tests/getnet/services/customers/test_integration.py
|
rafagonc/getnet-py
|
d2a5278b497408b5245d5d0fecd2e424f4ddb0d5
|
[
"MIT"
] | null | null | null |
tests/getnet/services/customers/test_integration.py
|
rafagonc/getnet-py
|
d2a5278b497408b5245d5d0fecd2e424f4ddb0d5
|
[
"MIT"
] | null | null | null |
tests/getnet/services/customers/test_integration.py
|
rafagonc/getnet-py
|
d2a5278b497408b5245d5d0fecd2e424f4ddb0d5
|
[
"MIT"
] | null | null | null |
import os
import unittest
from vcr_unittest import VCRTestCase
import getnet
from getnet import NotFound
from getnet.services.base import ResponseList
from getnet.services.customers import Service, Customer
from tests.getnet.services.customers.test_customer import sample
if __name__ == "__main__":
unittest.main()
| 32.742857
| 76
| 0.687609
|
import os
import unittest
from vcr_unittest import VCRTestCase
import getnet
from getnet import NotFound
from getnet.services.base import ResponseList
from getnet.services.customers import Service, Customer
from tests.getnet.services.customers.test_customer import sample
class CustomersIntegrationTest(VCRTestCase):
def setUp(self) -> None:
super(CustomersIntegrationTest, self).setUp()
self.client = getnet.Client(
os.environ.get("GETNET_SELLER_ID"),
os.environ.get("GETNET_CLIENT_ID"),
os.environ.get("GETNET_CLIENT_SECRET"),
getnet.api.HOMOLOG,
)
self.service = Service(self.client)
def testCreate(self):
data = sample.copy()
data["document_number"] = "01234567888"
customer = self.service.create(Customer(**data))
self.assertIsInstance(customer, Customer)
self.assertEqual(data.get("customer_id"), customer.customer_id)
def testInvalidCreate(self):
with self.assertRaises(getnet.BadRequest) as err:
self.service.create(Customer(**sample))
self.assertEqual("Bad Request", err.exception.error_code)
def testGet(self):
data = sample.copy()
data["customer_id"] = "test_integration_get"
data["document_number"] = "01234567811"
created_customer = self.service.create(Customer(**data))
customer = self.service.get(created_customer.customer_id)
self.assertIsInstance(customer, Customer)
self.assertEqual(created_customer, customer)
self.assertEqual(created_customer.customer_id, customer.customer_id)
def testInvalidGet(self):
with self.assertRaises(NotFound) as err:
self.service.get("14a2ce5d-ebc3-49dc-a516-cb5239b02285")
self.assertEqual("Not Found", err.exception.error_code)
def testAll(self):
customers = self.service.all()
self.assertIsInstance(customers, ResponseList)
self.assertEqual(1, customers.page)
self.assertEqual(100, customers.limit)
self.assertIsNotNone(customers.total)
def testAllNotFound(self):
cards = self.service.all(document_number="01234567855")
self.assertEqual(0, cards.total)
if __name__ == "__main__":
unittest.main()
| 1,734
| 23
| 211
|
c197af5016a465d3ef80877ff635bb66841b6040
| 116
|
py
|
Python
|
src/Metrics/tobetested/memory_total.py
|
coolmay/hpcpack-acm
|
8c367148e8e9655a497b9589e9d1050faba02382
|
[
"MIT"
] | null | null | null |
src/Metrics/tobetested/memory_total.py
|
coolmay/hpcpack-acm
|
8c367148e8e9655a497b9589e9d1050faba02382
|
[
"MIT"
] | null | null | null |
src/Metrics/tobetested/memory_total.py
|
coolmay/hpcpack-acm
|
8c367148e8e9655a497b9589e9d1050faba02382
|
[
"MIT"
] | null | null | null |
import psutil
import json
mem = psutil.virtual_memory()
result = { "_Total": mem.total }
print(json.dumps(result))
| 16.571429
| 32
| 0.732759
|
import psutil
import json
mem = psutil.virtual_memory()
result = { "_Total": mem.total }
print(json.dumps(result))
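Running the script prints a single JSON object on stdout (illustrative run; the byte count depends on the host):

$ python3 memory_total.py
{"_Total": 16723812352}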
| 0
| 0
| 0
|
5d843f8c623b2cc6623f676f601598f6e245f5fa
| 3,962
|
py
|
Python
|
pixelsort/interval.py
|
HexCodeFFF/pixelsort
|
2b90d8e1cc937a332cea5b0a59c690e986d58e82
|
[
"MIT"
] | null | null | null |
pixelsort/interval.py
|
HexCodeFFF/pixelsort
|
2b90d8e1cc937a332cea5b0a59c690e986d58e82
|
[
"MIT"
] | null | null | null |
pixelsort/interval.py
|
HexCodeFFF/pixelsort
|
2b90d8e1cc937a332cea5b0a59c690e986d58e82
|
[
"MIT"
] | null | null | null |
import typing
from random import randint, random as random_range
from PIL import ImageFilter, Image
from pixelsort.sorting import lightness
def edge(image: Image.Image, lower_threshold: float, **_) -> typing.List[typing.List[int]]:
"""Performs an edge detection, which is used to define intervals. Tweak threshold with threshold."""
edge_data = image.filter(ImageFilter.FIND_EDGES).convert('RGBA').load()
intervals = []
for y in range(image.size[1]):
intervals.append([])
flag = True
for x in range(image.size[0]):
if lightness(edge_data[x, y]) < lower_threshold * 255:
flag = True
elif flag:
intervals[y].append(x)
flag = False
return intervals
def threshold(image: Image.Image, lower_threshold: float, upper_threshold: float, **_) -> typing.List[typing.List[int]]:
"""Intervals defined by lightness thresholds; only pixels with a lightness between the upper and lower thresholds
are sorted."""
intervals = []
image_data = image.load()
for y in range(image.size[1]):
intervals.append([])
for x in range(image.size[0]):
level = lightness(image_data[x, y])
if level < lower_threshold * 255 or level > upper_threshold * 255:
intervals[y].append(x)
return intervals
def random(image, char_length, **_) -> typing.List[typing.List[int]]:
"""Randomly generate intervals. Distribution of widths is linear by default. Interval widths can be scaled using
char_length."""
intervals = []
for y in range(image.size[1]):
intervals.append([])
x = 0
while True:
x += int(char_length * random_range())
if x > image.size[0]:
break
else:
intervals[y].append(x)
return intervals
def waves(image, char_length, **_) -> typing.List[typing.List[int]]:
"""Intervals are waves of nearly uniform widths. Control width of waves with char_length."""
intervals = []
for y in range(image.size[1]):
intervals.append([])
x = 0
while True:
x += char_length + randint(0, 10)
if x > image.size[0]:
break
else:
intervals[y].append(x)
return intervals
def file_mask(image, interval_image, **_) -> typing.List[typing.List[int]]:
"""Intervals taken from another specified input image. Must be black and white, and the same size as the input
image."""
intervals = []
data = interval_image.load()
for y in range(image.size[1]):
intervals.append([])
flag = True
for x in range(image.size[0]):
if data[x, y]:
flag = True
elif flag:
intervals[y].append(x)
flag = False
return intervals
def file_edges(image, interval_image, lower_threshold, **_) -> typing.List[typing.List[int]]:
"""Intervals defined by performing edge detection on the file specified by -f. Must be the same size as the input
image."""
edge_data = interval_image.filter(
ImageFilter.FIND_EDGES).convert('RGBA').load()
intervals = []
for y in range(image.size[1]):
intervals.append([])
flag = True
for x in range(image.size[0]):
if lightness(edge_data[x, y]) < lower_threshold * 255:
flag = True
elif flag:
intervals[y].append(x)
flag = False
return intervals
def none(image, **_) -> typing.List[typing.List[int]]:
"""Sort whole rows, only stopping at image borders."""
intervals = []
for y in range(image.size[1]):
intervals.append([])
return intervals
choices = {
"random": random,
"threshold": threshold,
"edges": edge,
"waves": waves,
"file": file_mask,
"file-edges": file_edges,
"none": none
}
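Callers pick an interval function from the choices table by name (a minimal sketch; the input path is hypothetical):

from PIL import Image
img = Image.open("input.png").convert("RGBA")
intervals = choices["edges"](img, lower_threshold=0.25)
print(len(intervals))  # one interval list per image row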
| 31.19685
| 120
| 0.595154
|
import typing
from random import randint, random as random_range
from PIL import ImageFilter, Image
from pixelsort.sorting import lightness
def edge(image: Image.Image, lower_threshold: float, **_) -> typing.List[typing.List[int]]:
"""Performs an edge detection, which is used to define intervals. Tweak threshold with threshold."""
edge_data = image.filter(ImageFilter.FIND_EDGES).convert('RGBA').load()
intervals = []
for y in range(image.size[1]):
intervals.append([])
flag = True
for x in range(image.size[0]):
if lightness(edge_data[x, y]) < lower_threshold * 255:
flag = True
elif flag:
intervals[y].append(x)
flag = False
return intervals
def threshold(image: Image.Image, lower_threshold: float, upper_threshold: float, **_) -> typing.List[typing.List[int]]:
"""Intervals defined by lightness thresholds; only pixels with a lightness between the upper and lower thresholds
are sorted."""
intervals = []
image_data = image.load()
for y in range(image.size[1]):
intervals.append([])
for x in range(image.size[0]):
level = lightness(image_data[x, y])
if level < lower_threshold * 255 or level > upper_threshold * 255:
intervals[y].append(x)
return intervals
def random(image, char_length, **_) -> typing.List[typing.List[int]]:
"""Randomly generate intervals. Distribution of widths is linear by default. Interval widths can be scaled using
char_length."""
intervals = []
for y in range(image.size[1]):
intervals.append([])
x = 0
while True:
x += int(char_length * random_range())
if x > image.size[0]:
break
else:
intervals[y].append(x)
return intervals
def waves(image, char_length, **_) -> typing.List[typing.List[int]]:
"""Intervals are waves of nearly uniform widths. Control width of waves with char_length."""
intervals = []
for y in range(image.size[1]):
intervals.append([])
x = 0
while True:
x += char_length + randint(0, 10)
if x > image.size[0]:
break
else:
intervals[y].append(x)
return intervals
def file_mask(image, interval_image, **_) -> typing.List[typing.List[int]]:
"""Intervals taken from another specified input image. Must be black and white, and the same size as the input
image."""
intervals = []
data = interval_image.load()
for y in range(image.size[1]):
intervals.append([])
flag = True
for x in range(image.size[0]):
if data[x, y]:
flag = True
elif flag:
intervals[y].append(x)
flag = False
return intervals
def file_edges(image, interval_image, lower_threshold, **_) -> typing.List[typing.List[int]]:
"""Intervals defined by performing edge detection on the file specified by -f. Must be the same size as the input
image."""
edge_data = interval_image.filter(
ImageFilter.FIND_EDGES).convert('RGBA').load()
intervals = []
for y in range(image.size[1]):
intervals.append([])
flag = True
for x in range(image.size[0]):
if lightness(edge_data[x, y]) < lower_threshold * 255:
flag = True
elif flag:
intervals[y].append(x)
flag = False
return intervals
def none(image, **_) -> typing.List[typing.List[int]]:
"""Sort whole rows, only stopping at image borders."""
intervals = []
for y in range(image.size[1]):
intervals.append([])
return intervals
choices = {
"random": random,
"threshold": threshold,
"edges": edge,
"waves": waves,
"file": file_mask,
"file-edges": file_edges,
"none": none
}
| 0
| 0
| 0
|
39675b4eae9bd0f8017c2c07deb058b60fae1c72
| 8,960
|
py
|
Python
|
popup_field/views.py
|
spcrxj/django-popup-field
|
0b9284c039a7c6453a3e0bb877010d67cc3d83dd
|
[
"BSD-3-Clause"
] | 10
|
2018-09-14T09:49:41.000Z
|
2021-03-12T19:12:10.000Z
|
popup_field/views.py
|
spcrxj/django-popup-field
|
0b9284c039a7c6453a3e0bb877010d67cc3d83dd
|
[
"BSD-3-Clause"
] | 1
|
2019-03-11T00:53:24.000Z
|
2019-03-11T00:53:24.000Z
|
popup_field/views.py
|
yinkh/django-popup-field
|
0b9284c039a7c6453a3e0bb877010d67cc3d83dd
|
[
"BSD-3-Clause"
] | 7
|
2019-05-02T19:44:51.000Z
|
2021-06-28T23:31:32.000Z
|
import django
from django.conf import settings
from django.utils.decorators import classonlymethod
from django.views.generic import CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http.response import JsonResponse
from django.template.response import TemplateResponse
from django.core.exceptions import ImproperlyConfigured
from .fields import ForeignKeyWidget, ManyToManyWidget
if django.VERSION >= (2, 0):
from django.urls import path, include
else:
from django.conf.urls import url, include
| 37.805907
| 119
| 0.642076
|
import django
from django.conf import settings
from django.utils.decorators import classonlymethod
from django.views.generic import CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http.response import JsonResponse
from django.template.response import TemplateResponse
from django.core.exceptions import ImproperlyConfigured
from .fields import ForeignKeyWidget, ManyToManyWidget
if django.VERSION >= (2, 0):
from django.urls import path, include
else:
from django.conf.urls import url, include
class PopupCreateView(PermissionRequiredMixin, CreateView):
popup_name = None
def get_context_data(self, **kwargs):
if 'to_field' in self.request.GET:
kwargs['to_field'] = self.request.GET['to_field']
kwargs['popup_name'] = self.popup_name
return super(PopupCreateView, self).get_context_data(**kwargs)
def form_valid(self, form):
self.object = form.save()
context = {'op': 'create', 'id': self.object.id, 'value': self.object.__str__()}
if 'to_field' in self.request.GET:
context['to_field'] = self.request.GET['to_field']
return TemplateResponse(self.request, 'popup/success.html', context=context)
class PopupUpdateView(PermissionRequiredMixin, UpdateView):
slug_field = 'id'
context_object_name = 'popup'
popup_name = None
def get_context_data(self, **kwargs):
if 'to_field' in self.request.GET:
kwargs['to_field'] = self.request.GET['to_field']
kwargs['popup_name'] = self.popup_name
return super(PopupUpdateView, self).get_context_data(**kwargs)
def form_valid(self, form):
self.object = form.save()
context = {'op': 'update', 'id': self.object.id, 'value': self.object.__str__()}
if 'to_field' in self.request.GET:
context['to_field'] = self.request.GET['to_field']
return TemplateResponse(self.request, 'popup/success.html', context=context)
class PopupDeleteView(PermissionRequiredMixin, DeleteView):
slug_field = 'id'
def delete(self, request, *args, **kwargs):
if not self.model:
raise ImproperlyConfigured('model must be override in PopupDeleteView')
self.object = self.get_object()
data = {'op': 'delete', 'id': self.object.id, 'value': self.object.__str__()}
self.object.delete()
return JsonResponse(data=data)
class PopupCRUDViewSet(object):
model = None
form_class = None
class_name = None
class_verbose_name = None
template_name_create = None
template_name_update = None
template_name_fk = None
template_name_m2m = None
context_for_all = {}
context_for_create = {}
context_for_update = {}
    # parent class for PopupCreateView, PopupUpdateView, PopupDeleteView
parent_class = object
"""
permissions_required = {
'create': ('post.add_category',),
'update': ('post.update_category',),
'delete': ('post.delete_category',)
}
"""
raise_exception = True
permissions_required = {}
@classonlymethod
def get_template_name_create(cls):
if cls.template_name_create is None:
template_name = getattr(settings, 'POPUP_TEMPLATE_NAME_CREATE', None)
if template_name is None:
raise ImproperlyConfigured('You must set template_name_create in PopupCRUDViewSet or '
'set POPUP_TEMPLATE_NAME_CREATE in django settings')
else:
return template_name
else:
return cls.template_name_create
@classonlymethod
def get_template_name_update(cls):
if cls.template_name_update is None:
template_name = getattr(settings, 'POPUP_TEMPLATE_NAME_UPDATE', None)
if template_name is None:
raise ImproperlyConfigured('You must set template_name_update in PopupCRUDViewSet or '
'set POPUP_TEMPLATE_NAME_UPDATE in django settings')
else:
return template_name
else:
return cls.template_name_update
@classonlymethod
def get_class_name(cls):
if cls.class_name is None:
return cls.model.__name__.lower()
else:
return cls.class_name
@classonlymethod
def get_class_verbose_name(cls):
if cls.class_verbose_name is None:
return cls.model._meta.verbose_name
else:
return cls.class_verbose_name
@classonlymethod
def create(cls):
"""
Returns the create view that can be specified as the second argument
to url() in urls.py.
"""
class NewPopupCreateView(PopupCreateView, cls.parent_class):
model = cls.model
form_class = cls.form_class
popup_name = cls.get_class_verbose_name()
template_name = cls.get_template_name_create()
permission_required = cls.get_permission_required('create')
def get_context_data(self, **kwargs):
kwargs.update(cls.context_for_all)
kwargs.update(cls.context_for_create)
return super(NewPopupCreateView, self).get_context_data(**kwargs)
return NewPopupCreateView
    @staticmethod
    def create_view_context(kwargs):
        # Hook that subclasses may override to inject extra create-view context.
        return kwargs
@classonlymethod
def update(cls):
"""
Returns the update view that can be specified as the second argument
to url() in urls.py.
"""
class NewPopupUpdateView(PopupUpdateView, cls.parent_class):
model = cls.model
form_class = cls.form_class
popup_name = cls.get_class_verbose_name()
template_name = cls.get_template_name_update()
permission_required = cls.get_permission_required('update')
def get_context_data(self, **kwargs):
kwargs.update(cls.context_for_all)
kwargs.update(cls.context_for_update)
return super(NewPopupUpdateView, self).get_context_data(**kwargs)
return NewPopupUpdateView
@classonlymethod
def delete(cls):
"""
Returns the delete view that can be specified as the second argument
to url() in urls.py.
"""
class PopupDeleteViewView(PopupDeleteView, cls.parent_class):
model = cls.model
form_class = cls.form_class
permission_required = cls.get_permission_required('delete')
return PopupDeleteViewView
@classonlymethod
def urls(cls):
"""
        Generate urls and url names for the create, update and delete views.
        The default url name is `<classname>_popup_<action>`.
"""
class_name = cls.get_class_name()
if django.VERSION >= (2, 0):
return path('{}/'.format(class_name), include([
path('popup/', cls.create().as_view(), name='{}_popup_create'.format(class_name)),
path('popup/<int:pk>/', cls.update().as_view(), name='{}_popup_update'.format(class_name)),
path('popup/delete/<int:pk>/', cls.delete().as_view(), name='{}_popup_delete'.format(class_name)),
]))
else:
return url(r'^{}/'.format(class_name), include([
url(r'^popup/$', cls.create().as_view(), name='{}_popup_create'.format(class_name)),
url(r'^popup/(?P<pk>\d+)/$', cls.update().as_view(), name='{}_popup_update'.format(class_name)),
url(r'^popup/delete/(?P<pk>\d+)/$', cls.delete().as_view(), name='{}_popup_delete'.format(class_name)),
]))
@classonlymethod
def get_fk_popup_field(cls, *args, **kwargs):
"""
        Generate a foreign-key popup widget wired to this viewset's create url.
"""
kwargs['popup_name'] = cls.get_class_verbose_name()
kwargs['permissions_required'] = cls.permissions_required
if cls.template_name_fk is not None:
kwargs['template_name'] = cls.template_name_fk
return ForeignKeyWidget('{}_popup_create'.format(cls.get_class_name()), *args, **kwargs)
@classonlymethod
def get_m2m_popup_field(cls, *args, **kwargs):
"""
        Generate a many-to-many popup widget wired to this viewset's create url.
"""
kwargs['popup_name'] = cls.get_class_verbose_name()
kwargs['permissions_required'] = cls.permissions_required
if cls.template_name_m2m is not None:
kwargs['template_name'] = cls.template_name_m2m
return ManyToManyWidget('{}_popup_create'.format(cls.get_class_name()), *args, **kwargs)
@classonlymethod
def get_permission_required(cls, action):
"""
        Return the permissions required for the CRUD operation given in ``action``.
        The default implementation looks ``action`` up in ``permissions_required``
        and falls back to an empty list.
"""
return cls.permissions_required.get(action, [])
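A concrete viewset then only declares the model specifics (a hypothetical Category example; the model, form and template names are illustrative, and the permission strings mirror the sample in the class docstring above):

class CategoryPopupCRUDViewSet(PopupCRUDViewSet):
    model = Category            # hypothetical model
    form_class = CategoryForm   # hypothetical ModelForm
    template_name_create = 'popup/create.html'
    template_name_update = 'popup/update.html'
    permissions_required = {
        'create': ('post.add_category',),
        'update': ('post.update_category',),
        'delete': ('post.delete_category',),
    }
# urls.py: urlpatterns = [CategoryPopupCRUDViewSet.urls()]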
| 3,161
| 5,144
| 92
|
7297dac8ad7e2a204b1900de986dabdeacbb7cbb
| 458
|
py
|
Python
|
chapter10/10-7.py
|
alberthao/Python-Crash-Course-Homework
|
105ffb3075db075425d6cf0d08d9837ef0548866
|
[
"MIT"
] | 138
|
2019-07-26T13:42:31.000Z
|
2021-04-13T23:51:49.000Z
|
chapter10/10-7.py
|
alberthao/Python-Crash-Course-Homework
|
105ffb3075db075425d6cf0d08d9837ef0548866
|
[
"MIT"
] | 6
|
2019-07-20T13:47:47.000Z
|
2019-08-04T06:49:06.000Z
|
chapter10/10-7.py
|
alberthao/Python-Crash-Course-Homework
|
105ffb3075db075425d6cf0d08d9837ef0548866
|
[
"MIT"
] | 51
|
2019-07-26T09:46:28.000Z
|
2021-03-29T07:58:16.000Z
|
print("Enter 'q' at any time to quit.\n")
while True:
try:
x = input("\nGive me a number: ")
if x == 'q':
break
x = int(x)
y = input("Give me another number: ")
if y == 'q':
break
y = int(y)
except ValueError:
print("Sorry, I really needed a number.")
else:
        total = x + y
        print("The sum of " + str(x) + " and " + str(y) + " is " + str(total) + ".")
| 20.818182
| 82
| 0.434498
|
print("Enter 'q' at any time to quit.\n")
while True:
try:
x = input("\nGive me a number: ")
if x == 'q':
break
x = int(x)
y = input("Give me another number: ")
if y == 'q':
break
y = int(y)
except ValueError:
print("Sorry, I really needed a number.")
else:
        total = x + y
        print("The sum of " + str(x) + " and " + str(y) + " is " + str(total) + ".")
| 0
| 0
| 0
|
199819f81209ebbe29e58acb795f12caf099835a
| 1,402
|
py
|
Python
|
bin/cds-efas.py
|
fmidev/harvesterseasons-smartmet
|
2a0538f1d7f95f8085273d4ff93d39066780c2a7
|
[
"MIT"
] | 1
|
2021-04-09T16:48:26.000Z
|
2021-04-09T16:48:26.000Z
|
bin/cds-efas.py
|
fmidev/harvesterseasons-smartmet
|
2a0538f1d7f95f8085273d4ff93d39066780c2a7
|
[
"MIT"
] | null | null | null |
bin/cds-efas.py
|
fmidev/harvesterseasons-smartmet
|
2a0538f1d7f95f8085273d4ff93d39066780c2a7
|
[
"MIT"
] | 1
|
2021-07-21T11:27:02.000Z
|
2021-07-21T11:27:02.000Z
|
#!/usr/bin/env python3
import sys
import cdsapi
year= sys.argv[1]
month= sys.argv[2]
c = cdsapi.Client()
c.retrieve(
'efas-historical',
{
'format': 'grib',
'origin': 'ecmwf',
'simulation_version': 'version_3_5',
'variable': [
'soil_depth', 'volumetric_soil_moisture',
],
'model_levels': 'soil_levels',
'soil_level': [
'1', '2', '3',
],
'hyear': year,
'hmonth': month,
'hday': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
'13', '14', '15',
'16', '17', '18',
'19', '20', '21',
'22', '23', '24',
'25', '26', '27',
'28', '29', '30',
'31',
]
},
'/home/smartmet/data/efas-ana-%s.grib'%(year))
| 35.948718
| 77
| 0.245364
|
#!/usr/bin/env python3
import sys
import cdsapi
year= sys.argv[1]
month= sys.argv[2]
c = cdsapi.Client()
c.retrieve(
'efas-historical',
{
'format': 'grib',
'origin': 'ecmwf',
'simulation_version': 'version_3_5',
'variable': [
'soil_depth', 'volumetric_soil_moisture',
],
'model_levels': 'soil_levels',
'soil_level': [
'1', '2', '3',
],
'hyear': year,
'hmonth': month,
'hday': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
'13', '14', '15',
'16', '17', '18',
'19', '20', '21',
'22', '23', '24',
'25', '26', '27',
'28', '29', '30',
'31',
]
},
'/home/smartmet/data/efas-ana-%s.grib'%(year))
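The script is driven from the command line with a year and a month (a sketch; it assumes CDS API credentials are configured in ~/.cdsapirc):

$ python3 cds-efas.py 2020 01   # writes /home/smartmet/data/efas-ana-2020.grib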
| 0
| 0
| 0
|
9aeb8240c4110e464b0d04ac28750b16f3b40727
| 8,197
|
py
|
Python
|
mcpipy/mcpi/block.py
|
sprintingkiwi/pycraft_mod
|
a1ce8817ccff3e701aa787a2e531df5a6cc8f887
|
[
"MIT"
] | 15
|
2017-08-27T15:33:19.000Z
|
2021-05-05T07:30:57.000Z
|
pycraft_minetest-0.7/pycraft_minetest/blocklist.py
|
Lopastudio/complete-builder-for-minetest
|
23f1bca1f2aa87146c759ad3e8e5b8d62aec57a4
|
[
"MIT"
] | 2
|
2018-11-30T19:56:02.000Z
|
2021-09-24T05:20:39.000Z
|
pycraft_minetest-0.7/pycraft_minetest/blocklist.py
|
Lopastudio/complete-builder-for-minetest
|
23f1bca1f2aa87146c759ad3e8e5b8d62aec57a4
|
[
"MIT"
] | 4
|
2018-05-10T19:37:06.000Z
|
2019-09-02T19:28:14.000Z
|
from . import settings
class Block:
"""Minecraft PI block description. Can be sent to Minecraft.setBlock/s"""
def __iter__(self):
"""Allows a Block to be sent whenever id [and data] is needed"""
if self.nbt is not None:
return iter((self.id, self.data, self.nbt))
else:
return iter((self.id, self.data))
AIR = Block(0)
STONE = Block(1)
GRASS = Block(2)
DIRT = Block(3)
COBBLESTONE = Block(4)
WOOD_PLANKS = Block(5)
SAPLING = Block(6)
BEDROCK = Block(7)
WATER_FLOWING = Block(8)
WATER = WATER_FLOWING
WATER_STATIONARY = Block(9)
LAVA_FLOWING = Block(10)
LAVA = LAVA_FLOWING
LAVA_STATIONARY = Block(11)
SAND = Block(12)
GRAVEL = Block(13)
GOLD_ORE = Block(14)
IRON_ORE = Block(15)
COAL_ORE = Block(16)
WOOD = Block(17)
LEAVES = Block(18)
GLASS = Block(20)
LAPIS_LAZULI_ORE = Block(21)
LAPIS_LAZULI_BLOCK = Block(22)
SANDSTONE = Block(24)
BED = Block(26)
COBWEB = Block(30)
GRASS_TALL = Block(31)
WOOL = Block(35)
FLOWER_YELLOW = Block(37)
FLOWER_CYAN = Block(38)
MUSHROOM_BROWN = Block(39)
MUSHROOM_RED = Block(40)
GOLD_BLOCK = Block(41)
IRON_BLOCK = Block(42)
STONE_SLAB_DOUBLE = Block(43)
STONE_SLAB = Block(44)
BRICK_BLOCK = Block(45)
TNT = Block(46)
BOOKSHELF = Block(47)
MOSS_STONE = Block(48)
OBSIDIAN = Block(49)
TORCH = Block(50)
FIRE = Block(51)
STAIRS_WOOD = Block(53)
CHEST = Block(54)
DIAMOND_ORE = Block(56)
DIAMOND_BLOCK = Block(57)
CRAFTING_TABLE = Block(58)
FARMLAND = Block(60)
FURNACE_INACTIVE = Block(61)
FURNACE_ACTIVE = Block(62)
DOOR_WOOD = Block(64)
LADDER = Block(65)
STAIRS_COBBLESTONE = Block(67)
DOOR_IRON = Block(71)
REDSTONE_ORE = Block(73)
STONE_BUTTON = Block(77)
SNOW = Block(78)
ICE = Block(79)
SNOW_BLOCK = Block(80)
CACTUS = Block(81)
CLAY = Block(82)
SUGAR_CANE = Block(83)
FENCE = Block(85)
GLOWSTONE_BLOCK = Block(89)
BEDROCK_INVISIBLE = Block(95)
if settings.isPE:
STAINED_GLASS = WOOL
else:
STAINED_GLASS = Block(95)
STONE_BRICK = Block(98)
GLASS_PANE = Block(102)
MELON = Block(103)
FENCE_GATE = Block(107)
WOOD_BUTTON = Block(143)
REDSTONE_BLOCK = Block(152)
QUARTZ_BLOCK = Block(155)
if settings.isPE:
HARDENED_CLAY_STAINED = WOOL
else:
HARDENED_CLAY_STAINED = Block(159)
if settings.isPE:
SEA_LANTERN = Block(246) # glowing obsidian
else:
SEA_LANTERN = Block(169)
CARPET = Block(171)
COAL_BLOCK = Block(173)
if settings.isPE:
GLOWING_OBSIDIAN = Block(246)
NETHER_REACTOR_CORE = Block(247)
REDSTONE_LAMP_INACTIVE = OBSIDIAN
REDSTONE_LAMP_ACTIVE = GLOWING_OBSIDIAN
else:
GLOWING_OBSIDIAN = SEA_LANTERN
NETHER_REACTOR_CORE = SEA_LANTERN
REDSTONE_LAMP_INACTIVE = Block(123)
REDSTONE_LAMP_ACTIVE = Block(124)
SUNFLOWER = Block(175,0)
LILAC = Block(175,1)
DOUBLE_TALLGRASS = Block(175,2)
LARGE_FERN = Block(175,3)
ROSE_BUSH = Block(175,4)
PEONY = Block(175,5)
WOOL_WHITE = Block(WOOL.id, 0)
WOOL_ORANGE = Block(WOOL.id, 1)
WOOL_MAGENTA = Block(WOOL.id, 2)
WOOL_LIGHT_BLUE = Block(WOOL.id, 3)
WOOL_YELLOW = Block(WOOL.id, 4)
WOOL_LIME = Block(WOOL.id, 5)
WOOL_PINK = Block(WOOL.id, 6)
WOOL_GRAY = Block(WOOL.id, 7)
WOOL_LIGHT_GRAY = Block(WOOL.id, 8)
WOOL_CYAN = Block(WOOL.id, 9)
WOOL_PURPLE = Block(WOOL.id, 10)
WOOL_BLUE = Block(WOOL.id, 11)
WOOL_BROWN = Block(WOOL.id, 12)
WOOL_GREEN = Block(WOOL.id, 13)
WOOL_RED = Block(WOOL.id, 14)
WOOL_BLACK = Block(WOOL.id, 15)
CARPET_WHITE = Block(CARPET.id, 0)
CARPET_ORANGE = Block(CARPET.id, 1)
CARPET_MAGENTA = Block(CARPET.id, 2)
CARPET_LIGHT_BLUE = Block(CARPET.id, 3)
CARPET_YELLOW = Block(CARPET.id, 4)
CARPET_LIME = Block(CARPET.id, 5)
CARPET_PINK = Block(CARPET.id, 6)
CARPET_GRAY = Block(CARPET.id, 7)
CARPET_LIGHT_GRAY = Block(CARPET.id, 8)
CARPET_CYAN = Block(CARPET.id, 9)
CARPET_PURPLE = Block(CARPET.id, 10)
CARPET_BLUE = Block(CARPET.id, 11)
CARPET_BROWN = Block(CARPET.id, 12)
CARPET_GREEN = Block(CARPET.id, 13)
CARPET_RED = Block(CARPET.id, 14)
CARPET_BLACK = Block(CARPET.id, 15)
STAINED_GLASS_WHITE = Block(STAINED_GLASS.id, 0)
STAINED_GLASS_ORANGE = Block(STAINED_GLASS.id, 1)
STAINED_GLASS_MAGENTA = Block(STAINED_GLASS.id, 2)
STAINED_GLASS_LIGHT_BLUE = Block(STAINED_GLASS.id, 3)
STAINED_GLASS_YELLOW = Block(STAINED_GLASS.id, 4)
STAINED_GLASS_LIME = Block(STAINED_GLASS.id, 5)
STAINED_GLASS_PINK = Block(STAINED_GLASS.id, 6)
STAINED_GLASS_GRAY = Block(STAINED_GLASS.id, 7)
STAINED_GLASS_LIGHT_GRAY = Block(STAINED_GLASS.id, 8)
STAINED_GLASS_CYAN = Block(STAINED_GLASS.id, 9)
STAINED_GLASS_PURPLE = Block(STAINED_GLASS.id, 10)
STAINED_GLASS_BLUE = Block(STAINED_GLASS.id, 11)
STAINED_GLASS_BROWN = Block(STAINED_GLASS.id, 12)
STAINED_GLASS_GREEN = Block(STAINED_GLASS.id, 13)
STAINED_GLASS_RED = Block(STAINED_GLASS.id, 14)
STAINED_GLASS_BLACK = Block(STAINED_GLASS.id, 15)
HARDENED_CLAY_STAINED_WHITE = Block(HARDENED_CLAY_STAINED.id, 0)
HARDENED_CLAY_STAINED_ORANGE = Block(HARDENED_CLAY_STAINED.id, 1)
HARDENED_CLAY_STAINED_MAGENTA = Block(HARDENED_CLAY_STAINED.id, 2)
HARDENED_CLAY_STAINED_LIGHT_BLUE = Block(HARDENED_CLAY_STAINED.id, 3)
HARDENED_CLAY_STAINED_YELLOW = Block(HARDENED_CLAY_STAINED.id, 4)
HARDENED_CLAY_STAINED_LIME = Block(HARDENED_CLAY_STAINED.id, 5)
HARDENED_CLAY_STAINED_PINK = Block(HARDENED_CLAY_STAINED.id, 6)
HARDENED_CLAY_STAINED_GRAY = Block(HARDENED_CLAY_STAINED.id, 7)
HARDENED_CLAY_STAINED_LIGHT_GRAY = Block(HARDENED_CLAY_STAINED.id, 8)
HARDENED_CLAY_STAINED_CYAN = Block(HARDENED_CLAY_STAINED.id, 9)
HARDENED_CLAY_STAINED_PURPLE = Block(HARDENED_CLAY_STAINED.id, 10)
HARDENED_CLAY_STAINED_BLUE = Block(HARDENED_CLAY_STAINED.id, 11)
HARDENED_CLAY_STAINED_BROWN = Block(HARDENED_CLAY_STAINED.id, 12)
HARDENED_CLAY_STAINED_GREEN = Block(HARDENED_CLAY_STAINED.id, 13)
HARDENED_CLAY_STAINED_RED = Block(HARDENED_CLAY_STAINED.id, 14)
HARDENED_CLAY_STAINED_BLACK = Block(HARDENED_CLAY_STAINED.id, 15)
LEAVES_OAK_DECAYABLE = Block(LEAVES.id, 0)
LEAVES_SPRUCE_DECAYABLE = Block(LEAVES.id, 1)
LEAVES_BIRCH_DECAYABLE = Block(LEAVES.id, 2)
LEAVES_JUNGLE_DECAYABLE = Block(LEAVES.id, 3)
LEAVES_OAK_PERMANENT = Block(LEAVES.id, 4)
LEAVES_SPRUCE_PERMANENT = Block(LEAVES.id, 5)
LEAVES_BIRCH_PERMANENT = Block(LEAVES.id, 6)
LEAVES_JUNGLE_PERMANENT = Block(LEAVES.id, 7)
if settings.isPE:
LEAVES_ACACIA_DECAYABLE = Block(161,0)
LEAVES_DARK_OAK_DECAYABLE = Block(161,1)
LEAVES_ACACIA_PERMANENT = Block(161,2)
LEAVES_DARK_OAK_PERMANENT = Block(161,3)
else:
LEAVES_ACACIA_DECAYABLE = LEAVES_OAK_DECAYABLE
LEAVES_DARK_OAK_DECAYABLE = LEAVES_JUNGLE_DECAYABLE
LEAVES_ACACIA_PERMANENT = LEAVES_OAK_PERMANENT
LEAVES_DARK_OAK_PERMANENT = LEAVES_JUNGLE_PERMANENT
| 34.012448
| 86
| 0.665487
|
from . import settings
class Block:
"""Minecraft PI block description. Can be sent to Minecraft.setBlock/s"""
def __init__(self, id, data=0, nbt=None):
self.id = id
self.data = data
if nbt is not None and len(nbt)==0:
self.nbt = None
else:
self.nbt = nbt
    def __eq__(self, rhs):
        try:
            return self.id == rhs.id and self.data == rhs.data and self.nbt == rhs.nbt
        except AttributeError:
            # fall back to comparing against a bare block id
            return self.data == 0 and self.nbt is None and self.id == rhs
    def __ne__(self, rhs):
        return not self.__eq__(rhs)
    def __hash__(self):
        h = (self.id << 8) + self.data
        if self.nbt is not None:
            h ^= hash(self.nbt)
        return h
def withData(self, data):
return Block(self.id, data)
def __iter__(self):
"""Allows a Block to be sent whenever id [and data] is needed"""
if self.nbt is not None:
return iter((self.id, self.data, self.nbt))
else:
return iter((self.id, self.data))
def __repr__(self):
if self.nbt is None:
return "Block(%d, %d)"%(self.id, self.data)
else:
return "Block(%d, %d, %s)"%(self.id, self.data, repr(self.nbt))
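# Example usage (a sketch): blocks compare by id/data/nbt and unpack to (id, data):
#     b = Block(35, 14)                  # red wool
#     assert b == Block(35).withData(14)
#     ident, data = b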
AIR = Block(0)
STONE = Block(1)
GRASS = Block(2)
DIRT = Block(3)
COBBLESTONE = Block(4)
WOOD_PLANKS = Block(5)
SAPLING = Block(6)
BEDROCK = Block(7)
WATER_FLOWING = Block(8)
WATER = WATER_FLOWING
WATER_STATIONARY = Block(9)
LAVA_FLOWING = Block(10)
LAVA = LAVA_FLOWING
LAVA_STATIONARY = Block(11)
SAND = Block(12)
GRAVEL = Block(13)
GOLD_ORE = Block(14)
IRON_ORE = Block(15)
COAL_ORE = Block(16)
WOOD = Block(17)
LEAVES = Block(18)
GLASS = Block(20)
LAPIS_LAZULI_ORE = Block(21)
LAPIS_LAZULI_BLOCK = Block(22)
SANDSTONE = Block(24)
BED = Block(26)
COBWEB = Block(30)
GRASS_TALL = Block(31)
WOOL = Block(35)
FLOWER_YELLOW = Block(37)
FLOWER_CYAN = Block(38)
MUSHROOM_BROWN = Block(39)
MUSHROOM_RED = Block(40)
GOLD_BLOCK = Block(41)
IRON_BLOCK = Block(42)
STONE_SLAB_DOUBLE = Block(43)
STONE_SLAB = Block(44)
BRICK_BLOCK = Block(45)
TNT = Block(46)
BOOKSHELF = Block(47)
MOSS_STONE = Block(48)
OBSIDIAN = Block(49)
TORCH = Block(50)
FIRE = Block(51)
STAIRS_WOOD = Block(53)
CHEST = Block(54)
DIAMOND_ORE = Block(56)
DIAMOND_BLOCK = Block(57)
CRAFTING_TABLE = Block(58)
FARMLAND = Block(60)
FURNACE_INACTIVE = Block(61)
FURNACE_ACTIVE = Block(62)
DOOR_WOOD = Block(64)
LADDER = Block(65)
STAIRS_COBBLESTONE = Block(67)
DOOR_IRON = Block(71)
REDSTONE_ORE = Block(73)
STONE_BUTTON = Block(77)
SNOW = Block(78)
ICE = Block(79)
SNOW_BLOCK = Block(80)
CACTUS = Block(81)
CLAY = Block(82)
SUGAR_CANE = Block(83)
FENCE = Block(85)
GLOWSTONE_BLOCK = Block(89)
BEDROCK_INVISIBLE = Block(95)
if settings.isPE:
STAINED_GLASS = WOOL
else:
STAINED_GLASS = Block(95)
STONE_BRICK = Block(98)
GLASS_PANE = Block(102)
MELON = Block(103)
FENCE_GATE = Block(107)
WOOD_BUTTON = Block(143)
REDSTONE_BLOCK = Block(152)
QUARTZ_BLOCK = Block(155)
if settings.isPE:
HARDENED_CLAY_STAINED = WOOL
else:
HARDENED_CLAY_STAINED = Block(159)
if settings.isPE:
SEA_LANTERN = Block(246) # glowing obsidian
else:
SEA_LANTERN = Block(169)
CARPET = Block(171)
COAL_BLOCK = Block(173)
if settings.isPE:
GLOWING_OBSIDIAN = Block(246)
NETHER_REACTOR_CORE = Block(247)
REDSTONE_LAMP_INACTIVE = OBSIDIAN
REDSTONE_LAMP_ACTIVE = GLOWING_OBSIDIAN
else:
GLOWING_OBSIDIAN = SEA_LANTERN
NETHER_REACTOR_CORE = SEA_LANTERN
REDSTONE_LAMP_INACTIVE = Block(123)
REDSTONE_LAMP_ACTIVE = Block(124)
SUNFLOWER = Block(175,0)
LILAC = Block(175,1)
DOUBLE_TALLGRASS = Block(175,2)
LARGE_FERN = Block(175,3)
ROSE_BUSH = Block(175,4)
PEONY = Block(175,5)
WOOL_WHITE = Block(WOOL.id, 0)
WOOL_ORANGE = Block(WOOL.id, 1)
WOOL_MAGENTA = Block(WOOL.id, 2)
WOOL_LIGHT_BLUE = Block(WOOL.id, 3)
WOOL_YELLOW = Block(WOOL.id, 4)
WOOL_LIME = Block(WOOL.id, 5)
WOOL_PINK = Block(WOOL.id, 6)
WOOL_GRAY = Block(WOOL.id, 7)
WOOL_LIGHT_GRAY = Block(WOOL.id, 8)
WOOL_CYAN = Block(WOOL.id, 9)
WOOL_PURPLE = Block(WOOL.id, 10)
WOOL_BLUE = Block(WOOL.id, 11)
WOOL_BROWN = Block(WOOL.id, 12)
WOOL_GREEN = Block(WOOL.id, 13)
WOOL_RED = Block(WOOL.id, 14)
WOOL_BLACK = Block(WOOL.id, 15)
CARPET_WHITE = Block(CARPET.id, 0)
CARPET_ORANGE = Block(CARPET.id, 1)
CARPET_MAGENTA = Block(CARPET.id, 2)
CARPET_LIGHT_BLUE = Block(CARPET.id, 3)
CARPET_YELLOW = Block(CARPET.id, 4)
CARPET_LIME = Block(CARPET.id, 5)
CARPET_PINK = Block(CARPET.id, 6)
CARPET_GRAY = Block(CARPET.id, 7)
CARPET_LIGHT_GRAY = Block(CARPET.id, 8)
CARPET_CYAN = Block(CARPET.id, 9)
CARPET_PURPLE = Block(CARPET.id, 10)
CARPET_BLUE = Block(CARPET.id, 11)
CARPET_BROWN = Block(CARPET.id, 12)
CARPET_GREEN = Block(CARPET.id, 13)
CARPET_RED = Block(CARPET.id, 14)
CARPET_BLACK = Block(CARPET.id, 15)
STAINED_GLASS_WHITE = Block(STAINED_GLASS.id, 0)
STAINED_GLASS_ORANGE = Block(STAINED_GLASS.id, 1)
STAINED_GLASS_MAGENTA = Block(STAINED_GLASS.id, 2)
STAINED_GLASS_LIGHT_BLUE = Block(STAINED_GLASS.id, 3)
STAINED_GLASS_YELLOW = Block(STAINED_GLASS.id, 4)
STAINED_GLASS_LIME = Block(STAINED_GLASS.id, 5)
STAINED_GLASS_PINK = Block(STAINED_GLASS.id, 6)
STAINED_GLASS_GRAY = Block(STAINED_GLASS.id, 7)
STAINED_GLASS_LIGHT_GRAY = Block(STAINED_GLASS.id, 8)
STAINED_GLASS_CYAN = Block(STAINED_GLASS.id, 9)
STAINED_GLASS_PURPLE = Block(STAINED_GLASS.id, 10)
STAINED_GLASS_BLUE = Block(STAINED_GLASS.id, 11)
STAINED_GLASS_BROWN = Block(STAINED_GLASS.id, 12)
STAINED_GLASS_GREEN = Block(STAINED_GLASS.id, 13)
STAINED_GLASS_RED = Block(STAINED_GLASS.id, 14)
STAINED_GLASS_BLACK = Block(STAINED_GLASS.id, 15)
HARDENED_CLAY_STAINED_WHITE = Block(HARDENED_CLAY_STAINED.id, 0)
HARDENED_CLAY_STAINED_ORANGE = Block(HARDENED_CLAY_STAINED.id, 1)
HARDENED_CLAY_STAINED_MAGENTA = Block(HARDENED_CLAY_STAINED.id, 2)
HARDENED_CLAY_STAINED_LIGHT_BLUE = Block(HARDENED_CLAY_STAINED.id, 3)
HARDENED_CLAY_STAINED_YELLOW = Block(HARDENED_CLAY_STAINED.id, 4)
HARDENED_CLAY_STAINED_LIME = Block(HARDENED_CLAY_STAINED.id, 5)
HARDENED_CLAY_STAINED_PINK = Block(HARDENED_CLAY_STAINED.id, 6)
HARDENED_CLAY_STAINED_GRAY = Block(HARDENED_CLAY_STAINED.id, 7)
HARDENED_CLAY_STAINED_LIGHT_GRAY = Block(HARDENED_CLAY_STAINED.id, 8)
HARDENED_CLAY_STAINED_CYAN = Block(HARDENED_CLAY_STAINED.id, 9)
HARDENED_CLAY_STAINED_PURPLE = Block(HARDENED_CLAY_STAINED.id, 10)
HARDENED_CLAY_STAINED_BLUE = Block(HARDENED_CLAY_STAINED.id, 11)
HARDENED_CLAY_STAINED_BROWN = Block(HARDENED_CLAY_STAINED.id, 12)
HARDENED_CLAY_STAINED_GREEN = Block(HARDENED_CLAY_STAINED.id, 13)
HARDENED_CLAY_STAINED_RED = Block(HARDENED_CLAY_STAINED.id, 14)
HARDENED_CLAY_STAINED_BLACK = Block(HARDENED_CLAY_STAINED.id, 15)
LEAVES_OAK_DECAYABLE = Block(LEAVES.id, 0)
LEAVES_SPRUCE_DECAYABLE = Block(LEAVES.id, 1)
LEAVES_BIRCH_DECAYABLE = Block(LEAVES.id, 2)
LEAVES_JUNGLE_DECAYABLE = Block(LEAVES.id, 3)
LEAVES_OAK_PERMANENT = Block(LEAVES.id, 4)
LEAVES_SPRUCE_PERMANENT = Block(LEAVES.id, 5)
LEAVES_BIRCH_PERMANENT = Block(LEAVES.id, 6)
LEAVES_JUNGLE_PERMANENT = Block(LEAVES.id, 7)
if settings.isPE:
LEAVES_ACACIA_DECAYABLE = Block(161,0)
LEAVES_DARK_OAK_DECAYABLE = Block(161,1)
LEAVES_ACACIA_PERMANENT = Block(161,2)
LEAVES_DARK_OAK_PERMANENT = Block(161,3)
else:
LEAVES_ACACIA_DECAYABLE = LEAVES_OAK_DECAYABLE
LEAVES_DARK_OAK_DECAYABLE = LEAVES_JUNGLE_DECAYABLE
LEAVES_ACACIA_PERMANENT = LEAVES_OAK_PERMANENT
LEAVES_DARK_OAK_PERMANENT = LEAVES_JUNGLE_PERMANENT
| 722
| 0
| 161
|
52878d327589645b14ad5e2436d611def2a25388
| 2,280
|
py
|
Python
|
docs/code/wk8b.py
|
bond-lab/Language-and-the-Computer
|
58d808cdbe6873256f1d0fa091ebc5d909f211bb
|
[
"CC-BY-4.0"
] | null | null | null |
docs/code/wk8b.py
|
bond-lab/Language-and-the-Computer
|
58d808cdbe6873256f1d0fa091ebc5d909f211bb
|
[
"CC-BY-4.0"
] | null | null | null |
docs/code/wk8b.py
|
bond-lab/Language-and-the-Computer
|
58d808cdbe6873256f1d0fa091ebc5d909f211bb
|
[
"CC-BY-4.0"
] | null | null | null |
#Write a function shorten(text, n) to process a text,
# omitting the n most frequently occurring words of the text.
# How readable is it?
import nltk
def shorten(text, n):
"""Delete the most frequent n words from a text (list of words)"""
assert isinstance(text, list), "The text should be a list of words"
most_frequent_words = set(w for (w,f) in nltk.FreqDist(text).most_common(n))
return [w for w in text if w not in most_frequent_words]
text = "to be or not to be that is the question".split()
out = "or not that is the question".split()
print(text)
print (shorten(text,0))
print (shorten(text,1))
print (shorten(text,1))
print (shorten(text,2))
##print shorten("to be or not to be that is the question", 2)
#Write a list comprehension that sorts a list of WordNet synsets
#for proximity to a given synset.
#For example, given the synsets minke_whale.n.01, orca.n.01, novel.n.01,
# and tortoise.n.01, sort them according to their shortest_path_distance()
# from right_whale.n.01
from nltk.corpus import wordnet as wn
whales = [wn.synset(s) for s in
"minke_whale.n.01, orca.n.01, novel.n.01, tortoise.n.01".split(', ')]
print (whales)
def semantic_sort(sslist,ss):
"""return a list of synsets, sorted by similarity to another synset"""
sim = [(ss.shortest_path_distance(s), s) for s in sslist]
return [s for (sm, s) in sorted(sim)]
print (semantic_sort(whales, wn.synset('right_whale.n.01')))
def semantic_sort1(sslist,ss):
"""return a list of synsets, sorted by similarity to another synset"""
return sorted(sslist, key=lambda x: ss.shortest_path_distance(x))
print (semantic_sort1(whales, wn.synset('right_whale.n.01')))
# Write a function that takes a list of words (containing duplicates)
# and returns a list of words (with no duplicates) sorted by
# decreasing frequency.
# E.g. if the input list contained 10 instances of the word table and
# 9 instances of the word chair, then table would appear before chair
# in the output list.
def dec_freq(liszt):
    """Take a list and return its types sorted by decreasing frequency."""
    return [w for (w, f) in nltk.FreqDist(liszt).most_common()]
# Write a program to sort words by length.
#
print (sorted(text, key=lambda x:len(x)))
print (sorted(text, key=len))
| 35.076923
| 80
| 0.713596
|
#Write a function shorten(text, n) to process a text,
# omitting the n most frequently occurring words of the text.
# How readable is it?
import nltk
def shorten(text, n):
"""Delete the most frequent n words from a text (list of words)"""
assert isinstance(text, list), "The text should be a list of words"
most_frequent_words = set(w for (w,f) in nltk.FreqDist(text).most_common(n))
return [w for w in text if w not in most_frequent_words]
text = "to be or not to be that is the question".split()
out = "or not that is the question".split()
print(text)
print (shorten(text,0))
print (shorten(text,1))
print (shorten(text,1))
print (shorten(text,2))
##print shorten("to be or not to be that is the question", 2)
#Write a list comprehension that sorts a list of WordNet synsets
#for proximity to a given synset.
#For example, given the synsets minke_whale.n.01, orca.n.01, novel.n.01,
# and tortoise.n.01, sort them according to their shortest_path_distance()
# from right_whale.n.01
from nltk.corpus import wordnet as wn
whales = [wn.synset(s) for s in
"minke_whale.n.01, orca.n.01, novel.n.01, tortoise.n.01".split(', ')]
print (whales)
def semantic_sort(sslist,ss):
"""return a list of synsets, sorted by similarity to another synset"""
sim = [(ss.shortest_path_distance(s), s) for s in sslist]
return [s for (sm, s) in sorted(sim)]
print (semantic_sort(whales, wn.synset('right_whale.n.01')))
def semantic_sort1(sslist,ss):
"""return a list of synsets, sorted by similarity to another synset"""
return sorted(sslist, key=lambda x: ss.shortest_path_distance(x))
print (semantic_sort1(whales, wn.synset('right_whale.n.01')))
# Write a function that takes a list of words (containing duplicates)
# and returns a list of words (with no duplicates) sorted by
# decreasing frequency.
# E.g. if the input list contained 10 instances of the word table and
# 9 instances of the word chair, then table would appear before chair
# in the output list.
def dec_freq(liszt):
    """Take a list and return its types sorted by decreasing frequency."""
    return [w for (w, f) in nltk.FreqDist(liszt).most_common()]
# Write a program to sort words by length.
#
print (sorted(text, key=lambda x:len(x)))
print (sorted(text, key=len))
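A quick check of dec_freq (a sketch; ties keep first-seen order on Python 3.7+):

print(dec_freq("to be or not to be".split()))
# -> ['to', 'be', 'or', 'not']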
| 0
| 0
| 0
|
9d85cfeb79dfeff38bfba2d9825cce4992fdc056
| 8,208
|
py
|
Python
|
database/backends/elastic_database.py
|
zhouhongf/loader_database
|
85e3d606d0bd2316eb5ae2c645766b3e5f4a7832
|
[
"MIT"
] | 1
|
2021-11-27T06:40:43.000Z
|
2021-11-27T06:40:43.000Z
|
database/backends/elastic_database.py
|
zhouhongf/loader_database
|
85e3d606d0bd2316eb5ae2c645766b3e5f4a7832
|
[
"MIT"
] | null | null | null |
database/backends/elastic_database.py
|
zhouhongf/loader_database
|
85e3d606d0bd2316eb5ae2c645766b3e5f4a7832
|
[
"MIT"
] | null | null | null |
import time
from config import singleton, Logger
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from datetime import datetime
from numpy import long
log = Logger().logger
@singleton
| 37.309091
| 119
| 0.510599
|
import time
from config import singleton, Logger
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from datetime import datetime
from numpy import long
log = Logger().logger
@singleton
class ElasticDatabase:
def __init__(self):
        # without username/password
# self.es = Elasticsearch([ip])
# self.es = Elasticsearch(hosts='root:password@192.168.50.172:9200')
self.es = Elasticsearch(hosts='192.168.50.172:9200')
        # with username/password
# self.es = Elasticsearch([ip], http_auth=('elastic', 'password'), port=9200)
def generate_bulk_datas_with_ids(self, index_name, datas):
for data in datas:
yield {'_index': index_name, '_id': data['_id'], '_source': data}
def generate_bulk_datas_and_ids(self, index_name, datas):
for idx, da in enumerate(datas):
idx += 1
yield {'_index': index_name, '_id': idx, '_source': da}
    # Create the index if it does not exist yet
def create_index_with_mappings(self, index_name, mappings):
if not self.es.indices.exists(index=index_name):
res = self.es.indices.create(index=index_name, ignore=400)
print(res)
res_map = self.es.indices.put_mapping(index=index_name, body=mappings)
print(res_map)
def delete_index(self, index_name):
if self.es.indices.exists(index=index_name):
res = self.es.indices.delete(index=index_name, ignore=[400, 404])
print(res)
def insert_one_data_with_id(self, index_name, data):
self.es.index(index=index_name, id=data['_id'], body=data)
def insert_one_data_and_id(self, index_name, data):
self.es.index(index=index_name, body=data)
def bulk_data_with_id(self, index_name, datas):
bulk(self.es, self.generate_bulk_datas_with_ids(index_name, datas))
def bulk_data_and_id(self, index_name, datas):
bulk(self.es, self.generate_bulk_datas_and_ids(index_name, datas))
def search_all(self, index_name):
return self.es.search(index=index_name)
def delete_one_data(self, index_name, id):
res = self.es.delete(index=index_name, id=id)
print(res)
def query_keywords(self, index_name: str, query_target: str, keywords: list):
query_content = ''
for word in keywords:
query_content += (word + ' ')
query_content = query_content[:-1]
dsl = {'query': {'match': {query_target: query_content}}}
return self.es.search(index=index_name, body=dsl)
def query_multi_keywords(self, index_name, keyOne, valueOne, keyTwo, valueTwo):
dsl = {'query': {'bool': {'must': [
{'match': {keyOne: valueOne}},
{'match': {keyTwo: valueTwo}}
]}}}
return self.es.search(index=index_name, body=dsl)
def query_multi_match(self, index_name, keyOne, keyTwo, value):
query = {'query': {'multi_match': {'query': value, 'fields': [keyOne, keyTwo]}}}
return self.es.search(index=index_name, body=query)
    # Reference: https://techoverflow.net/?s=ElasticSearch
def es_iterate_all_documents(self, index_name, pagesize=250, scroll_timeout="1m", **kwargs):
"""
Helper to iterate ALL values from a single index
Yields all the documents.
"""
        scroll_id = None
is_first = True
while True:
# Scroll next
if is_first: # Initialize scroll
result = self.es.search(index=index_name, scroll="1m", **kwargs, body={"size": pagesize})
is_first = False
else:
result = self.es.scroll(body={"scroll_id": scroll_id, "scroll": scroll_timeout})
scroll_id = result["_scroll_id"]
hits = result["hits"]["hits"]
# Stop after no more docs
if not hits:
break
# Yield each entry
yield from (hit['_source'] for hit in hits)
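# A minimal usage sketch (hypothetical index name; assumes a reachable
# cluster): the scroll API walks every document in the index without
# loading everything into memory at once.
def _example_iterate_all(index_name: str = 'news'):
    elastic = ElasticDatabase()
    for doc in elastic.es_iterate_all_documents(index_name, pagesize=100):
        print(doc.get('name'))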
def update_wealth(dataList: list):
time_start = time.perf_counter()
    print('================= Running ElasticDB WEALTH update: %s =================' % time_start)
elastic = ElasticDatabase()
index_name = 'wealth'
mappings = {
'dynamic': False,
'properties': {
'name': {
'type': 'text',
'analyzer': 'ik_max_word',
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
'bank_name': {
'type': 'text',
'analyzer': 'ik_max_word',
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
'code': {'type': 'keyword'},
'code_register': {'type': 'keyword'},
'create_time': {'type': 'date'}
}
}
elastic.create_index_with_mappings(index_name, mappings)
for data in dataList:
_id = data['_id']
if not elastic.es.exists(index=index_name, id=_id):
local_datetime = datetime.strptime(data['create_time'], "%Y-%m-%d %H:%M:%S")
            create_time = int(time.mktime(local_datetime.timetuple()) * 1000.0 + local_datetime.microsecond / 1000.0)
dataIn = {
'name': data['name'],
'bank_name': data['bank_name'],
'code': data['code'],
'code_register': data['code_register'],
'create_time': create_time
}
elastic.es.index(index=index_name, id=_id, body=dataIn)
time_end = time.perf_counter()
    print('=============== Finished ElasticDB WEALTH update: %s, elapsed: %s ==============' % (time_end, (time_end - time_start)))
def update_text(dataList: list):
time_start = time.perf_counter()
    print('================= Running ElasticDB TEXT update: %s =================' % time_start)
elastic = ElasticDatabase()
index_name = 'news'
mappings = {
'dynamic': False,
'properties': {
'name': {
'type': 'text',
'analyzer': 'ik_max_word',
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
'bank_name': {
'type': 'text',
'analyzer': 'ik_max_word',
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
'content': {
'type': 'text',
'analyzer': 'ik_max_word',
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
'type_main': {'type': 'keyword'},
'create_time': {'type': 'date'}
}
}
elastic.create_index_with_mappings(index_name, mappings)
for data in dataList:
_id = data['_id']
if not elastic.es.exists(index=index_name, id=_id):
local_datetime = datetime.strptime(data['create_time'], "%Y-%m-%d %H:%M:%S")
            create_time = int(time.mktime(local_datetime.timetuple()) * 1000.0 + local_datetime.microsecond / 1000.0)
dataIn = {
'name': data['name'],
'bank_name': data['bank_name'],
'content': data['content'],
'type_main': data['type_main'],
'create_time': create_time
}
elastic.es.index(index=index_name, id=_id, body=dataIn)
time_end = time.perf_counter()
    print('=============== Finished ElasticDB TEXT update: %s, elapsed: %s ==============' % (time_end, (time_end - time_start)))
| 6,601
| 1,394
| 73
|
82dd314634453ecfc01231902539794f306a6122
| 1,742
|
py
|
Python
|
arctic/project_template/app/views.py
|
sanoma/django-arctic
|
18edd63e46b31ce0492b5991d036df896bd14bc6
|
[
"MIT"
] | 73
|
2016-08-15T11:37:08.000Z
|
2020-04-11T14:12:19.000Z
|
arctic/project_template/app/views.py
|
sanoma/django-arctic
|
18edd63e46b31ce0492b5991d036df896bd14bc6
|
[
"MIT"
] | 331
|
2016-08-16T12:05:04.000Z
|
2020-04-16T18:39:46.000Z
|
arctic/project_template/app/views.py
|
sanoma/django-arctic
|
18edd63e46b31ce0492b5991d036df896bd14bc6
|
[
"MIT"
] | 26
|
2016-08-17T12:58:30.000Z
|
2019-10-16T08:07:41.000Z
|
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from arctic.generics import (
CreateView,
DeleteView,
ListView,
UpdateView,
)
from .forms import {{ camel_case_app_name }}Form
from .models import {{ camel_case_app_name }}
class {{ camel_case_app_name }}ListView(ListView):
model = {{ camel_case_app_name }}
fields = '__all__'
permission_required = 'view_{{ app_name }}'
# Delete and detail action link
action_links = [
("detail", "{{ app_name}}:detail", "fa-edit"),
("delete", "{{ app_name}}:delete", "fa-trash"),
]
# tool link to create
tool_links = [
(_("Create {{ app_name }}"), "{{ app_name }}:create", "fa-plus"),
]
# Some optional fields
# paginate_by = 10
# ordering_fields = ['field_name1', ..]
# search_fields = ['field_name', ...]
# allowed_exports = ["csv"]
class {{ camel_case_app_name }}CreateView(CreateView):
model = {{ camel_case_app_name }}
form_class = {{ camel_case_app_name }}Form
permission_required = 'add_{{ app_name }}'
class {{ camel_case_app_name }}UpdateView(UpdateView):
model = {{ camel_case_app_name }}
form_class = {{ camel_case_app_name }}Form
permission_required = 'change_{{ app_name }}'
success_url = reverse_lazy('{{app_name}}:list')
actions = [
(_("Cancel"), "cancel"),
(_("Save"), "submit"),
]
class {{ camel_case_app_name }}DeleteView(DeleteView):
model = {{ camel_case_app_name }}
success_url = reverse_lazy('{{app_name}}:list')
permission_required = 'delete_{{ app_name }}'
| 28.096774
| 73
| 0.639495
|
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from arctic.generics import (
CreateView,
DeleteView,
ListView,
UpdateView,
)
from .forms import {{ camel_case_app_name }}Form
from .models import {{ camel_case_app_name }}
class {{ camel_case_app_name }}ListView(ListView):
model = {{ camel_case_app_name }}
fields = '__all__'
permission_required = 'view_{{ app_name }}'
# Delete and detail action link
action_links = [
("detail", "{{ app_name}}:detail", "fa-edit"),
("delete", "{{ app_name}}:delete", "fa-trash"),
]
# tool link to create
tool_links = [
(_("Create {{ app_name }}"), "{{ app_name }}:create", "fa-plus"),
]
# Some optional fields
# paginate_by = 10
# ordering_fields = ['field_name1', ..]
# search_fields = ['field_name', ...]
# allowed_exports = ["csv"]
class {{ camel_case_app_name }}CreateView(CreateView):
model = {{ camel_case_app_name }}
form_class = {{ camel_case_app_name }}Form
permission_required = 'add_{{ app_name }}'
def get_success_url(self):
return reverse("{{app_name}}:detail", args=(self.object.pk,))
class {{ camel_case_app_name }}UpdateView(UpdateView):
model = {{ camel_case_app_name }}
form_class = {{ camel_case_app_name }}Form
permission_required = 'change_{{ app_name }}'
success_url = reverse_lazy('{{app_name}}:list')
actions = [
(_("Cancel"), "cancel"),
(_("Save"), "submit"),
]
class {{ camel_case_app_name }}DeleteView(DeleteView):
model = {{ camel_case_app_name }}
success_url = reverse_lazy('{{app_name}}:list')
permission_required = 'delete_{{ app_name }}'
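# A sketch of the url patterns the views above assume; the route names
# ('list', 'detail', 'create', 'delete') come from the action_links,
# tool_links and success_url values, while the paths are hypothetical:
#
# from django.urls import path
# app_name = '{{ app_name }}'
# urlpatterns = [
#     path('', {{ camel_case_app_name }}ListView.as_view(), name='list'),
#     path('create/', {{ camel_case_app_name }}CreateView.as_view(), name='create'),
#     path('<int:pk>/', {{ camel_case_app_name }}UpdateView.as_view(), name='detail'),
#     path('<int:pk>/delete/', {{ camel_case_app_name }}DeleteView.as_view(), name='delete'),
# ]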
| 75
| 0
| 27
|
5c02859d29c36b1595a05d5d8893582c61cb71b7
| 5,112
|
py
|
Python
|
tests/model/test_model.py
|
agdsn/sipa
|
a733bce0a54ad3a79732de82e7f32b35bd87d4e2
|
[
"MIT"
] | 22
|
2015-04-17T15:58:30.000Z
|
2021-04-19T08:26:32.000Z
|
tests/model/test_model.py
|
agdsn/sipa
|
a733bce0a54ad3a79732de82e7f32b35bd87d4e2
|
[
"MIT"
] | 329
|
2015-04-14T23:34:31.000Z
|
2022-01-21T03:02:46.000Z
|
tests/model/test_model.py
|
agdsn/sipa
|
a733bce0a54ad3a79732de82e7f32b35bd87d4e2
|
[
"MIT"
] | 18
|
2015-04-17T13:57:56.000Z
|
2018-05-30T14:20:59.000Z
|
import re
from base64 import urlsafe_b64encode
from os import urandom
from typing import cast
from unittest import TestCase
from unittest.mock import MagicMock
from ipaddress import IPv4Network
from flask import Flask
from sipa.backends import Backends, DataSource, Dormitory, InitContextCallable
| 33.631579
| 78
| 0.632433
|
import re
from base64 import urlsafe_b64encode
from os import urandom
from typing import cast
from unittest import TestCase
from unittest.mock import MagicMock
from ipaddress import IPv4Network
from flask import Flask
from sipa.backends import Backends, DataSource, Dormitory, InitContextCallable
class TestBackendInitializationCase(TestCase):
def setUp(self):
super().setUp()
self.app = Flask('sipa')
self.app.config['BACKENDS'] = ['foo']
self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
datasource = DataSource(
name='foo',
user_class=object,
mail_server="",
webmailer_url="",
support_mail="",
init_context=lambda app: None
)
Dormitory(name='test', display_name="",
datasource=datasource, subnets=[IPv4Network('127.0.0.0/8')])
self.backends = Backends()
self.backends.register(datasource)
self.backends.init_app(self.app)
self.backends.init_backends()
def test_extension_registrated(self):
assert 'backends' in self.app.extensions
def test_datasource_names_unique(self):
names = [dsrc.name for dsrc in self.backends.datasources]
self.assertEqual(len(names), len(set(names)))
def test_dormitory_names_unique(self):
names = [dorm.name for dorm in self.backends.dormitories]
self.assertEqual(len(names), len(set(names)))
def test_all_dormitories_names_unique(self):
names = [dorm.name for dorm in self.backends.all_dormitories]
self.assertEqual(len(names), len(set(names)))
def test_all_dormitories_greater(self):
assert (set(self.backends.all_dormitories) >=
set(self.backends.dormitories))
    def assert_dormitories_namelist(self, pairs, base):
        """Asserts that the list consists of (str, str) tuples
        and has the correct length
        """
        self.assertEqual(len(pairs), len(base))
        for name, display_name in pairs:
            assert isinstance(name, str)
            assert isinstance(display_name, str)
def test_all_dormitories_list(self):
self.assert_dormitories_namelist(
self.backends.dormitories_short,
self.backends.all_dormitories,
)
def test_supported_dormitories_list(self):
self.assert_dormitories_namelist(
self.backends.supported_dormitories_short,
self.backends.dormitories,
)
def test_get_dormitory(self):
for dormitory in self.backends.dormitories:
self.assertEqual(self.backends.get_dormitory(dormitory.name),
dormitory)
possible_names = [
dorm.name for dorm in self.backends.dormitories
]
for str_length in range(10):
random_string = None
while random_string in possible_names:
random_string = urlsafe_b64encode(urandom(str_length))
assert self.backends.get_dormitory(random_string) is None
def test_dormitory_from_ip(self):
for dorm in self.backends.dormitories:
first_ip = next(dorm.subnets.subnets[0].hosts())
self.assertEqual(self.backends.dormitory_from_ip(first_ip), dorm)
# TODO: Find an ip not in any dormitory
class DatasourceTestCase(TestCase):
def setUp(self):
super().setUp()
self.default_args = {
'name': 'test',
'user_class': object,
'mail_server': "",
}
self.app = MagicMock()
self.app.config = {}
def test_init_context_gets_called_correctly(self):
init_mock = cast(InitContextCallable, MagicMock())
datasource = DataSource(
**self.default_args,
init_context=init_mock,
)
datasource.init_context(self.app)
self.assertEqual(init_mock.call_args[0], (self.app,))
def test_init_context_reads_mail(self):
datasource = DataSource(**self.default_args)
config = {
'support_mail': 'bazingle.foo@shizzle.xxx'
}
self.app.config['BACKENDS_CONFIG'] = {datasource.name: config}
datasource.init_context(self.app)
self.assertEqual(datasource.support_mail, config['support_mail'])
def test_init_context_warns_on_unknown_keys(self):
bad_keys = ['unknown', 'foo', 'bar', 'mail']
datasource = DataSource(**self.default_args)
bad_config = {key: None for key in bad_keys}
self.app.config['BACKENDS_CONFIG'] = {datasource.name: bad_config}
with self.assertLogs('sipa.backends', level='WARNING') as context:
datasource.init_context(self.app)
for log in context.output:
self.assertRegex(log, re.compile("ignoring.*unknown",
flags=re.IGNORECASE))
self.assertTrue(any(key in log for key in bad_keys),
msg="Log warning raised not containing any "
"of the given invalid keys")
| 4,002
| 658
| 153
|
9d3749347780f434f22f67fb2805945ba774561c
| 1,707
|
py
|
Python
|
src/dep/cndict/gen_simp_trad.py
|
abrookins/RediSearch
|
773a04106e4f0cfbad3a75a84ce69f3a36fd16e1
|
[
"Apache-2.0",
"Ruby",
"BSD-3-Clause",
"MIT"
] | 2,098
|
2019-05-13T09:11:54.000Z
|
2022-03-31T06:24:50.000Z
|
src/dep/cndict/gen_simp_trad.py
|
abrookins/RediSearch
|
773a04106e4f0cfbad3a75a84ce69f3a36fd16e1
|
[
"Apache-2.0",
"Ruby",
"BSD-3-Clause",
"MIT"
] | 1,659
|
2019-05-13T07:55:29.000Z
|
2022-03-31T02:42:57.000Z
|
src/dep/cndict/gen_simp_trad.py
|
abrookins/RediSearch
|
773a04106e4f0cfbad3a75a84ce69f3a36fd16e1
|
[
"Apache-2.0",
"Ruby",
"BSD-3-Clause",
"MIT"
] | 227
|
2019-05-17T07:54:49.000Z
|
2022-03-28T03:50:19.000Z
|
#!/usr/bin/env python
"""
This script takes a JSON dictionary containing traditional chinese characters
as keys, and the simplified equivalents as values. It then outputs a header file
appropriate for inclusion. The header output file contains an array,
`Cn_T2S` which can be used as
```
simpChr = Cn_T2S[tradChr];
```
the variable Cn_T2S_MinChr contains the smallest key in the dictionary, whereas
Cn_T2S_MaxChr contains the largest key in the dictionary.
"""
import json
import datetime
import sys
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('-f', '--file', help='Chinese map file', required=True)
ap.add_argument('-o', '--output', help='Where to place the output C source')
options = ap.parse_args()
with open(options.file, 'r') as fp:
txt = json.load(fp)
if options.output is None or options.output == '-':
    ofp = sys.stdout
else:
    ofp = open(options.output, 'w')
CP_MIN = 0xffffffff
CP_MAX = 0x00
for k in txt:
v = ord(k)
if v > CP_MAX:
CP_MAX = v
if v < CP_MIN:
CP_MIN = v
ofp.write('''
/**
* Generated by {script} on {date}
*
*/
#include <stdint.h>
static const uint16_t Cn_T2S_MinChr = {cp_min};
static const uint16_t Cn_T2S_MaxChr = {cp_max};
static uint16_t Cn_T2S[{cap}]={{
'''.format(
script=' '.join(sys.argv),
date=datetime.datetime.now(),
cp_min=CP_MIN,
cp_max=CP_MAX,
cap=CP_MAX+1))
num_items = 0
ITEMS_PER_LINE = 5
for trad, simp in txt.items():
ix = ord(trad)
val = ord(simp)
ofp.write(' [0x{:X}]=0x{:X},'.format(ix, val))
num_items += 1
if num_items >= ITEMS_PER_LINE:
ofp.write('\n')
num_items = 0
ofp.write('};\n')
ofp.flush()
| 21.3375
| 80
| 0.657879
|
#!/usr/bin/env python
"""
This script takes a JSON dictionary containing traditional chinese characters
as keys, and the simplified equivalents as values. It then outputs a header file
appropriate for inclusion. The header output file contains an array,
`Cn_T2S` which can be used as
```
simpChr = Cn_T2S[tradChr];
```
the variable Cn_T2S_MinChr contains the smallest key in the dictionary, whereas
Cn_T2S_MaxChr contains the largest key in the dictionary.
"""
import json
import datetime
import sys
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('-f', '--file', help='Chinese map file', required=True)
ap.add_argument('-o', '--output', help='Where to place the output C source')
options = ap.parse_args()
with open(options.file, 'r') as fp:
txt = json.load(fp)
if options.output is None or options.output == '-':
    ofp = sys.stdout
else:
    ofp = open(options.output, 'w')
CP_MIN = 0xffffffff
CP_MAX = 0x00
for k in txt:
v = ord(k)
if v > CP_MAX:
CP_MAX = v
if v < CP_MIN:
CP_MIN = v
ofp.write('''
/**
* Generated by {script} on {date}
*
*/
#include <stdint.h>
static const uint16_t Cn_T2S_MinChr = {cp_min};
static const uint16_t Cn_T2S_MaxChr = {cp_max};
static uint16_t Cn_T2S[{cap}]={{
'''.format(
script=' '.join(sys.argv),
date=datetime.datetime.now(),
cp_min=CP_MIN,
cp_max=CP_MAX,
cap=CP_MAX+1))
num_items = 0
ITEMS_PER_LINE = 5
for trad, simp in txt.items():
ix = ord(trad)
val = ord(simp)
ofp.write(' [0x{:X}]=0x{:X},'.format(ix, val))
num_items += 1
if num_items >= ITEMS_PER_LINE:
ofp.write('\n')
num_items = 0
ofp.write('};\n')
ofp.flush()
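# Sanity check of the emitted mapping semantics (one well-known pair):
# the generated C array maps a traditional code point to its simplified one.
_sample = {u'\u66f8': u'\u4e66'}  # 書 -> 书
assert {ord(t): ord(s) for t, s in _sample.items()}[0x66f8] == 0x4e66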
| 0
| 0
| 0
|
0a9aafed602e4e30f7389bd6554aa255f58a62e3
| 5,001
|
py
|
Python
|
src/cm/models/public_ip.py
|
cc1-cloud/cc1
|
8113673fa13b6fe195cea99dedab9616aeca3ae8
|
[
"Apache-2.0"
] | 11
|
2015-05-06T14:16:54.000Z
|
2022-02-08T23:21:31.000Z
|
src/cm/models/public_ip.py
|
fortress-shell/cc1
|
8113673fa13b6fe195cea99dedab9616aeca3ae8
|
[
"Apache-2.0"
] | 1
|
2015-10-30T21:08:11.000Z
|
2015-10-30T21:08:11.000Z
|
src/cm/models/public_ip.py
|
fortress-shell/cc1
|
8113673fa13b6fe195cea99dedab9616aeca3ae8
|
[
"Apache-2.0"
] | 5
|
2016-02-12T22:01:38.000Z
|
2021-12-06T16:56:54.000Z
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.cm.views.user.network
@author Maciej Nabożny <mn@mnabozny.pl>
Database model describing public IP addresses, which can be mapped onto a VM
IP lease (Lease entity). Attached IPs are redirected by the nodes on which
the VMs are running. This is done with one-to-one NAT (SNAT+DNAT).
"""
import subprocess
from django.db import models
from cm.utils import log
from cm.utils.exception import CMException
| 38.175573
| 170
| 0.530494
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.cm.views.user.network
@author Maciej Nabożny <mn@mnabozny.pl>
Database model describing public IP addresses, which can be mapped onto a VM
IP lease (Lease entity). Attached IPs are redirected by the nodes on which
the VMs are running. This is done with one-to-one NAT (SNAT+DNAT).
"""
import subprocess
from django.db import models
from cm.utils import log
from cm.utils.exception import CMException
class PublicIP(models.Model):
address = models.IPAddressField()
lease = models.ForeignKey('Lease', blank=True, null=True)
user = models.ForeignKey('User', blank=True, null=True, related_name='public_ips')
class Meta:
app_label = 'cm'
def __unicode__(self):
return self.address
@property
def dict(self):
"""
@returns{dict} this PublicLease's data
\n fields:
@dictkey{id,int} this PublicLease's id
@dictkey{ip,string} IP address corresponding to this PublicLease
@dictkey{lease_id,int} id of the wrapped Lease
@dictkey{vm_name,string} VM, to which IP is attached
@dictkey{user_id,int} owner, if there is any
"""
d = {}
d['ip_id'] = self.id
d['public_ip_id'] = self.id
d['address'] = self.address
if self.lease:
d['lease_id'] = self.lease.id
if self.lease.vm:
                d['vm_name'] = self.lease.vm.name
else:
d['vm_name'] = ''
else:
d['lease_id'] = ''
d['vm_name'] = ''
if self.user:
d['user_id'] = self.user.id
else:
d['user_id'] = ''
return d
def assign(self, lease):
        if lease.vm is None:
raise CMException('lease_not_attached')
self.lease = lease
self.save()
log.debug(0, "Attaching ip with comand: %s" % str(['ssh',
'-i',
'/var/lib/cc1/.ssh/id_rsa',
'%s@%s' % (lease.vm.node.username, lease.vm.node.address),
'sudo /usr/sbin/cc1_node_public_ip attach %d %s %s' % (lease.vm.id, lease.vm_address, self.address)]))
p = subprocess.Popen(['ssh',
'-i',
'/var/lib/cc1/.ssh/id_rsa',
'%s@%s' % (lease.vm.node.username, lease.vm.node.address),
'sudo /usr/sbin/cc1_node_public_ip attach %d %s %s' % (lease.vm.id, lease.vm_address, self.address)],
stdout=subprocess.PIPE)
p.wait()
log.debug(self.user.id, p.stdout.read())
if p.returncode != 0:
log.error(self.user.id, "SSH error: %d" % p.returncode)
raise CMException('public_ip_failed')
def unassign(self):
        if self.lease is None:
            raise CMException('public_ip_not_attached')
        if self.lease.vm is None:
            raise CMException('lease_not_attached')
log.debug(0, "Detaching ip with comand: %s" % str(['ssh',
'-i',
'/var/lib/cc1/.ssh/id_rsa',
'%s@%s' % (self.lease.vm.node.username, self.lease.vm.node.address),
'sudo /usr/sbin/cc1_node_public_ip detach %d %s %s' % (self.lease.vm.id, self.lease.vm_address, self.address)]))
p = subprocess.Popen(['ssh',
'-i',
'/var/lib/cc1/.ssh/id_rsa',
'%s@%s' % (self.lease.vm.node.username, self.lease.vm.node.address),
'sudo /usr/sbin/cc1_node_public_ip detach %d %s %s' % (self.lease.vm.id, self.lease.vm_address, self.address)],
stdout=subprocess.PIPE)
p.wait()
log.debug(self.user.id, p.stdout.read())
self.lease = None
self.save()
if p.returncode != 0:
raise CMException('public_ip_failed')
| 2,587
| 1,284
| 23
|
fe60666df6a3b848589becd47d6491e0d26078cb
| 8,013
|
py
|
Python
|
blog/views.py
|
nixrajput/nixlab-blog-api-django
|
9934ee9055e919f101fc53061aa7e297c812d80d
|
[
"BSD-3-Clause"
] | 1
|
2020-09-04T19:23:42.000Z
|
2020-09-04T19:23:42.000Z
|
blog/views.py
|
nixrajput/nixlab-blog-api-django
|
9934ee9055e919f101fc53061aa7e297c812d80d
|
[
"BSD-3-Clause"
] | null | null | null |
blog/views.py
|
nixrajput/nixlab-blog-api-django
|
9934ee9055e919f101fc53061aa7e297c812d80d
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import ListAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from blog.models import BlogPost
from blog.utils import validate_uuid4
from blog.serializers import (
BlogPostSerializer,
BlogPostUpdateSerializer,
BlogPostCreateSerializer,
)
@api_view(["GET"])
@permission_classes((IsAuthenticated,))
@authentication_classes([TokenAuthentication])
@api_view(["POST"])
@permission_classes((IsAuthenticated,))
@api_view(["PUT"])
@permission_classes((IsAuthenticated,))
@api_view(["DELETE"])
@permission_classes((IsAuthenticated,))
@api_view(["GET"])
@permission_classes((IsAuthenticated,))
@api_view(['GET', ])
@permission_classes((IsAuthenticated,))
| 33.95339
| 98
| 0.672782
|
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import ListAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from blog.models import BlogPost
from blog.utils import validate_uuid4
from blog.serializers import (
BlogPostSerializer,
BlogPostUpdateSerializer,
BlogPostCreateSerializer,
)
class ApiBlogListView(ListAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
serializer_class = BlogPostSerializer
pagination_class = PageNumberPagination
filter_backends = (SearchFilter, OrderingFilter)
search_fields = ('content', 'author__username')
def get_queryset(self, *args, **kwargs):
queryset = BlogPost.objects.filter(is_draft=False).order_by('-date_published')
return queryset
class ApiUserBlogListView(ListAPIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
serializer_class = BlogPostSerializer
pagination_class = PageNumberPagination
filter_backends = (SearchFilter, OrderingFilter)
search_fields = ('content', 'author__username')
lookup_url_kwarg = "uid"
def get_queryset(self, *args, **kwargs):
uid = self.kwargs.get(self.lookup_url_kwarg)
queryset = BlogPost.objects.filter(is_draft=False, author=uid).order_by('-date_published')
return queryset
@api_view(["GET"])
@permission_classes((IsAuthenticated,))
@authentication_classes([TokenAuthentication])
def api_detail_blog_view(request, post_id):
data = {}
is_uuid = validate_uuid4(post_id)
if not is_uuid:
data['response'] = "error"
data["message"] = "Post ID is invalid."
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
try:
blog_post = BlogPost.objects.get(id=post_id, is_draft=False)
except BlogPost.DoesNotExist:
data['response'] = "error"
data["message"] = "Post doesn't found."
return Response(data=data, status=status.HTTP_404_NOT_FOUND)
serializer = BlogPostSerializer(blog_post, context={'request': request})
if request.method == "GET":
return Response(serializer.data, status=status.HTTP_200_OK)
data["response"] = "error"
data["message"] = serializer.errors.__str__()
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
@api_view(["POST"])
@permission_classes((IsAuthenticated,))
def api_create_blog_view(request):
if request.method == "POST":
req_data = request.data
req_data['author'] = request.user.id
serializer = BlogPostCreateSerializer(data=req_data)
data = {}
if serializer.is_valid():
blog_post = serializer.save()
data['response'] = "success"
data['id'] = blog_post.id
data['message'] = "Post created successfully."
return Response(data=data, status=status.HTTP_201_CREATED)
else:
data["response"] = "error"
data["message"] = serializer.errors.__str__()
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
@api_view(["PUT"])
@permission_classes((IsAuthenticated,))
def api_update_blog_view(request, post_id):
data = {}
is_uuid = validate_uuid4(post_id)
if not is_uuid:
data['response'] = "error"
data["message"] = "Post ID is invalid."
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
try:
blog_post = BlogPost.objects.get(id=post_id, is_draft=False)
except BlogPost.DoesNotExist:
data['response'] = "error"
data["message"] = "Post doesn't found."
return Response(data=data, status=status.HTTP_404_NOT_FOUND)
user = request.user
if blog_post.author != user:
data['response'] = "error"
data["message"] = "You don't have permission to delete this post."
return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)
if request.method == "PUT":
serializer = BlogPostUpdateSerializer(
blog_post,
data=request.data,
partial=True
)
if serializer.is_valid():
serializer.save()
data['response'] = "success"
data['message'] = "Post updated successfully"
data['id'] = blog_post.id
return Response(data=data, status=status.HTTP_200_OK)
return Response(serializer.errors.__str__(), status=status.HTTP_400_BAD_REQUEST)
@api_view(["DELETE"])
@permission_classes((IsAuthenticated,))
def api_delete_blog_view(request, post_id):
data = {}
is_uuid = validate_uuid4(post_id)
if not is_uuid:
data['response'] = "error"
data["message"] = "Post ID is invalid."
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
try:
blog_post = BlogPost.objects.get(id=post_id, is_draft=False)
except BlogPost.DoesNotExist:
data['response'] = "error"
data["message"] = "Post doesn't found."
return Response(data=data, status=status.HTTP_404_NOT_FOUND)
user = request.user
if blog_post.author != user:
data['response'] = "error"
data["message"] = "You don't have permission to delete this post."
return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)
if request.method == 'DELETE':
operation = blog_post.delete()
if operation:
data['response'] = "success"
data["message"] = "Post deleted successfully."
return Response(data=data, status=status.HTTP_200_OK)
data["response"] = "error"
data["message"] = "Post deletion failed."
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
@permission_classes((IsAuthenticated,))
def api_like_toggle_view(request, post_id):
data = {}
is_uuid = validate_uuid4(post_id)
if not is_uuid:
data['response'] = "error"
data["message"] = "Post ID is invalid."
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
try:
blog_post = BlogPost.objects.get(id=post_id, is_draft=False)
except BlogPost.DoesNotExist:
data['response'] = "error"
data["message"] = "Post doesn't found."
return Response(data=data, status=status.HTTP_404_NOT_FOUND)
if request.user.is_authenticated:
if request.user in blog_post.likes.all():
blog_post.likes.remove(request.user)
liked = False
message = "Like removed."
else:
blog_post.likes.add(request.user)
liked = True
message = "Post liked."
updated = True
data["updated"] = updated
data["liked"] = liked
data["message"] = message
return Response(data, status=status.HTTP_200_OK)
else:
data["response"] = "error"
data["message"] = "An error occurred"
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', ])
@permission_classes((IsAuthenticated,))
def api_is_author_of_blogpost(request, post_id):
try:
blog_post = BlogPost.objects.get(id=post_id, is_draft=False)
except BlogPost.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
data = {}
user = request.user
if blog_post.author != user:
data['response'] = "error"
data["message"] = "You don't have any permission to this post."
return Response(data=data, status=status.HTTP_401_UNAUTHORIZED)
data["response"] = "success"
data['message'] = "You have permission to edit this post."
return Response(data=data, status=status.HTTP_200_OK)
| 6,080
| 687
| 178
|
1dccb3f4def3185980f12c8a2b142b665864fb7f
| 555
|
py
|
Python
|
external_libs/cryptopy/crypto/cipher/aes_cbc.py
|
rgrr/smartmeshsdk
|
a95f3e4d9e2254d59d326428fef8c77319cd4373
|
[
"BSD-3-Clause"
] | 29
|
2015-02-17T14:22:14.000Z
|
2021-02-19T06:01:10.000Z
|
external_libs/cryptopy/crypto/cipher/aes_cbc.py
|
rgrr/smartmeshsdk
|
a95f3e4d9e2254d59d326428fef8c77319cd4373
|
[
"BSD-3-Clause"
] | 104
|
2016-04-10T19:22:20.000Z
|
2018-11-20T15:47:14.000Z
|
external_libs/cryptopy/crypto/cipher/aes_cbc.py
|
rgrr/smartmeshsdk
|
a95f3e4d9e2254d59d326428fef8c77319cd4373
|
[
"BSD-3-Clause"
] | 35
|
2015-07-10T18:58:15.000Z
|
2022-03-20T08:56:25.000Z
|
""" crypto.cipher.aes_cbc
AES_CBC Encryption Algorithm
Copyright (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-14
"""
from crypto.cipher.aes import AES
from crypto.cipher.cbc import CBC
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class AES_CBC(CBC):
""" AES encryption in CBC feedback mode """
| 26.428571
| 70
| 0.695495
|
""" crypto.cipher.aes_cbc
AES_CBC Encryption Algorithm
Copyright (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-14
"""
from crypto.cipher.aes import AES
from crypto.cipher.cbc import CBC
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class AES_CBC(CBC):
""" AES encryption in CBC feedback mode """
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
self.name = 'AES_CBC'
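# A minimal round-trip sketch, assuming the encrypt/decrypt interface that
# crypto.cipher.base.BlockCipher exposes (CBC generates and prepends its
# own IV, so no IV handling is shown here):
def _example_roundtrip():
    alg = AES_CBC(key=b'0123456789abcdef', keySize=16)
    cipherText = alg.encrypt(b'attack at dawn')
    return alg.decrypt(cipherText) == b'attack at dawn'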
| 150
| 0
| 26
|
12f659110ffd6cf2190c795f1dc4cd027e955400
| 569
|
py
|
Python
|
fpakman/util/memory.py
|
vinifmor/fpakman
|
a719991b8f7ecf366d44fdf074f5950767bdf121
|
[
"Zlib"
] | 39
|
2019-06-15T08:27:12.000Z
|
2021-11-08T03:33:01.000Z
|
fpakman/util/memory.py
|
vinifmor/fpakman
|
a719991b8f7ecf366d44fdf074f5950767bdf121
|
[
"Zlib"
] | 10
|
2019-06-16T12:16:19.000Z
|
2020-06-21T18:49:05.000Z
|
fpakman/util/memory.py
|
vinifmor/fpakman
|
a719991b8f7ecf366d44fdf074f5950767bdf121
|
[
"Zlib"
] | 3
|
2019-08-01T12:38:46.000Z
|
2020-04-30T20:40:23.000Z
|
import time
from threading import Thread
from typing import List
from fpakman.util.cache import Cache
| 23.708333
| 70
| 0.634446
|
import time
from threading import Thread
from typing import List
from fpakman.util.cache import Cache
class CacheCleaner(Thread):
def __init__(self, caches: List[Cache], check_interval: int = 15):
super(CacheCleaner, self).__init__(daemon=True)
self.caches = [c for c in caches if c.is_enabled()]
self.check_interval = check_interval
def run(self):
if self.caches:
while True:
for cache in self.caches:
cache.clean_expired()
time.sleep(self.check_interval)
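# A minimal sketch (the caches list is hypothetical and would come from the
# application setup): the daemon thread periodically evicts expired entries.
def _example_start_cleaner(caches: List[Cache]) -> CacheCleaner:
    cleaner = CacheCleaner(caches, check_interval=30)
    cleaner.start()  # daemon=True, so it will not block interpreter exit
    return cleaner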
| 381
| 6
| 77
|
e6e8efd159bb1ab67df8cb926bb72fb6a4a0b0e5
| 2,484
|
py
|
Python
|
2021/18.py
|
pubkraal/Advent
|
286517fe7911490a817adceefc8a84a1032e7bcc
|
[
"MIT"
] | null | null | null |
2021/18.py
|
pubkraal/Advent
|
286517fe7911490a817adceefc8a84a1032e7bcc
|
[
"MIT"
] | null | null | null |
2021/18.py
|
pubkraal/Advent
|
286517fe7911490a817adceefc8a84a1032e7bcc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import json
from util.aoc import file_to_day
from util.input import load_data
if __name__ == "__main__":
test = len(sys.argv) > 1 and sys.argv[1] == "test"
main(test)
| 22.178571
| 79
| 0.457327
|
#!/usr/bin/env python3
import sys
import json
from util.aoc import file_to_day
from util.input import load_data
def main(test=False):
numbers = [
json.loads(line) for line in load_data(file_to_day(__file__), test)
]
p1_res = numbers[0]
for num in numbers[1:]:
p1_res = add(p1_res, num)
p2 = 0
for num in numbers:
for num2 in numbers:
if num != num2:
p2 = max(p2, magnitude(add(num, num2)))
print("2021:18:1 =", magnitude(p1_res))
print("2021:18:2 =", p2)
def add(n1, n2):
return reduce([n1, n2])
def reduce(n):
d1, n1 = explode(n)
if d1:
return reduce(n1)
d2, n2 = split(n)
if d2:
return reduce(n2)
return n2
def split(n):
if isinstance(n, list):
did1, n1 = split(n[0])
if did1:
return True, [n1, n[1]]
did2, n2 = split(n[1])
return did2, [n1, n2]
if n >= 10:
return True, [n // 2, (n + 1) // 2]
return False, n
def explode(n):
    # Tokenize the string form of the number into '[', ',', ']' and ints
    ns = str(n)
    parts = []
i = 0
while i < len(ns):
if ns[i] in "[,]":
parts.append(ns[i])
i += 1
elif ns[i] == " ":
i += 1
else:
j = i
while j < len(ns) and ns[j].isdigit():
j += 1
parts.append(int(ns[i:j]))
i = j
    # Find the leftmost pair nested inside four other pairs and explode it
    depth = 0
    for i, c in enumerate(parts):
if c == "[":
depth += 1
if depth <= 4:
continue
left = parts[i + 1]
right = parts[i + 3]
left_i = None
right_i = None
for j in range(len(parts)):
if isinstance(parts[j], int) and j < i:
left_i = j
elif (
isinstance(parts[j], int) and j > i + 3 and right_i is None
):
right_i = j
if right_i is not None:
parts[right_i] += right
parts = parts[:i] + [0] + parts[i + 5 :]
if left_i is not None:
parts[left_i] += left
return True, json.loads("".join([str(p) for p in parts]))
elif c == "]":
depth -= 1
return False, n
def magnitude(n):
if isinstance(n, list):
return 3 * magnitude(n[0]) + 2 * magnitude(n[1])
return n
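# Worked example from the puzzle text:
# magnitude([[1,2],[[3,4],5]]) = 3*(3*1+2*2) + 2*(3*(3*3+2*4)+2*5) = 143
assert magnitude([[1, 2], [[3, 4], 5]]) == 143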
if __name__ == "__main__":
test = len(sys.argv) > 1 and sys.argv[1] == "test"
main(test)
| 2,127
| 0
| 138
|
493b41243d9efaa3cac240165261296652fea502
| 3,649
|
py
|
Python
|
src/darknet/py/util.py
|
zeroae/darknet.py
|
c234fb4d986adb8b0534f938c2408622b8d3aa3f
|
[
"MIT"
] | null | null | null |
src/darknet/py/util.py
|
zeroae/darknet.py
|
c234fb4d986adb8b0534f938c2408622b8d3aa3f
|
[
"MIT"
] | 3
|
2020-07-23T20:52:28.000Z
|
2020-11-19T22:26:26.000Z
|
src/darknet/py/util.py
|
zeroae/darknet.py
|
c234fb4d986adb8b0534f938c2408622b8d3aa3f
|
[
"MIT"
] | null | null | null |
import os
import fsspec
import numpy as np
from PIL import Image
from PIL import ImageDraw
| 33.477064
| 97
| 0.620444
|
import os
import fsspec
import numpy as np
from PIL import Image
from PIL import ImageDraw
def fsspec_cache_open(
urlpath: str,
mode="rb",
compression=None,
encoding="utf8",
errors=None,
protocol=None,
newline=None,
**kwargs,
) -> fsspec.core.OpenFile:
chain = urlpath.split("::")
if chain[0].startswith("github"):
chain[0], kwargs = fsspec_split_github_url(chain[0], kwargs)
# Because darknet is written in C, we need real file names for it to open
if chain[0] not in {"filecache", "simplecache"}:
first_scheme = chain[0].split("://")[0]
urlpath = f"filecache::{urlpath}"
filecache = dict(cache_storage=f"{os.environ['HOME']}/.cache/darknet.py")
kwargs = {"filecache": filecache, first_scheme: kwargs}
return fsspec.open(urlpath, mode, compression, encoding, errors, protocol, newline, **kwargs)
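# A usage sketch of the caching wrapper above (hypothetical URL): the first
# open downloads into ~/.cache/darknet.py, later opens hit the local cache.
def _example_cached_open():
    with fsspec_cache_open("https://example.com/yolov3.weights") as f:
        return f.read(16)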
def fsspec_split_github_url(github_url: str, kwargs: dict) -> (str, dict):
# TODO: Remove this once fsspec > 0.7.5
from urllib.parse import urlparse
rv = github_url
github_url = urlparse(github_url)
keys = {"org", "repo", "sha"}
# If that metadata is not passed as kwargs, we need to extract it
# netloc = "{org}:{repo}@{sha}"
kwargs = kwargs or dict()
if (keys & kwargs.keys()) != keys:
org, repo, sha = github_url.username, github_url.password, github_url.hostname
if org is None or repo is None or sha is None:
raise ValueError(
f"The github url {github_url} does not match `github://<org>:<repo>@<sha>/path`"
)
kwargs.update(dict(org=org, repo=repo, sha=sha))
rv = github_url.geturl().replace(f"{github_url.netloc}/", "")
return rv, kwargs
def image_to_3darray(image, target_shape):
# We assume the original size matches the target_shape (height, width)
orig_size = target_shape
if isinstance(image, str):
with fsspec.open(image, mode="rb") as f:
image = Image.open(f)
image.load()
if isinstance(image, Image.Image):
orig_size = image.size
image = image_scale_and_pad(image, target_shape)
image = np.asarray(image)
if isinstance(image, np.ndarray):
if image.shape[0:2] != target_shape:
image = Image.fromarray(image)
orig_size = image.size
image = image_scale_and_pad(image, target_shape)
image = np.asarray(image)
image = image.transpose((2, 0, 1)).astype(dtype=np.float32, order="C") / 255
return image, orig_size
def image_scale_and_pad(image: Image.Image, target_shape) -> Image.Image:
image = image.convert("RGB")
if (image.width, image.height) != target_shape:
from PIL import ImageOps
image = image.copy()
image.thumbnail(target_shape)
image = ImageOps.pad(image, target_shape)
return image
def image_draw_detections(img: Image.Image, detections) -> Image.Image:
img = img.copy()
draw = ImageDraw.Draw(img)
colors = ["purple", "blue", "green", "pink", "brown"]
def xywh_to_bounds(x, y, w, h):
return x - w / 2, y - h / 2, x + w / 2, y + h / 2
for i, (cat, prob, xywh) in enumerate(detections):
text = f"{cat}@{prob:.2%}"
bounds = xywh_to_bounds(*xywh)
t_w, t_h = draw.textsize(text)
draw.rectangle(xywh_to_bounds(*xywh), outline=colors[i % 5], width=4)
draw.rectangle(
(bounds[0], bounds[1] - t_h, bounds[0] + t_w + 4, bounds[1]), fill=colors[i % 5]
)
draw.text((bounds[0] + 2, bounds[1] - t_h), text, fill="white")
return img
| 3,437
| 0
| 115
|
903af003069ad52f30b885bf744257637efbd592
| 227
|
py
|
Python
|
liveedit/urls.py
|
jonny5532/wagtail-liveedit
|
c44d46070891a1c9a4959bc120bf1b74e8f153b6
|
[
"MIT"
] | null | null | null |
liveedit/urls.py
|
jonny5532/wagtail-liveedit
|
c44d46070891a1c9a4959bc120bf1b74e8f153b6
|
[
"MIT"
] | null | null | null |
liveedit/urls.py
|
jonny5532/wagtail-liveedit
|
c44d46070891a1c9a4959bc120bf1b74e8f153b6
|
[
"MIT"
] | null | null | null |
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r'^action/', views.action_view),
re_path(r'^append-block/', views.append_block_view),
re_path(r'^edit-block/', views.edit_block_view),
]
| 22.7
| 56
| 0.709251
|
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r'^action/', views.action_view),
re_path(r'^append-block/', views.append_block_view),
re_path(r'^edit-block/', views.edit_block_view),
]
| 0
| 0
| 0
|
bc4ad2574746fcb990fe2f5613171952429a286e
| 9,063
|
py
|
Python
|
src/libtakiyasha/ncm.py
|
nukemiko/takiyasha
|
d0e2ebab2a476646313a1fd412f55b9d1300ea87
|
[
"MIT"
] | 15
|
2022-01-16T16:13:20.000Z
|
2022-03-27T15:30:51.000Z
|
src/libtakiyasha/ncm.py
|
nukemiko/takiyasha
|
d0e2ebab2a476646313a1fd412f55b9d1300ea87
|
[
"MIT"
] | null | null | null |
src/libtakiyasha/ncm.py
|
nukemiko/takiyasha
|
d0e2ebab2a476646313a1fd412f55b9d1300ea87
|
[
"MIT"
] | 2
|
2022-01-20T18:59:07.000Z
|
2022-01-26T08:09:15.000Z
|
from __future__ import annotations
import json
from base64 import b64decode, b64encode
from copy import deepcopy as dp
from io import BytesIO
from random import randrange
from string import digits as strdigits
from typing import Generator, IO
from . import utils
from .common import Cipher, Crypter
from .exceptions import FileTypeMismatchError
from .standardciphers import StreamedAESWithModeECB
__all__ = ['NCM', 'NCMRC4Cipher']
class NCM(Crypter):
"""读写网易云音乐 NCM 格式的文件。
读取:
>>> ncmfile = NCM('./test1.ncm')
>>> data = ncmfile.read()
写入:
>>> ncmfile.write(b'Writted bytes')
创建、写入并保存:
>>> new_ncmfile = NCM() # 随机生成一个密钥
>>> with open('./metal.flac', 'rb') as f: # 写入未加密的文件数据
... new_ncmfile.write(f.read())
>>> new_ncmfile.save('./result.ncm')
>>> """
@staticmethod
@staticmethod
@staticmethod
def __init__(self,
filething: utils.FileThing | None = None,
**kwargs
) -> None:
"""读写网易云音乐 NCM 格式的文件。
Args:
filething (file): 源 NCM 文件的路径或文件对象;留空则视为创建一个空 NCM 文件
Keyword Args:
key (bytes): 加/解密数据所需的密钥;留空则会随机生成一个
所有未知的关键字参数都会被忽略。
"""
if filething is None:
self._raw = BytesIO()
self._name = None
key: bytes | None = kwargs.get('key', None)
if key is not None:
self._cipher: NCMRC4Cipher = NCMRC4Cipher(key)
else:
                # With no key and no file given, generate a random key of length 111 or 113
key_left = utils.gen_random_string(
randrange(27, 30), strdigits
).encode()
key_right = b'E7fT49x7dof9OKCgg9cdvhEuezy3iZCL1nFvBFd1T4uSktAJKmwZXsijPbijliionVUXXg9plTbXEclAE9Lb'
self._cipher = NCMRC4Cipher(key_left + key_right)
self._tagdata = {}
self.coverdata = b''
else:
super().__init__(filething, **kwargs)
def load(self,
filething: utils.FileThing,
**kwargs
) -> None:
"""将一个 NCM 文件加载到当前 NCM 对象中。
Args:
filething (file): 源 NCM 文件的路径或文件对象
Keyword Args:
skip_tagdata (bool): 加载文件时跳过加载标签信息和封面数据,默认为 ``False``
Raises:
FileTypeMismatchError: ``filething`` 不是一个 NCM 格式文件
所有未知的关键字参数都会被忽略。
"""
skip_tagdata: bool = kwargs.get('skip_tagdata', False)
if utils.is_filepath(filething):
fileobj: IO[bytes] = open(filething, 'rb') # type: ignore
self._name = fileobj.name
else:
fileobj: IO[bytes] = filething # type: ignore
self._name = None
utils.verify_fileobj_readable(fileobj, bytes)
utils.verify_fileobj_seekable(fileobj)
fileobj.seek(0, 0)
file_header = fileobj.read(10)
for header in self.file_headers():
if file_header.startswith(header):
break
else:
raise FileTypeMismatchError('not a NCM file: bad file header')
        # Read the encrypted master key data
encrypted_masterkey_len = int.from_bytes(fileobj.read(4), 'little')
encrypted_masterkey = bytes(b ^ 0x64 for b in fileobj.read(encrypted_masterkey_len))
masterkey_cipher = StreamedAESWithModeECB(self.core_key())
        masterkey = masterkey_cipher.decrypt(encrypted_masterkey)[17:]  # strip the leading b'neteasecloudmusic'
        # Read the encrypted tag data
raw_encrypted_tagdata_len = int.from_bytes(fileobj.read(4), 'little')
tagdata = {}
if skip_tagdata:
fileobj.seek(raw_encrypted_tagdata_len, 1)
else:
raw_encrypted_tagdata = bytes(
b ^ 0x63 for b in fileobj.read(raw_encrypted_tagdata_len)
)
            encrypted_tagdata = b64decode(raw_encrypted_tagdata[22:], validate=True)  # strip the leading b"163 key(Don't modify):" before b64decode
identifier = raw_encrypted_tagdata
tagdata_cipher = StreamedAESWithModeECB(self.meta_key())
            tagdata.update(json.loads(tagdata_cipher.decrypt(encrypted_tagdata)[6:]))  # strip the leading b'music:' before JSON deserialization
tagdata['identifier'] = identifier.decode()
fileobj.seek(5, 1)
        # Read the cover data
cover_alloc = int.from_bytes(fileobj.read(4), 'little')
coverdata = b''
if skip_tagdata:
fileobj.seek(cover_alloc, 1)
else:
cover_size = int.from_bytes(fileobj.read(4), 'little')
if cover_size:
coverdata = fileobj.read(cover_size)
fileobj.seek(cover_alloc - cover_size, 1)
        # Store everything gathered above, plus the encrypted audio data, as attributes
self._tagdata = tagdata
self.coverdata = coverdata
self._cipher: NCMRC4Cipher = NCMRC4Cipher(masterkey)
self._raw = BytesIO(fileobj.read())
def save(self,
filething: utils.FileThing | None = None,
**kwargs
) -> None:
"""将当前 NCM 对象保存为一个 NCM 格式文件。
Args:
filething (file): 目标 NCM 文件的路径或文件对象,
留空则尝试使用 ``self.name``;如果两者都为空,
抛出 ``ValueError``
Keyword Args:
tagdata (dict): 向目标文件写入的标签信息;留空则使用 ``self.tagdata``
coverdata (bytes): 向目标文件写入的封面数据;留空则使用 ``self.coverdata``
Raises:
ValueError: 同时缺少参数 ``filething`` 和属性 ``self.name``
所有未知的关键字参数都会被忽略。
"""
tagdata: dict | None = kwargs.get('tagdata', None)
coverdata: bytes | None = kwargs.get('coverdata', None)
if filething:
if utils.is_filepath(filething):
fileobj: IO[bytes] = open(filething, 'wb') # type: ignore
else:
fileobj: IO[bytes] = filething # type: ignore
utils.verify_fileobj_writable(fileobj, bytes)
elif self._name:
fileobj: IO[bytes] = open(self._name, 'wb')
else:
raise ValueError('missing filepath or fileobj')
if tagdata is None:
tagdata = dp(self._tagdata)
else:
tagdata = dp(tagdata)
if coverdata is None:
coverdata = bytes(self.coverdata)
fileobj.seek(0, 0)
fileobj.write(b'CTENFDAM\x00\x00')
        # Encrypt and write the master key
masterkey = b'neteasecloudmusic' + self._cipher.key
masterkey_cipher = StreamedAESWithModeECB(self.core_key())
encrypted_masterkey = bytes(b ^ 0x64 for b in masterkey_cipher.encrypt(masterkey))
fileobj.write(len(encrypted_masterkey).to_bytes(4, 'little'))
fileobj.write(encrypted_masterkey)
        # Encrypt and write the tag data
tagdata.pop('identifier', None)
plain_tagdata = b'music:' + json.dumps(tagdata).encode()
tagdata_cipher = StreamedAESWithModeECB(self.meta_key())
encrypted_tagdata = tagdata_cipher.encrypt(plain_tagdata)
raw_encrypted_tagdata = bytes(b ^ 0x63 for b in b"163 key(Don't modify):" + b64encode(encrypted_tagdata))
fileobj.write(len(raw_encrypted_tagdata).to_bytes(4, 'little'))
fileobj.write(raw_encrypted_tagdata)
fileobj.seek(5, 1)
        # Write the cover data
cover_alloc = len(coverdata)
cover_size = cover_alloc
fileobj.write(cover_alloc.to_bytes(4, 'little'))
fileobj.write(cover_size.to_bytes(4, 'little'))
fileobj.write(coverdata)
        # Write the encrypted audio data
self._raw.seek(0, 0)
fileobj.write(self._raw.read())
if utils.is_filepath(filething):
fileobj.close()
@property
| 32.956364
| 138
| 0.589871
|
from __future__ import annotations
import json
from base64 import b64decode, b64encode
from copy import deepcopy as dp
from io import BytesIO
from random import randrange
from string import digits as strdigits
from typing import Generator, IO
from . import utils
from .common import Cipher, Crypter
from .exceptions import FileTypeMismatchError
from .standardciphers import StreamedAESWithModeECB
__all__ = ['NCM', 'NCMRC4Cipher']
class NCMRC4Cipher(Cipher):
@staticmethod
def cipher_name() -> str:
return 'RC4'
def __init__(self, key: bytes) -> None:
super().__init__(key)
        # Generate the S-box with the RC4 key-scheduling algorithm (KSA)
S = bytearray(range(256))
j = 0
key_len = len(key)
for i in range(256):
j = (j + S[i] + key[i % key_len]) & 0xff
S[i], S[j] = S[j], S[i]
        # Generate the keystream from the S-box with the PRGA
stream_short = bytearray(256)
for i in range(256):
_ = (i + 1) & 0xff
si = S[_] & 0xff
sj = S[(_ + si) & 0xff] & 0xff
stream_short[i] = S[(si + sj) & 0xff]
self._keystream_short = stream_short
def yield_keystream(self, src_len: int, offset: int) -> Generator[int, None, None]:
keystream_short = self._keystream_short
for i in range(offset, offset + src_len):
yield keystream_short[i & 0xff]
def decrypt(self, cipherdata: bytes, start_offset: int = 0) -> bytes:
cipherdata_len = len(cipherdata)
return utils.bytesxor(cipherdata, bytes(self.yield_keystream(cipherdata_len, start_offset)))
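# A tiny round-trip sketch (hypothetical key): the cipher is a plain XOR
# keystream, so applying decrypt() twice restores the original bytes.
def _example_rc4_roundtrip() -> bool:
    cipher = NCMRC4Cipher(b'0123456789abcdef')
    ciphertext = cipher.decrypt(b'hello')  # XOR: encrypting == decrypting
    return cipher.decrypt(ciphertext) == b'hello'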
class NCM(Crypter):
"""读写网易云音乐 NCM 格式的文件。
读取:
>>> ncmfile = NCM('./test1.ncm')
>>> data = ncmfile.read()
写入:
>>> ncmfile.write(b'Writted bytes')
创建、写入并保存:
>>> new_ncmfile = NCM() # 随机生成一个密钥
>>> with open('./metal.flac', 'rb') as f: # 写入未加密的文件数据
... new_ncmfile.write(f.read())
>>> new_ncmfile.save('./result.ncm')
>>> """
@staticmethod
def core_key() -> bytes:
return b'\x68\x7a\x48\x52\x41\x6d\x73\x6f\x35\x6b\x49\x6e\x62\x61\x78\x57'
@staticmethod
def meta_key():
return b'\x23\x31\x34\x6c\x6a\x6b\x5f\x21\x5c\x5d\x26\x30\x55\x3c\x27\x28'
@staticmethod
def file_headers() -> dict[bytes, str]:
return {
b'CTENFDAM': 'NCM'
}
def __init__(self,
filething: utils.FileThing | None = None,
**kwargs
) -> None:
"""读写网易云音乐 NCM 格式的文件。
Args:
filething (file): 源 NCM 文件的路径或文件对象;留空则视为创建一个空 NCM 文件
Keyword Args:
key (bytes): 加/解密数据所需的密钥;留空则会随机生成一个
所有未知的关键字参数都会被忽略。
"""
if filething is None:
self._raw = BytesIO()
self._name = None
key: bytes | None = kwargs.get('key', None)
if key is not None:
self._cipher: NCMRC4Cipher = NCMRC4Cipher(key)
else:
                # With no key and no file given, generate a random key of length 111 or 113
key_left = utils.gen_random_string(
randrange(27, 30), strdigits
).encode()
key_right = b'E7fT49x7dof9OKCgg9cdvhEuezy3iZCL1nFvBFd1T4uSktAJKmwZXsijPbijliionVUXXg9plTbXEclAE9Lb'
self._cipher = NCMRC4Cipher(key_left + key_right)
self._tagdata = {}
self.coverdata = b''
else:
super().__init__(filething, **kwargs)
def load(self,
filething: utils.FileThing,
**kwargs
) -> None:
"""将一个 NCM 文件加载到当前 NCM 对象中。
Args:
filething (file): 源 NCM 文件的路径或文件对象
Keyword Args:
skip_tagdata (bool): 加载文件时跳过加载标签信息和封面数据,默认为 ``False``
Raises:
FileTypeMismatchError: ``filething`` 不是一个 NCM 格式文件
所有未知的关键字参数都会被忽略。
"""
skip_tagdata: bool = kwargs.get('skip_tagdata', False)
if utils.is_filepath(filething):
fileobj: IO[bytes] = open(filething, 'rb') # type: ignore
self._name = fileobj.name
else:
fileobj: IO[bytes] = filething # type: ignore
self._name = None
utils.verify_fileobj_readable(fileobj, bytes)
utils.verify_fileobj_seekable(fileobj)
fileobj.seek(0, 0)
file_header = fileobj.read(10)
for header in self.file_headers():
if file_header.startswith(header):
break
else:
raise FileTypeMismatchError('not a NCM file: bad file header')
        # Read the encrypted master key data
encrypted_masterkey_len = int.from_bytes(fileobj.read(4), 'little')
encrypted_masterkey = bytes(b ^ 0x64 for b in fileobj.read(encrypted_masterkey_len))
masterkey_cipher = StreamedAESWithModeECB(self.core_key())
        masterkey = masterkey_cipher.decrypt(encrypted_masterkey)[17:]  # strip the leading b'neteasecloudmusic'
        # Read the encrypted tag data
raw_encrypted_tagdata_len = int.from_bytes(fileobj.read(4), 'little')
tagdata = {}
if skip_tagdata:
fileobj.seek(raw_encrypted_tagdata_len, 1)
else:
raw_encrypted_tagdata = bytes(
b ^ 0x63 for b in fileobj.read(raw_encrypted_tagdata_len)
)
            encrypted_tagdata = b64decode(raw_encrypted_tagdata[22:], validate=True)  # strip the leading b"163 key(Don't modify):" before b64decode
identifier = raw_encrypted_tagdata
tagdata_cipher = StreamedAESWithModeECB(self.meta_key())
            tagdata.update(json.loads(tagdata_cipher.decrypt(encrypted_tagdata)[6:]))  # strip the leading b'music:' before JSON deserialization
tagdata['identifier'] = identifier.decode()
fileobj.seek(5, 1)
        # Read the cover data
cover_alloc = int.from_bytes(fileobj.read(4), 'little')
coverdata = b''
if skip_tagdata:
fileobj.seek(cover_alloc, 1)
else:
cover_size = int.from_bytes(fileobj.read(4), 'little')
if cover_size:
coverdata = fileobj.read(cover_size)
fileobj.seek(cover_alloc - cover_size, 1)
        # Store everything gathered above, plus the encrypted audio data, as attributes
self._tagdata = tagdata
self.coverdata = coverdata
self._cipher: NCMRC4Cipher = NCMRC4Cipher(masterkey)
self._raw = BytesIO(fileobj.read())
def save(self,
filething: utils.FileThing | None = None,
**kwargs
) -> None:
"""将当前 NCM 对象保存为一个 NCM 格式文件。
Args:
filething (file): 目标 NCM 文件的路径或文件对象,
留空则尝试使用 ``self.name``;如果两者都为空,
抛出 ``ValueError``
Keyword Args:
tagdata (dict): 向目标文件写入的标签信息;留空则使用 ``self.tagdata``
coverdata (bytes): 向目标文件写入的封面数据;留空则使用 ``self.coverdata``
Raises:
ValueError: 同时缺少参数 ``filething`` 和属性 ``self.name``
所有未知的关键字参数都会被忽略。
"""
tagdata: dict | None = kwargs.get('tagdata', None)
coverdata: bytes | None = kwargs.get('coverdata', None)
if filething:
if utils.is_filepath(filething):
fileobj: IO[bytes] = open(filething, 'wb') # type: ignore
else:
fileobj: IO[bytes] = filething # type: ignore
utils.verify_fileobj_writable(fileobj, bytes)
elif self._name:
fileobj: IO[bytes] = open(self._name, 'wb')
else:
raise ValueError('missing filepath or fileobj')
if tagdata is None:
tagdata = dp(self._tagdata)
else:
tagdata = dp(tagdata)
if coverdata is None:
coverdata = bytes(self.coverdata)
fileobj.seek(0, 0)
fileobj.write(b'CTENFDAM\x00\x00')
        # Encrypt and write the master key
masterkey = b'neteasecloudmusic' + self._cipher.key
masterkey_cipher = StreamedAESWithModeECB(self.core_key())
encrypted_masterkey = bytes(b ^ 0x64 for b in masterkey_cipher.encrypt(masterkey))
fileobj.write(len(encrypted_masterkey).to_bytes(4, 'little'))
fileobj.write(encrypted_masterkey)
        # Encrypt and write the tag data
tagdata.pop('identifier', None)
plain_tagdata = b'music:' + json.dumps(tagdata).encode()
tagdata_cipher = StreamedAESWithModeECB(self.meta_key())
encrypted_tagdata = tagdata_cipher.encrypt(plain_tagdata)
raw_encrypted_tagdata = bytes(b ^ 0x63 for b in b"163 key(Don't modify):" + b64encode(encrypted_tagdata))
fileobj.write(len(raw_encrypted_tagdata).to_bytes(4, 'little'))
fileobj.write(raw_encrypted_tagdata)
fileobj.seek(5, 1)
        # Write the cover data
cover_alloc = len(coverdata)
cover_size = cover_alloc
fileobj.write(cover_alloc.to_bytes(4, 'little'))
fileobj.write(cover_size.to_bytes(4, 'little'))
fileobj.write(coverdata)
        # Write the encrypted audio data
self._raw.seek(0, 0)
fileobj.write(self._raw.read())
if utils.is_filepath(filething):
fileobj.close()
@property
def tagdata(self) -> dict:
return self._tagdata
| 1,288
| 131
| 127
|
c5c8b58f25c93553d4b99dc4493692bf12596e3d
| 153
|
py
|
Python
|
unstable_baselines/lib/__init__.py
|
Ending2015a/unstable_baselines
|
1d304115406f6e29186cedb0160811d4139e2733
|
[
"MIT"
] | 10
|
2021-04-26T17:48:27.000Z
|
2022-03-10T14:32:26.000Z
|
unstable_baselines/lib/__init__.py
|
Ending2015a/unstable_baselines
|
1d304115406f6e29186cedb0160811d4139e2733
|
[
"MIT"
] | null | null | null |
unstable_baselines/lib/__init__.py
|
Ending2015a/unstable_baselines
|
1d304115406f6e29186cedb0160811d4139e2733
|
[
"MIT"
] | null | null | null |
from . import base
from . import data
from . import envs
from . import nets
from . import prob
from . import sche
from . import patch
from . import utils
| 19.125
| 19
| 0.745098
|
from . import base
from . import data
from . import envs
from . import nets
from . import prob
from . import sche
from . import patch
from . import utils
| 0
| 0
| 0
|
ead8825c7772e8fc261d2f1de80ea3eefc760d6d
| 759
|
py
|
Python
|
application/configs.py
|
Jasbeauty/mine_blog
|
22899be4db20a92ad40cf35f84a3123dd0ebeee6
|
[
"Apache-2.0"
] | null | null | null |
application/configs.py
|
Jasbeauty/mine_blog
|
22899be4db20a92ad40cf35f84a3123dd0ebeee6
|
[
"Apache-2.0"
] | null | null | null |
application/configs.py
|
Jasbeauty/mine_blog
|
22899be4db20a92ad40cf35f84a3123dd0ebeee6
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@127.0.0.1:3306/mine_blog"
SQLALCHEMY_TRACK_MODIFICATIONS = True
JWT_SECRET_KEY = 'jwt_secret'
JWT_AUTH_URL_RULE = '/api/v1/auth'
JWT_EXPIRATION_DELTA = timedelta(seconds=12000)
SYS_UPLOAD_PATH = '/home/laowang/gitwarehouse/mine_blog/application/static/img/'
GITHUB_OAUTH = {
'CLIENT_ID': 'f9fa118d12389497686b',
'CLIENT_SECRET': 'a67149f74ce50c1e95c2d9bdeba7bbd579eb8d45',
'AUTHORIZE_PATH': 'https://github.com/login/oauth/authorize',
'ACCESS_TOKEN_PATH': 'https://github.com/login/oauth/access_token',
'USER_MESSAGE_PATH': 'https://api.github.com/user',
}
TENCENT_OAUTH = {
'secret_id': '',
'secret_key': '',
'region': '',
'bucket': ''
}
| 36.142857
| 80
| 0.73386
|
from datetime import timedelta
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@127.0.0.1:3306/mine_blog"
SQLALCHEMY_TRACK_MODIFICATIONS = True
JWT_SECRET_KEY = 'jwt_secret'
JWT_AUTH_URL_RULE = '/api/v1/auth'
JWT_EXPIRATION_DELTA = timedelta(seconds=12000)
SYS_UPLOAD_PATH = '/home/laowang/gitwarehouse/mine_blog/application/static/img/'
GITHUB_OAUTH = {
'CLIENT_ID': 'f9fa118d12389497686b',
'CLIENT_SECRET': 'a67149f74ce50c1e95c2d9bdeba7bbd579eb8d45',
'AUTHORIZE_PATH': 'https://github.com/login/oauth/authorize',
'ACCESS_TOKEN_PATH': 'https://github.com/login/oauth/access_token',
'USER_MESSAGE_PATH': 'https://api.github.com/user',
}
TENCENT_OAUTH = {
'secret_id': '',
'secret_key': '',
'region': '',
'bucket': ''
}
| 0
| 0
| 0
|
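A flat module of uppercase constants like configs.py above is normally pulled into Flask with config.from_object, which copies every uppercase attribute into app.config; a minimal sketch (assuming the module is importable as application.configs):

from flask import Flask

app = Flask(__name__)
app.config.from_object('application.configs')  # copies uppercase names only
print(app.config['JWT_AUTH_URL_RULE'])         # -> /api/v1/auth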
4f293f13da8b2b212878611a5435d18eee9f1452
| 387
|
py
|
Python
|
user/migrations/0002_customuser_address.py
|
Zomba4okk/MailApp
|
2d5cb4f26e3abe2b8e70f15ead888c502741c90b
|
[
"MIT"
] | null | null | null |
user/migrations/0002_customuser_address.py
|
Zomba4okk/MailApp
|
2d5cb4f26e3abe2b8e70f15ead888c502741c90b
|
[
"MIT"
] | null | null | null |
user/migrations/0002_customuser_address.py
|
Zomba4okk/MailApp
|
2d5cb4f26e3abe2b8e70f15ead888c502741c90b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-17 12:05
from django.db import migrations, models
| 20.368421
| 62
| 0.591731
|
# Generated by Django 3.1.4 on 2020-12-17 12:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='address',
field=models.CharField(max_length=100, null=True),
),
]
| 0
| 273
| 23
|
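The AddField operation in the migration above is exactly what makemigrations emits after one line is added to the model; a sketch of the corresponding model change (assuming CustomUser extends AbstractUser, which the excerpt does not show):

from django.contrib.auth.models import AbstractUser
from django.db import models

class CustomUser(AbstractUser):
    # Nullable column introduced by migration 0002_customuser_address
    address = models.CharField(max_length=100, null=True)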
ff0709071d96c95956e90d661b9d512c8f75af12
| 712
|
py
|
Python
|
Intro CS/Py4e/sum_comments.py
|
willian-pessoa/My-codes-in-CS-learning-journey
|
21970780b42435c6be5d5c240b033ff469eaddc5
|
[
"MIT"
] | null | null | null |
Intro CS/Py4e/sum_comments.py
|
willian-pessoa/My-codes-in-CS-learning-journey
|
21970780b42435c6be5d5c240b033ff469eaddc5
|
[
"MIT"
] | null | null | null |
Intro CS/Py4e/sum_comments.py
|
willian-pessoa/My-codes-in-CS-learning-journey
|
21970780b42435c6be5d5c240b033ff469eaddc5
|
[
"MIT"
] | null | null | null |
# To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
total = 0
count = 0
# Retrieve all of the span tags
tags = soup('span')
for tag in tags:
# Look at the parts of a tag
number = int(tag.contents[0])
total += number
count += 1
print("Sum:", total)
print("Count:", count)
| 24.551724
| 51
| 0.685393
|
# To run this, download the BeautifulSoup zip file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter - ')
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
total = 0
count = 0
# Retrieve all of the span tags
tags = soup('span')
for tag in tags:
# Look at the parts of a tag
number = int(tag.contents[0])
total += number
count += 1
print("Sum:", total)
print("Count:", count)
| 0
| 0
| 0
|
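The scraper above sums the integer content of every span tag; the same parsing logic can be verified offline against a literal HTML string, with no network or SSL setup:

from bs4 import BeautifulSoup

html = "<html><body><span>10</span><span>32</span></body></html>"
soup = BeautifulSoup(html, "html.parser")
values = [int(tag.contents[0]) for tag in soup('span')]
assert sum(values) == 42 and len(values) == 2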
af2dccf910778c1f8687b312a34eabc6f703bd55
| 328
|
py
|
Python
|
app/config.py
|
Mehranalam/Emoji
|
2f6fbc554589f05e599239b48a4ee0ab445071da
|
[
"MIT"
] | 3
|
2022-01-16T05:08:25.000Z
|
2022-02-17T07:39:12.000Z
|
app/config.py
|
Mehranalam/Emoji
|
2f6fbc554589f05e599239b48a4ee0ab445071da
|
[
"MIT"
] | null | null | null |
app/config.py
|
Mehranalam/Emoji
|
2f6fbc554589f05e599239b48a4ee0ab445071da
|
[
"MIT"
] | 1
|
2022-01-22T13:17:33.000Z
|
2022-01-22T13:17:33.000Z
|
"""
Donate to the creator: { just follow me on GitHub
and star this project }
"""
# complete these fields
| 16.4
| 48
| 0.704268
|
"""
Donate to Createor : { just follow me on github
and star to this project }
"""
class configBot():
GROUP_NAME = "YOUR_GROUP_NAME"
SESSION_NAME = "YOUR_SESSION_NAME"
API_ID = "YOUR_API_ID"
API_HASH = "YOUR_API_HASH"
BOT_TOKEN = "BOT_TOKEN"
WELCOMING_PASSED = "PASSED_MESSAGE"
# complete these fields
| 0
| 180
| 23
|
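Settings classes like configBot are consumed as plain class attributes, and a test configuration can override individual fields by subclassing; a small sketch (assuming the configBot class above is in scope):

class TestConfig(configBot):
    GROUP_NAME = 'test-group'              # override one field for tests

assert TestConfig.GROUP_NAME == 'test-group'
assert TestConfig.API_ID == 'YOUR_API_ID'  # everything else is inherited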
b22343d97b25c2391d780a533d3c31468e2929e7
| 1,973
|
py
|
Python
|
PizzaHat/bot.py
|
alexyy802/PizzaHat
|
ab544c9928bebcb3d6fe418c2753edd1d4f4dfc6
|
[
"Apache-2.0"
] | 1
|
2021-12-06T23:38:26.000Z
|
2021-12-06T23:38:26.000Z
|
PizzaHat/bot.py
|
alexyy802/PizzaHat
|
ab544c9928bebcb3d6fe418c2753edd1d4f4dfc6
|
[
"Apache-2.0"
] | null | null | null |
PizzaHat/bot.py
|
alexyy802/PizzaHat
|
ab544c9928bebcb3d6fe418c2753edd1d4f4dfc6
|
[
"Apache-2.0"
] | null | null | null |
import discord
from discord.ext import commands
import asyncpg
import datetime
from dotenv import load_dotenv
import os
load_dotenv('.env')
INITIAL_EXTENSIONS = [
'cogs.dev',
'cogs.events',
'cogs.fun',
'cogs.games',
'cogs.help',
'cogs.image',
'cogs.mod',
'cogs.utility'
]
bot = PizzaHat()
if __name__ == '__main__':
bot.run()
| 31.31746
| 124
| 0.603649
|
import discord
from discord.ext import commands
import asyncpg
import datetime
from dotenv import load_dotenv
import os
load_dotenv('.env')
INITIAL_EXTENSIONS = [
'cogs.dev',
'cogs.events',
'cogs.fun',
'cogs.games',
'cogs.help',
'cogs.image',
'cogs.mod',
'cogs.utility'
]
class PizzaHat(commands.Bot):
def __init__(self):
super().__init__(
command_prefix=commands.when_mentioned_or('p!','P!'),
intents=discord.Intents.all(),
case_insensitive=True,
strip_after_prefix=True,
activity=discord.Activity(type=discord.ActivityType.watching, name='dsc.gg/pizza-invite | discord.gg/WhNVDTF'),
mentions=discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=True)
)
self._BotBase__cogs = commands.core._CaseInsensitiveDict()
self.yes = '<:yes:813819712953647206>'
self.no = '<:no:829841023445631017>'
self.color = discord.Color.blue()
self.christmas = discord.Color.red()
self.token = os.environ['token']
self.loop.run_until_complete(self.create_db_pool())
for extension in INITIAL_EXTENSIONS:
try:
self.load_extension(extension)
except Exception as e:
print('Failed to load extension {}\n{}: {}'.format(
extension, type(e).__name__, e))
async def on_ready(self):
if not hasattr(self, 'uptime'):
self.uptime = datetime.datetime.utcnow()
print('Bot online')
async def create_db_pool(self):
self.db = await asyncpg.create_pool(database="PizzaHat", user="postgres", password=os.getenv('PG_PASS'))
async def close(self):
await super().close()
def run(self):
super().run(self.token, reconnect=True)
bot = PizzaHat()
if __name__ == '__main__':
bot.run()
| 1,405
| 8
| 172
|
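PizzaHat does its one-time setup in __init__: create the asyncpg pool with run_until_complete, then load each cog, printing failures instead of crashing. A stripped-down sketch of that guarded load loop (discord.py 1.x-style synchronous load_extension, matching the code above; the extension names passed in are placeholders):

from discord.ext import commands

class MiniBot(commands.Bot):
    def __init__(self, extensions):
        super().__init__(command_prefix='!')
        for ext in extensions:
            try:
                self.load_extension(ext)  # synchronous in discord.py 1.x
            except Exception as e:
                print('Failed to load extension {}\n{}: {}'.format(
                    ext, type(e).__name__, e))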
b8fa7a10579f9d489fdc7c485b64a1be798e2fae
| 6,671
|
py
|
Python
|
WaveBlocksND/HagedornBasisEvaluationPhi.py
|
raoulbq/WaveBlocksND
|
225b5dd9b1af1998bd40b5f6467ee959292b6a83
|
[
"BSD-3-Clause"
] | 3
|
2016-09-01T21:13:54.000Z
|
2020-03-23T15:45:32.000Z
|
WaveBlocksND/HagedornBasisEvaluationPhi.py
|
raoulbq/WaveBlocksND
|
225b5dd9b1af1998bd40b5f6467ee959292b6a83
|
[
"BSD-3-Clause"
] | null | null | null |
WaveBlocksND/HagedornBasisEvaluationPhi.py
|
raoulbq/WaveBlocksND
|
225b5dd9b1af1998bd40b5f6467ee959292b6a83
|
[
"BSD-3-Clause"
] | 6
|
2016-03-16T15:22:01.000Z
|
2021-03-13T14:06:54.000Z
|
"""The WaveBlocks Project
The basic common algorithms for evaluating Hagedorn basis functions
of the old kind.
@author: R. Bourquin
@copyright: Copyright (C) 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import complexfloating, dot, vstack, zeros, conjugate
from scipy import sqrt
from scipy.linalg import det, inv
from WaveBlocksND.HagedornBasisEvaluationCommon import HagedornBasisEvaluationCommon
__all__ = ["HagedornBasisEvaluationPhi"]
class HagedornBasisEvaluationPhi(HagedornBasisEvaluationCommon):
r"""
"""
def evaluate_basis_at(self, grid, component, *, prefactor=False):
r"""Evaluate the basis functions :math:`\phi_k` recursively at the given nodes :math:`\gamma`.
:param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
:type grid: A class having a :py:meth:`get_nodes(...)` method.
:param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
:param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
:type prefactor: Boolean, default is ``False``.
:return: A two-dimensional ndarray :math:`H` of shape :math:`(|\mathfrak{K}_i|, |\Gamma|)` where
the entry :math:`H[\mu(k), i]` is the value of :math:`\phi_k(\gamma_i)`.
"""
D = self._dimension
bas = self._basis_shapes[component]
bs = self._basis_sizes[component]
# The grid
grid = self._grid_wrap(grid)
nodes = grid.get_nodes()
nn = grid.get_number_nodes(overall=True)
# Allocate the storage array
phi = zeros((bs, nn), dtype=complexfloating)
# Precompute some constants
Pi = self.get_parameters(component=component)
q, p, Q, P, _ = Pi
Qinv = inv(Q)
Qbar = conjugate(Q)
QQ = dot(Qinv, Qbar)
# Compute the ground state phi_0 via direct evaluation
mu0 = bas[tuple(D * [0])]
phi[mu0, :] = self._evaluate_phi0(component, nodes, prefactor=False)
# Compute all higher order states phi_k via recursion
for d in range(D):
# Iterator for all valid index vectors k
indices = bas.get_node_iterator(mode="chain", direction=d)
for k in indices:
# Current index vector
ki = vstack(k)
# Access predecessors
phim = zeros((D, nn), dtype=complexfloating)
for j, kpj in bas.get_neighbours(k, selection="backward"):
mukpj = bas[kpj]
phim[j, :] = phi[mukpj, :]
# Compute 3-term recursion
p1 = (nodes - q) * phi[bas[k], :]
p2 = sqrt(ki) * phim
t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
t2 = dot(QQ[d, :], p2)
# Find multi-index where to store the result
kped = bas.get_neighbours(k, selection="forward", direction=d)
# Did we find this k?
if len(kped) > 0:
kped = kped[0]
# Store computed value
phi[bas[kped[1]], :] = (t1 - t2) / sqrt(ki[d] + 1.0)
if prefactor is True:
phi = phi / self._get_sqrt(component)(det(Q))
return phi
def slim_recursion(self, grid, component, *, prefactor=False):
r"""Evaluate the Hagedorn wavepacket :math:`\Psi` at the given nodes :math:`\gamma`.
This routine is a slim version compared to the full basis evaluation. At every moment
we store only the data we really need to compute the next step until we hit the highest
order basis functions.
:param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
:type grid: A class having a :py:meth:`get_nodes(...)` method.
:param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
:param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
:type prefactor: Boolean, default is ``False``.
:return: A list of arrays or a single array containing the values of the :math:`\Phi_i`
at the nodes :math:`\gamma`.
Note that this function does not include the global phase :math:`\exp(\frac{i S}{\varepsilon^2})`.
"""
D = self._dimension
# Precompute some constants
Pi = self.get_parameters(component=component)
q, p, Q, P, _ = Pi
Qinv = inv(Q)
Qbar = conjugate(Q)
QQ = dot(Qinv, Qbar)
# The basis shape
bas = self._basis_shapes[component]
Z = tuple(D * [0])
# Book keeping
todo = []
newtodo = [Z]
olddelete = []
delete = []
tmp = {}
# The grid nodes
grid = self._grid_wrap(grid)
nn = grid.get_number_nodes(overall=True)
nodes = grid.get_nodes()
# Evaluate phi0
tmp[Z] = self._evaluate_phi0(component, nodes, prefactor=False)
psi = self._coefficients[component][bas[Z], 0] * tmp[Z]
# Iterate for higher order states
while len(newtodo) != 0:
# Delete results that never will be used again
for d in olddelete:
del tmp[d]
# Exchange queues
todo = newtodo
newtodo = []
olddelete = delete
delete = []
# Compute new results
for k in todo:
# Center stencil at node k
ki = vstack(k)
# Access predecessors
phim = zeros((D, nn), dtype=complexfloating)
for j, kpj in bas.get_neighbours(k, selection="backward"):
phim[j, :] = tmp[kpj]
# Compute the neighbours
for d, n in bas.get_neighbours(k, selection="forward"):
if n not in tmp.keys():
# Compute 3-term recursion
p1 = (nodes - q) * tmp[k]
p2 = sqrt(ki) * phim
t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
t2 = dot(QQ[d, :], p2)
# Store computed value
tmp[n] = (t1 - t2) / sqrt(ki[d] + 1.0)
# And update the result
psi = psi + self._coefficients[component][bas[n], 0] * tmp[n]
newtodo.append(n)
delete.append(k)
if prefactor is True:
psi = psi / self._get_sqrt(component)(det(Q))
return psi
| 35.296296
| 106
| 0.546695
|
"""The WaveBlocks Project
The basic common algorithms for evaluating Hagedorn basis functions
of the old kind.
@author: R. Bourquin
@copyright: Copyright (C) 2016 R. Bourquin
@license: Modified BSD License
"""
from numpy import complexfloating, dot, vstack, zeros, conjugate
from scipy import sqrt
from scipy.linalg import det, inv
from WaveBlocksND.HagedornBasisEvaluationCommon import HagedornBasisEvaluationCommon
__all__ = ["HagedornBasisEvaluationPhi"]
class HagedornBasisEvaluationPhi(HagedornBasisEvaluationCommon):
r"""
"""
def evaluate_basis_at(self, grid, component, *, prefactor=False):
r"""Evaluate the basis functions :math:`\phi_k` recursively at the given nodes :math:`\gamma`.
:param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
:type grid: A class having a :py:meth:`get_nodes(...)` method.
:param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
:param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
:type prefactor: Boolean, default is ``False``.
:return: A two-dimensional ndarray :math:`H` of shape :math:`(|\mathfrak{K}_i|, |\Gamma|)` where
the entry :math:`H[\mu(k), i]` is the value of :math:`\phi_k(\gamma_i)`.
"""
D = self._dimension
bas = self._basis_shapes[component]
bs = self._basis_sizes[component]
# The grid
grid = self._grid_wrap(grid)
nodes = grid.get_nodes()
nn = grid.get_number_nodes(overall=True)
# Allocate the storage array
phi = zeros((bs, nn), dtype=complexfloating)
# Precompute some constants
Pi = self.get_parameters(component=component)
q, p, Q, P, _ = Pi
Qinv = inv(Q)
Qbar = conjugate(Q)
QQ = dot(Qinv, Qbar)
# Compute the ground state phi_0 via direct evaluation
mu0 = bas[tuple(D * [0])]
phi[mu0, :] = self._evaluate_phi0(component, nodes, prefactor=False)
# Compute all higher order states phi_k via recursion
for d in range(D):
# Iterator for all valid index vectors k
indices = bas.get_node_iterator(mode="chain", direction=d)
for k in indices:
# Current index vector
ki = vstack(k)
# Access predecessors
phim = zeros((D, nn), dtype=complexfloating)
for j, kpj in bas.get_neighbours(k, selection="backward"):
mukpj = bas[kpj]
phim[j, :] = phi[mukpj, :]
# Compute 3-term recursion
p1 = (nodes - q) * phi[bas[k], :]
p2 = sqrt(ki) * phim
t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
t2 = dot(QQ[d, :], p2)
# Find multi-index where to store the result
kped = bas.get_neighbours(k, selection="forward", direction=d)
# Did we find this k?
if len(kped) > 0:
kped = kped[0]
# Store computed value
phi[bas[kped[1]], :] = (t1 - t2) / sqrt(ki[d] + 1.0)
if prefactor is True:
phi = phi / self._get_sqrt(component)(det(Q))
return phi
def slim_recursion(self, grid, component, *, prefactor=False):
r"""Evaluate the Hagedorn wavepacket :math:`\Psi` at the given nodes :math:`\gamma`.
This routine is a slim version compared to the full basis evaluation. At every moment
we store only the data we really need to compute the next step until we hit the highest
order basis functions.
:param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
:type grid: A class having a :py:meth:`get_nodes(...)` method.
:param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
:param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
:type prefactor: Boolean, default is ``False``.
:return: A list of arrays or a single array containing the values of the :math:`\Phi_i`
at the nodes :math:`\gamma`.
Note that this function does not include the global phase :math:`\exp(\frac{i S}{\varepsilon^2})`.
"""
D = self._dimension
# Precompute some constants
Pi = self.get_parameters(component=component)
q, p, Q, P, _ = Pi
Qinv = inv(Q)
Qbar = conjugate(Q)
QQ = dot(Qinv, Qbar)
# The basis shape
bas = self._basis_shapes[component]
Z = tuple(D * [0])
# Book keeping
todo = []
newtodo = [Z]
olddelete = []
delete = []
tmp = {}
# The grid nodes
grid = self._grid_wrap(grid)
nn = grid.get_number_nodes(overall=True)
nodes = grid.get_nodes()
# Evaluate phi0
tmp[Z] = self._evaluate_phi0(component, nodes, prefactor=False)
psi = self._coefficients[component][bas[Z], 0] * tmp[Z]
# Iterate for higher order states
while len(newtodo) != 0:
# Delete results that never will be used again
for d in olddelete:
del tmp[d]
# Exchange queues
todo = newtodo
newtodo = []
olddelete = delete
delete = []
# Compute new results
for k in todo:
# Center stencil at node k
ki = vstack(k)
# Access predecessors
phim = zeros((D, nn), dtype=complexfloating)
for j, kpj in bas.get_neighbours(k, selection="backward"):
phim[j, :] = tmp[kpj]
# Compute the neighbours
for d, n in bas.get_neighbours(k, selection="forward"):
if n not in tmp.keys():
# Compute 3-term recursion
p1 = (nodes - q) * tmp[k]
p2 = sqrt(ki) * phim
t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
t2 = dot(QQ[d, :], p2)
# Store computed value
tmp[n] = (t1 - t2) / sqrt(ki[d] + 1.0)
# And update the result
psi = psi + self._coefficients[component][bas[n], 0] * tmp[n]
newtodo.append(n)
delete.append(k)
if prefactor is True:
psi = psi / self._get_sqrt(component)(det(Q))
return psi
| 0
| 0
| 0
|
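In formula form, the three-term recursion implemented by both methods above reads (transcribed directly from the code, with e_d the d-th unit multi-index and ε the semiclassical parameter):

\phi_{k+e_d}(x) = \frac{1}{\sqrt{k_d+1}} \left[ \sqrt{\frac{2}{\varepsilon^2}} \sum_j Q^{-1}_{dj}\,(x_j-q_j)\,\phi_k(x) \;-\; \sum_j \left(Q^{-1}\overline{Q}\right)_{dj} \sqrt{k_j}\,\phi_{k-e_j}(x) \right]

The slim_recursion variant evaluates the same relation but keeps only the φ values still needed as predecessors, trading recomputation bookkeeping for memory.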
45da6e5c720cded5d5fb12ac3b7741e6077ab544
| 981
|
py
|
Python
|
server/gestion/models/orderRando.py
|
JetLightStudio/Jet-Gest-stock-management
|
333cbc3dd1b379f53f67250fbd581cbce8e20ca8
|
[
"MIT"
] | 1
|
2021-08-18T18:53:02.000Z
|
2021-08-18T18:53:02.000Z
|
server/gestion/models/orderRando.py
|
JetLightStudio/Jet-Gest-stock-management
|
333cbc3dd1b379f53f67250fbd581cbce8e20ca8
|
[
"MIT"
] | null | null | null |
server/gestion/models/orderRando.py
|
JetLightStudio/Jet-Gest-stock-management
|
333cbc3dd1b379f53f67250fbd581cbce8e20ca8
|
[
"MIT"
] | 1
|
2021-08-04T23:53:52.000Z
|
2021-08-04T23:53:52.000Z
|
from datetime import datetime
from django.db import models
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db.models.signals import post_save
from django.dispatch import receiver
from serverConfig.models.gestionParams import GestionParam
from gestion.models.client import Client
from computedfields.models import ComputedFieldsModel, computed
| 35.035714
| 94
| 0.769623
|
from datetime import datetime
from django.db import models
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db.models.signals import post_save
from django.dispatch import receiver
from serverConfig.models.gestionParams import GestionParam
from gestion.models.client import Client
from computedfields.models import ComputedFieldsModel, computed
class OrderRando(ComputedFieldsModel):
seller = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)
client = models.CharField(max_length=40, blank=False, null=False)
creationDate = models.DateTimeField(default=datetime.now, blank=False)
total = models.FloatField(default=0)
def orderList(self):
from .productRandoOrder import ProductRandoOrder
return ProductRandoOrder.objects.filter(order=self)
def __str__(self):
return "#BONPOUR" + str(self.id).zfill(7)
| 166
| 362
| 24
|
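A sketch of the model above in use: total is a plain float column, orderList is a reverse lookup through ProductRandoOrder, and __str__ zero-pads the primary key (runnable only inside the project's Django environment; the field values are illustrative):

order = OrderRando.objects.create(client='ACME', total=120.0)
print(order)                    # e.g. #BONPOUR0000001, depending on the id
for line in order.orderList():  # ProductRandoOrder rows pointing at this order
    print(line)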
5dfbc25fa139beaf44702988ff3b331c046b4a4f
| 2,913
|
py
|
Python
|
Pong/pong.py
|
Grantlee11/PyPong
|
af5c2cf41b361a9f91b5f3b43fbb5eec9e6a771b
|
[
"MIT"
] | null | null | null |
Pong/pong.py
|
Grantlee11/PyPong
|
af5c2cf41b361a9f91b5f3b43fbb5eec9e6a771b
|
[
"MIT"
] | null | null | null |
Pong/pong.py
|
Grantlee11/PyPong
|
af5c2cf41b361a9f91b5f3b43fbb5eec9e6a771b
|
[
"MIT"
] | null | null | null |
# Simple PyPong in Python 3
import turtle
window = turtle.Screen()
window.title("PyPong by Grantlee11")
window.bgcolor("black")
window.setup(width = 800, height = 600)
window.tracer(0)
# Score
scoreA = 0
scoreB = 0
# Paddle A
paddleA = turtle.Turtle()
paddleA.speed(0)
paddleA.shape("square")
paddleA.color("white")
paddleA.shapesize(stretch_wid = 5, stretch_len = 1)
paddleA.penup()
paddleA.goto(-350, 0)
# Paddle B
paddleB = turtle.Turtle()
paddleB.speed(0)
paddleB.shape("square")
paddleB.color("white")
paddleB.shapesize(stretch_wid = 5, stretch_len = 1)
paddleB.penup()
paddleB.goto(350, 0)
# Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0)
# These numbers are machine-specific: they set the ball's step per loop iteration, so you may need to adjust them on your computer to control the speed of the ball
ball.dx = 0.1
ball.dy = 0.1
# Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0", align = "center", font = ("Courier", 24, "normal"))
# Functions
# Keyboard binding
window.listen()
window.onkeypress(paddleAUp, "w")
window.onkeypress(paddleADown, "s")
window.onkeypress(paddleBUp, "Up")
window.onkeypress(paddleBDown, "Down")
# Main game loop
while True:
window.update()
# Move the ball
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
# Border checking
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
if ball.xcor() > 390:
ball.goto(0, 0)
ball.dx *= -1
scoreA += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(scoreA, scoreB), align = "center", font = ("Courier", 24, "normal"))
if ball.xcor() < -390:
ball.goto(0, 0)
ball.dx *= -1
scoreB += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(scoreA, scoreB), align = "center", font = ("Courier", 24, "normal"))
# Paddle and ball collisions
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddleB.ycor() + 40 and ball.ycor() > paddleB.ycor() - 40):
ball.setx(340)
ball.dx *= -1
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddleA.ycor() + 40 and ball.ycor() > paddleA.ycor() - 40):
ball.setx(-340)
ball.dx *= -1
| 20.370629
| 141
| 0.599725
|
# Simple PyPong in Python 3
import turtle
window = turtle.Screen()
window.title("PyPong by Grantlee11")
window.bgcolor("black")
window.setup(width = 800, height = 600)
window.tracer(0)
# Score
scoreA = 0
scoreB = 0
# Paddle A
paddleA = turtle.Turtle()
paddleA.speed(0)
paddleA.shape("square")
paddleA.color("white")
paddleA.shapesize(stretch_wid = 5, stretch_len = 1)
paddleA.penup()
paddleA.goto(-350, 0)
# Paddle B
paddleB = turtle.Turtle()
paddleB.speed(0)
paddleB.shape("square")
paddleB.color("white")
paddleB.shapesize(stretch_wid = 5, stretch_len = 1)
paddleB.penup()
paddleB.goto(350, 0)
# Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0)
# These numbers are machine-specific: they set the ball's step per loop iteration, so you may need to adjust them on your computer to control the speed of the ball
ball.dx = 0.1
ball.dy = 0.1
# Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0", align = "center", font = ("Courier", 24, "normal"))
# Functions
def paddleAUp():
y = paddleA.ycor()
if paddleA.ycor() < 260:
y += 20
paddleA.sety(y)
def paddleADown():
y = paddleA.ycor()
if paddleA.ycor() > -260:
y -= 20
paddleA.sety(y)
def paddleBUp():
y = paddleB.ycor()
if paddleB.ycor() < 260:
y += 20
paddleB.sety(y)
def paddleBDown():
y = paddleB.ycor()
if paddleB.ycor() > -260:
y -= 20
paddleB.sety(y)
# Keyboard binding
window.listen()
window.onkeypress(paddleAUp, "w")
window.onkeypress(paddleADown, "s")
window.onkeypress(paddleBUp, "Up")
window.onkeypress(paddleBDown, "Down")
# Main game loop
while True:
window.update()
# Move the ball
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
# Border checking
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
if ball.xcor() > 390:
ball.goto(0, 0)
ball.dx *= -1
scoreA += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(scoreA, scoreB), align = "center", font = ("Courier", 24, "normal"))
if ball.xcor() < -390:
ball.goto(0, 0)
ball.dx *= -1
scoreB += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(scoreA, scoreB), align = "center", font = ("Courier", 24, "normal"))
# Paddle and ball collisions
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddleB.ycor() + 40 and ball.ycor() > paddleB.ycor() - 40):
ball.setx(340)
ball.dx *= -1
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddleA.ycor() + 40 and ball.ycor() > paddleA.ycor() - 40):
ball.setx(-340)
ball.dx *= -1
| 338
| 0
| 91
|
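The machine-specific dx/dy above exist because the loop moves the ball a fixed distance per iteration, so a faster machine runs a faster game. A hedged stdlib-only sketch of the usual fix, scaling each step by elapsed wall-clock time so velocity is expressed in pixels per second:

import time

dx, dy = 120.0, 120.0           # velocity in pixels per second
x, y = 0.0, 0.0
last = time.perf_counter()
for _ in range(3):              # stands in for the real game loop
    now = time.perf_counter()
    dt = now - last             # seconds elapsed since the previous frame
    last = now
    x += dx * dt
    y += dy * dt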
ac3d3e180986a17fca6dc21810f6f92611cd25d5
| 3,317
|
py
|
Python
|
tests/test_types.py
|
alexcrichton/wasmtime-py
|
90ba6da0a0c5e8a13bdc28c019330f71067823df
|
[
"Apache-2.0"
] | 4
|
2020-03-25T20:55:29.000Z
|
2020-03-26T08:57:43.000Z
|
tests/test_types.py
|
alexcrichton/wasmtime-py
|
90ba6da0a0c5e8a13bdc28c019330f71067823df
|
[
"Apache-2.0"
] | null | null | null |
tests/test_types.py
|
alexcrichton/wasmtime-py
|
90ba6da0a0c5e8a13bdc28c019330f71067823df
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from wasmtime import *
| 33.505051
| 65
| 0.59391
|
import unittest
from wasmtime import *
class TestTypes(unittest.TestCase):
def test_valtype(self):
i32 = ValType.i32()
i64 = ValType.i64()
f32 = ValType.f32()
f64 = ValType.f64()
anyref = ValType.anyref()
funcref = ValType.funcref()
self.assertEqual(i32, i32)
self.assertNotEqual(i32, f32)
self.assertNotEqual(i32, 1.0)
self.assertEqual(i32, ValType.i32())
self.assertEqual(str(i32), 'i32')
self.assertEqual(str(i64), 'i64')
self.assertEqual(str(f32), 'f32')
self.assertEqual(str(f64), 'f64')
self.assertEqual(str(anyref), 'anyref')
self.assertEqual(str(funcref), 'funcref')
def test_func_type(self):
ty = FuncType([], [])
self.assertEqual([], ty.params())
self.assertEqual([], ty.results())
ty = FuncType([ValType.i32()], [ValType.i64()])
self.assertEqual([ValType.i32()], ty.params())
self.assertEqual([ValType.i64()], ty.results())
self.assertTrue(ty.as_extern().func_type() is not None)
self.assertTrue(ty.as_extern().global_type() is None)
GlobalType(ty.params()[0], True)
def test_global_type(self):
ty = GlobalType(ValType.i32(), True)
self.assertTrue(ty.mutable())
self.assertEqual(ty.content(), ValType.i32())
ty = GlobalType(ValType.i64(), False)
self.assertFalse(ty.mutable())
self.assertEqual(ty.content(), ValType.i64())
self.assertTrue(ty.as_extern().global_type() is not None)
self.assertTrue(ty.as_extern().func_type() is None)
def test_table_type(self):
ty = TableType(ValType.i32(), Limits(1, None))
self.assertEqual(ty.element(), ValType.i32())
self.assertEqual(ty.limits(), Limits(1, None))
ty = TableType(ValType.f32(), Limits(1, 2))
self.assertEqual(ty.element(), ValType.f32())
self.assertEqual(ty.limits(), Limits(1, 2))
self.assertTrue(ty.as_extern().table_type() is not None)
self.assertTrue(ty.as_extern().memory_type() is None)
def test_memory_type(self):
ty = MemoryType(Limits(1, None))
self.assertEqual(ty.limits(), Limits(1, None))
ty = MemoryType(Limits(1, 2))
self.assertEqual(ty.limits(), Limits(1, 2))
self.assertTrue(ty.as_extern().memory_type() is not None)
self.assertTrue(ty.as_extern().table_type() is None)
def test_invalid(self):
with self.assertRaises(TypeError):
MemoryType(1)
with self.assertRaises(TypeError):
TableType(1)
with self.assertRaises(TypeError):
TableType(ValType.i32())
ty = ValType.i32()
TableType(ty, Limits(1, None))
TableType(ty, Limits(1, None))
ty = ValType.i32()
TableType(ty, Limits(1, None))
GlobalType(ty, True)
with self.assertRaises(TypeError):
GlobalType(1, True)
with self.assertRaises(TypeError):
FuncType([1], [])
with self.assertRaises(TypeError):
FuncType([], [2])
ty = ValType.i32()
TableType(ty, Limits(1, None))
FuncType([ty], [])
FuncType([], [ty])
with self.assertRaises(RuntimeError):
ValType()
| 3,078
| 14
| 184
|
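A condensed version of what the suite above checks, as straight-line code against the same wasmtime API it exercises (run the full suite with python -m unittest):

from wasmtime import ValType, FuncType, GlobalType

ty = FuncType([ValType.i32()], [ValType.i64()])
assert ty.params() == [ValType.i32()]
assert ty.results() == [ValType.i64()]
assert GlobalType(ValType.f64(), True).mutable()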
929bb8c6c049704512bd15ad70f453b83423c37d
| 2,075
|
py
|
Python
|
examples/moderate/moderate.py
|
dneural/python-nullnude
|
1035d1a23d9d6a7a93b9efac4bdc99c13a8b92f4
|
[
"X11"
] | 1
|
2016-05-31T09:05:38.000Z
|
2016-05-31T09:05:38.000Z
|
examples/moderate/moderate.py
|
dneural/python-nullnude
|
1035d1a23d9d6a7a93b9efac4bdc99c13a8b92f4
|
[
"X11"
] | null | null | null |
examples/moderate/moderate.py
|
dneural/python-nullnude
|
1035d1a23d9d6a7a93b9efac4bdc99c13a8b92f4
|
[
"X11"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015, dNeural.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Except as contained in this notice, the name of dNeural and or its trademarks
# (and among others NullNude) shall not be used in advertising or otherwise to
# promote the sale, use or other dealings in this Software without prior
# written authorization from dNeural.
from nullnude import Nullnude
api_key = 'your_api_key'
api_secret = 'your_api_secret'
images = [
'https://nullnude.com/wp-content/uploads/2016/01/vintage_porn_1.jpg',
'https://nullnude.com/wp-content/uploads/2016/01/vintage_porn_2.jpg',
'https://nullnude.com/wp-content/uploads/2016/01/vintage_porn_3.jpg',
'../bird.jpg'
]
# Create the NullNude SDK interface
nullnude = Nullnude(api_key, api_secret)
# Call the Nullnude servers to check for nudity. You can either pass a public URL or a local path.
for image in images:
output = nullnude.moderate.image(image)
print('url: %s moderated as: %s' % (output.url, output.moderated_url))
| 42.346939
| 98
| 0.758554
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015, dNeural.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Except as contained in this notice, the name of dNeural and or its trademarks
# (and among others NullNude) shall not be used in advertising or otherwise to
# promote the sale, use or other dealings in this Software without prior
# written authorization from dNeural.
from nullnude import Nullnude
api_key = 'your_api_key'
api_secret = 'your_api_secret'
images = [
'https://nullnude.com/wp-content/uploads/2016/01/vintage_porn_1.jpg',
'https://nullnude.com/wp-content/uploads/2016/01/vintage_porn_2.jpg',
'https://nullnude.com/wp-content/uploads/2016/01/vintage_porn_3.jpg',
'../bird.jpg'
]
# Create the NullNude SDK interface
nullnude = Nullnude(api_key, api_secret)
# Call the Nullnude servers to check for nudity. You can either pass a public URL or a local path.
for image in images:
output = nullnude.moderate.image(image)
print('url: %s moderated as: %s' % (output.url, output.moderated_url))
| 0
| 0
| 0
|
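A slightly defensive variant of the loop above, so one bad URL or local path does not abort the whole batch (the SDK call is exactly the one used above; the broad except is the only addition):

for image in images:
    try:
        output = nullnude.moderate.image(image)
        print('url: %s moderated as: %s' % (output.url, output.moderated_url))
    except Exception as exc:
        print('failed on %s: %s' % (image, exc))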
8c012d95d514c2b11377fbe82f7ae8f1e7c9c04c
| 15,265
|
py
|
Python
|
DQMOffline/Trigger/python/SusyMonitoring_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQMOffline/Trigger/python/SusyMonitoring_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQMOffline/Trigger/python/SusyMonitoring_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.RazorMonitor_cff import *
from DQMOffline.Trigger.VBFSUSYMonitor_cff import *
from DQMOffline.Trigger.LepHTMonitor_cff import *
from DQMOffline.Trigger.susyHLTEleCaloJets_cff import *
from DQMOffline.Trigger.SoftMuHardJetMETSUSYMonitor_cff import *
from DQMOffline.Trigger.TopMonitor_cfi import hltTOPmonitoring
# muon
double_soft_muon_muonpt = hltTOPmonitoring.clone()
double_soft_muon_muonpt.FolderName = cms.string('HLT/SUSY/SOS/Muon/')
# Selections
double_soft_muon_muonpt.nmuons = cms.uint32(2)
double_soft_muon_muonpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_muonpt.HTcut = cms.double(60)
double_soft_muon_muonpt.enableMETPlot = True
double_soft_muon_muonpt.metSelection =cms.string('pt>150')
double_soft_muon_muonpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_muonpt.MHTcut = cms.double(150)
double_soft_muon_muonpt.invMassUppercut = cms.double(50)
double_soft_muon_muonpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_muonpt.histoPSet.muPtBinning =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
double_soft_muon_muonpt.histoPSet.muPtBinning2D =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
# Triggers
double_soft_muon_muonpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_muonpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_PFMET140_PFMHT140_v*')
# met
double_soft_muon_metpt = hltTOPmonitoring.clone()
double_soft_muon_metpt.FolderName = cms.string('HLT/SUSY/SOS/MET/')
# Selections
double_soft_muon_metpt.nmuons = cms.uint32(2)
double_soft_muon_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_metpt.HTcut = cms.double(60)
double_soft_muon_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_metpt.MHTcut = cms.double(150)
double_soft_muon_metpt.invMassUppercut = cms.double(50)
double_soft_muon_metpt.invMassLowercut = cms.double(10)
double_soft_muon_metpt.enableMETPlot = True
# Binning
double_soft_muon_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# inv Mass
double_soft_muon_mll = hltTOPmonitoring.clone()
double_soft_muon_mll.FolderName = cms.string('HLT/SUSY/SOS/Mll/')
# Selections
double_soft_muon_mll.nmuons = cms.uint32(2)
double_soft_muon_mll.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mll.HTcut = cms.double(60)
double_soft_muon_mll.muoSelection =cms.string('pt>10 & abs(eta)<2.4')
double_soft_muon_mll.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mll.MHTcut = cms.double(150)
double_soft_muon_mll.enableMETPlot = True
double_soft_muon_mll.metSelection = cms.string('pt>150')
# Binning
double_soft_muon_mll.histoPSet.invMassVariableBinning =cms.vdouble(8,12,15,20,25,30,35,40,45,47,50,60)
# Triggers
double_soft_muon_mll.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_mll.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Dimuon12_Upsilon_eta1p5_v*')
# mht
double_soft_muon_mhtpt = hltTOPmonitoring.clone()
double_soft_muon_mhtpt.FolderName = cms.string('HLT/SUSY/SOS/MHT/')
# Selections
double_soft_muon_mhtpt.nmuons = cms.uint32(2)
double_soft_muon_mhtpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mhtpt.HTcut = cms.double(60)
double_soft_muon_mhtpt.muoSelection =cms.string('pt>18 & abs(eta)<2.0')
double_soft_muon_mhtpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mhtpt.enableMETPlot = True
double_soft_muon_mhtpt.metSelection = cms.string('pt>150')
double_soft_muon_mhtpt.invMassUppercut = cms.double(50)
double_soft_muon_mhtpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_mhtpt.histoPSet.MHTVariableBinning =cms.vdouble(50,60,70,80,90,100,110,120,130,150,200,300)
# Triggers
double_soft_muon_mhtpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_mhtpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup1, met
double_soft_muon_backup_70_metpt = hltTOPmonitoring.clone()
double_soft_muon_backup_70_metpt.FolderName = cms.string('HLT/SUSY/SOS/backup70/MET/')
# Selections
double_soft_muon_backup_70_metpt.nmuons = cms.uint32(2)
double_soft_muon_backup_70_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_metpt.HTcut = cms.double(60)
double_soft_muon_backup_70_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_backup_70_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_metpt.MHTcut = cms.double(150)
double_soft_muon_backup_70_metpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_70_metpt.invMassLowercut = cms.double(10)
double_soft_muon_backup_70_metpt.enableMETPlot = True
# Binning
double_soft_muon_backup_70_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_backup_70_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET70_PFMHT70_v*')
double_soft_muon_backup_70_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup1, mht
double_soft_muon_backup_70_mhtpt = hltTOPmonitoring.clone()
double_soft_muon_backup_70_mhtpt.FolderName = cms.string('HLT/SUSY/SOS/backup70/MHT/')
# Selections
double_soft_muon_backup_70_mhtpt.nmuons = cms.uint32(2)
double_soft_muon_backup_70_mhtpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_mhtpt.HTcut = cms.double(60)
double_soft_muon_backup_70_mhtpt.muoSelection =cms.string('pt>18 & abs(eta)<2.0')
double_soft_muon_backup_70_mhtpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_mhtpt.enableMETPlot = True
double_soft_muon_backup_70_mhtpt.metSelection = cms.string('pt>150')
double_soft_muon_backup_70_mhtpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_70_mhtpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_backup_70_mhtpt.histoPSet.MHTVariableBinning =cms.vdouble(50,60,70,80,90,100,110,120,130,150,200,300)
# Triggers
double_soft_muon_backup_70_mhtpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET70_PFMHT70_v*')
double_soft_muon_backup_70_mhtpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup2, met
double_soft_muon_backup_90_metpt = hltTOPmonitoring.clone()
double_soft_muon_backup_90_metpt.FolderName = cms.string('HLT/SUSY/SOS/backup90/MET/')
# Selections
double_soft_muon_backup_90_metpt.nmuons = cms.uint32(2)
double_soft_muon_backup_90_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_metpt.HTcut = cms.double(60)
double_soft_muon_backup_90_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_backup_90_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_metpt.MHTcut = cms.double(150)
double_soft_muon_backup_90_metpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_90_metpt.invMassLowercut = cms.double(10)
double_soft_muon_backup_90_metpt.enableMETPlot = True
# Binning
double_soft_muon_backup_90_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_backup_90_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET90_PFMHT90_v*')
double_soft_muon_backup_90_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup2, mht
double_soft_muon_backup_90_mhtpt = hltTOPmonitoring.clone()
double_soft_muon_backup_90_mhtpt.FolderName = cms.string('HLT/SUSY/SOS/backup90/MHT/')
# Selections
double_soft_muon_backup_90_mhtpt.nmuons = cms.uint32(2)
double_soft_muon_backup_90_mhtpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_mhtpt.HTcut = cms.double(60)
double_soft_muon_backup_90_mhtpt.muoSelection =cms.string('pt>18 & abs(eta)<2.0')
double_soft_muon_backup_90_mhtpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_mhtpt.enableMETPlot = True
double_soft_muon_backup_90_mhtpt.metSelection = cms.string('pt>150')
double_soft_muon_backup_90_mhtpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_90_mhtpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_backup_90_mhtpt.histoPSet.MHTVariableBinning =cms.vdouble(50,60,70,80,90,100,110,120,130,150,200,300)
# Triggers
double_soft_muon_backup_90_mhtpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET90_PFMHT90_v*')
double_soft_muon_backup_90_mhtpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# triple muon
triple_muon_mupt = hltTOPmonitoring.clone()
triple_muon_mupt.FolderName = cms.string('HLT/SUSY/SOS/TripleMu/Muon')
# Selections
triple_muon_mupt.nmuons = cms.uint32(3)
triple_muon_mupt.muoSelection =cms.string('isGlobalMuon()')
triple_muon_mupt.invMassUppercut = cms.double(50)
triple_muon_mupt.invMassLowercut = cms.double(10)
triple_muon_mupt.invMassCutInAllMuPairs=cms.bool(True)
# Triggers
triple_muon_mupt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_TripleMu_5_3_3_Mass3p8to60_DZ_v*')
triple_muon_mupt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Trimuon5_3p5_2_Upsilon_Muon_v*')
# triplemu dca
triple_muon_dca_mupt = hltTOPmonitoring.clone()
triple_muon_dca_mupt.FolderName = cms.string('HLT/SUSY/SOS/TripleMu/DCA/Muon')
# Selections
triple_muon_dca_mupt.nmuons = cms.uint32(3)
triple_muon_dca_mupt.muoSelection =cms.string('isGlobalMuon()')
triple_muon_dca_mupt.invMassUppercut = cms.double(50)
triple_muon_dca_mupt.invMassLowercut = cms.double(10)
triple_muon_dca_mupt.invMassCutInAllMuPairs=cms.bool(True)
# Triggers
triple_muon_dca_mupt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_TripleMu_5_3_3_Mass3p8to60_DCA_v*')
triple_muon_dca_mupt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Trimuon5_3p5_2_Upsilon_Muon_v*')
# MuonEG
susyMuEGMonitoring = hltTOPmonitoring.clone()
susyMuEGMonitoring.FolderName = cms.string('HLT/SUSY/MuonEG/')
susyMuEGMonitoring.nmuons = cms.uint32(1)
susyMuEGMonitoring.nphotons = cms.uint32(1)
susyMuEGMonitoring.nelectrons = cms.uint32(0)
susyMuEGMonitoring.njets = cms.uint32(0)
susyMuEGMonitoring.enablePhotonPlot = cms.bool(True)
susyMuEGMonitoring.muoSelection = cms.string('pt>26 & abs(eta)<2.1 & isPFMuon & isGlobalMuon & isTrackerMuon & numberOfMatches>1 & innerTrack.hitPattern.trackerLayersWithMeasurement>5 & innerTrack.hitPattern.numberOfValidPixelHits>0 & globalTrack.hitPattern.numberOfValidMuonHits>0 & globalTrack.normalizedChi2<10 & (pfIsolationR04.sumChargedHadronPt + max(pfIsolationR04.sumNeutralHadronEt + pfIsolationR04.sumPhotonEt - (pfIsolationR04.sumPUPt)/2.,0.) )/pt<0.15')
susyMuEGMonitoring.phoSelection = cms.string('(pt > 30 && abs(eta)<1.4442 && hadTowOverEm<0.0597 && full5x5_sigmaIetaIeta()<0.01031 && chargedHadronIso<1.295 && neutralHadronIso < 5.931+0.0163*pt+0.000014*pt*pt && photonIso < 6.641+0.0034*pt) || (pt > 30 && abs(eta)>1.4442 && hadTowOverEm<0.0481 && full5x5_sigmaIetaIeta()<0.03013 && chargedHadronIso<1.011 && neutralHadronIso < 1.715+0.0163*pt+0.000014*pt*pt && photonIso < 3.863+0.0034*pt)')
susyMuEGMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_Photon30_IsoCaloId*')
susyMuEGMonitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring('')
# muon dca
double_soft_muon_dca_muonpt = hltTOPmonitoring.clone()
double_soft_muon_dca_muonpt.FolderName = cms.string('HLT/SUSY/SOS/DCA/Muon/')
# Selections
double_soft_muon_dca_muonpt.nmuons = cms.uint32(2)
double_soft_muon_dca_muonpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_muonpt.HTcut = cms.double(60)
double_soft_muon_dca_muonpt.enableMETPlot = True
double_soft_muon_dca_muonpt.metSelection =cms.string('pt>150')
double_soft_muon_dca_muonpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_muonpt.MHTcut = cms.double(150)
double_soft_muon_dca_muonpt.invMassUppercut = cms.double(50)
double_soft_muon_dca_muonpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_dca_muonpt.histoPSet.muPtBinning =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
double_soft_muon_dca_muonpt.histoPSet.muPtBinning2D =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
# Triggers
double_soft_muon_dca_muonpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DCA_PFMET50_PFMHT60_v*')
double_soft_muon_dca_muonpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_PFMET140_PFMHT140_v*')
# met
double_soft_muon_dca_metpt = hltTOPmonitoring.clone()
double_soft_muon_dca_metpt.FolderName = cms.string('HLT/SUSY/SOS/DCA/MET/')
# Selections
double_soft_muon_dca_metpt.nmuons = cms.uint32(2)
double_soft_muon_dca_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_metpt.HTcut = cms.double(60)
double_soft_muon_dca_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_dca_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_metpt.MHTcut = cms.double(150)
double_soft_muon_dca_metpt.invMassUppercut = cms.double(50)
double_soft_muon_dca_metpt.invMassLowercut = cms.double(10)
double_soft_muon_dca_metpt.enableMETPlot = True
# Binning
double_soft_muon_dca_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_dca_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DCA_PFMET50_PFMHT60_v*')
double_soft_muon_dca_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v*')
susyMonitorHLT = cms.Sequence(
susyHLTRazorMonitoring
+ susyHLTVBFMonitoring
+ LepHTMonitor
+ susyHLTEleCaloJets
+ double_soft_muon_muonpt
+ double_soft_muon_metpt
+ double_soft_muon_mhtpt
+ double_soft_muon_mll
+ double_soft_muon_backup_70_metpt
+ double_soft_muon_backup_70_mhtpt
+ double_soft_muon_backup_90_metpt
+ double_soft_muon_backup_90_mhtpt
+ triple_muon_mupt
+ triple_muon_dca_mupt
+ susyMuEGMonitoring
+ double_soft_muon_dca_muonpt
+ double_soft_muon_dca_metpt
+ susyHLTSoftMuHardJetMETMonitoring
)
susHLTDQMSourceExtra = cms.Sequence(
)
| 57.387218
| 468
| 0.797707
|
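Every SOS monitor in the file above repeats the same clone-and-configure boilerplate; a hedged sketch of a small factory that would centralize the shared selections (illustrative only, not part of the CMSSW configuration):

def sos_monitor(folder, num_path, den_path, **overrides):
    m = hltTOPmonitoring.clone()
    m.FolderName = cms.string(folder)
    m.nmuons = cms.uint32(2)
    m.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
    m.HTcut = cms.double(60)
    m.numGenericTriggerEventPSet.hltPaths = cms.vstring(num_path)
    m.denGenericTriggerEventPSet.hltPaths = cms.vstring(den_path)
    for name, value in overrides.items():
        setattr(m, name, value)  # per-monitor deviations from the defaults
    return m

# e.g. sos_monitor('HLT/SUSY/SOS/Muon/',
#                  'HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*',
#                  'HLT_PFMET140_PFMHT140_v*',
#                  MHTcut=cms.double(150))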
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.RazorMonitor_cff import *
from DQMOffline.Trigger.VBFSUSYMonitor_cff import *
from DQMOffline.Trigger.LepHTMonitor_cff import *
from DQMOffline.Trigger.susyHLTEleCaloJets_cff import *
from DQMOffline.Trigger.SoftMuHardJetMETSUSYMonitor_cff import *
from DQMOffline.Trigger.TopMonitor_cfi import hltTOPmonitoring
# muon
double_soft_muon_muonpt = hltTOPmonitoring.clone()
double_soft_muon_muonpt.FolderName = cms.string('HLT/SUSY/SOS/Muon/')
# Selections
double_soft_muon_muonpt.nmuons = cms.uint32(2)
double_soft_muon_muonpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_muonpt.HTcut = cms.double(60)
double_soft_muon_muonpt.enableMETPlot = True
double_soft_muon_muonpt.metSelection =cms.string('pt>150')
double_soft_muon_muonpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_muonpt.MHTcut = cms.double(150)
double_soft_muon_muonpt.invMassUppercut = cms.double(50)
double_soft_muon_muonpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_muonpt.histoPSet.muPtBinning =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
double_soft_muon_muonpt.histoPSet.muPtBinning2D =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
# Triggers
double_soft_muon_muonpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_muonpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_PFMET140_PFMHT140_v*')
# met
double_soft_muon_metpt = hltTOPmonitoring.clone()
double_soft_muon_metpt.FolderName = cms.string('HLT/SUSY/SOS/MET/')
# Selections
double_soft_muon_metpt.nmuons = cms.uint32(2)
double_soft_muon_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_metpt.HTcut = cms.double(60)
double_soft_muon_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_metpt.MHTcut = cms.double(150)
double_soft_muon_metpt.invMassUppercut = cms.double(50)
double_soft_muon_metpt.invMassLowercut = cms.double(10)
double_soft_muon_metpt.enableMETPlot = True
# Binning
double_soft_muon_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# inv Mass
double_soft_muon_mll = hltTOPmonitoring.clone()
double_soft_muon_mll.FolderName = cms.string('HLT/SUSY/SOS/Mll/')
# Selections
double_soft_muon_mll.nmuons = cms.uint32(2)
double_soft_muon_mll.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mll.HTcut = cms.double(60)
double_soft_muon_mll.muoSelection =cms.string('pt>10 & abs(eta)<2.4')
double_soft_muon_mll.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mll.MHTcut = cms.double(150)
double_soft_muon_mll.enableMETPlot = True
double_soft_muon_mll.metSelection = cms.string('pt>150')
# Binning
double_soft_muon_mll.histoPSet.invMassVariableBinning =cms.vdouble(8,12,15,20,25,30,35,40,45,47,50,60)
# Triggers
double_soft_muon_mll.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_mll.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Dimuon12_Upsilon_eta1p5_v*')
# mht
double_soft_muon_mhtpt = hltTOPmonitoring.clone()
double_soft_muon_mhtpt.FolderName = cms.string('HLT/SUSY/SOS/MHT/')
# Selections
double_soft_muon_mhtpt.nmuons = cms.uint32(2)
double_soft_muon_mhtpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mhtpt.HTcut = cms.double(60)
double_soft_muon_mhtpt.muoSelection =cms.string('pt>18 & abs(eta)<2.0')
double_soft_muon_mhtpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_mhtpt.enableMETPlot = True
double_soft_muon_mhtpt.metSelection = cms.string('pt>150')
double_soft_muon_mhtpt.invMassUppercut = cms.double(50)
double_soft_muon_mhtpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_mhtpt.histoPSet.MHTVariableBinning =cms.vdouble(50,60,70,80,90,100,110,120,130,150,200,300)
# Triggers
double_soft_muon_mhtpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET50_PFMHT60_v*')
double_soft_muon_mhtpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup1, met
double_soft_muon_backup_70_metpt = hltTOPmonitoring.clone()
double_soft_muon_backup_70_metpt.FolderName = cms.string('HLT/SUSY/SOS/backup70/MET/')
# Selections
double_soft_muon_backup_70_metpt.nmuons = cms.uint32(2)
double_soft_muon_backup_70_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_metpt.HTcut = cms.double(60)
double_soft_muon_backup_70_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_backup_70_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_metpt.MHTcut = cms.double(150)
double_soft_muon_backup_70_metpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_70_metpt.invMassLowercut = cms.double(10)
double_soft_muon_backup_70_metpt.enableMETPlot = True
# Binning
double_soft_muon_backup_70_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_backup_70_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET70_PFMHT70_v*')
double_soft_muon_backup_70_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup1, mht
double_soft_muon_backup_70_mhtpt = hltTOPmonitoring.clone()
double_soft_muon_backup_70_mhtpt.FolderName = cms.string('HLT/SUSY/SOS/backup70/MHT/')
# Selections
double_soft_muon_backup_70_mhtpt.nmuons = cms.uint32(2)
double_soft_muon_backup_70_mhtpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_mhtpt.HTcut = cms.double(60)
double_soft_muon_backup_70_mhtpt.muoSelection =cms.string('pt>18 & abs(eta)<2.0')
double_soft_muon_backup_70_mhtpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_70_mhtpt.enableMETPlot = True
double_soft_muon_backup_70_mhtpt.metSelection = cms.string('pt>150')
double_soft_muon_backup_70_mhtpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_70_mhtpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_backup_70_mhtpt.histoPSet.MHTVariableBinning =cms.vdouble(50,60,70,80,90,100,110,120,130,150,200,300)
# Triggers
double_soft_muon_backup_70_mhtpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET70_PFMHT70_v*')
double_soft_muon_backup_70_mhtpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup2, met
double_soft_muon_backup_90_metpt = hltTOPmonitoring.clone()
double_soft_muon_backup_90_metpt.FolderName = cms.string('HLT/SUSY/SOS/backup90/MET/')
# Selections
double_soft_muon_backup_90_metpt.nmuons = cms.uint32(2)
double_soft_muon_backup_90_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_metpt.HTcut = cms.double(60)
double_soft_muon_backup_90_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_backup_90_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_metpt.MHTcut = cms.double(150)
double_soft_muon_backup_90_metpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_90_metpt.invMassLowercut = cms.double(10)
double_soft_muon_backup_90_metpt.enableMETPlot = True
# Binning
double_soft_muon_backup_90_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_backup_90_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET90_PFMHT90_v*')
double_soft_muon_backup_90_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# backup2, mht
double_soft_muon_backup_90_mhtpt = hltTOPmonitoring.clone()
double_soft_muon_backup_90_mhtpt.FolderName = cms.string('HLT/SUSY/SOS/backup90/MHT/')
# Selections
double_soft_muon_backup_90_mhtpt.nmuons = cms.uint32(2)
double_soft_muon_backup_90_mhtpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_mhtpt.HTcut = cms.double(60)
double_soft_muon_backup_90_mhtpt.muoSelection =cms.string('pt>18 & abs(eta)<2.0')
double_soft_muon_backup_90_mhtpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_backup_90_mhtpt.enableMETPlot = True
double_soft_muon_backup_90_mhtpt.metSelection = cms.string('pt>150')
double_soft_muon_backup_90_mhtpt.invMassUppercut = cms.double(50)
double_soft_muon_backup_90_mhtpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_backup_90_mhtpt.histoPSet.MHTVariableBinning =cms.vdouble(50,60,70,80,90,100,110,120,130,150,200,300)
# Triggers
double_soft_muon_backup_90_mhtpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DZ_PFMET90_PFMHT90_v*')
double_soft_muon_backup_90_mhtpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*')
# triple muon
triple_muon_mupt = hltTOPmonitoring.clone()
triple_muon_mupt.FolderName = cms.string('HLT/SUSY/SOS/TripleMu/Muon')
# Selections
triple_muon_mupt.nmuons = cms.uint32(3)
triple_muon_mupt.muoSelection =cms.string('isGlobalMuon()')
triple_muon_mupt.invMassUppercut = cms.double(50)
triple_muon_mupt.invMassLowercut = cms.double(10)
triple_muon_mupt.invMassCutInAllMuPairs=cms.bool(True)
# Triggers
triple_muon_mupt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_TripleMu_5_3_3_Mass3p8to60_DZ_v*')
triple_muon_mupt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Trimuon5_3p5_2_Upsilon_Muon_v*')
# triplemu dca
triple_muon_dca_mupt = hltTOPmonitoring.clone()
triple_muon_dca_mupt.FolderName = cms.string('HLT/SUSY/SOS/TripleMu/DCA/Muon')
# Selections
triple_muon_dca_mupt.nmuons = cms.uint32(3)
triple_muon_dca_mupt.muoSelection =cms.string('isGlobalMuon()')
triple_muon_dca_mupt.invMassUppercut = cms.double(50)
triple_muon_dca_mupt.invMassLowercut = cms.double(10)
triple_muon_dca_mupt.invMassCutInAllMuPairs=cms.bool(True)
# Triggers
triple_muon_dca_mupt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_TripleMu_5_3_3_Mass3p8to60_DCA_v*')
triple_muon_dca_mupt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Trimuon5_3p5_2_Upsilon_Muon_v*')
# MuonEG
susyMuEGMonitoring = hltTOPmonitoring.clone()
susyMuEGMonitoring.FolderName = cms.string('HLT/SUSY/MuonEG/')
susyMuEGMonitoring.nmuons = cms.uint32(1)
susyMuEGMonitoring.nphotons = cms.uint32(1)
susyMuEGMonitoring.nelectrons = cms.uint32(0)
susyMuEGMonitoring.njets = cms.uint32(0)
susyMuEGMonitoring.enablePhotonPlot = cms.bool(True)
susyMuEGMonitoring.muoSelection = cms.string('pt>26 & abs(eta)<2.1 & isPFMuon & isGlobalMuon & isTrackerMuon & numberOfMatches>1 & innerTrack.hitPattern.trackerLayersWithMeasurement>5 & innerTrack.hitPattern.numberOfValidPixelHits>0 & globalTrack.hitPattern.numberOfValidMuonHits>0 & globalTrack.normalizedChi2<10 & (pfIsolationR04.sumChargedHadronPt + max(pfIsolationR04.sumNeutralHadronEt + pfIsolationR04.sumPhotonEt - (pfIsolationR04.sumPUPt)/2.,0.) )/pt<0.15')
susyMuEGMonitoring.phoSelection = cms.string('(pt > 30 && abs(eta)<1.4442 && hadTowOverEm<0.0597 && full5x5_sigmaIetaIeta()<0.01031 && chargedHadronIso<1.295 && neutralHadronIso < 5.931+0.0163*pt+0.000014*pt*pt && photonIso < 6.641+0.0034*pt) || (pt > 30 && abs(eta)>1.4442 && hadTowOverEm<0.0481 && full5x5_sigmaIetaIeta()<0.03013 && chargedHadronIso<1.011 && neutralHadronIso < 1.715+0.0163*pt+0.000014*pt*pt && photonIso < 3.863+0.0034*pt)')
susyMuEGMonitoring.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_Photon30_IsoCaloId*')
susyMuEGMonitoring.denGenericTriggerEventPSet.hltPaths = cms.vstring('')
# muon dca
double_soft_muon_dca_muonpt = hltTOPmonitoring.clone()
double_soft_muon_dca_muonpt.FolderName = cms.string('HLT/SUSY/SOS/DCA/Muon/')
# Selections
double_soft_muon_dca_muonpt.nmuons = cms.uint32(2)
double_soft_muon_dca_muonpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_muonpt.HTcut = cms.double(60)
double_soft_muon_dca_muonpt.enableMETPlot = True
double_soft_muon_dca_muonpt.metSelection =cms.string('pt>150')
double_soft_muon_dca_muonpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_muonpt.MHTcut = cms.double(150)
double_soft_muon_dca_muonpt.invMassUppercut = cms.double(50)
double_soft_muon_dca_muonpt.invMassLowercut = cms.double(10)
# Binning
double_soft_muon_dca_muonpt.histoPSet.muPtBinning =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
double_soft_muon_dca_muonpt.histoPSet.muPtBinning2D =cms.vdouble(0,2,5,7,10,12,15,17,20,25,30,50)
# Triggers
double_soft_muon_dca_muonpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DCA_PFMET50_PFMHT60_v*')
double_soft_muon_dca_muonpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_PFMET140_PFMHT140_v*')
# met
double_soft_muon_dca_metpt = hltTOPmonitoring.clone()
double_soft_muon_dca_metpt.FolderName = cms.string('HLT/SUSY/SOS/DCA/MET/')
# Selections
double_soft_muon_dca_metpt.nmuons = cms.uint32(2)
double_soft_muon_dca_metpt.HTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_metpt.HTcut = cms.double(60)
double_soft_muon_dca_metpt.muoSelection =cms.string('pt>18 & abs(eta)<2.4')
double_soft_muon_dca_metpt.MHTdefinition = cms.string('pt>30 & abs(eta)<2.4')
double_soft_muon_dca_metpt.MHTcut = cms.double(150)
double_soft_muon_dca_metpt.invMassUppercut = cms.double(50)
double_soft_muon_dca_metpt.invMassLowercut = cms.double(10)
double_soft_muon_dca_metpt.enableMETPlot = True
# Binning
double_soft_muon_dca_metpt.histoPSet.metPSet =cms.PSet(nbins=cms.uint32(50),xmin=cms.double(50),xmax=cms.double(300) )
# Triggers
double_soft_muon_dca_metpt.numGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_DoubleMu3_DCA_PFMET50_PFMHT60_v*')
double_soft_muon_dca_metpt.denGenericTriggerEventPSet.hltPaths = cms.vstring('HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v*')
susyMonitorHLT = cms.Sequence(
susyHLTRazorMonitoring
+ susyHLTVBFMonitoring
+ LepHTMonitor
+ susyHLTEleCaloJets
+ double_soft_muon_muonpt
+ double_soft_muon_metpt
+ double_soft_muon_mhtpt
+ double_soft_muon_mll
+ double_soft_muon_backup_70_metpt
+ double_soft_muon_backup_70_mhtpt
+ double_soft_muon_backup_90_metpt
+ double_soft_muon_backup_90_mhtpt
+ triple_muon_mupt
+ triple_muon_dca_mupt
+ susyMuEGMonitoring
+ double_soft_muon_dca_muonpt
+ double_soft_muon_dca_metpt
+ susyHLTSoftMuHardJetMETMonitoring
)
susHLTDQMSourceExtra = cms.Sequence(
)
| 0
| 0
| 0
|
71bd3b77d216fb6bd6e19a6e951bade7f6e13e2c
| 7,401
|
py
|
Python
|
prototype/src/frontend/app.py
|
nmilovanovic/rep
|
70a7853f54250d26eb14be86643c3b3d5ed233c4
|
[
"MIT"
] | null | null | null |
prototype/src/frontend/app.py
|
nmilovanovic/rep
|
70a7853f54250d26eb14be86643c3b3d5ed233c4
|
[
"MIT"
] | null | null | null |
prototype/src/frontend/app.py
|
nmilovanovic/rep
|
70a7853f54250d26eb14be86643c3b3d5ed233c4
|
[
"MIT"
] | null | null | null |
from flask import Flask, redirect, url_for, request, render_template, make_response, session, send_from_directory, send_file
import uuid
import datetime
import io
import grpc
import prototype_pb2_grpc
import prototype_pb2
import base64
import json
from prometheus_client import Counter, start_http_server
import os
app = Flask(__name__)
app.config["DEBUG"] = False
REQUESTS = Counter('http_requests_total', 'Total number of requests to this API.')
PICTURES_CONN_STRING = os.environ['PICTURES']
SPECS_CONN_STRING = os.environ['SPECS']
CART_CONN_STRING = os.environ['CART']
CHECKOUT_CONN_STRING = os.environ['CHECKOUT']
@app.route('/')
@app.route('/addproduct', methods = ['POST'])
@app.route('/addtocart',methods = ['POST'])
@app.route('/listcart',methods = ['GET'])
@app.route('/checkoutcart',methods = ['GET'])
@app.route('/listcheckouts', methods = ['GET'])
@app.route('/addproduct',methods = ['GET'])
@app.route('/listproducts', methods = ['GET'])
@app.route('/checkout', methods = ['GET'])
@app.route('/logout/')
@app.route('/images/<productid>')
if __name__ == '__main__':
print(PICTURES_CONN_STRING)
print(SPECS_CONN_STRING)
print(CART_CONN_STRING)
print(CHECKOUT_CONN_STRING)
start_http_server(8000)
app.run('0.0.0.0', port=80)
| 38.149485
| 124
| 0.706391
|
from flask import Flask, redirect, url_for, request, render_template, make_response, session, send_from_directory, send_file
import uuid
import datetime
import io
import grpc
import prototype_pb2_grpc
import prototype_pb2
import base64
import json
from prometheus_client import Counter, start_http_server
import os
app = Flask(__name__)
app.config["DEBUG"] = False
REQUESTS = Counter('http_requests_total', 'Total number of requests to this API.')
PICTURES_CONN_STRING = os.environ['PICTURES']
SPECS_CONN_STRING = os.environ['SPECS']
CART_CONN_STRING = os.environ['CART']
CHECKOUT_CONN_STRING = os.environ['CHECKOUT']
@app.route('/')
def index():
REQUESTS.inc()
    if 'userid' not in request.cookies:
userid = uuid.uuid4().hex
else:
userid = request.cookies['userid']
response = make_response(render_template('index.html', id=userid))
expires = datetime.datetime.now() + datetime.timedelta(days=90)
response.set_cookie('userid', userid, expires=expires)
return response
@app.route('/addproduct', methods = ['POST'])
def add_product_post():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
f = request.files['image']
image_contents = f.read()
product_dict = dict()
productid = uuid.uuid4().hex
product_dict['productid'] = productid
product_dict['name'] = request.form['name']
product_dict['description'] = request.form['description']
with grpc.insecure_channel(SPECS_CONN_STRING) as channel:
stub = prototype_pb2_grpc.SpecsStub(channel)
req = prototype_pb2.SpecsRequest(content=json.dumps(product_dict))
stub.AddProduct(req)
with grpc.insecure_channel(PICTURES_CONN_STRING) as channel:
stub = prototype_pb2_grpc.PicturesStub(channel)
req = prototype_pb2.AddPictureRequest(productid=productid, content=image_contents)
stub.AddPicture(req)
return 'Product added successfully'
@app.route('/addtocart',methods = ['POST'])
def add_to_cart():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
productid = request.form['productid']
userid = request.cookies['userid']
with grpc.insecure_channel(CART_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CartStub(channel)
req = prototype_pb2.CartRequest(userid=userid, productid=productid)
stub.AddProduct(req)
return 'Product successfully added to the cart!'
@app.route('/listcart',methods = ['GET'])
def list_cart():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
response = None
userid = request.cookies['userid']
with grpc.insecure_channel(CART_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CartStub(channel)
req = prototype_pb2.CartRequest(userid=userid)
response = stub.ListProducts(req)
products_dict = json.loads(response.content)
products_list = products_dict['list']
products = dict()
for product in products_list:
with grpc.insecure_channel(SPECS_CONN_STRING) as channel:
stub = prototype_pb2_grpc.SpecsStub(channel)
            req = prototype_pb2.SpecsRequest(content=product)  # SpecsStub calls elsewhere use SpecsRequest, not CartRequest
response = stub.GetProduct(req)
response = json.loads(response.content)
products[response['productid']] = response
return render_template('listcart.html', userid=userid, products_dict=products)
@app.route('/checkoutcart',methods = ['GET'])
def checkout_cart():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
response = None
userid = request.cookies['userid']
with grpc.insecure_channel(CART_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CartStub(channel)
req = prototype_pb2.CartRequest(userid=userid)
response = stub.CheckoutCart(req)
print(response.content)
with grpc.insecure_channel(CHECKOUT_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CheckoutStub(channel)
req = prototype_pb2.CheckoutRequest(userid=userid, content=response.content)
response = stub.AddCart(req)
return 'You have successfully checked out your cart!'
@app.route('/listcheckouts', methods = ['GET'])
def list_checkouts():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
    userid = request.cookies['userid']
response = None
with grpc.insecure_channel(CHECKOUT_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CheckoutStub(channel)
req = prototype_pb2.CheckoutRequest()
response = stub.ListCheckouts(req)
print(response.content)
return render_template('listcheckouts.html', userid=userid, checkouts_dict=json.loads(response.content))
@app.route('/addproduct',methods = ['GET'])
def add_product_get():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
return render_template('addproduct.html')
@app.route('/listproducts', methods = ['GET'])
def list_products():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
userid = request.cookies['userid']
response = None
with grpc.insecure_channel(SPECS_CONN_STRING) as channel:
stub = prototype_pb2_grpc.SpecsStub(channel)
req = prototype_pb2.SpecsRequest()
response = stub.ListProducts(req)
products_dict = json.loads(response.content)
return render_template('listproducts.html', userid=userid, products_dict=products_dict)
@app.route('/checkout', methods = ['GET'])
def checkout():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
userid = request.cookies['userid']
response = None
with grpc.insecure_channel(CART_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CartStub(channel)
req = prototype_pb2.CartRequest(userid=userid)
response = stub.ListProducts(req)
with grpc.insecure_channel(CART_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CartStub(channel)
req = prototype_pb2.CartRequest(userid=userid)
stub.CheckoutCart(req)
with grpc.insecure_channel(CHECKOUT_CONN_STRING) as channel:
stub = prototype_pb2_grpc.CheckoutStub(channel)
req = prototype_pb2.CheckoutRequest(content=response.content)
response = stub.AddCart(req)
return 'You have successfully checked out your cart!'
@app.route('/logout/')
def logout():
REQUESTS.inc()
    if 'userid' not in request.cookies:
        return redirect(url_for('index'))
response = make_response(render_template('logout.html'))
response.set_cookie('userid', '', expires=0)
return response
@app.route('/images/<productid>')
def images(productid):
REQUESTS.inc()
response = None
with grpc.insecure_channel(PICTURES_CONN_STRING) as channel:
stub = prototype_pb2_grpc.PicturesStub(channel)
        pic_request = prototype_pb2.PictureRequest(productid=productid)  # avoid shadowing Flask's global `request`
        response = stub.GetPicture(pic_request)
response = make_response(send_file(io.BytesIO(response.content), mimetype='image/jpg'))
return response
if __name__ == '__main__':
print(PICTURES_CONN_STRING)
print(SPECS_CONN_STRING)
print(CART_CONN_STRING)
print(CHECKOUT_CONN_STRING)
start_http_server(8000)
app.run('0.0.0.0', port=80)
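Every handler above repeats the same cookie check before doing any work. One way to factor that out is a guard decorator; this is only a sketch (the require_userid name is not part of the app) and it reuses the request/redirect/url_for objects already imported at the top of the file:
from functools import wraps
def require_userid(view):
    # Redirect anonymous visitors (no userid cookie) to the index page.
    @wraps(view)
    def wrapper(*args, **kwargs):
        if 'userid' not in request.cookies:
            return redirect(url_for('index'))
        return view(*args, **kwargs)
    return wrapper
# Hypothetical usage: place the guard between @app.route and the view.
# @app.route('/listcart', methods=['GET'])
# @require_userid
# def list_cart(): ...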
| 5,885
| 0
| 243
|
dc56af512a538f9b748aec086cc85450f71fbc47
| 794
|
py
|
Python
|
modules/help.py
|
cernymichal/go2
|
82869e12a4fed4f9a61b55b81b3453a14fa42d5d
|
[
"Unlicense"
] | null | null | null |
modules/help.py
|
cernymichal/go2
|
82869e12a4fed4f9a61b55b81b3453a14fa42d5d
|
[
"Unlicense"
] | null | null | null |
modules/help.py
|
cernymichal/go2
|
82869e12a4fed4f9a61b55b81b3453a14fa42d5d
|
[
"Unlicense"
] | null | null | null |
import asyncio
import json
import os
from threading import Timer
name = "Help"
description = """
Module for displaying help
modules: prints all modules
help *module*: prints help for a Module
"""
metadata = {}
functions = {
"modules": Modules,
"help": Help
}
| 20.358974
| 87
| 0.671285
|
import asyncio
import json
import os
from threading import Timer
name = "Help"
description = """
Module for displaying help
modules: prints all modules
help *module*: prints help for a Module
"""
metadata = {}
async def OnReady(client, modules):
global metadata
for module in modules:
key = module.name if hasattr(module, "name") else module.__name__
metadata[key] = {
"description": module.description if hasattr(module, "description") else ""
}
async def Modules(client, message):
await message.channel.send(", ".join(metadata.keys()))
async def Help(client, message):
    module = " ".join(message.content.split()[1:])
await message.channel.send(metadata[module]["description"])
functions = {
"modules": Modules,
"help": Help
}
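The functions dict implies the bot core dispatches on the first word of each message; a sketch of such a dispatcher (the dispatch function and modules list are assumptions, not part of this module):
async def dispatch(client, message, modules):
    # The first word selects a handler, e.g. "help Help" -> Help(client, message).
    words = message.content.split()
    if not words:
        return
    for module in modules:
        handler = getattr(module, 'functions', {}).get(words[0])
        if handler is not None:
            await handler(client, message)
            return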
| 450
| 0
| 69
|
31ff2f7c4e2d996eca1f2ec08a6bd9a3b13e6416
| 152
|
py
|
Python
|
fiaas_deploy_daemon/fake_consumer/fake_consumer.py
|
j-boivie/fiaas-deploy-daemon
|
3bba04b3b329f4c9c1418d80a9a007e8ed22f804
|
[
"Apache-2.0"
] | null | null | null |
fiaas_deploy_daemon/fake_consumer/fake_consumer.py
|
j-boivie/fiaas-deploy-daemon
|
3bba04b3b329f4c9c1418d80a9a007e8ed22f804
|
[
"Apache-2.0"
] | null | null | null |
fiaas_deploy_daemon/fake_consumer/fake_consumer.py
|
j-boivie/fiaas-deploy-daemon
|
3bba04b3b329f4c9c1418d80a9a007e8ed22f804
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8
| 12.666667
| 27
| 0.585526
|
#!/usr/bin/env python
# -*- coding: utf-8
class FakeConsumer(object):
def start(self):
pass
def is_alive(self):
return True
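FakeConsumer is a null-object stand-in: it satisfies the consumer interface (start, is_alive) without consuming anything, so the daemon can run with queue handling disabled. A sketch of how such a stand-in might be swapped in (use_fake and RealConsumer are hypothetical):
consumer = FakeConsumer() if use_fake else RealConsumer(queue)
consumer.start()            # no-op for the fake
assert consumer.is_alive()  # the fake always reports healthy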
| 26
| 6
| 77
|
1d298b1988affcb7a833af9bfcb9056a30c05653
| 1,795
|
py
|
Python
|
windy_gridworld.py
|
CurtisChris7/GridwoRLd
|
17e718314074e28ed87820056099b0060b191b7b
|
[
"MIT"
] | null | null | null |
windy_gridworld.py
|
CurtisChris7/GridwoRLd
|
17e718314074e28ed87820056099b0060b191b7b
|
[
"MIT"
] | null | null | null |
windy_gridworld.py
|
CurtisChris7/GridwoRLd
|
17e718314074e28ed87820056099b0060b191b7b
|
[
"MIT"
] | null | null | null |
from absgridworld import AbsGridworld
class WindyGridworld(AbsGridworld):
    """Class representing a windy gridworld"""
def __init__(self, schema: list, actions: list, stepPenalty: float, columnToWindMap: dict) -> None:
"""
Description
----------
Constructor used for processing the schema into the internal gridworld
representation that all concrete gridworld implementations would use.
Parameters
----------
schema : list
The schema used
actions : list
A list of all possible actions that can be taken
stepPenalty : float
The penalty for taking an action
columnToWindMap : dict
Maps the column to the wind value to be experienced
"""
super().__init__(schema, actions)
self.stepPenalty = stepPenalty
self.columnToWindMap = columnToWindMap
"""OVERLOADED METHODS"""
| 29.42623
| 103
| 0.585515
|
from absgridworld import AbsGridworld
class WindyGridworld(AbsGridworld):
    """Class representing a windy gridworld"""
def __init__(self, schema: list, actions: list, stepPenalty: float, columnToWindMap: dict) -> None:
"""
Description
----------
Constructor used for processing the schema into the internal gridworld
representation that all concrete gridworld implementations would use.
Parameters
----------
schema : list
The schema used
actions : list
A list of all possible actions that can be taken
stepPenalty : float
The penalty for taking an action
columnToWindMap : dict
Maps the column to the wind value to be experienced
"""
super().__init__(schema, actions)
self.stepPenalty = stepPenalty
self.columnToWindMap = columnToWindMap
"""OVERLOADED METHODS"""
def getStates(self) -> list:
return [(row, col) for row in range(self.rowCount) for col in range(self.colCount)]
def getAvailableActions(self, state) -> list:
row = state[0]
col = state[1]
actions = []
for a in self.getActions():
newRow = row + a[0]
newCol = col + a[1]
if newRow < 0 or newRow >= self.rowCount or newCol < 0 or newCol >= self.colCount:
pass
else:
actions.append(a)
return actions
def step(self, state, action):
row = state[0]
col = state[1]
newRow = max(row + action[0] - self.columnToWindMap[col], 0)
newCol = col + action[1]
return (newRow, newCol)
def getRewardFromAction(self, state, action) -> float:
return self.stepPenalty
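A usage sketch under assumed conventions: the schema format and the (d_row, d_col) action tuples are guesses based on the indexing above, and AbsGridworld is assumed to set rowCount, colCount, and getActions:
# Hypothetical 2x3 grid; wind maps column -> upward push strength.
schema = [['.', '.', '.'],
          ['.', '.', '.']]
actions = [(-1, 0), (1, 0), (0, -1), (0, 1)]
wind = {0: 0, 1: 1, 2: 0}
world = WindyGridworld(schema, actions, stepPenalty=-1.0, columnToWindMap=wind)
print(world.step((1, 1), (0, 1)))  # wind in column 1 pushes up: (0, 2)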
| 737
| 0
| 108
|
eec6bebfa984569eb78f8114c1b0fb5d42b088c0
| 485
|
py
|
Python
|
MahaPolApp/models.py
|
safir72347/AngularJs-Django-Form-Validation
|
b86caa4b27e20e3ded90481a9bf256fcdce82a9b
|
[
"MIT"
] | null | null | null |
MahaPolApp/models.py
|
safir72347/AngularJs-Django-Form-Validation
|
b86caa4b27e20e3ded90481a9bf256fcdce82a9b
|
[
"MIT"
] | null | null | null |
MahaPolApp/models.py
|
safir72347/AngularJs-Django-Form-Validation
|
b86caa4b27e20e3ded90481a9bf256fcdce82a9b
|
[
"MIT"
] | 1
|
2021-07-29T17:03:35.000Z
|
2021-07-29T17:03:35.000Z
|
from django.db import models
# Create your models here.
| 34.642857
| 45
| 0.793814
|
from django.db import models
# Create your models here.
class register_data(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
dob = models.CharField(max_length=30)
mobile = models.CharField(max_length=20)
address = models.CharField(max_length=150)
user_name = models.CharField(max_length=30)
email = models.CharField(max_length=30)
password = models.CharField(max_length=100)
created_on = models.CharField(max_length=30)
| 0
| 406
| 23
|
6e013e327003dbb74a3af18c948acf50a8373ca5
| 7,215
|
py
|
Python
|
src/api/datahub/access/tests/modules/access/conftest.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/datahub/access/tests/modules/access/conftest.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/datahub/access/tests/modules/access/conftest.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import pytest
from datahub.access.tests import db_helper
@pytest.fixture(scope="session")
def django_db_setup():
"""Avoid creating/setting up the test database"""
pass
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 29.44898
| 111
| 0.585031
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import pytest
from datahub.access.tests import db_helper
@pytest.fixture(scope="session")
def django_db_setup():
"""Avoid creating/setting up the test database"""
pass
@pytest.fixture
def test_data_id():
data_id = 3000
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"access_raw_data",
id=data_id,
bk_biz_id=2,
raw_data_name="fixture_data1",
raw_data_alias="fixture_data1_alias",
sensitivity="private",
data_source="server",
data_encoding="UTF-8",
data_category="",
data_scenario="log",
bk_app_code="bk_data",
storage_channel_id=0,
created_by="admin",
created_at="2019-01-01 00:00:00",
updated_by="admin",
updated_at="2019-01-01 00:00:00",
description="description",
maintainer="admin1,admin2,admin3",
)
try:
yield data_id
finally:
delete_data_id(data_id)
@pytest.fixture
def test_encode():
encode_id = 1
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"encoding_config",
id=encode_id,
encoding_name="UTF8",
encoding_alias="UTF8",
created_at="2018-10-23T12:02:18",
updated_at="2018-10-23T12:02:18",
created_by="admin",
updated_by="admin",
active=1,
description="",
)
try:
yield encode_id
finally:
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(cur, "DELETE FROM encoding_config where id = 1")
@pytest.fixture
def test_field_type():
field_type = "double"
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"field_type_config",
field_type=field_type,
field_type_name="double",
field_type_alias="浮点型",
updated_at="2021-05-28 12:02:18",
created_at="2021-05-28 12:02:18",
created_by="admin",
updated_by="admin",
active=1,
description="",
)
try:
yield
finally:
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(cur, "DELETE FROM field_type_config")
@pytest.fixture
def test_time_format():
time_format_id = 111
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"time_format_config",
id=time_format_id,
time_format_name="yyyyMMdd",
time_format_alias="yyyyMMdd",
time_format_example="20191010",
timestamp_len=8,
format_unit="y,h,m",
created_at="2018-10-23T12:02:18",
updated_at="2018-10-23T12:02:18",
created_by="admin",
updated_by="admin",
active=1,
description="",
)
try:
yield time_format_id
finally:
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(cur, "DELETE FROM time_format_config where id = 111")
@pytest.fixture
def test_scenario():
scenario_id = 111
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"access_scenario_config",
id=scenario_id,
data_scenario_name="log",
data_scenario_alias="log",
created_at="2018-10-23T12:02:18",
updated_at="2018-10-23T12:02:18",
created_by="admin",
updated_by="admin",
active=1,
description="",
)
try:
yield scenario_id
finally:
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(cur, "DELETE FROM access_scenario_config where id = 111")
@pytest.fixture
def test_category():
category_id = 11
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"data_category_config",
id=category_id,
data_category_alias="",
updated_by="admin",
data_category_name="",
created_at="2018-10-23T12:02:18",
updated_at="2018-10-23T12:02:18",
created_by="admin",
active=1,
description="",
)
try:
yield category_id
finally:
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(
cur,
"""
DELETE FROM data_category_config WHERE id = %(category_id)s
""",
category_id=category_id,
)
@pytest.fixture
def test_oper_log():
oper_log_id = 11
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.insert(
cur,
"access_operation_log",
id=oper_log_id,
raw_data_id=123,
updated_by="admin",
args='{"test":"111"}',
status="success",
created_at="2018-10-23T12:02:18",
updated_at="2018-10-23T12:02:18",
created_by="admin",
description="",
)
try:
yield oper_log_id
finally:
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(
cur,
"""
DELETE FROM access_operation_log WHERE id = 11""",
)
def delete_data_id(data_id):
with db_helper.open_cursor("mapleleaf") as cur:
db_helper.execute(
cur,
"""
DELETE FROM access_raw_data WHERE id = %(data_id)s
""",
data_id=data_id,
)
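A sketch of a test consuming one of these fixtures; pytest injects it by parameter name, and the fixture yields the inserted row's id:
def test_raw_data_exists(test_data_id):
    # The fixture inserted access_raw_data id 3000 and deletes it afterwards.
    assert test_data_id == 3000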
| 5,325
| 0
| 177
|
184cf49ab5aa52ae09e6f247d56792442b3ec425
| 1,423
|
py
|
Python
|
GUI Application/main.py
|
vladBaciu/PPG-EduKit
|
2959ca884a2df84ea0d9993658c89d7fdc3452e7
|
[
"BSD-2-Clause"
] | 1
|
2022-03-21T12:53:51.000Z
|
2022-03-21T12:53:51.000Z
|
GUI Application/main.py
|
vladBaciu/PPG-EduKit
|
2959ca884a2df84ea0d9993658c89d7fdc3452e7
|
[
"BSD-2-Clause"
] | null | null | null |
GUI Application/main.py
|
vladBaciu/PPG-EduKit
|
2959ca884a2df84ea0d9993658c89d7fdc3452e7
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 17:47:46 2021
@author: vlad-eusebiu.baciu@vub.be
"""
import pyqtgraph as pg
import sys
import logo_qrc
import ctypes
from PyQt5 import QtWidgets, uic
from pyqtgraph import PlotWidget
from application1 import App1_Gui
from application2 import App2_Gui
if __name__ == '__main__':
main()
| 25.410714
| 106
| 0.64863
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 17:47:46 2021
@author: vlad-eusebiu.baciu@vub.be
"""
import pyqtgraph as pg
import sys
import logo_qrc
import ctypes
from PyQt5 import QtWidgets, uic
from pyqtgraph import PlotWidget
from application1 import App1_Gui
from application2 import App2_Gui
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
#Load the UI Page
uic.loadUi('app_entry.ui', self)
self.setWindowTitle("PPG EduKit Demos")
self.app1_button.clicked.connect(self.open_app1)
self.app2_button.clicked.connect(self.open_app2)
self.show_pdf_list.triggered.connect(self.open_app_pdf)
self.show_info_box.triggered.connect(self.show_info)
def open_app_pdf(self):
print("open_app_pdf")
def show_info(self):
print("show_info")
ctypes.windll.user32.MessageBoxW(1, "Bla bla bla text text text", "About PPG EduKit platform", 64)
def open_app1(self):
self.main = App1_Gui()
self.main.show()
self.close()
def open_app2(self):
self.main = App2_Gui()
self.main.show()
self.close()
def main():
app = QtWidgets.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
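The window hand-off above (build the next window, show() it, then close() the current one) keeps a reference in self.main so the new window is not garbage-collected; a minimal sketch with a hypothetical NextWindow class:
def open_next(self):
    self.main = NextWindow()  # keep a reference, or the window is collected and vanishes
    self.main.show()
    self.close()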
| 852
| 19
| 209
|
fedd4ce27f3dfcab21b697c24ca2b7461be665eb
| 1,968
|
py
|
Python
|
bin_packing.py
|
kappapidelta/Apress-AI
|
6faf15f18c840c95eafce8f3ee5a41065c590019
|
[
"MIT"
] | 66
|
2018-03-26T15:08:28.000Z
|
2022-03-17T13:07:13.000Z
|
bin_packing.py
|
kappapidelta/Apress-AI
|
6faf15f18c840c95eafce8f3ee5a41065c590019
|
[
"MIT"
] | 1
|
2022-01-13T06:06:17.000Z
|
2022-01-13T06:06:17.000Z
|
bin_packing.py
|
kappapidelta/Apress-AI
|
6faf15f18c840c95eafce8f3ee5a41065c590019
|
[
"MIT"
] | 63
|
2018-03-06T08:47:11.000Z
|
2022-03-25T16:35:26.000Z
|
from random import randint,uniform
from math import ceil
from my_or_tools import newSolver, ObjVal, SolVal
| 31.741935
| 65
| 0.524898
|
from random import randint,uniform
from math import ceil
from my_or_tools import newSolver, ObjVal, SolVal
def gen_data(n):
R,T=[],0
for i in range(n):
RR=[randint(6,10),randint(200,500)]
T+=RR[0]*RR[1]
R.append(RR)
return R,randint(1200, 1500)
def solve_model(D,W,symmetry_break=False,knapsack=True):
s = newSolver('Bin Packing',True)
nbC,nbP = len(D),sum([P[0] for P in D])
w = [e for sub in [[d[1]]*d[0] for d in D] for e in sub]
nbT,nbTmin = bound_trucks(w,W)
x = [[[s.IntVar(0,1,'') for _ in range(nbT)] \
for _ in range(d[0])] for d in D]
y = [s.IntVar(0,1,'') for _ in range(nbT)]
for k in range(nbT):
sxk = sum(D[i][1]*x[i][j][k] \
for i in range(nbC) for j in range(D[i][0]))
s.Add(sxk <= W*y[k])
for i in range(nbC):
for j in range(D[i][0]):
s.Add(sum([x[i][j][k] for k in range(nbT)]) == 1)
if symmetry_break:
for k in range(nbT-1):
s.Add(y[k] >= y[k+1])
for i in range(nbC):
for j in range(D[i][0]):
for k in range(nbT):
for jj in range(max(0,j-1),j):
s.Add(sum(x[i][jj][kk] \
for kk in range(k+1)) >= x[i][j][k])
for jj in range(j+1,min(j+2,D[i][0])):
s.Add(sum(x[i][jj][kk] \
for kk in range(k,nbT))>=x[i][j][k])
if knapsack:
s.Add(sum(W*y[i] for i in range(nbT)) >= sum(w))
s.Add(sum(y[k] for k in range(nbT)) >= nbTmin)
s.Minimize(sum(y[k] for k in range(nbT)))
rc = s.Solve()
P2T=[[D[i][1], [k for j in range(D[i][0]) for k in range(nbT)
if SolVal(x[i][j][k])>0]] for i in range(nbC) ]
T2P=[[k, [(i,j,D[i][1]) \
for i in range(nbC) for j in range(D[i][0])\
if SolVal(x[i][j][k])>0]] for k in range(nbT)]
return rc,ObjVal(s),P2T,T2P
def bound_trucks(w,W):
nb,tot = 1,0
for i in range(len(w)):
if tot+w[i] < W:
tot += w[i]
else:
tot = w[i]
nb = nb+1
return nb,ceil(sum(w)/W)
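A hedged driver for the model, assuming my_or_tools wraps Google OR-Tools as in the rest of this code:
if __name__ == '__main__':
    D, W = gen_data(4)  # random (count, weight) pairs plus a truck capacity
    rc, trucks_used, P2T, T2P = solve_model(D, W, symmetry_break=True)
    print('status:', rc, 'trucks used:', trucks_used)
    for truck, items in T2P:
        print(truck, items)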
| 1,791
| 0
| 68
|
c0e909dbb046be66edd190e071e7dd8efbfcda7a
| 858
|
py
|
Python
|
solutions/problem23.py
|
wy/ProjectEuler
|
842d45994fe76ef0ea70dbe6b6f2a559078f45f3
|
[
"Apache-1.1"
] | null | null | null |
solutions/problem23.py
|
wy/ProjectEuler
|
842d45994fe76ef0ea70dbe6b6f2a559078f45f3
|
[
"Apache-1.1"
] | null | null | null |
solutions/problem23.py
|
wy/ProjectEuler
|
842d45994fe76ef0ea70dbe6b6f2a559078f45f3
|
[
"Apache-1.1"
] | null | null | null |
# coding: utf8
# Author: Wing Yung Chan (~wy)
# Date: 2017
# non-abundant sums (Project Euler problem 23)
# N is non-abundant if the sum of its proper divisors is <= N
# properdivisors(N) = sum of {1 <= i < N | N % i == 0}
print(problem23())
| 16.823529
| 61
| 0.536131
|
# coding: utf8
# Author: Wing Yung Chan (~wy)
# Date: 2017
# non-abundant sums (Project Euler problem 23)
# N is non-abundant if the sum of its proper divisors is <= N
# properdivisors(N) = sum of {1 <= i < N | N % i == 0}
def properdivisors(N):
acc = 0
for i in range(1,N):
if N % i == 0:
acc = acc + i
return acc
def abundant(N):
return properdivisors(N) > N
def generateA():
AList = []
for i in range(1, 28124):
if abundant(i):
AList.append(i)
return AList
def generateAPairs(AList):
APairs = set()
for i in AList:
for j in AList:
if i+j <= 28123:
APairs.add(i+j)
return APairs
def problem23():
acc = 0
APairs = generateAPairs(generateA())
for i in range(1, 28124):
if i not in APairs:
acc = acc + i
return acc
print(problem23())
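A quick sanity check of the helpers: 12 is the smallest abundant number (1+2+3+4+6 = 16 > 12), so 24 = 12+12 must appear among the pair sums:
assert properdivisors(12) == 16
assert abundant(12) and not abundant(11)
assert 24 in generateAPairs(generateA())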
| 537
| 0
| 115
|
1c7029d58f3d83ba1ff6470775aed8811fcd0cda
| 1,618
|
py
|
Python
|
examples/docs_snippets/docs_snippets/intro_tutorial/advanced/scheduling/scheduler.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
examples/docs_snippets/docs_snippets/intro_tutorial/advanced/scheduling/scheduler.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets/docs_snippets/intro_tutorial/advanced/scheduling/scheduler.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-12-08T18:13:19.000Z
|
2021-12-08T18:13:19.000Z
|
# start_scheduler_marker_0
import csv
from datetime import datetime
import requests
from dagster import get_dagster_logger, job, op, repository, schedule
@op
@job
# end_scheduler_marker_0
# start_scheduler_marker_1
@schedule(
cron_schedule="45 6 * * *",
job=hello_cereal_job,
execution_timezone="US/Central",
)
# end_scheduler_marker_1
# start_scheduler_marker_2
@repository
# end_scheduler_marker_2
# start_scheduler_marker_3
# end_scheduler_marker_3
# start_scheduler_marker_4
@schedule(
cron_schedule="45 6 * * *",
job=hello_cereal_job,
execution_timezone="US/Central",
should_execute=weekday_filter,
)
# end_scheduler_marker_4
| 23.114286
| 75
| 0.721261
|
# start_scheduler_marker_0
import csv
from datetime import datetime
import requests
from dagster import get_dagster_logger, job, op, repository, schedule
@op
def hello_cereal(context):
response = requests.get("https://docs.dagster.io/assets/cereal.csv")
lines = response.text.split("\n")
cereals = [row for row in csv.DictReader(lines)]
date = context.op_config["date"]
get_dagster_logger().info(
f"Today is {date}. Found {len(cereals)} cereals."
)
@job
def hello_cereal_job():
hello_cereal()
# end_scheduler_marker_0
# start_scheduler_marker_1
@schedule(
cron_schedule="45 6 * * *",
job=hello_cereal_job,
execution_timezone="US/Central",
)
def good_morning_schedule(context):
date = context.scheduled_execution_time.strftime("%Y-%m-%d")
return {"ops": {"hello_cereal": {"config": {"date": date}}}}
# end_scheduler_marker_1
# start_scheduler_marker_2
@repository
def hello_cereal_repository():
return [hello_cereal_job, good_morning_schedule]
# end_scheduler_marker_2
# start_scheduler_marker_3
def weekday_filter(_context):
weekno = datetime.today().weekday()
# Returns true if current day is a weekday
return weekno < 5
# end_scheduler_marker_3
# start_scheduler_marker_4
@schedule(
cron_schedule="45 6 * * *",
job=hello_cereal_job,
execution_timezone="US/Central",
should_execute=weekday_filter,
)
def good_weekday_morning_schedule(context):
date = context.scheduled_execution_time.strftime("%Y-%m-%d")
    return {"ops": {"hello_cereal": {"config": {"date": date}}}}
# end_scheduler_marker_4
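To exercise a schedule's run-config function outside the scheduler daemon, Dagster offers build_schedule_context; a hedged sketch (the chosen date is arbitrary):
from dagster import build_schedule_context
ctx = build_schedule_context(
    scheduled_execution_time=datetime(2024, 1, 1, 6, 45)
)
print(good_morning_schedule(ctx))
# -> {'ops': {'hello_cereal': {'config': {'date': '2024-01-01'}}}}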
| 808
| 0
| 132
|
b3fcb197fa56808b5ac3439e00e1aac34437d381
| 6,613
|
py
|
Python
|
counterfit/targets/movie_reviews/movie_reviews.py
|
ScriptBox99/azure-counterfit
|
757a51e082f47d446cf9cc0c93011c3a4953be52
|
[
"MIT"
] | 495
|
2021-05-03T17:11:52.000Z
|
2022-03-31T19:22:40.000Z
|
counterfit/targets/movie_reviews/movie_reviews.py
|
ScriptBox99/azure-counterfit
|
757a51e082f47d446cf9cc0c93011c3a4953be52
|
[
"MIT"
] | 25
|
2021-05-06T00:07:19.000Z
|
2022-03-31T12:16:25.000Z
|
counterfit/targets/movie_reviews/movie_reviews.py
|
ScriptBox99/azure-counterfit
|
757a51e082f47d446cf9cc0c93011c3a4953be52
|
[
"MIT"
] | 79
|
2021-05-03T21:31:44.000Z
|
2022-03-13T14:42:08.000Z
|
import pickle
import re
import copy
import numpy as np
import pandas as pd
from torch import nn
import torch
from counterfit.core.targets import Target
class MovieReviewsSentimentLSTM(nn.Module):
    """LSTM sentiment-analysis model, pre-trained for 25 epochs on the IMDB movie-review dataset.
    """
def forward(self, x, hidden):
"""Forward process of LSTM model
Args:
x ([tensor]): training data/batch_first
Returns:
Last sigmoid output and hidden state
"""
batch_size = x.size(0)
# embeddings and lstm_out
# shape: Batch x Sequence x Feature since batch_first = True
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
# dropout and fully connected layer
out = self.dropout(lstm_out)
out = self.fc(out)
# sigmoid function
sig_out = self.sig(out)
# reshape to be batch_size first
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1] # get last batch of labels
return sig_out, hidden
class MovieReviewsTarget(Target):
    """Movie-reviews target: predicts sentiment scores for a given input and converts the scores to labels.
    """
target_data_type = "text"
target_name = "movie_reviews"
target_endpoint = f"movie_reviews_sentiment_analysis.pt"
target_input_shape = (1,)
target_output_classes = [0, 1]
target_classifier = "blackbox"
sample_input_path = f"movie-reviews-scores-full.csv"
vocab_file = f"movie-reviews-vocab.pkl"
X = []
    def load(self):
        """Load the sample reviews, the vocabulary, and the trained LSTM model."""
self.data = pd.read_csv(self.fullpath(self.sample_input_path))
print(f"\n[+] Total Movie Reviews: {len(self.data)}\n")
self._load_x()
self.vocab = self._load_vocab()
self.model = self._load_model()
    def _load_x(self):
        """Collect the raw review texts into self.X."""
# Append input reviews to X list
for idx in range(len(self.data)):
self.X.append(self.data['review'][idx])
    def _load_vocab(self):
        """Load the pickled vocabulary (word -> index) mapping."""
        # Load vocabulary file: the 1000 most frequently occurring words
with open(self.fullpath(self.vocab_file), 'rb') as fp:
vocab = pickle.load(fp)
return vocab
    def preprocess_string(self, s):
        """Strip punctuation, runs of whitespace, and digits from a token."""
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespaces with no space
s = re.sub(r"\s+", '', s)
# replace digits with no space
s = re.sub(r"\d", '', s)
return s
    def _load_model(self):
        """Rebuild the LSTM architecture and load the trained weights."""
        # Load the LSTM model that's already trained
no_layers = 2
vocab_size = len(self.vocab) + 1 # extra 1 for padding purpose
embedding_dim = 64
output_dim = 1
hidden_dim = 256
model = MovieReviewsSentimentLSTM(
no_layers, vocab_size, hidden_dim, embedding_dim, output_dim, drop_prob=0.5)
model.load_state_dict(copy.deepcopy(
torch.load(self.fullpath(self.target_endpoint), 'cpu')))
model.eval()
return model
    def predict(self, x):
        """Take a list of input texts, e.g. ["how are you?"].
        Args:
            x (list): [input_text]
        Returns:
            final_prob_scores: e.g. [[0.98, 0.02]], where 0.98 is the probability the tone is positive and 0.02 that it is negative
"""
final_prob_scores = []
for text in x:
word_seq = np.array([self.vocab[self.preprocess_string(word)] for word in text.split()
if self.preprocess_string(word) in self.vocab.keys()])
word_seq = np.expand_dims(word_seq, axis=0)
pad = torch.from_numpy(self.padding_(word_seq, 500))
inputs = pad.to('cpu')
batch_size = 1
h = self.model.init_hidden(batch_size)
h = tuple([each.data for each in h])
output, h = self.model(inputs, h)
probability = output.item()
final_prob_scores.append([probability, 1.0-probability])
return final_prob_scores # this must produce a list of class probabilities
| 34.442708
| 138
| 0.613337
|
import pickle
import re
import copy
import numpy as np
import pandas as pd
from torch import nn
import torch
from counterfit.core.targets import Target
class MovieReviewsSentimentLSTM(nn.Module):
    """LSTM sentiment-analysis model, pre-trained for 25 epochs on the IMDB movie-review dataset.
    """
def __init__(self, no_layers, vocab_size, hidden_dim, embedding_dim, output_dim, drop_prob=0.5):
# embedding_dim: number of expected features in the input `x`
# hidden_dim: number of features in the hidden state `h`
super(MovieReviewsSentimentLSTM, self).__init__()
self.no_layers = no_layers # number of recurrent layers
self.vocab_size = vocab_size
self.hidden_dim = hidden_dim # The number of features in the hidden state h
# embedding and LSTM layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.proj_size = 0
self.output_dim = output_dim # The size of the output you desire from your RNN
# dropout layer
# a Dropout layer on the outputs of each LSTM layer except the last layer, with dropout probability equal to dropout
self.dropout = nn.Dropout(drop_prob)
# lstm
self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=self.hidden_dim,
num_layers=no_layers, batch_first=True, proj_size=self.proj_size)
# final fully connected linear and sigmoid layer
self.fc = nn.Linear(self.hidden_dim, self.output_dim)
self.sig = nn.Sigmoid()
def forward(self, x, hidden):
"""Forward process of LSTM model
Args:
x ([tensor]): training data/batch_first
Returns:
Last sigmoid output and hidden state
"""
batch_size = x.size(0)
# embeddings and lstm_out
# shape: Batch x Sequence x Feature since batch_first = True
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
# dropout and fully connected layer
out = self.dropout(lstm_out)
out = self.fc(out)
# sigmoid function
sig_out = self.sig(out)
# reshape to be batch_size first
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1] # get last batch of labels
return sig_out, hidden
def init_hidden(self, batch_size, device='cpu'):
# initialize hidden weights (h,c) to 0
weights = next(self.parameters()).data
h = (weights.new(self.no_layers, batch_size, self.hidden_dim).zero_().to(device),
weights.new(self.no_layers, batch_size, self.hidden_dim).zero_().to(device))
return h
class MovieReviewsTarget(Target):
    """Movie-reviews target: predicts sentiment scores for a given input and converts the scores to labels.
    """
target_data_type = "text"
target_name = "movie_reviews"
target_endpoint = f"movie_reviews_sentiment_analysis.pt"
target_input_shape = (1,)
target_output_classes = [0, 1]
target_classifier = "blackbox"
sample_input_path = f"movie-reviews-scores-full.csv"
vocab_file = f"movie-reviews-vocab.pkl"
X = []
    def load(self):
        """Load the sample reviews, the vocabulary, and the trained LSTM model."""
self.data = pd.read_csv(self.fullpath(self.sample_input_path))
print(f"\n[+] Total Movie Reviews: {len(self.data)}\n")
self._load_x()
self.vocab = self._load_vocab()
self.model = self._load_model()
    def _load_x(self):
        """Collect the raw review texts into self.X."""
# Append input reviews to X list
for idx in range(len(self.data)):
self.X.append(self.data['review'][idx])
    def _load_vocab(self):
        """Load the pickled vocabulary (word -> index) mapping."""
        # Load vocabulary file: the 1000 most frequently occurring words
with open(self.fullpath(self.vocab_file), 'rb') as fp:
vocab = pickle.load(fp)
return vocab
    def preprocess_string(self, s):
        """Strip punctuation, runs of whitespace, and digits from a token."""
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespaces with no space
s = re.sub(r"\s+", '', s)
# replace digits with no space
s = re.sub(r"\d", '', s)
return s
    def _load_model(self):
        """Rebuild the LSTM architecture and load the trained weights."""
        # Load the LSTM model that's already trained
no_layers = 2
vocab_size = len(self.vocab) + 1 # extra 1 for padding purpose
embedding_dim = 64
output_dim = 1
hidden_dim = 256
model = MovieReviewsSentimentLSTM(
no_layers, vocab_size, hidden_dim, embedding_dim, output_dim, drop_prob=0.5)
model.load_state_dict(copy.deepcopy(
torch.load(self.fullpath(self.target_endpoint), 'cpu')))
model.eval()
return model
def padding_(self, sentences, seq_len):
# Padding with zeros if sentence is less than required seq length
features = np.zeros((len(sentences), seq_len), dtype=int)
for ii, review in enumerate(sentences):
if len(review) != 0:
features[ii, -len(review):] = np.array(review)[:seq_len]
return features
    def predict(self, x):
        """Take a list of input texts, e.g. ["how are you?"].
        Args:
            x (list): [input_text]
        Returns:
            final_prob_scores: e.g. [[0.98, 0.02]], where 0.98 is the probability the tone is positive and 0.02 that it is negative
"""
final_prob_scores = []
for text in x:
word_seq = np.array([self.vocab[self.preprocess_string(word)] for word in text.split()
if self.preprocess_string(word) in self.vocab.keys()])
word_seq = np.expand_dims(word_seq, axis=0)
pad = torch.from_numpy(self.padding_(word_seq, 500))
inputs = pad.to('cpu')
batch_size = 1
h = self.model.init_hidden(batch_size)
h = tuple([each.data for each in h])
output, h = self.model(inputs, h)
probability = output.item()
final_prob_scores.append([probability, 1.0-probability])
return final_prob_scores # this must produce a list of class probabilities
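A usage sketch for the target; Counterfit normally drives this through its CLI, so instantiating it directly and the argument-free constructor are assumptions:
target = MovieReviewsTarget()
target.load()  # reads the CSV, the vocabulary pickle, and the model weights
scores = target.predict(["a wonderful, moving film"])
print(scores)  # e.g. [[0.97, 0.03]] -> [positive, negative] probabilities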
| 1,867
| 0
| 81
|