hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a8e27fe84476933e035a3939488c30dcc66b58ba | 17,061 | py | Python | rb/processings/sentiment/utils_new.py | readerbench/ReaderBench | f0588a9a63ba21e3b8c2e5e5bc474904c07f6897 | [
"Apache-2.0"
] | 1 | 2022-03-05T17:12:55.000Z | 2022-03-05T17:12:55.000Z | rb/processings/sentiment/utils_new.py | rwth-acis/readerbenchpy | 1a070ae678f58ccd6f358c0802bdf0b3b3dde9d3 | [
"Apache-2.0"
] | 2 | 2021-10-17T14:00:52.000Z | 2021-10-17T14:00:52.000Z | rb/processings/sentiment/utils_new.py | rwth-acis/readerbenchpy | 1a070ae678f58ccd6f358c0802bdf0b3b3dde9d3 | [
"Apache-2.0"
] | null | null | null | import json
import sys
# import matplotlib.pyplot as plt
import copy
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import class_weight
from collections import Counter
import random
from tensorflow.keras.callbacks import Callback
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from nltk.tokenize import sent_tokenize
import os
def readJson(file_path, original=False):
    """Read a JSON-lines file and return its entries as a list of dicts.

    When ``original`` is True (loading the raw scraped dataset), entries
    whose review rating is 0 are skipped.
    """
    entries = []
    with open(file_path, encoding="utf8") as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            # Rating 0 marks an unusable review in the original dump.
            if original == True and record['_source']['Review']['ReviewRating'] == 0:
                continue
            entries.append(record)
    return entries
def computeScoreHistogram(data, normalize = False):
    """Count how many reviews carry each rating.

    :param data: list of dataset entries (dicts)
    :param normalize: when True, convert counts to fractions of len(data)
    :return: dict mapping rating -> count (or fraction); also printed
    """
    counts = {}
    for record in data:
        rating = record['_source']['Review']['ReviewRating']
        counts[rating] = counts.get(rating, 0) + 1
    if normalize == True:
        total = len(data)
        counts = {rating: 1.0 * n / total for rating, n in counts.items()}
    print(counts)
    return counts
def computeTextStatistics(data, superior_threshold=None, inferior_threshold=None):
    """Collect per-review length and category statistics.

    :param data: list of dataset entries (dicts)
    :param superior_threshold: count reviews with strictly more words than this
    :param inferior_threshold: count reviews with strictly fewer words than this
    :return: (char-length histogram, word-count histogram, category histogram,
              #reviews above superior_threshold, #reviews below inferior_threshold)
    """
    def _bump(histogram, key):
        # Increment a histogram bucket, creating it on first sight.
        histogram[key] = histogram.get(key, 0) + 1

    histo_char = {}
    histo_word = {}
    histo_category = {}
    sup_threshold = 0
    inf_threshold = 0
    for record in data:
        body = record['_source']['Review']['ReviewBody']
        category = record['_source']['Product']['ProductCategory']
        n_chars = len(body)
        # Word count via naive single-space split, as in the rest of the module.
        n_words = len(body.split(" "))
        if superior_threshold != None and n_words > superior_threshold:
            sup_threshold += 1
        if inferior_threshold != None and n_words < inferior_threshold:
            inf_threshold += 1
        _bump(histo_char, n_chars)
        _bump(histo_word, n_words)
        _bump(histo_category, category)
    return histo_char, histo_word, histo_category, sup_threshold, inf_threshold
def computeDatasetStatistics(data, superior_threshold=None, inferior_threshold=None):
    """Print score/length/category statistics for ``data`` and plot histograms.

    :param data: list of dataset entries (dicts)
    :param superior_threshold: forwarded to computeTextStatistics (word count >)
    :param inferior_threshold: forwarded to computeTextStatistics (word count <)
    """
    # BUG FIX: the module-level matplotlib import is commented out, so every
    # call to this function previously crashed with NameError on `plt`.
    # Import lazily here so the rest of the module works without matplotlib.
    import matplotlib.pyplot as plt

    histo_scores = computeScoreHistogram(data)
    histo_chars, histo_words, histo_category, sup_threshold, inf_threshold = computeTextStatistics(data, superior_threshold, inferior_threshold)
    print("Reviews with number of words over", superior_threshold, "=", sup_threshold, "percentage =", 100.0*sup_threshold/len(data))
    print("Reviews with number of words under", inferior_threshold, "=", inf_threshold, "percentage =", 100.0*inf_threshold/len(data))
    print(histo_category)
    # One bar chart per statistic; each show() blocks until the window closes.
    plt.bar(histo_scores.keys(), histo_scores.values(), 1.0, color='g')
    plt.title("Scores")
    plt.show()
    plt.bar(histo_chars.keys(), histo_chars.values(), 1.0, color='g')
    plt.title("Chars")
    plt.show()
    plt.bar(histo_words.keys(), histo_words.values(), 1.0, color='g')
    plt.title("Words")
    plt.show()
def splitData5vAll(data):
    """Binarize ratings: 5 -> label 1, anything else -> label 0.

    Returns a deep copy; the input list is left untouched.
    """
    relabeled = copy.deepcopy(data)
    for record in relabeled:
        review = record['_source']['Review']
        review['ReviewRating'] = 1 if review['ReviewRating'] == 5 else 0
    return relabeled
def saveData(data, filename):
    """Write ``data`` to ``filename`` in JSON-lines format (one dict per line)."""
    with open(filename, 'w') as sink:
        for record in data:
            sink.write(json.dumps(record))
            sink.write("\n")
def getFeatures(data, use_review_text=True, sample_majority=False, sample_count=0, seed=None, majority_class=3):
    """Convert dataset entries into flat feature dicts.

    :param data: list of dataset entries (dicts)
    :param use_review_text: include the review body under 'features:review_text'
    :param sample_majority: when True, keep only ``sample_count`` randomly
        sampled entries of ``majority_class`` and all other entries
    :param sample_count: how many majority-class entries to keep
    :param seed: random seed used for the majority sampling
    :param majority_class: the rating treated as the majority class
    :return: list of dicts with feature keys and a 'label' key
    """
    def _to_entry(record):
        # Flatten one dataset record into a feature/label dict.
        entry = {}
        if use_review_text == True:
            entry['features:review_text'] = record['_source']['Review']['ReviewBody']
        entry['label'] = record['_source']['Review']['ReviewRating']
        return entry

    if sample_majority == False:
        return [_to_entry(record) for record in data]
    elif sample_majority == True:
        majority_entries = [_to_entry(record) for record in data
                            if record['_source']['Review']['ReviewRating'] == majority_class]
        # Seed only for the sampling step, then restore system entropy.
        random.seed(seed)
        result = random.sample(majority_entries, sample_count)
        random.seed()
        # Append every non-majority entry, preserving dataset order.
        for record in data:
            if record['_source']['Review']['ReviewRating'] != majority_class:
                result.append(_to_entry(record))
        return result
def processFeatures(data, bert_proc):
    """Tokenize review texts with ``bert_proc`` and build model inputs.

    :param data: list of feature dicts produced by getFeatures
    :param bert_proc: object exposing process_text(text) -> (input_ids, segment_ids)
    :return: ([input_ids array, segment_ids array], labels list, class weights)
    """
    input_id_rows = []
    segment_id_rows = []
    labels = []
    for entry in data:
        input_ids, segment_ids = bert_proc.process_text(entry["features:review_text"])
        input_id_rows.append(input_ids)
        segment_id_rows.append(segment_ids)
        labels.append(entry['label'])
    features = [np.array(input_id_rows), np.array(segment_id_rows)]
    # NOTE(review): positional arguments here match older scikit-learn
    # releases; recent versions require keyword arguments for this call.
    class_weights = class_weight.compute_class_weight('balanced', np.unique(labels), labels)
    class_weights = class_weights.astype(np.float32)
    return features, labels, class_weights
def processFeaturesRawText(data, bert_proc):
    """Tokenize raw text strings with ``bert_proc`` into model inputs.

    :param data: iterable of raw text strings
    :param bert_proc: object exposing process_text(text) -> (input_ids, segment_ids)
    :return: [input_ids array, segment_ids array]
    """
    input_id_rows = []
    segment_id_rows = []
    for text in data:
        input_ids, segment_ids = bert_proc.process_text(text)
        input_id_rows.append(input_ids)
        segment_id_rows.append(segment_ids)
    return [np.array(input_id_rows), np.array(segment_id_rows)]
# split data in train dev test split using stratified
# input -> data
# output -> train, dev, test data
def splitTrainDevTest(data):
    """Stratified 80/10/10 train/dev/test split on the review rating.

    First carves off 10% as test, then takes ~11.1% of the remaining 90%
    (i.e. ~10% of the total) as dev. Prints split sizes as a sanity check.
    """
    train_data = []
    dev_data = []
    test_data = []
    full_indices = np.array(range(len(data)))
    # Stratify on the review rating so each split keeps the class balance.
    full_classes = np.array(list(map(lambda x: x['_source']['Review']['ReviewRating'], data)))
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1)
    # n_splits=1, so this loop runs exactly once.
    for tr, te in sss.split(full_indices, full_classes):
        aux_train_indexes = tr
        test_indexes = te
    aux_train_data = []
    for i in test_indexes:
        test_data.append(data[i])
    for i in aux_train_indexes:
        aux_train_data.append(data[i])
    indices = np.array(range(len(aux_train_data)))
    classes = np.array(list(map(lambda x: x['_source']['Review']['ReviewRating'], aux_train_data)))
    # 0.111111 of the remaining 90% ~= 10% of the full dataset for dev.
    sss_ = StratifiedShuffleSplit(n_splits=1, test_size=0.111111)
    for tr, de in sss_.split(indices, classes):
        train_indexes = tr
        dev_indexes = de
    for i in dev_indexes:
        dev_data.append(aux_train_data[i])
    for i in train_indexes:
        train_data.append(aux_train_data[i])
    print(len(train_data), len(dev_data), len(test_data), len(train_data) + len(dev_data) + len(test_data), len(data))
    # NOTE(review): train/dev indexes refer to aux_train_data while test
    # indexes refer to the full dataset, so this intersection mixes two
    # different index spaces; it is only an informal sanity print.
    print(len(list(set(train_indexes) & set(dev_indexes) & set(test_indexes))))
    return train_data, dev_data, test_data
def splitData4Classes(data):
    """Collapse 1-5 star ratings into 4 class labels.

    Mapping: 1 -> 0, {2, 3} -> 1, 4 -> 2, 5 -> 3. Ratings outside 1-5 are
    left unchanged. Returns a deep copy; the input list is untouched.
    """
    rating_to_label = {1: 0, 2: 1, 3: 1, 4: 2, 5: 3}
    relabeled = copy.deepcopy(data)
    for record in relabeled:
        review = record['_source']['Review']
        if review['ReviewRating'] in rating_to_label:
            review['ReviewRating'] = rating_to_label[review['ReviewRating']]
    return relabeled
class FScoreCallback(Callback):
    """Keras callback that prints a classification report after evaluation.

    Stores the integer labels (argmax of one-hot vectors) at construction,
    then on test end predicts over ``dataset`` and prints precision/recall/F1.
    """
    def __init__(self, dataset, steps, labels):
        # dataset: the evaluation input pipeline; steps: #prediction steps;
        # labels: one-hot ground-truth vectors, converted to ints via argmax.
        super().__init__()
        self.steps = steps
        self.dataset = dataset
        self.labels_int = []
        for x in labels:
            self.labels_int.append(np.argmax(x))
    # NOTE(review): logs={} is a mutable default argument; harmless here since
    # it is never mutated, but a known Python pitfall.
    def on_test_end(self, epoch, logs={}):
        y_pred = []
        y_true = self.labels_int
        predict_results = self.model.predict(self.dataset, steps=self.steps)
        for prediction in predict_results:
            y_pred.append(np.argmax(prediction))
        print()
        print(classification_report(y_true, y_pred, digits=4))
def compute_parameters(model_folder_path):
    """Build the BERT classifier and print its parameter counts.

    Constructs the same architecture used for training (BERT -> CLS token ->
    dropout -> dense(100, relu) -> dense(10, softmax)), loads the pretrained
    BERT checkpoint from ``model_folder_path`` and prints the number of
    trainable parameters (in millions) and non-trainable parameters.
    """
    # define input: two int32 tensors of sequence length 64
    input_ids = tf.keras.layers.Input(shape=(64), dtype=tf.int32, name="input_ids")
    segment_ids = tf.keras.layers.Input(shape=(64), dtype=tf.int32, name="segment_ids")
    # Imported lazily so the rest of the module works without these packages.
    import BertModel
    import tensorflow.keras as keras
    import bert
    # define model
    bert_model = BertModel.BertModel(model_folder_path, 64)
    bert_output = bert_model.bert_layer([input_ids, segment_ids])
    # Take the embedding of the first ([CLS]) token as sentence representation.
    cls_output = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output)
    cls_drop = keras.layers.Dropout(0.1)(cls_output)
    fc1 = keras.layers.Dense(units=100, activation="relu")(cls_drop)
    prediction = keras.layers.Dense(units=10, activation="softmax")(fc1)
    # build model
    model = keras.Model(inputs=[input_ids, segment_ids], outputs=prediction)
    model.build(input_shape=[(None, 64), (None, 64)])
    # load pretrained
    bert.load_bert_weights(bert_model.bert_layer, model_folder_path+"bert_model.ckpt")
    # Compile is only needed so summary()/count_params work; lr is irrelevant
    # here because the model is never trained in this function.
    model.compile(optimizer=keras.optimizers.Adam(lr=0.1), loss = 'categorical_crossentropy', metrics = [tf.keras.metrics.categorical_accuracy])
    model.summary()
    from tensorflow.python.keras.utils.layer_utils import count_params
    trainable_count = count_params(model.trainable_weights)
    non_trainable_count = count_params(model.non_trainable_weights)
    print(trainable_count/1e6)
    print(non_trainable_count)
    # return model, bert_model
def build_reallife_corpus(model_folder_path):
    """Derive the "reallife" corpus from the train/dev/test splits.

    For each split the same pipeline runs: read JSON lines -> strip
    Elasticsearch metadata -> add a last-sentence entry per multi-sentence
    review -> oversample to balance classes -> save under the sibling
    ``reallife`` directory (two levels up from ``model_folder_path``).

    The original body repeated this pipeline verbatim for train, dev and
    test; it is folded into one loop with identical per-split behavior.
    """
    new_model_folder_path = "/".join(model_folder_path.split("/")[:-2])
    new_model_folder_path = os.path.join(new_model_folder_path, "reallife")
    for split_name in ("train.json", "dev.json", "test.json"):
        split_data = readJson(model_folder_path + split_name)
        split_data = clean_dict(split_data)
        augmented = add_last_sentence_to_data(split_data)
        oversampled = perform_oversampling(augmented)
        # Sanity print: original size, after augmentation, after oversampling.
        print(len(split_data), len(augmented), len(oversampled))
        saveData(oversampled, os.path.join(new_model_folder_path, split_name))
def add_last_sentence_to_data(data):
    """Augment the dataset with one extra entry per multi-sentence review.

    For every review with more than one sentence, a deep copy of the entry is
    appended whose body is only the review's last sentence and whose _score
    is set to 2 (marking it as synthetic). Returns a deep copy of the input
    plus the new entries.
    """
    augmented = copy.deepcopy(data)
    extra_entries = []
    count = 0
    for original_entry in augmented:
        sentences = sent_tokenize(original_entry['_source']['Review']['ReviewBody'])
        if len(sentences) > 1:
            # Build the synthetic entry from the last sentence only.
            duplicate = copy.deepcopy(original_entry)
            duplicate['_source']['Review']['ReviewBody'] = sentences[-1]
            duplicate['_score'] = 2
            extra_entries.append(duplicate)
            # Sanity guard: the modified copy must differ from its source;
            # if it does not, something is badly wrong, so dump both and stop.
            if original_entry == duplicate:
                print(original_entry)
                print(duplicate)
                sys.exit()
            count += 1
    augmented.extend(extra_entries)
    return augmented
def perform_oversampling(data):
    """Randomly duplicate entries until all five rating classes are equal.

    NOTE(review): this assumes ratings are exactly 1..5 and that class 5
    (``counter[-1]``) is the largest class — entries of the largest class are
    never duplicated. If another class were larger, or a class were empty,
    the while loop could never terminate. Confirm against the input data.
    """
    new_data = copy.deepcopy(data)
    new_entries = []
    # counter[i] = number of entries with rating i+1.
    counter = [0,0,0,0,0]
    for entry in new_data:
        label = entry['_source']['Review']['ReviewRating']
        counter[label-1] += 1
    # Draw random entries (with replacement) and keep those from classes that
    # have not yet reached the size of class 5.
    while True:
        random_entry = random.choice(data)
        random_label = random_entry['_source']['Review']['ReviewRating']
        if counter[random_label-1] == counter[-1]:
            continue
        else:
            new_entries.append(random_entry)
            counter[random_label-1] += 1
        # Stop once all five class counts are equal.
        if counter[0] == counter[1] and counter[1] == counter[2] and counter[2] == counter[3] and counter[3] == counter[4]:
            break
    print(counter)
    new_data.extend(new_entries)
    return new_data
def clean_dict(data):
    """Strip Elasticsearch metadata and unused review/product fields.

    Keeps only _source.Review minus title/date/verified-flag. Raises KeyError
    if any expected field is missing. Returns a deep copy; input is untouched.
    """
    cleaned = copy.deepcopy(data)
    top_level_keys = ("_index", "_type", "_id", "_score")
    review_keys = ("ReviewTitle", "ReviewDate", "ReviewProductVerified")
    for record in cleaned:
        for key in top_level_keys:
            del record[key]
        for key in review_keys:
            del record["_source"]["Review"][key]
        del record["_source"]["Product"]
    return cleaned
if __name__ == "__main__":
    # Script entry point: relabels the pre-split emag review dataset into the
    # 4-class scheme and saves it under ../Dataset/Reviews/4Classes/.
    # The commented-out sections below are earlier one-off steps (statistics,
    # parameter counting, and the original stratified train/dev/test split).
    # data = readJson("../Dataset/Reviews/4Classes/train.json")
    # computeDatasetStatistics(data, 32, 32)
    # print("--------------------------DEV--------------------------")
    # data = readJson("../Dataset/Reviews/4Classes/dev.json")
    # computeDatasetStatistics(data, 32, 32)
    # print("--------------------------TEST--------------------------")
    # data = readJson("../Dataset/Reviews/4Classes/test.json")
    # computeDatasetStatistics(data, 32, 32)
    # compute_parameters("../Models/raw/small/clean/trained_512/ro2/")
    # sys.exit()
    # # split data
    # raw = readJson("../Dataset/Reviews/all_reviews.json", original=True)
    # # computeDatasetStatistics(raw, 256, 256)
    # train_data, dev_data, test_data = splitTrainDevTest(raw)
    # saveData(train_data, "../Dataset/Reviews/emag_train.json")
    # saveData(dev_data, "../Dataset/Reviews/emag_dev.json")
    # saveData(test_data, "../Dataset/Reviews/emag_test.json")
    # raw = readJson("../Dataset/Reviews/all_reviews.json", original=True)
    train_data = readJson("../Dataset/Reviews/emag_train.json")
    # computeDatasetStatistics(train_data, 256, 256)
    dev_data = readJson("../Dataset/Reviews/emag_dev.json")
    test_data = readJson("../Dataset/Reviews/emag_test.json")
    # For each split: print the rating distribution before and after the
    # 4-class relabeling, then save the relabeled split.
    computeScoreHistogram(train_data, normalize=True)
    split_train = splitData4Classes(train_data)
    computeScoreHistogram(split_train, normalize=True)
    saveData(split_train, "../Dataset/Reviews/4Classes/train.json")
    computeScoreHistogram(dev_data, normalize=True)
    split_dev = splitData4Classes(dev_data)
    computeScoreHistogram(split_dev, normalize=True)
    saveData(split_dev, "../Dataset/Reviews/4Classes/dev.json")
    computeScoreHistogram(test_data, normalize=True)
    split_test = splitData4Classes(test_data)
    computeScoreHistogram(split_test, normalize=True)
    saveData(split_test, "../Dataset/Reviews/4Classes/test.json")
| 35.104938 | 144 | 0.649083 | 615 | 0.036047 | 0 | 0 | 0 | 0 | 0 | 0 | 4,331 | 0.253854 |
a8e295359a28a2381f6b58817f1595af8035d0d8 | 4,200 | py | Python | trie.py | kawasaki-kento/LOUDS | 6a5e157e04ad0f9a50e7c3858b382f9189d044db | [
"MIT"
] | 1 | 2020-12-02T04:38:12.000Z | 2020-12-02T04:38:12.000Z | trie.py | kawasaki-kento/LOUDS | 6a5e157e04ad0f9a50e7c3858b382f9189d044db | [
"MIT"
] | 1 | 2022-02-17T05:39:27.000Z | 2022-02-17T05:39:27.000Z | trie.py | kawasaki-kento/LOUDS | 6a5e157e04ad0f9a50e7c3858b382f9189d044db | [
"MIT"
] | null | null | null | from constructor import ArrayConstructor
from measure import MeasureMemory
import re
import array
class Trie(object):
    """Succinct trie over a word list, queried via rank/select on a bit array.

    The tree shape produced by ArrayConstructor is stored as a bit array
    (with node labels in ``labels``); lookups navigate it with rank/select
    operations. ``split_list`` holds precomputed cumulative 1-bit counts per
    ``unit_scale``-sized chunk to speed up rank queries.
    """
    def __init__(self, words, unit_scale=8):
        bit_array, labels = self.create_tree(words)
        self.rank1 = self.get_rank(1)
        self.unit_scale = unit_scale
        # Cumulative popcounts per unit_scale-sized chunk (see BitVector).
        self.split_list = BitVector(bit_array, self.unit_scale).split_array()
        # zero_pos[n] = index of the n-th 0 bit (with a leading sentinel 0),
        # used as an O(1) select0.
        self.zero_pos = [0]
        c = 1
        for i, v in enumerate(bit_array):
            if v == 0:
                self.zero_pos.append(i)
                c+=1
        # Compact typed arrays to save memory over plain lists.
        self.zero_pos = array.array('I', self.zero_pos)
        self.bit_array = array.array('B',bit_array)
        self.labels = array.array('u',labels)
    # Build the trie: sort the lowercased words and feed them to the
    # constructor, which emits the bit array and the per-node labels.
    def create_tree(self, words):
        words = [word.lower() for word in words]
        words.sort()
        constructor = ArrayConstructor()
        for word in words:
            constructor.add(word)
        bit_array, labels = constructor.dump()
        return bit_array, labels
    def rank(self, position, target_bit):
        # Naive rank: count target_bit in bit_array[0..position] inclusive.
        n = 0
        for bit in self.bit_array[:position+1]:
            if(bit == target_bit):
                n += 1
        return n
    def select0(self, n):
        # Index of the n-th 0 bit (precomputed in __init__).
        return self.zero_pos[n]
    def sub_rank1(self, position):
        # Faster rank1: take the precomputed count up to the previous chunk
        # boundary, then scan only the current chunk.
        # NOTE(review): when position < unit_scale, unit_num is 0 and
        # split_list[-1] (the total count) is used as the base — this looks
        # like an off-by-one; confirm callers never hit the first chunk here.
        unit_num = int(position / self.unit_scale)
        n = self.split_list[unit_num-1]
        n+=sum(self.bit_array[unit_num * self.unit_scale : position+1])
        return n
    def get_rank(self, target_bit):
        # Bind target_bit, returning a one-argument rank function.
        return lambda position: self.rank(position, target_bit)
    # Node traversal: scan the children of current_node for a matching label.
    def trace_children(self, current_node, character, cnt):
        # Position just after the n-th 0 bit from the start of the bit array
        # = start of current_node's child region.
        index = self.select0(current_node) + 1
        while(self.bit_array[index] == 1):
            # Number of 1 bits from the start of the bit array up to index
            # = the child's node number. The first call uses the naive rank;
            # later calls use the chunked sub_rank1.
            if cnt == 0:
                node = self.rank1(index)
            else:
                node = self.sub_rank1(index)
            if(self.labels[node] == character):
                cnt=1
                return node, cnt
            index += 1
        return None, cnt
    # Word lookup: walk the trie character by character.
    def search(self, query):
        query = query.lower()
        cnt = 0
        node = 1
        for c in query:
            node, cnt = self.trace_children(node, c, cnt)
            if(node is None):
                return None
        return node
    # Get the bit-array indices of the children of each node in the sequence.
    def get_children(self, parent_node_seq):
        return [i for j in parent_node_seq for i in range(self.select0(int(j)), self.select0(int(j+1)))[1:]]
    # Collect every node number at or below the given nodes (BFS level by level).
    def get_below_nodes(self, node_list):
        below_nodes = []
        below_nodes.extend(node_list)
        cnt = 0
        # Descend while any node on the current level still has children.
        while self.get_children(node_list) != []:
            tmp_list = [self.sub_rank1(i) for i in self.get_children(node_list)]
            below_nodes.extend(tmp_list)
            node_list = tmp_list
            cnt+=1
        return below_nodes
# Helper that precomputes chunked rank (popcount) information for a bit array.
class BitVector:
    """Cumulative 1-bit counts over fixed-size chunks of a bit array.

    split_array() returns an array.array('I') whose i-th element is the number
    of 1 bits in the first (i+1) chunks of ``unit_scale`` bits each; the last
    chunk also absorbs any trailing remainder.
    """
    def __init__(self, bit_array, unit_scale):
        self.bit_array = bit_array
        self.splited_array = None
        self.n = 0
        self.split_list = []
        self.unit_scale = unit_scale
        # Number of chunks; trailing bits are folded into the final chunk.
        self.split_size = int(len(self.bit_array) / self.unit_scale)
    def rank(self, position, target_bit):
        # Count target_bit in splited_array[0..position] inclusive.
        return sum(1 for bit in self.splited_array[:position + 1] if bit == target_bit)
    def get_rank(self, target_bit):
        # Bind target_bit, returning a one-argument rank function.
        return lambda position: self.rank(position, target_bit)
    def split_array(self):
        for chunk_index in range(self.split_size):
            if chunk_index == self.split_size - 1:
                # Last chunk: include any remainder past the final boundary.
                self.splited_array = self.bit_array[chunk_index * self.unit_scale:]
            else:
                self.splited_array = self.bit_array[chunk_index * self.unit_scale:(chunk_index + 1) * self.unit_scale]
            rank1 = self.get_rank(1)
            self.n += rank1(len(self.splited_array))
            self.split_list.append(self.n)
        self.split_list = array.array('I', self.split_list)
        return self.split_list
a8e480da6a075138e07033a8fe888a7b13527f5b | 3,813 | py | Python | metglyphs/__init__.py | informatics-lab/metglyphs | 5726502d5873a44fcee270b3e058d681fee84be7 | [
"BSD-3-Clause"
] | 4 | 2018-05-12T03:12:29.000Z | 2020-05-29T06:10:31.000Z | metglyphs/__init__.py | informatics-lab/metglyphs | 5726502d5873a44fcee270b3e058d681fee84be7 | [
"BSD-3-Clause"
] | null | null | null | metglyphs/__init__.py | informatics-lab/metglyphs | 5726502d5873a44fcee270b3e058d681fee84be7 | [
"BSD-3-Clause"
] | 1 | 2021-04-10T23:58:45.000Z | 2021-04-10T23:58:45.000Z | """A library for converting weather codes to symbols."""
import os.path
from io import BytesIO
import cairosvg
import imageio
from .glyphs import WMO_GLYPH_LOOKUP, DEFAULT_GLYPHS
from .codes import DATAPOINT_TO_WMO_LOOKUP, DARKSKY_TO_WMO_LOOKUP
class GlyphSet():
    """A named collection of weather glyphs with an in-memory SVG cache."""

    def __init__(self, name=None, recolor=None):
        """Load the lookup table for ``name`` and cache every SVG in memory."""
        self.name = name if name else DEFAULT_GLYPHS
        self.glyph_set = WMO_GLYPH_LOOKUP[self.name]
        self.recolor = recolor
        self.cache = {}
        # Warm the cache so later lookups never touch the filesystem.
        for code in self.glyph_set:
            self._load_svg(code)

    def _repr_html_(self):
        """Return an inline HTML object of the unique glyphs in the set."""
        rendered = [Glyph(svg, recolor=self.recolor).repr_html()
                    for svg in self.cache.values()]
        return "".join(rendered)

    def _load_svg(self, wmo_code):
        """Load (and memoize) the SVG bytes for a WMO code.

        Unknown codes fall back to the bundled ``missing.svg`` asset.
        """
        base_dir = os.path.dirname(__file__)
        try:
            svg_path = os.path.join(base_dir, "assets", self.name,
                                    self.glyph_set[wmo_code])
        except KeyError:
            svg_path = os.path.join(base_dir, "assets", "missing.svg")
        if svg_path not in self.cache:
            with open(svg_path, 'rb') as svg_file:
                self.cache[svg_path] = svg_file.read()
        return self.cache[svg_path]

    @staticmethod
    def datapoint_to_wmo(datapoint_code):
        """Convert a datapoint code to a WMO code."""
        return DATAPOINT_TO_WMO_LOOKUP[str(datapoint_code)]

    @staticmethod
    def darksky_to_wmo(darksky_code):
        """Convert a darksky code to a WMO code."""
        return DARKSKY_TO_WMO_LOOKUP[str(darksky_code)]

    def get_glyph(self, wmo_code=None, datapoint_code=None,
                  darksky_code=None, recolor=None):
        """Return a Glyph for a given weather code.

        The first non-None code wins, checked in the order
        wmo_code, datapoint_code, darksky_code.
        """
        effective_recolor = recolor or self.recolor
        if wmo_code is not None:
            return Glyph(self._load_svg(wmo_code), recolor=effective_recolor)
        if datapoint_code is not None:
            return Glyph(self._load_svg(self.datapoint_to_wmo(datapoint_code)),
                         recolor=effective_recolor)
        if darksky_code is not None:
            return Glyph(self._load_svg(self.darksky_to_wmo(darksky_code)),
                         recolor=effective_recolor)
        raise Exception("You must specify a valid type code")
class Glyph():
    """An individual glyph with methods to convert between types."""

    def __init__(self, svg, recolor=None):
        """Store the SVG bytestring, optionally rewriting colors.

        ``recolor`` maps old color strings to new ones; each pair is applied
        as a plain text substitution over the decoded SVG.
        """
        self.svg = svg
        if recolor:
            markup = self.svg.decode('utf-8')
            for old_color, new_color in recolor.items():
                markup = markup.replace(old_color, new_color)
            self.svg = markup.encode('utf-8')

    def _repr_html_(self):
        """Return an inline HTML object of the raw SVG."""
        wrapper = "<div style='width:40px;display:inline-block;'>{}</div>"
        return wrapper.format(self.svg.decode("utf-8"))

    def repr_html(self):
        """Public version of _repr_html_."""
        return self._repr_html_()

    def to_svg(self):
        """Return a SVG bytestring."""
        return self.svg

    def to_png(self, scale=1):
        """Convert to a PNG bytestring."""
        return cairosvg.svg2png(bytestring=self.svg, scale=scale)

    def to_np_array(self, scale=1):
        """Convert to a numpy array of RGB values."""
        return imageio.imread(BytesIO(self.to_png(scale=scale)))
a8e5b8ce05b40fbc469c45e36983e0d032a99b84 | 1,581 | py | Python | showyourwork/exceptions/other.py | katiebreivik/showyourwork | 77a15de6778e14c3a3936e86e181539cc31cc693 | [
"MIT"
] | null | null | null | showyourwork/exceptions/other.py | katiebreivik/showyourwork | 77a15de6778e14c3a3936e86e181539cc31cc693 | [
"MIT"
] | null | null | null | showyourwork/exceptions/other.py | katiebreivik/showyourwork | 77a15de6778e14c3a3936e86e181539cc31cc693 | [
"MIT"
] | null | null | null | from .base import ShowyourworkException
class RequestError(ShowyourworkException):
    """Raised when accessing a remote server fails."""

    def __init__(self, status="", message="An error occurred while accessing a remote server."):
        # Fold the status code and message into a single error string.
        full_message = f"Request error {status}: {message}"
        super().__init__(full_message)
class CondaNotFoundError(ShowyourworkException):
    """Raised when the conda package manager cannot be located."""

    def __init__(self):
        message = (
            "Conda package manager not found. "
            "Is it installed and available in the system PATH?"
        )
        super().__init__(message)
class ShowyourworkNotFoundError(ShowyourworkException):
    """Raised when a requested showyourwork version is missing at ``path``."""

    def __init__(self, path):
        message = f"The requested version of showyourwork was not found at {path}."
        super().__init__(message)
class ConfigError(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
class MissingFigureOutputError(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
class MissingDependencyError(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
class FigureGenerationError(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
# NOTE(review): duplicate definition — ConfigError is already declared above;
# this second class statement silently rebinds the name. One should be removed.
class ConfigError(ShowyourworkException):
    """Showyourwork exception; duplicate of the ConfigError defined above."""
    pass
class MissingConfigFile(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
# NOTE(review): this shadows the builtin NotImplementedError within this
# module; consider renaming to avoid confusion with the builtin.
class NotImplementedError(ShowyourworkException):
    """Showyourwork exception; name shadows the Python builtin."""
    pass
class TarballExtractionError(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
class MissingCondaEnvironmentInUserRule(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
class RunDirectiveNotAllowedInUserRules(ShowyourworkException):
    """Raised when a user-defined rule named ``name`` uses the `run` directive."""

    def __init__(self, name):
        message = (
            f"The `run` directive is not allowed in user-defined rules. "
            f"Please use `script` or `shell` instead in rule {name}."
        )
        super().__init__(message)
class CalledProcessError(ShowyourworkException):
    """Showyourwork exception; no extra behavior beyond the name."""
    pass
| 21.657534 | 97 | 0.71537 | 1,499 | 0.948134 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.226439 |
a8e65d0852311bd81e04a9f8be156d61168eac0b | 1,222 | py | Python | seq_util/pull_longest_seq_from_img_fa.py | fandemonium/code | 4498f97658c146de8e693776e05bedeaaf2d5a1d | [
"MIT"
] | 2 | 2017-12-25T09:14:52.000Z | 2021-05-18T06:39:26.000Z | seq_util/pull_longest_seq_from_img_fa.py | fandemonium/code | 4498f97658c146de8e693776e05bedeaaf2d5a1d | [
"MIT"
] | 1 | 2018-09-29T22:34:30.000Z | 2018-09-29T22:34:30.000Z | seq_util/pull_longest_seq_from_img_fa.py | fandemonium/code | 4498f97658c146de8e693776e05bedeaaf2d5a1d | [
"MIT"
] | 1 | 2015-01-30T20:29:25.000Z | 2015-01-30T20:29:25.000Z | import sys
from Bio import SeqIO
import operator
# 1. get genome img_oid from the genecart text file
# 2. create gene sequence dictionary
# 3. add genome img_oid to the gene sequence dictionary
# 4. for genes from the same organism, pull the longest sequence out
gene_cart = open(sys.argv[1], 'rU')
firstline = gene_cart.readline()
oid_dict = {}
for lines in gene_cart:
lexeme = lines.strip().split("\t")
gene_id = lexeme[0]
img_oid = lexeme[3]
if img_oid not in oid_dict:
oid_dict[img_oid] = [gene_id]
else:
oid_dict[img_oid].append(gene_id)
seq_dict = SeqIO.to_dict(SeqIO.parse(open(sys.argv[2]), 'fasta'))
MIN_LENGTH = int(sys.argv[3])
no_genes_out = open(sys.argv[4], 'w')
l = []
for oid in oid_dict:
gene_dict = {}
for gene in oid_dict[oid]:
gene_dict[gene] = seq_dict[gene]
if len(gene_dict) > 0:
longest_seq_key = max(gene_dict.iteritems(), key=operator.itemgetter(1))[0]
if len(gene_dict[longest_seq_key]) >= MIN_LENGTH:
print ">"+ oid + "::" + longest_seq_key + "::" + gene_dict[longest_seq_key].description + "\n" +gene_dict[longest_seq_key].seq
else:
l.append(oid)
else:
l.append(oid)
no_genes_out.write("\n".join(l))
| 28.418605 | 144 | 0.673486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.202128 |
a8e727decc6f36450568f08df6b418bcc7070425 | 3,444 | py | Python | chapter_projects/quiz_generator/quiz_generator.py | zspatter/automate-the-boring-stuff | 5efc5ab7112be5b5e1197c86472aee212c3a829b | [
"Unlicense"
] | 15 | 2019-08-16T19:44:30.000Z | 2021-09-05T20:19:40.000Z | chapter_projects/quiz_generator/quiz_generator.py | zspatter/automate-the-boring-stuff | 5efc5ab7112be5b5e1197c86472aee212c3a829b | [
"Unlicense"
] | 1 | 2020-01-05T10:06:33.000Z | 2020-01-17T22:28:07.000Z | chapter_projects/quiz_generator/quiz_generator.py | zspatter/automate-the-boring-stuff | 5efc5ab7112be5b5e1197c86472aee212c3a829b | [
"Unlicense"
] | 7 | 2019-08-16T20:42:11.000Z | 2022-03-10T10:33:18.000Z | #! /usr/bin/env python3
# randomQuizGenerator.py - Creates quizzes with questions and answers in
# random order, along with the answer key
import random
# The quiz data. Keys are states and values are their capitals.
# Used both to generate questions and to draw wrong answer options.
capitals = {'Alabama': 'Montgomery',
            'Alaska': 'Juneau',
            'Arizona': 'Phoenix',
            'Arkansas': 'Little Rock',
            'California': 'Sacramento',
            'Colorado': 'Denver',
            'Connecticut': 'Hartford',
            'Delaware': 'Dover',
            'Florida': 'Tallahassee',
            'Georgia': 'Atlanta',
            'Hawaii': 'Honolulu',
            'Idaho': 'Boise',
            'Illinois': 'Springfield',
            'Indiana': 'Indianapolis',
            'Iowa': 'Des Moines',
            'Kansas': 'Topeka',
            'Kentucky': 'Frankfort',
            'Louisiana': 'Baton Rouge',
            'Maine': 'Augusta',
            'Maryland': 'Annapolis',
            'Massachusetts': 'Boston',
            'Michigan': 'Lansing',
            'Minnesota': 'Saint Paul',
            'Mississippi': 'Jackson',
            'Missouri': 'Jefferson City',
            'Montana': 'Helena',
            'Nebraska': 'Lincoln',
            'Nevada': 'Carson City',
            'New Hampshire': 'Concord',
            'New Jersey': 'Trenton',
            'New Mexico': 'Santa Fe',
            'New York': 'Albany',
            'North Carolina': 'Raleigh',
            'North Dakota': 'Bismarck',
            'Ohio': 'Columbus',
            'Oklahoma': 'Oklahoma City',
            'Oregon': 'Salem',
            'Pennsylvania': 'Harrisburg',
            'Rhode Island': 'Providence',
            'South Carolina': 'Columbia',
            'South Dakota': 'Pierre',
            'Tennessee': 'Nashville',
            'Texas': 'Austin',
            'Utah': 'Salt Lake City',
            'Vermont': 'Montpelier',
            'Virginia': 'Richmond',
            'Washington': 'Olympia',
            'West Virginia': 'Charleston',
            'Wisconsin': 'Madison',
            'Wyoming': 'Cheyenne'
            }
# generates 35 quiz/answer key files (can be altered to any number)
import os

# FIX: the original crashed with FileNotFoundError when the quizzes/ folder
# did not exist, and left both files unclosed on any error mid-quiz.
os.makedirs('quizzes', exist_ok=True)
for x in range(35):
    # Context managers guarantee the quiz and its answer key are flushed and
    # closed even if an exception occurs while writing.
    with open(f'quizzes/capitals_quiz{x + 1}.txt', 'w') as quiz, \
            open(f'quizzes/capitals_quiz_answers{x + 1}.txt', 'w') as answer_key:
        # Quiz header.
        quiz.write('Name:\n\nDate:\n\nPeriod\n\n')
        quiz.write((' ' * 20) + f'State Capitals Quiz (Form {x + 1})\n\n')
        # Randomize the question order for this form.
        states = list(capitals.keys())
        random.shuffle(states)
        # One question per state (all 50 states).
        for Q in range(50):
            correct_answer = capitals[states[Q]]
            # Three wrong options drawn from the other capitals.
            wrong_answers = list(capitals.values())
            wrong_answers.remove(capitals[states[Q]])
            answer_options = random.sample(wrong_answers, 3)
            answer_options += [correct_answer]
            random.shuffle(answer_options)
            quiz.write(f'{Q + 1}. What\'s the capital of {states[Q]}?\n')
            # Four multiple-choice options labeled A-D.
            for i in range(4):
                quiz.write(f'\t{"ABCD"[i]}.\t{answer_options[i]}\n')
            quiz.write('\n')
            answer_key.write(f'{Q + 1}.\t{"ABCD"[answer_options.index(correct_answer)]}\n')
| 38.696629 | 87 | 0.485772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,673 | 0.485772 |
a8e92779935c13faae8293404567f0278d30ae7e | 138 | py | Python | scripts/reactor/autogen_ludiquest2.py | hsienjan/SideQuest-Server | 3e88debaf45615b759d999255908f99a15283695 | [
"MIT"
] | null | null | null | scripts/reactor/autogen_ludiquest2.py | hsienjan/SideQuest-Server | 3e88debaf45615b759d999255908f99a15283695 | [
"MIT"
] | null | null | null | scripts/reactor/autogen_ludiquest2.py | hsienjan/SideQuest-Server | 3e88debaf45615b759d999255908f99a15283695 | [
"MIT"
] | null | null | null | # ParentID: 2202002
# Character field ID when accessed: 220020000
# ObjectID: 1000016
# Object Position X: -228
# Object Position Y: -198
| 23 | 45 | 0.746377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.963768 |
a8e9720c8eef1700f8da8b99e8e23353e5960428 | 565 | py | Python | GN.py | Aashutosh-922/News-Notifier | fef0665f09f87feeaaa39533905c75ca67098559 | [
"MIT"
] | 1 | 2021-12-09T06:24:46.000Z | 2021-12-09T06:24:46.000Z | GN.py | Aashutosh-922/News-Notifier | fef0665f09f87feeaaa39533905c75ca67098559 | [
"MIT"
] | null | null | null | GN.py | Aashutosh-922/News-Notifier | fef0665f09f87feeaaa39533905c75ca67098559 | [
"MIT"
] | null | null | null | import feedparser
def parseRSS(rss_url):
    """Fetch and parse the RSS feed at *rss_url* via feedparser."""
    parsed_feed = feedparser.parse(rss_url)
    return parsed_feed
def getHeadlines(rss_url):
    """Return the entry titles of the RSS feed at *rss_url*.

    Fetches the feed through parseRSS and collects the 'title' field of
    every item, in feed order; an empty feed yields an empty list.
    """
    feed = parseRSS(rss_url)
    # Comprehension replaces the manual append loop (same order, same items).
    return [newsitem['title'] for newsitem in feed['items']]
# Collected titles from every configured feed, in registry order.
allheadlines = []
# Feed registry: name -> RSS URL.  Add further feeds here.
newsurls = {
    'googlenews': 'https://news.google.com/news/rss/?hl=en&ned=us&gl=US',
}
# Fetch each feed and accumulate its headlines.
for key, url in newsurls.items():
    allheadlines.extend(getHeadlines(url))
# Print one headline per line.
for hl in allheadlines:
    print(hl)
a8e9cca9c462f701ccbd95ce0e087cddfc60329e | 3,070 | py | Python | src/jarvis/jarvis/skills/collection/remember.py | jameswynn/Python-ai-assistant | 2acc2982350e8500e3fbd534b26bcdaa1c00a14b | [
"MIT"
] | 424 | 2020-04-19T06:01:00.000Z | 2022-03-31T10:54:03.000Z | src/jarvis/jarvis/skills/collection/remember.py | jameswynn/Python-ai-assistant | 2acc2982350e8500e3fbd534b26bcdaa1c00a14b | [
"MIT"
] | 40 | 2020-05-11T18:14:27.000Z | 2022-03-20T14:26:55.000Z | src/jarvis/jarvis/skills/collection/remember.py | mdanisurrahmanrony/Python-ai-assistant | d519731478e809ca085b67de4eaf1d59cd0a64f5 | [
"MIT"
] | 164 | 2020-04-15T11:46:39.000Z | 2022-03-31T14:28:20.000Z | # MIT License
# Copyright (c) 2019 Georgios Papachristou
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jarvis.skills.skill import AssistantSkill
from jarvis.utils.mongoDB import db
from jarvis.utils import input
header = """
-----------------------------------------------------------------------------------------------
I would like to learn, tell me the right answer!
-----------------------------------------------------------------------------------------------
* Note: Create new skill! Write your question and the appropriate answer.
\n
"""
class RememberSkills(AssistantSkill):
    """Skills that let the assistant learn, replay and forget user-taught
    question/answer pairs, persisted in the 'learned_skills' collection via
    the project ``db`` helper."""

    @classmethod
    def remember(cls, **kwargs):
        # Interactively collect question ("tags") / response pairs until the
        # user declines to add more, then persist to Mongo.
        cls.console(header)
        continue_add = True
        while continue_add:
            cls.console(text='Question: ')
            tags = cls.user_input()
            cls.console(text='Suggested Response: ')
            response = cls.user_input()
            # Trailing comma makes new_skill a 1-tuple of dicts, matching the
            # iterable expected by insert_many_documents.
            new_skill = {'name': 'learned_skill',
                         'enable': True,
                         'func': cls.tell_response.__name__,
                         'response': response,
                         'tags': tags,
                         },
            cls.response('Add more? ', refresh_console=False)
            continue_add = input.check_input_to_continue()
        # NOTE(review): new_skill is overwritten on every loop iteration, so
        # only the *last* entered pair is persisted here — confirm whether
        # this insert was meant to live inside the loop instead.
        db.insert_many_documents(collection='learned_skills', documents=new_skill)

    @classmethod
    def tell_response(cls, **kwargs):
        # Replay the stored response of the matched learned skill
        # (kwargs['skill'] is the stored skill document).
        cls.response(kwargs.get('skill').get('response'))

    @classmethod
    def clear_learned_skills(cls, **kwargs):
        # Drop the whole learned-skills collection, after user confirmation.
        if db.is_collection_empty(collection='learned_skills'):
            cls.response("I can't find learned skills in my database")
        else:
            cls.response('I found learned skills..')
            cls.response('Are you sure to remove learned skills? ', refresh_console=False)
            user_answer = input.check_input_to_continue()
            if user_answer:
                db.drop_collection(collection='learned_skills')
                cls.response("Perfect I have deleted them all")
| 41.486486 | 95 | 0.630945 | 1,503 | 0.489577 | 0 | 0 | 1,448 | 0.471661 | 0 | 0 | 1,722 | 0.560912 |
a8ea590dba805a42688e0ba0bfb3a1410eed7819 | 1,977 | py | Python | kubernetes/e2e_test/test_batch.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 2 | 2021-03-09T12:42:05.000Z | 2021-03-09T13:27:50.000Z | kubernetes/e2e_test/test_batch.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 8 | 2020-10-28T01:18:36.000Z | 2021-06-11T01:06:15.000Z | kubernetes/e2e_test/test_batch.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2021-06-13T09:21:37.000Z | 2021-06-13T09:21:37.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from kubernetes.client import api_client
from kubernetes.client.api import batch_v1_api
from kubernetes.e2e_test import base
class TestClientBatch(unittest.TestCase):
    """End-to-end test of the BatchV1 Job API against a live cluster."""

    @classmethod
    def setUpClass(cls):
        # Cluster connection settings shared by every test in this class.
        cls.config = base.get_e2e_configuration()

    def test_job_apis(self):
        """Create, read and delete a Job, checking the name round-trips."""
        client = api_client.ApiClient(configuration=self.config)
        api = batch_v1_api.BatchV1Api(client)
        # Unique name so concurrent or leftover runs don't collide.
        name = 'test-job-' + str(uuid.uuid4())
        # Minimal Job manifest: one busybox container that sleeps briefly
        # and exits; restartPolicy 'Never' lets the Job complete.
        job_manifest = {
            'kind': 'Job',
            'spec': {
                'template':
                {'spec':
                 {'containers': [
                     {'image': 'busybox',
                      'name': name,
                      'command': ["sh", "-c", "sleep 5"]
                      }],
                  'restartPolicy': 'Never'},
                 'metadata': {'name': name}}},
            'apiVersion': 'batch/v1',
            'metadata': {'name': name}}
        resp = api.create_namespaced_job(
            body=job_manifest, namespace='default')
        self.assertEqual(name, resp.metadata.name)
        resp = api.read_namespaced_job(
            name=name, namespace='default')
        self.assertEqual(name, resp.metadata.name)
        # Deletion doubles as the delete-API check and as cleanup.
        resp = api.delete_namespaced_job(
            name=name, body={}, namespace='default')
a8ebb8c7ba04d8c8e2be86842c467f1340eb08b4 | 3,223 | py | Python | ZZZ/DES/match_bliss.py | ivmfnal/striped | eef1a4d544fa1b97fde39d7ee5ef779071218891 | [
"BSD-3-Clause"
] | 1 | 2019-07-01T15:19:43.000Z | 2019-07-01T15:19:43.000Z | ZZZ/DES/match_bliss.py | ivmfnal/striped | eef1a4d544fa1b97fde39d7ee5ef779071218891 | [
"BSD-3-Clause"
] | null | null | null | ZZZ/DES/match_bliss.py | ivmfnal/striped | eef1a4d544fa1b97fde39d7ee5ef779071218891 | [
"BSD-3-Clause"
] | 1 | 2020-04-21T21:18:01.000Z | 2020-04-21T21:18:01.000Z | from striped.common import Tracer
T = Tracer()
with T["run"]:
with T["imports"]:
from striped.job import SinglePointStripedSession as Session
import numpy as np
from numpy.lib.recfunctions import append_fields
import fitsio, healpy as hp
import sys, time
#job_server_address = ("dbwebdev.fnal.gov", 8765) #development
job_server_address = ("ifdb01.fnal.gov", 8765) #production
session = Session(job_server_address)
input_file = sys.argv[1]
input_filename = input_file.rsplit("/",1)[-1].rsplit(".",1)[-1]
with T["fits/read"]:
input_data = fitsio.read(input_file, ext=2, columns=["ALPHAWIN_J2000","DELTAWIN_J2000"])
with T["hpix"]:
hpix = hp.ang2pix(nside=16384,theta=input_data['ALPHAWIN_J2000'],phi=input_data['DELTAWIN_J2000'],
lonlat=True, nest=True)
hpix = np.asarray(hpix, np.float64)
input_data = append_fields(input_data, "HPIX", hpix)
np.sort(input_data, order="HPIX")
input_data = np.array(zip(input_data['ALPHAWIN_J2000'], input_data['DELTAWIN_J2000'], input_data['HPIX']))
matches = []
class Callback:
def on_streams_update(self, nevents, data):
if "matches" in data:
for m in data["matches"]:
matches.append(m)
for obs_i, cat_id, obs_ra, obs_dec, cat_ra, cat_dec in m:
print "Match: index: %10d RA: %9.4f Dec: %9.4f" % (int(obs_i), obs_ra, obs_dec)
print " COADD oject id: %10d %9.4f %9.4f" % (int(cat_id), cat_ra, cat_dec)
if "message" in data:
for msg in data["message"]:
print msg
def on_exception(self, wid, info):
print "Worker exception:\n--------------------"
print info
print "--------------------"
job = session.createJob("Y3A2",
user_callback = Callback(),
worker_class_file="bliss_match_worker.py",
user_params = {"observations":input_data})
with T["job"]:
job.run()
runtime = job.TFinish - job.TStart
catalog_objects = job.EventsProcessed
print "Compared %d observations against %d catalog objects, elapsed time=%f" % (len(input_data), catalog_objects, runtime)
if matches:
matches = np.concatenate(matches, axis=0)
matches = np.array(matches, dtype=[("INDEX", int),("COADD_OBJECT_ID", int)])
save_fn = input_filename + "_match.fits"
with T["fits/write"]:
fitsio.write(save_fn, matches, clobber=True)
print "Saved %d matches in %s" % (len(matches), save_fn)
else:
print "No matches"
T.printStats()
| 39.790123 | 131 | 0.501396 | 910 | 0.282346 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.200745 |
a8ebf8bbb141395df211d1beccebe31440e682f7 | 109 | py | Python | chk.py | benhur98/GazeUI_RH3 | 3e633474bcb78ab30897692fbcb75c8ad1f5c92e | [
"MIT"
] | null | null | null | chk.py | benhur98/GazeUI_RH3 | 3e633474bcb78ab30897692fbcb75c8ad1f5c92e | [
"MIT"
] | null | null | null | chk.py | benhur98/GazeUI_RH3 | 3e633474bcb78ab30897692fbcb75c8ad1f5c92e | [
"MIT"
] | null | null | null | import numpy as np
a=np.load("train-data-{}.npy".format(input()))
while 1:
print(a[int(input())][1])
| 21.8 | 47 | 0.605505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.174312 |
a8ec8c2dda24550165672ed485cf1f2b5ef00950 | 30 | py | Python | python/orcreader/__init__.py | nqbao/python-orc-reader | c4d6a06851b12a309f485ef208c0d84e80b22f8b | [
"BSD-3-Clause"
] | 15 | 2016-07-04T17:05:31.000Z | 2020-06-28T02:15:49.000Z | python/orcreader/__init__.py | nqbao/python-orc-reader | c4d6a06851b12a309f485ef208c0d84e80b22f8b | [
"BSD-3-Clause"
] | 3 | 2017-05-15T06:01:18.000Z | 2018-04-18T21:14:17.000Z | python/orcreader/__init__.py | nqbao/python-orc-reader | c4d6a06851b12a309f485ef208c0d84e80b22f8b | [
"BSD-3-Clause"
] | 6 | 2017-01-23T23:47:52.000Z | 2018-11-01T17:43:40.000Z | from .reader import OrcReader
| 15 | 29 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a8ec95c8cb86838f72fe414c4922ade690fedb2c | 4,509 | py | Python | materials-downloader.py | goDoCer/imperial-computing-materials-downloader | 7ab906d3b2720d1f7739c50908a367d4a6d3155e | [
"MIT"
] | 10 | 2020-11-02T12:27:16.000Z | 2020-12-23T05:31:03.000Z | materials-downloader.py | prnvbn/imperial-computing-materials-downloader | 7ab906d3b2720d1f7739c50908a367d4a6d3155e | [
"MIT"
] | null | null | null | materials-downloader.py | prnvbn/imperial-computing-materials-downloader | 7ab906d3b2720d1f7739c50908a367d4a6d3155e | [
"MIT"
] | null | null | null | import sys
import os
import json
import subprocess
import datetime as dt
sys.path.insert(1, './lib')
from config import *
from webhelpers import *
from argsparser import *
from getpass import getpass
from distutils.dir_util import remove_tree, copy_tree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import WebDriverException
if __name__ == "__main__":
    args = get_args()
    # Renamed from `exit` (which shadowed the builtin).  Set by the pure
    # configuration flags below; when True we quit before starting selenium.
    should_exit = False

    ############################# NON SELENIUM FLAGS #############################
    # First run: create a credentials template and bail out so the user can
    # fill it in.  File handles are now closed (the originals leaked).
    # NOTE(review): the probe/template path is "auth.json" but every read and
    # write below uses "lib/auth.json" — one of the two is almost certainly
    # wrong; confirm which location the project expects.
    try:
        open("auth.json").close()
    except FileNotFoundError:
        with open("auth.json", "w+") as template:
            template.write('{"shortcode": "XXXXX", "password": "XXXXX", "directory": "XXXXX"}')
        quit()

    with open("lib/auth.json") as authfile:
        auth = json.load(authfile)

    if args.credentials:
        # Plain loop instead of a list comprehension used only for its
        # printing side effect.
        for key in ["shortcode", "directory"]:
            print(f"Your {key} is set as {auth[key]}")
        should_exit = True

    if args.update_chromedriver:
        subprocess.call(["sh", "./get_chromedriver.sh"])
        should_exit = True

    if s := args.shortcode:
        auth["shortcode"] = s
        print(f"Shortcode set to {s}")
        should_exit = True

    if args.password:
        pswd = getpass('Password:')
        if pswd == "":
            # Bug fix: an empty password used to be stored despite this
            # warning; it is now rejected.
            print("Password can not be empty")
        else:
            auth["password"] = pswd
            print(f"Password has been set")
        should_exit = True

    if d := args.dir:
        if os.path.isdir(d):
            print(f"Directory set to {d}")
        else:
            print(f"{d} is not a valid directory!!!")
            response = input(
                f"Do you want to create directory {d}? (Y/n) ").lower()
            # NOTE(review): accepting "or" looks like a typo (perhaps "yes"?);
            # behaviour kept as-is pending confirmation.
            if response in ("y", "or"):
                print(f"Made directory {d}")
                os.mkdir(d)
            else:
                print(f"Please pass in a valid directory")
        auth["directory"] = d
        should_exit = True

    # Persist any credential/config changes made above.
    with open("lib/auth.json", "wt") as authfile:
        json.dump(auth, authfile)

    if should_exit:
        quit()

    headless = not args.real
    verbose = args.verbose

    ############################# CHROME WEBDRIVER OPTIONS #############################
    chrome_options = Options()
    if headless:
        chrome_options.add_argument("--headless")
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-notifications")
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--verbose')
    chrome_options.add_experimental_option("prefs", {
        "download.default_directory": "./",
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing_for_trusted_sources_enabled": False,
        "safebrowsing.enabled": False
    })
    # (Removed an unused second ChromeOptions object that was never passed
    # to the driver.)

    try:
        driver = webdriver.Chrome(
            options=chrome_options, executable_path=CHROMEDRIVER_PATH)
    # Bug fix: `except A or B:` only caught A — `or` of two classes evaluates
    # to the first.  A tuple catches both exception types.
    except (WebDriverException, FileNotFoundError):
        print("There is something wrong with your chromedriver installation")
        print(
            f"Run 'sh get_chromedriver.sh' in {os.getcwd()} to get the latest version")
        print("You can also run this command with the -u (--update-chromedriver) flag.")
        quit()
    driver.get(MATERIALS_URL)

    print("authenticating...")
    authenticate(driver)

    ############################# DOWNLOADING #############################
    base_dir = "./downloads"
    # exist_ok replaces a bare try/except that swallowed *every* error.
    os.makedirs(base_dir, exist_ok=True)

    if args.quick:
        download_course(driver, args.quick, base_dir=base_dir, verbose=verbose)
    else:
        download_courses(driver, base_dir=base_dir, verbose=verbose)

    driver.quit()
    print("Finishing...")

    ############################# CLEAN UP #############################
    # Remove partially-downloaded Chrome artefacts before moving the results.
    for parent, dirnames, filenames in os.walk(base_dir):
        for fn in filenames:
            if fn.lower().endswith('.crdownload'):
                os.remove(os.path.join(parent, fn))

    # Moving the downloads to the specified directory
    save_dir = DIRECTORY
    if args.location is not None:
        save_dir = args.location

    copy_tree(base_dir, save_dir)
    remove_tree(base_dir)
    print("DONE!!!")
| 30.466216 | 88 | 0.569306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,420 | 0.314926 |
a8ed6afe97f49d0c92eeaeda111ff0c3ba602a10 | 16,551 | py | Python | PoliCmm/src/parser.py | jutge-org/cpp2many | 2d2fb1784f2515b3c1a1056e163640e556331766 | [
"MIT"
] | 4 | 2018-04-06T00:18:20.000Z | 2021-10-11T20:25:38.000Z | PoliCmm/src/parser.py | jutge-org/cpp2many | 2d2fb1784f2515b3c1a1056e163640e556331766 | [
"MIT"
] | 1 | 2019-02-27T17:04:43.000Z | 2019-02-28T08:25:12.000Z | PoliCmm/src/parser.py | jutge-org/cpp2many | 2d2fb1784f2515b3c1a1056e163640e556331766 | [
"MIT"
] | 4 | 2019-02-27T17:05:17.000Z | 2021-03-12T10:36:04.000Z | import ply.lex as lex
import ply.yacc as yacc
import lexer
import sys
import ast
tokens = lexer.tokens
precedence = (
('right', 'ELSE'),
)
def p_start (t):
'''start : program'''
t[0] = t[1]
def p_program_01 (t):
'''program : program_part'''
t[0] = ast.Program(t[1])
def p_program_02 (t):
'''program : program program_part'''
t[1].add(t[2])
t[0] = t[1]
def p_program_part (t):
'''program_part : include_directive
| typedef
| structdef
| using_directive
| function_definition
| declaration_statement
| comment
'''
t[0] = t[1]
def p_typedef_01 (t):
'''typedef : typedef_body SEMI'''
t[0] = t[1]
def p_typedef_body (t):
'''typedef_body : TYPEDEF type IDENTIFIER'''
lexer.typedefs[t[3]] = 'TYPEID'
t[0] = ast.TypeDef(t[2], t[3])
def p_structdef (t):
'''structdef : struct_name LBRA struct_elem_list RBRA SEMI'''
t[3].id = t[1]
t[0] = t[3]
def p_struct_name (t):
    '''struct_name : STRUCT IDENTIFIER'''
    # Register the struct's name as a TYPEID so the lexer will classify
    # later occurrences of it as a type name.
    # print() with a single argument behaves identically under Python 2
    # (prints the parenthesised value) and Python 3, unlike the bare
    # `print x` statement used before.
    print("Added typeid " + t[2])
    lexer.typedefs[t[2]] = 'TYPEID'
    t[0] = t[2]
def p_struct_elem_list_01 (t):
'''struct_elem_list : declaration_statement'''
t[0] = ast.StructDef(t[1])
def p_struct_elem_list_02 (t):
'''struct_elem_list : struct_elem_list declaration_statement'''
t[1].add(t[2])
t[0] = t[1]
def p_struct_elem (t):
'''struct_elem : type identifier_list SEMI'''
for c in t[2].children:
c.type = t[1]
t[0] = t[2]
def p_identifier_list_01 (t):
'''identifier_list : IDENTIFIER'''
t[0] = ast.VariableDeclarationStatement(ast.VariableDeclaration(t[1]))
def p_identifier_list_02 (t):
'''identifier_list : identifier_list COMMA IDENTIFIER'''
t[1].add(ast.VariableDeclaration(t[3]))
t[0] = t[1]
def p_comment_01 (t):
'''comment : LINECOM'''
t[0] = ast.LineComment(t[1])
def p_comment_02 (t):
'''comment : BLOCKCOM'''
t[0] = ast.BlockComment(t[1])
def p_include_directive_01 (t):
'''include_directive : INCLUDE LT IDENTIFIER GT
| INCLUDE LT STRING GT
| INCLUDE LT VECTOR GT'''
t[0] = ast.Include(t[3])
def p_include_directive_02 (t):
'''include_directive : INCLUDE STRING_LIT'''
t[0] = ast.Include(t[2])
def p_using_directive (t):
'''using_directive : USING NAMESPACE IDENTIFIER SEMI'''
t[0] = ast.UsingNamespace(t[3])
def p_function_definition_01 (t):
'''function_definition : type IDENTIFIER LPAR RPAR block'''
t[0] = ast.Function(t[2], t[1], ast.FormalParametersList(), t[5])
def p_function_definition_02 (t):
'''function_definition : type IDENTIFIER LPAR formal_parameters_list RPAR block'''
t[0] = ast.Function(t[2], t[1], t[4], t[6])
def p_empty (t):
'''empty :'''
pass
def p_formal_parameters_list_01 (t):
'''formal_parameters_list : formal_parameter'''
t[0] = ast.FormalParametersList(t[1])
def p_formal_parameters_list_02 (t):
'''formal_parameters_list : formal_parameters_list COMMA formal_parameter'''
t[1].add(t[3])
t[0] = t[1]
def p_formal_parameter_01 (t):
'''formal_parameter : type IDENTIFIER'''
t[0] = ast.FormalParameter(t[2], t[1])
t[0].is_ref = False
def p_formal_parameter_02 (t):
'''formal_parameter : type AND IDENTIFIER'''
t[0] = ast.FormalParameter(t[3], t[1])
t[0].is_ref = True
t[0].type.is_reference = True
def p_statement_list_01 (t):
'''statement_list : statement'''
t[1].isStatement = True
t[0] = ast.CompoundStatement(t[1])
def p_statement_list_02 (t):
'''statement_list : statement_list statement'''
t[2].isStatement = True
t[1].add(t[2])
t[0] = t[1]
def p_statement (t):
'''statement : declaration_statement
| cout_statement
| cin_statement
| while_statement
| for_statement
| if_statement
| assignment_statement
| return_statement
| block
| comment
| empty_statement
'''
# | while_statement_cin
t[0] = t[1]
def p_empty_statement (t):
'''empty_statement : '''
t[0] = ast.NullNode()
def p_block (t):
'''block : LBRA statement_list RBRA'''
t[0] = t[2]
def p_cout_statement_01 (t):
'''cout_statement : COUT cout_elements_list SEMI'''
t[0] = t[2]
def p_cout_statement_02 (t):
'''cout_statement : CERR cout_elements_list SEMI'''
t[0] = t[2]
def p_cout_statement_03 (t):
'''cout_statement : COUT DOT IDENTIFIER LPAR actual_parameters_list RPAR SEMI'''
t[0] = ast.CoutModifier(t[3], t[5])
def p_cout_statement_04 (t):
'''cout_statement : CERR DOT IDENTIFIER LPAR actual_parameters_list RPAR SEMI'''
t[0] = ast.CoutModifier(t[3], t[5])
def p_cout_elements_list_01 (t):
'''cout_elements_list : LPUT cout_element'''
t[0] = ast.CoutStatement(t[2])
def p_cout_elements_list_02 (t):
'''cout_elements_list : cout_elements_list LPUT cout_element'''
t[1].add(t[3])
t[0] = t[1]
def p_cout_element_01 (t):
'''cout_element : ENDL'''
t[0] = ast.CoutBreakLine();
def p_cout_element_02 (t):
'''cout_element : lor_expression'''
t[0] = ast.CoutElement(t[1])
def p_cin_bloc (t):
'''cin_bloc : CIN cin_elements_list'''
t[0] = t[2]
t[0].is_expression = True
def p_cin_statement (t):
'''cin_statement : CIN cin_elements_list SEMI'''
t[0] = t[2]
t[0].is_expression = False
def p_cin_elements_list_01 (t):
'''cin_elements_list : RPUT reference_expression'''
t[0] = ast.CinStatement(t[2])
def p_cin_elements_list_02 (t):
'''cin_elements_list : cin_elements_list RPUT reference_expression'''
t[1].add(t[3])
t[0] = t[1]
def p_literal_01 (t):
'''literal : INTEGER_LIT'''
t[0]=ast.IntLiteral(t[1])
def p_literal_02 (t):
'''literal : REAL_LIT'''
t[0]=ast.FloatLiteral(t[1])
def p_literal_03 (t):
'''literal : TRUE
| FALSE'''
t[0]=ast.BoolLiteral(t[1])
def p_literal_04 (t):
'''literal : STRING_LIT'''
t[0]=ast.StringLiteral(t[1])
def p_literal_05 (t):
'''literal : CHAR_LIT'''
t[0]=ast.CharLiteral(t[1])
def p_factor_01 (t):
'''factor : literal'''
t[0] = t[1]
def p_factor_02 (t):
'''factor : reference_expression'''
t[0] = t[1]
def p_factor_03(t):
'''factor : LPAR assignment_expression RPAR'''
t[0] = ast.Parenthesis(t[2])
def p_factor_04 (t):
'''factor : IDENTIFIER LPAR actual_parameters_list RPAR'''
t[0] = ast.FunctionCall(t[1], t[3])
def p_factor_05 (t):
'''factor : IDENTIFIER COLONCOLON assignment_expression'''
t[0] = t[3]
def p_factor_06 (t):
'''factor : reference_expression DOT IDENTIFIER LPAR actual_parameters_list RPAR'''
t[0] = ast.FunctionCall(t[3], t[5], t[1])
def p_factor_07 (t):
'''factor : type LPAR actual_parameters_list RPAR'''
t[0] = ast.Constructor(t[1], t[3])
def p_factor_08 (t):
'''factor : LPAR type RPAR assignment_expression'''
t[0] = ast.CastExpression(t[2], t[4])
def p_reference_expression_01 (t):
'''reference_expression : IDENTIFIER'''
t[0] = ast.Identifier(t[1])
def p_reference_expression_02 (t):
'''reference_expression : reference_expression LCOR relational_expression RCOR'''
t[0] = ast.Reference(t[1], t[3])
def p_reference_expression_03 (t):
'''reference_expression : reference_expression DOT IDENTIFIER'''
t[0] = ast.StructReference(t[1], t[3])
def p_unary_expression_01(t):
'''unary_expression : unary_operator factor
| PLUSPLUS unary_expression
| MINUSMINUS unary_expression
'''
t[0]=ast.UnaryOp(t[1],t[2])
t[0].pre = True
def p_unary_expression_02(t):
'''unary_expression : unary_expression PLUSPLUS
| unary_expression MINUSMINUS
'''
t[0]=ast.UnaryOp(t[2],t[1])
t[0].pre = False
def p_unary_expression_03(t):
'''unary_expression : factor
'''
t[0]=t[1]
# me faltara tema ++
def p_cast_expression_01(t):
'''
cast_expression : unary_expression
'''
t[0]=t[1]
def p_cast_expression_02(t):
'''
cast_expression : type LPAR lor_expression RPAR
'''
t[0]=ast.CastExpression(t[1],t[3])
def p_multiplicative_expression_01(t):
'''
multiplicative_expression : unary_expression
'''
t[0]=t[1]
def p_multiplicative_expression_02(t):
'''
multiplicative_expression : multiplicative_expression multiplicative_operator unary_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3]);
def p_additive_expression_01(t):
'''
additive_expression : multiplicative_expression
'''
t[0]=t[1]
def p_additive_expression_02(t):
'''
additive_expression : additive_expression additive_operator multiplicative_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
#def p_shift_expression_01(t):
#'''
#shift_expression : additive_expression
#'''
#t[0]=t[1]
#def p_shift_expression_02(t):
#'''
#shift_expression : shift_expression shift_operator additive_expression
#'''
#t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_relational_expression_01(t):
'''
relational_expression : additive_expression
'''
t[0]=t[1]
def p_relational_expression_02(t):
'''
relational_expression : relational_expression relational_operator additive_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_equality_expression_01(t):
'''
equality_expression : relational_expression
'''
t[0]=t[1]
def p_equality_expression_02(t):
'''
equality_expression : equality_expression equality_operator relational_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_and_expression_01(t):
'''
and_expression : equality_expression
'''
t[0]=t[1]
def p_and_expression_02(t):
'''
and_expression : and_expression AND equality_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_xor_expression_01(t):
'''
xor_expression : and_expression
'''
t[0]=t[1]
def p_xor_expression_02(t):
'''
xor_expression : xor_expression XOR and_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_or_expression_01(t):
'''
or_expression : xor_expression
| cin_bloc
'''
t[0]=t[1]
def p_or_expression_02(t):
'''
or_expression : or_expression OR xor_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_land_expression_01(t):
'''
land_expression : or_expression
'''
t[0]=t[1]
def p_land_expression_02(t):
'''
land_expression : land_expression LAND or_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_lor_expression_01(t):
'''
lor_expression : land_expression
'''
t[0]=t[1]
def p_lor_expression_02(t):
'''
lor_expression : lor_expression LOR land_expression
'''
t[0]=ast.BinaryOp(t[1],t[2],t[3])
def p_assignment_expression_01(t):
'''
assignment_expression : lor_expression
'''
t[0]=t[1]
def p_assignment_expression_02(t): # a=b=3
'''
assignment_expression : reference_expression assignment_operator assignment_expression
'''
t[0]=ast.AssignmentStatement(t[1],t[2],t[3]) # ojo q se puede liar una buena asignandoCONTROLAR
def p_declaration_statement_01(t):
'''
declaration_statement : type declaration_list SEMI
'''
# para cada elemento de la declarator list crear un nodo declaracion
for c in t[2].children:
c.type=t[1]
t[0]=t[2]
#def p_declaration_statement_02(t):
#'''
#declaration_statement : declaration_statement_init
#'''
## para cada elemento de la declarator list crear un nodo declaracion
#t[0]=t[1]
#def p_declaration_statement_init(t):
#'''
#declaration_statement_init : type declaration_list EQUALS initializer SEMI
#'''
## para cada elemento de la declarator list crear un nodo declaracion
#for c in t[2].children:
#c.type=t[1]
#c.init=t[4]
#t[0]=t[2]
#def p_declaration_statement_03(t):
# '''
# declaration_statement : struct ID LBRA RBRA
# '''
def p_declaration_list_01(t):
'''
declaration_list : declaration_list COMMA declaration
'''
t[1].add(t[3])
t[0]=t[1]
def p_declaration_list_02(t):
'''
declaration_list : declaration
'''
t[0]=ast.VariableDeclarationStatement(t[1])
def p_declaration_01(t):
'''
declaration : IDENTIFIER
'''
t[0]=ast.VariableDeclaration(t[1])
def p_declaration_02(t):
'''
declaration : IDENTIFIER EQUALS initializer
'''
t[0]=ast.VariableDeclaration(t[1])
t[0].init = t[3]
def p_declaration_03(t):
'''
declaration : IDENTIFIER LPAR actual_parameters_list RPAR
'''
t[0]=ast.VariableDeclaration(t[1])
t[0].params = t[3]
def p_declaration_04(t):
'''
declaration : IDENTIFIER LPAR RPAR
'''
t[0]=ast.VariableDeclaration(t[1])
t[0].cons = ast.ActualParametersList()
def p_initializer(t): # ampliable con vectores
'''
initializer : lor_expression
'''
t[0]=t[1]
def p_assignment_statement(t):
'''
assignment_statement : assignment_expression SEMI
'''
t[0]=t[1]
def p_type_01 (t):
'''type : TYPEID'''
t[0] = ast.CustomType(t[1])
def p_type_02 (t):
'''type : VOID
| INT
| FLOAT
| DOUBLE
| CHAR
| BOOL
| STRING'''
t[0] = ast.Type(t[1])
def p_type_03 (t): #PRODUCE AMBIGUEDAD
'''type : CONST type'''
t[0] = t[2]
t[0].constant = True
def p_type_04 (t):
'''type : VECTOR LT type GT'''
t[0] = ast.VectorType(t[1], t[3])
def p_unary_operator(t):
'''
unary_operator : MINUS
| LNOT
'''
t[0]=t[1]
def p_multiplicative_operator(t):
'''
multiplicative_operator : MULT
| DIV
| MOD
'''
t[0]=t[1]
def p_additive_operator(t):
'''
additive_operator : PLUS
| MINUS
'''
t[0]=t[1]
def p_shift_operator(t):
'''
shift_operator : RPUT
| LPUT
'''
t[0]=t[1]
def p_relational_operator(t):
'''
relational_operator : GT
| LT
| LE
| GE
'''
t[0]=t[1]
def p_equality_operator(t):
'''
equality_operator : EQ
| NE
'''
t[0]=t[1]
def p_assignment_operator(t):
'''
assignment_operator : EQUALS
| MULTEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| ANDEQUAL
| OREQUAL
| XOREQUAL
| RIGHTSHIFTEQUAL
| LEFTSHIFTEQUAL
'''
t[0]=t[1]
def p_while_statement_01 (t):
'''while_statement : WHILE LPAR lor_expression RPAR statement'''
t[0] = ast.WhileStatement(t[3], t[5])
t[5].isStatement = True
def p_while_statement_02 (t):
'''while_statement : WHILE LPAR lor_expression RPAR SEMI'''
t[0] = ast.WhileStatement(t[3], ast.NullNode())
#def p_while_statement_cin (t):
#'''while_statement_cin : WHILE LPAR cin_bloc RPAR statement'''
#t[0] = ast.WhileStatementCin(t[3], t[5])
def p_for_statement (t):
'''for_statement : FOR LPAR assignment_statement assignment_statement assignment_expression RPAR statement'''
t[0] = ast.ForStatement(t[3], t[4], t[5], t[7])
t[7].isStatement = True
def p_for_statement_init (t):
'''for_statement : FOR LPAR declaration_statement assignment_statement assignment_expression RPAR statement'''
t[0] = ast.ForStatementInit(t[3], t[4], t[5], t[7])
t[7].isStatement = True
def p_if_statement_01 (t):
'''if_statement : IF LPAR assignment_expression RPAR statement'''
t[0] = ast.IfStatement(t[3], t[5])
t[5].isStatement = True
def p_if_statement_02(t):
'''if_statement : IF LPAR assignment_expression RPAR statement ELSE statement'''
t[0] = ast.IfStatement(t[3], t[5], t[7])
t[5].isStatement = True
t[7].isStatement = True
def p_return_statement_01 (t):
'''return_statement : RETURN assignment_statement'''
t[0] = ast.ReturnStatement(t[2])
def p_return_statement_02 (t):
'''return_statement : RETURN SEMI'''
t[0] = ast.ReturnStatement(None)
def p_actual_parameters_list_01 (t):
'''actual_parameters_list : empty'''
t[0] = ast.ActualParametersList()
def p_actual_parameters_list_02 (t):
'''actual_parameters_list : actual_parameter'''
t[0] = ast.ActualParametersList(t[1])
def p_actual_parameters_list_03 (t):
'''actual_parameters_list : actual_parameters_list COMMA actual_parameter'''
t[1].add(t[3])
t[0] = t[1]
def p_actual_parameter (t):
'''actual_parameter : assignment_expression'''
t[0] = t[1]
def p_error (t):
    """Yacc error handler: report the offending token, then resume parsing."""
    # PLY calls p_error(None) on an unexpected end of input; guard before
    # touching token attributes (the old code would raise AttributeError).
    if t is None:
        print('Syntax error: unexpected end of input.')
        return
    # print() with one argument is valid in both Python 2 and Python 3.
    print('Syntax error around line %d in token %s.' % (t.lineno, t.type))
    # Tell PLY the error was handled so parsing continues past it.
    yacc.errok()
# Build the parser
parser = yacc.yacc()
| 21.921854 | 114 | 0.638995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,996 | 0.483113 |
a8eda53e827a5f782990a22fc39cd52fd4859e3a | 6,530 | py | Python | src/models/backbones/resnet.py | DIVA-DIA/DIVA-DAF | 0ae3b873d04f1852d9053cb4cb2fbc7bda73471c | [
"MIT"
] | 3 | 2022-02-10T17:35:41.000Z | 2022-03-04T10:38:58.000Z | src/models/backbones/resnet.py | DIVA-DIA/DIVA-DAF | 0ae3b873d04f1852d9053cb4cb2fbc7bda73471c | [
"MIT"
] | 3 | 2022-02-02T09:12:18.000Z | 2022-02-16T13:42:30.000Z | src/models/backbones/resnet.py | DIVA-DIA/DIVA-DAF | 0ae3b873d04f1852d9053cb4cb2fbc7bda73471c | [
"MIT"
] | null | null | null | """
Model definition adapted from: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import math
from typing import Optional, List, Union, Type
import torch.nn as nn
# Download URLs of torchvision's ImageNet-pretrained ResNet weights, keyed by
# architecture name.  Not referenced in this excerpt — presumably consumed by
# a load_state_dict_from_url-style helper elsewhere; verify before removing.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1.

    Padding 1 preserves the spatial size whenever stride == 1; bias is
    omitted because a BatchNorm layer follows each conv in this file.
    """
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=(3, 3),
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
class _BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions (ResNet-18/34 style).

    The optional ``downsample`` module adapts the identity branch when the
    spatial size or channel count of the main branch changes.
    """

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation: int = 1):
        super(_BasicBlock, self).__init__()
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not implemented in BasicBlock")
        # The first conv carries the stride (possible spatial downsampling);
        # the second preserves the resolution.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main branch: conv-bn-relu, conv-bn.
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        # Identity branch, adapted when a downsample module is present.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Sum then final ReLU, as in the original residual formulation.
        return self.relu(branch + shortcut)
class _Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation: int = 1):
super(_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=(3, 3), stride=stride,
padding=(1, 1), bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=(1, 1), bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block: Type[Union[_BasicBlock, _Bottleneck]], layers: List[int],
replace_stride_with_dilation: Optional[List[bool]] = None, **kwargs):
super(ResNet, self).__init__()
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
f"replace_stride_with_dilation should be None or a 3-tuple, got {replace_stride_with_dilation}")
self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3),
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block: Type[Union[_BasicBlock, _Bottleneck]], planes: int, blocks: int, stride: int = 1,
dilate: bool = False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=(1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(inplanes=self.inplanes, planes=planes, stride=stride, downsample=downsample,
dilation=previous_dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResNet18(ResNet):
def __init__(self, **kwargs):
super(ResNet18, self).__init__(_BasicBlock, [2, 2, 2, 2], **kwargs)
class ResNet34(ResNet):
def __init__(self, **kwargs):
super(ResNet34, self).__init__(_BasicBlock, [3, 4, 6, 3], **kwargs)
class ResNet50(ResNet):
def __init__(self, **kwargs):
super(ResNet50, self).__init__(_Bottleneck, [3, 4, 6, 3], **kwargs)
class ResNet101(ResNet):
def __init__(self, **kwargs):
super(ResNet101, self).__init__(_Bottleneck, [3, 4, 23, 3], **kwargs)
class ResNet152(ResNet):
def __init__(self, **kwargs):
super(ResNet152, self).__init__(_Bottleneck, [3, 8, 36, 3], **kwargs)
| 34.550265 | 114 | 0.606126 | 5,693 | 0.871822 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.097243 |
a8f137a79f39517bbee4123c05c0f4f0f49019c3 | 5,652 | py | Python | feature_extraction.py | Tina-Rezaei/malware-detection-based-on-pe-header | f897f5b3e9ac8158ee4d7bf6a002cd7f1498f8f7 | [
"MIT"
] | 1 | 2020-11-15T18:43:07.000Z | 2020-11-15T18:43:07.000Z | feature_extraction.py | Tina-Rezaei/malware-detection-based-on-pe-header | f897f5b3e9ac8158ee4d7bf6a002cd7f1498f8f7 | [
"MIT"
] | null | null | null | feature_extraction.py | Tina-Rezaei/malware-detection-based-on-pe-header | f897f5b3e9ac8158ee4d7bf6a002cd7f1498f8f7 | [
"MIT"
] | null | null | null | import os
import pefile
import time
import re
import click
import subprocess
data_directory_list = ['DIRECTORY_ENTRY_DEBUG', 'DIRECTORY_ENTRY_EXPORT', 'DIRECTORY_ENTRY_LOAD_CONFIG',
'DIRECTORY_ENTRY_RESOURCE', 'DIRECTORY_ENTRY_BASERELOC', 'DIRECTORY_ENTRY_TLS']
normal_section_names = ['.text', '.rdata', '.data', '.pdata', '.rsrc', '.idata', '.bss', '.code', '.edata']
def entropy(name, path):
entropy_list = []
entropy = subprocess.check_output("ent '{}' | head -n 1 | cut -d' ' -f 3".format((path + name)),
shell=True).decode('utf8')
entropy_list.append(entropy[0:-1])
pe = pefile.PE(path + name)
text_flag = False
data_flag = False
for section in pe.sections:
try:
section_name = (section.Name).decode('utf-8')
section_name = section_name.replace('\x00','')
if section_name =='.text':
text_entropy = section.get_entropy()
text_flag = True
elif section_name =='.data':
data_entropy = section.get_entropy()
data_flag = True
except:
continue
entropy_list.append(text_entropy if text_flag else -1)
entropy_list.append(data_entropy if data_flag else -1)
return entropy_list
def section_name_checker(section_names):
"""
:param section_names:
an array of section names of a program
:return:
a 1*2d array that indicate number of nonsuspicious sections and number of suspicious sections,respectively
"""
number_of_suspicious_names = 0
number_of_nonsuspicious_names = 0
for name in section_names:
if name in normal_section_names:
number_of_nonsuspicious_names += 1
else:
number_of_suspicious_names += 1
return number_of_suspicious_names,number_of_nonsuspicious_names
def empty_section_name_checker(section_names):
#---- normalize names --------
for i in range(len(section_names)):
section_names[i] = re.sub(' +', ' ',section_names[i])
if '' in section_names or ' ' in section_names:
# print(file_name)
return 0
else:
return 1
def data_directory_checker(pe,data_directory_name):
try:
if getattr(pe,data_directory_name):
return 1
else:
return 0
except:
return 0
@click.command()
@click.option('--path', required=True, help='path of samples')
@click.option('--outputfile', default='features.txt', help='output file name for storing extracted features')
def feature_extractor(path,outputfile):
start_time = time.time()
samples = os.listdir(path)
features_outputfile = open(outputfile,'w')
for sample in samples:
try:
pe = pefile.PE(path + sample)
# ----------------- Data Directories --------------------
temp = ''
for data_directory in data_directory_list:
temp += str(data_directory_checker(pe, data_directory))
features_outputfile.write('{},'.format(int(temp,2)))
print(int(temp,2))
# ---------------------- file_info -----------------------
count = 0
try:
for entry in pe.FileInfo:
if entry[0].Key == b'StringFileInfo':
entry = entry[0]
for st in entry.StringTable:
for entry in (st.entries.items()):
count += 1
if entry[1].Key == b'StringFileInfo':
entry = entry[1]
for st in entry.StringTable:
for entry in (st.entries.items()):
count += 1
features_outputfile.write('{},'.format(count))
except:
features_outputfile.write('{},'.format(count))
print(count)
# ---------------------- checksum ------------------------
try:
checksum = pe.OPTIONAL_HEADER.CheckSum
features_outputfile.write('0,'.format(sample)) if checksum == 0 else features_outputfile.write(
'1,'.format(sample))
except:
features_outputfile.write('0,'.format(sample))
# ------------------------- entropy ---------------------------
entropies = entropy(sample, path)
for entro in entropies:
features_outputfile.write('{},'.format(entro))
print(entropies)
# ----------------------- section names -----------------------
section_names = []
try:
sections = pe.sections
for section in sections:
name = (section.Name).decode('utf-8')
name = name.replace('\x00', '')
section_names.append(name)
except:
continue
section_name_features = section_name_checker(section_names)
features_outputfile.write('{},{},'.format(section_name_features[0], section_name_features[1]))
empty_section_names = empty_section_name_checker(section_names)
features_outputfile.write('{},{}\n'.format(empty_section_names, sample))
print(section_name_features)
print(empty_section_names)
except:
print('{} is not a pe file'.format(sample))
end_time = time.time()
print('feature extraction time: {}s'.format(end_time - start_time))
if __name__ == '__main__':
feature_extractor()
| 36.701299 | 111 | 0.548478 | 0 | 0 | 0 | 0 | 3,205 | 0.567056 | 0 | 0 | 1,119 | 0.197983 |
a8f190e009aea7f92f01038fb21a86895c98af57 | 18,148 | py | Python | research/steve/toy_demo.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/steve/toy_demo.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/steve/toy_demo.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
### Hyperparameters
NONTERMINAL_STATE_COUNT = 100
NOISE_AMOUNT = 0.1
TRAIN_STEPS = 10000
Q_ENSEMBLE_SIZE = 8
MODEL_ENSEMBLE_SIZE = 8
HORIZON = 5
TRIAL_N = 10
### Helper functions
initial_state = 0
terminal_state = NONTERMINAL_STATE_COUNT + 1
nonterminal_state_count = NONTERMINAL_STATE_COUNT
state_count = NONTERMINAL_STATE_COUNT + 1
final_reward = NONTERMINAL_STATE_COUNT
colors = sns.color_palette('husl', 4)
plt.rcParams["figure.figsize"] = (6, 5)
def step(state):
if state == terminal_state:
next_state = terminal_state
else:
next_state = state + 1
if state == terminal_state:
reward = 0
elif state + 1 == terminal_state:
reward = final_reward
else:
reward = -1
return next_state, reward
def noisy_step(state):
if state == terminal_state:
next_state = terminal_state
elif np.random.random([]) < NOISE_AMOUNT:
next_state = np.random.randint(0, state_count)
else:
next_state = state + 1
if state == terminal_state:
reward = 0
elif state + 1 == terminal_state:
reward = final_reward
else:
reward = -1
return next_state, reward
def get_error(Q):
losses = np.square(np.arange(state_count) - Q[:-1])
return np.mean(losses)
def downsample(array, factor):
pad_size = np.ceil(old_div(float(array.size), factor)) * factor - array.size
array_padded = np.append(array, np.zeros([pad_size.astype(np.int64)]) * np.NaN)
return scipy.nanmean(array_padded.reshape(-1, factor), axis=1)
######################
### Main experiments
######################
# Basic Q
if True:
print("Running basic Q-learning.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [state_count + 1]).astype(np.float64)
Q[state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
Q[state] = reward + Q[next_state]
losses.append(get_error(Q))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="Basic Q-learning", color=colors[0])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[0])
with open('Toy-v1/baseline.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# Ensemble Q
if True:
print("Running ensemble Q-learning.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
Q[q_ensemble_i, state] = reward + np.mean(Q[:, next_state])
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="Ensemble Q-learning", color=colors[1])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[1])
# Ensemble MVE-Oracle
if True:
print("Running ensemble oracle MVE.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# MVE rollout
target = reward
for _ in range(HORIZON):
next_state, reward = step(next_state)
target += reward
target += np.mean(Q[:, next_state])
Q[q_ensemble_i, state] = target
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="MVE-oracle", color=colors[2])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2])
with open('Toy-v1/mve_oracle.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# Ensemble MVE-Noisy
if True:
print("Running ensemble noisy MVE.")
trial_results = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
for step_i in range(TRAIN_STEPS):
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# MVE rollout
targets = []
first_next_state, first_reward = next_state, reward
for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE):
next_state, reward = first_next_state, first_reward
target = reward
for _ in range(HORIZON):
next_state, reward = noisy_step(next_state)
target += reward
target += np.mean(Q[:, next_state])
targets.append(target)
Q[q_ensemble_i, state] = np.mean(targets)
losses.append(get_error(np.mean(Q, axis=0)))
trial_results.append(losses)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="MVE-noisy", color=colors[2], linestyle='dotted')
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2])
with open('Toy-v1/mve_noisy.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# STEVE-Oracle
if True:
print("Running ensemble oracle STEVE.")
trial_results = []
oracle_q_estimate_errors = []
oracle_mve_estimate_errors = []
oracle_steve_estimate_errors = []
oracle_opt_estimate_errors = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
q_estimate_errors = []
mve_estimate_errors = []
steve_estimate_errors = []
opt_estimate_errors = []
steve_beat_freq = []
for step_i in range(TRAIN_STEPS):
_q_estimate_errors = []
_mve_estimate_errors = []
_steve_estimate_errors = []
_opt_estimate_errors = []
_steve_beat_freq = []
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# STEVE rollout
Q_est_mat = np.zeros([HORIZON + 1, Q_ENSEMBLE_SIZE])
reward_est_mat = np.zeros([HORIZON + 1, 1])
first_next_state, first_reward = next_state, reward
next_state, reward = first_next_state, first_reward
Q_est_mat[0, :] = Q[:, next_state]
reward_est_mat[0, 0] = reward
for timestep_i in range(1, HORIZON + 1):
next_state, reward = step(next_state)
Q_est_mat[timestep_i, :] = Q[:, next_state]
reward_est_mat[timestep_i, 0] = reward
all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0)
# STEVE weight calculation
estimates = np.mean(all_targets, axis=1)
confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8))
coefficients = old_div(confidences, np.sum(confidences))
target = np.sum(estimates * coefficients)
Q[q_ensemble_i, state] = target
true_target = state + 1. if state != terminal_state else 0.
_q_estimate_errors.append(np.square(estimates[0] - true_target))
_mve_estimate_errors.append(np.square(estimates[-1] - true_target))
_steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target))
_opt_estimate_errors.append(np.min(np.square(estimates - true_target)))
losses.append(get_error(np.mean(Q, axis=0)))
q_estimate_errors.append(np.mean(_q_estimate_errors))
mve_estimate_errors.append(np.mean(_mve_estimate_errors))
steve_estimate_errors.append(np.mean(_steve_estimate_errors))
opt_estimate_errors.append(np.mean(_opt_estimate_errors))
trial_results.append(losses)
oracle_q_estimate_errors.append(q_estimate_errors)
oracle_mve_estimate_errors.append(mve_estimate_errors)
oracle_steve_estimate_errors.append(steve_estimate_errors)
oracle_opt_estimate_errors.append(opt_estimate_errors)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="STEVE-oracle", color=colors[3])
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3])
with open('Toy-v1/steve_oracle.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# STEVE-Noisy
if True:
print("Running ensemble noisy STEVE.")
trial_results = []
noisy_q_estimate_errors = []
noisy_mve_estimate_errors = []
noisy_steve_estimate_errors = []
noisy_opt_estimate_errors = []
noisy_steve_beat_freq = []
for run_i in range(TRIAL_N):
print("Trial %d" % run_i)
Q = np.random.randint(0, state_count, [Q_ENSEMBLE_SIZE, state_count + 1]).astype(np.float64)
Q[:, state_count] = 0
losses = []
q_estimate_errors = []
mve_estimate_errors = []
steve_estimate_errors = []
opt_estimate_errors = []
steve_beat_freq = []
for step_i in range(TRAIN_STEPS):
_q_estimate_errors = []
_mve_estimate_errors = []
_steve_estimate_errors = []
_opt_estimate_errors = []
_steve_beat_freq = []
for q_ensemble_i in range(Q_ENSEMBLE_SIZE):
state = np.random.randint(0, state_count)
next_state, reward = step(state)
# STEVE rollout
Q_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, Q_ENSEMBLE_SIZE])
reward_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, 1])
first_next_state, first_reward = next_state, reward
for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE):
next_state, reward = first_next_state, first_reward
Q_est_mat[0, model_ensemble_i, :] = Q[:, next_state]
reward_est_mat[0, model_ensemble_i, 0] = reward
for timestep_i in range(1, HORIZON + 1):
next_state, reward = noisy_step(next_state)
Q_est_mat[timestep_i, model_ensemble_i, :] = Q[:, next_state]
reward_est_mat[timestep_i, model_ensemble_i, 0] = reward
all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0)
# STEVE weight calculation
all_targets = np.reshape(all_targets, [HORIZON + 1, MODEL_ENSEMBLE_SIZE * Q_ENSEMBLE_SIZE])
estimates = np.mean(all_targets, axis=1)
confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8))
coefficients = old_div(confidences, np.sum(confidences))
target = np.sum(estimates * coefficients)
# target = estimates[0]
Q[q_ensemble_i, state] = target
true_target = state + 1. if state != terminal_state else 0.
_q_estimate_errors.append(np.square(estimates[0] - true_target))
_mve_estimate_errors.append(np.square(estimates[-1] - true_target))
_steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target))
_opt_estimate_errors.append(np.min(np.square(estimates - true_target)))
_steve_beat_freq.append(float(np.square(estimates[0] - true_target) > np.square(target - true_target)))
losses.append(get_error(np.mean(Q, axis=0)))
q_estimate_errors.append(np.mean(_q_estimate_errors))
mve_estimate_errors.append(np.mean(_mve_estimate_errors))
steve_estimate_errors.append(np.mean(_steve_estimate_errors))
opt_estimate_errors.append(np.mean(_opt_estimate_errors))
steve_beat_freq.append(np.mean(_steve_beat_freq))
trial_results.append(losses)
noisy_q_estimate_errors.append(q_estimate_errors)
noisy_mve_estimate_errors.append(mve_estimate_errors)
noisy_steve_estimate_errors.append(steve_estimate_errors)
noisy_opt_estimate_errors.append(opt_estimate_errors)
noisy_steve_beat_freq.append(steve_beat_freq)
print("...complete.\n")
result = np.stack(trial_results, axis=1)
means = np.mean(result, axis=1)
stdevs = np.std(result, axis=1)
plt.plot(means, label="STEVE-noisy", color=colors[3], linestyle='dotted')
plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3])
with open('Toy-v1/steve_noisy.csv', 'w') as f:
data = []
for frame_i in range(result.shape[0]):
for loss in result[frame_i]:
data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss))
f.write("\n".join(data))
# ### Display results
# plt.title("Comparison of convergence rates")
# plt.legend()
# plt.savefig("comparison.pdf")
# plt.show()
#
# ### Display secondary results - error comparison
# DOWNSAMPLE = 50
# colors = sns.color_palette('husl', 8)
# for i, (error_curve, label) in enumerate([
# (oracle_q_estimate_errors, "Oracle Q error"),
# (oracle_mve_estimate_errors, "Oracle MVE error"),
# (oracle_steve_estimate_errors, "Oracle STEVE error"),
# # (oracle_opt_estimate_errors, "Oracle minimum single-estimate error"),
# ]):
# result = np.stack(error_curve, axis=1)
# means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
# plt.plot(means, label=label, color=colors[i])
# plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
#
# plt.title("Comparison of errors for oracle dynamics")
# plt.legend()
# plt.show()
#
# for i, (error_curve, label) in enumerate([
# (noisy_q_estimate_errors, "Noisy Q error"),
# (noisy_mve_estimate_errors, "Noisy MVE error"),
# (noisy_steve_estimate_errors, "Noisy STEVE error"),
# # (noisy_opt_estimate_errors, "Noisy minimum single-estimate error"),
# # (trial_steve_beat_freq, "STEVE beat freq"),
# ]):
# result = np.stack(error_curve, axis=1)
# means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
# plt.plot(means, label=label, color=colors[i])
# plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
#
# plt.title("Comparison of errors for noisy dynamics")
# plt.legend()
# plt.show()
| 40.782022 | 119 | 0.605356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,747 | 0.206469 |
a8f4b9efd55de6bdf846467727516214fa245f0e | 7,112 | py | Python | ldbs.py | Greg-Bernard/EloquaDataLoader | 523aebb519758f086177cc4124508c0cda88610b | [
"MIT"
] | 11 | 2018-02-02T03:02:17.000Z | 2022-03-01T03:52:20.000Z | ldbs.py | Greg-Bernard/EloquaDataLoader | 523aebb519758f086177cc4124508c0cda88610b | [
"MIT"
] | null | null | null | ldbs.py | Greg-Bernard/EloquaDataLoader | 523aebb519758f086177cc4124508c0cda88610b | [
"MIT"
] | null | null | null | #!/usr/bin/python
# ElqBulk scheduler by Greg Bernard
import schedule
import time
from ElqBulk import ElqBulk
from ElqRest import ElqRest
import TableNames
import geoip
from closest_city import CityAppend
def initialise_database(filename='EloquaDB.db'):
"""
Initialise entire database in one run
"""
for item in TableNames.tables:
initialise_table(item, filename)
def initialise_table(table, filename='ElqData.db'):
"""
Initialise only the data for a single table
:param table: the name of the table you're syncing from Eloqua
:param filename: the name of the file you're dumping the data into
"""
# Only load/update all values for a single table
tb = ElqBulk(filename=filename, table=table)
tb.create_table()
tb.get_initial_data()
tb.load_to_database()
tb.commit()
tb.close()
def sync_database(filename='EloquaDB.db'):
"""
Sync entire database in one run
"""
for item in TableNames.tables:
sync_table(item, filename)
def sync_table(table, filename='EloquaDB.db'):
"""
Sync only the data for a single table
:param table: the name of the table you're syncing from Eloqua
:param filename: the name of the file you're dumping the data into
"""
# Only load/update all values for a single table
tb = ElqBulk(filename=filename, table=table)
tb.create_table()
tb.get_sync_data()
tb.load_to_database()
tb.commit()
tb.close()
def sync_tables(tables, filename='EloquaDB.db'):
"""
Initialize the data for 1 to many tables
:param tables: the list of the tables you're syncing from Eloqua
:param filename: the name of the file you're dumping the data into
"""
if set(tables).issubset(TableNames.tables) is False:
print("The inputs must be within the accepted list of Eloqua tables.")
exit()
for item in tables:
sync_table(item, filename)
def sync_external_activities(filename='EloquaDB.db', start=None, end=99999):
"""
Syncs external activities to the database
:param filename: the name of the file you're dumping the data into
:param start: number of the record you wish to start you pull from, defaults to last record created
:param end: number of the last record you wish to pull, non-inclusive
"""
db = ElqRest(filename=filename, sync='external')
db.export_external(start=start, end=end)
def sync_campaigns(filename='EloquaDB.db'):
"""
Syncs campaigns to the database
:param filename: the name of the file you're dumping the data into
"""
db = ElqRest(filename=filename, sync='campaigns')
db.export_campaigns()
def sync_users(filename='EloquaDB.db'):
"""
Syncs campaigns to the database
:param filename: the name of the file you're dumping the data into
"""
db = ElqRest(filename=filename, sync='users')
db.export_users()
def full_geoip(**kwargs):
"""
Run geoip on all tables that contain the column IpAddress.
:param filename: file to sync to
:param tables_with_ip: list of tables containing IP Addresses to cycle through
"""
tables_with_ip = kwargs.get('tables_with_ip', ['EmailClickthrough', 'EmailOpen', 'PageView', 'WebVisit'])
filename = kwargs.get('filename', 'EloquaDB.db')
for tb in tables_with_ip:
run_geoip(filename=filename, tablename=tb)
def run_geoip(**kwargs):
"""
Runs the IP lookup on specified tables that creates a table indexing all
IP Address Geolocations where at least the city was provided
:param filename: file to sync to
:param tablename: table to take IP Addresses from to geolocate
"""
table = kwargs.get('table','EmailClickthrough')
filename = kwargs.get('filename', 'EloquaDB.db')
db = geoip.IpLoc(filename=filename, tablename=table)
db.create_table()
db.save_location_data()
db.commit_and_close()
def closest_city(**kwargs):
"""
Takes every coordinate in the GeoIP table and calculates the closest city against every major population center in NA
:param kwargs: table = name of the table (GeoIP), filename = name of database file (EloquaDB.db)
"""
table = kwargs.get('table', 'GeoIP')
filename = kwargs.get('filename', 'EloquaDB.db')
cc = CityAppend(filename=filename, table=table)
cc.closest_cities()
cc.load_to_database()
def daily_sync(**kwargs):
"""
Schedule a sync every day at specified time, default to midnight
:param daytime: which time of day to perform the sync Format: hh:mm
:param sync: which sync function to perform
:param filename: file to sync to
"""
daytime = kwargs.get('daytime', "00:00")
filename = kwargs.get('filename', 'EloquaDB.db')
sync = kwargs.get('sync', sync_database(filename=filename))
print("Scheduling a daily Eloqua sync at {}.".format(daytime))
schedule.every().day.at(daytime).do(sync)
while True:
schedule.run_pending()
time.sleep(1)
def hourly_sync(**kwargs):
"""
Schedule a sync every set number of hours
:param hours: how many hours to wait between syncs
:param sync: which sync function to perform
:param filename: file to sync to
"""
hours = kwargs.get('hours', 4)
filename = kwargs.get('filename', 'EloquaDB.db')
sync = kwargs.get('sync', sync_database(filename=filename))
print("Scheduling an Eloqua sync every {} hours.".format(hours))
schedule.every(hours).hours.do(sync)
while True:
schedule.run_pending()
time.sleep(1)
def available_tables():
"""
Return available table names for export.
"""
print(TableNames.tables)
def main(filename='EloquaDB.db'):
"""
Main function runs when file is run as main.
"""
# Performs full database sync, only updating records modified since the last sync
sync_database(filename=filename)
# Iterates through all tables with IP addresses and logs the IP with
# its geolocation in the GeoIP table
full_geoip(filename=filename)
# Calculates the distance from a given point to every major population center in North America
# Then returns that population center, the distance from it in km, and the country that city is in
closest_city(filename=filename)
# Performs a full sync of all users in Eloqua
sync_users(filename=filename)
# Performs a full campaign sync, updates the last 'page' of campaigns (default page size is set to 100)
sync_campaigns(filename=filename)
# Performs full external activity sync, only updating records created since the last sync
# WARNING THIS CAN USE A HIGH NUMBER OF API CALLS AND TAKE A LONG TIME - CHECK YOUR API LIMIT BEFORE USING THIS
sync_external_activities(filename=filename)
# Exports GeoIP table inner joined with tables that contain activities
# with IP addresses in csv format
geoip.export_geoip(filename=filename)
# When using schedulers
# To clear all functions
# schedule.clear()
# if this module is run as main it will execute the main routine
if __name__ == '__main__':
main()
| 30.135593 | 121 | 0.695585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,121 | 0.579443 |
a8f537040119b192daaa8da6c2ebd5f6aff85c58 | 22,554 | py | Python | test/FileTest.py | ytyaru/Python.File.Dir.Stat.20180402093000 | f66e5eff603c62e24dd25f4aea034ce288059c66 | [
"CC0-1.0"
] | null | null | null | test/FileTest.py | ytyaru/Python.File.Dir.Stat.20180402093000 | f66e5eff603c62e24dd25f4aea034ce288059c66 | [
"CC0-1.0"
] | null | null | null | test/FileTest.py | ytyaru/Python.File.Dir.Stat.20180402093000 | f66e5eff603c62e24dd25f4aea034ce288059c66 | [
"CC0-1.0"
] | null | null | null | import sys, os, os.path, pathlib
print(pathlib.Path(__file__).parent.parent / 'src')
sys.path.append(str(pathlib.Path(__file__).parent.parent / 'src'))
from File import File
from Directory import Directory
import unittest
import time, datetime
class FileTest(unittest.TestCase):
    # ----------------------------
    # Class methods
    # ----------------------------
def test_IsExist(self):
self.assertTrue(File.IsExist(__file__))
# 存在するがファイルでないためFalse
self.assertTrue(not File.IsExist(os.path.dirname(__file__)))
self.assertTrue(not File.IsExist('/NotExistDir.txt'))
def test_Create_Delete(self):
target = '/tmp/work/__TEST__/a.txt'
self.assertTrue(not File.IsExist(target))
File.Create(target)
self.assertTrue(File.IsExist(target))
self.assertTrue(0 == File.GetSize(target))
File.Delete(target)
self.assertTrue(not File.IsExist(target))
target = '/tmp/work/__TEST__/A/B/C/d.e'
self.assertTrue(not File.IsExist(target))
File.Create(target)
self.assertTrue(File.IsExist(target))
File.Delete(target)
self.assertTrue(not File.IsExist(target))
target = '/tmp/work/__TEST__'
Directory.Delete(target)
def test_CreateDummy(self):
target = '/tmp/work/__TEST__/a.txt'
self.assertTrue(not File.IsExist(target))
File.CreateDummy(target, 1024)
self.assertTrue(File.IsExist(target))
self.assertTrue(1024 == File.GetSize(target))
File.Delete(target)
self.assertTrue(not File.IsExist(target))
target = '/tmp/work/__TEST__/A/B/C/d.e'
self.assertTrue(not File.IsExist(target))
File.CreateDummy(target, 4096)
self.assertTrue(File.IsExist(target))
self.assertTrue(4096 == File.GetSize(target))
File.Delete(target)
self.assertTrue(not File.IsExist(target))
target = '/tmp/work/__TEST__'
Directory.Delete(target)
def test_Copy(self):
target = '/tmp/work/__TEST__/a.txt'
self.assertTrue(not File.IsExist(target))
File.CreateDummy(target, 1024)
File.Copy(target, '/tmp/work/__TEST__/b.txt')
self.assertTrue(File.IsExist('/tmp/work/__TEST__/b.txt'))
self.assertTrue(1024 == File.GetSize('/tmp/work/__TEST__/a.txt'))
self.assertTrue(1024 == File.GetSize('/tmp/work/__TEST__/b.txt'))
self.assertTrue(not os.path.exists('/tmp/work/__TEST_2__'))
with self.assertRaises(IsADirectoryError) as e:
File.Copy('/tmp/work/__TEST__', '/tmp/work/__TEST_2__')
self.assertTrue(not os.path.exists('/tmp/work/__TEST_2__'))
with self.assertRaises(IsADirectoryError) as e:
File.Copy('/tmp/work/__TEST__', '/tmp/work/__TEST_2__/c.txt')
self.assertTrue(not os.path.exists('/tmp/work/__TEST_2__/c.txt'))
File.Copy('/tmp/work/__TEST__/a.txt', '/tmp/work/__TEST_2__')
self.assertTrue(os.path.exists('/tmp/work/__TEST_2__'))
self.assertTrue(1024 == File.GetSize('/tmp/work/__TEST_2__'))
File.Delete('/tmp/work/__TEST__/a.txt')
File.Delete('/tmp/work/__TEST__/b.txt')
File.Delete('/tmp/work/__TEST_2__')
Directory.Delete('/tmp/work/__TEST__')
def test_Move_single(self):
target = '/tmp/work/__TEST__/a.txt'
self.assertTrue(not File.IsExist(target))
self.assertTrue(not File.IsExist('/tmp/work/__TEST_2__'))
File.Create(target)
File.Move(target, '/tmp/work/__TEST_2__/b.txt')
self.assertTrue(not File.IsExist(target))
self.assertTrue(File.IsExist('/tmp/work/__TEST_2__/b.txt'))
Directory.Delete('/tmp/work/__TEST_2__')
Directory.Delete('/tmp/work/__TEST__')
# ----------------------------
# インスタンスメソッド
# ----------------------------
def test_init_relative_error(self):
with self.assertRaises(ValueError) as e:
d = File('A')
self.assertEqual('引数pathは絶対パスにしてください。path=\'{}\''.format('A'), e.exception.args[0])
def test_mk_rm(self):
target_root = '/tmp/work/__TEST__'
target = '/tmp/work/__TEST__/a.txt'
d = File(target)
self.assertTrue(not File.IsExist(target))
d.mk()
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(target))
self.assertTrue(not File.IsExist(os.path.join(target_root, 'A/a.txt')))
d.mk('A/a.txt')
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(os.path.join(target_root, 'A/a.txt')))
self.assertTrue(not File.IsExist(os.path.join(target_root, 'B/BB/BBB/b.txt')))
d.mk('B/BB/BBB/b.txt')
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(os.path.join(target_root, 'B/BB/BBB/b.txt')))
self.assertTrue(not File.IsExist(os.path.join('/tmp/work/__TEST__/C/c.txt')))
d.mk('/tmp/work/__TEST__/C/c.txt')
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(os.path.join('/tmp/work/__TEST__/C/c.txt')))
d.rm()
Directory.Delete('/tmp/work/__TEST__')
def test_mk_dummy(self):
target_root = '/tmp/work/__TEST__'
target = '/tmp/work/__TEST__/a.txt'
d = File(target)
self.assertTrue(not File.IsExist(target_root))
self.assertTrue(d.Stat is None)
d.mk_dummy(1024)
self.assertEqual(target, d.Path)
self.assertEqual(1024, d.Size)
self.assertTrue(File.IsExist(target))
self.assertTrue(not File.IsExist(os.path.join(target_root, 'A/a.txt')))
d.mk_dummy(2048, 'A/a.txt')
self.assertEqual(target, d.Path)
self.assertEqual(2048, File.GetSize('/tmp/work/__TEST__/A/a.txt'))
self.assertTrue(File.IsExist(os.path.join(target_root, 'A/a.txt')))
self.assertTrue(not File.IsExist(os.path.join(target_root, 'B/BB/BBB/b.txt')))
d.mk_dummy(3072, 'B/BB/BBB/b.txt')
self.assertEqual(target, d.Path)
#self.assertEqual(3072, d.Size)
self.assertEqual(3072, File.GetSize('/tmp/work/__TEST__/B/BB/BBB/b.txt'))
self.assertTrue(File.IsExist(os.path.join(target_root, 'B/BB/BBB/b.txt')))
self.assertTrue(not File.IsExist(os.path.join('/tmp/work/__TEST__/C/c.txt')))
d.mk_dummy(4096, '/tmp/work/__TEST__/C/c.txt')
self.assertEqual(target, d.Path)
#self.assertEqual(4096, d.Size)
self.assertEqual(4096, File.GetSize('/tmp/work/__TEST__/C/c.txt'))
self.assertTrue(File.IsExist(os.path.join('/tmp/work/__TEST__/C/c.txt')))
Directory.Delete('/tmp/work/__TEST__')
def test_mk_rm_raise(self):
target_root = '/tmp/work/__TEST__'
target = '/tmp/work/__TEST__/a.txt'
d = File(target)
self.assertTrue(not File.IsExist(target_root))
with self.assertRaises(ValueError) as e:
d.mk('/tmp/work/A')
self.assertEqual('引数pathは未指定か次のパスの相対パス、または次のパス配下を指定してください。{}'.format(target_root), e.exception.args[0])
with self.assertRaises(ValueError) as e:
d.rm('/tmp/work/A')
self.assertEqual('引数pathは未指定か次のパスの相対パス、または次のパス配下を指定してください。{}'.format(target_root), e.exception.args[0])
Directory.Delete('/tmp/work/__TEST__')
def test_cp_single(self):
target_root = '/tmp/work/__TEST__'
target= '/tmp/work/__TEST__/a.txt'
d = File(target)
self.assertEqual(target, d.Path)
self.assertTrue(not File.IsExist(target))
d.mk()
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(target))
self.assertTrue(not File.IsExist('/tmp/work/__TEST_2__'))
res = d.cp('/tmp/work/__TEST_2__/a.txt')
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist('/tmp/work/__TEST_2__/a.txt'))
self.assertEqual('/tmp/work/__TEST_2__/a.txt', res)
self.assertEqual('/tmp/work/__TEST__/a.txt', d.Path)
d.rm()
self.assertTrue(not File.IsExist('/tmp/work/__TEST__/a.txt'))
self.assertTrue(Directory.IsExist('/tmp/work/__TEST__'))
self.assertEqual(target, d.Path)
Directory.Delete('/tmp/work/__TEST__')
Directory.Delete('/tmp/work/__TEST_2__')
self.assertTrue(not Directory.IsExist('/tmp/work/__TEST_2__'))
self.assertTrue(not Directory.IsExist('/tmp/work/__TEST__'))
def test_cp_tree(self):
target_root = '/tmp/work/__TEST__'
target = '/tmp/work/__TEST__/a.txt'
d = File(target)
self.assertEqual(target, d.Path)
self.assertTrue(not File.IsExist(d.Path))
with self.assertRaises(FileNotFoundError) as e:
d.cp('/tmp/work/__TEST_2__/a.txt')
d.mk()
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(d.Path))
d.mk('A/a.txt')
self.assertEqual(target, d.Path)
self.assertTrue(not Directory.IsExist('/tmp/work/__TEST_2__'))
d.cp('/tmp/work/__TEST_2__/A/a.txt')
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist('/tmp/work/__TEST_2__/A/a.txt'))
d.rm()
self.assertEqual(target, d.Path)
Directory.Delete('/tmp/work/__TEST_2__')
Directory.Delete('/tmp/work/__TEST__')
self.assertTrue(not File.IsExist('/tmp/work/__TEST__'))
self.assertTrue(not File.IsExist('/tmp/work/__TEST_2__'))
def test_mv_single(self):
target = '/tmp/work/__TEST__/a.txt'
self.assertTrue(not File.IsExist(target))
d = File(target)
self.assertEqual(target, d.Path)
with self.assertRaises(FileNotFoundError) as e:
d.mv('/tmp/work/__TEST_2__/a.txt')
d.mk()
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist(target))
self.assertTrue(not File.IsExist('/tmp/work/__TEST_2__/a.txt'))
d.mv('/tmp/work/__TEST_2__/a.txt')
self.assertEqual('/tmp/work/__TEST_2__/a.txt', d.Path)
self.assertTrue(not File.IsExist(target))
self.assertTrue(File.IsExist('/tmp/work/__TEST_2__/a.txt'))
Directory.Delete('/tmp/work/__TEST_2__')
Directory.Delete('/tmp/work/__TEST__')
def test_mv_tree(self):
target = '/tmp/work/__TEST__/a.txt'
self.assertTrue(not File.IsExist(target))
self.assertTrue(not File.IsExist('/tmp/work/__TEST_2__/a.txt'))
d = File(target)
self.assertEqual(target, d.Path)
with self.assertRaises(FileNotFoundError) as e:
d.mv('/tmp/work/__TEST_2__/a.txt')
d.mk('B/b.txt')
self.assertEqual(target, d.Path)
self.assertTrue(File.IsExist('/tmp/work/__TEST__/B/b.txt'))
self.assertTrue(not File.IsExist('/tmp/work/__TEST__/a.txt'))
d.mk()
d.mv('/tmp/work/__TEST_2__/a.txt')
self.assertEqual('/tmp/work/__TEST_2__/a.txt', d.Path)
self.assertTrue(File.IsExist('/tmp/work/__TEST_2__/a.txt'))
self.assertTrue(not File.IsExist(target))
self.assertTrue(Directory.IsExist('/tmp/work/__TEST_2__'))
self.assertTrue(not File.IsExist('/tmp/work/__TEST_2__/B/b.txt'))
Directory.Delete('/tmp/work/__TEST_2__')
Directory.Delete('/tmp/work/__TEST__')
# ----------------------------
# Stat
# ----------------------------
def __MakeDummy(self, path, size):
os.makedirs(os.path.dirname(path), exist_ok=True)
if os.path.isfile(path): os.remove(path) # メタデータ初期化
with open(path, 'wb') as f:
f.write(b'\0'*size)
# ----------------------------
# クラスメソッド
# ----------------------------
def test_GetSize(self):
target_root = '/tmp/work/__TEST__'
path_a = os.path.join(target_root, 'a.dummy')
File.CreateDummy(path_a, 1024)
self.assertEqual(1024, File.GetSize(path_a))
path_b = os.path.join(target_root, 'B', 'b.dummy')
File.CreateDummy(path_b , 2048)
self.assertEqual(2048, File.GetSize(path_b))
path_c = os.path.join(target_root, 'C', 'c.dummy')
File.CreateDummy(path_c, 3072)
self.assertEqual(3072, File.GetSize(path_c))
path_d = os.path.join(target_root, 'D/DD/d.dummy')
File.CreateDummy(path_d, 4096)
self.assertEqual(4096, File.GetSize(path_d))
Directory.Delete(target_root)
def test_Mode_Get_Set_Name(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
mode = File.GetMode(target_dummy)
print(mode)
print(oct(mode))
File.SetMode(target_dummy, 0o755)
self.assertEqual(0o100755, File.GetMode(target_dummy))
self.assertEqual('-rwxr-xr-x', File.GetModeName(target_dummy))
File.SetMode(target_dummy, '-rwxrwxrwx')
self.assertEqual(0o100777, File.GetMode(target_dummy))
File.SetMode(target_dummy, 0o644)
self.assertEqual(0o100644, File.GetMode(target_dummy))
self.assertEqual('-rw-r--r--', File.GetModeName(target_dummy))
Directory.Delete(target_root)
def test_SetModeFromName_Error(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
mode_name = 'Invalid-Text'
with self.assertRaises(ValueError) as e:
File.SetMode(target_dummy, mode_name )
mode_names = [
'---',
'--x',
'-w-',
'-wx',
'r--',
'r-x',
'rw-',
'rwx'
]
self.assertEqual('引数mode_nameが不正値です。\'{}\'。\'-rwxrwxrwx\'の書式で入力してください。owner, group, other, の順に次のパターンのいずれかを指定します。pattern={}。r,w,xはそれぞれ、読込、書込、実行の権限です。-は権限なしを意味します。'.format(mode_name, mode_names), e.exception.args[0])
Directory.Delete(target_root)
def test_Modified_Get_Set(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
self.assertTrue(tuple == type(File.GetModified(target_dummy)))
self.assertTrue(2 == len(File.GetModified(target_dummy)))
self.assertTrue(float == type(File.GetModified(target_dummy)[0]))
self.assertTrue(datetime.datetime == type(File.GetModified(target_dummy)[1]))
#print(type(File.GetModified(target_dummy)[0]))
#print(type(File.GetModified(target_dummy)[1]))
dt1 = datetime.datetime.strptime('1999/12/31 23:59:59', '%Y/%m/%d %H:%M:%S')
dt2 = datetime.datetime.strptime('2345/01/02 12:34:56', '%Y/%m/%d %H:%M:%S')
epoch, dt = File.GetModified(target_dummy)
self.assertTrue(dt1 != dt)
self.assertTrue(dt2 != dt)
File.SetModified(target_dummy, dt1)
self.assertTrue(int(time.mktime(dt1.timetuple())) == File.GetModified(target_dummy)[0])
self.assertTrue(dt1 == File.GetModified(target_dummy)[1])
self.assertTrue(dt1 != File.GetChangedMeta(target_dummy)[1])
self.assertTrue(dt1 != File.GetAccessed(target_dummy)[1])
Directory.Delete(target_root)
def test_Accessed_Get_Set(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
self.assertTrue(tuple == type(File.GetAccessed(target_dummy)))
self.assertTrue(2 == len(File.GetAccessed(target_dummy)))
self.assertTrue(float == type(File.GetAccessed(target_dummy)[0]))
self.assertTrue(datetime.datetime == type(File.GetAccessed(target_dummy)[1]))
dt1 = datetime.datetime.strptime('1999/12/31 23:59:59', '%Y/%m/%d %H:%M:%S')
dt2 = datetime.datetime.strptime('2345/01/02 12:34:56', '%Y/%m/%d %H:%M:%S')
epoch, dt = File.GetAccessed(target_dummy)
self.assertTrue(dt1 != dt)
self.assertTrue(dt2 != dt)
File.SetAccessed(target_dummy, dt1)
self.assertTrue(int(time.mktime(dt1.timetuple())) == File.GetAccessed(target_dummy)[0])
self.assertTrue(dt1 == File.GetAccessed(target_dummy)[1])
self.assertTrue(dt1 != File.GetModified(target_dummy)[1])
self.assertTrue(dt1 != File.GetChangedMeta(target_dummy)[1])
Directory.Delete(target_root)
def test_GetChangedMeta(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
self.assertTrue(hasattr(File, 'GetChangedMeta'))
self.assertTrue(hasattr(File, 'GetCreated'))
print(File.GetChangedMeta(target_dummy))
print(File.GetCreated(target_dummy))
Directory.Delete(target_root)
def test_Ids(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
self.assertTrue(hasattr(File, 'OwnUserId'))
self.assertTrue(hasattr(File, 'OwnGroupId'))
self.assertTrue(hasattr(File, 'HardLinkNum'))
self.assertTrue(hasattr(File, 'INode'))
self.assertTrue(hasattr(File, 'DeviceId'))
print(File.GetOwnUserId(target_dummy))
print(File.GetOwnGroupId(target_dummy))
print(File.GetHardLinkNum(target_dummy))
print(File.GetINode(target_dummy))
print(File.GetDeviceId(target_dummy))
Directory.Delete(target_root)
# ----------------------------
# インスタンスメソッド
# ----------------------------
"""
def test_Stat(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
self.assertEqual(File, type(s))
self.assertEqual(os.stat_result, type(s.Stat))
Directory.Delete(target_root)
"""
def test_Path(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
self.assertEqual('/tmp/work/__TEST__', s.Path)
Directory.Delete(target_root)
def test_Size(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
s = File(target_dummy)
s.mk_dummy(1024)
self.assertEqual(1024, s.Size)
s = File('/tmp/work/__TEST__/B/b.txt')
s.mk_dummy(2048)
self.assertEqual(2048, s.Size)
s = File('/tmp/work/__TEST__/C/c.txt')
s.mk_dummy(3072)
self.assertEqual(3072, s.Size)
s = File('/tmp/work/__TEST__/D/DD/d.txt')
s.mk_dummy(4096)
self.assertEqual(4096, s.Size)
Directory.Delete(target_root)
def test_Mode(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
s.Mode = 0o777
self.assertEqual(0o40777, s.Mode)
self.assertEqual('drwxrwxrwx', s.ModeName)
s.Mode = 0o644
self.assertEqual(0o40644, s.Mode)
self.assertEqual('drw-r--r--', s.ModeName)
s.Mode = '-rwxrwxrwx'
self.assertEqual(0o40777, s.Mode)
self.assertEqual('drwxrwxrwx', s.ModeName)
Directory.Delete(target_root)
def test_Modified(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
self.assertTrue(tuple == type(s.Modified))
self.assertTrue(2 == len(s.Modified))
self.assertTrue(float == type(s.Modified[0]))
self.assertTrue(datetime.datetime == type(s.Modified[1]))
dt1 = datetime.datetime.strptime('1999/12/31 23:59:59', '%Y/%m/%d %H:%M:%S')
dt2 = datetime.datetime.strptime('2345/01/02 12:34:56', '%Y/%m/%d %H:%M:%S')
epoch, dt = s.Modified
self.assertTrue(dt1 != dt)
self.assertTrue(dt2 != dt)
s.Modified = dt1
self.assertTrue(int(time.mktime(dt1.timetuple())) == s.Modified[0])
self.assertTrue(dt1 == s.Modified[1])
self.assertTrue(dt1 != s.Accessed[1])
self.assertTrue(dt1 != s.Created[1])
self.assertTrue(dt1 != s.ChangedMeta[1])
Directory.Delete(target_root)
def test_Accessed(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
self.assertTrue(tuple == type(s.Accessed))
self.assertTrue(2 == len(s.Accessed))
self.assertTrue(float == type(s.Accessed[0]))
self.assertTrue(datetime.datetime == type(s.Accessed[1]))
dt1 = datetime.datetime.strptime('1999/12/31 23:59:59', '%Y/%m/%d %H:%M:%S')
dt2 = datetime.datetime.strptime('2345/01/02 12:34:56', '%Y/%m/%d %H:%M:%S')
epoch, dt = s.Accessed
self.assertTrue(dt1 != dt)
self.assertTrue(dt2 != dt)
s.Accessed = dt1
self.assertTrue(int(time.mktime(dt1.timetuple())) == s.Accessed[0])
self.assertTrue(dt1 == s.Accessed[1])
self.assertTrue(dt1 != s.Modified[1])
self.assertTrue(dt1 != s.Created[1])
self.assertTrue(dt1 != s.ChangedMeta[1])
Directory.Delete(target_root)
def test_ChangedMeta(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
self.assertTrue(hasattr(s, 'ChangedMeta'))
self.assertTrue(hasattr(s, 'Created'))
print(s.ChangedMeta)
print(s.Created)
Directory.Delete(target_root)
def test_Ids_Property(self):
target_root = '/tmp/work/__TEST__'
target_dummy = os.path.join(target_root, 'a.dummy')
File.CreateDummy(target_dummy, 1024)
s = File(target_root)
self.assertTrue(hasattr(s, 'OwnUserId'))
self.assertTrue(hasattr(s, 'OwnGroupId'))
self.assertTrue(hasattr(s, 'HardLinkNum'))
self.assertTrue(hasattr(s, 'INode'))
self.assertTrue(hasattr(s, 'DeviceId'))
print(s.OwnUserId)
print(s.OwnGroupId)
print(s.HardLinkNum)
print(s.INode)
print(s.DeviceId)
Directory.Delete(target_root)
# Allow running this test module directly: `python FileTest.py`.
if __name__ == '__main__':
    unittest.main()
| 41.689464 | 222 | 0.6202 | 22,699 | 0.987171 | 0 | 0 | 0 | 0 | 0 | 0 | 5,304 | 0.230669 |
a8f571492f94df0b230565b81e8284f0b4160ad7 | 1,994 | py | Python | cogs/StatCollector.py | galaxyAbstractor/rvnBot | a013b92c924cc218811e801680bf7d4318406a4c | [
"MIT"
] | null | null | null | cogs/StatCollector.py | galaxyAbstractor/rvnBot | a013b92c924cc218811e801680bf7d4318406a4c | [
"MIT"
] | null | null | null | cogs/StatCollector.py | galaxyAbstractor/rvnBot | a013b92c924cc218811e801680bf7d4318406a4c | [
"MIT"
] | null | null | null | from discord import TextChannel
from discord.ext import commands
from stats import StatService
from users import UserService
class StatCollector(commands.Cog):
    """Cog that collects usage statistics from Discord gateway events.

    Only ``on_message`` currently forwards data to :class:`StatService`; the
    remaining listeners are registered placeholders for future collection.
    """

    def __init__(self, bot):
        self.bot = bot
        self.stats = StatService(bot.pool)
        self.users = UserService(bot.pool)

    @commands.Cog.listener()
    async def on_message(self, message):
        """Record message stats for guild text channels, ignoring the bot itself."""
        if message.author == self.bot.user:
            return
        if not isinstance(message.channel, TextChannel):
            return
        await self.stats.handle_message_stat(message)

    @commands.Cog.listener()
    async def on_typing(self, channel, user, when):
        """Placeholder: filters out the bot and non-text channels, records nothing yet."""
        if user == self.bot.user:
            return
        if not isinstance(channel, TextChannel):
            return

    @commands.Cog.listener()
    async def on_raw_message_delete(self, payload):
        message = payload.cached_message  # None when the message was not in the cache
        return

    @commands.Cog.listener()
    async def on_raw_bulk_message_delete(self, payload):
        messages = payload.cached_messages
        return

    @commands.Cog.listener()
    async def on_raw_message_edit(self, payload):
        # BUG FIX: RawMessageUpdateEvent exposes `cached_message` (singular);
        # the old `payload.cached_messages` raised AttributeError on every edit.
        message = payload.cached_message
        return

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        return

    @commands.Cog.listener()
    async def on_reaction_remove(self, reaction, user):
        return

    @commands.Cog.listener()
    async def on_member_join(self, member):
        return

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        return

    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        # BUG FIX: discord.py dispatches on_member_update with (before, after);
        # the previous single-parameter signature raised TypeError when fired.
        return

    @commands.Cog.listener()
    async def on_user_update(self, before, after):
        # BUG FIX: on_user_update is dispatched with (before, after) as well.
        return

    @commands.Cog.listener()
    async def on_member_ban(self, guild, user):
        return

    @commands.Cog.listener()
    async def on_member_unban(self, guild, user):
        return
def setup(bot):
    # Extension entry point: discord.py calls this from bot.load_extension().
    bot.add_cog(StatCollector(bot))
| 23.458824 | 56 | 0.654965 | 1,812 | 0.908726 | 0 | 0 | 1,561 | 0.782849 | 1,184 | 0.593781 | 0 | 0 |
a8f671f7ebe45c7c676519c46355c82e84f18cea | 1,369 | py | Python | dredis/gc.py | keang/dredis | 520b3c10a1cee6de9d0f73cd2c43298ce3f9598a | [
"MIT"
] | 53 | 2018-09-19T15:19:09.000Z | 2022-03-06T17:05:32.000Z | dredis/gc.py | keang/dredis | 520b3c10a1cee6de9d0f73cd2c43298ce3f9598a | [
"MIT"
] | 31 | 2018-09-19T16:45:46.000Z | 2021-05-05T15:12:20.000Z | dredis/gc.py | keang/dredis | 520b3c10a1cee6de9d0f73cd2c43298ce3f9598a | [
"MIT"
] | 5 | 2018-09-19T16:42:25.000Z | 2022-03-07T11:36:57.000Z | import threading
import time
from dredis.db import NUMBER_OF_REDIS_DATABASES, DB_MANAGER, KEY_CODEC
DEFAULT_GC_INTERVAL = 500  # milliseconds between garbage-collection passes
DEFAULT_GC_BATCH_SIZE = 10000  # number of storage keys to delete in a batch
class KeyGarbageCollector(threading.Thread):
    """Background thread that physically removes keys marked as deleted.

    Every ``gc_interval`` milliseconds it scans each Redis database for
    tombstone markers (keys prefixed with ``KEY_CODEC.MIN_DELETED_VALUE``)
    and deletes the storage keys they point to, at most ``batch_size``
    deletions per pass.
    """
    def __init__(self, gc_interval=DEFAULT_GC_INTERVAL, batch_size=DEFAULT_GC_BATCH_SIZE):
        threading.Thread.__init__(self, name="Key Garbage Collector")
        self._gc_interval_in_secs = gc_interval / 1000.0  # convert ms to seconds
        self._batch_size = batch_size
    def run(self):
        # Runs forever; thread is expected to be started as a daemon by the caller.
        while True:
            self.collect()
            time.sleep(self._gc_interval_in_secs)
    def collect(self):
        """Run one garbage-collection pass over every database."""
        for db_id in range(NUMBER_OF_REDIS_DATABASES):
            # Hold the manager lock so collection does not race with commands.
            with DB_MANAGER.thread_lock:
                self._collect(DB_MANAGER.get_db(db_id))
    def _collect(self, db):
        # Delete up to self._batch_size storage keys referenced by tombstones.
        deleted = 0
        with db.write_batch() as batch:
            for deleted_db_key, _ in db.iterator(prefix=KEY_CODEC.MIN_DELETED_VALUE):
                _, _, deleted_key_value = KEY_CODEC.decode_key(deleted_db_key)
                for db_key, _ in db.iterator(prefix=deleted_key_value):
                    deleted += 1
                    batch.delete(db_key)
                    if deleted == self._batch_size:
                        # Batch budget exhausted: return WITHOUT deleting the
                        # tombstone so the next pass resumes this key.
                        return
                # All keys under this tombstone removed: drop the marker itself.
                batch.delete(deleted_db_key)
a8f6ec64b56c58fb68898e4ec50ed4fb8d84702a | 431 | py | Python | week08/states_utils.py | thashmadech/is445_spring2022 | 034f71ca545bf06fb2491d818ceb3f8dd6bba8b7 | [
"BSD-3-Clause"
] | 1 | 2019-08-11T04:03:24.000Z | 2019-08-11T04:03:24.000Z | week08/states_utils.py | thashmadech/is445_spring2022 | 034f71ca545bf06fb2491d818ceb3f8dd6bba8b7 | [
"BSD-3-Clause"
] | 1 | 2020-03-02T00:11:33.000Z | 2020-03-02T00:11:33.000Z | week08/states_utils.py | thashmadech/is445_spring2022 | 034f71ca545bf06fb2491d818ceb3f8dd6bba8b7 | [
"BSD-3-Clause"
] | 5 | 2022-01-30T19:45:48.000Z | 2022-03-07T04:15:37.000Z | import numpy as np
def get_ids_and_names(states_map):
    """Return the subunit ids and names contained in a map figure.

    # Arguments:
        states_map: a map mark whose ``map_data`` attribute holds
            TopoJSON-style data with an ``['objects']['subunits']['geometries']``
            list of ``{'id': ..., 'properties': {'name': ...}}`` entries.

    # Returns:
        tuple: ``(ids, names)`` -- two parallel ``np.ndarray``s with the id and
        the ``properties['name']`` of every geometry whose ``properties`` is
        not None (entries without properties are skipped).
    """
    ids = []
    state_names = []
    geometries = states_map.map_data['objects']['subunits']['geometries']
    # Iterate directly over the geometries instead of indexing by position.
    for geometry in geometries:
        properties = geometry['properties']
        if properties is not None:
            state_names.append(properties['name'])
            ids.append(geometry['id'])
    return np.array(ids), np.array(state_names)
| 39.181818 | 77 | 0.663573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.150812 |
a8fb94dedf1844c0d8c37c6a506f33161aed70db | 464 | py | Python | 1005.py | TheLurkingCat/TIOJ | 077e1cd22239d8f6bc1cd7561f27c68143e80263 | [
"MIT"
] | 1 | 2018-10-21T10:03:42.000Z | 2018-10-21T10:03:42.000Z | 1005.py | TheLurkingCat/TIOJ | 077e1cd22239d8f6bc1cd7561f27c68143e80263 | [
"MIT"
] | null | null | null | 1005.py | TheLurkingCat/TIOJ | 077e1cd22239d8f6bc1cd7561f27c68143e80263 | [
"MIT"
] | 2 | 2018-10-12T16:40:11.000Z | 2021-04-05T12:05:36.000Z | from itertools import combinations
from math import gcd, sqrt
a = int(input())  # number of samples in the next data set; 0 terminates input
while a:
    s = set()  # distinct sampled values (duplicate inputs are collapsed)
    total = 0
    coprime = 0
    for _ in range(a):
        s.add(int(input()))
    # Count coprime pairs among the distinct samples.  P(two random integers
    # are coprime) -> 6/pi^2, so pi is estimated as sqrt(6 * total / coprime).
    for (x, y) in combinations(list(s), 2):
        total += 1
        if gcd(x, y) == 1:
            coprime += 1
    try:
        print('{:.6f}'.format(sqrt(6 * total / coprime)))
    except ZeroDivisionError:
        # No coprime pair was found, so the estimate is undefined.
        print('No estimate for this data set.')
    a = int(input())
a8fbf8ce338a6262976301ea199c5ca131183f5f | 49 | py | Python | reloader/__init__.py | gerardroche/AutomaticPackageReloader | e90c22a50f6bfb195394cc6eedab0e7977a0011d | [
"MIT"
] | 30 | 2017-03-05T12:28:31.000Z | 2022-03-23T11:32:23.000Z | reloader/__init__.py | gerardroche/AutomaticPackageReloader | e90c22a50f6bfb195394cc6eedab0e7977a0011d | [
"MIT"
] | 34 | 2017-03-14T05:59:58.000Z | 2021-08-24T16:25:05.000Z | reloader/__init__.py | randy3k/PackageReloader | 1255fcb0bc8effb66956e2240c42b7ae10615860 | [
"MIT"
] | 16 | 2017-03-09T12:03:21.000Z | 2019-10-18T08:19:37.000Z | from .reloader import reload_package, load_dummy
| 24.5 | 48 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a8fc954825cc770935a579719186567eddd9a42d | 1,899 | py | Python | tests/bugs/core_2361_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_2361_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_2361_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_2361
# title: String truncation reading 8859-1 Spanish column using isc_dsql_fetch with UTF-8 connection..
# decription:
# tracker_id: CORE-2361
# min_versions: []
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []

# Fixture DDL/DML: an ISO8859_1 Spanish-collated column with sample rows,
# used to reproduce string truncation when fetched over a UTF-8 connection.
init_script_1 = """create table "'Master by Reseller$'" (
"Tier" VARCHAR(20) CHARACTER SET ISO8859_1 COLLATE ES_ES_CI_AI
);
commit;
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('(blank)');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('Approved');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('Bronze');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('DMR');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('Domestic Distributor');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('End-User');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('Evaluation');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('Gold');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('New');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('Silver');
insert into "'Master by Reseller$'" ( "Tier" ) VALUES ('VAM');
commit;
"""

db_1 = db_factory(page_size=4096, charset='ISO8859_1', sql_dialect=3, init=init_script_1)

# The CASE expression forces the server to compute a derived string column.
test_script_1 = """select case when 1 = 0 then '(blank)' else "'Master by Reseller$'"."Tier" end from "'Master by Reseller$'";
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

# Expected ISQL output: all eleven tier values, none truncated.
expected_stdout_1 = """
CASE
====================
(blank)
Approved
Bronze
DMR
Domestic Distributor
End-User
Evaluation
Gold
New
Silver
VAM
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    """Run the test script and compare the cleaned ISQL output to the expectation."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| 27.128571 | 126 | 0.674039 | 0 | 0 | 0 | 0 | 183 | 0.096367 | 0 | 0 | 1,414 | 0.744602 |
a8fcc6ac8aab4f48607db30ce376a669495d6728 | 229 | py | Python | lorem/data.py | Ahsoka/python-lorem | 5252a5819fcdf87955794a4f1d06284d152e2c8a | [
"MIT"
] | 21 | 2016-06-16T22:33:40.000Z | 2022-03-13T22:56:39.000Z | lorem/data.py | Ahsoka/python-lorem | 5252a5819fcdf87955794a4f1d06284d152e2c8a | [
"MIT"
] | null | null | null | lorem/data.py | Ahsoka/python-lorem | 5252a5819fcdf87955794a4f1d06284d152e2c8a | [
"MIT"
# Vocabulary used to generate lorem-ipsum text.
# BUG FIX: the adjacent string literals previously lacked separating spaces,
# so implicit concatenation fused the words at each line boundary
# ("et" + "incidunt" -> "etincidunt", "qui" + "quia" -> "quiquia").
WORDS = ("adipisci aliquam amet consectetur dolor dolore dolorem eius est et "
         "incidunt ipsum labore magnam modi neque non numquam porro quaerat qui "
         "quia quisquam sed sit tempora ut velit voluptatem").split()
| 57.25 | 80 | 0.733624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.829694 |
a8fdf92716ffae4bc0c6399c929dc082d01dc0eb | 886 | py | Python | motto/readers.py | attakei/jamproject | f3a677f4f95c112b89fb38957e6ba1a3c923ea85 | [
"Apache-2.0"
] | null | null | null | motto/readers.py | attakei/jamproject | f3a677f4f95c112b89fb38957e6ba1a3c923ea85 | [
"Apache-2.0"
] | 1 | 2020-01-05T14:04:35.000Z | 2020-01-05T14:04:35.000Z | motto/readers.py | attakei/motto | f3a677f4f95c112b89fb38957e6ba1a3c923ea85 | [
"Apache-2.0"
] | null | null | null | """Core custom readers for docutils
"""
from typing import List, Type
from docutils import readers
from docutils.transforms import Transform
from .skill import SkillBase
from .transforms import InitializeReportTransform, TokenizeTransform
class Reader(readers.Reader):
"""Basic custom reader class.
Includes
- Tokenize transform
- Skills
"""
def __init__(self, parser=None, parser_name=None):
super().__init__(parser=parser, parser_name=parser_name)
self._skills: List[SkillBase] = []
def add_skill(self, skill: SkillBase):
self._skills.append(skill)
def get_transforms(self) -> List[Type[Transform]]:
"""Return all transforms.
"""
transforms = super().get_transforms()
transforms += [TokenizeTransform, InitializeReportTransform]
transforms += self._skills
return transforms
| 27.6875 | 68 | 0.694131 | 644 | 0.726862 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.18623 |
d100ae3537b01cb4189c0cbb205089a37a69ed98 | 14,136 | py | Python | flybrainlab/utilities/neurometry.py | FlyBrainLab/FBLClient | c85de23d428a38fe13491b2f5eb30b690610108e | [
"BSD-3-Clause"
] | 3 | 2020-07-23T05:51:22.000Z | 2021-12-24T11:40:30.000Z | flybrainlab/utilities/neurometry.py | FlyBrainLab/FBLClient | c85de23d428a38fe13491b2f5eb30b690610108e | [
"BSD-3-Clause"
] | 3 | 2020-07-31T05:08:35.000Z | 2021-01-08T17:55:16.000Z | flybrainlab/utilities/neurometry.py | FlyBrainLab/FBLClient | c85de23d428a38fe13491b2f5eb30b690610108e | [
"BSD-3-Clause"
] | 1 | 2019-02-03T02:03:00.000Z | 2019-02-03T02:03:00.000Z | import pandas as pd
import numpy as np
from scipy.spatial.distance import pdist
from sklearn.metrics import pairwise_distances
import networkx as nx
def generate_neuron_stats(_input, scale = 'mum', scale_coefficient = 1., log=False):
    """Generates statistics for a given neuron.

    # Arguments:
        _input (str or np.array): Name of the SWC file to use, or the numpy array to get as input.
        scale (str): Optional. Name of the measurement scale. Default to 'mum'.
        scale_coefficient (float): Optional. A number to multiply the coordinates with if needed. Defaults to 1.
        log (bool): Optional. Whether to print the computed statistics. Defaults to False.

    # Returns:
        dict: A result dictionary with all results.
    """
    if isinstance(_input, str):
        a = pd.read_csv(_input, sep=' ', header=None, comment='#')
        X = a.values
    else:
        # BUG FIX: copy the input so the in-place coordinate scaling below
        # does not mutate the caller's array.
        X = np.array(_input)
    if X.shape[1] > 7:
        # Keep only the last 7 SWC columns: sample, identifier, x, y, z, r, parent.
        X = X[:, X.shape[1]-7:]
    G = nx.DiGraph()
    distance = 0
    surface_area = 0
    volume = 0
    X[:,2:5] = X[:,2:5] * scale_coefficient
    for i in range(X.shape[0]):
        if X[i,6] != -1:  # -1 marks the root sample (no parent segment)
            G.add_node(i)
            parent = np.where(X[:,0] == X[i,6])[0][0]
            x_parent = X[parent,2:5]
            x = X[i,2:5]
            h = np.sqrt(np.sum(np.square(x_parent-x)))  # segment length
            G.add_edge(parent,i,weight=h)
            distance += h
            r_parent = X[parent,5]
            r = X[i,5]
            # Lateral surface area and volume of the frustum (truncated cone)
            # between this sample and its parent.
            surface_area += np.pi * (r + r_parent) * np.sqrt(np.square(r-r_parent)+np.square(h))
            volume += np.pi/3.*(r*r+r*r_parent+r_parent*r_parent)*h
    XX = X[:,2:5]
    # Axis-aligned bounding-box extents (orientation variant).
    w = np.abs(np.max(XX[:,0])-np.min(XX[:,0]))
    h = np.abs(np.max(XX[:,1])-np.min(XX[:,1]))
    d = np.abs(np.max(XX[:,2])-np.min(XX[:,2]))
    # Each duplicated parent id corresponds to one bifurcation point.
    bifurcations = len(X[:,6])-len(np.unique(X[:,6]))
    max_euclidean_dist = np.max(pdist(XX))
    max_path_dist = nx.dag_longest_path_length(G)  # uses the 'weight' edge attribute
    if log:  # idiom fix: was `if log == True`
        print('Total Length: ', distance, scale)
        print('Total Surface Area: ', surface_area, scale+'^2')
        print('Total Volume: ', volume, scale+'^3')
        print('Maximum Euclidean Distance: ', max_euclidean_dist, scale)
        print('Width (Orientation Variant): ', w, scale)
        print('Height (Orientation Variant): ', h, scale)
        print('Depth (Orientation Variant): ', d, scale)
        print('Average Diameter: ', 2*np.mean(X[:,5]), scale)
        print('Number of Bifurcations:', bifurcations)
        print('Max Path Distance: ', max_path_dist, scale)
    results = {}
    results['Total Length'] = distance
    results['Total Surface Area'] = surface_area
    results['Total Volume'] = volume
    results['Maximum Euclidean Distance'] = max_euclidean_dist
    results['Width (Orientation Variant)'] = w
    results['Height (Orientation Variant)'] = h
    results['Depth (Orientation Variant)'] = d
    results['Average Diameter'] = 2*np.mean(X[:,5])
    results['Number of Bifurcations'] = bifurcations
    results['Max Path Distance'] = max_path_dist
    return results
def generate_naquery_neuron_stats(res, node):
    """Compute morphometric statistics for one node of an NAqueryResult.
    # Arguments:
        res (NAqueryResult): query result holding the morphology graph.
        node (str): id of the graph node whose SWC data should be used.
    # Returns:
        dict: morphometric measurements, as produced by generate_neuron_stats.
    """
    node_data = res.graph.nodes[node]
    # Assemble the seven SWC columns (sample, identifier, x, y, z, r, parent)
    # into an (n_samples, 7) matrix matching the on-disk SWC column order.
    swc_columns = ('sample', 'identifier', 'x', 'y', 'z', 'r', 'parent')
    swc_matrix = np.vstack([np.array(node_data[col]) for col in swc_columns]).T
    return generate_neuron_stats(swc_matrix)
def morphometrics(res):
    """Compute the morphometric measurements of neurons in an NAqueryResult.
    # Arguments:
        res (flybrainlab.graph.NAqueryResult): query result from a NeuroArch query.
    # Returns
        pandas.DataFrame: morphometric measurements in each row and neuron
        unames in each column.
    """
    swc_columns = ('sample', 'identifier', 'x', 'y', 'z', 'r', 'parent')
    metrics = {}
    for rid, attributes in res.neurons.items():
        # Keep only SWC-format MorphologyData nodes attached to this neuron.
        morphology_data = [
            res.graph.nodes[n] for n in res.getData(rid)
            if res.graph.nodes[n]['class'] == 'MorphologyData'
            and res.graph.nodes[n]['morph_type'] == 'swc'
        ]
        if not morphology_data:
            continue
        swc = morphology_data[0]
        # Rebuild the (n_samples, 7) SWC matrix expected by generate_neuron_stats.
        swc_matrix = np.vstack([np.array(swc[col]) for col in swc_columns]).T
        metrics[attributes['uname']] = generate_neuron_stats(swc_matrix)
    return pd.DataFrame.from_dict(metrics)
def generate_neuron_shape(_input, scale = 'mum', scale_coefficient = 1., log=False):
    """Build the shape structures for the specified neuron.
    # Arguments:
        _input (str or np.array): SWC file name, or an already-loaded SWC matrix.
        scale (str): Optional. Name of the measurement scale. Defaults to 'mum'.
        scale_coefficient (float): Optional. Multiplier applied to the x/y/z
            coordinate columns. Defaults to 1.
        log (bool): Optional. Unused here; kept for signature symmetry with
            generate_neuron_stats.
    # Returns:
        X: matrix with the (scaled) contents of the input.
        G: directed networkx graph with one node per SWC sample and edges
           parent->child weighted by the Euclidean segment length.
        distances: list of all parent-to-child segment lengths.
    """
    if isinstance(_input, str):
        X = pd.read_csv(_input, sep=' ', header=None, comment='#').values
    else:
        X = _input
    # Some SWC exports carry extra leading columns; keep only the last seven
    # (sample, identifier, x, y, z, r, parent).
    if X.shape[1] > 7:
        X = X[:, X.shape[1] - 7:]
    G = nx.DiGraph()
    # NOTE: scales the coordinate columns in place, so an array passed in as
    # _input is modified by this call.
    X[:, 2:5] = X[:, 2:5] * scale_coefficient
    distances = []
    for row in range(X.shape[0]):
        own_xyz = X[row, 2:5]
        if X[row, 6] == -1:
            # Root sample: treat it as its own parent for position bookkeeping.
            G.add_node(row, position_data=own_xyz,
                       parent_position_data=own_xyz, r=X[row, 5])
            continue
        # Parent column stores the parent's SWC sample id; map it to a row index.
        parent_row = np.where(X[:, 0] == X[row, 6])[0][0]
        parent_xyz = X[parent_row, 2:5]
        G.add_node(row, position_data=own_xyz,
                   parent_position_data=parent_xyz, r=X[row, 5])
        segment_len = np.sqrt(np.sum(np.square(parent_xyz - own_xyz)))
        G.add_edge(parent_row, row, weight=segment_len)
        distances.append(segment_len)
    return X, G, distances
def fix_swc(swc_file, new_swc_file,
            percentile_cutoff = 50,
            similarity_cutoff = 0.40,
            distance_multiplier = 5):
    """Tries to fix connectivity errors in a given swc file.

    Endpoint pairs whose branch directions are nearly (anti)parallel and that
    lie close together are assumed to belong to the same broken neurite and
    are joined by duplicating one endpoint as a child of the other.
    # Arguments:
        swc_file (str or np.array): Name of the file to use, or the numpy array to get as input.
        new_swc_file (str): Name of the new swc file to use as output.
        percentile_cutoff (int): Optional. Percentile of the existing inter-node
            distances used as the base cutoff for connecting two nodes. Defaults to 50.
        similarity_cutoff (float): Optional. Absolute cosine similarity cutoff between
            two endpoints' branch directions. Defaults to 0.40.
        distance_multiplier (float): Optional. Multiplier applied to the base cutoff
            distance. Defaults to 5.
    # Returns:
        G: A directed networkx graph with the contents of the input.
        G_d: A directed networkx graph with the contents of the input after the fixes.
    """
    X, G, distances = generate_neuron_shape(swc_file)
    # Collect all leaf nodes together with the unit direction of the segment
    # leading into them (zero vector for roots / zero-length segments).
    endpoints = []
    endpoint_dirs = []
    for i in G.nodes():
        if len(list(G.successors(i))) == 0:
            endpoints.append(i)
            direction = G.nodes()[i]['position_data'] - G.nodes()[i]['parent_position_data']
            if np.sqrt(np.sum(np.square(direction))) > 0.:
                direction = direction / np.sqrt(np.sum(np.square(direction)))
            endpoint_dirs.append(direction)
    endpoint_dirs = np.array(endpoint_dirs)
    distance_cutoff = np.percentile(distances, percentile_cutoff)
    X_additions = []
    X_a_idx = int(np.max(X[:, 0])) + 1  # next free SWC sample id
    G_d = G.copy()
    for idx_a_i in range(len(endpoints)):
        for idx_b_j in range(idx_a_i + 1, len(endpoints)):
            idx_a = endpoints[idx_a_i]
            idx_b = endpoints[idx_b_j]
            # |cos| similarity: accept both parallel and antiparallel branches.
            if np.abs(np.sum(np.multiply(endpoint_dirs[idx_a_i], endpoint_dirs[idx_b_j]))) > similarity_cutoff:
                x = X[idx_b, 2:5]
                x_parent = X[idx_a, 2:5]
                if np.sqrt(np.sum(np.square(x_parent - x))) < distance_multiplier * distance_cutoff:
                    # Duplicate endpoint b as a new sample parented to endpoint a.
                    X_additions.append([X_a_idx, 0, X[idx_b, 2], X[idx_b, 3],
                                        X[idx_b, 4], X[idx_b, 5], X[idx_a, 0]])
                    X_a_idx += 1
                    G_d.add_edge(idx_a, idx_b)
    # Bug fix: np.vstack fails when X_additions is an empty (0,) array, so
    # only stack when at least one repair row was generated.
    if len(X_additions):
        X_all = np.vstack((X, np.array(X_additions)))
    else:
        X_all = X
    X_pd = pd.DataFrame(X_all)
    # SWC requires integer sample ids, types, and parent ids.
    X_pd[0] = X_pd[0].astype(int)
    X_pd[1] = X_pd[1].astype(int)
    X_pd[6] = X_pd[6].astype(int)
    X_pd.to_csv(new_swc_file, sep=' ', header=None, index=None)
    return G, G_d
def fix_swc_components(swc_file, new_swc_file,
                       percentile_cutoff = 50,
                       similarity_cutoff = 0.40,
                       distance_multiplier = 5):
    """Tries to fix connectivity errors in a given swc file and connect disconnected components.
    # Arguments:
        swc_file (str or np.array): Name of the file to use, or the numpy array to get as input.
        new_swc_file (str): Name of the new swc file to use as output.
        percentile_cutoff (int): Optional. Percentile to use for inter-node cutoff distance during reconstruction for connecting two nodes. Defaults to 50.
        similarity_cutoff (float): Optional. Cosine similarity cutoff value between two endpoints' branches during reconstruction. Defaults to 0.40.
        distance_multiplier (float): Optional. A multiplier to multiply percentile_cutoff with. Defaults to 5.
    # Returns:
        G: A directed networkx graph with the contents of the input.
        G_d: A directed networkx graph with the contents of the input after the fixes.
        G_d_uncon: An undirected networkx graph with the contents of the input after the fixes with no disconnected components.
    """
    X, G, distances = generate_neuron_shape(swc_file)
    # Collect leaf nodes, their positions, and the unit direction of the
    # segment leading into each of them.
    endpoints = []
    endpoint_vectors = []
    endpoint_dirs = []
    for i in G.nodes():
        if len(list(G.successors(i)))==0:
            endpoints.append(i)
            endpoint_vectors.append(G.nodes()[i]['position_data'])
            direction = G.nodes()[i]['position_data'] - G.nodes()[i]['parent_position_data']
            # Normalize; zero-length segments (e.g. roots) keep a zero vector.
            if np.sqrt(np.sum(np.square(direction)))>0.:
                direction = direction / np.sqrt(np.sum(np.square(direction)))
            endpoint_dirs.append(direction)
    endpoint_vectors = np.array(endpoint_vectors)
    endpoint_dirs = np.array(endpoint_dirs)
    distance_cutoff = np.percentile(distances,percentile_cutoff)
    X_additions = []
    # Next free SWC sample id for repair rows.
    X_a_idx = int(np.max(X[:,0]))+1
    G_d = G.copy()
    # Pass 1: join endpoint pairs whose branch directions are nearly
    # (anti)parallel (|cos| similarity) and that lie close together.
    for idx_a_i in range(len(endpoints)):
        for idx_b_j in range(idx_a_i+1,len(endpoints)):
            idx_a = endpoints[idx_a_i]
            idx_b = endpoints[idx_b_j]
            if np.abs(np.sum(np.multiply(endpoint_dirs[idx_a_i],endpoint_dirs[idx_b_j])))>similarity_cutoff:
                x = X[idx_b,2:5]
                x_parent = X[idx_a,2:5]
                if np.sqrt(np.sum(np.square(x_parent-x)))<distance_multiplier * distance_cutoff:
                    # Duplicate endpoint b as a new sample parented to endpoint a.
                    X_additions.append([X_a_idx,0,X[idx_b,2],X[idx_b,3],X[idx_b,4],X[idx_b,5],X[idx_a,0]])
                    X_a_idx += 1
                    G_d.add_edge(idx_a, idx_b)
    # Pass 2: on the undirected view, repeatedly bridge the two closest
    # connected components until only one remains.
    G_d_uncon = nx.Graph(G_d)
    processing = True
    X_disconnected_additions = []
    while processing == True:
        components = []
        for component in nx.connected_components(G_d_uncon):
            components.append(list(component))
        if len(components)<2:
            processing = False
        else:
            print(len(components))
            # Per component, gather its endpoints and their positions.
            components_endpoints = []
            component_matrices = []
            for component in components:
                component_endpoints = []
                component_matrix = []
                for i in component:
                    if i in endpoints:
                        component_endpoints.append(i)
                        component_matrix.append(G_d_uncon.nodes()[i]['position_data'])
                component_matrix = np.array(component_matrix)
                components_endpoints.append(component_endpoints)
                component_matrices.append(component_matrix)
            # NOTE(review): 10000. acts as a hard search-radius ceiling; pairs
            # farther apart than this are never bridged — confirm units fit.
            max_dist = 10000.
            min_a = 0
            min_b = 0
            min_vals = None
            # Find the globally closest endpoint pair across all component pairs.
            # pairwise_distances is presumably sklearn.metrics — verify import.
            for component_idx in range(len(components)):
                for component_idx_b in range(component_idx+1, len(components)):
                    DD = pairwise_distances(component_matrices[component_idx], component_matrices[component_idx_b])
                    if np.min(DD)<max_dist:
                        max_dist = np.min(DD)
                        min_a = component_idx
                        min_b = component_idx_b
                        min_vals = np.unravel_index(DD.argmin(), DD.shape)
            G_d_uncon.add_edge(components_endpoints[min_a][min_vals[0]], components_endpoints[min_b][min_vals[1]])
            print(min_a, min_b)
            # New repair row: duplicate component a's endpoint, parented to
            # component b's endpoint.
            X_disconnected_additions.append([X_a_idx,0,X[components_endpoints[min_a][min_vals[0]],2],X[components_endpoints[min_a][min_vals[0]],3],X[components_endpoints[min_a][min_vals[0]],4],X[components_endpoints[min_a][min_vals[0]],5],X[components_endpoints[min_b][min_vals[1]],0]])
            X_a_idx += 1
    X_additions = np.array(X_additions)
    X_disconnected_additions = np.array(X_disconnected_additions)
    # NOTE(review): if either additions list is empty, np.vstack with a (0,)
    # array raises ValueError — consider guarding as in fix_swc.
    X_all = np.vstack((X, X_additions, X_disconnected_additions))
    X_pd = pd.DataFrame(X_all)
    # SWC requires integer sample ids, types, and parent ids.
    X_pd[0] = X_pd[0].astype(int)
    X_pd[1] = X_pd[1].astype(int)
    X_pd[6] = X_pd[6].astype(int)
    X_pd.to_csv(new_swc_file, sep=' ', header=None, index=None)
    return G, G_d, G_d_uncon
d1010b9ee7ff4151215a648eb88588c6174fb854 | 2,495 | py | Python | server/external/youtube-dl/youtube_dl/extractor/promptfile.py | yycc179/urlp | d272b74c4aed18f03ccada8817ecf5c572a1bf71 | [
"MIT"
] | null | null | null | server/external/youtube-dl/youtube_dl/extractor/promptfile.py | yycc179/urlp | d272b74c4aed18f03ccada8817ecf5c572a1bf71 | [
"MIT"
] | null | null | null | server/external/youtube-dl/youtube_dl/extractor/promptfile.py | yycc179/urlp | d272b74c4aed18f03ccada8817ecf5c572a1bf71 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
urlencode_postdata,
)
class PromptFileIE(InfoExtractor):
    """Extractor for promptfile.com download pages.
    The site hides the direct media URL behind a POST form whose hidden
    fields must be echoed back with a per-page hash ('chash') prepended
    to one of them.
    """
    _VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
    _TEST = {
        'url': 'http://www.promptfile.com/l/86D1CE8462-576CAAE416',
        'md5': '5a7e285a26e0d66d9a263fae91bc92ce',
        'info_dict': {
            'id': '86D1CE8462-576CAAE416',
            'ext': 'mp4',
            'title': 'oceans.mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The site serves its "not found" page with HTTP 200; detect it from
        # the markup instead of the status code.
        if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)
        # Per-page hash that the site's JS prepends to one hidden form field.
        chash = self._search_regex(
            r'val\("([^"]*)"\s*\+\s*\$\("#chash"\)', webpage, 'chash')
        fields = self._hidden_inputs(webpage)
        keys = list(fields.keys())
        # The hash field is the only hidden input, or the one named 'cha...'.
        chash_key = keys[0] if len(keys) == 1 else next(
            key for key in keys if key.startswith('cha'))
        fields[chash_key] = chash + fields[chash_key]
        # Re-POST the form to obtain the page that carries the download link.
        webpage = self._download_webpage(
            url, video_id, 'Downloading video page',
            data=urlencode_postdata(fields),
            headers={'Content-type': 'application/x-www-form-urlencoded'})
        video_url = self._search_regex(
            (r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1[^>]*>\s*Download File',
             r'<a[^>]+href=(["\'])(?P<url>https?://(?:www\.)?promptfile\.com/file/(?:(?!\1).)+)\1'),
            webpage, 'video url', group='url')
        title = self._html_search_regex(
            r'<span.+title="([^"]+)">', webpage, 'title')
        thumbnail = self._html_search_regex(
            r'<div id="player_overlay">.*button>.*?<img src="([^"]+)"',
            webpage, 'thumbnail', fatal=False, flags=re.DOTALL)
        # Single progressive format; the extension is taken from the title,
        # which on this site is the original file name.
        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'ext': determine_ext(title),
        }]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| 35.140845 | 101 | 0.515832 | 2,289 | 0.917435 | 0 | 0 | 0 | 0 | 0 | 0 | 799 | 0.32024 |
d10113d3430723c74844cb610a13bb918fa54c11 | 46 | py | Python | bunruija/modules/__init__.py | tma15/bunruija | 64a5c993a06e9de75f8f382cc4b817f91965223f | [
"MIT"
] | 4 | 2020-12-22T11:12:35.000Z | 2021-12-15T13:30:02.000Z | bunruija/modules/__init__.py | tma15/bunruija | 64a5c993a06e9de75f8f382cc4b817f91965223f | [
"MIT"
] | 4 | 2021-01-16T07:34:22.000Z | 2021-08-14T06:56:07.000Z | bunruija/modules/__init__.py | tma15/bunruija | 64a5c993a06e9de75f8f382cc4b817f91965223f | [
"MIT"
] | null | null | null | from .static_embedding import StaticEmbedding
| 23 | 45 | 0.891304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d103d48eee96afdf2e4285944634993cf755898d | 23,504 | py | Python | RPiAntDrv.py | N7IFC/RPi_Antenna_Driver | 57cf57b6093f893b7e47fda64d721ec77234b032 | [
"MIT"
] | null | null | null | RPiAntDrv.py | N7IFC/RPi_Antenna_Driver | 57cf57b6093f893b7e47fda64d721ec77234b032 | [
"MIT"
] | 1 | 2020-05-20T12:35:51.000Z | 2020-05-20T12:35:51.000Z | RPiAntDrv.py | N7IFC/RPi_Antenna_Driver | 57cf57b6093f893b7e47fda64d721ec77234b032 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
##################################################################
#
# Raspberry Pi Antenna Driver (RPiAntDrv.py)
#
# Python GUI script to control H-Bridge via RPi.
# H-Bridge drives single DC motor tuned antenna.
#
# Name Call Date(s)
# Authors: Bill Peterson N7IFC Mar-May2020
#
##################################################################
from tkinter import Tk, ttk, messagebox, Frame, Menu, Label, Button
from tkinter import Scale, IntVar, StringVar, Toplevel
from tkinter import RAISED, HORIZONTAL, LEFT, S, W, SW, NW
from pathlib import Path
import configparser
import RPi.GPIO as GPIO
class Window(Frame):
    """Main application window for the Raspberry Pi antenna driver GUI."""
    # Define settings upon initialization
    def __init__(self, master=None):
        """Initialize runtime state and build the GUI.
        master: the Tk root window this frame is attached to.
        """
        # parameters to send through the Frame class.
        Frame.__init__(self, master)
        #reference to the master widget, which is the tk window
        self.master = master
        # Retrieve parent script directory for absolute addressing
        self.base_path = Path(__file__).parent
        self.ini_path = str(self.base_path)+'/RPiAntDrv.ini'
        #print (self.ini_path)
        # Raspberry Pi I/O pins get reassigned when ini file is read
        self.pwm_freq = 4000 # PWM Freq in Hz
        self.pwm_duty = 0 # PWM Duty in percent, default to 0%
        self.stall_time = 250 # Motor stall time in mS
        self.encoder_count = IntVar() # Antenna reed switch count
        self.encoder_count.set(0)
        self.motor_running = False # Motor running flag
        self.motor_stalled = False # Motor stalled flag
        self.stall_active = False # Stall detection active
        self.stall_count = 0 # Encoder count during stall detection
        self.full_speed = 100 # Full speed PWM duty cycle
        self.slow_speed = 25 # Slow speed PWM duty cycle
        self.antenna_raising = False # Motor direction flag
        self.ant_config_sect = ("null") # Active ini file config section
        self.ant_preset_sect = ("null") # Active ini file preset section
        self.ant_preset_val = 0 # Preset encoder target value from ini presets
        self.status_message = StringVar() # Status message text for text_2
        # Build all widgets and load configuration/GPIO state
        self.init_window()
    # Creation of the main window layout
    def init_window(self):
        """Build menus, labels, buttons, and comboboxes, then load the ini
        file and configure the GPIO hardware."""
        self.master.title('RPi Antenna Driver (v1.6)')
        # Set up root window & size (width x height + x_offset + y_offset)
        self.bg_color = 'azure'
        self.master.geometry("350x275+150+100")
        self.master.configure(bg= self.bg_color)
        # Create menu entry and sub-options
        menubar = Menu(self.master)
        self.master.config(menu=menubar)
        filemenu = Menu(menubar, tearoff=0)
        # NOTE(review): Open/Save/"Save as" are placeholders wired to about()
        filemenu.add_command(label="Open", command=self.about)
        filemenu.add_command(label="Save", command=self.about)
        filemenu.add_command(label="Save as...", command=self.about)
        filemenu.add_separator()
        filemenu.add_command(label="Quit", command=self.close)
        menubar.add_cascade(label="File", menu=filemenu)
        editmenu = Menu(menubar, tearoff=0)
        editmenu.add_command(label="Default ini", command=self.confirm_newini)
        editmenu.add_command(label="Sync Count", command=self.confirm_sync)
        # NOTE(review): "Undefined 2" is a placeholder wired to about()
        editmenu.add_command(label="Undefined 2", command=self.about)
        menubar.add_cascade(label="Edit", menu=editmenu)
        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", command=self.about)
        menubar.add_cascade(label="Help", menu=helpmenu)
        # Large live readout of the encoder count
        text_1 = Label(textvariable=self.encoder_count, font = ('Helvetica', 30),
                       bg = self.bg_color, fg='black', pady=5, height=1)
        text_1.grid(row=0, column=0, rowspan=2, pady=1, sticky=S)
        text_2 = Label(text='Status:', font = ('Helvetica', 14),
                       bg = self.bg_color, fg='black', height=1,
                       anchor=SW, width=22, justify=LEFT)
        text_2.grid(row=0, column=1, columnspan=1, sticky=SW)
        # Status message box driven by self.status_message
        text_3 = Label(textvariable=self.status_message, font = ('Helvetica', 12),
                       bg='white', fg='black', height=1, anchor=NW, width=22,
                       borderwidth=1, relief="solid")
        text_3.grid(row=1, column=1, sticky=NW)
        text_4 = Label(text='Motor Speed (%):', font = ('Helvetica', 14),
                       bg = self.bg_color, fg='black', padx=1, height=1,
                       anchor=SW, width=22, justify=LEFT)
        text_4.grid(row=2, column=1, columnspan=1, sticky=S)
        text_5 = Label(text='Antenna Selection:', font = ('Helvetica', 14),
                       bg = self.bg_color, fg='black', padx=1, height=1,
                       anchor=SW, width=22, justify=LEFT)
        text_5.grid(row=4, column=1, columnspan=1, sticky=S)
        text_6 = Label(text='Preset Selection:', font = ('Helvetica', 14),
                       bg = self.bg_color, fg='black', padx=1, height=1,
                       anchor=W, width=22, justify=LEFT)
        text_6.grid(row=6, column=1, columnspan=1, sticky=S)
        # Raise/Lower are momentary: press runs the motor, release stops it
        self.raise_button = Button(text='Raise', relief=RAISED, bd=4, padx=1,
                                   pady=1, height=2, width=6, font=('Helvetica', 14))
        self.raise_button.grid(row=2, column=0, padx=20, pady=5, rowspan=2)
        self.raise_button.bind("<ButtonPress>", self.raise_button_press)
        self.raise_button.bind("<ButtonRelease>", self.RL_button_release)
        self.lower_button = Button(text='Lower', relief=RAISED, bd=4, padx=1,
                                   pady=1, height=2, width=6, font=('Helvetica', 14))
        self.lower_button.grid(row=4, column=0, padx=20, pady=5, rowspan=2)
        self.lower_button.bind("<ButtonPress>", self.lower_button_press)
        self.lower_button.bind("<ButtonRelease>", self.RL_button_release)
        # Preset starts an automatic move toward the selected encoder target
        self.preset_button = Button(text='Preset', relief=RAISED, bd=4, padx=1,
                                    pady=1, height=2, width=6, font=('Helvetica', 14))
        self.preset_button.grid(row=6, column=0, padx=5, pady=5, rowspan=2)
        self.preset_button.bind("<ButtonPress>", self.preset_button_press)
        # Manual motor speed control (percent PWM duty)
        self.duty_scale = Scale(from_=1, to=100, orient = HORIZONTAL,
                                resolution = 1, length=200,
                                command = self.update_pwm_duty)
        self.duty_scale.grid(row=3,column=1, sticky=NW)
        # Antenna combo box is populated with values from ini file
        self.antenna_combobox = ttk.Combobox(width=19, font=('Helvetica', 14),
                                             state='readonly')
        self.antenna_combobox.grid(row=5, column=1, sticky=NW)
        self.antenna_combobox.bind("<<ComboboxSelected>>", self.get_antenna_val)
        # Antenna preset combo box is populated with values from ini file
        self.preset_combobox = ttk.Combobox(width=19, font=('Helvetica', 14),
                                            state='readonly')
        self.preset_combobox.grid(row=7, column=1, sticky=NW)
        self.preset_combobox.bind("<<ComboboxSelected>>", self.get_preset_val)
        self.ini_test ()  # Check for ini file existence
        self.ini_read()   # Retrieve ini file settings
        self.gpioconfig() # Set up GPIO for antenna control
        return
    def raise_button_press(self, _unused):
        """Raise pressed: clear the stall latch and drive the antenna up."""
        self.motor_stalled = 0
        self.motor_up ()
    def lower_button_press(self, _unused):
        """Lower pressed: clear the stall latch and drive the antenna down."""
        self.motor_stalled = 0
        self.motor_down ()
    def RL_button_release(self, _unused):
        """Raise/Lower released: stop the motor (momentary-button behavior)."""
        self.motor_stop ()
        self.status_message.set ("Ready")
    def preset_button_press(self, _unused):
        """Preset pressed: clear the stall latch and start the automatic
        move toward the selected preset encoder count."""
        self.motor_stalled = 0
        self.motor_move()
    def confirm_newini(self):
        """Ask the user before overwriting RPiAntDrv.ini with defaults."""
        okay = messagebox.askokcancel('RPiAntDrv',
                                      'Overwrite Configuration File?',
                                      detail='This will overwrite the '
                                      'RPiAntDrv.ini file with default '
                                      'values.', icon='question')
        if okay:
            # Overwrite the ini file and refresh values
            self.ini_new()
            self.ini_read()
            self.status_message.set ("RPiAntDrv.ini written")
        else:
            self.status_message.set ("Operation cancelled")
    def confirm_sync(self):
        """Ask the user before forcing the encoder count to the currently
        selected preset value (used to re-home a lost count)."""
        okay = messagebox.askokcancel('RPiAntDrv',
                                      'Proceed with Sync?',
                                      detail='This will sychronize the '
                                      'antenna encoder count to the preset '
                                      'value selected.', icon='question')
        if okay:
            # Sychronize encoder count with current preset value
            self.encoder_count.set(self.ant_preset_val)
            self.status_message.set ("Encoder syncronized")
        else:
            self.status_message.set ("Encoder sync canceled")
    def motor_up(self):
        """Run the motor in the 'raise' direction at the current PWM duty."""
        # We can change speed on the fly
        self.pwm_set.ChangeDutyCycle(self.pwm_duty)
        # If motor is not already running and in correct direction
        if not(self.motor_running and self.antenna_raising):
            # Set H-bridge direction pins for forward rotation
            GPIO.output(self.dir1_pin, GPIO.HIGH) # Run motor FWD
            GPIO.output(self.dir2_pin, GPIO.LOW)
            self.antenna_raising = 1
            self.motor_running = 1
            # Initialize stall counter and start stall timer
            self.motor_stall()
    def motor_down(self):
        """Run the motor in the 'lower' direction at the current PWM duty."""
        # We can change speed on the fly
        self.pwm_set.ChangeDutyCycle(self.pwm_duty)
        # If motor is not running and in correct direction
        if not(self.motor_running and not self.antenna_raising):
            GPIO.output(self.dir1_pin, GPIO.LOW) # Run motor
            GPIO.output(self.dir2_pin, GPIO.HIGH)
            self.motor_running = 1
            self.antenna_raising = 0
            # Initialize stall detection
            self.motor_stall()
    def motor_stop(self):
        """Stop the motor: both direction pins low, PWM duty to zero."""
        GPIO.output(self.dir1_pin, GPIO.LOW) # Stop motor
        GPIO.output(self.dir2_pin, GPIO.LOW)
        self.pwm_set.ChangeDutyCycle(0) # Kill PWM
        self.motor_running = 0
        #self.ini_update()
    def motor_stall(self):
        """Watchdog rescheduled via Tk after(): if the encoder count has not
        changed within one stall period while the motor runs, latch
        motor_stalled and stop the motor."""
        # Set stall period proportional to motor speed
        # (slower PWM duty -> longer period before a stall is declared)
        self.stall_period = int((100 / self.duty_scale.get())* self.stall_time)
        # If motor is still running, perform stall check
        if (self.motor_running):
            # If stall detection is not already active
            if not(self.stall_active):
                self.stall_count = self.encoder_count.get()
                self.stall_active = 1
                self.master.after(self.stall_period, self.motor_stall)
            # Otherwise see if we stalled
            elif (self.stall_count == self.encoder_count.get()):
                self.motor_stalled = 1
                self.motor_stop()
                self.stall_active = 0
                self.status_message.set ("! Antenna Stalled !")
            # Else reset stall count and timer
            else:
                self.stall_count = self.encoder_count.get()
                self.master.after(self.stall_period, self.motor_stall)
        else:
            self.stall_active = 0
    def motor_move(self):
        """Drive the antenna toward the preset encoder target, polling every
        100 ms via Tk after(); slows within 5 counts and stops on arrival."""
        # If motor is stalled, exit
        if (self.motor_stalled == 1):
            return
        # If encoder count = preset, stop and exit
        if self.encoder_count.get() == (self.ant_preset_val):
            self.motor_stop()
            self.status_message.set ("We have arrived")
            return
        # If encoder count within 5 counts of preset, slow down
        # (range upper bound is exclusive, hence +6)
        Lval= (self.ant_preset_val -5)
        Hval= (self.ant_preset_val +6)
        if self.encoder_count.get() in range(Lval, Hval):
            self.status_message.set ("Slowing down")
            self.duty_scale.set(self.slow_speed)
        # Else run full speed
        else:
            self.status_message.set ("Full speed")
            self.duty_scale.set(self.full_speed)
        # If encoder count > preset drive antenna down
        if self.encoder_count.get() > (self.ant_preset_val):
            self.motor_down()
        # Else drive antenna up
        else:
            self.motor_up()
        # after 100mS, call this function again
        self.master.after(100, self.motor_move)
    def get_antenna_val(self, _unused):
        """Antenna combobox changed: reload that antenna's configuration and
        presets from the ini file and retune the PWM frequency."""
        # fetch new antenna configuration and presets
        config = configparser.ConfigParser()
        config.read (self.ini_path)
        self.last_antenna = self.antenna_combobox.get()
        self.ant_refresh(config)
        self.pwm_set.ChangeFrequency(self.pwm_freq)
    def get_preset_val(self, _unused):
        """Preset combobox changed: look up the target encoder count."""
        # get the preset value stored in the ini file
        config = configparser.ConfigParser()
        config.read (self.ini_path)
        self.ant_preset_val = (config.getint(self.ant_preset_sect,
                                             self.preset_combobox.get()))
        #print (self.ant_preset_val)
    def update_pwm_duty(self, _unused):
        """Speed scale moved: remember the new PWM duty cycle (percent)."""
        self.pwm_duty = self.duty_scale.get()
        #print (_unused)
    def gpioconfig(self): # Configure GPIO pins
        """Configure the H-bridge direction/PWM outputs and the encoder
        input with its falling-edge interrupt."""
        GPIO.setwarnings(False)
        GPIO.cleanup() # In case user changes running configuration
        GPIO.setmode(GPIO.BOARD) # Refer to IO as Board header pins
        GPIO.setup(self.dir1_pin, GPIO.OUT) # Direction output 1 to H-bridge
        GPIO.setup(self.dir2_pin, GPIO.OUT) # Direction output 2 to H-bridge
        GPIO.output(self.dir1_pin, GPIO.LOW) # Turn direction output 1 off
        GPIO.output(self.dir2_pin, GPIO.LOW) # Turn direction output 2 off
        GPIO.setup(self.pwm_pin, GPIO.OUT) # PWM output to H-bridge
        # Set up the simple encoder switch input and add de-bounce time in mS
        # GPIO.RISING interrupts on both edges, GPIO.FALLING seems better behaved
        GPIO.setup(self.encoder_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(self.encoder_pin, GPIO.FALLING,
                              bouncetime=40, callback=self.encoder_ISR)
        # Note GPIO.PWM is software not hardware PWM
        self.pwm_set = GPIO.PWM(self.pwm_pin, self.pwm_freq) # Set up PWM for use
        #self.pwm_set.stop() # Stop pwm output
        self.pwm_set.start(self.pwm_duty) # Start pwm output at 0%
        GPIO.setwarnings(True)
    def encoder_ISR(self, _channel):
        """Encoder edge callback: step the count up or down per direction.
        NOTE(review): runs on the RPi.GPIO callback thread and sets a Tk
        IntVar — Tkinter is not guaranteed thread-safe; confirm stability.
        """
        # Do as little as possible in the ISR, get in and get out!
        # Increment the encoder count and jump out
        if self.antenna_raising == 1:
            self.encoder_count.set (self.encoder_count.get()+1)
        else:
            self.encoder_count.set (self.encoder_count.get()-1)
    def ini_new(self): # Set up an ini file if it does not exist
        """Write a default RPiAntDrv.ini: program settings plus two example
        antennas, each with a config section and a preset section."""
        # Configuration file parser to read and write ini file
        config = configparser.ConfigParser()
        # User configurable program settings
        config['Settings'] = {'pwm_pin':'19',
                              'dir1_pin':'13',
                              'dir2_pin':'15',
                              'encoder_pin':'11',
                              'antennas':'Antenna 1, Antenna 2',
                              'last_position':'0',
                              'last_antenna':'Antenna 1',
                              'last_preset':'20m 14.400 (037)'}
        # Set up default antennas
        config['Antenna 1_Config'] = {'pwm_freq':'4000',
                                      'full_speed':'100',
                                      'slow_speed':'25',
                                      'stall_time':'250'}
        # Preset keys are display labels; values are encoder target counts
        config['Antenna 1_Preset'] = {'maximum (270)':'270',
                                      '80m _3.500 (226)':'226',
                                      '80m _3.580 (221)':'221',
                                      '80m _3.800 (206)':'206',
                                      '80m _3.900 (199)':'199',
                                      '80m _4.000 (192)':'192',
                                      '60m _5.300 (130)':'130',
                                      '60m _5.400 (127)':'127',
                                      '40m _7.035 (091)':'91',
                                      '40m _7.175 (089)':'89',
                                      '40m _7.300 (087)':'87',
                                      '30m 10.000 (056)':'56',
                                      '30m 10.100 (055)':'55',
                                      '30m 10.200 (054)':'54',
                                      '20m 14.000 (039)':'39',
                                      '20m 14.200 (038)':'38',
                                      '20m 14.400 (037)':'37',
                                      '15m 21.275 (019)':'19',
                                      '12m 24.930 (014)':'14',
                                      '10m 28.000 (008)':'8',
                                      '10m 29.700 (006)':'6',
                                      'minimum (000)':'0'}
        config['Antenna 2_Config'] = {'pwm_freq':'4000',
                                      'full_speed':'95',
                                      'slow_speed':'20',
                                      'stall_time':'250'}
        config['Antenna 2_Preset'] = {'maximum (270)':'270',
                                      '80m _3.700 (200)':'200',
                                      '60m _5.350 (129)':'129',
                                      '40m _7.250 (090)':'90',
                                      '30m 10.100 (055)':'55',
                                      '20m 14.200 (038)':'38',
                                      'minimum (000)':'0'}
        # Save the default configuration file
        with open(self.ini_path, 'w') as configfile:
            config.write(configfile)
    def ini_test(self):
        """Create a default ini file if one cannot be opened for reading."""
        # Test to see if configuration file exists
        try:
            with open(self.ini_path) as _file:
                # pass condition
                self.status_message.set ("Configuration file loaded")
        except IOError as _e:
            #Does not exist OR no read permissions
            self.status_message.set ("Configuration file created")
            self.ini_new ()
    def ini_read(self):
        """Load ini settings: GPIO pins, last encoder position, antenna list,
        and the last antenna/preset selections."""
        # Read ini file and set up parameters
        config = configparser.ConfigParser()
        config.read (self.ini_path)
        # Retrieve I/O pin assignments
        self.pwm_pin = (config.getint ('Settings','pwm_pin',fallback=19))
        self.dir1_pin = (config.getint ('Settings','dir1_pin',fallback=13))
        self.dir2_pin = (config.getint ('Settings','dir2_pin',fallback=15))
        self.encoder_pin = (config.getint ('Settings','encoder_pin',fallback=11))
        # Restore the encoder count to preset value
        self.encoder_count.set (config.getint('Settings','last_position',fallback=0))
        self.ant_preset_val = self.encoder_count.get()
        # Retrieve the last antenna used and restore saved state
        # Grab CSV list of antennas to act as combobox values and keys
        # The .strip method removes leading and trailing spaces from .split list
        _antennas = (config.get('Settings','antennas',fallback="Antenna 1"))
        self.antenna_combobox['values']=[item.strip() for item in _antennas.split(',')]
        self.last_antenna = (config.get('Settings','last_antenna',fallback="Antenna 1"))
        self.antenna_combobox.set(self.last_antenna)
        self.preset_combobox.set(config.get('Settings','last_preset',fallback='None'))
        # refresh antenna settings and presets
        self.ant_refresh(config)
    def ant_refresh (self,config):
        """Load the selected antenna's config section (PWM/speeds/stall) and
        repopulate the preset combobox from its preset section."""
        # Using selected antenna refresh antenna settings and presets
        self.ant_config_sect = (self.last_antenna + '_Config')
        self.ant_preset_sect = (self.last_antenna + '_Preset')
        self.pwm_freq = (config.getint (self.ant_config_sect,'pwm_freq',fallback=4000))
        self.full_speed = (config.getint (self.ant_config_sect,'full_speed',fallback=100))
        self.slow_speed = (config.getint (self.ant_config_sect,'slow_speed',fallback=25))
        self.stall_time = (config.getint (self.ant_config_sect,'stall_time',fallback=250))
        self.preset_combobox['values']=(config.options(self.ant_preset_sect))
    def ini_update(self):
        """Persist the current position and selections back to the ini file."""
        config = configparser.ConfigParser()
        # Perform read-modify-write of ini file
        # Note: Anytyhing written must be a string value
        config.read (self.ini_path)
        config.set ('Settings','last_position',str(self.encoder_count.get()))
        config.set ('Settings','last_antenna',self.antenna_combobox.get())
        config.set ('Settings','last_preset',self.preset_combobox.get())
        # Save modified configuration file
        with open(self.ini_path, 'w') as configfile:
            config.write(configfile)
        self.status_message.set ("ini file updated")
    def close(self): # Cleanly close the GUI and cleanup the GPIO
        """Save state, release GPIO resources, and destroy the root window."""
        self.ini_update() # Save current settings
        GPIO.cleanup()
        #print ("GPIO cleanup executed")
        self.master.destroy()
        #print ("master window destroyed")
def about(self):
popup = Toplevel()
popup.title("About RPiAntDrv")
popup.geometry("325x225+162+168")
popup.configure(bg= 'snow')
popup_text1 = Label(popup, text='RPiAntDrv.py v1.6',
font = ('Helvetica', 12), wraplength=300, justify=LEFT,
bg = 'snow', fg='black', padx=10, pady=10)
popup_text1.grid(row=0, column=0, columnspan=1)
popup_text2 = Label(popup, text='This Python script is used to control '
'a motor tuned antenna like a screwdriver antenna or '
'tuned loop. Feedback from the antenna is provided by '
'a simple dry contact or pulse output relative to the '
'output shaft turning.',
font = ('Helvetica', 12), wraplength=300, justify=LEFT,
bg = 'snow', fg='black', padx=10, pady=10)
popup_text2.grid(row=1, column=0, columnspan=1)
popup.mainloop()
def main():
    """Create the Tk root window, attach the application, and run it."""
    # root window created. Here, that would be the only window, but
    # you can later have windows within windows.
    root = Tk()
    app = Window(root) #creation of an instance
    root.protocol("WM_DELETE_WINDOW", app.close) # cleanup GPIO when X closes window
    root.mainloop() # Loops forever
if __name__ == '__main__':
    main()
| 47.967347 | 96 | 0.551481 | 22,484 | 0.956603 | 0 | 0 | 0 | 0 | 0 | 0 | 7,093 | 0.301778 |
d104b69824ddd7c1f8640b233b546c4955e4df9d | 4,021 | py | Python | extensions/customer_action.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | null | null | null | extensions/customer_action.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | 1 | 2019-07-03T13:32:38.000Z | 2019-07-03T13:32:38.000Z | extensions/customer_action.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | 1 | 2019-05-15T16:01:31.000Z | 2019-05-15T16:01:31.000Z | #! /usr/bin/python
# Copyright (C) 2006 Dr. Ralf Schlatterbeck Open Source Consulting.
# Reichergasse 131, A-3411 Weidling.
# Web: http://www.runtux.com Email: office@runtux.com
# All rights reserved
# ****************************************************************************
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ****************************************************************************
#
#++
# Name
# customer_action
#
# Purpose
#
# Various actions for editing of customer data
#--
from roundup.cgi.actions import Action, EditItemAction, SearchAction
from roundup.cgi.exceptions import Redirect
from roundup.exceptions import Reject
from roundup.cgi import templating
from roundup.date import Date, Interval, Range
from roundup.cgi.TranslationService import get_translation
from roundup.hyperdb import Multilink
class Create_New_Address (Action) :
""" Create a new address to be added to the current item (used for
customer addresses, supply_address, invoice_address,
contact_person).
Optional @frm specifies the address to copy.
"""
copy_attributes = \
[ 'adr_type', 'birthdate', 'city', 'country'
, 'firstname', 'function', 'initial', 'lastname', 'lettertitle'
, 'postalcode', 'salutation', 'street', 'title'
, 'valid'
]
def handle (self) :
self.request = templating.HTMLRequest (self.client)
assert (self.client.nodeid)
klass = self.db.classes [self.request.classname]
id = self.client.nodeid
attr = self.form ['@attr'].value.strip ()
if '@frm' in self.form :
frm = self.form ['@frm'].value.strip ()
node = self.db.address.getnode (self.db.cust_supp.get (id, frm))
attributes = dict \
((k, node [k]) for k in self.copy_attributes
if node [k] is not None
)
else :
attributes = dict \
( function = klass.get (id, 'name')
, country = ' '
)
newvalue = newid = self.db.address.create (** attributes)
if isinstance (klass.properties [attr], Multilink) :
newvalue = klass.get (id, attr) [:]
newvalue.append (newid)
newvalue = dict.fromkeys (newvalue).keys ()
klass.set (id, ** {attr : newvalue})
self.db.commit ()
raise Redirect, "%s%s" % (self.request.classname, id)
# end def handle
# end class Create_New_Address
def del_link (classname, id) :
return \
( "document.forms.itemSynopsis ['@remove@%s'].value = '%s';"
"alert(document.forms.itemSynopsis ['@remove@%s'].value);"
% (classname, id, classname)
)
# end def del_link
def adress_button (db, adr_property_frm, adr_property_to) :
"""Compute address copy button inscription"""
adr_frm = db._ (adr_property_frm)
adr_to = db._ (adr_property_to)
return db._ (''"new %(adr_to)s from %(adr_frm)s") % locals ()
# end def adress_button
def init (instance) :
actn = instance.registerAction
actn ('create_new_address', Create_New_Address)
util = instance.registerUtil
util ("del_link", del_link)
util ("adress_button", adress_button)
# end def init
| 38.663462 | 79 | 0.605073 | 1,656 | 0.411838 | 0 | 0 | 0 | 0 | 0 | 0 | 1,850 | 0.460085 |
d104ca6c9b0546b1e6e69454841d4fb7d4af63b5 | 7,018 | py | Python | layouts/window_profile.py | TkfleBR/PyManager | d57f6cced4932d03b51902cbcf4d9b217c67bd3c | [
"MIT"
] | null | null | null | layouts/window_profile.py | TkfleBR/PyManager | d57f6cced4932d03b51902cbcf4d9b217c67bd3c | [
"MIT"
] | null | null | null | layouts/window_profile.py | TkfleBR/PyManager | d57f6cced4932d03b51902cbcf4d9b217c67bd3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window_profile.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow
class Profile(QMainWindow):
def __init__(self):
super().__init__()
self.setObjectName("MainWindow")
self.resize(397, 374)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 381, 361))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.lineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit.setText("")
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 6, 0, 1, 1)
self.lineEdit_6 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_6.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_6.setObjectName("lineEdit_6")
self.gridLayout.addWidget(self.lineEdit_6, 7, 1, 1, 1)
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
self.dateEdit.setAlignment(QtCore.Qt.AlignCenter)
self.dateEdit.setObjectName("dateEdit")
self.gridLayout.addWidget(self.dateEdit, 6, 1, 1, 1)
self.lineEdit_8 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.gridLayout.addWidget(self.lineEdit_8, 8, 1, 1, 1)
self.lineEdit_8.setObjectName("lineEdit_8")
self.lineEdit_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 7, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 8, 0, 1, 1)
self.lineEdit_4 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_4.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_4.setObjectName("lineEdit_4")
self.gridLayout.addWidget(self.lineEdit_4, 4, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.lineEdit_5 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_5.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_5.setObjectName("lineEdit_5")
self.gridLayout.addWidget(self.lineEdit_5, 5, 1, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_3.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout.addWidget(self.lineEdit_3, 3, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 5, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout.addWidget(self.lineEdit_2, 2, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(
self.label_9, 1, 0, 1, 1, QtCore.Qt.AlignHCenter)
self.lineEdit_7 = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.lineEdit_7.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_7.setObjectName("lineEdit_7")
self.gridLayout.addWidget(self.lineEdit_7, 1, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.setCentralWidget(self.centralwidget)
self.retranslateUi()
QtCore.QMetaObject.connectSlotsByName(self)
self.show()
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_6.setText(_translate("MainWindow", "Birthday:"))
self.label.setText(_translate("MainWindow", "Name:"))
self.label_8.setText(_translate("MainWindow", "Company:"))
self.label_7.setText(_translate("MainWindow", "Salary:"))
self.label_3.setText(_translate("MainWindow", "Password:"))
self.label_2.setText(_translate("MainWindow", "Username:"))
self.label_5.setText(_translate("MainWindow", "Status:"))
self.label_4.setText(_translate("MainWindow", "Repeat Password:"))
self.label_9.setText(_translate("MainWindow", "CPF/CNPJ"))
self.pushButton_2.setText(_translate("MainWindow", "Cancel"))
self.pushButton.setText(_translate("MainWindow", "Save"))
| 53.572519 | 78 | 0.708891 | 6,730 | 0.958963 | 0 | 0 | 0 | 0 | 0 | 0 | 766 | 0.109148 |
d105481f869074fc1ccad9921caae59fba74e538 | 1,816 | py | Python | tests/test_inheritance_and_pydantic_generation/test_validators_in_generated_pydantic.py | ivangirko/ormar | 1f5d993716da0da83874cbdfd5b44dbf7af1b9c5 | [
"MIT"
] | 905 | 2020-08-31T19:13:34.000Z | 2022-03-31T08:38:10.000Z | tests/test_inheritance_and_pydantic_generation/test_validators_in_generated_pydantic.py | ivangirko/ormar | 1f5d993716da0da83874cbdfd5b44dbf7af1b9c5 | [
"MIT"
] | 359 | 2020-08-28T14:14:54.000Z | 2022-03-29T07:40:32.000Z | tests/test_inheritance_and_pydantic_generation/test_validators_in_generated_pydantic.py | ivangirko/ormar | 1f5d993716da0da83874cbdfd5b44dbf7af1b9c5 | [
"MIT"
] | 56 | 2020-10-26T02:22:14.000Z | 2022-03-20T06:41:31.000Z | import enum
import databases
import pydantic
import pytest
import sqlalchemy
from pydantic import ValidationError
import ormar
from tests.settings import DATABASE_URL
metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL)
class BaseMeta(ormar.ModelMeta):
database = database
metadata = metadata
class EnumExample(str, enum.Enum):
A = "A"
B = "B"
C = "C"
class ModelExample(ormar.Model):
class Meta(ormar.ModelMeta):
database = database
metadata = metadata
tablename = "examples"
id: int = ormar.Integer(primary_key=True)
str_field: str = ormar.String(min_length=5, max_length=10, nullable=False)
enum_field: str = ormar.String(
max_length=1, nullable=False, choices=list(EnumExample)
)
@pydantic.validator("str_field")
def validate_str_field(cls, v):
if " " not in v:
raise ValueError("must contain a space")
return v
ModelExampleCreate = ModelExample.get_pydantic(exclude={"id"})
def test_ormar_validator():
ModelExample(str_field="a aaaaaa", enum_field="A")
with pytest.raises(ValidationError) as e:
ModelExample(str_field="aaaaaaa", enum_field="A")
assert "must contain a space" in str(e)
with pytest.raises(ValidationError) as e:
ModelExample(str_field="a aaaaaaa", enum_field="Z")
assert "not in allowed choices" in str(e)
def test_pydantic_validator():
ModelExampleCreate(str_field="a aaaaaa", enum_field="A")
with pytest.raises(ValidationError) as e:
ModelExampleCreate(str_field="aaaaaaa", enum_field="A")
assert "must contain a space" in str(e)
with pytest.raises(ValidationError) as e:
ModelExampleCreate(str_field="a aaaaaaa", enum_field="Z")
assert "not in allowed choices" in str(e)
| 26.705882 | 78 | 0.698789 | 703 | 0.387115 | 0 | 0 | 163 | 0.089758 | 0 | 0 | 229 | 0.126101 |
d105fd819be01d37eabcfc76984d50eb4c0e68fb | 20,654 | py | Python | tests/unit/test_searchtools.py | canonical/hotsos | 1960e80a3f7529045c44798b0d3ac27d75036562 | [
"Apache-2.0"
] | 6 | 2021-10-01T19:46:14.000Z | 2022-03-31T17:05:08.000Z | tests/unit/test_searchtools.py | canonical/hotsos | 1960e80a3f7529045c44798b0d3ac27d75036562 | [
"Apache-2.0"
] | 111 | 2021-10-01T18:18:17.000Z | 2022-03-29T12:23:20.000Z | tests/unit/test_searchtools.py | canonical/hotsos | 1960e80a3f7529045c44798b0d3ac27d75036562 | [
"Apache-2.0"
] | 10 | 2021-09-29T14:47:54.000Z | 2022-03-18T14:52:16.000Z | import glob
import os
import tempfile
from unittest import mock
from . import utils
from hotsos.core.config import setup_config, HotSOSConfig
from hotsos.core.searchtools import (
FileSearcher,
FilterDef,
SearchDef,
SearchResult,
SequenceSearchDef,
)
FILTER_TEST_1 = """blah blah ERROR blah
blah blah ERROR blah
blah blah INFO blah
"""
SEQ_TEST_1 = """a start point
leads to
an ending
"""
SEQ_TEST_2 = """a start point
another start point
leads to
an ending
"""
SEQ_TEST_3 = """a start point
another start point
leads to
an ending
a start point
"""
SEQ_TEST_4 = """a start point
another start point
value is 3
"""
SEQ_TEST_5 = """a start point
another start point
value is 3
another start point
value is 4
"""
SEQ_TEST_6 = """section 1
1_1
1_2
section 2
2_1
"""
SEQ_TEST_7 = """section 1
1_1
1_2
section 2
2_1
section 3
3_1
"""
MULTI_SEQ_TEST = """
sectionB 1
1_1
sectionA 1
1_1
sectionB 2
2_2
sectionA 2
2_1
"""
class TestSearchTools(utils.BaseTestCase):
@mock.patch.object(os, "environ", {})
@mock.patch.object(os, "cpu_count")
def test_filesearcher_num_cpus_no_override(self, mock_cpu_count):
mock_cpu_count.return_value = 3
with mock.patch.object(FileSearcher, 'num_files_to_search', 4):
s = FileSearcher()
self.assertEqual(s.num_cpus, 3)
@mock.patch.object(os, "environ", {})
@mock.patch.object(os, "cpu_count")
def test_filesearcher_num_cpus_files_capped(self, mock_cpu_count):
mock_cpu_count.return_value = 3
with mock.patch.object(FileSearcher, 'num_files_to_search', 2):
s = FileSearcher()
self.assertEqual(s.num_cpus, 2)
@mock.patch.object(os, "cpu_count")
def test_filesearcher_num_cpus_w_override(self, mock_cpu_count):
setup_config(MAX_PARALLEL_TASKS=2)
mock_cpu_count.return_value = 3
with mock.patch.object(FileSearcher, 'num_files_to_search', 4):
s = FileSearcher()
self.assertEqual(s.num_cpus, 2)
def test_filesearcher_logs(self):
expected = {9891: '2022-02-09 22:50:18.131',
9892: '2022-02-09 22:50:19.703'}
logs_root = "var/log/neutron/"
filepath = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-openvswitch-agent.log.2.gz')
globpath = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-l3-agent.log')
globpath_file1 = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-l3-agent.log')
globpath_file2 = os.path.join(HotSOSConfig.DATA_ROOT, logs_root,
'neutron-l3-agent.log.1.gz')
s = FileSearcher()
sd = SearchDef(r'^(\S+\s+[0-9:\.]+)\s+.+full sync.+', tag="T1")
s.add_search_term(sd, filepath)
sd = SearchDef(r'^(\S+\s+[0-9:\.]+)\s+.+ERROR.+', tag="T2")
s.add_search_term(sd, filepath)
sd = SearchDef((r'^(\S+\s+[0-9:\.]+)\s+.+ INFO .+ Router [0-9a-f\-]+'
'.+'), tag="T3")
s.add_search_term(sd, globpath)
sd = SearchDef(r'non-existant-pattern', tag="T4")
# search for something that doesn't exist to test that code path
s.add_search_term(sd, globpath)
results = s.search()
self.assertEqual(set(results.files), set([filepath, globpath]))
self.assertEqual(len(results.find_by_path(filepath)), 1220)
tag_results = results.find_by_tag("T1", path=filepath)
self.assertEqual(len(tag_results), 2)
for result in tag_results:
ln = result.linenumber
self.assertEqual(result.tag, "T1")
self.assertEqual(result.get(1), expected[ln])
tag_results = results.find_by_tag("T1")
self.assertEqual(len(tag_results), 2)
for result in tag_results:
ln = result.linenumber
self.assertEqual(result.tag, "T1")
self.assertEqual(result.get(1), expected[ln])
self.assertEqual(len(results.find_by_path(globpath_file1)), 1)
self.assertEqual(len(results.find_by_path(globpath_file2)), 0)
# these files have the same content so expect same result from both
expected = {5380: '2022-02-10 16:09:22.641'}
path_results = results.find_by_path(globpath_file1)
for result in path_results:
ln = result.linenumber
self.assertEqual(result.tag, "T3")
self.assertEqual(result.get(1), expected[ln])
path_results = results.find_by_path(globpath_file2)
for result in path_results:
ln = result.linenumber
self.assertEqual(result.tag, "T3")
self.assertEqual(result.get(1), expected[ln])
def test_filesearcher_network_info(self):
filepath = os.path.join(HotSOSConfig.DATA_ROOT, 'sos_commands',
'networking', 'ip_-d_address')
filepath2 = os.path.join(HotSOSConfig.DATA_ROOT, 'sos_commands',
'networking', 'ip_-s_-d_link')
ip = "10.0.0.128"
mac = "22:c2:7b:1c:12:1b"
s = FileSearcher()
sd = SearchDef(r".+({}).+".format(ip))
s.add_search_term(sd, filepath)
sd = SearchDef(r"^\s+link/ether\s+({})\s+.+".format(mac))
s.add_search_term(sd, filepath2)
results = s.search()
self.assertEqual(set(results.files), set([filepath, filepath2]))
self.assertEqual(len(results.find_by_path(filepath)), 1)
self.assertEqual(len(results.find_by_path(filepath2)), 2)
self.assertEqual(results.find_by_path(filepath)[0].linenumber, 38)
for result in results.find_by_path(filepath):
self.assertEqual(result.get(1), ip)
expected = {52: mac,
141: mac}
for result in results.find_by_path(filepath2):
ln = result.linenumber
self.assertEqual(result.tag, None)
self.assertEqual(result.get(1), expected[ln])
def test_filesearcher_error(self):
s = FileSearcher()
with mock.patch.object(SearchResult, '__init__') as mock_init:
def fake_init(*args, **kwargs):
raise EOFError("some error")
mock_init.side_effect = fake_init
path = os.path.join(HotSOSConfig.DATA_ROOT)
s.add_search_term(SearchDef("."), path)
s.search()
def test_filesearch_filesort(self):
ordered_contents = []
self.maxDiff = None
with tempfile.TemporaryDirectory() as dtmp:
os.mknod(os.path.join(dtmp, "my-test-agent.log"))
ordered_contents.append("my-test-agent.log")
os.mknod(os.path.join(dtmp, "my-test-agent.log.1"))
ordered_contents.append("my-test-agent.log.1")
# add in an erroneous file that does not follow logrotate format
os.mknod(os.path.join(dtmp, "my-test-agent.log.tar.gz"))
for i in range(2, 100):
fname = "my-test-agent.log.{}.gz".format(i)
os.mknod(os.path.join(dtmp, fname))
ordered_contents.append(fname)
self.assertEqual(FileSearcher().logrotate_file_sort(fname), i)
ordered_contents.append("my-test-agent.log.tar.gz")
contents = os.listdir(dtmp)
self.assertEqual(sorted(contents,
key=FileSearcher().logrotate_file_sort),
ordered_contents)
def test_filesearch_glob_filesort(self):
dir_contents = []
self.maxDiff = None
with tempfile.TemporaryDirectory() as dtmp:
dir_contents.append(os.path.join(dtmp, "my-test-agent.0.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.1.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.1.log.1"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.2.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.16.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.49.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.49.log.1"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.77.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.100.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.100.log.1"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.110.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.142.log"))
dir_contents.append(os.path.join(dtmp, "my-test-agent.183.log"))
for e in dir_contents:
os.mknod(e)
for i in range(2, HotSOSConfig.MAX_LOGROTATE_DEPTH + 10):
fname = os.path.join(dtmp,
"my-test-agent.1.log.{}.gz".format(i))
os.mknod(fname)
if i <= HotSOSConfig.MAX_LOGROTATE_DEPTH:
dir_contents.append(fname)
for i in range(2, HotSOSConfig.MAX_LOGROTATE_DEPTH + 10):
fname = os.path.join(dtmp,
"my-test-agent.49.log.{}.gz".format(i))
os.mknod(fname)
if i <= HotSOSConfig.MAX_LOGROTATE_DEPTH:
dir_contents.append(fname)
for i in range(2, HotSOSConfig.MAX_LOGROTATE_DEPTH + 10):
fname = os.path.join(dtmp,
"my-test-agent.100.log.{}.gz".format(i))
os.mknod(fname)
if i <= HotSOSConfig.MAX_LOGROTATE_DEPTH:
dir_contents.append(fname)
exp = sorted(dir_contents)
path = os.path.join(dtmp, 'my-test-agent*.log*')
act = sorted(FileSearcher().filtered_paths(glob.glob(path)))
self.assertEqual(act, exp)
def test_sequence_searcher(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_1)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^a\S* (start\S*) point\S*"),
body=SearchDef(r"leads to"),
end=SearchDef(r"^an (ending)$"),
tag="seq-search-test1")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "start")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(1), "ending")
os.remove(ftmp.name)
def test_sequence_searcher_overlapping(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_2)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"leads to"),
end=SearchDef(r"^an (ending)$"),
tag="seq-search-test2")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(1), "ending")
os.remove(ftmp.name)
def test_sequence_searcher_overlapping_incomplete(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_3)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"leads to"),
end=SearchDef(r"^an (ending)$"),
tag="seq-search-test3")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(1), "ending")
os.remove(ftmp.name)
def test_sequence_searcher_incomplete_eof_match(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_4)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"value is (\S+)"),
end=SearchDef(r"^$"),
tag="seq-search-test4")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.body_tag:
self.assertEqual(r.get(1), "3")
elif r.tag == sd.end_tag:
self.assertEqual(r.get(0), "")
os.remove(ftmp.name)
def test_sequence_searcher_multiple_sections(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_5)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(
r"^(a\S*) (start\S*) point\S*"),
body=SearchDef(r"value is (\S+)"),
end=SearchDef(r"^$"),
tag="seq-search-test5")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 2)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "another")
elif r.tag == sd.body_tag:
self.assertTrue(r.get(1) in ["3", "4"])
elif r.tag == sd.end_tag:
self.assertEqual(r.get(0), "")
os.remove(ftmp.name)
def test_sequence_searcher_eof(self):
"""
Test scenario:
* multiple sections that end with start of the next
* start def matches any start
* end def matches any start
* file ends before start of next
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_6)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(r"^section (\d+)"),
body=SearchDef(r"\d_\d"),
tag="seq-search-test6")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 2)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
section = r.get(1)
self.assertTrue(r.get(1) in ["1", "2"])
elif r.tag == sd.body_tag:
if section == "1":
self.assertTrue(r.get(0) in ["1_1", "1_2"])
else:
self.assertTrue(r.get(0) in ["2_1"])
os.remove(ftmp.name)
def test_sequence_searcher_section_start_end_same(self):
"""
Test scenario:
* multiple sections that end with start of the next
* start def matches unique start
* end def matches any start
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(SEQ_TEST_7)
ftmp.close()
s = FileSearcher()
sd = SequenceSearchDef(start=SearchDef(r"^section (2)"),
body=SearchDef(r"\d_\d"),
end=SearchDef(
r"^section (\d+)"),
tag="seq-search-test7")
s.add_search_term(sd, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sd)
self.assertEqual(len(sections), 1)
for id in sections:
for r in sections[id]:
if r.tag == sd.start_tag:
self.assertEqual(r.get(1), "2")
elif r.tag == sd.body_tag:
self.assertTrue(r.get(0) in ["2_1"])
os.remove(ftmp.name)
def test_sequence_searcher_multi_sequence(self):
"""
Test scenario:
* search containing multiple seqeunce definitions
* data containing 2 results of each where one is incomplete
* test that single incomplete result gets removed
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(MULTI_SEQ_TEST)
ftmp.close()
s = FileSearcher()
sdA = SequenceSearchDef(start=SearchDef(r"^sectionA (\d+)"),
body=SearchDef(r"\d_\d"),
end=SearchDef(
r"^section\S+ (\d+)"),
tag="seqA-search-test")
sdB = SequenceSearchDef(start=SearchDef(r"^sectionB (\d+)"),
body=SearchDef(r"\d_\d"),
end=SearchDef(
r"^section\S+ (\d+)"),
tag="seqB-search-test")
s.add_search_term(sdA, path=ftmp.name)
s.add_search_term(sdB, path=ftmp.name)
results = s.search()
sections = results.find_sequence_sections(sdA)
self.assertEqual(len(sections), 1)
sections = results.find_sequence_sections(sdB)
self.assertEqual(len(sections), 2)
os.remove(ftmp.name)
def test_search_filter(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(FILTER_TEST_1)
ftmp.close()
s = FileSearcher()
fd = FilterDef(r" (INFO)")
s.add_filter_term(fd, path=ftmp.name)
sd = SearchDef(r".+ INFO (.+)")
s.add_search_term(sd, path=ftmp.name)
results = s.search().find_by_path(ftmp.name)
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r.get(1), "blah")
os.remove(ftmp.name)
def test_search_filter_invert_match(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as ftmp:
ftmp.write(FILTER_TEST_1)
ftmp.close()
s = FileSearcher()
fd = FilterDef(r" (ERROR)", invert_match=True)
s.add_filter_term(fd, path=ftmp.name)
sd = SearchDef(r".+ INFO (.+)")
s.add_search_term(sd, path=ftmp.name)
results = s.search().find_by_path(ftmp.name)
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r.get(1), "blah")
os.remove(ftmp.name)
| 39.56705 | 79 | 0.534812 | 19,705 | 0.954052 | 0 | 0 | 1,003 | 0.048562 | 0 | 0 | 3,379 | 0.1636 |
d1067d667a8681b9e02f92cf39cbd928fbd8b767 | 896 | py | Python | setup.py | KimWiese/bqtools | f874834167dddaae9da7dd5a8564d80a479d59ac | [
"MIT"
] | null | null | null | setup.py | KimWiese/bqtools | f874834167dddaae9da7dd5a8564d80a479d59ac | [
"MIT"
] | null | null | null | setup.py | KimWiese/bqtools | f874834167dddaae9da7dd5a8564d80a479d59ac | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
VERSION = '0.5.0'
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
with open('requirements.txt') as f:
DEPENDENCIES = f.read().split('\n')
setup(
name = 'bqtools',
version = VERSION,
description = 'Python Tools for BigQuery',
long_description = LONG_DESCRIPTION,
long_description_content_type = 'text/markdown',
author = 'Jonathan Rahn',
author_email = 'jonathan.rahn@42digital.de',
url = 'https://github.com/42DIGITAL/bqtools',
packages = find_packages(exclude=['tests']),
install_requires=DEPENDENCIES,
extras_require={'test': ['pytest']},
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Database',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
| 28.903226 | 52 | 0.639509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.401786 |
d10682fb60ec99068c4dbd7e5b8bac0fee1bcbce | 32 | py | Python | dag_executor/Executor/__init__.py | GennadiiTurutin/dag_executor | ddc7eab1e0e98753309e245247ac00e465e52ec1 | [
"MIT"
] | null | null | null | dag_executor/Executor/__init__.py | GennadiiTurutin/dag_executor | ddc7eab1e0e98753309e245247ac00e465e52ec1 | [
"MIT"
] | null | null | null | dag_executor/Executor/__init__.py | GennadiiTurutin/dag_executor | ddc7eab1e0e98753309e245247ac00e465e52ec1 | [
"MIT"
] | null | null | null | from .executor import Executor
| 16 | 31 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d106a8e65c56d421953b12f1ee56992d0d42670b | 2,546 | py | Python | tests/create_test_db.py | TargetProcess/duro | 3e70c17aed3d6d8714c94f0dfda539969d22157a | [
"MIT"
] | 4 | 2020-01-31T13:54:51.000Z | 2020-04-17T15:53:02.000Z | tests/create_test_db.py | TargetProcess/duro | 3e70c17aed3d6d8714c94f0dfda539969d22157a | [
"MIT"
] | null | null | null | tests/create_test_db.py | TargetProcess/duro | 3e70c17aed3d6d8714c94f0dfda539969d22157a | [
"MIT"
] | 1 | 2020-04-14T12:32:08.000Z | 2020-04-14T12:32:08.000Z | import sqlite3
ddl = """
create table commits
(
hash text,
processed integer
);
create table tables
(
table_name text,
query text,
interval integer,
config text,
last_created integer,
mean real,
times_run integer,
force integer,
started integer,
deleted integer,
waiting integer
);
create table timestamps
(
"table" text,
start int,
connect int,
"select" int,
create_temp int,
process int,
csv int,
s3 int,
"insert" int,
clean_csv int,
tests int,
replace_old int,
drop_old int,
make_snapshot int,
finish int
);
create table version
(
major INTEGER,
minor INTEGER
);
"""
inserts = """
INSERT INTO tables (table_name, query, interval, config, last_created, mean, times_run, force, started, deleted, waiting)
VALUES ('first.cities', 'select city, country
from first.cities_raw', 1440,
'{"grant_select": "jane, john"}',
null, 0, 0, null, null, null, null);
INSERT INTO tables
VALUES ('first.countries', 'select country, continent
from first.countries_raw;', 60,
'{"grant_select": "joan, john"}',
null, 0, 0, null, null, null, null);
INSERT INTO tables
VALUES ('second.child', 'select city, country from first.cities', null,
'{"diststyle": "all", "distkey": "city", "snapshots_interval": "24d", "snapshots_stored_for": "90d"}',
null, 0, 0, null, null, null, null);
INSERT INTO tables
VALUES ('second.parent', 'select * from second.child limit 10', 24,
'{"diststyle": "even"}', null, 0, 0, null, null, null, null);
INSERT INTO timestamps ("table", start, connect, "select", create_temp,
process, csv, s3, "insert", clean_csv, tests, replace_old, drop_old, make_snapshot,
finish)
VALUES ('first.cities', 1522151698, 1522151699, 1522151773, 1522151783, null,
1522151793, null, null, null, 1522151799, 1522151825, 1522151825, null, 1522151825);
INSERT INTO timestamps ("table", start, connect, "select", create_temp,
process, csv, s3, "insert", clean_csv, tests, replace_old, drop_old, make_snapshot,
finish)
VALUES ('first.cities', 1522151835, 1522151849, 1522152053, 1522152063, null,
1522152073, null, null, null, 1522152155, 1522152202, 1522152202, null, 1522152202);
INSERT INTO timestamps ("table", start, connect, "select", create_temp,
process, csv, s3, "insert", clean_csv, tests, replace_old, drop_old, make_snapshot,
finish)
VALUES ('first.cities', 1523544406, null, null, null, null, null, null, null,
null, null, null, null, null, null);
INSERT INTO version (major, minor) VALUES (1, 0);
"""
| 27.376344 | 122 | 0.689709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,511 | 0.986253 |
d106bcbb2782ae3b631b100e3fcc2d409f55aa2d | 6,534 | py | Python | dti_classification_pytorch.py | fyrdahl/ISMRM2018_Educational_DeepLearning | 8bd3fa4a6828e0bb3a9832a9ec90ab5a99836f52 | [
"MIT"
] | 17 | 2018-06-16T09:06:18.000Z | 2022-03-11T16:20:03.000Z | dti_classification_pytorch.py | fyrdahl/ISMRM2018_Educational_DeepLearning | 8bd3fa4a6828e0bb3a9832a9ec90ab5a99836f52 | [
"MIT"
] | null | null | null | dti_classification_pytorch.py | fyrdahl/ISMRM2018_Educational_DeepLearning | 8bd3fa4a6828e0bb3a9832a9ec90ab5a99836f52 | [
"MIT"
] | 8 | 2018-06-16T09:12:43.000Z | 2020-12-17T02:54:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
DTI classification demo ISMRM 2018
Created in May 2018 for ISMRM educational "How to Jump-Start Your Deep Learning Research"
Educational course Deep Learning: Everything You Want to Know, Saturday, June 16th 2018
Joint Annual meeting of ISMRM and ESMRMB, Paris, France, June 16th to 21st
Created with PyTorch 0.4 and Python 3.6 using CUDA 8.0
Please see import section for module dependencies
florian.knoll@nyumc.org
"""
#%reset
#%% Import modules
import numpy as np
np.random.seed(123) # for reproducibility
import pandas
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.utils.data as data_utils
import os
torch.manual_seed(123) # for reproducibility
plt.close("all")
#%% Load dataset
# The first case is used as an independent test set. Cases 2-4 are used for training and validation
#
#Entries in the CVS file are
#1: sample
#2: row
#3: column
#4: slice
#5: T1 weighted anatomical image
#6: FA
#7: MD
#8: AD
#9: RD
#10: Label
#
#Classes are
#1: left thalamus
#2: left genu of the corpus callosum
#3: left subcortical white matter of inferior frontal gyrus
data1 = pandas.read_csv("./data/dti/sampledata100206.csv", header=None).values
data2 = pandas.read_csv("./data/dti/sampledata105620.csv", header=None).values
data3 = pandas.read_csv("./data/dti/sampledata107725.csv", header=None).values
data4 = pandas.read_csv("./data/dti/sampledata112314.csv", header=None).values
data_cat = np.concatenate((data2,data3,data4),axis=0)
#%% Remove classes and slice position features
x_test = data1[:,4:9].astype(float)
y_test = data1[:,9]-1 # class labels are expected to start at 0
X = data_cat[:,4:9].astype(float)
Y = data_cat[:,9]-1 # class labels are expected to start at 0
#%% Normalize data
nSamples = np.size(Y)
nSamples_test = np.size(y_test)
nClasses = np.int(np.max(Y))+1
nFeatures = np.size(X,1)
for ii in range(0,nFeatures):
feature_normalization = max(X[:,ii])
X[:,ii] = X[:,ii]/feature_normalization
x_test[:,ii] = x_test[:,ii]/feature_normalization
#%% Separate training and validation
setsize_train = np.ceil(nSamples*0.8).astype(int)
setsize_val = np.ceil(nSamples*0.2).astype(int)
#random permuation of data and classes
idx = np.random.permutation(nSamples)
idx_train = idx[0:setsize_train]
idx_val = idx[setsize_train:setsize_train+setsize_val]
x_train = X[idx_train,:]
y_train = Y[idx_train]
x_val = X[idx_val,:]
y_val = Y[idx_val]
#%%Generate torch variables
x_train = torch.Tensor(x_train).float()
y_train = torch.Tensor(y_train).long()
x_val = torch.Tensor(x_val).float()
y_val = torch.Tensor(y_val).long()
x_test = torch.Tensor(x_test).float()
y_test = torch.Tensor(y_test).long()
#%% Check balancing of classes
#np.sum(y_train==0)
#np.sum(y_train==1)
#np.sum(y_train==2)
#
#np.sum(y_val==0)
#np.sum(y_val==1)
#np.sum(y_val==2)
#
#np.sum(y_test==0)
#np.sum(y_test==1)
#np.sum(y_test==2)
#
#np.sum(y_train==0)+np.sum(y_val==0)+np.sum(y_test==0)
#np.sum(y_train==1)+np.sum(y_val==1)+np.sum(y_test==1)
#np.sum(y_train==2)+np.sum(y_val==2)+np.sum(y_test==2)
#%% Define model
nElements = 100
nLayers = 3
model_name = 'dti_FC'
model = torch.nn.Sequential(
torch.nn.Linear(nFeatures, nElements, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(nElements, nElements, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(nElements, nElements, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(nElements, nElements, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(nElements, nClasses, bias=True),
)
print(model)
#%%choose optimizer and loss function
training_epochs = 250
lr = 0.001
batch_size = 1024
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
#%%Create minibatch data loading for training and validation
dataloader_train = data_utils.TensorDataset(x_train, y_train)
dataloader_train = data_utils.DataLoader(dataloader_train, batch_size=batch_size, shuffle=False,num_workers=4)
#%% Train model
loss_train = np.zeros(training_epochs)
acc_train = np.zeros(training_epochs)
loss_val = np.zeros(training_epochs)
acc_val = np.zeros(training_epochs)
for epoch in range(training_epochs):
for local_batch, local_labels in dataloader_train:
# feedforward - backpropagation
optimizer.zero_grad()
out = model(local_batch)
loss = criterion(out, local_labels)
loss.backward()
optimizer.step()
loss_train[epoch] = loss.item()
# Training data accuracy
[dummy, predicted] = torch.max(out.data, 1)
acc_train[epoch] = (torch.sum(local_labels==predicted).numpy() / np.size(local_labels.numpy(),0))
# Validation
out_val = model(x_val)
loss = criterion(out_val, y_val)
loss_val[epoch] = loss.item()
[dummy, predicted_val] = torch.max(out_val.data, 1)
acc_val[epoch] = ( torch.sum(y_val==predicted_val).numpy() / setsize_val)
print ('Epoch {}/{} train loss: {:.3}, train acc: {:.3}, val loss: {:.3}, val acc: {:.3}'.format(epoch+1, training_epochs, loss_train[epoch], acc_train[epoch], loss_val[epoch], acc_val[epoch]))
#%% Evaluate trained model
#Double check model on train data
out = model(x_train)
[dummy, predicted] = torch.max(out.data, 1)
acc_train_final = (torch.sum(y_train==predicted).numpy() / setsize_train)
print('Evaluation results train data: {:.2}'.format(acc_train_final))
#Double check model on validation data
out = model(x_val)
[dummy, predicted] = torch.max(out.data, 1)
acc_val_final = (torch.sum(y_val==predicted).numpy() / setsize_val)
print('Evaluation results validation data: {:.2}'.format(acc_val_final))
#Evaluate model on test data
out = model(x_test)
[dummy, predicted] = torch.max(out.data, 1)
acc_test_final = (torch.sum(y_test==predicted).numpy() / nSamples_test)
print('Evaluation results test data: {:.2}'.format(acc_test_final))
#%% Plot training overview
os.makedirs('./training_plots_pytorch')
plot_label = 'FC {} layers {} elements: train/val/test={:.2}/{:.2}/{:.2}'.format(nLayers,nElements,acc_train_final,acc_val_final,acc_test_final)
N=5
plt.figure(1)
plt.plot(np.convolve(acc_train, np.ones((N,))/N, mode='valid'))
plt.plot(np.convolve(acc_val, np.ones((N,))/N, mode='valid'))
plt.title(plot_label)
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['training', 'validation'], loc='lower right')
plt.ylim(0.5,0.9)
plt.show()
plt.savefig('./training_plots_pytorch/{}_{}layers_{}elements_epochs{}.png'.format(model_name,nLayers,nElements,training_epochs)) | 31.873171 | 197 | 0.716406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,421 | 0.370523 |
d108c51bdb007cb0e84b86a435237fb3a1b224b6 | 30,044 | py | Python | dasem/wikipedia.py | eaksnes/dasem | d8d1c5e68aedf758aee1ba83da063f1e0952c21d | [
"Apache-2.0"
] | 18 | 2017-03-28T15:36:49.000Z | 2021-11-02T12:09:17.000Z | dasem/wikipedia.py | eaksnes/dasem | d8d1c5e68aedf758aee1ba83da063f1e0952c21d | [
"Apache-2.0"
] | 9 | 2017-02-17T12:58:23.000Z | 2021-02-14T14:04:17.000Z | dasem/wikipedia.py | eaksnes/dasem | d8d1c5e68aedf758aee1ba83da063f1e0952c21d | [
"Apache-2.0"
] | 2 | 2018-10-04T09:29:12.000Z | 2019-08-15T10:04:55.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Wikipedia interface.
Usage:
dasem.wikipedia category-graph | count-category-pages
dasem.wikipedia count-pages | count-pages-per-user
dasem.wikipedia article-link-graph [options]
dasem.wikipedia download [options]
dasem.wikipedia get-all-article-sentences
dasem.wikipedia get-all-stripped-article-texts
dasem.wikipedia iter-pages | iter-article-words [options]
dasem.wikipedia doc-term-matrix [options]
dasem.wikipedia most-similar [options] <word>
dasem.wikipedia save-tfidf-vectorizer [options]
Options:
-h --help Help
--debug Debug messages
--filename=<str> Filename
--ie=encoding Input encoding [default: utf-8]
--max-n-pages=<int> Maximum number of pages to iterate over
--oe=encoding Output encoding [default: utf-8]
-o --output=<file> Output filename, default output to stdout
-v --verbose Verbose messages
Description:
This module and script handle the interface to the Wikipedia corpus.
The XML Dump file from Wikipedia should be downloaded. This can be
done by the `dasem.wikipedia download` command
Examples:
$ python -m dasem.wikipedia download --verbose
"""
from __future__ import division, print_function
from bz2 import BZ2File
import codecs
from collections import Counter
import logging
import os
from os import write
from os.path import isfile, join, sep, split
import re
from shutil import copyfileobj
import signal
from six import b
import json
import nltk
from nltk.stem.snowball import DanishStemmer
from nltk.tokenize import WordPunctTokenizer
import gzip
try:
import cPickle as pickle
except ImportError:
import pickle
import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy
from lxml import etree
import mwparserfromhell
import numpy as np
import requests
from scipy.sparse import lil_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from .config import data_directory
from .utils import make_data_directory
from . import models
jsonpickle_numpy.register_handlers()
BZ2_XML_DUMP_FILENAME = 'dawiki-latest-pages-articles.xml.bz2'
DOC2VEC_FILENAME = 'wikipedia-doc2vec.pkl.gz'
TFIDF_VECTORIZER_FILENAME = 'wikipedia-tfidfvectorizer.json'
ESA_PKL_FILENAME = 'wikipedia-esa.pkl.gz'
ESA_JSON_FILENAME = 'wikipedia-esa.json.gz'
BASE_URL = 'https://dumps.wikimedia.org/dawiki/latest/'
def is_article_link(wikilink):
    """Return True if wikilink is an article link.

    A wikilink counts as an article link when it is wrapped in ``[[...]]``,
    is non-empty, and does not point into one of the known non-article
    namespaces of the Danish Wikipedia.

    Parameters
    ----------
    wikilink : str
        Wikilink to be tested

    Returns
    -------
    result : bool
        True if wikilink is an article link

    Examples
    --------
    >>> is_article_link('[[Danmark]]')
    True

    >>> is_article_link('[[Kategori:Danmark]]')
    False

    """
    # Namespace prefixes that mark non-article pages.
    non_article_prefixes = (
        'Diskussion:',
        'Fil:',
        'File:',
        'Kategori:',
        'Kategoridiskussion:',
        'Wikipedia:',
        'Wikipedia-diskussion:',
        u'Hjælp:',
        u'Hjælp-diskussion',
        'Bruger:',
        'Brugerdiskussion:',
    )
    if not wikilink.startswith('[[') or len(wikilink) <= 4:
        return False
    return not wikilink[2:].startswith(non_article_prefixes)
def strip_wikilink_to_article(wikilink):
    """Strip wikilink to article.

    Removes surrounding ``[[...]]`` brackets (when present) and any
    ``|``-separated display text, leaving only the article title.

    Parameters
    ----------
    wikilink : str
        Wikilink

    Returns
    -------
    stripped_wikilink : str
        String with stripped wikilink.

    Examples
    --------
    >>> strip_wikilink_to_article('[[dansk (sprog)|dansk]]')
    'dansk (sprog)'

    >>> strip_wikilink_to_article('Danmark')
    'Danmark'

    """
    inner = wikilink[2:-2] if wikilink.startswith('[[') else wikilink
    article, _, _ = inner.partition('|')
    return article
def strip_to_category(category):
    """Strip prefix and postfix from category link.

    Handles both a full wikilink (``[[Kategori:...]]``) and a bare page
    title (``Kategori:...``); any ``|``-separated sort key is dropped.

    Parameters
    ----------
    category : str

    Returns
    -------
    stripped_category : str
        String with stripped category

    """
    if category.startswith('[[Kategori:'):
        category = category[len('[[Kategori:'):-2]
    elif category.startswith('Kategori:'):
        category = category[len('Kategori:'):]
    return category.partition('|')[0]
class XmlDumpFile(object):
    """XML Dump file.

    For instance, dawiki-20160901-pages-articles.xml.bz2.

    Attributes
    ----------
    file : file
        File object to read from.
    filename : str
        Filename of dump file.
    word_pattern : _sre.SRE_Pattern
        Compiled regular expression for finding words.

    """

    def __init__(self, filename=BZ2_XML_DUMP_FILENAME):
        """Prepare dump file for reading.

        Parameters
        ----------
        filename : str
            Filename of the XML dump file.

        """
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(logging.NullHandler())

        full_filename = self.full_filename(filename)
        self.filename = full_filename

        self.sentence_tokenizer = nltk.data.load(
            'tokenizers/punkt/danish.pickle')
        # Raw string so the backslash escape is interpreted only by the
        # regular expression engine (avoids a DeprecationWarning).
        self.whitespaces_pattern = re.compile(
            r'\s+', flags=re.DOTALL | re.UNICODE)
        self.word_tokenizer = WordPunctTokenizer()
        self.stemmer = DanishStemmer()
        # Matches words while skipping templates, HTML comments, file and
        # category links and external links.
        self.word_pattern = re.compile(
            r"""{{.+?}}|
            <!--.+?-->|
            \[\[Fil.+?\]\]|
            \[\[Kategori:.+?\]\]|
            \[http.+?\]|(\w+(?:-\w+)*)""",
            flags=re.UNICODE | re.VERBOSE | re.DOTALL)
        self.paragraph_split_pattern = re.compile(
            r'\n\s*\n', flags=re.DOTALL | re.UNICODE)
        # Media parameters and redirects that mwparserfromhell does not strip.
        self.ignored_words_pattern = re.compile(
            r"""
            (?:(?:thumb|thumbnail|left|right|\d+px|upright(?:=[0-9\.]+)?)\|)+
            |^\s*\|.+$
            |^REDIRECT\b""",
            flags=re.DOTALL | re.UNICODE | re.VERBOSE | re.MULTILINE)
        self.itemized_split_pattern = re.compile(
            r"^ |^Kategori:",
            flags=re.DOTALL | re.UNICODE | re.MULTILINE)

    def download(self, redownload=False):
        """Download Wikipedia XML dump file.

        Parameters
        ----------
        redownload : bool, optional
            If true will download the database file anew even if it is already
            downloaded.

        Description
        -----------
        Download Wikipedia XML dump file from
        https://dumps.wikimedia.org/dawiki/latest/.

        """
        local_filename = self.filename
        directory, filename = split(local_filename)
        if not redownload and isfile(local_filename):
            self.logger.info('File {} already downloaded'.format(
                local_filename))
            return

        self.make_data_directory()
        url = BASE_URL + filename
        self.logger.info('Downloading {} to {}'.format(url, local_filename))
        # Stream the response so the multi-gigabyte dump is never held in
        # memory in one piece.
        response = requests.get(url, stream=True)
        with open(local_filename, 'wb') as fid:
            copyfileobj(response.raw, fid)

    def full_filename(self, filename):
        """Return filename with full filename path."""
        if sep in filename:
            return filename
        else:
            return join(data_directory(), 'wikipedia', filename)

    def clean_tag(self, tag):
        """Remove namespace from tag.

        Parameters
        ----------
        tag : str
            Tag with namespace prefix.

        Returns
        -------
        cleaned_tag : str
            Tag with namespace part removed.

        """
        # lxml tags look like '{namespace-uri}tag'.
        cleaned_tag = tag.split('}')[-1]
        return cleaned_tag

    def iter_elements(self, events=('end',)):
        """Iterate over elements in XML file.

        Yields
        ------
        event : str
            'start' or 'end'
        element : Element
            XML element

        """
        if self.filename.endswith('.bz2'):
            self.file = BZ2File(self.filename)
        else:
            # `file` was a Python 2 builtin that no longer exists; use
            # `open` in binary mode so lxml receives bytes in both cases.
            self.file = open(self.filename, 'rb')
        with self.file as f:
            for event, element in etree.iterparse(f, events=events):
                yield event, element

    def iter_page_elements(self, events=('end',)):
        """Iterate over page XML elements."""
        for event, element in self.iter_elements(events=events):
            tag = self.clean_tag(element.tag)
            if tag == 'page':
                yield event, element

    def iter_pages(self):
        """Iterate over pages yielding a dictionary.

        Yields
        ------
        page : dict

        """
        for event, element in self.iter_page_elements(events=('end',)):
            page = {}
            for descendant in element.iterdescendants():
                tag = self.clean_tag(descendant.tag)
                if tag not in ['contributor', 'revision']:
                    page[tag] = descendant.text
            yield page
            # Free the element after the page dict (plain strings) has been
            # extracted, so iterparse does not keep the whole tree in memory.
            element.clear()

    def count_pages(self):
        """Return number of pages.

        Returns
        -------
        count : int
            Number of pages

        """
        count = 0
        for event, element in self.iter_page_elements():
            count += 1
        return count

    def count_pages_per_user(self):
        """Count the number of pages per user.

        Counts for both 'username' and 'ip' are recorded.

        Returns
        -------
        counts : collections.Counter
            Counter object containing counts as values.

        """
        counts = Counter()
        for page in self.iter_pages():
            if 'username' in page:
                counts[page['username']] += 1
            elif 'ip' in page:
                counts[page['ip']] += 1
        return counts

    def iter_article_pages(self, max_n_pages=None):
        """Iterate over article pages.

        Parameters
        ----------
        max_n_pages : int or None
            Maximum number of pages to return.

        Yields
        ------
        page : dict

        """
        n = 0
        for page in self.iter_pages():
            # Namespace '0' is the article namespace.
            if page['ns'] == '0':
                n += 1
                yield page
                if max_n_pages is not None and n >= max_n_pages:
                    break

    def iter_stripped_article_texts(self, max_n_pages=None):
        """Iterate over article page text.

        Parameters
        ----------
        max_n_pages : int or None
            Maximum number of pages to return.

        Yields
        ------
        text : str
            Text.

        """
        for page in self.iter_article_pages(max_n_pages=max_n_pages):
            wikicode = mwparserfromhell.parse(page['text'])

            # Make more space for the heading, so it is easier to match as
            # a separate "sentence".
            for node in wikicode.ifilter_headings():
                wikicode.insert_after(node, "\n")

            stripped_text = wikicode.strip_code()

            # Parameters for media content are not stripped by
            # mwparserfromhell.
            stripped_text = self.ignored_words_pattern.sub('', stripped_text)
            yield stripped_text

    def iter_article_sentences(self, max_n_pages=None):
        """Iterate over article sentences.

        Parameters
        ----------
        max_n_pages : int or None, optional
            Maximum number of pages to return.

        Yields
        ------
        sentences : str
            Sentences as strings.

        """
        for text in self.iter_stripped_article_texts(max_n_pages=max_n_pages):
            paragraphs = self.paragraph_split_pattern.split(text)
            for paragraph in paragraphs:
                sentences = self.sentence_tokenizer.tokenize(paragraph)
                for sentence in sentences:
                    parts = self.itemized_split_pattern.split(sentence)
                    for part in parts:
                        if part:
                            yield part.strip()

    def iter_article_sentence_words(
            self, lower=True, max_n_pages=None):
        """Iterate over article sentences.

        Parameters
        ----------
        lower : bool, optional
            Lower case words
        max_n_pages : int or None, optional
            Maximum number of pages to return.

        Yields
        ------
        sentences : list of str
            Sentences as list of words represented as strings.

        """
        for sentence in self.iter_article_sentences(max_n_pages=max_n_pages):
            tokens = self.word_tokenizer.tokenize(sentence)
            if lower:
                yield [token.lower() for token in tokens]
            else:
                yield tokens

    def iter_article_title_and_words(self, max_n_pages=None):
        """Iterate over articles returning word list.

        Parameters
        ----------
        max_n_pages : int or None
            Maximum number of pages to iterate over.

        Yields
        ------
        title : str
            Title of article
        words : list of str
            List of words

        """
        for page in self.iter_article_pages(max_n_pages=max_n_pages):
            words = self.word_pattern.findall(page['text'])
            words = [word.lower() for word in words if word]
            yield page['title'], words

    def iter_article_words(self, lower=True, max_n_pages=None):
        """Iterate over articles returning word list.

        Parameters
        ----------
        lower : bool, optional
            Lower case the words.
        max_n_pages : int or None
            Maximum number of pages to iterate over.

        Yields
        ------
        words : list of str
            List of words

        """
        self.logger.debug('Article words iterator')
        for page in self.iter_article_pages(max_n_pages=max_n_pages):
            # Bug fix: the previous `if word and lower` filter yielded an
            # empty list whenever lower was False. Filter empty matches
            # first, then optionally lower-case.
            words = [word for word in self.word_pattern.findall(page['text'])
                     if word]
            if lower:
                words = [word.lower() for word in words]
            yield words

    def article_link_graph(self, verbose=False):
        """Return article link graph.

        Returns
        -------
        graph : dict
            Dictionary with values as a list where elements indicate
            article linked to.

        """
        graph = {}
        for n, page in enumerate(self.iter_article_pages()):
            wikicode = mwparserfromhell.parse(page['text'])
            wikilinks = wikicode.filter_wikilinks()
            article_links = []
            for wikilink in wikilinks:
                if is_article_link(wikilink):
                    article_link = strip_wikilink_to_article(wikilink)
                    article_links.append(article_link.title())
            graph[page['title']] = article_links
            if verbose and not n % 100:
                print(n)
        return graph

    def iter_category_pages(self):
        """Iterate over category pages.

        For dawiki-20160901-pages-articles.xml.bz2 this method
        returns 51548

        Yields
        ------
        page : dict

        """
        for page in self.iter_pages():
            # Namespace '14' is the category namespace.
            if page['ns'] == '14':
                yield page

    def count_category_pages(self):
        """Count category pages.

        Returns
        -------
        count : int
            Number of category pages.

        """
        n = 0
        for page in self.iter_category_pages():
            n += 1
        return n

    def make_data_directory(self):
        """Make data directory for Wikipedia."""
        make_data_directory(data_directory(), 'wikipedia')

    def category_graph(self):
        """Return category graph.

        Returns
        -------
        graph : dict
            Dictionary with values indicating supercategories.

        """
        graph = {}
        for page in self.iter_category_pages():
            wikicode = mwparserfromhell.parse(page['text'])
            wikilinks = wikicode.filter_wikilinks()
            categories = []
            for wikilink in wikilinks:
                if wikilink.startswith('[[Kategori:'):
                    categories.append(strip_to_category(wikilink))
            category = strip_to_category(page['title'])
            graph[category] = categories
        return graph

    def doc_term_matrix(self, max_n_pages=None, verbose=False):
        """Return doc-term matrix.

        Parameters
        ----------
        max_n_pages : int or None
            Maximum number of Wikipedia articles to iterate over.
        verbose : bool
            Display message during processing.

        """
        # First pass: identify the vocabulary.
        n_pages = 0
        all_terms = []
        for title, words in self.iter_article_title_and_words(
                max_n_pages=max_n_pages):
            n_pages += 1
            all_terms.extend(words)
            if verbose and not n_pages % 100:
                print(u"Identified terms from article {}".format(n_pages))
        terms = list(set(all_terms))
        n_terms = len(terms)

        if verbose:
            print("Constructing sparse matrix of size {}x{}".format(
                n_pages, n_terms))
        matrix = lil_matrix((n_pages, n_terms))

        # Second pass: count terms per article.
        rows = []
        columns = dict(zip(terms, range(len(terms))))
        for n, (title, words) in enumerate(self.iter_article_title_and_words(
                max_n_pages=max_n_pages)):
            rows.append(title)
            for word in words:
                matrix[n, columns[word]] += 1
            if verbose and not n % 100:
                print(u"Sat counts in matrix from article {}".format(n))

        return matrix, rows, terms
class ExplicitSemanticAnalysis(object):
    """Explicit semantic analysis.

    Builds a TFIDF article-term matrix over the Danish Wikipedia and uses
    it to compute semantic relatedness between phrases.

    References
    ----------
    Evgeniy Gabrilovich, Shaul Markovitch, Computing semantic relatedness
    using Wikipedia-based explicit semantic analysis, 2007.

    """

    def __init__(
            self, autosetup=True, stop_words=None, norm='l2', use_idf=True,
            sublinear_tf=False, max_n_pages=None, display=False):
        """Set up model.

        Several of the parameters are piped further on to sklearn's
        TfidfVectorizer.

        Parameters
        ----------
        stop_words : list of str or None, optional
            List of stop words.
        norm : 'l1', 'l2' or None, optional
            Norm used to normalize term vectors of tfidf vectorizer.
        use_idf : bool, optional
            Enable inverse-document-frequency reweighting.

        """
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(logging.NullHandler())

        if autosetup:
            self.logger.info('Trying to load pickle files')
            try:
                self.load_pkl(display=display)
            except Exception:
                # Narrowed from a bare `except:` so that KeyboardInterrupt
                # and SystemExit are not swallowed. A missing or corrupt
                # pickle simply triggers a fresh model setup; the cause is
                # logged instead of being silently discarded.
                self.logger.exception(
                    'Could not load pickled model - setting up from dump')
                self.setup(
                    stop_words=stop_words, norm=norm, use_idf=use_idf,
                    sublinear_tf=sublinear_tf, max_n_pages=max_n_pages,
                    display=display)
                self.save_pkl(display=display)

    def full_filename(self, filename):
        """Return filename with full filename path."""
        if sep in filename:
            return filename
        else:
            return join(data_directory(), 'models', filename)

    def save_json(self, filename=ESA_JSON_FILENAME, display=False):
        """Save parameters to JSON file."""
        full_filename = self.full_filename(filename)
        self.logger.info('Writing parameters to JSON file {}'.format(
            full_filename))
        with gzip.open(full_filename, 'w') as f:
            f.write(jsonpickle.encode(
                {'Y': self._Y,
                 'transformer': self._transformer,
                 'titles': self._titles}))

    def load_json(self, filename=ESA_JSON_FILENAME, display=False):
        """Load model parameters from JSON pickle file.

        Parameters
        ----------
        filename : str
            Filename for gzipped JSON pickled file.

        """
        full_filename = self.full_filename(filename)
        self.logger.info('Reading parameters from JSON file {}'.format(
            full_filename))
        with gzip.open(full_filename) as f:
            data = jsonpickle.decode(f.read())
        self._Y = data['Y']
        self._transformer = data['transformer']
        self._titles = data['titles']

    def save_pkl(self, display=False):
        """Save parameters to pickle files."""
        items = [
            ('_titles', 'wikipedia-esa-titles.pkl.gz'),
            ('_Y', 'wikipedia-esa-y.pkl.gz'),
            ('_transformer', 'wikipedia-esa-transformer.pkl.gz')
        ]
        for attr, filename in items:
            full_filename = self.full_filename(filename)
            self.logger.info('Writing parameters to pickle file {}'.format(
                full_filename))
            with gzip.open(full_filename, 'w') as f:
                # Highest pickle protocol for speed and size.
                pickle.dump(getattr(self, attr), f, -1)

    def load_pkl(self, display=False):
        """Load parameters from pickle files."""
        items = [
            ('_titles', 'wikipedia-esa-titles.pkl.gz'),
            ('_Y', 'wikipedia-esa-y.pkl.gz'),
            ('_transformer', 'wikipedia-esa-transformer.pkl.gz')
        ]
        for attr, filename in items:
            full_filename = self.full_filename(filename)
            self.logger.info('Reading parameters from pickle file {}'.format(
                full_filename))
            with gzip.open(full_filename) as f:
                setattr(self, attr, pickle.load(f))

    def setup(
            self, stop_words=None, norm='l2', use_idf=True, sublinear_tf=False,
            max_n_pages=None, display=False):
        """Set up wikipedia semantic model.

        Returns
        -------
        self : ExplicitSemanticAnalysis
            Self object.

        """
        self._dump_file = XmlDumpFile()

        self._titles = [
            page['title'] for page in self._dump_file.iter_article_pages(
                max_n_pages=max_n_pages)]

        # Generator: the article texts are streamed into the vectorizer.
        texts = (page['text']
                 for page in self._dump_file.iter_article_pages(
                     max_n_pages=max_n_pages))
        self.logger.info('TFIDF vectorizing')
        self._transformer = TfidfVectorizer(
            stop_words=stop_words, norm=norm, use_idf=use_idf,
            sublinear_tf=sublinear_tf)
        self._Y = self._transformer.fit_transform(texts)
        return self

    def relatedness(self, phrases):
        """Return semantic relatedness between phrases.

        Parameters
        ----------
        phrases : list of str
            List of phrases as strings.

        Returns
        -------
        relatedness : np.array
            Array with values between 0 and 1 for semantic relatedness.

        """
        Y = self._transformer.transform(phrases)
        D = np.asarray((self._Y * Y.T).todense())
        # Normalize the concept columns before taking inner products.
        D = np.einsum('ij,j->ij', D,
                      1 / np.sqrt(np.multiply(D, D).sum(axis=0)))
        return D.T.dot(D)

    def related(self, phrase, n=10):
        """Return related articles.

        Parameters
        ----------
        phrase : str
            Phrase
        n : int
            Number of articles to return.

        Returns
        -------
        titles : list of str
            List of articles as strings.

        """
        if n is None:
            n = 10
        y = self._transformer.transform([phrase])
        D = np.array((self._Y * y.T).todense())
        indices = np.argsort(-D, axis=0)
        titles = [self._titles[index] for index in indices[:n, 0]]
        return titles

    def sort_by_outlierness(self, phrases):
        """Return phrases sorted by outlierness.

        The phrase least related to the others comes first.

        Parameters
        ----------
        phrases : list of str
            List of phrases.

        Returns
        -------
        sorted_phrases : list of str
            List of sorted phrases.

        Examples
        --------
        >>> esa = ExplicitSemanticAnalysis()
        >>> esa.sort_by_outlierness(['hund', 'fogh', 'nyrup', 'helle'])
        ['hund', 'helle', 'fogh', 'nyrup']

        """
        R = self.relatedness(phrases)
        # Subtract 1 to remove each phrase's self-relatedness from the sum.
        indices = np.argsort(R.sum(axis=0) - 1)
        return [phrases[idx] for idx in indices]
class SentenceWordsIterable(object):
    """Restartable iterable over Wikipedia sentence word lists.

    Each call to ``iter()`` opens the XML dump anew, so the iterable can
    be consumed several times (as gensim's word2vec training requires).

    Parameters
    ----------
    lower : bool, default True
        Lower case the words.
    stem : bool, default False
        Apply word stemming. DanishStemmer from nltk is used.
        NOTE(review): the flag is stored but not forwarded by
        ``__iter__`` — confirm whether stemming is handled elsewhere.
    max_n_pages : int or None
        Maximum number of pages to iterate over.

    References
    ----------
    https://stackoverflow.com/questions/34166369

    """

    def __init__(self, lower=True, stem=False, max_n_pages=None):
        """Store iteration options."""
        self.lower = lower
        self.stem = stem
        self.max_n_pages = max_n_pages

    def __iter__(self):
        """Return a fresh iterator over sentence word lists."""
        return XmlDumpFile().iter_article_sentence_words(
            lower=self.lower, max_n_pages=self.max_n_pages)
class Word2Vec(models.Word2Vec):
    """Gensim word2vec model for the Danish Wikipedia corpus.

    Trained models can be saved and loaded via the inherited `save` and
    `load` methods.

    """

    def data_directory(self):
        """Return data directory.

        Returns
        -------
        dir : str
            Directory for data.

        """
        return join(data_directory(), 'wikipedia')

    def iterable_sentence_words(self, lower=True, stem=False):
        """Return iterable for sentence words.

        Parameters
        ----------
        lower : bool, default True
            Lower case the words.
        stem : bool, default False
            Apply word stemming. DanishStemmer from nltk is used.

        Returns
        -------
        sentence_words : iterable
            Iterable over sentence words

        """
        return SentenceWordsIterable(lower=lower, stem=stem)
def main():
    """Handle command-line interface.

    Parses the module docstring with docopt, configures logging, and
    dispatches to the selected sub-command.
    """
    from docopt import docopt

    arguments = docopt(__doc__)

    # Configure logging verbosity from the command-line flags.
    logging_level = logging.WARN
    if arguments['--debug']:
        logging_level = logging.DEBUG
    elif arguments['--verbose']:
        logging_level = logging.INFO
    logger = logging.getLogger()
    logger.setLevel(logging_level)
    logging_handler = logging.StreamHandler()
    logging_handler.setLevel(logging_level)
    logging_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logging_handler.setFormatter(logging_formatter)
    logger.addHandler(logging_handler)

    if arguments['--output']:
        output_filename = arguments['--output']
        output_file = os.open(output_filename, os.O_RDWR | os.O_CREAT)
    else:
        # File descriptor 1 is stdout.
        output_file = 1
    output_encoding = arguments['--oe']
    input_encoding = arguments['--ie']

    if arguments['--max-n-pages'] is None:
        max_n_pages = None
    else:
        max_n_pages = int(arguments['--max-n-pages'])

    # Ignore broken pipe errors, e.g., when piping output through `head`.
    # SIGPIPE does not exist on Windows, so guard the access.
    if hasattr(signal, 'SIGPIPE'):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    dump_file = XmlDumpFile()

    if arguments['iter-pages']:
        for page in dump_file.iter_pages():
            print(json.dumps(page))

    elif arguments['count-pages']:
        count = dump_file.count_pages()
        print(count)

    elif arguments['count-pages-per-user']:
        counts = dump_file.count_pages_per_user().most_common(100)
        for n, (user, count) in enumerate(counts, 1):
            print(u"{:4} {:6} {}".format(n, count, user))

    elif arguments['article-link-graph']:
        graph = dump_file.article_link_graph()
        print(graph)

    elif arguments['category-graph']:
        graph = dump_file.category_graph()
        print(graph)

    elif arguments['count-category-pages']:
        count = dump_file.count_category_pages()
        print(count)

    elif arguments['download']:
        dump_file.download()

    elif arguments['get-all-stripped-article-texts']:
        for text in dump_file.iter_stripped_article_texts():
            write(output_file, text.encode(output_encoding) + b('\n'))

    elif arguments['get-all-article-sentences']:
        for sentence in dump_file.iter_article_sentences():
            write(output_file, sentence.encode(output_encoding) + b('\n'))

    elif arguments['iter-article-words']:
        for title, words in dump_file.iter_article_title_and_words(
                max_n_pages=max_n_pages):
            print(json.dumps([title, words]))

    elif arguments['doc-term-matrix']:
        matrix, rows, columns = dump_file.doc_term_matrix(
            max_n_pages=int(arguments['--max-n-pages']))
        print(matrix)

    elif arguments['most-similar']:
        word = arguments['<word>']
        # On Python 3 docopt returns `str`, which has no `decode` method;
        # only decode when an actual byte string is given (Python 2).
        if isinstance(word, bytes):
            word = word.decode(input_encoding)
        word = word.lower()
        word2vec = Word2Vec()
        words_and_similarity = word2vec.most_similar(word)
        for word, similarity in words_and_similarity:
            write(output_file, word.encode(output_encoding) + b('\n'))

    elif arguments['save-tfidf-vectorizer']:
        if arguments['--filename']:
            filename = arguments['--filename']
        else:
            filename = TFIDF_VECTORIZER_FILENAME
        texts = (page['text'] for page in dump_file.iter_article_pages(
            max_n_pages=max_n_pages))
        # Cannot unzip the iterator, so iterate the pages a second time
        # to collect the titles.
        titles = [page['title']
                  for page in dump_file.iter_article_pages(
                      max_n_pages=max_n_pages)]
        transformer = TfidfVectorizer()
        transformer.fit(texts)
        transformer.rows = titles
        with codecs.open(filename, 'w', encoding='utf-8') as f:
            f.write(jsonpickle.encode(transformer))

    else:
        # docopt guarantees one of the commands matched; reaching this point
        # indicates a programming error in the dispatch above. RuntimeError
        # instead of `assert False`, which is stripped under `python -O`.
        raise RuntimeError('Unhandled command-line command')

    if arguments['--output']:
        # Close the explicitly opened output file (stdout is left alone).
        os.close(output_file)
if __name__ == '__main__':
main()
| 28.972035 | 79 | 0.580748 | 21,651 | 0.720595 | 5,776 | 0.192239 | 0 | 0 | 0 | 0 | 12,242 | 0.407442 |
d109fc9aa0086799eb6cc991bb49a126a301ba91 | 794 | py | Python | ViceVersus/users/migrations/0001_initial.py | ViceVersusMe/ViceVersus | 1f814f8a8c3ee0c156ebf400f4dac87c19d7747a | [
"MIT"
] | null | null | null | ViceVersus/users/migrations/0001_initial.py | ViceVersusMe/ViceVersus | 1f814f8a8c3ee0c156ebf400f4dac87c19d7747a | [
"MIT"
] | null | null | null | ViceVersus/users/migrations/0001_initial.py | ViceVersusMe/ViceVersus | 1f814f8a8c3ee0c156ebf400f4dac87c19d7747a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the UserProfile table.
    # Do not edit the schema operations by hand; generate a new migration
    # instead.

    # No prior migrations in this app.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                # Auto-incrementing surrogate primary key.
                ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
                # All profile fields are optional (null=True, blank=True).
                ('gender', models.CharField(max_length=20, null=True, choices=[('male', 'Male'), ('female', 'Female')], blank=True)),
                ('city', models.CharField(max_length=250, null=True, blank=True)),
                # Date of birth.
                ('dob', models.DateField(null=True, blank=True)),
                # Locale code, e.g. 'en_US'.
                ('locale', models.CharField(max_length=10, null=True, blank=True)),
            ],
        ),
    ]
| 33.083333 | 133 | 0.584383 | 685 | 0.86272 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.124685 |
d10a3d88aef5a6ce7c6db2138dda46ee0331ecd1 | 10,992 | py | Python | VideoEncoding/Encoding_H264_OverlayImage/encoding-h264-overlayimage.py | IngridAtMicrosoft/media-services-v3-python | 2eb43f502cd8637961869faf8d0c365ffa1680d2 | [
"MIT"
] | null | null | null | VideoEncoding/Encoding_H264_OverlayImage/encoding-h264-overlayimage.py | IngridAtMicrosoft/media-services-v3-python | 2eb43f502cd8637961869faf8d0c365ffa1680d2 | [
"MIT"
] | null | null | null | VideoEncoding/Encoding_H264_OverlayImage/encoding-h264-overlayimage.py | IngridAtMicrosoft/media-services-v3-python | 2eb43f502cd8637961869faf8d0c365ffa1680d2 | [
"MIT"
] | null | null | null | from datetime import timedelta
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.mgmt.media import AzureMediaServices
from azure.storage.blob import BlobServiceClient
from azure.mgmt.media.models import (
Asset,
Transform,
TransformOutput,
StandardEncoderPreset,
AacAudio,
AacAudioProfile,
H264Video,
H264Complexity,
H264Layer,
Mp4Format,
Filters,
Rectangle,
VideoOverlay,
Job,
JobInputs,
JobInputAsset,
JobOutputAsset,
OnErrorType,
Priority
)
import os
#Timer for checking job progress
import time
#Get environment variables
load_dotenv()
# Get the default Azure credential from the environment variables AZURE_CLIENT_ID and AZURE_CLIENT_SECRET and AZURE_TENANT_ID
default_credential = DefaultAzureCredential()
# Get the environment variables SUBSCRIPTIONID, RESOURCEGROUP and ACCOUNTNAME
subscription_id = os.getenv('SUBSCRIPTIONID')
resource_group = os.getenv('RESOURCEGROUP')
account_name = os.getenv('ACCOUNTNAME')
# The file you want to upload. For this example, the file is placed under Media folder.
# The file ignite.mp4 has been provided for you.
# NOTE(review): os.chdir() always returns None, so source_file_location is always None.
# The call is kept only for its side effect of changing the working directory — TODO confirm and rename.
source_file_location = os.chdir("../../Media/")
source_file = "ignite.mp4"
# This is a random string that will be added to the naming of things so that you don't have to keep doing this during testing
uniqueness = "encodeOverlayPng"
# Use the following PNG image to overlay on top of the video
overlay_file = "AzureMediaService.png"
overlay_label = "overlayCloud"
# Set the attributes of the input Asset using the random number
in_asset_name = 'inputassetName' + uniqueness
in_alternate_id = 'inputALTid' + uniqueness
in_description = 'inputdescription' + uniqueness
# Create an Asset object
# The asset_id will be used for the container parameter for the storage SDK after the asset is created by the AMS client.
in_asset = Asset(alternate_id=in_alternate_id, description=in_description)
# Create the JobInput for the PNG Image Overlay
overlay_asset_name = 'overlayassetName' + uniqueness
overlay_asset_alternate_id = 'inputALTid' + uniqueness
overlay_asset_description = 'inputdescription' + uniqueness
# Create an Asset object for PNG Image overlay
overlay_in_asset = Asset(alternate_id=overlay_asset_alternate_id, description=overlay_asset_description)
# Set the attributes of the output Asset using the random number
out_asset_name = 'outputassetName' + uniqueness
out_alternate_id = 'outputALTid' + uniqueness
out_description = 'outputdescription' + uniqueness
# Create Ouput Asset object
out_asset = Asset(alternate_id=out_alternate_id, description=out_description)
# The AMS Client
print("Creating AMS Client")
client = AzureMediaServices(default_credential, subscription_id)
# Create an input Asset
print(f"Creating input asset {in_asset_name}")
input_asset = client.assets.create_or_update(resource_group, account_name, in_asset_name, in_asset)
# An AMS asset is a container with a specific id that has "asset-" prepended to the GUID.
# So, you need to create the asset id to identify it as the container
# where Storage is to upload the video (as a block blob)
in_container = 'asset-' + input_asset.asset_id
# Create an Overlay input Asset
print(f"Creating input asset {overlay_asset_name}")
overlay_asset = client.assets.create_or_update(resource_group, account_name, overlay_asset_name, overlay_in_asset)
# # An AMS asset is a container with a specific id that has "asset-" prepended to the GUID.
# # So, you need to create the asset id to identify it as the container
# # where Storage is to upload the video (as a block blob)
overlay_container = 'asset-' + overlay_asset.asset_id
# create an output Asset
print(f"Creating output asset {out_asset_name}")
output_asset = client.assets.create_or_update(resource_group, account_name, out_asset_name, out_asset)
### Use the Storage SDK to upload the video ###
print(f"Uploading the file {source_file}")
blob_service_client = BlobServiceClient.from_connection_string(os.getenv('STORAGEACCOUNTCONNECTION'))
blob_client = blob_service_client.get_blob_client(in_container, source_file)
working_dir = os.getcwd()
print(f"Current working directory: {working_dir}")
upload_file_path = os.path.join(working_dir, source_file)
# WARNING: Depending on where you are launching the sample from, the path here could be off, and not include the BasicEncoding folder.
# Adjust the path as needed depending on how you are launching this python sample file.
# Upload the video to storage as a block blob
with open(upload_file_path, "rb") as data:
    blob_client.upload_blob(data)
### Use the Storage SDK to upload the Overlay file
print(f"Uploading the file {overlay_file}")
blob_service_client = BlobServiceClient.from_connection_string(os.getenv('STORAGEACCOUNTCONNECTION'))
blob_client = blob_service_client.get_blob_client(overlay_container, overlay_file)
working_dir = os.getcwd()
print(f"Current working directory: {working_dir}")
upload_file_path = os.path.join(working_dir, overlay_file)
# WARNING: Depending on where you are launching the sample from, the path here could be off, and not include the BasicEncoding folder.
# Adjust the path as needed depending on how you are launching this python sample file.
# Upload the video to storage as a block blob
with open(upload_file_path, "rb") as data:
    blob_client.upload_blob(data)
transform_name = 'H264EncodingOverlayImagePng'
# Create a new BuiltIn Standard encoding Transform for H264 ContentAware Constrained
print(f"Creating Standard Encoding transform named: {transform_name}")
# For this snippet, we are using 'StandardEncoderPreset' with Overlay Image
transform_output = TransformOutput(
    preset = StandardEncoderPreset(
        codecs=[
            # Stereo AAC-LC audio at 128 kbps.
            AacAudio(
                channels=2,
                sampling_rate=48000,
                bitrate=128000,
                profile=AacAudioProfile.AAC_LC
            ),
            # Two H.264 video layers: one HD (720p) and one SD (540p).
            H264Video(
                key_frame_interval=timedelta(seconds=2),
                complexity=H264Complexity.BALANCED,
                layers=[
                    H264Layer(
                        bitrate=3600000,
                        width="1280",
                        height="720",
                        label="HD-3600kbps"
                    ),
                    H264Layer(
                        bitrate=1600000,
                        width="960",
                        height="540",
                        label="SD-1600kbps"
                    )
                ]
            )
        ],
        # Specify the format for the output files - one for video + audio, and another for the thumbnails
        formats=[
            Mp4Format(filename_pattern="Video-{Basename}-{Label}-{Bitrate}{Extension}")
        ],
        filters=Filters(
            overlays=[
                VideoOverlay(
                    input_label=overlay_label, # same label that is used in the JobInput to identify which file in the asset is the actual overlay image .png file.
                    position=Rectangle(left="10%", top="10%"), # left and top position of the overlay in absolute pixel or percentage relative to the source video resolution.
                    # You can also set the height and width of the rectangle to draw into, but there is known problem here.
                    # If you use % for the top and left (or any of these) you have to stick with % for all or you will get a job configuration Error
                    # Also, it can alter your aspect ratio when using percentages, so you have to know the source video size in relation to the source image to
                    # provide the proper image size. Recommendation is to just use the right size image for the source video here and avoid passing in height and width for now.
                    # height: (if above is percentage based, this has to be also! Otherwise pixels are allowed. No mixing. )
                    # width: (if above is percentage based, this has to be also! Otherwise pixels are allowed No mixing. )
                    opacity=0.75, # Sets the blending opacity value to make the image slightly transparent over the video
                    start=timedelta(seconds=0), # Start at beginning of the video
                    fade_in_duration=timedelta(seconds=2), # 2 second fade in
                    fade_out_duration=timedelta(seconds=2), # 2 second fade out
                    end=timedelta(seconds=5) # end the fade out at 5 seconds on the timeline... fade will begin 2 seconds before this end time
                )
            ]
        )
    ),
    # What should we do with the job if there is an error?
    on_error=OnErrorType.STOP_PROCESSING_JOB,
    # What is the relative priority of this job to others? Normal, high or low?
    relative_priority=Priority.NORMAL
)
print("Creating encoding transform...")
# Adding transform details
my_transform = Transform()
my_transform.description="A simple custom H264 encoding transform that overlays a PNG image on the video source"
my_transform.outputs = [transform_output]
print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
    resource_group_name=resource_group,
    account_name=account_name,
    transform_name=transform_name,
    parameters=my_transform)
print(f"{transform_name} created (or updated if it existed already). ")
job_name = 'MyEncodingH264OverlayImagePng'+ uniqueness
print(f"Creating Encoding264OverlayImagePng job {job_name}")
# NOTE(review): 'files' is assigned but never used below — candidate for removal.
files = (source_file, overlay_file)
# Create Video Input Asset
job_video_input_asset = JobInputAsset(asset_name=in_asset_name)
job_input_overlay = JobInputAsset(
    asset_name=overlay_asset_name,
    label=overlay_label # Order does not matter here, it is the "label" used on the Filter and the jobInput Overlay that is important!
)
# Create a list of job inputs - we will add both the video and overlay image assets here as the inputs to the job.
job_inputs=[
    job_video_input_asset,
    job_input_overlay
]
# Create Job Output Asset
outputs = JobOutputAsset(asset_name=out_asset_name)
# Create Job object and then create Trasnform Job
the_job = Job(input=JobInputs(inputs=job_inputs), outputs=[outputs], correlation_data={ "propertyname": "string" })
job: Job = client.jobs.create(resource_group, account_name, transform_name, job_name, parameters=the_job)
# Check Job State
job_state = client.jobs.get(resource_group, account_name, transform_name, job_name)
# First check
print("First job check")
print(job_state.state)
# Check the state of the job every 10 seconds. Adjust time_in_seconds = <how often you want to check for job state>
def countdown(t):
    """Poll the encoding job every *t* seconds until it finishes or fails.

    Displays a live ``MM:SS`` countdown between polls. Implemented as a
    plain loop (the original recursed on every poll, which both grows the
    stack on long-running jobs and silently depends on the module-level
    ``time_in_seconds`` global for the recursion interval).

    :param t: polling interval in whole seconds.
    """
    while True:
        # Count down t seconds, refreshing the timer display in place.
        remaining = t
        while remaining:
            mins, secs = divmod(remaining, 60)
            print('{:02d}:{:02d}'.format(mins, secs), end="\r")
            time.sleep(1)
            remaining -= 1
        # Re-fetch the job and report its current state.
        job_current = client.jobs.get(resource_group, account_name, transform_name, job_name)
        print(job_current.state)
        if job_current.state == "Finished":
            # TODO: Download the output file using blob storage SDK
            return
        if job_current.state == "Error":
            # TODO: Provide Error details from Job through API
            return
        # Any other state (Queued/Scheduled/Processing): keep polling.
# Polling interval in seconds between job-state checks.
time_in_seconds = 10
countdown(int(time_in_seconds))
| 40.411765 | 170 | 0.744269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,575 | 0.507187 |
d10a8191767d3047473533170660c6af542ccdde | 3,858 | py | Python | fixture/project.py | shark-x/py_mantis_traning | d0e262f3833bd8ba570f6ca66f66f44279eac4e6 | [
"Apache-2.0"
] | null | null | null | fixture/project.py | shark-x/py_mantis_traning | d0e262f3833bd8ba570f6ca66f66f44279eac4e6 | [
"Apache-2.0"
] | null | null | null | fixture/project.py | shark-x/py_mantis_traning | d0e262f3833bd8ba570f6ca66f66f44279eac4e6 | [
"Apache-2.0"
] | null | null | null | from model.project import Project
import random
import string
class ProjectHelper:
    """Page-object style helper for managing Mantis projects via Selenium.

    Wraps navigation, creation, lookup and deletion of projects, with a
    simple cache of the project list that is invalidated on mutation.
    """

    # Cached list of Project models; None means "not loaded yet".
    project_cache = None

    def __init__(self, app):
        self.app = app

    def open_project_page(self):
        """Navigate to the 'Manage Projects' page unless it is already open."""
        wd = self.app.wd
        if not wd.current_url.endswith("/manage_proj_page.php"):
            wd.find_element_by_link_text("Manage").click()
            wd.find_element_by_link_text("Manage Projects").click()

    def create(self, project):
        """Create a new project via the web form and return to the home page."""
        wd = self.app.wd
        self.open_project_page()
        wd.find_element_by_xpath("//input[@value='Create New Project']").click()
        self.fill_form(project)
        wd.find_element_by_xpath("//input[@value='Add Project']").click()
        wd.find_element_by_link_text("Proceed").click()
        self.app.open_home_page()
        self.project_cache = None  # the project list changed; invalidate cache

    def fill_form(self, project):
        """Fill the project form; currently only the name field is set."""
        self.change_field_project("name", project.project_name)

    def change_field_project(self, field, text):
        """Clear the named form field and type *text* into it."""
        wd = self.app.wd
        wd.find_element_by_name(field).click()
        wd.find_element_by_name(field).clear()
        wd.find_element_by_name(field).send_keys(text)

    def select_project_table(self):
        """Return the WebElement of the projects table on the manage page."""
        wd = self.app.wd
        self.open_project_page()
        return wd.find_element_by_xpath("//table[@class='width100'][@cellspacing='1']")

    def project_that_name_exists(self, project_name):
        """Return True if a project named *project_name* exists, else False.

        (The original returned None implicitly on a miss; an explicit False
        keeps the same truthiness while being clearer.)
        """
        for element in self.get_project_list():
            if element.project_name == project_name:
                return True
        return False

    def get_project_list(self):
        """Return the projects shown on the manage page as Project models.

        The result is cached; mutating operations reset the cache.
        """
        if self.project_cache is None:
            self.open_project_page()
            self.project_cache = []
            project_table = self.select_project_table()
            rows = project_table.find_elements_by_tag_name("tr")
            # The first two rows are table headers; data rows follow.
            for element in rows[2:]:
                project_name = element.find_element_by_tag_name("a").text
                href = element.find_element_by_tag_name("a").get_attribute("href")
                # The project id is encoded in the link as "...id=<id>".
                id = href[href.find('id=') + 3:]
                self.project_cache.append(Project(project_name=project_name, id=id))
        return list(self.project_cache)

    def generate_some_string(self):
        """Return a random alphanumeric string of length 1..9."""
        length = random.randint(1, 9)
        symbols = string.ascii_letters + string.digits
        return "".join(random.choice(symbols) for _ in range(length))

    def delete_by_name(self, project_name):
        """Delete the project named *project_name* if present."""
        wd = self.app.wd
        self.open_project_page()
        for element in self.get_project_list():
            if element.project_name == project_name:
                projects_table = self.select_project_table()
                projects_table.find_element_by_link_text("%s" % project_name).click()
                wd.find_element_by_xpath("//input[@value='Delete Project']").click()
                wd.find_element_by_xpath("//input[@value='Delete Project']").click()
                self.open_project_page()
                break  # project names are unique; no need to keep scanning
        self.project_cache = None
# self.project_cache = [] | 39.367347 | 87 | 0.634266 | 3,795 | 0.98367 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.222654 |
d10c10ebac6b3f7660217ed74c5259e0bf35c57f | 555 | py | Python | PropertyBazaar/urls.py | rudolphalmeida/PropertyBazaarAPI | bada589e415817b6b4e36a656adae9b70d047884 | [
"MIT"
] | null | null | null | PropertyBazaar/urls.py | rudolphalmeida/PropertyBazaarAPI | bada589e415817b6b4e36a656adae9b70d047884 | [
"MIT"
] | 1 | 2021-06-10T23:19:37.000Z | 2021-06-10T23:19:37.000Z | PropertyBazaar/urls.py | rudolphalmeida/PropertyBazaarAPI | bada589e415817b6b4e36a656adae9b70d047884 | [
"MIT"
] | 1 | 2018-12-22T16:43:52.000Z | 2018-12-22T16:43:52.000Z | from django.conf.urls import url
from PropertyBazaar.views import PropertyList, PropertyDetail, UserDetail, UserList
from rest_framework.urlpatterns import format_suffix_patterns
# REST routes: list and detail endpoints for properties and users.
urlpatterns = [
    url(r'^property/$', PropertyList.as_view(), name='property-list'),
    # Property detail is looked up by numeric primary key.
    url(r'^property/(?P<pk>[0-9]+)/$', PropertyDetail.as_view(), name='property-detail'),
    url(r'^user/$', UserList.as_view(), name='user-list'),
    # User detail is looked up by username (letters only, per the regex).
    url(r'^user/(?P<username>[a-zA-Z]+)/$', UserDetail.as_view(), name='user-detail')
]
urlpatterns = format_suffix_patterns(urlpatterns) | 42.692308 | 89 | 0.724324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.257658 |
d10c14d2ae3b5ea7f31dec6255423208caac1f0c | 1,339 | py | Python | cdedup/testsum.py | salotz/boar | c3022a65217e5befe37100a71632e6540e74992e | [
"Apache-2.0"
] | 2 | 2020-03-31T17:44:31.000Z | 2020-08-21T07:33:15.000Z | cdedup/testsum.py | salotz/boar | c3022a65217e5befe37100a71632e6540e74992e | [
"Apache-2.0"
] | null | null | null | cdedup/testsum.py | salotz/boar | c3022a65217e5befe37100a71632e6540e74992e | [
"Apache-2.0"
] | 1 | 2020-08-21T07:33:39.000Z | 2020-08-21T07:33:39.000Z | from __future__ import print_function
# Copyright 2010 Mats Ekberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
import rollingcs
from time import time
#rollingcs.benchmark()
#rollingcs.test()
# Benchmark fixtures: 1 MiB and 100 MiB of repeated data.
one_mb_data = "x" * (2**20)
one_hundred_mb_data = "x" * (2**20 * 100)
# Benchmark 1: 100 calls of 1 MiB each.
t0 = time()
for i in range(0, 100):
    rollingcs.calc_rolling(one_mb_data, len(one_mb_data))
print("rollingcs.calc_rolling(): 100 mb with 1 mb per call: ", time() - t0)
# Benchmark 2: a single 100 MiB call.
t0 = time()
rollingcs.calc_rolling(one_hundred_mb_data, len(one_hundred_mb_data))
print("rollingcs.calc_rolling(): 100 mb with 100 mb per call: ", time() - t0)
# Benchmark 3: the RollingChecksum object API over 100 MiB.
rs = rollingcs.RollingChecksum(1023, rollingcs.IntegerSet(1))
t0 = time()
rs.feed_string(one_hundred_mb_data)
rs.value()
print("rollingcs.RollingChecksum.feed_string(): 100 mb with 100 mb per call: ", time() - t0)
| 32.658537 | 92 | 0.74832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 789 | 0.589246 |
d10c91aeb1ebbaeec21b56fc9e92b2459863b1e8 | 160 | py | Python | app/models/forms.py | raimota/Gerador-Validador-CPF_CNPJ | e2af5ec35995b63b4bb739af92e9532563d9ed12 | [
"MIT"
] | null | null | null | app/models/forms.py | raimota/Gerador-Validador-CPF_CNPJ | e2af5ec35995b63b4bb739af92e9532563d9ed12 | [
"MIT"
] | null | null | null | app/models/forms.py | raimota/Gerador-Validador-CPF_CNPJ | e2af5ec35995b63b4bb739af92e9532563d9ed12 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
class Campos(FlaskForm):
es = StringField('es') | 26.666667 | 43 | 0.8125 | 51 | 0.31875 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.025 |
d10dfd254ad04354b6df951eaf41bb1393f023dc | 1,642 | py | Python | setup.py | mvandam/CEO | eedca8bafe2aaf5b434bafed445eb2c6367914bf | [
"Zlib"
] | 18 | 2016-02-29T12:41:52.000Z | 2021-12-03T15:10:34.000Z | setup.py | mvandam/CEO | eedca8bafe2aaf5b434bafed445eb2c6367914bf | [
"Zlib"
] | 23 | 2015-04-27T14:17:19.000Z | 2021-11-29T22:19:12.000Z | setup.py | mvandam/CEO | eedca8bafe2aaf5b434bafed445eb2c6367914bf | [
"Zlib"
] | 17 | 2015-04-09T14:13:16.000Z | 2022-02-17T10:03:00.000Z | #!/usr/bin/env python
import os
import sys
import distutils.cmd
import distutils.log
import setuptools
import subprocess
from distutils.core import setup
import setuptools.command.build_py
# Make the bundled python/ package importable while setup.py runs.
sys.path.append(os.path.dirname(__file__)+"/python")
print(sys.path)
class MakeCeoCommand(distutils.cmd.Command):
    """Custom setup.py command that runs ``make all cython`` to build CEO."""

    description = 'Make CEO'
    user_options = [
        # The format is (long option, short option, description).
        ('none=', None, ''),
    ]

    def initialize_options(self):
        """Set default values for options."""
        # Each user option must be listed here with their default value.
        pass

    def finalize_options(self):
        """Post-process options."""
        pass

    def run(self):
        """Run ``make all cython``; raises CalledProcessError on failure."""
        command = ['/usr/bin/make']
        #if self.pylint_rcfile:
        #    command.append('--rcfile=%s' % self.pylint_rcfile)
        #command.append(os.getcwd())
        command.append('all')
        command.append('cython')
        self.announce(
            'Running command: %s' % str(command),
            level=distutils.log.INFO)
        subprocess.check_call(command)
class BuildPyCommand(setuptools.command.build_py.build_py):
    """build_py variant that builds the native CEO library first."""

    def run(self):
        # Run the make step before the normal pure-Python build.
        self.run_command('make_ceo')
        setuptools.command.build_py.build_py.run(self)
# Package metadata; the custom commands hook the make-based build into setup.
setuptools.setup(
    cmdclass={
        'make_ceo': MakeCeoCommand,
        'build_py': BuildPyCommand,
    },
    name='ceo',
    version='1.0',
    description='Cuda--Engined Optics',
    author='Rodolphe Conan',
    author_email='conan.rod@gmail.com',
    url='http://rconan.github.io/CEO/',
    packages=['python.ceo']
)
| 24.507463 | 68 | 0.672351 | 1,050 | 0.639464 | 0 | 0 | 0 | 0 | 0 | 0 | 637 | 0.387942 |
d10e119827edf64c5992be98368eca488d789d1c | 263 | py | Python | users/tokens.py | maks-nurgazy/diploma-project | 66889488ffaa0269e1be2df6f6c76a3ca68a3cfb | [
"MIT"
] | null | null | null | users/tokens.py | maks-nurgazy/diploma-project | 66889488ffaa0269e1be2df6f6c76a3ca68a3cfb | [
"MIT"
] | null | null | null | users/tokens.py | maks-nurgazy/diploma-project | 66889488ffaa0269e1be2df6f6c76a3ca68a3cfb | [
"MIT"
] | null | null | null | from rest_framework_simplejwt.tokens import RefreshToken
def get_jwt_tokens_for_user(user, **kwargs):
    """Return a ``(refresh, access)`` JWT string pair for *user*.

    Extra keyword arguments are accepted for interface compatibility but
    are not used.
    """
    token = RefreshToken.for_user(user)
    return str(token), str(token.access_token)
| 23.909091 | 56 | 0.737643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.228137 |
d10f214ef69fa8548dfc5cc8ae64127b9968d418 | 376 | py | Python | tests/bitly/*REPL* [python].py | goldsborough/lnk | 1487d272a70329571c77c0ec17c394dc6a1d088f | [
"MIT"
] | 3 | 2017-06-16T18:51:54.000Z | 2018-04-08T19:36:12.000Z | tests/bitly/*REPL* [python].py | goldsborough/lnk | 1487d272a70329571c77c0ec17c394dc6a1d088f | [
"MIT"
] | 2 | 2021-02-08T20:17:54.000Z | 2021-04-30T20:35:44.000Z | tests/bitly/*REPL* [python].py | goldsborough/lnk | 1487d272a70329571c77c0ec17c394dc6a1d088f | [
"MIT"
] | 1 | 2019-11-06T19:05:30.000Z | 2019-11-06T19:05:30.000Z | Python 3.5.0 (default, Sep 14 2015, 02:37:27)
[GCC 4.2.1 Compatible Apple LLVM 6.1.0 (clang-602.0.53)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> a = [{'a': 1}, {'b': 2}]
>>> sorted(a)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unorderable types: dict() < dict()
>>> b = [{'b': 2}, {'a': 1}] | 41.777778 | 70 | 0.606383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.148936 |
d10fc18cdd03e862b01f58847357abc40d702c0b | 12,015 | py | Python | src/server/server.py | ZePaiva/Secure-Hearts | 3a2cde096156c56c29c43b21109e096e577c5346 | [
"MIT"
] | null | null | null | src/server/server.py | ZePaiva/Secure-Hearts | 3a2cde096156c56c29c43b21109e096e577c5346 | [
"MIT"
] | null | null | null | src/server/server.py | ZePaiva/Secure-Hearts | 3a2cde096156c56c29c43b21109e096e577c5346 | [
"MIT"
] | null | null | null | # logging
import logging
import coloredlogs
# server
import socket
import json
import sys
import traceback
# threading
from _thread import *
# croupier
from croupier import Croupier
# cryptography
from server_crypto import *
from utils.server_utils import *
from utils.server_utils import *
# server logging
# server logging: colour styles for the field names and the log levels.
server_log_colors=coloredlogs.parse_encoded_styles('asctime=green;hostname=magenta;levelname=white,bold;name=blue,bold;programname=cyan')
level_colors=coloredlogs.parse_encoded_styles('spam=white;info=blue;debug=green;warning=yellow;error=red;critical=red,bold')
server_logger=logging.getLogger('SERVER')
# Maximum chunk size (512 KiB) used for socket sends and receives.
BUFFER_SIZE=512*1024
class SecureServer(object):
    """TCP game server for the Hearts card game.

    Accepts clients, exchanges JSON payloads (optionally wrapped by the
    cryptography layer) and dispatches game operations to the Croupier.
    One communication thread is started per connected client.
    """

    def __init__(self, host='0.0.0.0', port=8080, log_level='DEBUG', tables=4):
        # logging
        coloredlogs.install(level=log_level, fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level_styles=level_colors, field_styles=server_log_colors)
        self.tables=tables
        # server socket
        # NOTE(review): the print("ola") calls look like leftover debug output — candidates for removal.
        print("ola")
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print("ola")
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        print("ola")
        self.sock.bind((host,port))
        print("ola")
        # Backlog sized for 4 players per table.
        self.sock.listen(4*self.tables)
        server_logger.info('Server located @ HOST='+host+' | PORT='+str(port))
        # game related
        self.clients = {}
        self.croupier = Croupier()
        server_logger.debug('Croupier UP')
        # security related
        self.cryptography=CryptographyServer(log_level)
        server_logger.debug('Cryptography UP')

    def accept_client(self):
        """Accept one connection and register it in self.clients.

        Returns (conn, addr) on success, or None for a duplicate
        connection or any accept failure (exceptions are swallowed).
        """
        try:
            conn, addr = self.sock.accept()
            if conn in self.clients:
                server_logger.warning('Client %s already exists', conn)
                return None
            self.clients[conn] = {
                "address":addr,
                "conn":conn
            }
            server_logger.info("Client " + str(conn) + " accepted.")
            return conn, addr
        except Exception as e:
            # NOTE(review): errors here are silently dropped; run() logs nothing for them.
            return None

    def send_payload(self, payload, conn):
        """JSON-encode *payload* and send it to *conn* in BUFFER_SIZE chunks."""
        payload=json.dumps(payload)
        while payload:
            to_send=payload[:BUFFER_SIZE]
            conn.send(to_send.encode('utf-8'))
            payload=payload[BUFFER_SIZE:]

    def receive_payload(self, conn):
        """Receive data from *conn* until the buffer parses as one JSON value."""
        res=''
        while True:
            req=conn.recv(BUFFER_SIZE)
            res+=req.decode('utf-8')
            try:
                r=json.loads(res)
                server_logger.debug(str(r))
                return r
            except:
                # Incomplete JSON so far — keep reading.
                continue

    def get_conn_from_username(self, username):
        """Return the connection registered under *username*.

        NOTE(review): if no client matches, this returns the last
        connection iterated (or raises if there are no clients) — TODO
        confirm intended behavior.
        """
        for connection in self.clients.keys():
            if self.clients[connection]["username"]==username:
                break
        return connection

    def require_action(self, conn, answer="", success=1, mode="pre-game", table=None, nplayers=0, username=None):
        """Send a 'server@require_action' payload to *conn*.

        The payload is wrapped by the cryptography layer when the client
        is known; on a closed connection the client is dropped.
        """
        payload = {
            "operation":"server@require_action",
            "answer":answer,
            "success":success,
            "mode":mode,
            "table":table,
            "nplayers":nplayers,
            "username":username
        }
        try:
            payload=self.cryptography.secure_package(self.clients[conn]['address'], payload, 'server@require_action',update_public_key=True)
        except KeyError:
            # Unknown connection: fall back to sending the plain payload.
            server_logger.warning("Message not encapsulated")
        try:
            self.send_payload(payload, conn)
        except OSError:
            self.delete_client(conn)
            server_logger.warning("Connection was closed")

    def delete_client(self, conn):
        """Remove *conn* from the croupier (best effort) and close it."""
        try:
            self.croupier.delete_player(conn)
        except UnboundLocalError:
            # NOTE(review): presumably guards a bug inside Croupier.delete_player — verify.
            pass
        # username = self.croupier.get_username(conn)
        conn.close()
        # server_logger.info("Disconnected " + str(username))
        server_logger.info("Disconnected " + str(conn))

    def communication_thread(self, conn, addr):
        """Per-client loop: receive payloads and dispatch by 'operation'."""
        while 1:
            try:
                payload=self.receive_payload(conn)
            except ConnectionResetError: # connection was reseted
                self.delete_client(conn)
                break
            except OSError: # connection was closed
                self.delete_client(conn)
                break
            # client dead
            if not payload:
                self.delete_client(conn)
                break
            # parsing data
            operation = payload["operation"]
            # MUST BE SAFE - handle client connecting
            if operation=="client@register_player":
                client,response=self.cryptography.sign_in(self.clients[conn]['address'], payload)
                # client failed to pass security to log in
                if not client:
                    server_logger.warning('bad client tried to sign in')
                    server_logger.debug(response)
                    response['operation']='server@register_failed'
                    self.send_payload(payload, conn)
                    # NOTE(review): os._exit(0) kills the whole server process, not just this client — verify intent.
                    conn.close()
                    os._exit(0)
                # client passed security
                success=self.croupier.add_player(conn, addr, client['username'])
                # client was succesffully added to croupier
                if success:
                    self.clients[conn]["username"]=client['username']
                    self.require_action(conn, answer="client@register_player", success=success, username=client['username'])
                    server_logger.info("Requested for cryptography data from client")
                # or not
                else:
                    payload = {
                        "operation":"server@register_failed",
                        "error":"error@username_taken"
                    }
                    self.send_payload(payload, conn)
                    server_logger.warning("Informed client that username is already taken")
            # CAN BE UNSAFE - handle client disconnecting
            elif operation=="client@disconnect_client":
                self.delete_client(conn)
                break
            # CAN BE UNSAFE - handle client asking online users
            elif operation=="player@request_online_users":
                self.send_payload(self.croupier.send_online_players(conn), conn)
            # CAN BE UNSAFE - handle client asking possible tables
            elif operation=="player@request_tables_online":
                self.send_payload(self.croupier.send_online_tables(conn), conn)
            # CAN BE UNSAFE - handle client asking to create table
            elif operation=="player@request_create_table":
                success = self.croupier.create_table(payload, conn)
                if success:
                    nplayers = self.croupier.tables[payload["table"]]["nplayers"]
                    self.require_action(conn, answer=operation, success=success, table=payload["table"], nplayers=nplayers)
                else:
                    self.require_action(conn, answer=operation, success=success, table=None)
            # CAN BE UNSAFE - handle client asking to delete table
            elif operation=="player@request_delete_table":
                success = self.croupier.delete_table(payload, conn)
                if success:
                    self.require_action(conn, answer=operation, success=success, table=None)
                else:
                    self.require_action(conn, answer=operation, success=success, table=payload["table"])
            # CAN BE UNSAFE SOMETIMES - handling client asking to join table
            elif operation=="player@request_join_table":
                # join_player_table returns 0 (failure), 1 (joined) or the
                # list of connections when the table became full.
                success = self.croupier.join_player_table(payload, conn)
                if success==0:
                    self.require_action(conn, answer=operation, success=success, table=None)
                elif success==1:
                    nplayers = self.croupier.tables[payload["table"]]["nplayers"]
                    self.require_action(conn, answer=operation, success=success, table=payload["table"], nplayers=nplayers)
                else:
                    # game has started
                    connections = success
                    nplayers = self.croupier.tables[payload["table"]]["nplayers"]
                    # send information about game starting
                    for connection in connections:
                        self.require_action(connection, answer="player@game_start", success=1, mode="in-game", table=payload["table"], nplayers=nplayers)
                        server_logger.info("Sent information about the starting of the game to " + self.croupier.get_username(connection))
                    # send cards to the first player in the queue (table's order)
                    player_order = self.croupier.tables[payload["table"]]["order"]
                    connection = self.croupier.players_conn[player_order[0]]
                    # increment distribution idx
                    self.croupier.tables[payload["table"]]["cards_dist_idx"] += 1
                    print("PAYLOAD TABLE: " + str(payload["table"]))
                    payload=self.croupier.give_shuffled_cards(payload["table"], connection)
                    # TODO cipher cards
                    self.send_payload(
                        self.cryptography.secure_package(
                            payload
                        ),
                        conn
                    )
            # CAN BE UNSAFE - handling client asking to leave table
            elif operation=="player@request_leave_table":
                success = self.croupier.remove_player_table(payload, conn)
                if success:
                    self.require_action(conn, answer=operation, success=success, table=None)
                else:
                    self.require_action(conn, answer=operation, success=success, table=payload["table"])
            # CAN BE UNSAFE - handling client asking to leave game
            elif operation=="player@request_leave_croupier":
                self.delete_client(conn)
                break
            # MUST BE SAFE - player returns the shuffled cards
            elif operation=="player@return_shuffled_cards":
                idx = self.croupier.tables[payload["table"]]["cards_dist_idx"]
                if(idx != 0): # if distribution isn't complete
                    player_order = self.croupier.tables[payload["table"]]["order"]
                    connection = self.croupier.players_conn[player_order[idx]]
                    payload=self.croupier.give_shuffled_cards(payload["table"], connection)
                    # cipher/decipher cards
                    self.send_payload(
                        self.cryptography.secure_package(payload),
                        conn
                    )
                    # update table order idx
                    self.croupier.tables[payload["table"]]["cards_dist_idx"] = (idx + 1) % 4
                else:
                    server_logger.warning("DISTRIBUTION OF DECKS COMPLETED")

    def run(self):
        """Main accept loop: spawn one communication thread per client."""
        while True:
            try:
                conn, addr = self.accept_client()
                start_new_thread(self.communication_thread,(conn, addr, ))
            except Exception as e:
                server_logger.exception(e)

    def pause(self):
        """Close the listening socket and drop all clients."""
        server_logger.info('Server paused, press CTRL+C again to exit')
        try:
            self.sock.close()
        except:
            server_logger.exception("Server Stopping")
        for client in self.clients:
            client.close()
        # NOTE(review): self.clients is a dict elsewhere but reset to a list here — TODO confirm.
        self.clients=[]
        time.sleep(5)

    def exit(self):
        """Close the socket and exit with status 0."""
        server_logger.info('Exiting...')
        self.sock.close()
        sys.exit(0)

    def emergency_exit(self, exception):
        """Log *exception* and exit with status 1."""
        server_logger.critical('An Exception caused an emergency exit')
        server_logger.exception(exception)
        sys.exit(1)
| 43.690909 | 163 | 0.574282 | 11,372 | 0.946484 | 0 | 0 | 0 | 0 | 0 | 0 | 2,817 | 0.234457 |
d11150f9da78258d94fd1eac873f91d70d526a04 | 1,098 | py | Python | alphatwirl/parallel/parallel.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | alphatwirl/parallel/parallel.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | 7 | 2018-02-26T10:32:26.000Z | 2018-03-19T12:27:12.000Z | alphatwirl/parallel/parallel.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | # Tai Sakuma <tai.sakuma@gmail.com>
##__________________________________________________________________||
class Parallel(object):
    """Pair a progress monitor with a communication channel and drive
    their shared begin/terminate/end lifecycle."""

    def __init__(self, progressMonitor, communicationChannel, workingarea=None):
        self.progressMonitor = progressMonitor
        self.communicationChannel = communicationChannel
        self.workingarea = workingarea

    def __repr__(self):
        attribute_names = ('progressMonitor', 'communicationChannel', 'workingarea')
        rendered = ', '.join(
            '{}={!r}'.format(name, getattr(self, name)) for name in attribute_names
        )
        return '{}({})'.format(self.__class__.__name__, rendered)

    def begin(self):
        """Start the progress monitor, then the communication channel."""
        self.progressMonitor.begin()
        self.communicationChannel.begin()

    def terminate(self):
        """Forcefully stop the communication channel."""
        self.communicationChannel.terminate()

    def end(self):
        """Shut down the progress monitor, then the communication channel."""
        self.progressMonitor.end()
        self.communicationChannel.end()
##__________________________________________________________________||
| 32.294118 | 80 | 0.65847 | 917 | 0.835155 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.225865 |
d112e5e4e9404a987fd2539dd0c5729a2d97741e | 210 | py | Python | app/events/client/commands/template.py | Hacker-1202/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 14 | 2021-11-05T11:27:25.000Z | 2022-02-28T02:04:32.000Z | app/events/client/commands/template.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 2 | 2022-01-24T22:00:44.000Z | 2022-01-31T13:13:27.000Z | app/events/client/commands/template.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 5 | 2022-01-02T13:33:17.000Z | 2022-02-26T13:09:50.000Z | from app.vars.client import client
from app.helpers import Notify, params
from app.filesystem import cfg
@client.command()
async def template(ctx):
    """Handler for the ``template`` command.

    NOTE(review): ``notify`` is constructed but never used in the visible
    code -- the rest of this handler appears to be truncated in this chunk;
    confirm against the complete file before relying on this behaviour.
    """
    notify = Notify(ctx=ctx, title='Template File...')
| 23.333333 | 54 | 0.733333 | 0 | 0 | 0 | 0 | 97 | 0.461905 | 79 | 0.37619 | 18 | 0.085714 |
d113b6f34bb07a1a16e4fef926c78e11ef306ee3 | 129 | py | Python | cwf2neo/tests/__init__.py | sintax1/cwf2neo | 25a8186798a6611f91e4b39052c3baa2023fb5b1 | [
"Apache-2.0"
] | 1 | 2021-06-02T11:44:00.000Z | 2021-06-02T11:44:00.000Z | cwf2neo/tests/__init__.py | sintax1/cwf2neo | 25a8186798a6611f91e4b39052c3baa2023fb5b1 | [
"Apache-2.0"
] | null | null | null | cwf2neo/tests/__init__.py | sintax1/cwf2neo | 25a8186798a6611f91e4b39052c3baa2023fb5b1 | [
"Apache-2.0"
] | 1 | 2021-11-27T00:33:28.000Z | 2021-11-27T00:33:28.000Z | import sys # NOQA
import os
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, current_path + '/../')
| 21.5 | 57 | 0.72093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.093023 |
d11621515563b1ecfde422cf5290bbe6f0dd81a9 | 1,799 | py | Python | core/setup.py | kdart/pycopia | 1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d | [
"Apache-2.0"
] | 89 | 2015-03-26T11:25:20.000Z | 2022-01-12T06:25:14.000Z | core/setup.py | kdart/pycopia | 1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d | [
"Apache-2.0"
] | 1 | 2015-07-05T03:27:43.000Z | 2015-07-11T06:21:20.000Z | core/setup.py | kdart/pycopia | 1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d | [
"Apache-2.0"
] | 30 | 2015-04-30T01:35:54.000Z | 2022-01-12T06:19:49.000Z | #!/usr/bin/python2.7
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
import sys
from setuptools import setup
from glob import glob
NAME = "pycopia-core"
VERSION = "1.0"
if sys.platform.startswith("linux"):
DATA_FILES = [('/etc/pycopia', glob("etc/*"))]
else:
DATA_FILES = []
setup (name=NAME, version=VERSION,
namespace_packages = ["pycopia"],
packages = ["pycopia",
"pycopia.physics",
"pycopia.ISO",
"pycopia.inet",
"pycopia.OS",
"pycopia.OS.CYGWIN_NT",
"pycopia.OS.Darwin",
"pycopia.OS.FreeBSD",
"pycopia.OS.SunOS",
"pycopia.OS.Win32",
"pycopia.OS.Linux",
"pycopia.OS.Linux.proc",
"pycopia.OS.Linux.proc.net",
],
# install_requires = ['pycopia-utils>=1.0.dev-r138,==dev'],
dependency_links = [
"http://www.pycopia.net/download/"
],
package_data = {
'': ['*.txt', '*.doc'],
},
test_suite = "test.CoreTests",
data_files = DATA_FILES,
scripts = glob("bin/*"),
zip_safe = False,
description = "Core components of the Pycopia application framework.",
long_description = """Core components of the Pycopia application framework.
Modules used by other PYcopia packages, that you can also use in your
applications. There is a asynchronous handler interface, CLI tools,
and misc modules.
""",
license = "LGPL",
author = "Keith Dart",
author_email = "keith@kdart.com",
keywords = "pycopia framework core Linux",
url = "http://www.pycopia.net/",
#download_url = "ftp://ftp.pycopia.net/pub/python/%s.%s.tar.gz" % (NAME, VERSION),
classifiers = ["Operating System :: POSIX",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers"],
)
| 28.555556 | 86 | 0.619233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,070 | 0.594775 |
d116b13ce48393f3923096c921dfa4b0c8f125c6 | 6,646 | py | Python | datasets_sysu.py | mpeven/ntu_rgb | 4a8b43c521500907d2f241e4b440381cf8c62350 | [
"MIT"
] | 19 | 2017-12-21T12:06:01.000Z | 2021-03-13T08:15:38.000Z | datasets_sysu.py | 3huo/ntu_rgb | 4a8b43c521500907d2f241e4b440381cf8c62350 | [
"MIT"
] | 2 | 2019-07-26T02:27:32.000Z | 2019-12-13T06:56:22.000Z | datasets_sysu.py | mpeven/ntu_rgb | 4a8b43c521500907d2f241e4b440381cf8c62350 | [
"MIT"
] | 7 | 2018-09-20T06:54:18.000Z | 2021-03-16T09:12:50.000Z | from sysu_dataset import SYSU
import numpy as np
import scipy
import itertools
import cv2
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from config import *
vox_size=54
all_tups = np.array(list(itertools.product(range(vox_size), repeat=2)))
rot_array = np.arange(vox_size*vox_size).reshape([vox_size,vox_size])
K = 5
T = 10
class SYSUdataset(Dataset):
def __init__(self, test=False, full_train=False):
# Underlying dataset and features
self.dataset = SYSU()
# What to return
self.images = DATA_IMAGES
self.images_3D = DATA_IMAGES_3D
self.op_flow = DATA_OP_FLOW
self.op_flow_2D = DATA_OP_FLOW_2D
self.single_feature = DATA_SINGLE_FEAT
self.augmentation = DATA_AUGMENTATION
# Train, validation, test split
self.train = full_train
if test:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[1]
else:
self.vid_ids = self.dataset.get_splits(SPLIT_NUMBER)[0]
def __len__(self):
return len(self.vid_ids)
def image_transforms(self, numpy_imgs):
''' Transformations on a list of images
Returns
-------
images : Torch Tensor
Stacked tensor of all images with the transformations applied
'''
# Get random parameters to apply same transformation to all images in list
color_jitter = transforms.ColorJitter.get_params(.25,.25,.25,.25)
rotation_param = transforms.RandomRotation.get_params((-15,15))
crop_params = None
# Apply transformations
images = []
for numpy_img in numpy_imgs:
i = transforms.functional.to_pil_image(numpy_img)
i = transforms.functional.resize(i, (224,224))
if self.train:
i = color_jitter(i)
i = transforms.functional.rotate(i, rotation_param)
i = transforms.functional.to_tensor(i)
i = transforms.functional.normalize(i, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
images.append(i)
return torch.stack(images)
def op_flow_transforms(self, op_flow):
''' Transformations on a tensor of optical flow voxel grids
Parameters
----------
op_flow : ndarray
Returns
-------
op_flow : Torch Tensor
A torch tensor of an optical flow voxel grid with the
transformations (rotation, scale, translation) applied to it
'''
def translate(op_flow):
# op_flow[:,0::3,:,:,:] ---> x axis vectors
# op_flow = scipy.ndimage.interpolation.shift(op_flow, [0,0,x_move,y_move,z_move], cval=0, order=0) # Slower alternative
# Get amount to shift
max_shift = int(op_flow.shape[2] * 0.10)
x_move, y_move, z_move = np.random.randint(-max_shift, max_shift, 3)
# Translate values
if x_move > 0:
op_flow[:,:,x_move:,:,:] = op_flow[:,:,:-x_move,:,:]
op_flow[:,:,:x_move,:,:] = 0
elif x_move < 0:
op_flow[:,:,:x_move,:,:] = op_flow[:,:,-x_move:,:,:]
op_flow[:,:,x_move:,:,:] = 0
if y_move > 0:
op_flow[:,:,:,y_move:,:] = op_flow[:,:,:,:-y_move,:]
op_flow[:,:,:,:y_move,:] = 0
elif y_move < 0:
op_flow[:,:,:,:y_move,:] = op_flow[:,:,:,-y_move:,:]
op_flow[:,:,:,y_move:,:] = 0
if z_move > 0:
op_flow[:,:,:,:,z_move:] = op_flow[:,:,:,:,:-z_move]
op_flow[:,:,:,:,:z_move] = 0
elif z_move < 0:
op_flow[:,:,:,:,:z_move] = op_flow[:,:,:,:,-z_move:]
op_flow[:,:,:,:,z_move:] = 0
return op_flow
def rotate(op_flow):
''' Rotate an optical flow tensor a random amount about the y axis '''
# Get angle
angle = np.random.randint(-45, 45)
# Rotate positions
rot_mat = scipy.ndimage.interpolation.rotate(rot_array, angle, (0,1), reshape=False, order=0)
op_flow_new = np.zeros(op_flow.shape, dtype=np.float32)
tup = all_tups[rot_mat]
op_flow_new = op_flow[:,:,tup[:, :, 0],:,tup[:, :, 1]].transpose(2,3,0,4,1)
# Rotate flow vectors
cos = np.cos(np.radians(-angle))
sin = np.sin(np.radians(-angle))
x_copy = op_flow_new[:,0].copy()
z_copy = op_flow_new[:,2].copy()
op_flow_new[:,0] = x_copy * cos + z_copy * sin
op_flow_new[:,2] = x_copy * -sin + z_copy * cos
return op_flow_new
def scale(op_flow):
return op_flow
# import datetime as dt
if self.train:
op_flow = translate(op_flow)
op_flow = rotate(op_flow)
return torch.from_numpy(op_flow)
def get_3D_op_flow(self, vid_id):
# Load the data
feat_values = np.load("{}/{:05}.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_nonzero = np.load("{}/{:05}.nonzeros.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
feat_shape = np.load("{}/{:05}.shape.npy".format(CACHE_3D_VOX_FLOW_SYSU, vid_id))
# Rebuild the feature from the saved data
feature = np.zeros(feat_shape, np.float32)
feature[tuple(feat_nonzero)] = feat_values
return feature
def __getitem__(self, idx):
vid_id = self.vid_ids[idx]
to_return = []
# Images
if self.images:
images = np.load('{}/{:05}.npy'.format(CACHE_2D_IMAGES_SYSU, vid_id))
images = self.image_transforms(images)
to_return.append(images)
# Optical flow 3D
if self.op_flow:
op_flow = self.get_3D_op_flow(vid_id)
op_flow = self.op_flow_transforms(op_flow)
to_return.append(op_flow)
# Labels
to_return.append(self.dataset.get_label(vid_id))
return to_return
def get_train_loader():
dataset = SYSUdataset(full_train=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=True, num_workers=NUM_WORKERS,
pin_memory=True)
def get_test_loader():
dataset = SYSUdataset(test=True)
return torch.utils.data.DataLoader(dataset, batch_size=DATA_BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,
pin_memory=True)
| 32.578431 | 132 | 0.567108 | 5,711 | 0.859314 | 0 | 0 | 0 | 0 | 0 | 0 | 1,209 | 0.181914 |
d1170ce42494e908356aac4f4b85a5adc8b67a14 | 2,814 | py | Python | CNN.py | psmishra7/CryptocurrencyPrediction | 96f85ba45d1acbd531ad86f7f9ba32b9acd3ddaf | [
"MIT"
] | null | null | null | CNN.py | psmishra7/CryptocurrencyPrediction | 96f85ba45d1acbd531ad86f7f9ba32b9acd3ddaf | [
"MIT"
] | null | null | null | CNN.py | psmishra7/CryptocurrencyPrediction | 96f85ba45d1acbd531ad86f7f9ba32b9acd3ddaf | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as numpy
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv1D, MaxPooling1D, LeakyReLU, PReLU
from keras.utils import np_utils
from keras.callbacks import CSVLogger, ModelCheckpoint
import h5py
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Use CNN to capture local temporal dependency of data in risk prediction or other related tasks.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
with h5py.File(''.join(['bitcoin2012_2017_50_30_prediction.h5']), 'r') as hf:
datas = hf['inputs'].value
labels = hf['outputs'].value
output_file_name='bitcoin2015to2017_close_CNN_2_relu'
step_size = datas.shape[1]
batch_size= 8
nb_features = datas.shape[2]
epochs = 100
#split training validation
training_size = int(0.8* datas.shape[0])
training_datas = datas[:training_size,:]
training_labels = labels[:training_size,:]
validation_datas = datas[training_size:,:]
validation_labels = labels[training_size:,:]
#build model
# 2 layers
model = Sequential()
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=3, filters=8, kernel_size=20))
#model.add(PReLU())
model.add(Dropout(0.5))
model.add(Conv1D( strides=4, filters=nb_features, kernel_size=16))
'''
# 3 Layers
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=3, filters=8, kernel_size=8))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D(activation='relu', strides=2, filters=8, kernel_size=8))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D( strides=2, filters=nb_features, kernel_size=8))
# 4 layers
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=2, filters=8, kernel_size=2))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D(activation='relu', strides=2, filters=8, kernel_size=2))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D(activation='relu', strides=2, filters=8, kernel_size=2))
#model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Conv1D( strides=2, filters=nb_features, kernel_size=2))
'''
model.compile(loss='mse', optimizer='adam')
model.fit(training_datas, training_labels,verbose=1, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger(output_file_name+'.csv', append=True),ModelCheckpoint('weights/'+output_file_name+'-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1,mode='min')])
# model.fit(datas,labels)
#model.save(output_file_name+'.h5')
| 33.105882 | 328 | 0.767235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,332 | 0.473348 |
d11727512e7e8babcf124e894f743183a114c424 | 586 | py | Python | wordcloud.py | jim-spyropoulos/NLP-in-Neswpaper-articles | 1a229874f535e198e635d50e5d9bdfc75685feca | [
"Apache-2.0"
] | 1 | 2019-07-25T05:54:10.000Z | 2019-07-25T05:54:10.000Z | wordcloud.py | jJimo/NLP-in-Neswpaper-articles | 1a229874f535e198e635d50e5d9bdfc75685feca | [
"Apache-2.0"
] | null | null | null | wordcloud.py | jJimo/NLP-in-Neswpaper-articles | 1a229874f535e198e635d50e5d9bdfc75685feca | [
"Apache-2.0"
] | 1 | 2022-02-22T13:03:19.000Z | 2022-02-22T13:03:19.000Z | import pandas as pd
import matplotlib.pyplot as plt
from os import path
from wordcloud import WordCloud
#d = path.dirname(__file__)
df=pd.read_csv("train_set.csv",sep="\t")
categories=["Business","Film","Football","Politics","Technology"]
for category in categories:
text=""
for index,row in df.iterrows():
if row["Category"]==category:
text=text+row["Title"]
wordcloud = WordCloud(max_font_size=40, relative_scaling=.5).generate(text)
plt.imshow(wordcloud)
plt.axis("off")
fig1=plt.gcf()
plt.show()
fig1.savefig(category+'.png',dpi=100)
| 21.703704 | 77 | 0.691126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.213311 |
d1183372da6062d23499982cf0b0d29bf10d59d1 | 3,260 | py | Python | openqemist/tests/problem_decomposition/dmet/test_dmet_orbitals.py | 1QB-Information-Technologies/openqemist | e2ab887af31d78d03dcb92cfa3a0705b2436823d | [
"Apache-2.0"
] | 35 | 2019-05-31T22:37:23.000Z | 2022-01-06T12:01:18.000Z | openqemist/tests/problem_decomposition/dmet/test_dmet_orbitals.py | rickyHong/1Qbit-QEMIST-repl | 863fafdbc5bcd2c267b6a57dfa06b050aa053a6d | [
"Apache-2.0"
] | 2 | 2021-03-23T22:34:23.000Z | 2021-06-23T13:09:46.000Z | openqemist/tests/problem_decomposition/dmet/test_dmet_orbitals.py | rickyHong/1Qbit-QEMIST-repl | 863fafdbc5bcd2c267b6a57dfa06b050aa053a6d | [
"Apache-2.0"
] | 10 | 2019-06-06T23:14:18.000Z | 2021-12-02T21:56:13.000Z | # Copyright 2019 1QBit
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the construction of localized orbitals for DMET calculation
"""
import unittest
from openqemist.problem_decomposition.dmet._helpers.dmet_orbitals import dmet_orbitals
from openqemist.problem_decomposition.electron_localization import iao_localization
from pyscf import gto, scf
import numpy as np
def get_file_path_stub():
    """Return the directory that holds this module's reference result files.

    The repository is laid out as ``$SOMETHING/openqemist/openqemist/tests/$X``,
    so the current working directory is trimmed just after the first
    occurrence of ``tests`` and the fixed sub-path appended.  This lets the
    tests be started from anywhere inside the tree.
    """
    import os
    marker = "tests"
    working_dir = os.getcwd()
    # Slice up to and including the marker (find + len(marker)).
    cut = working_dir.find(marker) + len(marker)
    tests_root = working_dir[:cut]
    return tests_root + "/problem_decomposition/dmet/"
class TestDMETorbitals(unittest.TestCase):
    """ Generate the localized orbitals employing IAOs """

    def test_orbital_construction(self):
        """Build the IAO-localized DMET orbitals for octane in the 3-21G
        basis and compare every matrix element against reference values
        stored in ``test_dmet_orbitals.txt``."""
        # Initialize Molecule object with PySCF and input
        mol = gto.Mole()
        # Octane (C8H18) geometry; Cartesian coordinates in Angstrom.
        mol.atom = """
            C 0.94764 -0.02227 0.05901
            H 0.58322 0.35937 -0.89984
            H 0.54862 0.61702 0.85300
            H 0.54780 -1.03196 0.19694
            C 2.46782 -0.03097 0.07887
            H 2.83564 0.98716 -0.09384
            H 2.83464 -0.65291 -0.74596
            C 3.00694 -0.55965 1.40773
            H 2.63295 -1.57673 1.57731
            H 2.63329 0.06314 2.22967
            C 4.53625 -0.56666 1.42449
            H 4.91031 0.45032 1.25453
            H 4.90978 -1.19011 0.60302
            C 5.07544 -1.09527 2.75473
            H 4.70164 -2.11240 2.92450
            H 4.70170 -0.47206 3.57629
            C 6.60476 -1.10212 2.77147
            H 6.97868 -0.08532 2.60009
            H 6.97839 -1.72629 1.95057
            C 7.14410 -1.62861 4.10112
            H 6.77776 -2.64712 4.27473
            H 6.77598 -1.00636 4.92513
            C 8.66428 -1.63508 4.12154
            H 9.06449 -2.27473 3.32841
            H 9.02896 -2.01514 5.08095
            H 9.06273 -0.62500 3.98256"""
        mol.basis = "3-21g"
        mol.charge = 0
        mol.spin = 0
        mol.build(verbose=0)

        # Restricted Hartree-Fock mean-field reference wavefunction.
        mf = scf.RHF(mol)
        mf.scf()

        # Localize with IAO; the fragment list covers every atomic orbital.
        dmet_orbs = dmet_orbitals(mol, mf, range(mol.nao_nr()), iao_localization)
        dmet_orbitals_ref = np.loadtxt(get_file_path_stub() + 'test_dmet_orbitals.txt')

        # Test the construction of IAOs
        for index, value_ref in np.ndenumerate(dmet_orbitals_ref):
            self.assertAlmostEqual(value_ref, dmet_orbs.localized_mo[index], msg='DMET orbs error at index ' + str(index), delta=1e-6)
# Run the suite when this file is executed directly (not just via pytest).
if __name__ == "__main__":
    unittest.main()
| 36.629213 | 134 | 0.632822 | 1,853 | 0.568405 | 0 | 0 | 0 | 0 | 0 | 0 | 2,223 | 0.681902 |
d1192c11dd7c40a7a80b9ec12ae3bec4e88c356a | 1,684 | py | Python | books/techno/python/programming_python_4_ed_m_lutz/code/chapter_8/13_binding_events/bind.py | ordinary-developer/lin_education | 13d65b20cdbc3e5467b2383e5c09c73bbcdcb227 | [
"MIT"
] | 1 | 2017-05-04T08:23:46.000Z | 2017-05-04T08:23:46.000Z | books/techno/python/programming_python_4_ed_m_lutz/code/chapter_8/13_binding_events/bind.py | ordinary-developer/lin_education | 13d65b20cdbc3e5467b2383e5c09c73bbcdcb227 | [
"MIT"
] | null | null | null | books/techno/python/programming_python_4_ed_m_lutz/code/chapter_8/13_binding_events/bind.py | ordinary-developer/lin_education | 13d65b20cdbc3e5467b2383e5c09c73bbcdcb227 | [
"MIT"
] | null | null | null | from tkinter import *
def showPosEvent(event):
    """Print the widget and the (x, y) pointer position carried by *event*."""
    print('Widget ={} X={} Y={}'.format(event.widget, event.x, event.y))


def showAllEvent(event):
    """Dump the event object followed by every non-dunder attribute on it."""
    print(event)
    for attr in dir(event):
        if attr.startswith('__'):
            continue  # skip dunder attributes
        print(attr, '=>', getattr(event, attr))


def onKeyPress(event):
    """Report an ordinary key press and the character typed."""
    print('Got key press: ', event.char)


def onArrowKey(event):
    """Report a press of the up-arrow key."""
    print('Got up arrow key press')


def onReturnKey(event):
    """Report a press of the Return key."""
    print('Got return key press')


def onLeftClick(event):
    """Report a left-button click and where it happened."""
    print('Got left mouse button click: ', end=' ')
    showPosEvent(event)


def onRightClick(event):
    """Report a right-button click and where it happened."""
    print('Got right mouse button click: ', end=' ')
    showPosEvent(event)


def onMiddleClick(event):
    """Report a middle-button click, its position, and a full event dump."""
    print('Got middle mouse button click:', end=' ')
    showPosEvent(event)
    showAllEvent(event)


def onLeftDrag(event):
    """Report dragging with the left button held down."""
    print('Got left mouse button drag: ', end=' ')
    showPosEvent(event)


def onDoubleLeftClick(event):
    """Report a double left click, then stop the Tk main loop."""
    print('Got double left mouse click', end=' ')
    showPosEvent(event)
    tkroot.quit()
# Build the demo window: one large red label that receives all the events.
tkroot = Tk()
labelfont = ('courier', 20, 'bold')  # (family, size, style)
widget = Label(tkroot, text = 'Hello bind world')
widget.config(bg = 'red', font = labelfont)
widget.config(height = 5, width = 20)  # size is in character units
widget.pack(expand = YES, fill = BOTH)
# Mouse bindings: single clicks per button, double left click, left drag.
widget.bind('<Button-1>', onLeftClick)
widget.bind('<Button-3>', onRightClick)
widget.bind('<Button-2>', onMiddleClick)
widget.bind('<Double-1>', onDoubleLeftClick)
widget.bind('<B1-Motion>', onLeftDrag)
# Keyboard bindings; the widget must hold focus to receive key events.
widget.bind('<KeyPress>', onKeyPress)
widget.bind('<Up>', onArrowKey)
widget.bind('<Return>', onReturnKey)
widget.focus()
tkroot.title('Click me')
tkroot.mainloop()  # blocks until onDoubleLeftClick calls tkroot.quit()
| 27.16129 | 73 | 0.644893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.236936 |
d1195e5aa794a4828c802143266203e6d6782d92 | 553 | py | Python | week9/api/migrations/0002_auto_20200324_1713.py | yestemir/web | 5bdead66c26a3c466701e25ecae9720f04ad4118 | [
"Unlicense"
] | null | null | null | week9/api/migrations/0002_auto_20200324_1713.py | yestemir/web | 5bdead66c26a3c466701e25ecae9720f04ad4118 | [
"Unlicense"
] | 13 | 2021-03-10T08:46:52.000Z | 2022-03-02T08:13:58.000Z | week9/api/migrations/0002_auto_20200324_1713.py | yestemir/web | 5bdead66c26a3c466701e25ecae9720f04ad4118 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-24 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a ``category`` CharField to ``products`` and turns its
    ``description`` into a TextField defaulting to an empty string.

    NOTE(review): auto-generated by Django (see the header comment);
    normally left unedited.
    """

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='products',
            name='category',
            field=models.CharField(default='category 0', max_length=300),
        ),
        migrations.AlterField(
            model_name='products',
            name='description',
            field=models.TextField(default=''),
        ),
    ]
| 23.041667 | 73 | 0.56962 | 460 | 0.831826 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.222423 |
d11976dd8538123b7d77b35efa91d570c64fdb1d | 960 | py | Python | Computer Networks Lab/A11TCP/PeertoPeer/pptcpserv.py | prabu-5701/Third_Year_Lab_Assignments | db0353cff33c9811ec6df13ec982af161de311fe | [
"MIT"
] | 12 | 2020-10-24T17:57:09.000Z | 2021-12-29T07:13:36.000Z | Computer Networks Lab/A11TCP/PeertoPeer/pptcpserv.py | prabu-5701/Third_Year_Lab_Assignments | db0353cff33c9811ec6df13ec982af161de311fe | [
"MIT"
] | null | null | null | Computer Networks Lab/A11TCP/PeertoPeer/pptcpserv.py | prabu-5701/Third_Year_Lab_Assignments | db0353cff33c9811ec6df13ec982af161de311fe | [
"MIT"
] | 16 | 2020-04-21T13:38:07.000Z | 2022-03-20T23:40:37.000Z | '''
NAME: VAIBHAV SUDHAKAR BHAVSAR
TE-B
ROLL NO: 08
ASSIGNMENT NO: 11
PROBLEM STATEMENT:
Write a program using TCP sockets for wired network to implement
a. Peer to Peer Chat (server side)
'''
import socket
import sys

# Peer-to-peer chat, server side: accept one TCP client on localhost:23000
# and alternate messages received from it with lines typed by the operator.
# Either side ends the chat by sending the literal message "stop.".
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost',23000))
sock.listen(1)
clisock, (ip,port) = sock.accept()

while True:
    data = clisock.recv(16)
    if not data:
        # An empty read means the peer closed the connection; previously
        # this looped forever decoding b'' and blocking on input().
        break
    dt = data.decode()
    if "stop."==dt:
        break
    else:
        print("client: " + dt)
        data = input("you: ")
        clisock.send(str.encode(data))
        if "stop."==data:
            break

# Close the accepted connection as well as the listening socket; the
# client socket was previously leaked.
clisock.close()
sock.close()
'''
res@res-HP-280-G2-MT-Legacy:~/Desktop/FINAL 1/assignment 14/tcp peer 2 peer$ sudo su
[sudo] password for res:
root@res-HP-280-G2-MT-Legacy:/home/res/Desktop/FINAL 1/assignment 14/tcp peer 2 peer# python pptcpserv.py
client: hi from client
you: hello!
client: hi
you: STOP.
root@res-HP-280-G2-MT-Legacy:/home/res/Desktop/FINAL 1/assignment 14/tcp peer 2 peer#
'''
| 22.857143 | 106 | 0.696875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 606 | 0.63125 |
d11ca10c2506a5eec12ca314bb1040463209b759 | 2,825 | py | Python | tutos/institutions/views.py | UVG-Teams/Tutos-System | 230dd9434f745c2e6e69e10f9908e9818c559d03 | [
"MIT"
] | null | null | null | tutos/institutions/views.py | UVG-Teams/Tutos-System | 230dd9434f745c2e6e69e10f9908e9818c559d03 | [
"MIT"
] | null | null | null | tutos/institutions/views.py | UVG-Teams/Tutos-System | 230dd9434f745c2e6e69e10f9908e9818c559d03 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from institutions.models import Institution, Career, Course
from institutions.serializers import InstitutionSerializer, CareerSerializer, CourseSerializer
from permissions.services import APIPermissionClassFactory
def _authenticated_only(name):
    """Build a permission class that requires an authenticated user for
    every collection action (create/list) and every instance action
    (retrieve/update/partial_update/destroy).

    Factored out because the three viewsets below previously carried three
    byte-identical copies of this configuration.
    """
    def base_check(user, req):
        return user.is_authenticated

    def instance_check(user, obj, req):
        return user.is_authenticated

    return APIPermissionClassFactory(
        name=name,
        permission_configuration={
            'base': {
                'create': base_check,
                'list': base_check,
            },
            'instance': {
                'retrieve': instance_check,
                'update': instance_check,
                'partial_update': instance_check,
                'destroy': instance_check,
            },
        },
    )


class InstitutionViewSet(viewsets.ModelViewSet):
    """CRUD API for Institution; any authenticated user may do anything."""
    queryset = Institution.objects.all()
    serializer_class = InstitutionSerializer
    permission_classes = (_authenticated_only('InstitutionPermission'),)


class CareerViewSet(viewsets.ModelViewSet):
    """CRUD API for Career; any authenticated user may do anything."""
    queryset = Career.objects.all()
    serializer_class = CareerSerializer
    permission_classes = (_authenticated_only('CareerPermission'),)


class CourseViewSet(viewsets.ModelViewSet):
    """CRUD API for Course; any authenticated user may do anything."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
    permission_classes = (_authenticated_only('CoursePermission'),)
| 38.175676 | 94 | 0.572743 | 2,528 | 0.894867 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.098407 |
d11e05aec66f3501ed77fd7b7c80b254c9a474b3 | 4,923 | py | Python | vendor/guardian/tests/decorators_test.py | AhmadManzoor/jazzpos | 7b771095b8df52d036657f33f36a97efb575d36c | [
"MIT"
] | 5 | 2015-12-05T15:39:51.000Z | 2020-09-16T20:14:29.000Z | vendor/guardian/tests/decorators_test.py | AhmadManzoor/jazzpos | 7b771095b8df52d036657f33f36a97efb575d36c | [
"MIT"
] | null | null | null | vendor/guardian/tests/decorators_test.py | AhmadManzoor/jazzpos | 7b771095b8df52d036657f33f36a97efb575d36c | [
"MIT"
] | 2 | 2019-11-23T17:47:46.000Z | 2022-01-14T11:05:21.000Z | from django.test import TestCase
from django.contrib.auth.models import User, Group, AnonymousUser
from django.http import HttpRequest
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from guardian.decorators import permission_required, permission_required_or_403
from guardian.exceptions import GuardianError
from guardian.shortcuts import assign
class PermissionRequiredTest(TestCase):
fixtures = ['tests.json']
def setUp(self):
self.anon = AnonymousUser()
self.user = User.objects.get(username='jack')
self.group = Group.objects.get(name='jackGroup')
def _get_request(self, user=None):
if user is None:
user = AnonymousUser()
request = HttpRequest()
request.user = user
return request
def test_no_args(self):
try:
@permission_required
def dummy_view(request):
return HttpResponse('dummy_view')
except GuardianError:
pass
else:
self.fail("Trying to decorate using permission_required without "
"permission as first argument should raise exception")
def test_anonymous_user_wrong_app(self):
request = self._get_request(self.anon)
@permission_required_or_403('not_installed_app.change_user')
def dummy_view(request):
return HttpResponse('dummy_view')
self.assertTrue(isinstance(dummy_view(request), HttpResponseForbidden))
def test_anonymous_user_wrong_codename(self):
request = self._get_request()
@permission_required_or_403('auth.wrong_codename')
def dummy_view(request):
return HttpResponse('dummy_view')
self.assertTrue(isinstance(dummy_view(request), HttpResponseForbidden))
def test_anonymous_user(self):
request = self._get_request()
@permission_required_or_403('auth.change_user')
def dummy_view(request):
return HttpResponse('dummy_view')
self.assertTrue(isinstance(dummy_view(request), HttpResponseForbidden))
def test_wrong_lookup_variables_number(self):
request = self._get_request()
try:
@permission_required_or_403('auth.change_user', (User, 'username'))
def dummy_view(request, username):
pass
dummy_view(request, username='jack')
except GuardianError:
pass
else:
self.fail("If lookup variables are passed they must be tuple of: "
"(ModelClass/app_label.ModelClass/queryset, "
"<pair of lookup_string and view_arg>)\n"
"Otherwise GuardianError should be raised")
def test_wrong_lookup_variables(self):
request = self._get_request()
args = (
(2010, 'username', 'username'),
('User', 'username', 'username'),
(User, 'username', 'no_arg'),
)
for tup in args:
try:
@permission_required_or_403('auth.change_user', tup)
def show_user(request, username):
user = get_object_or_404(User, username=username)
return HttpResponse("It's %s here!" % user.username)
show_user(request, 'jack')
except GuardianError:
pass
else:
self.fail("Wrong arguments given but GuardianError not raised")
def test_model_lookup(self):
request = self._get_request(self.user)
perm = 'auth.change_user'
joe, created = User.objects.get_or_create(username='joe')
assign(perm, self.user, obj=joe)
models = (
'auth.User',
User,
User.objects.filter(is_active=True),
)
for model in models:
@permission_required_or_403(perm, (model, 'username', 'username'))
def dummy_view(request, username):
get_object_or_404(User, username=username)
return HttpResponse('hello')
response = dummy_view(request, username=joe.username)
self.assertEqual(response.content, 'hello')
def test_redirection(self):
request = self._get_request(self.user)
foo = User.objects.create(username='foo')
foobar = Group.objects.create(name='foobar')
foo.groups.add(foobar)
@permission_required('auth.change_group',
(User, 'groups__name', 'group_name'),
login_url='/foobar/')
def dummy_view(request, group_name):
pass
response = dummy_view(request, group_name='foobar')
self.assertTrue(isinstance(response, HttpResponseRedirect))
self.assertTrue(response._headers['location'][1].startswith(
'/foobar/'))
| 33.719178 | 79 | 0.630713 | 4,444 | 0.902702 | 0 | 0 | 1,285 | 0.26102 | 0 | 0 | 792 | 0.160878 |
d11ea5562b964e94c6dcce86e39ac739e687f11e | 1,004 | py | Python | default_colours.py | ARCowie28/SyntheticWeather | c1c7c2b0b820d35306891ae52b44cc0240f0323d | [
"BSD-3-Clause"
] | 11 | 2019-03-22T01:33:28.000Z | 2021-04-18T03:58:04.000Z | default_colours.py | ARCowie28/SyntheticWeather | c1c7c2b0b820d35306891ae52b44cc0240f0323d | [
"BSD-3-Clause"
] | 1 | 2019-09-16T13:37:33.000Z | 2019-09-17T13:23:34.000Z | default_colours.py | ARCowie28/SyntheticWeather | c1c7c2b0b820d35306891ae52b44cc0240f0323d | [
"BSD-3-Clause"
] | 6 | 2019-05-05T15:05:01.000Z | 2019-12-04T15:39:58.000Z | # Declare default colours for the code which calls this script.
# import numpy as np
from numpy import array
# Deep blue.
blue = array((25, 100, 200)) / 255
# Pure f***ing blue.
bluest = array((0, 0, 255)) / 255
# Distinguished looking grey.
grey = array((0.3, 0.3, 0.3))
# Also distinguished but contrasts better with black.
lgrey = array((0.6, 0.6, 0.6))
# Indian flag orange (saffron).
orange = array((255, 128, 0)) / 255
# Orange like nice satsumas.
orangest = array((255, 165, 0)) / 255
# Oxygenated blood red.
red = array((160, 30, 30)) / 255
# Pure f***ing red.
reddest = array((255, 0, 0)) / 255
# Gandalf white.
whitest = array((1, 1, 1))
# Black as coal, black as night.
blackest = array((0, 0, 0))
# Veering towards gold.
yellow = array((237, 177, 32)) / 255
# Do not use unless you REALLY need to.
teal = array((50, 180, 165)) / 255
# Gentle dark green.
green = array((40, 180, 20)) / 255
# Pure f***ing green.
greenest = array((0, 255, 0)) / 255
| 25.74359 | 64 | 0.621514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 465 | 0.463147 |
d11eb59a6a47321994cee5f85b00b5df52bdc914 | 1,840 | py | Python | reverb/reverb_types.py | tfboyd/reverb | 3bd2826f23ededd40003bffc86f01162a0feb334 | [
"Apache-2.0"
] | 2 | 2021-10-30T16:59:48.000Z | 2021-11-17T10:21:17.000Z | reverb/reverb_types.py | tfboyd/reverb | 3bd2826f23ededd40003bffc86f01162a0feb334 | [
"Apache-2.0"
] | null | null | null | reverb/reverb_types.py | tfboyd/reverb | 3bd2826f23ededd40003bffc86f01162a0feb334 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytype helpers."""
import collections
from typing import Any, Iterable, Mapping, NamedTuple, Optional, Union, get_type_hints
from reverb import pybind
import tensorflow.compat.v1 as tf
from reverb.cc import schema_pb2
# Short aliases for the C++ item-selector bindings.
Fifo = pybind.FifoSelector
Heap = pybind.HeapSelector
Lifo = pybind.LifoSelector
Prioritized = pybind.PrioritizedSelector
Uniform = pybind.UniformSelector
# Any one of the selector strategies above.
DistributionType = Union[Fifo, Heap, Lifo, Prioritized, Uniform]
# Note that this is effectively treated as `Any`; see b/109648354.
SpecNest = Union[
    tf.TensorSpec, Iterable['SpecNest'], Mapping[str, 'SpecNest']]  # pytype: disable=not-supported-yet
# Field name -> type hint harvested from the generated TableInfo proto
# (falls back to an empty mapping when no hints are available).
_table_info_proto_types = get_type_hints(schema_pb2.TableInfo) or {}
# Preserve the proto's field order; default to Any for un-hinted fields.
_table_info_type_dict = collections.OrderedDict(
    (descr.name, _table_info_proto_types.get(descr.name, Any))
    for descr in schema_pb2.TableInfo.DESCRIPTOR.fields)
# The signature field is overridden: it holds a nest of tf.TypeSpec objects.
_table_info_type_dict['signature'] = Optional[SpecNest]
"""A tuple describing Table information.
The main difference between this object and a `schema_pb2.TableInfo` message
is that the signature is a nested structure of `tf.TypeSpec` objects,
instead of a raw proto.
"""
TableInfo = NamedTuple('TableInfo', tuple(_table_info_type_dict.items()))
| 34.074074 | 103 | 0.778804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 979 | 0.532065 |
d11ede3c3eb4d35af8cddf5255e4407c0eabdb71 | 4,380 | py | Python | ed2d/shaders.py | explosiveduck/cubix | 16e7a298a83fe53174bda8ec77dfcf6869ed5336 | [
"BSD-2-Clause"
] | 1 | 2015-11-02T02:11:18.000Z | 2015-11-02T02:11:18.000Z | ed2d/shaders.py | explosiveduck/cubix | 16e7a298a83fe53174bda8ec77dfcf6869ed5336 | [
"BSD-2-Clause"
] | 29 | 2015-06-09T19:27:49.000Z | 2016-03-08T06:13:24.000Z | ed2d/shaders.py | explosiveduck/cubix | 16e7a298a83fe53174bda8ec77dfcf6869ed5336 | [
"BSD-2-Clause"
] | null | null | null | from ed2d.opengl import gl, pgl
from ed2d import files
from ed2d import typeutils
from gem import vector
class ShaderBase(object):
    """Shared compile logic for GL shader objects.

    Subclasses must provide ``shaderType``, ``shaderData``,
    ``shaderErrorMessage`` and ``shaderSuccessMessage``.
    """

    def create(self):
        """Create and compile the GL shader, printing the outcome."""
        shader = gl.glCreateShader(self.shaderType)
        self.shader = shader
        pgl.glShaderSource(shader, self.shaderData)
        gl.glCompileShader(shader)
        compiled = pgl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS)
        # TODO - Implement this with logging when that is finished.
        if compiled:
            print(self.shaderSuccessMessage)
        else:
            print(self.shaderErrorMessage)
            print(pgl.glGetShaderInfoLog(shader))
class VertexShader(ShaderBase):
    """Vertex-stage shader whose source is loaded from *path*."""

    def __init__(self, path):
        self.shaderType = gl.GL_VERTEX_SHADER
        self.shaderData = files.read_file(path)
        self.shaderSuccessMessage = 'Vertex Shader compiled successfully.'
        self.shaderErrorMessage = 'Vertex Shader compilation error.'
class FragmentShader(ShaderBase):
    """Fragment-stage shader whose source is loaded from *path*."""

    def __init__(self, path):
        self.shaderType = gl.GL_FRAGMENT_SHADER
        self.shaderData = files.read_file(path)
        self.shaderSuccessMessage = 'Fragment Shader compiled successfully.'
        self.shaderErrorMessage = 'Fragment Shader compilation error.'
class ShaderProgram(object):
    """Links a vertex and fragment shader into a GL program and wraps
    attribute/uniform access.

    Uniforms are registered with :meth:`new_uniform`, which returns an
    integer id used by the ``set_uniform*`` helpers.
    """

    def __init__(self, vertex, fragment):
        self.uniforms = []      # uniform locations, indexed by uniform id
        self.uniformNames = {}  # uniform id -> uniform name

        self.vertex = vertex
        self.fragment = fragment

        self.vertex.create()
        self.fragment.create()

        self.program = gl.glCreateProgram()

        gl.glAttachShader(self.program, self.vertex.shader)
        gl.glAttachShader(self.program, self.fragment.shader)

        gl.glLinkProgram(self.program)

        status = pgl.glGetProgramiv(self.program, gl.GL_LINK_STATUS)
        # TODO - Implement this with logging when that is finished.
        if not status:
            print('Linking error:')
            print(pgl.glGetProgramInfoLog(self.program))
        else:
            print('Program Linked successfully.')

    def use(self, using=True):
        """Bind this program, or unbind any program when *using* is False."""
        if using is False:
            prog = 0
        else:
            prog = self.program
        gl.glUseProgram(prog)

    def get_attribute(self, name):
        """Return the location of the named vertex attribute."""
        return gl.glGetAttribLocation(self.program, name)

    def get_uniform_name(self, uniID):
        """Return the name a uniform id was registered with."""
        return self.uniformNames[uniID]

    def new_uniform(self, name):
        """Register *name*, cache its location, and return its uniform id."""
        uniID = len(self.uniforms)
        self.uniformNames[uniID] = name
        self.uniforms.append(gl.glGetUniformLocation(self.program, name))
        return uniID

    def set_uniform_matrix(self, uniID, value, uniform=None, size=None):
        """Upload a 2x2, 3x3 or 4x4 matrix uniform.

        :param uniID: uniform id from new_uniform (ignored when *uniform*
                      is given explicitly)
        :param value: matrix object exposing ``size`` and ``c_matrix``
        :param uniform: optional explicit uniform location
        :param size: optional explicit matrix dimension (2, 3 or 4)
        """
        # Fixed: compare against None so an explicit location of 0 (a
        # perfectly valid GL uniform location) is honored instead of
        # being silently replaced by the cached lookup.
        if uniform is None:
            uniform = self.uniforms[uniID]
        if size is None:
            size = value.size
        # use non wrapped function for performance reasons
        if size == 4:
            gl.glUniformMatrix4fv(uniform, 1, gl.GL_FALSE, value.c_matrix[0])
        elif size == 3:
            gl.glUniformMatrix3fv(uniform, 1, gl.GL_FALSE, value.c_matrix[0])
        elif size == 2:
            gl.glUniformMatrix2fv(uniform, 1, gl.GL_FALSE, value.c_matrix[0])
        # NOTE: other sizes are silently ignored (legacy behavior kept).

    def set_uniform_array(self, uniID, value):
        """Upload a 2-4 component int or float vector uniform.

        Accepts a ``vector.Vector`` or any sequence of 2-4 ints/floats.
        Raises TypeError for unsupported lengths or element types.
        (The previous bare ``except`` converted *every* failure -- even
        real GL errors -- into a context-free TypeError, and a bad size
        with valid elements was silently ignored.)
        """
        uniform = self.uniforms[uniID]
        if isinstance(value, vector.Vector):
            value = value.vector
        size = len(value)
        if size not in (2, 3, 4):
            raise TypeError('uniform arrays must have 2-4 components, got %d' % size)
        if isinstance(value[0], int):
            if size == 4:
                gl.glUniform4i(uniform, *value)
            elif size == 3:
                gl.glUniform3i(uniform, *value)
            elif size == 2:
                gl.glUniform2i(uniform, *value)
        elif isinstance(value[0], float):
            if size == 4:
                gl.glUniform4f(uniform, *value)
            elif size == 3:
                gl.glUniform3f(uniform, *value)
            elif size == 2:
                gl.glUniform2f(uniform, *value)
        else:
            raise TypeError('unsupported uniform element type: %r' % type(value[0]))

    def get_uniform(self, uniID):
        """Return the cached GL location for a uniform id."""
        uniform = self.uniforms[uniID]
        return uniform

    def set_uniform(self, uniID, value):
        """Upload a scalar int or float uniform; raises TypeError otherwise."""
        uniform = self.uniforms[uniID]
        if isinstance(value, int):
            gl.glUniform1i(uniform, value)
        elif isinstance(value, float):
            gl.glUniform1f(uniform, value)
        else:
            raise TypeError
| 30.84507 | 77 | 0.605936 | 4,263 | 0.973288 | 0 | 0 | 0 | 0 | 0 | 0 | 385 | 0.0879 |
d11f4b63fbf5bd854138ff819170bf1b5bcc07d8 | 1,532 | py | Python | poky/meta/lib/oeqa/sdk/cases/gcc.py | buildlinux/unityos | dcbe232d0589013d77a62c33959d6a69f9bfbc5e | [
"Apache-2.0"
] | 1 | 2020-01-13T13:16:52.000Z | 2020-01-13T13:16:52.000Z | poky/meta/lib/oeqa/sdk/cases/gcc.py | buildlinux/unityos | dcbe232d0589013d77a62c33959d6a69f9bfbc5e | [
"Apache-2.0"
] | 3 | 2019-11-20T02:53:01.000Z | 2019-12-26T03:00:15.000Z | sources/poky/meta/lib/oeqa/sdk/cases/gcc.py | zwg0106/imx-yocto | e378ca25352a59d1ef84ee95f3386b7314f4565b | [
"MIT"
] | null | null | null | import os
import shutil
import unittest
from oeqa.core.utils.path import remove_safe
from oeqa.sdk.case import OESDKTestCase
class GccCompileTest(OESDKTestCase):
    """Verify that the SDK cross toolchain can build C/C++ sources."""

    td_vars = ['MACHINE']

    @classmethod
    def setUpClass(cls):
        # Stage the test sources and makefile inside the SDK directory.
        sources = {'test.c': cls.tc.files_dir,
                   'test.cpp': cls.tc.files_dir,
                   'testsdkmakefile': cls.tc.sdk_files_dir}
        for name, src_dir in sources.items():
            shutil.copyfile(os.path.join(src_dir, name),
                            os.path.join(cls.tc.sdk_dir, name))

    def setUp(self):
        machine = self.td.get("MACHINE")
        has_toolchain = (
            self.tc.hasTargetPackage("packagegroup-cross-canadian-%s" % machine)
            or self.tc.hasTargetPackage("gcc"))
        if not has_toolchain:
            raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain")

    def test_gcc_compile(self):
        self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))

    def test_gpp_compile(self):
        self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))

    def test_gpp2_compile(self):
        self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))

    def test_make(self):
        self._run('cd %s; make -f testsdkmakefile' % self.tc.sdk_dir)

    @classmethod
    def tearDownClass(cls):
        # Remove everything setUpClass staged plus the build outputs.
        for name in ('test.c', 'test.cpp', 'test.o', 'test', 'testsdkmakefile'):
            remove_safe(os.path.join(cls.tc.sdk_dir, name))
d1208d36f83d1a05d8b97f40944b08d56a4cd522 | 338 | py | Python | backend/api/routes/__init__.py | senavs/todo-list | 6476805583d0edbb9df85111cfc799a2144e2c54 | [
"Apache-2.0"
] | null | null | null | backend/api/routes/__init__.py | senavs/todo-list | 6476805583d0edbb9df85111cfc799a2144e2c54 | [
"Apache-2.0"
] | null | null | null | backend/api/routes/__init__.py | senavs/todo-list | 6476805583d0edbb9df85111cfc799a2144e2c54 | [
"Apache-2.0"
] | null | null | null | from fastapi import APIRouter
from . import auth, index, list, task
router = APIRouter()
router.include_router(index.router)
router.include_router(auth.router, prefix='/auth', tags=['Authenticate'])
router.include_router(list.router, prefix='/lists', tags=['Lists'])
router.include_router(task.router, prefix='/lists', tags=['Tasks'])
| 30.727273 | 73 | 0.754438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.150888 |
d12140f39c7a66d081a5359972998bfd95ac1b4b | 5,657 | py | Python | datasets/mnist_data.py | shijack/vae-system | 14506b3b5966162a3502b26dd68d1a77ccbcfb34 | [
"MIT"
] | null | null | null | datasets/mnist_data.py | shijack/vae-system | 14506b3b5966162a3502b26dd68d1a77ccbcfb34 | [
"MIT"
] | null | null | null | datasets/mnist_data.py | shijack/vae-system | 14506b3b5966162a3502b26dd68d1a77ccbcfb34 | [
"MIT"
] | null | null | null | # Some code was borrowed from https://github.com/petewarden/tensorflow_makefile/blob/master/tensorflow/models/image/mnist/convolutional.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy
import tensorflow as tf
from scipy import ndimage
from six.moves import urllib
# Base URL the four MNIST archive files are downloaded from.
SOURCE_URL_MNIST = 'http://yann.lecun.com/exdb/mnist/'
# Params for MNIST
IMAGE_SIZE = 28  # images are 28x28 pixels
NUM_CHANNELS = 1  # greyscale
PIXEL_DEPTH = 255  # raw pixel values are 0..255
NUM_LABELS = 10  # digit classes 0-9
VALIDATION_SIZE = 5000  # Size of the validation set.
# Download MNIST data
def maybe_download(dataset_dir, filename):
    """Download *filename* from Yann LeCun's site unless it is already present.

    Returns the local path of the (possibly just downloaded) file.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    filepath = os.path.join(dataset_dir, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL_MNIST + filename, filepath)
    with tf.gfile.GFile(filepath) as f:
        size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
# Extract the images
def extract_data(filename, num_images, norm_shift=False, norm_scale=True):
    """Read *num_images* MNIST images from a gzipped idx file.

    Returns a float32 array of shape
    [num_images, IMAGE_SIZE*IMAGE_SIZE*NUM_CHANNELS], optionally shifted
    by -PIXEL_DEPTH/2 and/or scaled by 1/PIXEL_DEPTH.
    """
    print('Extracting', filename)
    n_bytes = IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the idx header
        raw = stream.read(n_bytes)
    pixels = numpy.frombuffer(raw, dtype=numpy.uint8).astype(numpy.float32)
    if norm_shift:
        pixels = pixels - (PIXEL_DEPTH / 2.0)
    if norm_scale:
        pixels = pixels / PIXEL_DEPTH
    pixels = pixels.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
    return numpy.reshape(pixels, [num_images, -1])
# Extract the labels
def extract_labels(filename, num_images):
    """Read *num_images* labels from a gzipped idx file as one-hot rows."""
    print('Extracting', filename)
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the idx header
        raw = stream.read(1 * num_images)
    labels = numpy.frombuffer(raw, dtype=numpy.uint8).astype(numpy.int64)
    count = len(labels)
    one_hot = numpy.zeros((count, NUM_LABELS))
    one_hot[numpy.arange(count), labels] = 1
    return numpy.reshape(one_hot, [-1, NUM_LABELS])
# Augment training data
def expend_training_data(images, labels):
expanded_images = []
expanded_labels = []
j = 0 # counter
for x, y in zip(images, labels):
j = j + 1
if j % 100 == 0:
print('expanding data : %03d / %03d' % (j, numpy.size(images, 0)))
# register original data
expanded_images.append(x)
expanded_labels.append(y)
# get a value for the background
# zero is the expected value, but median() is used to estimate background's value
bg_value = numpy.median(x) # this is regarded as background's value
image = numpy.reshape(x, (-1, 28))
for i in range(4):
# rotate the image with random degree
angle = numpy.random.randint(-15, 15, 1)
new_img = ndimage.rotate(image, angle, reshape=False, cval=bg_value)
# shift the image with random distance
shift = numpy.random.randint(-2, 2, 2)
new_img_ = ndimage.shift(new_img, shift, cval=bg_value)
# register new training data
expanded_images.append(numpy.reshape(new_img_, 784))
expanded_labels.append(y)
# images and labels are concatenated for random-shuffle at each epoch
# notice that pair of image and label should not be broken
expanded_train_total_data = numpy.concatenate((expanded_images, expanded_labels), axis=1)
numpy.random.shuffle(expanded_train_total_data)
return expanded_train_total_data
# Prepare MNISt data
def prepare_MNIST_data(dataset_dir, use_norm_shift=False, use_norm_scale=True, use_data_augmentation=False):
    """Download, extract and split MNIST.

    Returns (train_total_data, train_size, validation_data,
    validation_labels, test_data, test_labels), where the rows of
    train_total_data are [image | one-hot label].
    """
    # Fetch the four archive files (no-op when already present).
    train_images_gz = maybe_download(dataset_dir, 'train-images-idx3-ubyte.gz')
    train_labels_gz = maybe_download(dataset_dir, 'train-labels-idx1-ubyte.gz')
    test_images_gz = maybe_download(dataset_dir, 't10k-images-idx3-ubyte.gz')
    test_labels_gz = maybe_download(dataset_dir, 't10k-labels-idx1-ubyte.gz')

    # Decode the archives into numpy arrays.
    train_data = extract_data(train_images_gz, 60000, use_norm_shift, use_norm_scale)
    train_labels = extract_labels(train_labels_gz, 60000)
    test_data = extract_data(test_images_gz, 10000, use_norm_shift, use_norm_scale)
    test_labels = extract_labels(test_labels_gz, 10000)

    # Carve the validation split off the front of the training set.
    validation_data = train_data[:VALIDATION_SIZE, :]
    validation_labels = train_labels[:VALIDATION_SIZE, :]
    train_data = train_data[VALIDATION_SIZE:, :]
    train_labels = train_labels[VALIDATION_SIZE:, :]

    # Keep image/label pairs in a single row for per-epoch shuffling;
    # optionally augment the training set with distorted copies.
    if use_data_augmentation:
        train_total_data = expend_training_data(train_data, train_labels)
    else:
        train_total_data = numpy.concatenate((train_data, train_labels), axis=1)

    train_size = train_total_data.shape[0]
    return train_total_data, train_size, validation_data, validation_labels, test_data, test_labels
if __name__ == '__main__':
    # Smoke run: download and prepare MNIST under ./datasets/data.
    train_total_data, train_size, _, _, test_data, test_labels = prepare_MNIST_data('./datasets/data')
| 38.482993 | 138 | 0.705851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,360 | 0.24041 |
d1214c4589f598426173369d768239200b8619fe | 2,079 | py | Python | module/server/view/login/routes.py | antkrit/project | 89172be482a640fe656c45a1c35ea1242cd98347 | [
"MIT"
] | null | null | null | module/server/view/login/routes.py | antkrit/project | 89172be482a640fe656c45a1c35ea1242cd98347 | [
"MIT"
] | 1 | 2021-05-20T18:15:46.000Z | 2021-05-21T14:27:47.000Z | module/server/view/login/routes.py | antkrit/project | 89172be482a640fe656c45a1c35ea1242cd98347 | [
"MIT"
] | null | null | null | """Define the route of the login form"""
from flask import (
render_template,
redirect,
url_for,
request,
current_app,
session,
flash,
)
from flask_login import current_user, login_user, logout_user
from module.server import messages
from module.server.models.user import User
from module.server.view.login import bp, forms as f
@bp.route("/", methods=["GET", "POST"])
def login_view():
"""
View for the login page.
Once the user tries to get to his account, he will be redirected to the login page.
Methods: GET, POST
"""
if current_user.is_authenticated: # if the user is already logged in - redirect to his cabinet
if current_user.username == "admin": # If current user is admin
return redirect(url_for("admin.admin_view"))
return redirect(url_for("cabinet.cabinet_view"))
login_form = f.LoginForm()
if request.method == "POST":
data = request.form
if login_form.validate_on_submit() or (data and current_app.testing):
# If user clicked the "Sign In" button or there is data in the request while testing app
username = data.get("username")
password = data.get("password")
user = User.query.filter_by(username=username).first()
if user and user.check_password(password): # If such login exists, login and password match - login user
session.clear()
login_user(user)
flash(messages["success_login"], "info")
if user.username == "admin": # If user is admin - redirect him to the admin interface
return redirect(url_for("admin.admin_view"))
return redirect(url_for("cabinet.cabinet_view"))
return redirect(url_for("login.login_view"))
return render_template("auth/login.html", title="Login", form=login_form)
@bp.route("/logout", methods=["GET"])
def logout():
"""
Log out current user.
Methods: GET
"""
logout_user()
return redirect(url_for("login.login_view"))
| 34.65 | 117 | 0.64406 | 0 | 0 | 0 | 0 | 1,715 | 0.824916 | 0 | 0 | 765 | 0.367965 |
d12155bf89a126b14c330ae2a6b76778e54cc62a | 1,736 | py | Python | true_coders/urls.py | tanvirtareq/clist | 7be17dc463e838778ef5dd6c6bc48eb09a8d98dd | [
"Apache-2.0"
] | 1 | 2021-11-30T23:00:31.000Z | 2021-11-30T23:00:31.000Z | true_coders/urls.py | tanvirtareq/clist | 7be17dc463e838778ef5dd6c6bc48eb09a8d98dd | [
"Apache-2.0"
] | null | null | null | true_coders/urls.py | tanvirtareq/clist | 7be17dc463e838778ef5dd6c6bc48eb09a8d98dd | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import re_path
from true_coders import views
app_name = 'coder'
urlpatterns = [
re_path(r'^settings/$', views.settings, name='settings'),
re_path(r'^settings/(?P<tab>preferences|social|accounts|filters|notifications|lists)/$',
views.settings,
name='settings'),
re_path(r'^settings/notifications/unsubscribe/$', views.unsubscribe, name='unsubscribe'),
re_path(r'^settings/change/$', views.change, name='change'),
re_path(r'^settings/search/$', views.search, name='search'),
re_path(r'^coder/$', views.my_profile, name='my_profile'),
re_path(r'^coder/(?P<username>[^/]*)/ratings/$', views.ratings, name='ratings'),
re_path(r'^coder/([^/]*)/$', views.profile, name='profile'),
re_path(r'^coders/$', views.coders, name='coders'),
re_path(r'^account/(?P<key>.*)/resource/(?P<host>.*)/ratings/$', views.ratings),
re_path(r'^account/(?P<key>.*)/resource/(?P<host>.*)/$', views.account, name='account'),
re_path(r'^accounts/$', views.accounts, name='accounts'),
re_path(r'^profile/(?P<query>.*)/ratings/$', views.ratings),
re_path(r'^profile/(?P<query>.*)/$', views.profiles, name='mixed_profile'),
re_path(r'^api/key/$', views.get_api_key, name='api-key'),
re_path(r'^remove/api/key/$', views.remove_api_key, name='remove-api-key'),
re_path(r'^party/([^/]*)/(join|leave)/$', views.party_action, name='party-action'),
re_path(r'^party/([^/]*)/contests/$', views.party_contests, name='party-contests'),
re_path(r'^party/([^/]*)/(?:(calendar|ranking|information)/)?$', views.party, name='party'),
re_path(r'^parties/$', views.parties, name='parties'),
re_path(r'^list/([^/]*)/$', views.view_list, name='list'),
]
| 54.25 | 96 | 0.641129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 818 | 0.471198 |
d123ae3da1f9bbc3e25f9668062bd9940c2f2120 | 991 | py | Python | inkcut-master/inkcut/device/protocols/debug.py | ilnanny/Inkscape-addons | a30cdde2093fa2da68b90213e057519d0304433f | [
"X11"
] | 3 | 2019-03-08T23:32:29.000Z | 2019-05-11T23:53:46.000Z | inkcut-master/inkcut/device/protocols/debug.py | ilnanny/Inkscape-addons | a30cdde2093fa2da68b90213e057519d0304433f | [
"X11"
] | null | null | null | inkcut-master/inkcut/device/protocols/debug.py | ilnanny/Inkscape-addons | a30cdde2093fa2da68b90213e057519d0304433f | [
"X11"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on Oct 23, 2015
@author: jrm
'''
from inkcut.device.plugin import DeviceProtocol
from inkcut.core.utils import async_sleep, log
class DebugProtocol(DeviceProtocol):
    """A protocol that just logs what is called.

    Useful for exercising the device pipeline without real hardware:
    every callback logs its arguments, and move() simulates travel time.
    """

    def connection_made(self):
        log.debug("protocol.connectionMade()")

    def move(self, x, y, z, absolute=True):
        log.debug("protocol.move({x},{y},{z})".format(x=x, y=y, z=z))
        #: Wait some time before we get there
        return async_sleep(0.1)

    def set_pen(self, p):
        log.debug("protocol.set_pen({p})".format(p=p))

    def set_velocity(self, v):
        log.debug("protocol.set_velocity({v})".format(v=v))

    def set_force(self, f):
        log.debug("protocol.set_force({f})".format(f=f))

    def data_received(self, data):
        # Fixed: the format string was missing its closing parenthesis
        # ("protocol.data_received({}"), producing unbalanced log output.
        log.debug("protocol.data_received({})".format(data))

    def connection_lost(self):
        log.debug("protocol.connection_lost()")
d123bf2051577f1b1d94b482d255562a77a61f9a | 1,262 | py | Python | config/qtile/Managers/LayoutManager.py | dat-adi/Dotfiles | 7a541aba2bbdd88736bebc9e82f6921ab4a3e03b | [
"Apache-2.0"
] | 2 | 2021-05-06T15:58:29.000Z | 2021-10-02T14:12:08.000Z | config/qtile/Managers/LayoutManager.py | dat-adi/dotfiles | 7a541aba2bbdd88736bebc9e82f6921ab4a3e03b | [
"Apache-2.0"
] | null | null | null | config/qtile/Managers/LayoutManager.py | dat-adi/dotfiles | 7a541aba2bbdd88736bebc9e82f6921ab4a3e03b | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
from libqtile import layout
def get_layouts():
layout_theme = {
"border_width": 2,
"margin": 8,
"border_focus": "#F0F0F0",
"border_normal": "#1D233F",
}
layouts = [
# layout.Bsp(),
# layout.MonadWide(),
# layout.Tile(**layout_theme),
# layout.VerticalTile(),
# layout.Zoomy(),
# layout.Max(**layout_theme),
layout.Columns(**layout_theme),
layout.Stack(num_stacks=2, **layout_theme),
layout.Matrix(**layout_theme),
layout.RatioTile(**layout_theme),
layout.MonadTall(**layout_theme),
layout.TreeTab(
font="Source Code Pro",
fontsize=10,
sections=["FIRST", "SECOND", "THIRD", "FOURTH"],
section_fontsize=10,
border_width=2,
bg_color="1c1f24",
active_bg="2E7588",
active_fg="000000",
inactive_bg="a9a1e1",
inactive_fg="1c1f24",
padding_left=0,
padding_x=0,
padding_y=5,
section_top=10,
section_bottom=20,
level_shift=8,
vspace=3,
panel_width=200,
),
]
return layouts
| 26.291667 | 60 | 0.510301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.248811 |
d126cd392241e8011737fa6ded3a808ff6d9fb33 | 22,200 | py | Python | x.py | douboer/lianghua | ec55208e7aa5f9435ffe59ffa419a2acdc60eeb4 | [
"Apache-2.0"
] | null | null | null | x.py | douboer/lianghua | ec55208e7aa5f9435ffe59ffa419a2acdc60eeb4 | [
"Apache-2.0"
] | null | null | null | x.py | douboer/lianghua | ec55208e7aa5f9435ffe59ffa419a2acdc60eeb4 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf8 -*-
# version 1.11
import tkinter.messagebox,os
from tkinter import *
from tkinter.ttk import *
from tkinter import Menu
import datetime
import threading
import pickle
import time
import tushare as ts
import pywinauto
import pywinauto.clipboard
import pywinauto.application
NUM_OF_STOCKS = 5  # number of user-configurable stock slots

is_start = False  # True while automatic trading is active
is_monitor = True  # the monitor loop keeps running while True
set_stocks_info = []  # user-entered rules per slot: (code, relation, price, direction, qty, time)
actual_stocks_info = []  # latest quotes per slot: (code, name, price)
consignation_info = []  # history of placed orders
is_ordered = [1] * NUM_OF_STOCKS  # 1: not yet ordered, 0: order placed
is_dealt = [0] * NUM_OF_STOCKS  # 0: no deal; negative: shares sold; positive: shares bought
stock_codes = [''] * NUM_OF_STOCKS
class OperationThs:
    """Drives the TongHuaShun trading client ("网上股票交易系统5.0")
    through its GUI with pywinauto.

    All interaction targets the client's dual-direction order dialog;
    construction aborts the program when that dialog cannot be found.
    """

    def __init__(self):
        try:
            self.__app = pywinauto.application.Application()
            self.__app.connect(title='网上股票交易系统5.0')

            top_hwnd = pywinauto.findwindows.find_window(title='网上股票交易系统5.0')
            dialog_hwnd = pywinauto.findwindows.find_windows(top_level_only=False, class_name='#32770', parent=top_hwnd)[0]
            wanted_hwnds = pywinauto.findwindows.find_windows(top_level_only=False, parent=dialog_hwnd)
            print('wanted_hwnds length', len(wanted_hwnds))
            # The dual-direction screen exposes one of these child-window
            # counts depending on the client build; any other count means
            # a different screen is currently showing.
            if len(wanted_hwnds) not in (99,97,96,98,100,101):
                tkinter.messagebox.showerror('错误', '无法获得“同花顺双向委托界面”的窗口句柄,请将同花顺交易系统切换到“双向委托界面”!')
                exit()

            self.__main_window = self.__app.window_(handle=top_hwnd)
            self.__dialog_window = self.__app.window_(handle=dialog_hwnd)
        except:
            # NOTE(review): this bare except silently hides connection
            # failures; later method calls will then fail on missing
            # attributes. Consider surfacing the error instead.
            pass

    def __buy(self, code, quantity):
        """Fill in and submit a buy order.

        :param code: stock code, string
        :param quantity: quantity, string ('0' keeps the dialog default)
        """
        self.__dialog_window.Edit1.SetFocus()
        time.sleep(0.2)
        self.__dialog_window.Edit1.SetEditText(code)
        time.sleep(0.2)
        if quantity != '0':
            self.__dialog_window.Edit3.SetEditText(quantity)
            time.sleep(0.2)
        self.__dialog_window.Button1.Click()
        time.sleep(0.2)

    def __sell(self, code, quantity):
        """Fill in and submit a sell order.

        :param code: stock code, string
        :param quantity: quantity, string ('0' keeps the dialog default)
        """
        self.__dialog_window.Edit4.SetFocus()
        time.sleep(0.2)
        self.__dialog_window.Edit4.SetEditText(code)
        time.sleep(0.2)
        if quantity != '0':
            self.__dialog_window.Edit6.SetEditText(quantity)
            time.sleep(0.2)
        self.__dialog_window.Button2.Click()
        time.sleep(0.2)

    def __closePopupWindow(self):
        """Close a single popup dialog.

        :return: True when a popup was found and dismissed, else False
        """
        popup_hwnd = self.__main_window.PopupWindow()
        if popup_hwnd:
            popup_window = self.__app.window_(handle=popup_hwnd)
            popup_window.SetFocus()
            popup_window.Button.Click()
            return True
        return False

    def __closePopupWindows(self):
        """Dismiss popup dialogs until none remain."""
        while self.__closePopupWindow():
            time.sleep(0.5)

    def order(self, code, direction, quantity):
        """Place an order and dismiss any confirmation popups.

        :param code: stock code, string
        :param direction: 'B' to buy, 'S' to sell
        :param quantity: quantity, string
        """
        if direction == 'B':
            self.__buy(code, quantity)
        if direction == 'S':
            self.__sell(code, quantity)
        self.__closePopupWindows()

    def maxWindow(self):
        """Maximize the client window and give it focus."""
        # 3 is the SW_MAXIMIZE show state.
        if self.__main_window.GetShowState() != 3:
            self.__main_window.Maximize()
        self.__main_window.SetFocus()

    def minWindow(self):
        """Minimize the client window."""
        # 2 is the SW_MINIMIZE show state.
        if self.__main_window.GetShowState() != 2:
            self.__main_window.Minimize()

    def refresh(self, t=0.5):
        """Click the refresh button.

        :param t: seconds to wait after refreshing
        """
        self.__dialog_window.Button5.Click()
        time.sleep(t)

    def getMoney(self):
        """Return the available funds shown in the dialog, as float."""
        return float(self.__dialog_window.Static19.WindowText())

    @staticmethod
    def __cleanClipboardData(data, cols=11):
        """Clean clipboard text into a table.

        :param data: raw clipboard text
        :param cols: number of columns per row
        :return: list of rows (header row and trailing token removed)
        """
        # Drop the trailing token (presumably a footer cell - confirm
        # against the client's clipboard format).
        lst = data.strip().split()[:-1]
        matrix = []
        for i in range(0, len(lst) // cols):
            matrix.append(lst[i * cols:(i + 1) * cols])
        # Skip the header row.
        return matrix[1:]

    def __copyToClipboard(self):
        """Copy the current grid contents to the clipboard via its context menu."""
        self.__dialog_window.CVirtualGridCtrl.RightClick(coords=(30, 30))
        self.__main_window.TypeKeys('C')

    def __getCleanedData(self):
        """Read the grid contents through the clipboard.

        :return: cleaned table data
        """
        self.__copyToClipboard()
        data = pywinauto.clipboard.GetData()
        return self.__cleanClipboardData(data)

    def __selectWindow(self, choice):
        """Select one of the dialog's tab pages.

        :param choice: 'W' positions, 'E' consignments, 'R' withdrawals,
                       'A' deals (tab order inferred from click offsets -
                       confirm against the client UI)
        """
        rect = self.__dialog_window.CCustomTabCtrl.ClientRect()
        # The tab strip is split into 8 half-columns; each tab center sits
        # at an odd multiple of width/8.
        x = rect.width() // 8
        y = rect.height() // 2
        if choice == 'W':
            x = x
        elif choice == 'E':
            x *= 3
        elif choice == 'R':
            x *= 5
        elif choice == 'A':
            x *= 7
        self.__dialog_window.CCustomTabCtrl.ClickInput(coords=(x, y))
        time.sleep(0.5)

    def __getInfo(self, choice):
        """Select the given tab and return its grid contents."""
        self.__selectWindow(choice=choice)
        return self.__getCleanedData()

    def getPosition(self):
        """Return the current position (holdings) table."""
        return self.__getInfo(choice='W')

    @staticmethod
    def getDeal(code, pre_position, cur_position):
        """Compute the dealt quantity by diffing two position snapshots.

        :param code: stock code to check, string
        :param pre_position: position table before the order
        :param cur_position: position table after the order
        :return: 0 when nothing was dealt, a positive number of shares
                 bought, or a negative number of shares sold
        """
        if pre_position == cur_position:
            return 0
        pre_len = len(pre_position)
        cur_len = len(cur_position)
        if pre_len == cur_len:
            for row in range(cur_len):
                if cur_position[row][0] == code:
                    return int(float(cur_position[row][1]) - float(pre_position[row][1]))
        if cur_len > pre_len:
            # A new holding appeared; assumes the new row is last - confirm
            # against the client's sort order.
            return int(float(cur_position[-1][1]))

    def withdraw(self, code, direction):
        """Cancel pending orders for one stock and direction.

        :param code: stock code, string
        :param direction: 'B' (buy) or 'S' (sell)
        """
        row_pos = []
        info = self.__getInfo(choice='R')
        if direction == 'B':
            direction = '买入'
        elif direction == 'S':
            direction = '卖出'
        if info:
            for index, element in enumerate(info):
                if element[0] == code:
                    if element[1] == direction:
                        row_pos.append(index)
        if row_pos:
            for row in row_pos:
                # Rows are 16px tall starting at y=28; click to select,
                # then press the cancel button.
                self.__dialog_window.CVirtualGridCtrl.ClickInput(coords=(7, 28 + 16 * row))
                self.__dialog_window.Button12.Click()
            self.__closePopupWindows()

    def withdrawBuy(self):
        """Cancel all pending buy orders."""
        self.__selectWindow(choice='R')
        self.__dialog_window.Button8.Click()
        self.__closePopupWindows()

    def withdrawSell(self):
        """Cancel all pending sell orders."""
        self.__selectWindow(choice='R')
        self.__dialog_window.Button9.Click()
        self.__closePopupWindows()

    def withdrawAll(self):
        """Cancel every pending order."""
        self.__selectWindow(choice='R')
        self.__dialog_window.Button7.Click()
        self.__closePopupWindows()
def getStockData():
    """Fetch real-time quotes for the configured stock codes.

    Returns a list of (code, name, price) tuples, one per entry in
    ``stock_codes`` and in the same order.  Codes missing from the quote
    response -- or every code, when the request fails -- are reported as
    ('', '', 0).
    """
    code_name_price = []
    try:
        df = ts.get_realtime_quotes(stock_codes)
        # Map each returned code to its row index once, instead of
        # re-scanning the whole response for every configured code.
        index_by_code = {df['code'][i]: i for i in range(len(df))}
        for stock_code in stock_codes:
            i = index_by_code.get(stock_code)
            if i is None:
                code_name_price.append(('', '', 0))
            else:
                code_name_price.append((df['code'][i], df['name'][i], float(df['price'][i])))
    except Exception:
        # Network/API failure: report every slot as empty so the monitor
        # loop keeps running.  (Was a bare ``except``, which also caught
        # KeyboardInterrupt/SystemExit.)
        code_name_price = [('', '', 0)] * NUM_OF_STOCKS
    return code_name_price
def monitor():
    """Real-time monitoring loop.

    Polls quotes roughly every 3 seconds while ``is_monitor`` is True and,
    when trading is enabled (``is_start``), places at most one order per
    configured slot once its price/time rule triggers. Each placed order
    is recorded in ``consignation_info`` together with its dealt quantity.
    """
    global actual_stocks_info, consignation_info, is_ordered, is_dealt, set_stocks_info
    count = 1
    pre_position = []
    try:
        operation = OperationThs()
        operation.maxWindow()
        pre_position = operation.getPosition()
        # print(pre_position)
        while is_monitor:
            if is_start:
                actual_stocks_info = getStockData()
                for row, (actual_code, actual_name, actual_price) in enumerate(actual_stocks_info):
                    # A slot is eligible only when it has a valid quote, has
                    # not been ordered yet, is fully configured, and the
                    # configured activation time has passed.
                    if actual_code and is_start and is_ordered[row] == 1 and actual_price > 0 \
                            and set_stocks_info[row][1] and set_stocks_info[row][2] > 0 \
                            and set_stocks_info[row][3] and set_stocks_info[row][4] \
                            and datetime.datetime.now().time() > set_stocks_info[row][5]:
                        # Trigger when the live price crosses the configured
                        # threshold in the configured direction.
                        if (set_stocks_info[row][1] == '>' and actual_price > set_stocks_info[row][2]) or \
                                (set_stocks_info[row][1] == '<' and float(actual_price) < set_stocks_info[row][2]):
                            operation.maxWindow()
                            operation.order(actual_code, set_stocks_info[row][3], set_stocks_info[row][4])
                            dt = datetime.datetime.now()
                            # Mark the slot ordered so it fires only once.
                            is_ordered[row] = 0
                            operation.refresh()
                            cur_position = operation.getPosition()
                            is_dealt[row] = operation.getDeal(actual_code, pre_position, cur_position)
                            consignation_info.append(
                                (dt.strftime('%x'), dt.strftime('%X'), actual_code,
                                 actual_name, set_stocks_info[row][3],
                                 actual_price, set_stocks_info[row][4], '已委托', is_dealt[row]))
                            pre_position = cur_position
            # Periodically refresh the client so its data stays current.
            if count % 200 == 0:
                operation.refresh()
            time.sleep(3)
            count += 1
    except:
        # NOTE(review): `sys` is not imported at module top in the visible
        # code - sys.exit() here would raise NameError; confirm/add
        # `import sys`.
        tkinter.messagebox.showerror('错误', '请先打开“同花顺双向委托界面”后在打开自动交易系统!')
        sys.exit()
class StockGui:
global is_monitor
def __init__(self):
self.window = Tk()
self.window.title("自动化交易系统-同花顺")
# 左上角图标
self.window.iconbitmap('e:\ico.ico')
self.window.resizable(0, 0)
frame1 = Frame(self.window)
frame1.pack(padx=10, pady=10)
Label(frame1, text="股票代码", width=8, justify=CENTER).grid(
row=1, column=1, padx=5, pady=5)
Label(frame1, text="股票名称", width=8, justify=CENTER).grid(
row=1, column=2, padx=5, pady=5)
Label(frame1, text="实时价格", width=8, justify=CENTER).grid(
row=1, column=3, padx=5, pady=5)
Label(frame1, text="关系", width=4, justify=CENTER).grid(
row=1, column=4, padx=5, pady=5)
Label(frame1, text="设定价格", width=8, justify=CENTER).grid(
row=1, column=5, padx=5, pady=5)
Label(frame1, text="方向", width=4, justify=CENTER).grid(
row=1, column=6, padx=5, pady=5)
Label(frame1, text="数量", width=8, justify=CENTER).grid(
row=1, column=7, padx=5, pady=5)
Label(frame1, text="时间可选", width=8, justify=CENTER).grid(
row=1, column=8, padx=5, pady=5)
Label(frame1, text="委托", width=6, justify=CENTER).grid(
row=1, column=9, padx=5, pady=5)
Label(frame1, text="成交", width=6, justify=CENTER).grid(
row=1, column=10, padx=5, pady=5)
self.rows = NUM_OF_STOCKS
self.cols = 10
self.variable = []
for row in range(self.rows):
self.variable.append([])
for col in range(self.cols):
self.variable[row].append(StringVar())
for row in range(self.rows):
Entry(frame1, textvariable=self.variable[row][0],
width=8).grid(row=row + 2, column=1, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][1], state=DISABLED,
width=8).grid(row=row + 2, column=2, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][2], state=DISABLED, justify=RIGHT,
width=8).grid(row=row + 2, column=3, padx=5, pady=5)
Combobox(frame1, values=('<', '>'), textvariable=self.variable[row][3],
width=2).grid(row=row + 2, column=4, padx=5, pady=5)
Spinbox(frame1, from_=0, to=999, textvariable=self.variable[row][4], justify=RIGHT,
increment=0.01, width=6).grid(row=row + 2, column=5, padx=5, pady=5)
Combobox(frame1, values=('B', 'S'), textvariable=self.variable[row][5],
width=2).grid(row=row + 2, column=6, padx=5, pady=5)
Spinbox(frame1, from_=0, to=10000, textvariable=self.variable[row][6], justify=RIGHT,
increment=100, width=6).grid(row=row + 2, column=7, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][7],
width=8).grid(row=row + 2, column=8, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][8], state=DISABLED, justify=CENTER,
width=6).grid(row=row + 2, column=9, padx=5, pady=5)
Entry(frame1, textvariable=self.variable[row][9], state=DISABLED, justify=RIGHT,
width=6).grid(row=row + 2, column=10, padx=5, pady=5)
frame3 = Frame(self.window)
frame3.pack(padx=10, pady=10)
# 创建菜单功能
self.menuBar = Menu(self.window)
self.window.config(menu=self.menuBar)
# tearoff=0 代表将菜单项最上面的一条虚线去掉,默认是存在的
self.fileMenu = Menu(self.menuBar,tearoff=0)
# 创建一个名为“帮助”的菜单项
self.menuBar.add_cascade(label="帮助",menu=self.fileMenu)
# 在“帮助”项下添加一个名为“关于”的选项
self.fileMenu.add_command(label="关于",command =self.about)
# 增加一条横线
self.fileMenu.add_separator()
# 在“帮助”项下添加一个名为“退出”的选项,并绑定执行函数
self.fileMenu.add_command(label="退出",command=self.close)
# 增加第二个导航栏
# self.helpMenu = Menu(self.menuBar,tearoff=0)
# self.menuBar.add_cascade(label="Help", menu=self.helpMenu)
# self.helpMenu.add_command(label="About")
self.start_bt = Button(frame3, text="开始", command=self.start)
self.start_bt.pack(side=LEFT)
self.set_bt = Button(frame3, text='重置买卖', command=self.setFlags)
self.set_bt.pack(side=LEFT)
Button(frame3, text="历史记录", command=self.displayHisRecords).pack(side=LEFT)
Button(frame3, text='保存', command=self.save).pack(side=LEFT)
self.load_bt = Button(frame3, text='载入', command=self.load)
self.load_bt.pack(side=LEFT)
self.window.protocol(name="WM_DELETE_WINDOW", func=self.close)
self.window.after(100, self.updateControls)
self.window.mainloop()
def displayHisRecords(self):
    """Open a Toplevel window listing all historical trade records.

    Renders the global ``consignation_info`` list newest-first in a
    read-only Treeview with a vertical scrollbar.
    """
    global consignation_info
    tp = Toplevel()
    tp.title('历史记录')
    # NOTE(review): hard-coded icon path; fails if e:\ico.ico is absent — confirm.
    tp.iconbitmap('e:\ico.ico')
    tp.resizable(0, 1)  # width fixed, height resizable
    scrollbar = Scrollbar(tp)
    scrollbar.pack(side=RIGHT, fill=Y)
    # Columns: date, time, stock code, stock name, side, price, quantity,
    # ordered, dealt (headers are user-facing Chinese strings).
    col_name = ['日期', '时间', '证券代码', '证券名称', '方向', '价格', '数量', '委托', '成交']
    tree = Treeview(
        tp, show='headings', columns=col_name, height=30, yscrollcommand=scrollbar.set)
    tree.pack(expand=1, fill=Y)
    scrollbar.config(command=tree.yview)
    for name in col_name:
        tree.heading(name, text=name)
        tree.column(name, width=70, anchor=CENTER)
    # Insert each record at index 0 so the most recent appears on top.
    for msg in consignation_info:
        tree.insert('', 0, values=msg)
def save(self):
    """Persist the current UI configuration and the trade history to disk.

    Refreshes ``set_stocks_info`` from the widgets first, then writes the
    two global structures as consecutive pickles into 'stockInfo.dat'.
    """
    global set_stocks_info, consignation_info
    # Pull the latest values out of the UI before dumping.
    self.getItems()
    with open('stockInfo.dat', 'wb') as data_file:
        for payload in (set_stocks_info, consignation_info):
            pickle.dump(payload, data_file)
def load(self):
    """Load the saved configuration from 'stockInfo.dat' and refresh the UI.

    Restores both the per-row stock settings and the trade history; shows
    an error dialog if the save file is missing or cannot be unpickled.
    """
    global set_stocks_info, consignation_info
    try:
        with open('stockInfo.dat', 'rb') as fp:
            # Two sequential dumps were written by save(): settings, then history.
            set_stocks_info = pickle.load(fp)
            consignation_info = pickle.load(fp)
        # Columns 1, 2, 8 and 9 are live/display-only and are not restored.
        for row in range(self.rows):
            for col in range(self.cols):
                if col == 0:      # stock code
                    self.variable[row][col].set(set_stocks_info[row][0])
                elif col == 3:    # comparison direction ('>' or '<')
                    self.variable[row][col].set(set_stocks_info[row][1])
                elif col == 4:    # trigger price
                    self.variable[row][col].set(set_stocks_info[row][2])
                elif col == 5:    # side ('B' buy / 'S' sell)
                    self.variable[row][col].set(set_stocks_info[row][3])
                elif col == 6:    # quantity
                    self.variable[row][col].set(set_stocks_info[row][4])
                elif col == 7:
                    # Trigger time; 01:00:00 is the "unset" sentinel written
                    # by getItems() for unparsable input.
                    temp = set_stocks_info[row][5].strftime('%X')
                    if temp == '01:00:00':
                        self.variable[row][col].set('')
                    else:
                        self.variable[row][col].set(temp)
    except Exception:
        tkinter.messagebox.showerror('错误', "没有找到配置保存文件,请先进行股票买卖配置信息保存!")
def setFlags(self):
    """Re-arm every per-stock order flag; only allowed while monitoring is stopped."""
    global is_start, is_ordered
    if not is_start:
        # 1 == "not yet ordered"; one flag per configured stock row.
        is_ordered = [1] * NUM_OF_STOCKS
        tkinter.messagebox.showinfo('重置成功', "重置成功!")
def updateControls(self):
    """Refresh the live name/price/status cells from the monitor thread's data.

    Re-schedules itself every 3 seconds via Tk's event loop; cells are only
    updated while monitoring is active (``is_start``).
    """
    global actual_stocks_info, is_start
    if is_start:
        for row, (actual_code, actual_name, actual_price) in enumerate(actual_stocks_info):
            if actual_code:
                self.variable[row][1].set(actual_name)
                self.variable[row][2].set(str(actual_price))
                # is_ordered: 1 = still watching, 0 = order submitted.
                if is_ordered[row] == 1:
                    self.variable[row][8].set('监控中')
                elif is_ordered[row] == 0:
                    self.variable[row][8].set('已委托')
                self.variable[row][9].set(str(is_dealt[row]))
            else:
                # No valid code for this row: blank out the live fields.
                self.variable[row][1].set('')
                self.variable[row][2].set('')
                self.variable[row][8].set('')
                self.variable[row][9].set('')
    # Always re-arm the timer so updates resume when monitoring restarts.
    self.window.after(3000, self.updateControls)
@staticmethod
def __pickCodeFromItems(items_info):
    """Return the stock code (first field) of every configured row.

    :param items_info: per-row settings lists as produced by ``getItems``
    :return: list of stock-code strings (may contain '' for invalid rows)
    """
    return [item[0] for item in items_info]
def start(self):
    """Toggle monitoring on/off and enable/disable the related buttons."""
    global is_start, stock_codes, set_stocks_info
    # Flip the global running flag.
    if is_start is False:
        is_start = True
    else:
        is_start = False
    if is_start:
        # Snapshot the UI settings and extract the codes to monitor.
        self.getItems()
        stock_codes = self.__pickCodeFromItems(set_stocks_info)
        self.start_bt['text'] = '停止'
        # Lock out re-configuration while monitoring runs.
        self.set_bt['state'] = DISABLED
        self.load_bt['state'] = DISABLED
        tkinter.messagebox.showinfo('成功', '启动成功!')
    else:
        self.start_bt['text'] = '开始'
        self.set_bt['state'] = NORMAL
        self.load_bt['state'] = NORMAL
def about(self):
    """Show the About dialog (scope of use, version, author, release date)."""
    tkinter.messagebox.showinfo("关于",'\r此系统仅适应于同花顺网上交易5.0,使用时请先登陆同花顺网上交易系统并切换到“同花顺双向委托界面”。\r 版本号:v 1.0.0 \r 作者:水域\r 发布日期:2017.01.11')
def close(self):
    """Stop the monitor thread and quit the Tk main loop when the window closes."""
    global is_monitor
    # Signal the background monitor thread to exit its polling loop.
    is_monitor = False
    self.window.quit()
def getItems(self):
    """Read and validate every per-row input field from the UI.

    Rebuilds the global ``set_stocks_info`` with one list per row:
    [code, direction, price, side, quantity, trigger_time].  Invalid
    entries are normalised to '' (strings), 0 (price), or a 01:00:00
    sentinel time.
    """
    global set_stocks_info
    set_stocks_info = []
    # Collect the buy/sell price, quantity and other inputs for each row.
    for row in range(self.rows):
        set_stocks_info.append([])
        for col in range(self.cols):
            temp = self.variable[row][col].get().strip()
            if col == 0:
                # Stock codes must be exactly 6 digits.
                if len(temp) == 6 and temp.isdigit():
                    set_stocks_info[row].append(temp)
                else:
                    set_stocks_info[row].append('')
            elif col == 3:
                # Price comparison direction.
                if temp in ('>', '<'):
                    set_stocks_info[row].append(temp)
                else:
                    set_stocks_info[row].append('')
            elif col == 4:
                try:
                    price = float(temp)
                    if price > 0:
                        set_stocks_info[row].append(price)  # store price as a number
                    else:
                        set_stocks_info[row].append(0)
                except ValueError:
                    set_stocks_info[row].append(0)
            elif col == 5:
                # Order side: B = buy, S = sell.
                if temp in ('B', 'S'):
                    set_stocks_info[row].append(temp)
                else:
                    set_stocks_info[row].append('')
            elif col == 6:
                # Quantity, rounded down to whole lots of 100 shares.
                if temp.isdigit() and int(temp) >= 0:
                    set_stocks_info[row].append(str(int(temp) // 100 * 100))
                else:
                    set_stocks_info[row].append('')
            elif col == 7:
                # Trigger time; unparsable input becomes the 01:00:00 sentinel.
                try:
                    set_stocks_info[row].append(datetime.datetime.strptime(temp, '%H:%M:%S').time())
                except ValueError:
                    set_stocks_info[row].append(datetime.datetime.strptime('1:00:00', '%H:%M:%S').time())
if __name__ == '__main__':
    # StockGui()
    # Run the Tk GUI and the market monitor concurrently in two threads.
    t1 = threading.Thread(target=StockGui)
    t1.start()
    t2 = threading.Thread(target=monitor)
    t2.start()
| 36.513158 | 137 | 0.532883 | 20,058 | 0.839247 | 0 | 0 | 1,506 | 0.063013 | 0 | 0 | 4,728 | 0.197824 |
d127426de54b0b22ee00ffd0de5d1aed5a26e875 | 2,519 | py | Python | torchbnn/functional.py | Harry24k/bayesian-neural-network-pytorch | d2272f09e0d08c1abe1f53ce6df56b31494d7020 | [
"MIT"
] | 178 | 2019-12-08T14:46:56.000Z | 2022-03-23T04:12:35.000Z | torchbnn/functional.py | Harry24k/bayesian-neural-network-pytorch | d2272f09e0d08c1abe1f53ce6df56b31494d7020 | [
"MIT"
] | 8 | 2019-11-07T05:45:37.000Z | 2020-12-07T11:07:05.000Z | torchbnn/functional.py | Harry24k/bayesian-neural-network-pytorch | d2272f09e0d08c1abe1f53ce6df56b31494d7020 | [
"MIT"
] | 24 | 2020-02-04T12:32:33.000Z | 2022-03-18T13:13:08.000Z | import math
import torch
from .modules import *
def _kl_loss(mu_0, log_sigma_0, mu_1, log_sigma_1) :
"""
An method for calculating KL divergence between two Normal distribtuion.
Arguments:
mu_0 (Float) : mean of normal distribution.
log_sigma_0 (Float): log(standard deviation of normal distribution).
mu_1 (Float): mean of normal distribution.
log_sigma_1 (Float): log(standard deviation of normal distribution).
"""
kl = log_sigma_1 - log_sigma_0 + \
(torch.exp(log_sigma_0)**2 + (mu_0-mu_1)**2)/(2*math.exp(log_sigma_1)**2) - 0.5
return kl.sum()
def bayesian_kl_loss(model, reduction='mean', last_layer_only=False):
    """Accumulate the KL divergence of every Bayesian layer in *model*.

    Arguments:
        model (nn.Module): model whose Bayesian layers are scanned.
        reduction (string, optional): ``'mean'`` divides the summed KL by
            the total number of Bayesian weight/bias elements; ``'sum'``
            returns the raw sum.  Any other value raises ValueError.
        last_layer_only (Bool): True to return only the KL of the last
            Bayesian layer encountered.
    """
    device = torch.device("cuda" if next(model.parameters()).is_cuda else "cpu")
    kl = torch.Tensor([0]).to(device)
    kl_sum = torch.Tensor([0]).to(device)
    n = torch.Tensor([0]).to(device)
    for module in model.modules():
        # Gather the (mu, log_sigma) pairs this module contributes.
        pairs = []
        if isinstance(module, (BayesLinear, BayesConv2d)):
            pairs.append((module.weight_mu, module.weight_log_sigma))
            if module.bias:
                pairs.append((module.bias_mu, module.bias_log_sigma))
        if isinstance(module, BayesBatchNorm2d):
            if module.affine:
                pairs.append((module.weight_mu, module.weight_log_sigma))
                pairs.append((module.bias_mu, module.bias_log_sigma))
        for mu, log_sigma in pairs:
            kl = _kl_loss(mu, log_sigma, module.prior_mu, module.prior_log_sigma)
            kl_sum += kl
            n += len(mu.view(-1))
    if last_layer_only or n == 0:
        # ``kl`` still holds the contribution of the last pair processed.
        return kl
    if reduction == 'mean':
        return kl_sum / n
    elif reduction == 'sum':
        return kl_sum
    else:
        raise ValueError(reduction + " is not valid")
| 34.986111 | 93 | 0.590711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 930 | 0.369194 |
d12838004d4065065c15278b26ac7643b7d1e6b3 | 8,553 | py | Python | tests/settings/test_custom_metrics.py | proknow/proknow-python | c4ca0be6f606db655b711d3490febdec9c139570 | [
"MIT"
] | 2 | 2019-03-16T21:41:45.000Z | 2022-02-09T16:01:58.000Z | tests/settings/test_custom_metrics.py | proknow/proknow-python | c4ca0be6f606db655b711d3490febdec9c139570 | [
"MIT"
] | 7 | 2019-02-25T15:04:30.000Z | 2021-12-13T15:15:38.000Z | tests/settings/test_custom_metrics.py | proknow/proknow-python | c4ca0be6f606db655b711d3490febdec9c139570 | [
"MIT"
] | 3 | 2020-07-10T14:18:55.000Z | 2021-09-14T09:47:41.000Z | import pytest
import re
from proknow import Exceptions
def test_create(app, custom_metric_generator):
    """Creating a custom metric returns its attributes and makes it queryable."""
    pk = app.pk

    # Verify returned CustomMetricItem
    params, custom_metric = custom_metric_generator()
    assert custom_metric.name == params["name"]
    assert custom_metric.context == params["context"]
    assert custom_metric.type == params["type"]

    # Assert item can be found in query
    custom_metrics = pk.custom_metrics.query()
    for custom_metric in custom_metrics:
        if custom_metric.name == params["name"]:
            custom_metric_match = custom_metric
            break
    else:
        custom_metric_match = None
    assert custom_metric_match is not None
    assert custom_metric_match.name == params["name"]
    assert custom_metric_match.context == params["context"]
    assert custom_metric_match.type == params["type"]
def test_create_failure(app, custom_metric_generator):
    """Creating a custom metric with a duplicate name raises a 409 HttpError."""
    pk = app.pk

    params, custom_metric = custom_metric_generator()

    # Assert error is raised for duplicate custom metric
    with pytest.raises(Exceptions.HttpError) as err_wrapper:
        pk.custom_metrics.create(**params)
    assert err_wrapper.value.status_code == 409
    assert err_wrapper.value.body == 'Custom metric already exists with name "' + params["name"] + '"'
def test_delete(app, custom_metric_generator):
    """Deleting a custom metric removes it from subsequent query() results."""
    pk = app.pk

    params, custom_metric = custom_metric_generator(do_not_mark=True)

    # Verify custom metric was deleted successfully
    custom_metric.delete()
    for custom_metric in pk.custom_metrics.query():
        if custom_metric.name == params["name"]:
            match = custom_metric
            break
    else:
        match = None
    assert match is None
def test_delete_failure(app, custom_metric_generator):
    """Deleting an already-deleted custom metric raises a 404 HttpError."""
    pk = app.pk

    params, custom_metric = custom_metric_generator(do_not_mark=True)

    custom_metric.delete()

    # Assert error is raised when deleting a custom metric that no longer exists
    with pytest.raises(Exceptions.HttpError) as err_wrapper:
        custom_metric.delete()
    assert err_wrapper.value.status_code == 404
    assert err_wrapper.value.body == 'Custom metric "' + custom_metric.id + '" not found'
def test_find(app, custom_metric_generator):
    """find() locates a metric by predicate, by properties, by both, or returns None."""
    pk = app.pk

    params, custom_metric = custom_metric_generator(name="Find Me")
    expr = re.compile(r"ind M")

    # Find with no args
    found = pk.custom_metrics.find()
    assert found is None

    # Find using predicate
    found = pk.custom_metrics.find(lambda ws: expr.search(ws.data["name"]) is not None)
    assert found is not None
    assert found.name == params["name"]
    assert found.context == params["context"]
    assert found.type == params["type"]

    # Find using props
    found = pk.custom_metrics.find(id=custom_metric.id, name=params["name"])
    assert found is not None
    assert found.name == params["name"]
    assert found.context == params["context"]
    assert found.type == params["type"]

    # Find using both
    found = pk.custom_metrics.find(lambda ws: expr.search(ws.data["name"]) is not None, id=custom_metric.id, name=params["name"])
    assert found is not None
    assert found.name == params["name"]
    assert found.context == params["context"]
    assert found.type == params["type"]

    # Find failure (predicate matches nothing; name matching is case-sensitive here)
    found = pk.custom_metrics.find(lambda ws: expr.search(ws.data["id"]) is not None)
    assert found is None
    found = pk.custom_metrics.find(id=custom_metric.id, name=params["name"].lower())
    assert found is None
def test_query(app, custom_metric_generator):
    """query() returns every created custom metric with its attributes intact."""
    pk = app.pk

    params1, custom_metric1 = custom_metric_generator()
    params2, custom_metric2 = custom_metric_generator()

    # Verify test 1
    for custom_metric in pk.custom_metrics.query():
        if custom_metric.name == params1["name"]:
            match = custom_metric
            break
    else:
        match = None
    assert match is not None
    assert match.name == params1["name"]
    assert match.context == params1["context"]
    assert match.type == params1["type"]

    # Verify test 2
    for custom_metric in pk.custom_metrics.query():
        if custom_metric.name == params2["name"]:
            match = custom_metric
            break
    else:
        match = None
    assert match is not None
    assert match.name == params2["name"]
    assert match.context == params2["context"]
    assert match.type == params2["type"]
def test_resolve(app, custom_metric_generator):
    """resolve() accepts either a metric id or a metric name."""
    pk = app.pk

    params, custom_metric = custom_metric_generator()

    # Test resolve by id
    resolved = pk.custom_metrics.resolve(custom_metric.id)
    assert resolved is not None
    assert resolved.name == params["name"]
    assert resolved.context == params["context"]
    assert resolved.type == params["type"]

    # Test resolve by name
    resolved = pk.custom_metrics.resolve(params["name"])
    assert resolved is not None
    assert resolved.name == params["name"]
    assert resolved.context == params["context"]
    assert resolved.type == params["type"]
def test_resolve_failure(app):
    """resolve() raises CustomMetricLookupError for unknown ids and names."""
    pk = app.pk

    # Test resolve by id (a 32-char hex string is treated as an id)
    with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
        pk.custom_metrics.resolve("00000000000000000000000000000000")
    assert err_wrapper.value.message == "Custom metric with id `00000000000000000000000000000000` not found."

    # Test resolve by name
    with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
        pk.custom_metrics.resolve("My Metric")
    assert err_wrapper.value.message == "Custom metric with name `My Metric` not found."
def test_resolve_by_id(app, custom_metric_generator):
    """resolve_by_id() returns the metric with matching attributes."""
    pk = app.pk

    params, custom_metric = custom_metric_generator()

    resolved = pk.custom_metrics.resolve_by_id(custom_metric.id)
    assert resolved is not None
    assert resolved.name == params["name"]
    assert resolved.context == params["context"]
    assert resolved.type == params["type"]
def test_resolve_by_id_failure(app):
    """resolve_by_id() raises CustomMetricLookupError for an unknown id."""
    pk = app.pk

    with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
        pk.custom_metrics.resolve_by_id("00000000000000000000000000000000")
    assert err_wrapper.value.message == "Custom metric with id `00000000000000000000000000000000` not found."
def test_resolve_by_name(app, custom_metric_generator):
    """resolve_by_name() matches names case-insensitively."""
    pk = app.pk

    params, custom_metric = custom_metric_generator(name="custom-lower1")

    resolved = pk.custom_metrics.resolve_by_name(params["name"])
    assert resolved is not None
    assert resolved.name == params["name"]
    assert resolved.context == params["context"]
    assert resolved.type == params["type"]

    # Same lookup with upper-cased name still resolves to the original metric.
    resolved = pk.custom_metrics.resolve_by_name(params["name"].upper())
    assert resolved is not None
    assert resolved.name == params["name"]
    assert resolved.context == params["context"]
    assert resolved.type == params["type"]
def test_resolve_by_name_failure(app):
    """resolve_by_name() raises CustomMetricLookupError for an unknown name."""
    pk = app.pk

    with pytest.raises(Exceptions.CustomMetricLookupError) as err_wrapper:
        # Exercise resolve_by_name directly; the original called the generic
        # resolve(), which is already covered by test_resolve_failure and did
        # not match this test's name.
        pk.custom_metrics.resolve_by_name("My Custom Metric")
    assert err_wrapper.value.message == "Custom metric with name `My Custom Metric` not found."
def test_update(app, custom_metric_generator):
    """Saving a renamed/re-contexted metric is reflected in query() results."""
    pk = app.pk
    resource_prefix = app.resource_prefix

    params, custom_metric = custom_metric_generator()

    # Verify custom metric was updated successfully
    updated_name = resource_prefix + "Updated Custom Metric Name"
    custom_metric.name = updated_name
    custom_metric.context = "image_set"
    custom_metric.save()
    custom_metrics = pk.custom_metrics.query()
    for custom_metric in custom_metrics:
        if custom_metric.name == updated_name:
            custom_metric_match = custom_metric
            break
    else:
        custom_metric_match = None
    assert custom_metric_match is not None
    assert custom_metric_match.name == updated_name
    assert custom_metric_match.context == "image_set"
    assert custom_metric_match.type == params["type"]
def test_update_failure(app, custom_metric_generator):
    """Renaming a metric to another metric's name raises a 409 HttpError."""
    pk = app.pk

    params1, _ = custom_metric_generator()
    params2, custom_metric = custom_metric_generator()

    # Assert error is raised for duplicate custom metric name
    with pytest.raises(Exceptions.HttpError) as err_wrapper:
        custom_metric.name = params1["name"]
        custom_metric.save()
    assert err_wrapper.value.status_code == 409
    assert err_wrapper.value.body == 'Custom metric already exists with name "' + params1["name"] + '"'
| 35.342975 | 129 | 0.70677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,433 | 0.167544 |
d12936ab2356f3f48183dc8e8ae9b3d0e4578ebf | 10,086 | py | Python | models/script/attention.py | junkunyuan/CSAC | 70d918ed2fe65a0a503b56d66136032031cd67e4 | [
"MIT"
] | 3 | 2022-01-06T06:42:12.000Z | 2022-01-20T04:00:40.000Z | models/script/attention.py | junkunyuan/CSAC | 70d918ed2fe65a0a503b56d66136032031cd67e4 | [
"MIT"
] | null | null | null | models/script/attention.py | junkunyuan/CSAC | 70d918ed2fe65a0a503b56d66136032031cd67e4 | [
"MIT"
] | null | null | null | import torch
from torch import dtype, nn
import torch.nn.functional as F
class PAM_Module(nn.Module):
    """Position attention module: attends target features over source features.

    One query/key/value 1x1-conv triple is created per feature level
    (``num`` levels), all operating on ``sizes[1]`` channels.
    """
    def __init__(self, num, sizes, mode=None):
        super(PAM_Module, self).__init__()
        self.sizes = sizes
        # NOTE(review): forward() calls self.mode.find(...), so mode must be
        # a string at call time; the default None would raise — confirm callers.
        self.mode = mode
        for i in range(num):
            setattr(self, "query" + str(i),
                    nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
            setattr(self, "value" + str(i),
                    nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))
            setattr(self, "key" + str(i),
                    nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))

    def forward(self, feat_sources, feat_targets):
        """calculate the attention weight and alpha"""
        # For each target level: build a spatial-attention map against every
        # source level, optionally aggregate attended features, and record the
        # mean attention energy as a per-source weighting signal (alpha).
        ret_feats, ret_alphas = [], []
        for i, query in enumerate(feat_targets):
            Bt, Ct, Ht, Wt = query.size()
            # Projected query reshaped to (B, H*W, C).
            pro_query = getattr(self, "query" + str(i))(query).view(Bt, -1, Ht * Wt).permute(0, 2, 1)
            attentions, means = [], []
            for j, key in enumerate(feat_sources):
                pro_key = getattr(self, "key" + str(j))(key).view(Bt, -1, Ht * Wt)
                # (B, H*W, H*W) affinity between target and source positions.
                energy = torch.bmm(pro_query, pro_key)
                means.append(energy.mean().item())
                attentions.append(torch.softmax(energy, dim=-1))
            if self.mode.find('alpha') >= 0:
                # Per-source softmax weights for this target level.
                ret_alphas.append(torch.softmax(torch.tensor(means), dim=0))
            else:
                ret_alphas.append(torch.tensor(means).mean())
            # NOTE(review): 'alpha_cam' is listed twice and 'alpha_pam' is
            # absent from this list — possibly a typo; confirm intent.
            if self.mode in ['all', 'pam', 'cam', 'alpha_cam', 'alpha_cam', 'alpha_all']:
                attention = torch.stack(attentions, dim=0).sum(0)
                value = getattr(self, "value" + str(i))(query).view(Bt, -1, Ht * Wt)
                out = torch.bmm(value, attention.permute(0, 2, 1)).view(Bt, Ct, Ht, Wt)
                ret_feats.append(out)
        if self.mode.find('alpha') >= 0:
            ret_alphas = torch.stack(ret_alphas, dim=0)
        else:
            ret_alphas = torch.softmax(torch.tensor(ret_alphas), dim=0)
        return ret_feats, ret_alphas
class CAM_Module(nn.Module):
    """Channel attention module: attends target channels over source channels.

    Unlike PAM_Module, queries and keys are the raw feature maps; only the
    value path gets a per-level 1x1 conv.
    """
    def __init__(self, num, sizes, mode=None):
        super(CAM_Module, self).__init__()
        self.sizes = sizes
        # NOTE(review): forward() assumes mode is a string; None would raise.
        self.mode = mode
        for i in range(num):
            setattr(self, "value" + str(i),
                    nn.Conv2d(in_channels=sizes[1], out_channels=sizes[1], kernel_size=1))

    def forward(self, feat_sources, feat_targets):
        # For each target level: channel-affinity map against every source
        # level; mean energies feed the per-source alpha weights.
        ret_feats, ret_alphas = [], []
        for i, query in enumerate(feat_targets):
            Bt, Ct, Ht, Wt = query.size()
            pro_query = query.view(Bt, Ct, -1)
            attentions, means = [], []
            for j, key in enumerate(feat_sources):
                pro_key = key.view(Bt, Ct, -1).permute(0, 2, 1)
                # (B, C, C) channel affinity.
                energy = torch.bmm(pro_query, pro_key)
                means.append(energy.mean().item())
                attentions.append(torch.softmax(energy, dim=-1))
            if self.mode.find('alpha') >= 0:
                ret_alphas.append(torch.softmax(torch.tensor(means), dim=0))
            else:
                ret_alphas.append(torch.tensor(means).mean())
            # NOTE(review): 'alpha_cam' appears twice and 'alpha_pam' is
            # missing — mirrors the same apparent typo in PAM_Module.
            if self.mode in ['all', 'pam', 'cam', 'alpha_cam', 'alpha_cam', 'alpha_all']:
                attention = torch.stack(attentions, dim=0).sum(0)
                value = getattr(self, "value" + str(i))(query).view(Bt, Ct, -1)
                out = torch.bmm(attention, value).view(Bt, Ct, Ht, Wt)
                ret_feats.append(out)
        if self.mode.find('alpha') >= 0:
            ret_alphas = torch.stack(ret_alphas, dim=0)
        else:
            ret_alphas = torch.softmax(torch.tensor(ret_alphas), dim=0)
        return ret_feats, ret_alphas
class ConvReg(nn.Module):
    """Convolutional regression: adapt a student feature map to a teacher's shape.

    Chooses a conv mapping the student spatial size s_H to the teacher size
    t_H: stride-2 conv for exact 2x downsampling, transposed conv for exact
    2x upsampling, otherwise a valid conv whose kernel shrinks s_H to t_H.

    Raises:
        NotImplementedError: if s_H < t_H and the sizes are not an exact
            2x upsampling pair (no single conv can produce the target size).
    """
    def __init__(self, s_shape, t_shape, factor=1):
        super(ConvReg, self).__init__()
        s_N, s_C, s_H, s_W = s_shape
        t_N, t_C, t_H, t_W = t_shape
        if s_H == 2 * t_H:
            # Exact 2x downsample.
            self.conv = nn.Conv2d(
                s_C, t_C // factor, kernel_size=3, stride=2, padding=1)
        elif s_H * 2 == t_H:
            # Exact 2x upsample.
            self.conv = nn.ConvTranspose2d(
                s_C, t_C // factor, kernel_size=4, stride=2, padding=1)
        elif s_H >= t_H:
            # Shrink by (s_H - t_H) via one large valid convolution.
            self.conv = nn.Conv2d(
                s_C, t_C // factor, kernel_size=(1 + s_H - t_H, 1 + s_W - t_W))
        else:
            # Fixed: the original raised NotImplemented, which is a constant
            # (not an exception type) and produced a TypeError instead of the
            # intended error.
            raise NotImplementedError(
                'student size {}, teacher size {}'.format(s_H, t_H))

    def forward(self, x):
        """Apply the shape-adapting convolution."""
        x = self.conv(x)
        return x
class Fit(nn.Module):
    """Project a feature map of shape *s_shape* onto the layout of *t_shape*
    using a single convolution.

    The conv is picked by comparing the two spatial heights: stride-2 conv
    for 2x downsampling, transposed conv for 2x upsampling, a 1x1 conv when
    the sizes already match, and otherwise a valid conv whose kernel shrinks
    the map by exactly the size difference.  Output channels are
    ``t_shape[1] // factor``.
    """
    def __init__(self, s_shape, t_shape, factor=1):
        super(Fit, self).__init__()
        in_ch, src_h, src_w = s_shape[1], s_shape[2], s_shape[3]
        out_ch, dst_h, dst_w = t_shape[1] // factor, t_shape[2], t_shape[3]
        if src_h == 2 * dst_h:
            self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=2, padding=1)
        elif src_h * 2 == dst_h:
            self.conv = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4, stride=2, padding=1)
        elif src_h == dst_h:
            self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0)
        else:
            # Generic shrink: valid conv with kernel sized to the difference.
            self.conv = nn.Conv2d(in_ch, out_ch,
                                  kernel_size=(1 + src_h - dst_h, 1 + src_w - dst_w))

    def forward(self, x):
        """Apply the shape-adapting convolution."""
        return self.conv(x)
# torch.Size([16, 128, 28, 28]) torch.Size([16, 256, 14, 14]) torch.Size([16, 512, 7, 7])
class Project(nn.Module):
    """Hold one Fit adapter per feature level for both the source and the
    target streams, projecting every level to a common ``new_size``."""
    def __init__(self, origin_sizes, new_size=torch.Size([-1, 16, 14, 14]), factor=1):
        super(Project, self).__init__()
        for idx, shape in enumerate(origin_sizes):
            setattr(self, "target%d" % idx, Fit(shape, new_size, factor=factor))
            setattr(self, "source%d" % idx, Fit(shape, new_size, factor=factor))

    def forward(self, feat_sources, feat_targets):
        """Run each level's features through its dedicated adapter."""
        projected_sources = [getattr(self, "source%d" % idx)(feat)
                             for idx, feat in enumerate(feat_sources)]
        projected_targets = [getattr(self, "target%d" % idx)(feat)
                             for idx, feat in enumerate(feat_targets)]
        return projected_sources, projected_targets
class DAAttention(nn.Module):
    """Dual-attention (position + channel) alignment between source and target features.

    ``mode`` selects which attention branches are built and what forward()
    returns: attended features ('pam'/'cam'/'all'), per-source alpha weights
    ('alpha*'), or unattended projections ('noa*').
    """
    def __init__(self, origin_sizes, new_size=torch.Size([-1, 32, 7, 7]), factor=1, mode="all"):
        super(DAAttention, self).__init__()
        self.pro = Project(origin_sizes, new_size=new_size, factor=factor)
        self.mode = mode
        self.layer_num = len(origin_sizes)
        if mode in ['all', 'alpha', 'pam', 'alpha_pam', 'alpha_all']:
            self.pam = PAM_Module(self.layer_num, new_size, self.mode)
        if mode in ['all', 'alpha', 'cam', 'alpha_cam', 'alpha_all']:
            self.cam = CAM_Module(self.layer_num, new_size, self.mode)
        # Dimensions of the common projected size, used to flatten outputs.
        self.C = new_size[1]
        self.H = new_size[2]
        self.W = new_size[3]

    def forward(self, feat_sources, feat_targets):
        # Project every level to the shared size first.
        new_feat_sources, new_feat_targets = self.pro(
            feat_sources, feat_targets)
        if self.mode in ['pam', 'all', 'alpha', 'alpha_pam', 'alpha_all']:
            feat_pam, alpha_pam = self.pam(new_feat_sources, new_feat_targets)
        if self.mode in ['cam', 'all', 'alpha', 'alpha_cam', 'alpha_all']:
            feat_cam, alpha_cam = self.cam(new_feat_sources, new_feat_targets)
        ret_alpha = None
        ret_targets, ret_sources = [], []
        for i in range(self.layer_num):
            if self.mode in ['all', 'alpha_all']:
                # Average the two attention branches per level.
                ret_targets.append(((feat_pam[i] + feat_cam[i]) * 0.5).view(-1, self.C * self.H * self.W))
                ret_alpha = (alpha_cam + alpha_pam) * 0.5
            elif self.mode == 'cam':
                ret_targets.append(feat_cam[i].view(-1, self.C * self.H * self.W))
                ret_alpha = alpha_cam
            elif self.mode == 'pam':
                ret_targets.append(feat_pam[i].view(-1, self.C * self.H * self.W))
                ret_alpha = alpha_pam
            elif self.mode in ['alpha', 'alpha_pam', 'alpha_cam']:
                # Alpha-only modes return weights and no attended features.
                if self.mode == 'alpha': ret_alpha = (alpha_pam + alpha_cam) * 0.5
                elif self.mode == 'alpha_cam': ret_alpha = alpha_cam
                elif self.mode == 'alpha_pam': ret_alpha = alpha_pam
            elif self.mode[:3] == 'noa':
                # "No attention": return raw projections for both streams.
                ret_targets.append(new_feat_targets[i].view(-1, self.C * self.H * self.W))
                ret_sources.append(new_feat_sources[i].view(-1, self.C * self.H * self.W))
        return ret_sources, ret_alpha, ret_targets
if __name__ == '__main__':
    # Commented-out manual smoke tests for DAAttention / FLDGFullNet, kept
    # for reference.
    # feat_source1 = torch.rand((16,512,28,28))
    # feat_source2 = torch.rand((16,1024,14,14))
    # feat_source3 = torch.rand((16,2048,7,7))
    # feat_target1 = torch.rand((16, 512, 28, 28))
    # feat_target2 = torch.rand((16, 1024, 14, 14))
    # feat_target3 = torch.rand((16, 2048, 7, 7))
    # att = DAAttention([feat_source1.size(),feat_source2.size(),feat_source3.size()])
    # out,alpha = att([feat_source1,feat_source2,feat_source3],[feat_target1,feat_target2,feat_target3])
    # print(out[0].size(),alpha.size())
    # print(out[1].size(),alpha.size())
    # print(out[2].size(),alpha.size())
    # import sys
    # sys.path.append('../..')
    # sys.path.append('..')
    # from models.fullnet import FLDGFullNet
    # from models.backbone import resnet18
    # backbone = resnet18()
    # net = FLDGFullNet(backbone, 7)
    # data = torch.rand((16, 3, 224, 224))
    # a, b, c, d, e = net(data)
    # print(c.size(), d.size(), e.size())
    # torch.Size([16, 128, 28, 28]) torch.Size([16, 256, 14, 14]) torch.Size([16, 512, 7, 7])
    # Minimal live check: build a random tensor and read one element.
    import torch
    a = torch.rand(3, 3)
    print(a, a[0, 0].item())
| 41.506173 | 106 | 0.559984 | 8,751 | 0.867638 | 0 | 0 | 0 | 0 | 0 | 0 | 1,748 | 0.17331 |
d129876e7873140e41030905edf7719f9275b25b | 430 | py | Python | src/bpmn_python/graph/classes/events/start_event_type.py | ToJestKrzysio/ProcessVisualization | 9a359a31816bf1be65e3684a571509e3a2c2c0ac | [
"MIT"
] | null | null | null | src/bpmn_python/graph/classes/events/start_event_type.py | ToJestKrzysio/ProcessVisualization | 9a359a31816bf1be65e3684a571509e3a2c2c0ac | [
"MIT"
] | null | null | null | src/bpmn_python/graph/classes/events/start_event_type.py | ToJestKrzysio/ProcessVisualization | 9a359a31816bf1be65e3684a571509e3a2c2c0ac | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Class used for representing tStartEvent of BPMN 2.0 graph
"""
import graph.classes.events.catch_event_type as catch_event
class StartEvent(catch_event.CatchEvent):
    """
    Class used for representing tStartEvent of BPMN 2.0 graph.

    A start event marks where a process begins; it inherits all catching
    behaviour from CatchEvent and adds no fields of its own.
    """

    def __init__(self):
        """
        Default constructor, initializes object fields with new instances
        by delegating to the CatchEvent base class.
        """
        super(StartEvent, self).__init__()
d12a04518d215e03a7f4e83338618949672d5216 | 389 | py | Python | euler/p001.py | 2Cubed/ProjectEuler | 1702fbc607816544c28a8f2895a82d234226e48b | [
"MIT"
] | 1 | 2016-06-02T11:25:04.000Z | 2016-06-02T11:25:04.000Z | euler/p001.py | 2Cubed/ProjectEuler | 1702fbc607816544c28a8f2895a82d234226e48b | [
"MIT"
] | null | null | null | euler/p001.py | 2Cubed/ProjectEuler | 1702fbc607816544c28a8f2895a82d234226e48b | [
"MIT"
] | null | null | null | """Solution to Project Euler Problem 1
https://projecteuler.net/problem=1
"""
# Default divisors and upper bound for Project Euler problem 1.
NUMBERS = 3, 5
MAXIMUM = 1000


def compute(*numbers, maximum=MAXIMUM):
    """Compute the sum of the multiples of `numbers` below `maximum`.

    With no positional arguments, the default divisors ``NUMBERS`` are used.
    """
    divisors = numbers or NUMBERS
    # A set de-duplicates values that are multiples of several divisors.
    multiples = set()
    for divisor in divisors:
        multiples.update(range(0, maximum, divisor))
    return sum(multiples)
d12a4a231179a8246d9be0624c4f9ed8ed7b90e3 | 690 | py | Python | scripts/write_kepler_format.py | 0bLondon/VizFinal | 316240e12fc04b269274b53a3bd0a3412886dccf | [
"MIT"
] | null | null | null | scripts/write_kepler_format.py | 0bLondon/VizFinal | 316240e12fc04b269274b53a3bd0a3412886dccf | [
"MIT"
] | null | null | null | scripts/write_kepler_format.py | 0bLondon/VizFinal | 316240e12fc04b269274b53a3bd0a3412886dccf | [
"MIT"
] | 1 | 2021-01-05T21:40:03.000Z | 2021-01-05T21:40:03.000Z | import csv
input_file = 'output.csv'
output_file = 'kepler.txt'
cols_to_remove = [9]
cols_to_remove = sorted(cols_to_remove, reverse=True)
row_count = 0
with open(input_file, "r") as source:
reader = csv.reader(source)
with open(output_file, "w", newline='') as result:
for row in reader:
result.write("[\n")
for i in range(len(row)):
if i is len(row)-1:
break
if i in [0, 2, 3, 5, 6]:
result.write("\t\t'{}',\n".format(row[i].replace("'", '')))
else:
result.write("\t\t{},\n".format(row[i].replace("'", '')))
result.write("\t\t{}\n".format(row[-1]))
result.write("],\n")
| 28.75 | 70 | 0.54058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.126087 |
d12a7191b4c49b6eb3dffbf58c4bda9e9deb59fa | 682 | py | Python | src/python/WMCore/WMBS/MySQL/Locations/ListSites.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 1 | 2015-02-05T13:43:46.000Z | 2015-02-05T13:43:46.000Z | src/python/WMCore/WMBS/MySQL/Locations/ListSites.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 1 | 2016-10-13T14:57:35.000Z | 2016-10-13T14:57:35.000Z | src/python/WMCore/WMBS/MySQL/Locations/ListSites.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
_ListSites_
MySQL implementation of Locations.ListSites
"""
__all__ = []
from WMCore.Database.DBFormatter import DBFormatter
import logging
class ListSites(DBFormatter):
sql = "SELECT site_name FROM wmbs_location"
def format(self, results):
if len(results) == 0:
return False
else:
format = []
for i in results[0].fetchall():
format.append(i.values()[0])
return format
def execute(self, conn = None, transaction = False):
results = self.dbi.processData(self.sql, {}, conn = conn, transaction = transaction)
return self.format(results)
| 19.485714 | 92 | 0.618768 | 508 | 0.744868 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.178886 |
d12cbf73d9dbe8b05ab050d9a56ee79d0f5da6e7 | 1,158 | py | Python | accountant.py | MKTSTK/Runover | 95242345e6a472f7741eba13885fa7b850c79d13 | [
"BSD-3-Clause"
] | 15 | 2015-08-07T19:27:32.000Z | 2019-05-24T03:23:01.000Z | accountant.py | webclinic017/Runover | 95242345e6a472f7741eba13885fa7b850c79d13 | [
"BSD-3-Clause"
] | 1 | 2015-08-08T16:07:00.000Z | 2015-08-08T16:07:00.000Z | accountant.py | webclinic017/Runover | 95242345e6a472f7741eba13885fa7b850c79d13 | [
"BSD-3-Clause"
] | 8 | 2015-08-08T00:38:40.000Z | 2021-11-11T11:32:09.000Z | from inside_market import *
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# the accountant class can do neat things like
#
# 1) Tally up the total pnl of your trade
# 2) plot equity curves
# 3) other neat stuff down the road, probably
class accountant():
    """Book-keeper for executed trades.

    Stores trades as (side, price, ...) tuples and converts the net price
    position into currency P&L using the instrument's minimum tick and
    per-tick value.
    """
    def __init__(self, min_tick, tick_value):
        self._trades = []
        self._min_tick = min_tick
        self._tick_value = tick_value

    def push_trades(self, new_trades):
        """Append a batch of trade tuples to the trade log."""
        self._trades.extend(new_trades)

    def get_final_closed_pnl(self):
        """Total P&L of all trades, assuming the final position is flat."""
        # BID trades subtract their price; offers add it.
        position = 0.0
        for trade in self._trades:
            position += -trade[1] if trade[0] == BID else trade[1]
        return (position / self._min_tick) * self._tick_value

    def get_final_open_pnl(self, mark_price):
        """P&L including the open position marked at *mark_price*."""
        pos = 0
        position = 0.0
        for trade in self._trades:
            if trade[0] == BID:
                position -= trade[1]
                pos += 1
            else:
                position += trade[1]
                pos -= 1
        # Mark the net open position to market, then add the cash position.
        margin = -(pos * -mark_price) + position
        return (margin / self._min_tick) * self._tick_value
d12f9186081828c736de8dcb09176bf8a7fdf2c8 | 584 | py | Python | src/python/test.py | jdrprod/miniBlock | c233dae5380a851e85d78d297e560833b81cf6b8 | [
"MIT"
] | null | null | null | src/python/test.py | jdrprod/miniBlock | c233dae5380a851e85d78d297e560833b81cf6b8 | [
"MIT"
] | null | null | null | src/python/test.py | jdrprod/miniBlock | c233dae5380a851e85d78d297e560833b81cf6b8 | [
"MIT"
] | null | null | null | from cheater import *
from main import *
# new Chain instance with
# mining difficulty = 4
c = Chain(4)
c.createGenesis()
# simulate transactions
c.addBlock(Block("3$ to Arthur"))
c.addBlock(Block("5$ to Bob"))
c.addBlock(Block("12$ to Jean"))
c.addBlock(Block("7$ to Jake"))
c.addBlock(Block("2$ to Camille"))
c.addBlock(Block("13$ to Marth"))
c.addBlock(Block("9$ to Felix"))
# check chain validity (expected to pass before tampering)
c.isChainValid()
# fake transaction: tamper with block 1's data to break the chain
cheat(c, 1, "6 to jean")
# check chain validity again (tampering should now be detected)
c.isChainValid()
# print all blocks
c.printChain()
print("len", len(c.blocks[0].hash) + 15)
| 18.83871 | 40 | 0.693493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.443493 |
d1308da11d5da22dc10b14287bdad38de1760631 | 1,466 | py | Python | Python3/537.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 854 | 2018-11-09T08:06:16.000Z | 2022-03-31T06:05:53.000Z | Python3/537.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 29 | 2019-06-02T05:02:25.000Z | 2021-11-15T04:09:37.000Z | Python3/537.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 347 | 2018-12-23T01:57:37.000Z | 2022-03-12T14:51:21.000Z | __________________________________________________________________________________________________
sample 24 ms submission
class Solution:
    def complexNumberMultiply(self, a: str, b: str) -> str:
        """Multiply two complex numbers given as 'real+imagi' strings
        (e.g. '1+-1i') and return the product in the same format."""
        # Drop the trailing 'i' and split on '+' to get the two integers.
        ar, ai = (int(part) for part in a[:-1].split('+'))
        br, bi = (int(part) for part in b[:-1].split('+'))
        real = ar * br - ai * bi
        imag = ar * bi + ai * br
        return '%d+%di' % (real, imag)
__________________________________________________________________________________________________
sample 13124 kb submission
class Solution:
    def getrc(self, strs):
        """Parse a 'real+imagi' string into an (real, imag) tuple of ints."""
        # Strip the trailing 'i'; '+' separates real from imaginary part,
        # each of which may carry its own '-' sign.
        real_text, imag_text = strs[:-1].split('+')
        return (int(real_text), int(imag_text))

    def complexNumberMultiply(self, a: str, b: str) -> str:
        """Return the product of two complex numbers in 'real+imagi' form."""
        ra, ca = self.getrc(a)
        rb, cb = self.getrc(b)
        real = ra * rb - ca * cb
        imag = ra * cb + rb * ca
        return '{}+{}i'.format(real, imag)
__________________________________________________________________________________________________
| 36.65 | 98 | 0.527967 | 1,116 | 0.761255 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.034106 |
d131ead143f7ae14b44aa50e156995d7274d1c57 | 3,991 | py | Python | mindwavemobile/MindwaveMobileRawReader.py | martinezmizael/Escribir-con-la-mente | f93456bc2ff817cf0ae808a0f711168f82e142ff | [
"MIT"
] | null | null | null | mindwavemobile/MindwaveMobileRawReader.py | martinezmizael/Escribir-con-la-mente | f93456bc2ff817cf0ae808a0f711168f82e142ff | [
"MIT"
] | null | null | null | mindwavemobile/MindwaveMobileRawReader.py | martinezmizael/Escribir-con-la-mente | f93456bc2ff817cf0ae808a0f711168f82e142ff | [
"MIT"
] | null | null | null | import bluetooth
import time
import textwrap
class MindwaveMobileRawReader:
    """Reads raw bytes from a NeuroSky MindWave Mobile headset over a
    Bluetooth RFCOMM connection.

    Incoming data accumulates in an internal buffer; callers consume it
    with peekByte()/getByte()/getBytes() and discard already-consumed
    bytes with clearAlreadyReadBuffer().
    """

    # Byte value that marks the start of a packet from the headset.
    START_OF_PACKET_BYTE = 0xaa

    def __init__(self, address=None):
        """address: optional Bluetooth address of the headset; when None,
        the address is discovered in connectToMindWaveMobile()."""
        self._buffer = []            # bytes read from the socket so far
        self._bufferPosition = 0     # index of the next unconsumed byte
        self._isConnected = False
        self._mindwaveMobileAddress = address

    def connectToMindWaveMobile(self):
        """Discover the headset (if no address was supplied) and connect."""
        # Discovering the address instead of hard-coding it; see
        # https://github.com/robintibor/python-mindwave-mobile/issues/4
        if self._mindwaveMobileAddress is None:
            self._mindwaveMobileAddress = self._findMindwaveMobileAddress()
        if self._mindwaveMobileAddress is not None:
            print("Discovered Mindwave Mobile...")
            self._connectToAddress(self._mindwaveMobileAddress)
        else:
            self._printErrorDiscoveryMessage()

    def _findMindwaveMobileAddress(self):
        """Return the address of the first nearby device named
        'MindWave Mobile', or None if none is found."""
        nearby_devices = bluetooth.discover_devices(lookup_names=True)
        for address, name in nearby_devices:
            if name == "MindWave Mobile":
                return address
        return None

    def _connectToAddress(self, mindwaveMobileAddress):
        """Retry opening an RFCOMM connection (channel 1) until it succeeds."""
        self.mindwaveMobileSocket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
        while not self._isConnected:
            try:
                self.mindwaveMobileSocket.connect((mindwaveMobileAddress, 1))
                self._isConnected = True
            except bluetooth.btcommon.BluetoothError as error:
                # FIX: this line used the Python 2 print statement while the
                # rest of the class uses print() calls; unified on the call
                # form so the module parses on Python 3 as well.
                print("Could not connect: %s; Retrying in 5s..." % (error,))
                time.sleep(5)

    def isConnected(self):
        """Return True once a Bluetooth connection has been established."""
        return self._isConnected

    def _printErrorDiscoveryMessage(self):
        print(textwrap.dedent("""\
            Could not discover Mindwave Mobile. Please make sure the
            Mindwave Mobile device is in pairing mode and your computer
            has bluetooth enabled.""").replace("\n", " "))

    def _readMoreBytesIntoBuffer(self, amountOfBytes):
        """Fetch amountOfBytes more bytes from the socket into the buffer."""
        newBytes = self._readBytesFromMindwaveMobile(amountOfBytes)
        self._buffer += newBytes

    def _readBytesFromMindwaveMobile(self, amountOfBytes):
        """Blocking read of exactly amountOfBytes bytes from the socket."""
        missingBytes = amountOfBytes
        receivedBytes = ""
        # Sometimes the socket will not send all the requested bytes
        # on the first request, therefore a loop is necessary...
        while missingBytes > 0:
            receivedBytes += self.mindwaveMobileSocket.recv(missingBytes)
            missingBytes = amountOfBytes - len(receivedBytes)
        return receivedBytes

    def peekByte(self):
        """Return the next byte without consuming it."""
        # BUG FIX: the original called _ensureMoreBytesCanBeRead() without
        # the required amountOfBytes argument, which raised TypeError on
        # every call; one byte of lookahead is all a peek needs.
        self._ensureMoreBytesCanBeRead(1)
        return ord(self._buffer[self._bufferPosition])

    def getByte(self):
        """Consume and return the next byte (reads ahead up to 100 bytes)."""
        self._ensureMoreBytesCanBeRead(100)
        return self._getNextByte()

    def _ensureMoreBytesCanBeRead(self, amountOfBytes):
        # Read from the socket whenever fewer than amountOfBytes bytes
        # remain unconsumed in the buffer.
        if self._bufferSize() <= self._bufferPosition + amountOfBytes:
            self._readMoreBytesIntoBuffer(amountOfBytes)

    def _getNextByte(self):
        """Consume one byte from the buffer and return it as an int."""
        nextByte = ord(self._buffer[self._bufferPosition])
        self._bufferPosition += 1
        return nextByte

    def getBytes(self, amountOfBytes):
        """Consume and return amountOfBytes bytes as a list of ints."""
        self._ensureMoreBytesCanBeRead(amountOfBytes)
        return self._getNextBytes(amountOfBytes)

    def _getNextBytes(self, amountOfBytes):
        # A list comprehension keeps the Python 2 behaviour of map()
        # (an actual list) and works unchanged on Python 3, where map()
        # would return a lazy iterator tied to a buffer that may later
        # be mutated by clearAlreadyReadBuffer().
        end = self._bufferPosition + amountOfBytes
        nextBytes = [ord(byte) for byte in self._buffer[self._bufferPosition:end]]
        self._bufferPosition += amountOfBytes
        return nextBytes

    def clearAlreadyReadBuffer(self):
        """Drop everything before the current read position."""
        self._buffer = self._buffer[self._bufferPosition:]
        self._bufferPosition = 0

    def _bufferSize(self):
        return len(self._buffer)
| 38.747573 | 106 | 0.636181 | 3,857 | 0.966424 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.188173 |
d131f783f629cb72883f07af0450c47b4a358d42 | 505 | py | Python | docassemble/InterviewStats/snapshot_statistics.py | BryceStevenWilley/docassemble-InterviewStats | e1225001671f83213841d9cc7748cd1fff0f49c5 | [
"MIT"
] | null | null | null | docassemble/InterviewStats/snapshot_statistics.py | BryceStevenWilley/docassemble-InterviewStats | e1225001671f83213841d9cc7748cd1fff0f49c5 | [
"MIT"
] | 8 | 2021-01-14T00:49:44.000Z | 2022-03-30T13:33:43.000Z | docassemble/InterviewStats/snapshot_statistics.py | BryceStevenWilley/docassemble-InterviewStats | e1225001671f83213841d9cc7748cd1fff0f49c5 | [
"MIT"
] | 1 | 2020-11-30T20:59:53.000Z | 2020-11-30T20:59:53.000Z | from docassemble.base.util import variables_snapshot_connection, user_info
__all__ = ['get_stats']
def get_stats(filename: str):
conn = variables_snapshot_connection()
cur = conn.cursor()
# use a parameterized query to prevent SQL injection
query = "select modtime, data from jsonstorage where filename=%(filename)s"
cur.execute(query, {'filename': filename})
records = list()
for record in cur.fetchall():
records.append(record)
conn.close()
return records
| 29.705882 | 79 | 0.710891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.277228 |
d1340b77da734e63775de7b3f26a9ce848c63723 | 2,160 | py | Python | radicalsdk/radardsp.py | moodoki/radical_sdk | 4438678cf73e156e5058ddb035ec8e5875fca84e | [
"Apache-2.0"
] | 7 | 2021-05-20T01:12:39.000Z | 2021-12-30T12:38:07.000Z | radicalsdk/radardsp.py | moodoki/radical_sdk | 4438678cf73e156e5058ddb035ec8e5875fca84e | [
"Apache-2.0"
] | null | null | null | radicalsdk/radardsp.py | moodoki/radical_sdk | 4438678cf73e156e5058ddb035ec8e5875fca84e | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_radardsp.ipynb (unless otherwise specified).
__all__ = ['cfar_nms', 'range_azimuth_ca_cfar']
# Cell
import numpy as np
from mmwave import dsp
# Cell
def cfar_nms(cfar_in, beamformed_ra, nhood_size=1):
"""non-maxumim suppression for cfar detections"""
def get_nhood(xx, yy):
return beamformed_ra[yy-nhood_size:yy+nhood_size+1, xx-nhood_size:xx+nhood_size+1]
nms_arr = np.zeros_like(cfar_in)
for yy, xx in zip(*np.where(cfar_in == 1)):
nms_arr[yy, xx] = 1 if np.all(beamformed_ra[yy, xx] >= get_nhood(xx, yy)) else 0
return nms_arr
def range_azimuth_ca_cfar(beamformed_radar_cube, nms=True):
"""Cell-Averaging CFAR on beamformed radar signal
inputs:
- `beamformed_radar_cube`
- `nms`: default `True` whether to perform non-maximum suppression
"""
range_az = np.abs(beamformed_radar_cube)
heatmap_log = np.log2(range_az)
first_pass, _ = np.apply_along_axis(func1d=dsp.cago_,
axis=0,
arr=heatmap_log,
l_bound=1.5,
guard_len=4,
noise_len=16)
# --- cfar in range direction
second_pass, noise_floor = np.apply_along_axis(func1d=dsp.caso_,
axis=0,
arr=heatmap_log.T,
l_bound=3,
guard_len=4,
noise_len=16)
# --- classify peaks and caclulate snrs
SKIP_SIZE = 4
noise_floor = noise_floor.T
first_pass = (heatmap_log > first_pass)
second_pass = (heatmap_log > second_pass.T)
peaks = (first_pass & second_pass)
peaks[:SKIP_SIZE, :] = 0
peaks[-SKIP_SIZE:, :] = 0
peaks[:, :SKIP_SIZE] = 0
peaks[:, -SKIP_SIZE:] = 0
peaks = peaks.astype('float32')
if nms:
peaks = peaks * cfar_nms(peaks, range_az, 1)
return peaks | 32.727273 | 95 | 0.539815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.20463 |
d1367794248b2e3c030b18062325fb8aedea6ff8 | 4,020 | py | Python | src/primaires/perso/commandes/prompt/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/perso/commandes/prompt/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/perso/commandes/prompt/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'prompt'.
Dans ce fichier ne se trouve que la commande.
Les sous-commandes peuvent être trouvées dans le package.
"""
from primaires.interpreteur.commande.commande import Commande
from .defaut import PrmDefaut
# Constantes
AIDE = """
Cette commande permet de configurer vos différents prompts. Le prompt
est un message qui s'affiche généralement après l'entrée d'une commande ou une
action quelconque dans l'univers. Ce message donne des informations
générales sur votre personnage (par défaut, sa vitalité, mana et
endurance).
Il existe plusieurs prompts. Par exemple, celui que vous verrez à
votre première connexion est le prompt par défaut qui s'affiche dans
la plupart des circonstances. Il existe également un prompt de combat
qui est affiché quand votre personnage est en combat et peut donner
des informations supplémentaires.
Vous pouvez ici configurer votre prompt, c'est-à-dire changer ce
message. En utilisant une des sous-commandes ci-dessous, vous pouvez
soit consulter, masquer, modifier ou réinitialiser votre prompt.
Ce que vous entrez grâce à cette commande deviendra votre prompt. Vous
pouvez aussi utiliser des symboles (par exemple, vous pouvez entrer
%prompt% %prompt:défaut%|cmd| Vit(|pc|v) Man(|pc|m) End(|pc|e)|ff| pour
avoir un prompt sous la forme |ent|Vit(50) Man(50) End(50)|ff|.
Les symboles sont des combinaisons de lettres précédées du signe
pourcent (|pc|). Voici les symboles que vous pouvez utiliser pour tous
les prompts :
|pc|v Vitalité actuelle
|pc|m Mana actuelle
|pc|e Endurance actuelle
|pc|vx Vitalité maximum
|pc|mx Mana maximum
|pc|ex Endurance maximum
|pc|sl Saut de ligne (pour avoir un prompt sur deux lignes)
|pc|f Force
|pc|a Agilité
|pc|r Robustesse
|pc|i Intelligence
|pc|c Charisme
|pc|s Sensibilité
""".strip()
class CmdPrompt(Commande):
"""Commande 'prompt'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "prompt", "prompt")
self.schema = ""
self.aide_courte = "affiche ou configure votre prompt"
self.aide_longue = AIDE
def ajouter_parametres(self):
"""Ajout dynamique des paramètres."""
for prompt in importeur.perso.prompts.values():
self.ajouter_parametre(PrmDefaut(prompt))
| 42.765957 | 79 | 0.732587 | 493 | 0.121728 | 0 | 0 | 0 | 0 | 0 | 0 | 3,567 | 0.880741 |
d13730c1037ff6002b629d64c271d177aacb851b | 908 | py | Python | dyndb2csv.py | donwellus/dyndb2csv | 4f4fcb733818b7afed2b4d1c798a5f97825a233d | [
"Apache-2.0"
] | null | null | null | dyndb2csv.py | donwellus/dyndb2csv | 4f4fcb733818b7afed2b4d1c798a5f97825a233d | [
"Apache-2.0"
] | null | null | null | dyndb2csv.py | donwellus/dyndb2csv | 4f4fcb733818b7afed2b4d1c798a5f97825a233d | [
"Apache-2.0"
] | null | null | null | import click
import json
import csv
import sys
@click.command()
@click.argument('input', type=click.File('rb'))
def cli(input):
    """Dynamodb to CSV
    Convert the aws dynamodb output (Scalar types, JSON) to CSV.
    \b
    Process from stdin:
    dyndb2csv -
    \b
    Process from a file:
    dyndb2csv foo.txt
    """
    # Parse the dynamodb scan/query JSON and emit one CSV row per item.
    data = json.load(input)
    items = data['Items']
    writer = csv.DictWriter(sys.stdout, fieldnames=get_keys(items))
    writer.writeheader()
    writer.writerows(get_row(item) for item in items)
def get_keys(items):
    """Return the union of column names across all items, in first-seen
    order (used as the CSV header row)."""
    seen = {}
    for record in items:
        # dict insertion order keeps the first occurrence of each column.
        seen.update(dict.fromkeys(record, True))
    return seen.keys()
def get_row(item):
    """Flatten one DynamoDB item ({col: {type: value}}) into a plain
    {col: value} dict, keeping only scalar types (S, N, BOOL, B)."""
    row = {}
    for col, typed_value in item.items():
        # Each attribute dict has a single type tag as its only key.
        type_tag = next(iter(typed_value.keys()))
        if type_tag in ('S', 'N', 'BOOL', 'B'):
            row[col] = typed_value[type_tag]
    return row
| 19.319149 | 64 | 0.580396 | 0 | 0 | 0 | 0 | 533 | 0.587004 | 0 | 0 | 242 | 0.26652 |
d13741b5a1723b88af21cde7c9133072b2ca56c6 | 2,370 | py | Python | testing/testing-data-gradient.py | jenfly/atmos | c0a733b78749098d8cc2caaaacee245e6aeeac07 | [
"MIT"
] | 16 | 2015-10-08T06:14:35.000Z | 2020-02-12T02:47:33.000Z | testing/testing-data-gradient.py | jenfly/atmos | c0a733b78749098d8cc2caaaacee245e6aeeac07 | [
"MIT"
] | null | null | null | testing/testing-data-gradient.py | jenfly/atmos | c0a733b78749098d8cc2caaaacee245e6aeeac07 | [
"MIT"
] | 3 | 2018-10-16T07:58:14.000Z | 2021-09-17T06:39:00.000Z | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
# Location of the pre-processed daily MERRA files and the year to analyse.
datadir = atm.homedir() + 'datastore/merra/daily/'
year = 2014
# Filename suffix identifying the stored lon/lat subset (40E-120E, 90S-90N).
subset = '_40E-120E_90S-90N'
def get_var(datadir, varnm, subset, year):
    """Load variable `varnm` from the daily MERRA netCDF file for `year`."""
    path = '%smerra_%s%s_%d.nc' % (datadir, varnm, subset, year)
    with xray.open_dataset(path) as ds:
        # .load() pulls the data into memory before the file is closed.
        data = ds[varnm].load()
    return data
# Vertically integrated moisture fluxes for the chosen year.
uq_int = get_var(datadir, 'UFLXQV', subset, year)
vq_int = get_var(datadir, 'VFLXQV', subset, year)
# Reference moisture flux convergence from the library routine, and its
# area-mean time series.
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfcbar = mfc.mean(dim='YDim').mean(dim='XDim')
# Test atm.gradient: recompute MFC by hand in spherical coordinates.
a = atm.constants.radius_earth.values
latdim, londim = 1, 2
lat = atm.get_coord(uq_int, 'lat')
latrad = np.radians(lat)
# Mask the poles so the later division by cos(lat) ~ 0 yields NaN
# instead of blowing up.
latrad[abs(lat) > 89] = np.nan
coslat = xray.DataArray(np.cos(latrad), coords={'YDim' : lat})
lon = atm.get_coord(uq_int, 'lon')
lonrad = np.radians(lon)
# Zonal and meridional terms of the divergence on a sphere.
mfc_x = atm.gradient(uq_int, lonrad, londim) / (a*coslat)
mfc_y = atm.gradient(vq_int * coslat, latrad, latdim) / (a*coslat)
mfc_test = mfc_x + mfc_y
# Convergence is minus the divergence; convert kg/m2/s to mm/day.
mfc_test = - atm.precip_convert(mfc_test, 'kg/m2/s', 'mm/day')
mfc_test_bar = mfc_test.mean(dim='YDim').mean(dim='XDim')
# The hand-rolled and library results should agree (difference ~ 0).
diff = mfc_test - mfc
print(diff.max())
print(diff.min())
plt.plot(mfcbar)
plt.plot(mfc_test_bar)
print(mfc_test_bar - mfcbar)
# ----------------------------------------------------------------------
# Vertical gradient du/dp
# Spatial subset: 40E-120E in longitude, 100-300 hPa in the vertical.
lon1, lon2 = 40, 120
pmin, pmax = 100, 300
subset_dict = {'XDim' : (lon1, lon2), 'Height' : (pmin, pmax)}
urls = merra.merra_urls([year])
# Single day (July 15) of data; key format is YYYYMMDD.
month, day = 7, 15
url = urls['%d%02d%02d' % (year, month, day)]
with xray.open_dataset(url) as ds:
    u = atm.subset(ds['U'], subset_dict, copy=False)
    u = u.mean(dim='TIME')
pres = u['Height']
# Convert the pressure coordinate to Pa for the gradient calculation.
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dp = np.gradient(pres)
# Calc 1: du/dp column by column with np.gradient as the reference.
dims = u.shape
dudp = np.nan * u
for i in range(dims[1]):
    for j in range(dims[2]):
        dudp.values[:, i, j] = np.gradient(u[:, i, j], dp)
# Test atm.gradient against the reference (difference should be ~ 0).
dudp_test = atm.gradient(u, pres, axis=0)
diff = dudp_test - dudp
print(diff.max())
print(diff.min())
| 27.55814 | 72 | 0.645148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 490 | 0.206751 |
d1376d1e03ff5d7a9167c8134995b7b59da1d297 | 1,686 | py | Python | adventofcode/solutions/y2021/d06.py | andreasbjornstrom/adventofcode-python | 71db65568bf0f327dd56e5e1c7488e356a24f403 | [
"MIT"
] | null | null | null | adventofcode/solutions/y2021/d06.py | andreasbjornstrom/adventofcode-python | 71db65568bf0f327dd56e5e1c7488e356a24f403 | [
"MIT"
] | null | null | null | adventofcode/solutions/y2021/d06.py | andreasbjornstrom/adventofcode-python | 71db65568bf0f327dd56e5e1c7488e356a24f403 | [
"MIT"
] | null | null | null | '''
Solution for day 6 of the 2021 Advent of Code calendar.
Run it with the command `python -m adventofcode run_solution -y 2021 6` from the project root.
'''
import time
from adventofcode.types import Solution
class LanternFish:
    """A single lanternfish tracked by the days left until it spawns."""

    def __init__(self, timer_til_fork):
        self.timer_til_fork = timer_til_fork

    def __repr__(self):
        return str(self.timer_til_fork)

    def evolve(self) -> bool:
        """Advance one day; return True when this fish spawns a child."""
        if self.timer_til_fork > 0:
            self.timer_til_fork -= 1
            return False
        # Timer hit zero: reset to 6 and signal that a new fish is born.
        self.timer_til_fork = 6
        return True
def part1(data):
    """Naively simulate every fish for 80 days; return the head count."""
    school = [LanternFish(int(timer)) for timer in data.split(",")]
    for day in range(0, 80):
        start = time.time()
        print(f"Generating: {day}")
        # Evolve every existing fish once, counting how many spawn...
        spawned = sum(1 for fish in school if fish.evolve())
        # ...then add one newborn (timer 8) per spawn.
        school.extend(LanternFish(8) for _ in range(spawned))
        end = time.time()
        print(f"Took: {end - start}, generated {len(school)} fish")
    return len(school)
def part2(data):
    """Count fish per timer value and roll the counts for 256 days."""
    # fish_age[t] = number of fish whose timer currently reads t (0..8).
    fish_age = [0] * 9
    for timer in data.split(","):
        fish_age[int(timer)] += 1
    for day in range(0, 256):
        start = time.time()
        print(f"Generating: {day}")
        # Fish at timer 0 spawn: parents reset to 6, newborns enter at 8.
        spawning = fish_age.pop(0)
        fish_age[6] += spawning
        fish_age.append(spawning)
        end = time.time()
        print(f"{len(fish_age)}{fish_age}, {sum(fish_age)} fish, took {end - start}, ")
    return sum(fish_age)
# Each day, a 0 becomes a 6 and adds a new 8 to the end of the list,
# while each other number decreases by 1 if it was present at the start of the day.
def run(data: str) -> Solution:
    """Solve both parts of day 6 for the raw puzzle input string."""
    return part1(data), part2(data)
| 27.193548 | 94 | 0.618624 | 350 | 0.207592 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.298339 |
d1388b41170cb3a74153d7fe1845f6ab7af64949 | 4,938 | py | Python | eval_detector.py | arushi1372/caltech-ee148-spring2021-hw02 | 980ffc277fbe2a8c0874034c40e4014e816d400c | [
"MIT"
] | null | null | null | eval_detector.py | arushi1372/caltech-ee148-spring2021-hw02 | 980ffc277fbe2a8c0874034c40e4014e816d400c | [
"MIT"
] | null | null | null | eval_detector.py | arushi1372/caltech-ee148-spring2021-hw02 | 980ffc277fbe2a8c0874034c40e4014e816d400c | [
"MIT"
] | null | null | null | import os
import json
import numpy as np
import matplotlib.pyplot as plt
def compute_iou(box_1, box_2):
    '''
    Return the intersection-over-union (IoU) of two axis-aligned boxes.

    Each box is [top-left row, top-left col, bottom-right row,
    bottom-right col].  The result lies in [0, 1].
    '''
    tlr1, tlc1, brr1, brc1 = box_1[0], box_1[1], box_1[2], box_1[3]
    tlr2, tlc2, brr2, brc2 = box_2[0], box_2[1], box_2[2], box_2[3]
    # Overlap extents along rows (dx) and columns (dy); a negative value
    # means the boxes are disjoint along that axis.
    dx = min(brr1, brr2) - max(tlr1, tlr2)
    # BUG FIX: this was min(brc1, brc1), which ignored box_2's right edge
    # and could report intersection > union (tripping the assert below).
    dy = min(brc1, brc2) - max(tlc1, tlc2)
    intersection = dx * dy if (dx >= 0 and dy >= 0) else 0
    area1 = (brc1 - tlc1) * (brr1 - tlr1)
    area2 = (brc2 - tlc2) * (brr2 - tlr2)
    union = area1 + area2 - intersection
    iou = intersection / union
    assert (iou >= 0) and (iou <= 1.0)
    return iou
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
    '''
    Return (TP, FP, FN) over a collection of images.

    <preds> maps file name -> list of [tlr, tlc, brr, brc, confidence].
    <gts>   maps file name -> list of [tlr, tlc, brr, brc].

    Only predictions with confidence >= conf_thr participate.  Each
    confident prediction is greedily matched to at most one ground-truth
    box with IoU >= iou_thr: matched pairs are true positives, unmatched
    confident predictions are false positives, and unmatched ground-truth
    boxes are false negatives.

    BUG FIX: the original broke out of the matching loop at the first
    confident prediction regardless of IoU (so results depended on
    prediction order), let one prediction match several ground truths,
    and never counted confident predictions matching no box as FPs.
    '''
    TP = 0
    FP = 0
    FN = 0
    for pred_file, pred in preds.items():
        gt = gts[pred_file]
        # Predictions above the confidence threshold, each usable once.
        confident = [p for p in pred if p[4] >= conf_thr]
        matched = [False] * len(confident)
        for gt_box in gt:
            found = False
            for j, p in enumerate(confident):
                if not matched[j] and compute_iou(p[:4], gt_box) >= iou_thr:
                    matched[j] = True
                    found = True
                    break
            if found:
                TP += 1
            else:
                FN += 1
        # Confident predictions that matched nothing are false positives.
        FP += matched.count(False)
    return TP, FP, FN
# set a path for predictions and annotations:
preds_path = 'hw02_preds'
gts_path = 'hw02_annotations'
# load splits:
split_path = 'hw02_splits'
file_names_train = np.load(os.path.join(split_path,'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path,'file_names_test.npy'))
# Set this parameter to True when you're done with algorithm development:
done_tweaking = True
'''
Load training data.
'''
# Predictions and ground-truth annotations are JSON dicts keyed by file name.
with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
    preds_train = json.load(f)
with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
    gts_train = json.load(f)
# Test-set data is only needed once development is finished.
if done_tweaking:
    '''
    Load test data.
    '''
    with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
        preds_test = json.load(f)
    with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
        gts_test = json.load(f)
# For a fixed IoU threshold, vary the confidence thresholds.
# The code below gives an example on the training set for one IoU threshold.
def compute_PR(iou, preds, gts):
    """Sweep confidence thresholds at a fixed IoU threshold and return
    (recall, precision) arrays for a PR curve."""
    # Use every predicted confidence score (sorted ascending) as a threshold.
    scores = [box[4] for boxes in preds.values() for box in boxes]
    confidence_thrs = np.sort(np.array(scores, dtype=float))
    n_thrs = len(confidence_thrs)
    tp = np.zeros(n_thrs)
    fp = np.zeros(n_thrs)
    fn = np.zeros(n_thrs)
    for i, conf_thr in enumerate(confidence_thrs):
        tp[i], fp[i], fn[i] = compute_counts(preds, gts, iou_thr=iou,
                                             conf_thr=conf_thr)
    # Element-wise precision/recall at each threshold.
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return recall, precision
# Plot training-set PR curves at three IoU thresholds.
recall, precision = compute_PR(0.5, preds_train, gts_train)
recall_l, precision_l = compute_PR(0.25, preds_train, gts_train)
recall_m, precision_m = compute_PR(0.75, preds_train, gts_train)
plt.plot(recall, precision, color='black', marker='o')
plt.plot(recall_l, precision_l, color='blue', marker='o')
plt.plot(recall_m, precision_m, color='green', marker='o')
plt.legend(["IOU 0.5", "IOU 0.25", "IOU 0.75"])
plt.title("PR Curves Training")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
if done_tweaking:
    print('Code for plotting test set PR curves.')
    # Same three curves, computed on the held-out test split.
    recall, precision = compute_PR(0.5, preds_test, gts_test)
    recall_l, precision_l = compute_PR(0.25, preds_test, gts_test)
    recall_m, precision_m = compute_PR(0.75, preds_test, gts_test)
    plt.figure()
    plt.plot(recall, precision, color='black', marker='o')
    plt.plot(recall_l, precision_l, color='blue', marker='o')
    plt.plot(recall_m, precision_m, color='green', marker='o')
    plt.legend(["IOU 0.5", "IOU 0.25", "IOU 0.75"])
    plt.title("PR Curves Testing")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.show()
| 32.27451 | 116 | 0.632442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,443 | 0.292224 |
d13a70d1c898cfcc6e99db346677e88b8a15d87e | 4,937 | py | Python | enaml/enaml/wx/wx_widget.py | ContinuumIO/ashiba | a93e7785d1fcf397baeb8a0b687a162a2b2aef3d | [
"BSD-3-Clause"
] | 11 | 2015-03-14T14:30:51.000Z | 2022-03-15T13:01:44.000Z | enaml/wx/wx_widget.py | ContinuumIO/enaml | 15c20b035a73187e8e66fa20a43c3a4372d008bd | [
"BSD-3-Clause-Clear"
] | 3 | 2015-01-31T11:12:56.000Z | 2022-03-14T00:53:25.000Z | enaml/enaml/wx/wx_widget.py | ContinuumIO/ashiba | a93e7785d1fcf397baeb8a0b687a162a2b2aef3d | [
"BSD-3-Clause"
] | 4 | 2015-01-27T01:56:14.000Z | 2021-02-23T07:21:20.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
from atom.api import Typed
from enaml.widgets.widget import ProxyWidget
from .wx_layout_request import wxEvtLayoutRequested
from .wx_resource_helpers import get_cached_wxcolor, get_cached_wxfont
from .wx_toolkit_object import WxToolkitObject
class WxWidget(WxToolkitObject, ProxyWidget):
""" A Wx implementation of an Enaml ProxyWidget.
"""
#: A reference to the toolkit widget created by the proxy.
widget = Typed(wx.Window)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Creates the underlying wx.Window widget.
"""
self.widget = wx.Window(self.parent_widget())
def init_widget(self):
""" Initialize the underlying widget.
"""
super(WxWidget, self).init_widget()
d = self.declaration
if d.background:
self.set_background(d.background)
if d.foreground:
self.set_foreground(d.foreground)
if d.font:
self.set_font(d.font)
if -1 not in d.minimum_size:
self.set_minimum_size(d.minimum_size)
if -1 not in d.maximum_size:
self.set_maximum_size(d.maximum_size)
if d.tool_tip:
self.set_tool_tip(d.tool_tip)
if d.status_tip:
self.set_status_tip(d.status_tip)
self.set_enabled(d.enabled)
self.set_visible(d.visible)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def update_geometry(self):
""" Notify the layout system that this widget has changed.
This method should be called when the geometry of the widget has
changed and the layout system should update the layout. This will
post a wxEvtLayoutRequested event to the parent of this widget.
"""
widget = self.widget
if widget:
parent = widget.GetParent()
if parent:
event = wxEvtLayoutRequested(widget.GetId())
wx.PostEvent(parent, event)
#--------------------------------------------------------------------------
# ProxyWidget API
#--------------------------------------------------------------------------
def set_minimum_size(self, min_size):
""" Sets the minimum size on the underlying widget.
"""
self.widget.SetMinSize(wx.Size(*min_size))
def set_maximum_size(self, max_size):
""" Sets the maximum size on the underlying widget.
"""
self.widget.SetMaxSize(wx.Size(*max_size))
def set_enabled(self, enabled):
""" Set the enabled state on the underlying widget.
"""
self.widget.Enable(enabled)
def set_visible(self, visible):
""" Set the visibility state on the underlying widget.
"""
self.widget.Show(visible)
def set_background(self, background):
""" Set the background color on the underlying widget.
"""
if background is None:
wxcolor = wx.NullColour
else:
wxcolor = get_cached_wxcolor(background)
widget = self.widget
widget.SetBackgroundColour(wxcolor)
widget.Refresh()
def set_foreground(self, foreground):
""" Set the foreground color on the underlying widget.
"""
if foreground is None:
wxcolor = wx.NullColour
else:
wxcolor = get_cached_wxcolor(foreground)
widget = self.widget
widget.SetForegroundColour(wxcolor)
widget.Refresh()
def set_font(self, font):
""" Set the font on the underlying widget.
"""
wxfont = get_cached_wxfont(font)
widget = self.widget
widget.SetFont(wxfont)
widget.Refresh()
def set_show_focus_rect(self, show):
""" This is not supported on Wx.
"""
pass
def set_tool_tip(self, tool_tip):
""" Set the tool tip of for this widget.
"""
self.widget.SetToolTipString(tool_tip)
def set_status_tip(self, status_tip):
""" This is not supported on Wx.
"""
pass
def ensure_visible(self):
""" Ensure the widget is visible.
"""
self.widget.Show(True)
def ensure_hidden(self):
""" Ensure the widget is hidden.
"""
self.widget.Show(False)
| 29.921212 | 79 | 0.538789 | 4,329 | 0.876848 | 0 | 0 | 0 | 0 | 0 | 0 | 2,037 | 0.412599 |
d13bb185fb284c7a1c5cf8f8b572524463eee700 | 276 | py | Python | duo_universal_auth/apps.py | tonningp/django-duo-universal-auth | 4a7dc91c48e0d3c6b11d2b6eebd9cedd83cd3275 | [
"BSD-3-Clause"
] | 1 | 2021-12-26T21:04:16.000Z | 2021-12-26T21:04:16.000Z | duo_universal_auth/apps.py | tonningp/django-duo-universal-auth | 4a7dc91c48e0d3c6b11d2b6eebd9cedd83cd3275 | [
"BSD-3-Clause"
] | null | null | null | duo_universal_auth/apps.py | tonningp/django-duo-universal-auth | 4a7dc91c48e0d3c6b11d2b6eebd9cedd83cd3275 | [
"BSD-3-Clause"
] | 1 | 2021-12-26T21:29:45.000Z | 2021-12-26T21:29:45.000Z | """
Module to register the Django application.
"""
from django.apps import AppConfig
class DuoUniversalAuthConfig(AppConfig):
"""
The specific AppConfig class to register for the Duo Universal
Authentication application.
"""
name = 'duo_universal_auth'
| 19.714286 | 66 | 0.728261 | 187 | 0.677536 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.652174 |
d13c2225b45393822148acad03a1b68ed0f512f8 | 2,170 | py | Python | metaworld/policies/sawyer_push_wall_v2_policy.py | yiwc/robotics-world | 48efda3a8ea6741b35828b02860f45753252e376 | [
"MIT"
] | 681 | 2019-09-09T19:34:37.000Z | 2022-03-31T12:17:58.000Z | metaworld/policies/sawyer_push_wall_v2_policy.py | yiwc/robotics-world | 48efda3a8ea6741b35828b02860f45753252e376 | [
"MIT"
] | 212 | 2019-09-18T14:43:44.000Z | 2022-03-27T22:21:00.000Z | metaworld/policies/sawyer_push_wall_v2_policy.py | yiwc/robotics-world | 48efda3a8ea6741b35828b02860f45753252e376 | [
"MIT"
] | 157 | 2019-09-12T05:06:05.000Z | 2022-03-29T14:47:24.000Z | import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerPushWallV2Policy(Policy):
    """Scripted policy for the push-with-wall task.

    Moves the end effector over the puck, descends onto it, then pushes it
    toward the goal, detouring around the wall when the puck sits near it.
    """

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        # Split the flat observation vector into named components.
        return {
            'hand_pos': obs[:3],
            'unused_1': obs[3],
            'obj_pos': obs[4:7],
            'unused_2': obs[7:-3],
            'goal_pos': obs[-3:],
        }

    def get_action(self, obs):
        """Compute the 4-D action (XYZ delta + gripper effort) for ``obs``."""
        parsed = self._parse_obs(obs)

        action = Action({
            'delta_pos': np.arange(3),
            'grab_effort': 3
        })
        action['delta_pos'] = move(
            parsed['hand_pos'], to_xyz=self.desired_pos(parsed), p=10.)
        action['grab_effort'] = self.grab_effort(parsed)
        return action.array

    @staticmethod
    def desired_pos(o_d):
        """Target XYZ position for the end effector this step."""
        hand = o_d['hand_pos']
        # Aim slightly behind the puck's centre along x.
        puck = o_d['obj_pos'] + np.array([-0.005, 0, 0])

        # If error in the XY plane is greater than 0.02, place the end
        # effector above the puck first.
        if np.linalg.norm(hand[:2] - puck[:2]) > 0.02:
            return puck + np.array([0., 0., 0.2])
        # Once XY error is low enough, drop the end effector onto the puck.
        if abs(hand[2] - puck[2]) > 0.04:
            return puck + np.array([0., 0., 0.03])

        # Otherwise push toward the goal. When the wall lies between the
        # puck and the goal, steer around it first.
        if -0.1 <= puck[0] <= 0.3 and 0.65 <= puck[1] <= 0.75:
            return hand + np.array([-1, 0, 0])
        if ((-0.15 < puck[0] < 0.05 or 0.15 < puck[0] < 0.35)
                and 0.695 <= puck[1] <= 0.755):
            return hand + np.array([0, 1, 0])
        return o_d['goal_pos']

    @staticmethod
    def grab_effort(o_d):
        """Gripper effort: stay open until aligned with and near the puck."""
        hand = o_d['hand_pos']
        puck = o_d['obj_pos']

        far_in_xy = np.linalg.norm(hand[:2] - puck[:2]) > 0.02
        far_in_z = abs(hand[2] - puck[2]) > 0.1
        if far_in_xy or far_in_z:
            return 0.0
        # While descending onto the puck, begin closing the grabber.
        return 0.6
| 33.384615 | 90 | 0.542396 | 2,030 | 0.935484 | 0 | 0 | 1,629 | 0.750691 | 0 | 0 | 464 | 0.213825 |
d13cc1f49508348eb5e8055c36f920d35acf4e17 | 12,634 | py | Python | solum/common/clients.py | dimtruck/solum | 7ec547039ab255052b954a102b9765e068a0f871 | [
"Apache-2.0"
] | null | null | null | solum/common/clients.py | dimtruck/solum | 7ec547039ab255052b954a102b9765e068a0f871 | [
"Apache-2.0"
] | null | null | null | solum/common/clients.py | dimtruck/solum | 7ec547039ab255052b954a102b9765e068a0f871 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as glanceclient
from heatclient import client as heatclient
from mistralclient.api import client as mistralclient
from neutronclient.neutron import client as neutronclient
from oslo_config import cfg
from swiftclient import client as swiftclient
from zaqarclient.queues.v1 import client as zaqarclient
from solum.common import exception
from solum.common import solum_barbicanclient
from solum.common import solum_keystoneclient
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
# Module-level logger for this clients module.
LOG = logging.getLogger(__name__)

# Region option shared by all clients; per-client groups below may override it
# with their own 'region_name' (see get_client_option()).
GLOBAL_CLIENT_OPTS = [
    cfg.StrOpt('region_name',
               default='RegionOne',
               help=_(
                   'Region of endpoint in Identity service catalog to use'
                   ' for all clients.')),
]

# Options for the Barbican key-manager client.
barbican_client_opts = [
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for barbican "
                       "will not be verified.")), ]

# Note: this config is duplicated in many projects that use OpenStack
# clients. This should really be in the client.
# There is a place holder bug here:
# https://bugs.launchpad.net/solum/+bug/1292334
# that we use to track this.
glance_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Glance service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.'))]

# Options for the Heat orchestration client, including SSL material.
heat_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the OpenStack service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate will not "
                       "be verified."))]

# Options for the Zaqar queuing client.
zaqar_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Queue service catalog to use '
                   'for communication with the Zaqar service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for zaqar "
                       "will not be verified."))]

# Options for the Neutron networking client.
neutron_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Neutron service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('ca_cert',
               help=_('Optional CA bundle file to use in SSL connections.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for neutron "
                       "will not be verified."))]

# Options for the Swift object-store client.
swift_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Swift service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('cacert',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set the server certificate will not be verified."))]

# Options for the Mistral workflow client.
mistral_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the mistral service.')),
    cfg.StrOpt('region_name',
               default='',
               help=_(
                   'Region of endpoint in Identity service catalog to use.')),
    cfg.StrOpt('cacert',
               help=_('Optional CA cert file to use in SSL connections '
                      'with Mistral.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set the server certificate will not be verified "
                       "while using Mistral."))]

# Register every option group at import time so get_client_option() can read
# them via cfg.CONF.<service>_client.<option>.
cfg.CONF.register_opts(GLOBAL_CLIENT_OPTS)
cfg.CONF.register_opts(barbican_client_opts, group='barbican_client')
cfg.CONF.register_opts(glance_client_opts, group='glance_client')
cfg.CONF.register_opts(heat_client_opts, group='heat_client')
cfg.CONF.register_opts(zaqar_client_opts, group='zaqar_client')
cfg.CONF.register_opts(neutron_client_opts, group='neutron_client')
cfg.CONF.register_opts(swift_client_opts, group='swift_client')
cfg.CONF.register_opts(mistral_client_opts, group='mistral_client')
def get_client_option(client, option):
    """Read *option* from the ``<client>_client`` config group.

    For ``region_name`` an unset/empty per-client value falls back to the
    global region; every other option is returned as-is.
    """
    group = getattr(cfg.CONF, '%s_client' % client)
    value = getattr(group, option)
    if option != 'region_name':
        return value
    return value or cfg.CONF.get(option)
class OpenStackClients(object):
    """Lazily construct and cache OpenStack service clients for one context.

    Each accessor builds its client on first use and memoises it on the
    instance (except swift, which is rebuilt per call).
    """

    def __init__(self, context):
        self.context = context
        # Per-service client caches, filled lazily by the accessors below.
        self._barbican = None
        self._keystone = None
        self._glance = None
        self._heat = None
        self._neutron = None
        self._zaqar = None
        self._mistral = None

    def url_for(self, **kwargs):
        """Resolve a service endpoint URL from the Keystone catalog."""
        return self.keystone().client.service_catalog.url_for(**kwargs)

    @property
    def auth_url(self):
        """Keystone authentication endpoint."""
        return self.keystone().endpoint

    @property
    def auth_token(self):
        """Token from the request context, falling back to Keystone."""
        return self.context.auth_token or self.keystone().auth_token

    @exception.wrap_keystone_exception
    def barbican(self):
        """Return a (cached) Barbican key-manager client."""
        if not self._barbican:
            insecure = get_client_option('barbican', 'insecure')
            self._barbican = solum_barbicanclient.BarbicanClient(
                verify=not insecure)
        return self._barbican

    def keystone(self):
        """Return a (cached) Keystone identity client."""
        if not self._keystone:
            self._keystone = solum_keystoneclient.KeystoneClient(self.context)
        return self._keystone

    @exception.wrap_keystone_exception
    def zaqar(self):
        """Return a (cached) Zaqar queuing client."""
        if not self._zaqar:
            url = self.url_for(
                service_type='queuing',
                endpoint_type=get_client_option('zaqar', 'endpoint_type'),
                region_name=get_client_option('zaqar', 'region_name'))
            conf = {
                'auth_opts': {
                    'backend': 'keystone',
                    'options': {
                        'os_auth_token': self.auth_token,
                        'os_auth_url': self.auth_url,
                        'insecure': get_client_option('zaqar', 'insecure'),
                    },
                },
            }
            self._zaqar = zaqarclient.Client(url, conf=conf)
        return self._zaqar

    @exception.wrap_keystone_exception
    def neutron(self):
        """Return a (cached) Neutron networking client (API v2.0)."""
        if not self._neutron:
            endpoint_url = self.url_for(
                service_type='network',
                endpoint_type=get_client_option('neutron', 'endpoint_type'),
                region_name=get_client_option('neutron', 'region_name'))
            self._neutron = neutronclient.Client(
                '2.0',
                auth_url=self.auth_url,
                endpoint_url=endpoint_url,
                token=self.auth_token,
                # Token-based auth: no username/password is passed along.
                username=None,
                password=None,
                insecure=get_client_option('neutron', 'insecure'),
                ca_cert=get_client_option('neutron', 'ca_cert'))
        return self._neutron

    @exception.wrap_keystone_exception
    def glance(self):
        """Return a (cached) Glance image client (API v2)."""
        if not self._glance:
            endpoint = self.url_for(
                service_type='image',
                endpoint_type=get_client_option('glance', 'endpoint_type'),
                region_name=get_client_option('glance', 'region_name'))
            self._glance = glanceclient.Client('2', endpoint,
                                               token=self.auth_token)
        return self._glance

    @exception.wrap_keystone_exception
    def mistral(self):
        """Return a (cached) Mistral workflow client."""
        if not self._mistral:
            endpoint = self.url_for(
                service_type='workflow',
                endpoint_type=get_client_option('mistral', 'endpoint_type'),
                region_name=get_client_option('mistral', 'region_name'))
            self._mistral = mistralclient.client(mistral_url=endpoint,
                                                 auth_token=self.auth_token)
        return self._mistral

    @exception.wrap_keystone_exception
    def heat(self):
        """Return a (cached) Heat orchestration client (API v1)."""
        if not self._heat:
            endpoint = self.url_for(
                service_type='orchestration',
                endpoint_type=get_client_option('heat', 'endpoint_type'),
                region_name=get_client_option('heat', 'region_name'))
            self._heat = heatclient.Client(
                '1', endpoint,
                auth_url=self.auth_url,
                token=self.auth_token,
                username=None,
                password=None,
                ca_file=get_client_option('heat', 'ca_file'),
                cert_file=get_client_option('heat', 'cert_file'),
                key_file=get_client_option('heat', 'key_file'),
                insecure=get_client_option('heat', 'insecure'))
        return self._heat

    @exception.wrap_keystone_exception
    def swift(self):
        """Return a new Swift object-store connection.

        Not caching swift connections because of range requests;
        check how glance_store uses the swift client for a reference.
        """
        endpoint_type = get_client_option('swift', 'endpoint_type')
        region_name = get_client_option('swift', 'region_name')
        preauthurl = self.url_for(service_type='object-store',
                                  endpoint_type=endpoint_type,
                                  region_name=region_name)
        return swiftclient.Connection(
            auth_version='2.0',
            preauthtoken=self.auth_token,
            preauthurl=preauthurl,
            os_options={'endpoint_type': endpoint_type,
                        'region_name': region_name},
            cacert=get_client_option('swift', 'cacert'),
            insecure=get_client_option('swift', 'insecure'))
| 38.054217 | 79 | 0.593795 | 5,968 | 0.472376 | 0 | 0 | 5,255 | 0.415941 | 0 | 0 | 4,079 | 0.322859 |
d13da41b3a4f220e015d9e442ca5b4d723221a8a | 299 | py | Python | xml_to_text.py | EvanHahn/xml-to-text | 4c064e8df978a9f857045e44b6665ce6a5f6f1af | [
"Unlicense"
] | 1 | 2015-01-23T19:28:56.000Z | 2015-01-23T19:28:56.000Z | xml_to_text.py | EvanHahn/xml-to-text | 4c064e8df978a9f857045e44b6665ce6a5f6f1af | [
"Unlicense"
] | null | null | null | xml_to_text.py | EvanHahn/xml-to-text | 4c064e8df978a9f857045e44b6665ce6a5f6f1af | [
"Unlicense"
] | 1 | 2021-05-26T12:34:59.000Z | 2021-05-26T12:34:59.000Z | #!/usr/bin/env python
from sys import argv
from bs4 import BeautifulSoup
def xml_to_text(file):
    """Return the plain-text content of an XML/HTML document.

    ``file`` may be anything BeautifulSoup accepts (an open file object
    or a markup string).
    """
    soup = BeautifulSoup(file)
    return soup.get_text()
if __name__ == "__main__":
    if len(argv) < 2:
        # Single-argument print() form works on both Python 2 and 3.
        print("What file should I get plain text from?")
        exit(1)
    # Use a context manager so the input file is always closed, even if
    # parsing raises; the original leaked the handle from open().
    with open(argv[1]) as xml_file:
        print(xml_to_text(xml_file))
| 21.357143 | 55 | 0.672241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.240803 |
d13f8e89e4edf3148661b4734118e0ef97c5a8a6 | 832 | py | Python | lib/exabgp/rib/__init__.py | lochiiconnectivity/exabgp | 2cb8a99af89969ff4b0b5561de6168a18179b704 | [
"BSD-3-Clause"
] | null | null | null | lib/exabgp/rib/__init__.py | lochiiconnectivity/exabgp | 2cb8a99af89969ff4b0b5561de6168a18179b704 | [
"BSD-3-Clause"
] | null | null | null | lib/exabgp/rib/__init__.py | lochiiconnectivity/exabgp | 2cb8a99af89969ff4b0b5561de6168a18179b704 | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
"""
rib/__init__.py
Created by Thomas Mangin on 2010-01-15.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.rib.store import Store
class RIB (object):
    """Pair of incoming/outgoing route stores for one named peer."""

    # When a configuration reload is performed via SIGUSR, the RIB must not
    # be rebuilt from scratch: updates previously injected through the API
    # would be lost. RIBs are therefore cached by name and reused.
    _cache = {}

    def __init__ (self, name, adjribout, families):
        cached = self._cache.get(name)
        if cached is not None:
            # Reuse the stores from the previous incarnation of this peer.
            self.incoming = cached.incoming
            self.outgoing = cached.outgoing
            if adjribout:
                self.outgoing.resend(None, False)
            else:
                self.outgoing.clear()
        else:
            self.incoming = Store(families)
            self.outgoing = Store(families)
            self._cache[name] = self
        self.outgoing.cache = adjribout

    def reset (self):
        """Drop queued state in both directions."""
        self.incoming.reset()
        self.outgoing.reset()
| 23.111111 | 79 | 0.716346 | 652 | 0.783654 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.34976 |
d13fa1d7b304be490a8f724e164effbbbcceaf63 | 184 | py | Python | tests/issues/yubikey.py | wjlei1990/radical.saga | de022ea4fb29d95e8acffff8a68aa8648de807d4 | [
"MIT"
] | 12 | 2019-04-13T21:41:45.000Z | 2021-08-03T09:43:25.000Z | tests/issues/yubikey.py | wjlei1990/radical.saga | de022ea4fb29d95e8acffff8a68aa8648de807d4 | [
"MIT"
] | 103 | 2019-04-10T14:23:41.000Z | 2022-03-15T19:43:56.000Z | tests/issues/yubikey.py | wjlei1990/radical.saga | de022ea4fb29d95e8acffff8a68aa8648de807d4 | [
"MIT"
] | 7 | 2019-07-11T07:59:56.000Z | 2022-02-02T22:28:24.000Z |
import radical.saga as saga
# NOTE(review): appears to reproduce an ssh-authentication issue (yubikey);
# creating the job service triggers the actual connection attempt.
ctx = saga.Context('ssh')
ctx.user_id = 'dinesh'

session = saga.Session()
session.add_context(ctx)

js = saga.job.Service("lsf+ssh://yellowstone.ucar.edu", session=session)
| 15.333333 | 66 | 0.679348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.244565 |